repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
GalaxyDataset | GalaxyDataset-master/evaluationMetrics.py | 0 | 0 | 0 | py | |
GalaxyDataset | GalaxyDataset-master/preprocess.py | # -*- coding: utf-8 -*-
import torch
import torch.utils.data as Data
import numpy as np
from torchvision import datasets, transforms
import argparse
import os
import random
import yaml
import downloadData
def load_npy(path):
# npy file: [[imgs, label], [imgs, label]...., [imgs, label]]
# when allow_pickle=True, matrix needs same size
if not os.path.isfile(path):
print("files do not exists!!")
return
np_array = np.load(path, allow_pickle=True)
imgs = []
label = []
for index in range(len(np_array)):
imgs.append(np_array[index][0])
label.append(np_array[index][1])
torch_dataset = Data.TensorDataset(torch.from_numpy(np.array(imgs)), torch.from_numpy(np.array(label)))
train_loader = Data.DataLoader(
torch_dataset,
batch_size=64,
shuffle=True,
num_workers=1
)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('data', train=False, transform=transform_test),
batch_size=32,
shuffle=True,
num_workers=1
)
print("train_loader, test_loader generated succeed!")
return train_loader, test_loader
if __name__ == "__main__":
dataloder = load_npy("./cifar10/splitByLabelsWithNormalAndErrorDataset/SplitByLabels_3666_truck.npy") | 1,436 | 29.574468 | 107 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/custom.py |
from PIL import Image, ImageEnhance, ImageOps, ImageFilter
import numpy as np
import random
class GaussianBlur(object):
def __init__(self, sigma=None):
if sigma is None:
sigma = [.1, 2.]
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
class SubPolicy(object):
def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
ranges = {
"shearX": np.linspace(0, 0.3, 10),
"shearY": np.linspace(0, 0.3, 10),
"translateX": np.linspace(0, 150 / 331, 10),
"translateY": np.linspace(0, 150 / 331, 10),
"rotate": np.linspace(0, 30, 10),
"color": np.linspace(0.0, 0.9, 10),
"posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
"solarize": np.linspace(256, 0, 10),
"contrast": np.linspace(0.0, 0.9, 10),
"sharpness": np.linspace(0.0, 0.9, 10),
"brightness": np.linspace(0.0, 0.9, 10),
"autocontrast": [0] * 10,
"equalize": [0] * 10,
"invert": [0] * 10
}
# from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
def rotate_with_fill(img, magnitude):
rot = img.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)
func = {
"shearX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"shearY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"translateX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
fillcolor=fillcolor),
"translateY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
fillcolor=fillcolor),
"rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
"color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
"posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
"solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
"contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
"equalize": lambda img, magnitude: ImageOps.equalize(img),
"invert": lambda img, magnitude: ImageOps.invert(img)
}
self.p1 = p1
self.operation1 = func[operation1]
self.magnitude1 = ranges[operation1][magnitude_idx1]
self.p2 = p2
self.operation2 = func[operation2]
self.magnitude2 = ranges[operation2][magnitude_idx2]
def __call__(self, img):
if random.random() < self.p1: img = self.operation1(img, self.magnitude1)
if random.random() < self.p2: img = self.operation2(img, self.magnitude2)
return img
class CIFAR10Policy(object):
""" Randomly choose one of the best 25 Sub-policies on CIFAR10.
Example:
>>> policy = CIFAR10Policy()
>>> transformed = policy(image)
Example as a PyTorch Transform:
>>> transform=transforms.Compose([
>>> transforms.Resize(256),
>>> CIFAR10Policy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor),
SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor),
SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor),
SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor),
SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor),
SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor),
SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor),
SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor),
SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor),
SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor),
SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor),
SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor),
SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor),
SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor),
SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor),
SubPolicy(0.2, "equalize", 0, 0.6, "autocontrast", 0, fillcolor),
SubPolicy(0.2, "equalize", 8, 0.6, "equalize", 4, fillcolor),
SubPolicy(0.9, "color", 9, 0.6, "equalize", 6, fillcolor),
SubPolicy(0.8, "autocontrast", 4, 0.2, "solarize", 8, fillcolor),
SubPolicy(0.1, "brightness", 3, 0.7, "color", 0, fillcolor),
SubPolicy(0.4, "solarize", 5, 0.9, "autocontrast", 3, fillcolor),
SubPolicy(0.9, "translateY", 9, 0.7, "translateY", 9, fillcolor),
SubPolicy(0.9, "autocontrast", 2, 0.8, "solarize", 3, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.1, "invert", 3, fillcolor),
SubPolicy(0.7, "translateY", 9, 0.9, "autocontrast", 1, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment CIFAR10 Policy" | 6,520 | 48.030075 | 138 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/femnist.py | from torchvision.datasets import MNIST, utils
from PIL import Image
import os.path
import torch
class FEMNIST(MNIST):
"""
This dataset is derived from the Leaf repository
(https://github.com/TalwalkarLab/leaf) pre-processing of the Extended MNIST
dataset, grouping examples by writer. Details about Leaf were published in
"LEAF: A Benchmark for Federated Settings" https://arxiv.org/abs/1812.01097.
"""
resources = [
('https://raw.githubusercontent.com/tao-shen/FEMNIST_pytorch/master/femnist.tar.gz',
'59c65cec646fc57fe92d27d83afdf0ed')]
def __init__(self, root, train=True, transform=None, target_transform=None,
download=False):
super(MNIST, self).__init__(root, transform=transform,
target_transform=target_transform)
self.train = train
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets, self.users_index = torch.load(os.path.join(self.processed_folder, data_file))
# print(len(self.data))
# print(len(self.targets))
# print(len(self.users_index))
self.uid = []
i = 0
for n in self.users_index:
for j in range(n):
self.uid.append(i)
i += 1
# print(len(self.uid))
# def __getitem__(self, index):
# img, target = self.data[index], int(self.targets[index])
# img = Image.fromarray(img.numpy(), mode='F')
# if self.transform is not None:
# img = self.transform(img)
# if self.target_transform is not None:
# target = self.target_transform(target)
# return img, target
def __getitem__(self, index):
img, target, uid = self.data[index], int(self.targets[index]), int(self.uid[index])
img = Image.fromarray(img.numpy(), mode='F')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def download(self):
"""Download the FEMNIST data if it doesn't exist in processed_folder already."""
import shutil
if self._check_exists():
return
utils.makedir_exist_ok(self.raw_folder)
utils.makedir_exist_ok(self.processed_folder)
# download files
for url, md5 in self.resources:
filename = url.rpartition('/')[2]
utils.download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)
# process and save as torch files
print('Processing...')
shutil.move(os.path.join(self.raw_folder, self.training_file), self.processed_folder)
shutil.move(os.path.join(self.raw_folder, self.test_file), self.processed_folder)
| 3,128 | 36.25 | 110 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/usps.py | import scipy.io as scio
import numpy as np
from PIL import Image
import os
import os.path
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.datasets import MNIST, utils
from torch.utils.data import DataLoader, Dataset
# dataFile = 'usps_28x28.mat'
# data = scio.loadmat(dataFile)
# # for k in data.keys():
# # print(k)
# # __header__
# # __version__
# # __globals__
# # dataset
# dataset_training = data['dataset'][0]
# dataset_test = data['dataset'][1]
# # a = dataset_training[0] # data
# # print(type(a)) # numpy
# # print(len(a)) # 7438
# # print(len(a[0])) # 1
# # print(len(a[0][0])) # 28
# # print(len(a[0][0][0])) # 28
# # b = dataset_training[1] # targets
# # print(len(b)) # 7438
# # print(len(b[0])) # 1
# training_data = []
# for img in dataset_training[0]:
# img = img * 255
# img = img.tolist()
# temp = img[0]
# img.append(temp)
# img.append(temp)
# img = torch.Tensor(img)
# img = img.permute(1, 2, 0)
# # print(img.size()) # 28 28 3
# training_data.append(img)
# # print(len(temp))
# # print(len(temp[0]))
# # print(len(temp[0][0]))
# training_targets = []
# for label in dataset_training[1]:
# training_targets.append(label[0])
# # print(label[0])
# torch.save((training_data, training_targets), 'USPS/processed/training.pt')
# test_data = []
# for img in dataset_test[0]:
# img = img * 255
# img = img.tolist()
# temp = img[0]
# img.append(temp)
# img.append(temp)
# img = torch.Tensor(img)
# img = img.permute(1, 2, 0)
# # print(img.size()) # 28 28 3
# test_data.append(img)
# # print(len(temp))
# # print(len(temp[0]))
# # print(len(temp[0][0]))
# test_targets = []
# for label in dataset_test[1]:
# test_targets.append(label[0])
# # print(label[0])
# torch.save((test_data, test_targets), 'USPS/processed/test.pt')
class USPS(MNIST):
def __init__(self, *args, **kwargs):
super(USPS, self).__init__(*args, **kwargs)
def __getitem__(self, index):
# print('type: ',type(self.data))
# print('len: ',len(self.data))
img, target = self.data[index], int(self.targets[index])
# print('type of img: ', type(torch.Tensor(img)))
# print('type of img: ', type(img))
# print('img size', torch.Tensor(img).size())
# return a PIL Image
img = Image.fromarray(img.numpy().astype('uint8'), mode='RGB') # mode & permute
# print('img: ', img)
# print('img size', img.size())
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
# print(img.size())
return img, target
def digit_five_train_transforms():
all_transforms = transforms.Compose([
# transforms.RandomResizedCrop(28),
# ToPILImage
# transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
# transforms.RandomGrayscale(p=0.2),
# transforms.RandomAffine(degrees=15,
# translate=[0.1, 0.1],
# scale=[0.9, 1.1],
# shear=15),
transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])
return all_transforms
def digit_five_test_transforms():
all_transforms = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])
return all_transforms
class Loader(object):
def __init__(self, dataset_ident, file_path='', download=False, batch_size=128, train_transform=digit_five_train_transforms(), test_transform=digit_five_test_transforms(), target_transform=None, use_cuda=False):
kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
loader_map = {
# 'MNIST': MNIST,
# 'MNISTM': MNISTM,
# 'SVHN': SVHN,
# 'SYN': SYN,
'USPS': USPS,
# 'MNISTC': MNISTC,
}
num_class = {
# 'MNIST': 10,
# 'MNISTM': 10,
# 'SVHN': 10,
# 'SYN': 10,
'USPS': 10,
# 'MNISTC': 10,
}
# Get the datasets
self.train_dataset, self.test_dataset = self.get_dataset(loader_map[dataset_ident], file_path, download,
train_transform, test_transform, target_transform)
# Set the loaders
self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
tmp_batch = self.train_loader.__iter__().__next__()[0]
self.img_shape = list(tmp_batch.size())[1:]
self.num_class = num_class[dataset_ident]
@staticmethod
def get_dataset(dataset, file_path, download, train_transform, test_transform, target_transform):
# Training and Validation datasets
train_dataset = dataset(file_path, train=True, download=download,
transform=train_transform,
target_transform=target_transform)
test_dataset = dataset(file_path, train=False, download=download,
transform=test_transform,
target_transform=target_transform)
return train_dataset, test_dataset
# loader = Loader('USPS')
# dataset_train = loader.train_dataset
# img = dataset_train[50][0]
# print(dataset_train[50][1])
# img = img * 255
# print(img)
# img = Image.fromarray(np.array(img.permute(1, 2, 0)).astype('uint8'), mode='RGB')
# img.show()
| 5,890 | 30.502674 | 215 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/svhn.py | import scipy.io as scio
import numpy as np
from PIL import Image
import os
import os.path
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.datasets import MNIST, utils
from torch.utils.data import DataLoader, Dataset
# dataFile = 'svhn_train_32x32.mat'
# data = scio.loadmat(dataFile)
# # for k in data.keys():
# # print(k)
# # __header__
# # __version__
# # __globals__
# # X
# # y
# data['X'] = data['X'].transpose(3, 0, 1, 2)
# a = data['X']
# print(len(a)) # 73257
# print(len(a[0])) # 32
# print(len(a[0][0])) # 32
# print(len(a[0][0][0])) # 3
# training_data = []
# for img in data['X']:
# img = torch.from_numpy(img)
# training_data.append(img)
# training_targets = []
# for label in data['y']:
# if label[0] == 10:
# l = 0
# else:
# l = label[0]
# training_targets.append(l)
# torch.save((training_data, training_targets), 'SVHN/processed/training.pt')
# dataFile = 'svhn_test_32x32.mat'
# data = scio.loadmat(dataFile)
# # for k in data.keys():
# # print(k)
# # __header__
# # __version__
# # __globals__
# # X
# # y
# data['X'] = data['X'].transpose(3, 0, 1, 2)
# a = data['X']
# print(len(a)) # 26032
# print(len(a[0])) # 32
# print(len(a[0][0])) # 32
# print(len(a[0][0][0])) # 3
# test_data = []
# for img in data['X']:
# img = torch.from_numpy(img)
# test_data.append(img)
# test_targets = []
# for label in data['y']:
# if label[0] == 10:
# l = 0
# else:
# l = label[0]
# test_targets.append(l)
# torch.save((test_data, test_targets), 'SVHN/processed/test.pt')
class SVHN(MNIST):
def __init__(self, *args, **kwargs):
super(SVHN, self).__init__(*args, **kwargs)
def __getitem__(self, index):
# print('type: ',type(self.data))
# print('len: ',len(self.data))
img, target = self.data[index], int(self.targets[index])
# print('type of img: ', type(torch.Tensor(img)))
# print('img size', torch.Tensor(img).size())
# return a PIL Image
img = Image.fromarray(img.numpy().astype('uint8'), mode='RGB') # mode & permute
# print('img: ', img)
# print('img size', img.size())
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
# print(img.size())
return img, target
def digit_five_train_transforms():
all_transforms = transforms.Compose([
# transforms.RandomResizedCrop(28),
# transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
# transforms.RandomGrayscale(p=0.2),
# transforms.RandomAffine(degrees=15,
# translate=[0.1, 0.1],
# scale=[0.9, 1.1],
# shear=15),
transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])
return all_transforms
def digit_five_test_transforms():
all_transforms = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])
return all_transforms
class Loader(object):
def __init__(self, dataset_ident, file_path='', download=False, batch_size=128, train_transform=digit_five_train_transforms(), test_transform=digit_five_test_transforms(), target_transform=None, use_cuda=False):
kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
loader_map = {
# 'MNIST': MNIST,
# 'MNISTM': MNISTM,
'SVHN': SVHN,
# 'SYN': SYN,
# 'USPS': USPS,
# 'MNISTC': MNISTC,
}
num_class = {
# 'MNIST': 10,
# 'MNISTM': 10,
'SVHN': 10,
# 'SYN': 10,
# 'USPS': 10,
# 'MNISTC': 10,
}
# Get the datasets
self.train_dataset, self.test_dataset = self.get_dataset(loader_map[dataset_ident], file_path, download,
train_transform, test_transform, target_transform)
# Set the loaders
self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
tmp_batch = self.train_loader.__iter__().__next__()[0]
self.img_shape = list(tmp_batch.size())[1:]
self.num_class = num_class[dataset_ident]
@staticmethod
def get_dataset(dataset, file_path, download, train_transform, test_transform, target_transform):
# Training and Validation datasets
train_dataset = dataset(file_path, train=True, download=download,
transform=train_transform,
target_transform=target_transform)
test_dataset = dataset(file_path, train=False, download=download,
transform=test_transform,
target_transform=target_transform)
return train_dataset, test_dataset
# loader = Loader('SVHN')
# dataset_train = loader.train_dataset
# img = dataset_train[50][0]
# print(dataset_train[50][1])
# img = img * 255
# img = Image.fromarray(np.array(img.permute(1, 2, 0)).astype('uint8'), mode='RGB')
# img.show()
# dataset_training = torch.load('MNISTM/processed/training.pt')
# img = dataset_training[0][0]
# print(img.size())
# img = Image.fromarray(np.array(img).astype('uint8'), mode='RGB')
# img.show()
| 5,699 | 29.15873 | 215 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/syn.py | import scipy.io as scio
import numpy as np
from PIL import Image
import os
import os.path
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.datasets import MNIST, utils
from torch.utils.data import DataLoader, Dataset
# dataFile = 'syn_number.mat'
# data = scio.loadmat(dataFile)
# for k in data.keys():
# print(k)
# __header__
# __version__
# __globals__
# test_data
# test_label
# train_data
# train_label
# # data['train_data'] = data['train_data'].transpose(0, 3, 1, 2)
# a = data['train_data']
# print(len(a)) # 25000
# print(len(a[0])) # 32
# print(len(a[0][0])) # 32
# print(len(a[0][0][0])) # 3
# training_data = []
# for img in data['train_data']:
# img = torch.from_numpy(img).int()
# training_data.append(img)
# training_targets = []
# for label in data['train_label']:
# l = label[0]
# training_targets.append(l)
# torch.save((training_data, training_targets), 'SYN/processed/training.pt')
# a = data['test_data']
# print(len(a)) # 9000
# print(len(a[0])) # 3
# print(len(a[0][0])) # 32
# print(len(a[0][0][0])) # 32
# test_data = []
# for img in data['test_data']:
# img = torch.from_numpy(img).int()
# test_data.append(img)
# test_targets = []
# for label in data['test_label']:
# l = label[0]
# test_targets.append(l)
# torch.save((test_data, test_targets), 'SYN/processed/test.pt')
class SYN(MNIST):
def __init__(self, *args, **kwargs):
super(SYN, self).__init__(*args, **kwargs)
def __getitem__(self, index):
# print('type: ',type(self.data))
# print('len: ',len(self.data))
img, target = self.data[index], int(self.targets[index])
# print('type of img: ', type(torch.Tensor(img)))
# print('img size', torch.Tensor(img).size())
# return a PIL Image
img = Image.fromarray(img.numpy().astype('uint8'), mode='RGB') # mode & permute
# print('img: ', img)
# print('img size', img.size())
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
# print(img.size())
return img, target
def digit_five_train_transforms():
all_transforms = transforms.Compose([
# transforms.RandomResizedCrop(28),
# transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
# transforms.RandomGrayscale(p=0.2),
# transforms.RandomAffine(degrees=15,
# translate=[0.1, 0.1],
# scale=[0.9, 1.1],
# shear=15),
transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])
return all_transforms
def digit_five_test_transforms():
all_transforms = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])
return all_transforms
class Loader(object):
def __init__(self, dataset_ident, file_path='', download=False, batch_size=128, train_transform=digit_five_train_transforms(), test_transform=digit_five_test_transforms(), target_transform=None, use_cuda=False):
kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
loader_map = {
# 'MNIST': MNIST,
# 'MNISTM': MNISTM,
# 'SVHN': SVHN,
'SYN': SYN,
# 'USPS': USPS,
# 'MNISTC': MNISTC,
}
num_class = {
# 'MNIST': 10,
# 'MNISTM': 10,
# 'SVHN': 10,
'SYN': 10,
# 'USPS': 10,
# 'MNISTC': 10,
}
# Get the datasets
self.train_dataset, self.test_dataset = self.get_dataset(loader_map[dataset_ident], file_path, download,
train_transform, test_transform, target_transform)
# Set the loaders
self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
tmp_batch = self.train_loader.__iter__().__next__()[0]
self.img_shape = list(tmp_batch.size())[1:]
self.num_class = num_class[dataset_ident]
@staticmethod
def get_dataset(dataset, file_path, download, train_transform, test_transform, target_transform):
# Training and Validation datasets
train_dataset = dataset(file_path, train=True, download=download,
transform=train_transform,
target_transform=target_transform)
test_dataset = dataset(file_path, train=False, download=download,
transform=test_transform,
target_transform=target_transform)
return train_dataset, test_dataset
# loader = Loader('SYN')
# dataset_train = loader.train_dataset
# img = dataset_train[40][0]
# print(dataset_train[40][1])
# img = img * 255
# img = Image.fromarray(np.array(img.permute(1, 2, 0)).astype('uint8'), mode='RGB')
# img.show()
# dataset_training = torch.load('MNISTM/processed/training.pt')
# img = dataset_training[0][0]
# print(img.size())
# img = Image.fromarray(np.array(img).astype('uint8'), mode='RGB')
# img.show()
| 5,471 | 30.630058 | 215 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/mnistm.py | import scipy.io as scio
import numpy as np
from PIL import Image
import os
import os.path
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.datasets import MNIST, utils
from torch.utils.data import DataLoader, Dataset
# dataFile = 'mnistm_with_label.mat'
# data = scio.loadmat(dataFile)
# for k in data.keys():
# print(k)
# __header__
# __version__
# __globals__
# label_test
# label_train
# test
# train
# img = data['train'][4]
# # img = Image.fromarray(img, mode='RGB')
# # img.show()
# img = torch.from_numpy(img)
# print(img)
# label = np.argmax(data['label_train'][4])
# print(label)
# training_data = []
# for img in data['train']:
# img = torch.from_numpy(img).int()
# # print(img)
# training_data.append(img)
# img = training_data[0]
# print(type(img))
# img = img.numpy().astype('uint8')
# img = Image.fromarray(img, mode='RGB')
# img.show()
# training_targets = []
# for label in data['label_train']:
# l = np.argmax(np.array(label))
# training_targets.append(l)
# print(training_targets[0])
# torch.save((training_data, training_targets), 'MNISTM/processed/training.pt')
# test_data = []
# for img in data['test']:
# img = torch.from_numpy(img).int()
# test_data.append(img)
# test_targets = []
# for label in data['label_test']:
# l = np.argmax(np.array(label))
# test_targets.append(l)
# torch.save((test_data, test_targets), 'MNISTM/processed/test.pt')
class MNISTM(MNIST):
def __init__(self, *args, **kwargs):
super(MNISTM, self).__init__(*args, **kwargs)
def __getitem__(self, index):
# print('type: ',type(self.data))
# print('len: ',len(self.data))
img, target = self.data[index], int(self.targets[index])
# print('type of img: ', type(torch.Tensor(img)))
# print('img size', torch.Tensor(img).size())
# return a PIL Image
img = Image.fromarray(img.numpy().astype('uint8'), mode='RGB') # mode & permute
# print('img: ', img)
# print('img size', img.size())
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
# print(img.size())
return img, target
def digit_five_train_transforms():
all_transforms = transforms.Compose([
# transforms.RandomResizedCrop(28),
# transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
# transforms.RandomGrayscale(p=0.2),
# transforms.RandomAffine(degrees=15,
# translate=[0.1, 0.1],
# scale=[0.9, 1.1],
# shear=15),
transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])
return all_transforms
def digit_five_test_transforms():
all_transforms = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])
return all_transforms
class Loader(object):
def __init__(self, dataset_ident, file_path='', download=False, batch_size=128, train_transform=digit_five_train_transforms(), test_transform=digit_five_test_transforms(), target_transform=None, use_cuda=False):
kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
loader_map = {
# 'MNIST': MNIST,
'MNISTM': MNISTM,
# 'SVHN': SVHN,
# 'SYN': SYN,
# 'USPS': USPS,
# 'MNISTC': MNISTC,
}
num_class = {
# 'MNIST': 10,
'MNISTM': 10,
# 'SVHN': 10,
# 'SYN': 10,
# 'USPS': 10,
# 'MNISTC': 10,
}
# Get the datasets
self.train_dataset, self.test_dataset = self.get_dataset(loader_map[dataset_ident], file_path, download,
train_transform, test_transform, target_transform)
# Set the loaders
self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
tmp_batch = self.train_loader.__iter__().__next__()[0]
self.img_shape = list(tmp_batch.size())[1:]
self.num_class = num_class[dataset_ident]
@staticmethod
def get_dataset(dataset, file_path, download, train_transform, test_transform, target_transform):
# Training and Validation datasets
train_dataset = dataset(file_path, train=True, download=download,
transform=train_transform,
target_transform=target_transform)
test_dataset = dataset(file_path, train=False, download=download,
transform=test_transform,
target_transform=target_transform)
return train_dataset, test_dataset
# loader = Loader('MNISTM')
# dataset_train = loader.train_dataset
# img = dataset_train[40][0]
# print(dataset_train[40][1])
# img = img * 255
# img = Image.fromarray(np.array(img.permute(1, 2, 0)).astype('uint8'), mode='RGB')
# img.show()
# dataset_training = torch.load('MNISTM/processed/training.pt')
# img = dataset_training[0][0]
# print(img.size())
# img = Image.fromarray(np.array(img).astype('uint8'), mode='RGB')
# img.show()
| 5,512 | 29.97191 | 215 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/SubPolicy.py | class SubPolicy(object):
def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
ranges = {
"shearX": np.linspace(0, 0.3, 10),
"shearY": np.linspace(0, 0.3, 10),
"translateX": np.linspace(0, 150 / 331, 10),
"translateY": np.linspace(0, 150 / 331, 10),
"rotate": np.linspace(0, 30, 10),
"color": np.linspace(0.0, 0.9, 10),
"posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
"solarize": np.linspace(256, 0, 10),
"contrast": np.linspace(0.0, 0.9, 10),
"sharpness": np.linspace(0.0, 0.9, 10),
"brightness": np.linspace(0.0, 0.9, 10),
"autocontrast": [0] * 10,
"equalize": [0] * 10,
"invert": [0] * 10
}
# from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
def rotate_with_fill(img, magnitude):
rot = img.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)
func = {
"shearX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"shearY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"translateX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
fillcolor=fillcolor),
"translateY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
fillcolor=fillcolor), | 2,048 | 54.378378 | 138 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/mnist.py | import scipy.io as scio
import numpy as np
from PIL import Image
import os
import os.path
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.datasets import MNIST, utils
from torch.utils.data import DataLoader, Dataset
# dataFile = 'mnist_data.mat'
# data = scio.loadmat(dataFile)
# for k in data.keys():
# print(k)
# __header__
# __version__
# __globals__
# test_32
# test_28
# label_test
# label_train
# train_32
# train_28
# img = data['train_28'][0]
# img = torch.from_numpy(img)
# print(img) # torch.Size([28, 28, 1])
# training_data = []
# for img in data['train_28']:
# img = img.transpose(2, 0, 1)
# img = img.tolist()
# temp = img[0]
# img.append(temp)
# img.append(temp)
# img = torch.Tensor(img)
# img = img.permute(1, 2, 0)
# training_data.append(img)
# img = training_data[0]
# # img = Image.fromarray(img.numpy(), mode='RGB') # black
# img = Image.fromarray(img.numpy().astype('uint8'), mode='RGB') # astype('uint8')
# img.show()
# training_targets = []
# for label in data['label_train']:
# l = np.argmax(np.array(label))
# training_targets.append(l)
# torch.save((training_data, training_targets), 'MNIST/processed/training.pt')
# test_data = []
# for img in data['test_28']:
# img = img.transpose(2, 0, 1)
# img = img.tolist()
# temp = img[0]
# img.append(temp)
# img.append(temp)
# img = torch.Tensor(img)
# img = img.permute(1, 2, 0)
# test_data.append(img)
# test_targets = []
# for label in data['label_test']:
# l = np.argmax(np.array(label))
# test_targets.append(l)
# torch.save((test_data, test_targets), 'MNIST/processed/test.pt')
# img = test_data[0]
# # img = Image.fromarray(img.numpy(), mode='RGB') # black
# img = Image.fromarray(img.numpy().astype('uint8'), mode='RGB') # astype('uint8')
# img.show()
class MNIST(MNIST):
def __init__(self, *args, **kwargs):
super(MNIST, self).__init__(*args, **kwargs)
def __getitem__(self, index):
# print('type: ',type(self.data))
# print('len: ',len(self.data))
img, target = self.data[index], int(self.targets[index])
# print('type of img: ', type(torch.Tensor(img)))
# print('img size', torch.Tensor(img).size())
# return a PIL Image
img = Image.fromarray(img.numpy().astype('uint8'), mode='RGB') # mode & permute
# print('img: ', img)
# print('img size', img.size())
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
# print(img.size())
return img, target
def digit_five_train_transforms():
all_transforms = transforms.Compose([
# transforms.RandomResizedCrop(28),
# ToPILImage
# transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
# transforms.RandomGrayscale(p=0.2),
# transforms.RandomAffine(degrees=15,
# translate=[0.1, 0.1],
# scale=[0.9, 1.1],
# shear=15),
transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])
return all_transforms
def digit_five_test_transforms():
all_transforms = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])
return all_transforms
class Loader(object):
def __init__(self, dataset_ident, file_path='', download=False, batch_size=128, train_transform=digit_five_train_transforms(), test_transform=digit_five_test_transforms(), target_transform=None, use_cuda=False):
kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
loader_map = {
'MNIST': MNIST,
# 'MNISTM': MNISTM,
# 'SVHN': SVHN,
# 'SYN': SYN,
# 'USPS': USPS,
# 'MNISTC': MNISTC,
}
num_class = {
'MNIST': 10,
# 'MNISTM': 10,
# 'SVHN': 10,
# 'SYN': 10,
# 'USPS': 10,
# 'MNISTC': 10,
}
# Get the datasets
self.train_dataset, self.test_dataset = self.get_dataset(loader_map[dataset_ident], file_path, download,
train_transform, test_transform, target_transform)
# Set the loaders
self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
tmp_batch = self.train_loader.__iter__().__next__()[0]
self.img_shape = list(tmp_batch.size())[1:]
self.num_class = num_class[dataset_ident]
@staticmethod
def get_dataset(dataset, file_path, download, train_transform, test_transform, target_transform):
# Training and Validation datasets
train_dataset = dataset(file_path, train=True, download=download,
transform=train_transform,
target_transform=target_transform)
test_dataset = dataset(file_path, train=False, download=download,
transform=test_transform,
target_transform=target_transform)
return train_dataset, test_dataset
# loader = Loader('MNIST')
# dataset_train = loader.train_dataset
# img = dataset_train[5][0]
# print(dataset_train[5][1])
# print(img)
# img = img * 255
# img = Image.fromarray(np.array(img.permute(1, 2, 0)).astype('uint8'), mode='RGB')
# img.show()
| 5,753 | 30.966667 | 215 | py |
skimulator | skimulator-master/setup.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
"""Build and install the SKIM Simulator for Ocean Science package."""
import os
import sys
import errno
import shutil
import logging
from setuptools import setup
logger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# Check Python version
if not 3 == sys.version_info[0]:
logger.error('This package is only available for Python 3.x')
sys.exit(1)
__package_name__ = 'skimulator'
project_dir = os.path.dirname(__file__)
git_exe = shutil.which('git')
git_dir = os.path.join(project_dir, '.git')
has_git = (git_exe is not None and os.path.isdir(git_dir))
readme_file = os.path.join(project_dir, 'README')
package_dir = os.path.join(project_dir, __package_name__)
init_file = os.path.join(package_dir, '__init__.py')
share_dir = os.path.join(package_dir, 'share')
version_file = os.path.join(share_dir, 'VERSION.txt')
# Regenerate a version file from git history
if has_git:
import subprocess
import datetime
_githash = (git_exe, 'rev-parse', '--short', 'HEAD')
githash = subprocess.check_output(_githash).decode('utf-8').strip()
gitrev = (git_exe, 'rev-list', 'HEAD', '--count')
commits = subprocess.check_output(gitrev).decode('utf-8').strip()
# Note: this block could be replaced by
# os.makedirs(share_dir, exist_ok=True)
# if support for Python < 3.5 is dropped
if not os.path.isdir(share_dir):
try:
os.makedirs(share_dir)
except OSError:
_, e, _ = sys.exc_info()
if e.errno == errno.EEXIST:
pass
with open(version_file, 'wt') as f:
f.write('{}\n'.format(commits))
f.write('{}\n'.format(githash))
f.write('{}\n'.format(datetime.datetime.utcnow().strftime('%Y-%m-%d')))
# - Read in the package version and author fields from the Python
# main __init__.py file:
#
# IMPORTANT: this must be done AFTER trying to generate the VERSION.txt file
# as the __version__ variable contained in the __init__.py will not only have
# the major and minor, but also the commit number if VERSION.txt exists.
metadata = {}
with open(init_file, 'rt') as f:
exec(f.read(), metadata)
requirements = []
with open('requirements.txt', 'r') as f:
lines = [x.strip() for x in f if 0 < len(x.strip())]
requirements = [x for x in lines if x[0].isalpha()]
with open(readme_file, 'rt') as f:
long_description = f.read()
optional_dependencies = {'plot': ['matplotlib', ], 'carto': ['matplotlib',
'cartopy'], 'numba': ['numba', ],}
cmds = ['skimul2b = {}.cli:run_script'.format(__package_name__),
'skimul2c = {}.cli:run_l2c'.format(__package_name__),
'skimul2d = {}.cli:run_l2d'.format(__package_name__),
'interpl2d = {}.cli:interpolate_l2d'.format(__package_name__),
]
# VERSION.txt must be added to the package if the file has been generated
pkg_data = None
pkg_data = {__package_name__: ['share/coeff.npy',
'share/coeffr.npy',
'share/Spline_128_64_TED_TAS_6_DEG.npy',
'share/Spline_128_64_TED_TAS_12_DEG.npy',
'share/Spline_128_64_TED_CB_12_DEG.npy',
'share/Spline_128_64_TED_CB_6_DEG.npy',
'share/noise_pdf_mss1d.npy',
]}
if os.path.exists(version_file):
pkg_data[__package_name__].append('share/VERSION.txt')
setup(name=__package_name__,
version=metadata['__version__'],
description=metadata['__description__'],
author=metadata['__author__'],
author_email=metadata['__author_email__'],
url=metadata['__url__'],
license='COPYING',
keywords=metadata['__keywords__'],
long_description=long_description,
packages=(__package_name__, f'{__package_name__}.error'),
install_requires=requirements,
setup_require=(),
entry_points={'console_scripts': cmds},
extras_require=optional_dependencies,
package_data=pkg_data,
)
| 4,810 | 36.007692 | 79 | py |
skimulator | skimulator-master/test/projskim.py | import os
import numpy
import healpy as heal
from numpy import linalg as LA
from netCDF4 import Dataset
import params as p
import skimulator.const as const
import skimulator.rw_data as rw
import matplotlib.pyplot as plt
theta1 = const.theta1
theta0 = const.theta0
gamma0 = const.gamma0
# - In parameter file ## TODO -
# Number of pixel (resolution for healpix)
nside = 256
# Number of diamonds for healpix
ndiam = 12
ntotpixel = nside * nside * ndiam
# Conditionning threshold
thresh_cond = 10
# List of pass
swathtab = [40] #,'p025','p042','p053','p055']
#
cycle = 1
plot_res = 10
gamma = numpy.array(p.list_angle)
ind_12 = numpy.where(gamma == 12)
ind_6 = numpy.where(gamma == 6)
gamma = numpy.deg2rad(gamma)
n_ind_12 = len(ind_12)
n_ind_6 = len(ind_6)
path = os.path.join(p.outdatadir, '{}_c{:02d}'.format(p.config, cycle))
gpath = os.path.join(p.outdatadir, '{}_grid'.format(p.config))
#heal.mollview(np.arange(12*64*64))
#plt.show()
# rebin variance to beam
sig1 = 1E-2*numpy.loadtxt(p.rms_instr[ind_12[0][0]], usecols=(0, 1), unpack=True)
sig2 = 1E-2*numpy.loadtxt(p.rms_instr[ind_6[0][0]], usecols=(0, 1), unpack=True)
sig = numpy.zeros([len(gamma), 360])
for i in ind_12[0]:
sig[i, : 180] = sig2[1, : 180]
sig[i, 180 + numpy.arange(180)] = sig2[1, 180 - numpy.arange(180)]
for i in ind_6[0]:
sig[i, 0: 180] = sig1[1, 0: 180]
sig[i, 180 + numpy.arange(180)] = sig1[1, 180 - numpy.arange(180)]
#===== table to store image inversion
im = numpy.zeros((ntotpixel, 3))
nim = numpy.zeros((ntotpixel))
cov = numpy.zeros((ntotpixel, 2, 2))
cov2 = numpy.zeros([ntotpixel, 2, 2])
vec = numpy.zeros([ntotpixel, 2])
vec2 = numpy.zeros([ntotpixel, 2])
vec3 = numpy.zeros([ntotpixel, 2])
vecdop = numpy.zeros([3, ntotpixel, 2])
for swath in swathtab:
data = rw.Sat_SKIM(ifile='{}_p{:03d}.nc'.format(path, swath))
grid = rw.Sat_SKIM(ifile='{}_p{:03d}.nc'.format(gpath, swath))
data.load_data(p, ur_model=[], ur_obs=[], instr=[], u_model=[], v_model=[],
lon_nadir=[], lat_nadir=[], time_nadir=[], lon=[], lat=[])
grid.load_swath(p, radial_angle=[])
ur = data.ur_model
uro = data.ur_obs
noise = data.instr
u = data.u_model
v = data.v_model
rangle = numpy.mod(grid.radial_angle, numpy.pi *2)
lon = data.lon
lat = data.lat
ndata, nbeam = ur.shape
# COMPUTE SIGMA AT EACH SAMPLE
sigma = numpy.zeros(ur.shape)
arctan_rangle = numpy.arctan(numpy.sin(rangle + numpy.pi/2),
numpy.cos(rangle + numpy.pi/2))
_tmp = numpy.rad2deg(numpy.mod(arctan_rangle + 2 * numpy.pi,
2 * numpy.pi))
for i in range(len(gamma)):
sigma[:, i] = sig[i, (_tmp[:, i]).astype(int)]
#COMPUTE ORBITAL SPEED FROM NADIR INFORMATION
# TODO MANAGE BUG OF -180 to +180 TODOTODOTODO
lat_nadir = data.lat_nadir
lon_nadir = data.lon_nadir
time_nadir = data.time_nadir
time_nadir = time_nadir - time_nadir[0]
latpar = numpy.polyfit(time_nadir, lat_nadir, 3)
lonpar = numpy.polyfit(time_nadir, lon_nadir, 3)
dlatorb = latpar[2] + 2*time_nadir*latpar[1] + 3*time_nadir**2*latpar[0]
dlonorb = lonpar[2] + 2*time_nadir*lonpar[1] + 3*time_nadir**2*lonpar[0]
#altiorb=793.0
alti_total = const.Rearth + const.sat_elev
usat = (numpy.deg2rad(dlonorb) * numpy.cos(numpy.deg2rad(lat_nadir))
* alti_total)
vsat = numpy.deg2rad(dlatorb) * alti_total
dtheta = theta1 - gamma0 * numpy.sin(rangle - theta0)
dgamma = gamma0 * numpy.cos(rangle - theta0)
# nongeophysical doppler to add to measure
gamma_mat = numpy.array([gamma,] * ndata)
ddop = numpy.zeros(ur.shape)
usat = numpy.array([usat, ]*len(gamma)).transpose()
vsat = numpy.array([vsat, ]*len(gamma)).transpose()
ddop1 = dtheta * (usat * numpy.sin(rangle) + vsat * numpy.cos(rangle))
ddop2 = dgamma * numpy.cos(gamma_mat) * (usat * numpy.cos(rangle)
+ vsat * numpy.sin(rangle))
ddop = ddop1 + ddop2
tdop=numpy.zeros((ndata, nbeam, 3))
tdop[:, :, 0] = usat * numpy.sin(rangle) + vsat * numpy.cos(rangle)
tdop[:, :, 1] = (-numpy.sin(rangle) * (usat*numpy.sin(rangle)
+ vsat * numpy.cos(rangle)) + numpy.cos(rangle)
* numpy.cos(gamma_mat) * (usat*numpy.cos(rangle)
+ vsat * numpy.sin(rangle)))
tdop[:, :, 2] = (numpy.cos(rangle) * (usat * numpy.sin(rangle) + vsat
* numpy.cos(rangle)) + numpy.sin(rangle)
* numpy.cos(gamma_mat) * (usat * numpy.cos(rangle)
+ vsat * numpy.sin(rangle)))
# DEBUG PLOT
#plt.plot(ddop-(tdop[:,:,0]*theta1+tdop[:,:,1]*gamma0*np.cos(theta0)+tdop[:,:,2]*gamma0*np.sin(theta0)))
#plt.show()
uro = ddop + uro
ww = 1 / sigma**2
ph = 2 * numpy.pi - numpy.deg2rad(lon)
th = numpy.pi / 2 - numpy.deg2rad(lat)
pidx = heal.ang2pix(nside, th, ph)
for i in range(nbeam):
for j in range(ndata):
if ur[j, i] > -1E9:
ip = pidx[j,i]
# compute imulated model
im[ip, 1] += u[j, i]
im[ip, 2] += v[j,i]
nim[ip] += 1
# compute covariance(s) model
co = numpy.cos(rangle[j,i])
si = numpy.sin(rangle[j,i])
w = ww[j,i]
cov[ip, 0, 0] += co * co
cov[ip, 1, 0] += si * co
cov[ip, 0, 1] += si * co
cov[ip, 1, 1] += si * si
cov2[ip, 0, 0] += w * co * co
cov2[ip, 1, 0] += w * si * co
cov2[ip, 0, 1] += w * si * co
cov2[ip, 1, 1] += w * si * si
# compute data vector model
vec[ip, 0] += co * ur[j,i]
vec[ip, 1] += si * ur[j,i]
# compute data noise vector model
vec2[ip, 0] += w* co * uro[j,i]
vec2[ip, 1] += w * si * uro[j,i]
# compute doppler projection
for k in range(3):
vecdop[k, ip, 0] += w * co * tdop[j,i,k]
vecdop[k, ip, 1] += w * si * tdop[j,i,k]
rim = 0 * im
rim2 = 0 * im
mask = numpy.zeros([ntotpixel])
rimdop = numpy.zeros([3, ntotpixel, 2])
for i in range(ntotpixel):
if cov2[i, 0, 0] > 0:
mask[i] = LA.cond(cov2[i, :, :])
if mask[i] < thresh_cond:
rim[i, 1: 3] = LA.solve(cov[i, :, :],vec[i, :])
rim2[i, 1: 3] = LA.solve(cov2[i, :, :],vec2[i, :])
for k in range(3):
rimdop[k, i, :] = LA.solve(cov2[i, :, :], vecdop[k, i, :])
im[i, 0] = numpy.sqrt((im[i, 1]/nim[i])**2 + (im[i, 2]/nim[i])**2)
rim[i, 0] = numpy.sqrt(rim[i, 1]**2 + rim[i, 2]**2)
rim2[i, 0] = numpy.sqrt(rim2[i, 1]**2 + rim2[i, 2]**2)
mat=numpy.zeros([3,3])
vres=numpy.zeros([3])
for i in range(nbeam):
for j in range(ndata):
ip = pidx[j,i]
if mask[ip] < thresh_cond and ur[j, i] > -1E9:
co = numpy.cos(rangle[j, i])
si = numpy.sin(rangle[j, i])
w = ww[j, i]
vv = uro[j, i] - rim2[ip, 1] * co - rim2[ip, 2] * si
tvv = numpy.zeros((3,))
for k in range(3):
tvv[k] = (tdop[j, i, k] - rimdop[k, ip, 0] * co
- rimdop[k, ip, 1] * si)
for k in range(3):
for l in range(3):
mat[k, l] += w * tvv[k] * tvv[l]
for k in range(3):
vres[k] += w * tvv[k] * vv
rr = LA.solve(mat, vres)
print(rr)
#[yo, peach, roll] = rr
#print(rr[0], numpy.arctan(rr[1], rr[2]), numpy.sqrt(rr[1]**2 + rr[2]**2))
rot=[numpy.rad2deg(numpy.mean(ph)), numpy.rad2deg(numpy.pi/2 - numpy.mean(th))]
residu = (rr simulé - rr) . tdop[i, j, :]
u_residu_alt = cos(azimuth) * residu
v_residu_alt = sin(azimuth) * residu
ur.
heal.gnomview(mask,rot=rot,reso=plot_res * 3,title='CONDITIONING',max=thresh_cond)
#plt.pcolormesh()
plt.savefig('{}_conditioning.png'.format(p.config))
heal.gnomview(im[:,0],rot=rot, reso=plot_res, title='MODEL',xsize=600)
plt.savefig('{}_model.png'.format(p.config))
heal.gnomview(rim[:,0],rot=rot,reso=plot_res,title='INVERSE',xsize=600)
plt.savefig('{}_inverse.png'.format(p.config))
heal.gnomview(rim2[:,0],rot=rot,reso=plot_res,title='INVERSE WITH NOISE+DOPPLER',xsize=600)
plt.savefig('{}_inverse_noise.png'.format(p.config))
| 8,525 | 32.046512 | 108 | py |
skimulator | skimulator-master/test/params2.py | # -----------------------#
# Files and directories
# -----------------------#
## -- Get the user home directory
from os.path import expanduser
import os
import math
home = expanduser("~") + '/src/'
# ------ Directory that contains orbit file:
dir_setup = os.path.join(home, 'skimulator', 'data')
# ------ Directory that contains your own inputs:
indatadir = os.path.join(home, 'skimulator', 'example', 'input_fields')
indatadir = '/mnt/data/model/ww3_fram/' #netcdf3/'
# ------ Directory that contains your outputs:
outdatadir = os.path.join(home, 'skimulator', 'example', 'skim_output')
outdatadir = os.path.join('/mnt/data/project/skim/', 'skim_output')
# ------ Orbit file:
#filesat = os.path.join(dir_setup,'orbs1a.txt')
filesat = os.path.join(dir_setup,'orbits1_ifremer')
# , dir_setup+os.sep+'orbjason.txt', dir_setup+os.sep+'orbaltika.txt' ]
# ------ Name of the configuration (to build output files names)
config="WW3_FRAM_8b105az"
proc_number = 4
# -----------------------#
# SKIM swath parameters
# -----------------------#
# ------ Satellite grid file root name:
# (Final file name is root_name_[numberofpass].nc)
filesgrid = os.path.join(outdatadir, '{}_grid'.format(config))
# ------ Force the computation of the satellite grid:
makesgrid = True
# ------ Give a subdomain if only part of the model is needed:
# (modelbox=[lon_min, lon_max, lat_min, lat_max])
# (If modelbox is None, the whole domain of the model is considered)
modelbox = [335,23,73,85.00024]
#------- Rotation speed of the antenna (in tr/min)
rotation_speed = 5.20833
#------- List of position of beams:
list_pos = (0, 72*math.pi/180., 144*math.pi/180., 216*math.pi / 180.,
288*math.pi/180., 0, math.pi)
#------- List of angle of beams in degrees:
list_angle = (12, 12, 12, 12, 12, 6, 6)
#------- List of timeshift as regard to nadir for 12 degree beams:
list_shift = (1, 2, 4, 5, 7, 3, 6)
#------- Cycle duration
cycle = 0.0096
# ------ Shift longitude of the orbit file if no pass is in the domain
# (in degree): Default value is None (no shift)
shift_lon = 0
# ------ Shift time of the satellite pass (in day):
# Default value is None (no shift)
shift_time = None
# -----------------------#
# Model input parameters
# -----------------------#
# ------ List of model files:
# (The first file contains the grid and is not considered as model data)
# To generate the noise alone, file_input=None and specify region
# in modelbox
file_input = os.path.join(indatadir, 'list_of_file.txt')
# ------ Type of model data:
# (Optional, default is NETCDF_MODEL and reads netcdf3 and netcdf4 files)
# (Other options are ROMS, NEMO and CLS to read Nemo, roms or CLS)
model = 'WW3'
# ------ Type of grid:
# 'regular' or 'irregular', if 'regular' only 1d coordinates
# are extracted from model
grid = 'regular'
# ------ Specify velocities variable:
varu = 'ucur'
varv = 'vcur'
# ------ Specify factor to convert velocity values in m/s:
vel_factor = 1.
# ------ Specify longitude variable:
lonu = 'longitude'
lonv = 'longitude'
# ------ Specify latitude variable:
latu = 'latitude'
latv = 'latitude'
# ------ Specify number of time in file:
dim_time = (720,)
# ------ Time step between two model outputs (in days):
timestep = 1/24.
# ------ Number of outputs to consider:
# (timestep*nstep=total number of days)
nstep = 719.
# ------ Not a number value:
model_nan = -32767.
# -----------------------#
# SKIM output files
# -----------------------#
# ------ Output file root name:
# (Final file name is root_name_c[cycle].nc
file_output = os.path.join(outdatadir, config)
# ------ Interpolation of the SSH from the model (if grid is irregular and
# pyresample is not installed:
# (either 'linear' or 'nearest', use 'nearest' for large region
# as it is faster and use less memory.)
interpolation = 'linear'
# -----------------------#
# SKIM error parameters
# -----------------------#
# ------ File containing random coefficients to compute and save
# random error coefficients so that runs are reproducible:
# If file_coeff is specified and does not exist, file is created
# If you don't want runs to be reproducible, file_coeff is set to None
file_coeff = None # outdatadir+os.sep+'Random_coeff.nc'
# ------ Number of random realisations for instrumental and geophysical error
# (recommended ncomp=2000), ncomp1d is used for 1D spectrum, and ncomp2d
# is used for 2D spectrum (wet troposphere computation):
ncomp1d = 3000
ncomp2d = 2000
# ------- Instrument white noise error
instr = True
# ------- Instrument white noise rms
# rms_instr = [10 * 10**(-2), 10 * 10**(-2), 10 * 10**(-2), 10 * 10**(-2),
# 20 * 10 ** (-2)]
rms_instr = [os.path.join(dir_setup, 'instrumentnoise_12.dat'),
os.path.join(dir_setup, 'instrumentnoise_12.dat'),
os.path.join(dir_setup, 'instrumentnoise_12.dat'),
os.path.join(dir_setup, 'instrumentnoise_12.dat'),
os.path.join(dir_setup, 'instrumentnoise_12.dat'),
os.path.join(dir_setup, 'instrumentnoise_06.dat'),
os.path.join(dir_setup, 'instrumentnoise_06.dat')]
# Multplication factor for instrument noise data to compensate for faster time
# cycle
rms_instr_factor = 2.5
# ------- Stoke drift velocity [beam 12, beam 6]
uss = True
input_uss = os.path.join(indatadir, 'list_file_uss.txt')
G = [50, 50, 50, 50, 50, 50, 50]
bias_std = 0.09
errdcos = None
#[25.2006/20, 25.2747/20, 25.4763/20, 25.4271/20, 19.9728/20]
footprint_std = 0 #400
formula = False
## -- Geophysical error
## ----------------------
# ------ Wet tropo error (True to compute it):
wet_tropo = True
# ------ Beam print size (in km):
# Gaussian footprint of sigma km
sigma = 8.
| 5,776 | 37.771812 | 79 | py |
skimulator | skimulator-master/test/mod_diag.py | import numpy
import netCDF4
import os
import sys
import glob
import pickle
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import cartopy
import skimulator.rw_data as rw
# Module-level plotting defaults.
# Color cycle used by the plotting helpers in this module ('c' and 'y' repeat
# so up to 8 series can be drawn).
listcolor = ['c', 'y', 'b', 'g', 'k', 'r', 'c', 'y']
# Map projection and data-coordinate transform for cartopy figures; both are
# plain lat/lon (PlateCarree).
projection = cartopy.crs.PlateCarree()
transform = cartopy.crs.PlateCarree()
def diag_rms(listfile, modelbox, output, list_angle):
    """Compute RMS error statistics of radial velocity per beam family.

    Loops over simulator output files (netCDF), accumulates squared errors
    between observed (`ur_obs`) and true (`ur_true`) radial velocity, split
    between 6-degree and 12-degree beams, and prints/returns RMSE and the
    RMS of the true signal (used as a normalisation reference).

    Parameters
    ----------
    listfile : iterable of str
        Paths to L2B netCDF files (must contain lon/lat, nadir coordinates,
        ur_true, ur_obs; instr/uwd/uwd_est/dsigma/gsig_atm_err are optional).
    modelbox : unused in this function (kept for a uniform diagnostic API).
    output : unused in this function (kept for a uniform diagnostic API).
    list_angle : sequence of int
        Beam angle (6 or 12) for each across-track column; indexed by column.

    Returns
    -------
    tuple of float
        (RMSE 6deg, RMSE 12deg, RMS of true signal 6deg, RMS of true signal
        12deg).
    """
    # Per-error-component squared-error accumulators, split by beam angle.
    # nanmean12/nanmean06 are initialised but never used (dead variables).
    nanmean12 = 0
    nanmean06 = 0
    nanmeaninstr12 = 0
    nanmeaninstr06 = 0
    nanmeanuwb12 = 0
    nanmeanuwb06 = 0
    nanmeanuwbc12 = 0
    nanmeanuwbc06 = 0
    nanmeandsig12 = 0
    nanmeandsig06 = 0
    nanmeangsig12 = 0
    nanmeangsig06 = 0
    nanstd12 = 0
    nanstd06 = 0
    nanstd12norm = 0
    nanstd06norm = 0
    # Counters of accumulated beam columns for each family.
    i06 = 0
    i12 = 0
    for ifile in listfile:
        print(ifile)
        data = netCDF4.Dataset(ifile, 'r')
        # Trim 100 samples at both ends of the track to remove edge effects.
        ind0 = 100
        ind1 = -100
        if 'sigma0' in data.variables.keys():
            sig = data.variables['sigma0'][ind0:ind1, :]
        uwnd = data.variables['uwnd'][ind0:ind1, :]
        vwnd = data.variables['vwnd'][ind0:ind1, :]
        # Wind speed modulus, used below to filter samples to 3-20 m/s.
        wnd = numpy.sqrt(uwnd**2 + vwnd**2)
        lon = data.variables['lon'][ind0:ind1, :]
        lat = data.variables['lat'][ind0:ind1, :]
        # Wrap longitudes to [-180, 180).
        lon = numpy.mod(lon + 180.0, 360.0) - 180.0
        lon_nadir = data.variables['lon_nadir'][ind0:ind1]
        lon_nadir = numpy.mod(lon_nadir + 180.0, 360) - 180.0
        lat_nadir = data.variables['lat_nadir'][ind0:ind1]
        vartrue = data.variables['ur_true'][ind0:ind1, :]
        varobs = data.variables['ur_obs'][ind0:ind1, :]
        # Skip tracks with fewer than 700 nadir samples after trimming.
        if len(lon_nadir) < 700:
            print('track too short')
            continue
        # Optional error components: a flag is set only when the variable
        # exists in the file.
        instr = False
        if 'instr' in data.variables.keys():
            instr = True
            varinstr = data.variables['instr'][ind0:ind1, :]
            # Instrumental noise larger than 1 m/s is treated as invalid.
            varinstr[numpy.where(abs(varinstr) > 1)] = numpy.nan
        uwb = False
        if 'uwd' in data.variables.keys():
            uwb = True
            varuwb = data.variables['uwd'][ind0:ind1, :]
        uwbc = False
        if 'uwd_est' in data.variables.keys():
            uwbc = True
            # Residual of the wave-doppler estimation (estimate minus truth).
            # NOTE(review): uses varuwb, so this raises NameError if the file
            # has 'uwd_est' but not 'uwd' — confirm this never happens.
            varuwbc = data.variables['uwd_est'][ind0:ind1, :]
            varuwbc = varuwbc - varuwb
        dsigma = False
        if 'dsigma' in data.variables.keys():
            dsigma = True
            vardsig = data.variables['dsigma'][ind0:ind1, :]
        # NOTE(review): dsigma is forced back to False here, disabling the
        # dsigma branch entirely — looks like a leftover debug switch.
        dsigma = False
        gsig_atm = False
        if 'gsig_atm_err' in data.variables.keys():
            # NOTE(review): flag left False even when the variable exists,
            # so the gsig branch is also disabled — presumably deliberate
            # debug state; confirm.
            gsig_atm = False
            vargsig = data.variables['gsig_atm_err'][ind0:ind1, :]
        # Mask out-of-range velocities (> 100 m/s) and invalid instr samples.
        # NOTE(review): varinstr is referenced unconditionally here, so this
        # raises NameError when the file has no 'instr' variable — confirm
        # all input files carry it.
        mask = ((abs(vartrue)>100) | (abs(varobs)>100) | numpy.isnan(varinstr))
        vartrue[mask] = numpy.nan
        varobs[mask] = numpy.nan
        beam_angle = list_angle #data['beam_angle'][:]
        data.close()
        # Remove borders
        if numpy.shape(lon)[0]<100:
            continue
        # Accumulate per across-track column (one column per beam).
        for i in range(numpy.shape(lon)[1]):
            if varobs[:, i].mask.all():
                continue
            #if varuwbc[:, i].mask.all():
            #    continue
            # Keep only samples with moderate wind (3-20 m/s).
            _ind = numpy.where((wnd[:, i]>3) & (wnd[:, i]<20))
            #_ind = numpy.where((sig[:, i]>10**-5) & (wnd[:, i]>3))
            try:
                # First assignment (relative error) is immediately overwritten
                # by the absolute squared error below — only the second is
                # kept.
                _tmp = numpy.nanmean(((varobs[_ind, i] - vartrue[_ind,i])/vartrue[_ind,i])**2)
                _tmp = numpy.nanmean(((varobs[_ind, i] - vartrue[_ind,i]))**2)
                _tmp2 = numpy.nanmean(abs(vartrue[_ind, i])**2)
            except:
                # NOTE(review): bare except silently drops the column on any
                # error (including KeyboardInterrupt) — narrow it if possible.
                continue
            if instr is True:
                _tmpinstr = numpy.nanmean(abs(varinstr[_ind, i])**2)
            if uwb is True:
                try:
                    _tmpuwb = numpy.nanmean(abs(varuwb[_ind, i])**2)
                except:
                    continue
            if uwbc is True:
                _tmpuwbc = numpy.nanmean(abs(varuwbc[_ind, i])**2)
            if dsigma is True:
                _tmpdsig = numpy.nanmean(abs(vardsig[_ind, i])**2)
            if gsig_atm is True:
                _tmpgsig = numpy.nanmean(abs(vargsig[_ind, i])**2)
            # Route the column's statistics to the 6- or 12-degree family.
            if beam_angle[i] == 6:
                nanstd06norm += _tmp2
                nanstd06 += _tmp
                if instr is True:
                    nanmeaninstr06 += _tmpinstr
                if uwb is True:
                    nanmeanuwb06 += _tmpuwb
                if uwbc is True:
                    nanmeanuwbc06 += _tmpuwbc
                if dsigma is True:
                    nanmeandsig06 += _tmpdsig
                if gsig_atm is True:
                    nanmeangsig06 += _tmpgsig
                i06 += 1
            else:
                nanstd12 += _tmp
                nanstd12norm += _tmp2
                if instr is True:
                    nanmeaninstr12 += _tmpinstr
                if uwb is True:
                    nanmeanuwb12 += _tmpuwb
                if uwbc is True:
                    nanmeanuwbc12 += _tmpuwbc
                if dsigma is True:
                    nanmeandsig12 += _tmpdsig
                if gsig_atm is True:
                    nanmeangsig12 += _tmpgsig
                i12 += 1
    # Avoid ZeroDivisionError when nothing was accumulated: a nan divisor
    # propagates nan into the results instead.
    if i06 == 0:
        i06 = numpy.nan
    if i12 == 0:
        i12 = numpy.nan
    # Convert accumulated squared means into RMS values.
    nanstd06 = numpy.sqrt(nanstd06/i06)
    nanstd12 = numpy.sqrt(nanstd12/i12)
    # nanstd06 = numpy.sqrt(nanstd06/i06)
    # nanstd12 = numpy.sqrt(nanstd12/i12)
    nanstd06norm = numpy.sqrt(nanstd06norm/i06)
    nanstd12norm = numpy.sqrt(nanstd12norm/i12)
    # NOTE(review): the flags below keep the value set by the LAST file in
    # listfile (and are undefined if listfile is empty) — confirm all files
    # share the same variable set.
    if instr is True:
        nanmeaninstr06 = numpy.sqrt(nanmeaninstr06/i06)
        nanmeaninstr12 = numpy.sqrt(nanmeaninstr12/i12)
    if uwb is True:
        nanmeanuwb06 = numpy.sqrt(nanmeanuwb06/i06)
        nanmeanuwb12 = numpy.sqrt(nanmeanuwb12/i12)
    if uwbc is True:
        nanmeanuwbc06 = numpy.sqrt(nanmeanuwbc06/i06)
        nanmeanuwbc12 = numpy.sqrt(nanmeanuwbc12/i12)
    if dsigma is True:
        nanmeandsig06 = numpy.sqrt(nanmeandsig06/i06)
        nanmeandsig12 = numpy.sqrt(nanmeandsig12/i12)
    if gsig_atm is True:
        nanmeangsig06 = numpy.sqrt(nanmeangsig06/i06)
        nanmeangsig12 = numpy.sqrt(nanmeangsig12/i12)
    print('RMSE 06: {}, RMSE 12: {}'.format(nanstd06, nanstd12))
    print('NRMSE 06: {}, NRMSE 12: {}'.format(nanstd06norm, nanstd12norm))
    if instr is True:
        print('instr rms 06: {}, instr rms 12: {}'.format(nanmeaninstr06, nanmeaninstr12))
    if uwb is True:
        print('uwb rms 06: {}, uwb rms 12: {}'.format(nanmeanuwb06, nanmeanuwb12))
    if uwbc is True:
        print('uwbc rms 06: {}, uwbc rms 12: {}'.format(nanmeanuwbc06, nanmeanuwbc12))
    if dsigma is True:
        print('dsigma rms 06: {}, dsigma rms 12: {}'.format(nanmeandsig06, nanmeandsig12))
    if gsig_atm is True:
        print('gsig rms 06: {}, gsig rms 12: {}'.format(nanmeangsig06, nanmeangsig12))
    return nanstd06, nanstd12, nanstd06norm, nanstd12norm
def diag_azimuth_rms(listfile, modelbox, output, list_angle):
    """Compute RMS error statistics of radial velocity without track trimming
    or wind filtering.

    Variant of :func:`diag_rms` that uses full tracks (no edge trimming) and
    all samples regardless of wind speed. Accumulates squared errors between
    observed (`ur_obs`) and true (`ur_true`) radial velocity per 6- and
    12-degree beam family and returns RMSE plus a normalised variant.

    Parameters
    ----------
    listfile : iterable of str
        Paths to L2B netCDF files.
    modelbox : unused (kept for a uniform diagnostic API).
    output : unused (kept for a uniform diagnostic API).
    list_angle : sequence of int
        Beam angle (6 or 12) per across-track column.

    Returns
    -------
    tuple of float
        (RMSE 6deg, RMSE 12deg, NRMSE 6deg, NRMSE 12deg).
    """
    # Squared-error accumulators per component and beam family;
    # nanmean12/nanmean06 are never used (dead variables).
    nanmean12 = 0
    nanmean06 = 0
    nanmeaninstr12 = 0
    nanmeaninstr06 = 0
    nanmeanuwb12 = 0
    nanmeanuwb06 = 0
    nanmeanuwbc12 = 0
    nanmeanuwbc06 = 0
    nanstd12 = 0
    nanstd06 = 0
    nanstd12norm = 0
    nanstd06norm = 0
    # Counters of accumulated columns per family.
    i06 = 0
    i12 = 0
    for ifile in listfile:
        print(ifile)
        data = netCDF4.Dataset(ifile, 'r')
        lon = data.variables['lon'][:]
        lat = data.variables['lat'][:]
        # Wrap longitudes to [-180, 180).
        lon = numpy.mod(lon + 180.0, 360.0) - 180.0
        lon_nadir = data.variables['lon_nadir'][:]
        lon_nadir = numpy.mod(lon_nadir + 180.0, 360) - 180.0
        lat_nadir = data.variables['lat_nadir'][:]
        vartrue = data.variables['ur_true'][:]
        varobs = data.variables['ur_obs'][:]
        # NOTE(review): instr/uwb/uwbc are only assigned when their variable
        # exists in the file; they are read unconditionally below, which
        # raises NameError if the first file lacks any of them — consider
        # initialising the flags to False before the loop as diag_rms does.
        if 'instr' in data.variables.keys():
            instr = True
            varinstr = data.variables['instr'][:]
        if 'uwd' in data.variables.keys():
            uwb = True
            varuwb = data.variables['uwd'][:]
        if 'uwd_est' in data.variables.keys():
            uwbc = True
            varuwbc = data.variables['uwd_est'][:]
        # Out-of-range mask computed but not applied (masking lines are
        # commented out below).
        mask = ((abs(vartrue)>100) | (abs(varobs)>100))
        #vartrue[mask] = numpy.nan
        #varobs[mask] = numpy.nan
        beam_angle = list_angle #data['beam_angle'][:]
        data.close()
        # Skip very short tracks.
        if numpy.shape(lon)[0]<100:
            continue
        # Accumulate per across-track column (one column per beam).
        for i in range(numpy.shape(lon)[1]):
            if varobs[:, i].mask.all():
                continue
            if varuwbc[:, i].mask.all():
                continue
            try:
                # The relative-error assignment is immediately overwritten by
                # the absolute squared error; only the second value is kept.
                _tmp = numpy.nanmean(((varobs[:, i] - vartrue[:,i])/vartrue[:,i])**2)
                _tmp = numpy.nanmean(((varobs[:, i] - vartrue[:,i]))**2)
                _tmp2 = numpy.nanmean(abs(vartrue[:, i])**2)
            except:
                # NOTE(review): bare except silently drops the column — narrow
                # it to the expected exception type if possible.
                continue
            if instr is True:
                _tmpinstr = numpy.nanmean(abs(varinstr[:, i])**2)
            if uwb is True:
                try:
                    _tmpuwb = numpy.nanmean(abs(varuwb[:, i])**2)
                except:
                    continue
            if uwbc is True:
                _tmpuwbc = numpy.nanmean(abs(varuwbc[:, i])**2)
            # Route the column's statistics to the 6- or 12-degree family;
            # the "norm" accumulator here is the ratio error/signal (unlike
            # diag_rms which accumulates the raw signal power).
            if beam_angle[i] == 6:
                nanstd06norm += _tmp/_tmp2
                nanstd06 += _tmp
                if instr is True:
                    nanmeaninstr06 += _tmpinstr
                if uwb is True:
                    nanmeanuwb06 += _tmpuwb
                if uwbc is True:
                    nanmeanuwbc06 += _tmpuwbc
                i06 += 1
            else:
                nanstd12 += _tmp
                nanstd12norm += _tmp/_tmp2
                if instr is True:
                    nanmeaninstr12 += _tmpinstr
                if uwb is True:
                    nanmeanuwb12 += _tmpuwb
                if uwbc is True:
                    nanmeanuwbc12 += _tmpuwbc
                i12 += 1
    # NOTE(review): unlike diag_rms there is no zero-count guard here, so an
    # empty accumulation raises ZeroDivisionError — confirm acceptable.
    nanstd06 = numpy.sqrt(nanstd06/i06)
    nanstd12 = numpy.sqrt(nanstd12/i12)
    # nanstd06 = numpy.sqrt(nanstd06/i06)
    # nanstd12 = numpy.sqrt(nanstd12/i12)
    if instr is True:
        nanmeaninstr06 = numpy.sqrt(nanmeaninstr06/i06)
        nanmeaninstr12 = numpy.sqrt(nanmeaninstr12/i12)
    if uwb is True:
        nanmeanuwb06 = numpy.sqrt(nanmeanuwb06/i06)
        nanmeanuwb12 = numpy.sqrt(nanmeanuwb12/i12)
    if uwbc is True:
        nanmeanuwbc06 = numpy.sqrt(nanmeanuwbc06/i06)
        nanmeanuwbc12 = numpy.sqrt(nanmeanuwbc12/i12)
    print('RMSE 06: {}, RMSE 12: {}'.format(nanstd06, nanstd12))
    print('NRMSE 06: {}, NRMSE 12: {}'.format(nanstd06norm, nanstd12norm))
    if instr is True:
        print('instr rms 06: {}, instr rms 12: {}'.format(nanmeaninstr06, nanmeaninstr12))
    if uwb is True:
        print('uwb rms 06: {}, uwb rms 12: {}'.format(nanmeanuwb06, nanmeanuwb12))
    if uwbc is True:
        print('uwbc rms 06: {}, uwbc rms 12: {}'.format(nanmeanuwbc06, nanmeanuwbc12))
    return nanstd06, nanstd12, nanstd06norm, nanstd12norm
def bin_variables(listfile, listvar, bin_in, modelbox):
    """Bin along-track variables onto a regular lon/lat grid and pickle them.

    For each grid cell of the box defined by `modelbox`, collects the values
    of every variable in `listvar` falling within the cell's search radius,
    plus derived quantities for 'ur_obs' (diff_ur, cov_ur vs 'ur_true') and
    'uwd_est' (diff_uwdr, cov_uwdr vs 'uwd'). The result is a nested dict
    {cell_key: {varname: [arrays...]}} dumped to `bin_in` with pickle.

    Parameters
    ----------
    listfile : iterable of str
        Paths to L2B netCDF files.
    listvar : sequence of str
        Names of netCDF variables to bin.
    bin_in : str
        Path of the output pickle file.
    modelbox : sequence of 6 floats
        (lon_min, lon_max, dlon, lat_min, lat_max, dlat) defining the grid.
    """
    # Regular output grid; the cell search radius grows towards the poles to
    # keep a roughly constant footprint in km.
    lonp = numpy.arange(modelbox[0], modelbox[1] + modelbox[2], modelbox[2])
    latp = numpy.arange(modelbox[3], modelbox[4] + modelbox[5], modelbox[5])
    resol = numpy.sqrt((modelbox[2] * numpy.cos(numpy.deg2rad(latp))) **2
                       + modelbox[5]**2)
    dic_v = {}
    for ifile in listfile:
        data = netCDF4.Dataset(ifile, 'r')
        lon = data['lon'][:]
        lat = data['lat'][:]
        for j in range(len(lonp)):
            for i in range(len(latp)):
                # Cell key encodes (lat index, lon index) as 10000*i + j,
                # decoded the same way in compute_rms.
                ind_key = 10000 * int(i) + int(j)
                # Wrap longitudes to [-180, 180). NOTE(review): re-wrapping
                # inside the loop is idempotent but wasteful — could be
                # hoisted above the grid loops.
                lon = numpy.mod(lon + 180, 360) - 180
                # Approximate angular distance, with the lon term scaled by
                # cos(lat) of the observation.
                dist = numpy.sqrt(((lonp[j] - lon)
                                   *numpy.cos(numpy.deg2rad(lat)))**2
                                  + (latp[i] - lat)**2)
                iiobs = numpy.where(dist < resol[i])
                # NOTE(review): numpy.where returns a non-empty tuple even
                # when no point matches, so this guard never triggers —
                # presumably `if iiobs[0].size == 0` was intended.
                if not iiobs:
                    continue
                if ind_key not in dic_v.keys():
                    dic_v[ind_key] = {}
                for ivar in listvar:
                    try:
                        var = data[ivar][:]
                    # NOTE(review): on failure this only prints and falls
                    # through, reusing the previous iteration's `var` (or
                    # raising NameError on the first) — confirm intended.
                    except: print(ifile, ivar)
                    _mask = numpy.ma.getmaskarray(var)
                    mask = _mask[iiobs]
                    if ivar == 'ur_obs':
                        # Absolute obs-minus-truth difference per sample.
                        ivar2 = 'diff_ur'
                        var1 = data['ur_true'][:]
                        var2 = numpy.array(abs(var.data[iiobs] - var1.data[iiobs]))
                        if var2.any():
                            if ivar2 not in dic_v[ind_key].keys():
                                dic_v[ind_key][ivar2] = []
                            dic_v[ind_key][ivar2].append(var2[~mask])
                        # Scalar covariance-based agreement score:
                        # 200*cov(obs,true) / (var(obs)+var(true)), i.e. 100
                        # when obs == true.
                        ivar2 = 'cov_ur'
                        cov = numpy.cov(var.data[iiobs], var1.data[iiobs])
                        var2 = (200*cov[0, 1] / (numpy.trace(cov)))
                        if var2.any():
                            if ivar2 not in dic_v[ind_key].keys():
                                dic_v[ind_key][ivar2] = []
                            dic_v[ind_key][ivar2].append(var2)
                    if ivar == 'uwd_est':
                        # Same two derived quantities for the wave-doppler
                        # estimate vs the true wave doppler 'uwd'.
                        ivar2 = 'diff_uwdr'
                        var1 = data['uwd'][:]
                        var2 = numpy.array(abs(var.data[iiobs] - var1.data[iiobs]))
                        if var2.any():
                            if ivar2 not in dic_v[ind_key].keys():
                                dic_v[ind_key][ivar2] = []
                            dic_v[ind_key][ivar2].append(var2[~mask])
                        ivar2 = 'cov_uwdr'
                        cov = numpy.cov(var.data[iiobs], var1.data[iiobs])
                        var2 = (200*cov[0, 1] / (numpy.trace(cov)))
                        if var2.any():
                            if ivar2 not in dic_v[ind_key].keys():
                                dic_v[ind_key][ivar2] = []
                            dic_v[ind_key][ivar2].append(var2)
                    # Store the raw (unmasked) binned values of the variable
                    # itself.
                    var = numpy.array(var.data[iiobs])
                    if var.any():
                        if ivar not in dic_v[ind_key].keys():
                            dic_v[ind_key][ivar] = []
                        dic_v[ind_key][ivar].append(var[~mask])
        data.close()
    with open(bin_in, 'wb') as f:
        pickle.dump(dic_v, f)
def compute_rms(bin_in, bin_out, listvar, modelbox):
    """Reduce binned values (from :func:`bin_variables`) to gridded maps.

    Loads the pickled {cell_key: {varname: [arrays...]}} dict written by
    bin_variables, computes per-cell RMS, standard deviation, signal-to-noise
    ratio and mean covariance score for each variable, and pickles four
    result dicts to 'rms_<bin_out>', 'std_<bin_out>', 'snr_<bin_out>' and
    'cov_<bin_out>'. Each result dict maps variable names to 2D
    (lat, lon) arrays and also carries 'lon'/'lat' axes (except `cov`,
    which only holds the variable maps).

    Parameters
    ----------
    bin_in : str
        Path of the input pickle produced by bin_variables.
    bin_out : str
        Suffix used to build the four output pickle file names.
    listvar : list of str
        Variables to reduce. NOTE(review): this list is mutated in place
        (derived names are appended) — callers reusing the list will see the
        extra entries; consider copying it first.
    modelbox : sequence of 6 floats
        Same (lon_min, lon_max, dlon, lat_min, lat_max, dlat) grid definition
        as used for binning.
    """
    with open(bin_in, 'rb') as f:
        dic_v = pickle.load(f)
    # Rebuild the same grid axes as bin_variables so cell keys line up.
    lonp = numpy.arange(modelbox[0], modelbox[1] + modelbox[2], modelbox[2])
    latp = numpy.arange(modelbox[3], modelbox[4] + modelbox[5], modelbox[5])
    rms = {}
    std = {}
    snr = {}
    cov = {}
    rms['lon'] = lonp
    rms['lat'] = latp
    std['lon'] = lonp
    std['lat'] = latp
    snr['lon'] = lonp
    snr['lat'] = latp
    # Add the derived variables created by bin_variables so they get reduced
    # too (mutates the caller's list, see NOTE above).
    if 'uwd_est' in listvar:
        listvar.append('diff_uwdr')
    listvar.append('diff_ur')
    listvar.append('cov_ur')
    listvar.append('cov_uwdr')
    for ivar in listvar:
        # One nan-filled (lat, lon) map per variable and statistic.
        rms[ivar] = numpy.full((len(latp), len(lonp)), numpy.nan)
        std[ivar] = numpy.full((len(latp), len(lonp)), numpy.nan)
        snr[ivar] = numpy.full((len(latp), len(lonp)), numpy.nan)
        cov[ivar] = numpy.full((len(latp), len(lonp)), numpy.nan)
    for j in range(len(rms['lon'])):
        for i in range(len(rms['lat'])):
            # Same cell-key encoding as bin_variables: 10000*i + j.
            ind_key = 10000 * int(i) + int(j)
            if ind_key not in dic_v.keys():
                continue
            # Reference signal required to compute the SNR; skip cells that
            # lack either the true current or the wave doppler.
            if 'ur_true' not in dic_v[ind_key].keys():
                continue
            if 'uwd' not in dic_v[ind_key].keys():
                continue
            var1 = numpy.concatenate(dic_v[ind_key]['ur_true']).ravel()
            # This membership check is redundant (guaranteed True by the
            # guard above).
            if 'uwd' in dic_v[ind_key].keys():
                var2 = numpy.concatenate(dic_v[ind_key]['uwd']).ravel()
            if not var1.any() or not var2.any():
                continue
            # RMS of the true current in this cell, used as SNR numerator.
            rms_true = numpy.sqrt(numpy.nanmean(var1**2))
            # import pdb ; pdb.set_trace()
            for ivar in listvar:
                if ivar not in dic_v[ind_key].keys():
                    continue
                if len(dic_v[ind_key][ivar]) == 0:
                    continue
                # NOTE(review): leftover debug print — consider removing or
                # switching to logging.
                print(len(dic_v[ind_key][ivar]))
                try:
                    var = numpy.concatenate(dic_v[ind_key][ivar]).ravel()
                except:
                    # NOTE(review): bare except silently skips the variable —
                    # narrow to the expected exception if possible.
                    continue
                rms[ivar][i, j] = numpy.sqrt(numpy.nanmean(var**2))
                std[ivar][i, j] = numpy.nanstd(var)
                # SNR is only defined for error-like variables; 'cov_*'
                # entries hold the scalar covariance scores and are averaged.
                if ivar == 'diff_ur':
                    snr[ivar][i, j] = rms_true / std[ivar][i, j]
                elif ivar == 'instr' or ivar == 'diff_uwdr':
                    snr[ivar][i, j] = rms_true / std[ivar][i, j]
                elif 'cov' in ivar:
                    cov[ivar][i, j] = numpy.nanmean(var)
    with open('rms_{}'.format(bin_out), 'wb') as f:
        pickle.dump(rms, f)
    with open('std_{}'.format(bin_out), 'wb') as f:
        pickle.dump(std, f)
    with open('snr_{}'.format(bin_out), 'wb') as f:
        pickle.dump(snr, f)
    with open('cov_{}'.format(bin_out), 'wb') as f:
        pickle.dump(cov, f)
def plot_rms(pfile, list_var, outfile, isrms=True, isstd=True, issnr=True,
             isvar=True):
    """Plot the maps pickled by ``compute_rms`` (RMS, STD, SNR, covariance).

    Parameters
    ----------
    pfile : str
        Suffix of the input pickles (``rms_<pfile>`` etc.).
    list_var : sequence of str
        Variables to plot; ``'uwd_est'`` pulls in the derived difference
        diagnostics as well.
    outfile : str
        Suffix of the output PNG file names.
    isrms, isstd, issnr, isvar : bool
        Enable/disable each family of plots.
    """
    import mod_plot
    # Empty defaults keep the loops below safe when a family is disabled
    # (the original raised NameError on e.g. isrms=False).
    rms, std, snr, cov = {}, {}, {}, {}
    if isrms is True:
        with open('rms_{}'.format(pfile), 'rb') as f:
            rms = pickle.load(f)
    if isstd is True:
        with open('std_{}'.format(pfile), 'rb') as f:
            std = pickle.load(f)
    if issnr is True:
        with open('snr_{}'.format(pfile), 'rb') as f:
            snr = pickle.load(f)
    if isvar is True:
        with open('cov_{}'.format(pfile), 'rb') as f:
            cov = pickle.load(f)
    # Work on a copy so the caller's list is not mutated.
    list_var = list(list_var)
    if 'uwd_est' in list_var:
        list_var.append('diff_uwdr')
        list_var.append('diff_ur')
    for ivar in list_var:
        if ivar in rms.keys() and isrms is True:
            lon = rms['lon']
            lat = rms['lat']
            var = rms[ivar]
            _outfile = 'rms_{}_{}.png'.format(ivar, outfile)
            vmin = numpy.nanpercentile(var, 5)
            vmax = numpy.nanpercentile(var, 95)
            mod_plot.plot_diag(lon, lat, var, _outfile, vmin=vmin, vmax=vmax,
                               cmap='jet')
        if ivar in std.keys() and isstd is True:
            lon = std['lon']
            lat = std['lat']
            var = std[ivar]
            vmin = numpy.nanpercentile(var, 5)
            vmax = numpy.nanpercentile(var, 95)
            _outfile = 'std_{}_{}.png'.format(ivar, outfile)
            mod_plot.plot_diag(lon, lat, var, _outfile, vmin=vmin, vmax=vmax,
                               cmap='jet')
        if ivar in snr.keys() and issnr is True:
            lon = snr['lon']
            lat = snr['lat']
            var = snr[ivar]
            vmin = numpy.nanpercentile(var, 5)
            vmax = numpy.nanpercentile(var, 95)
            _outfile = 'snr_{}_{}.png'.format(ivar, outfile)
            mod_plot.plot_diag(lon, lat, var, _outfile, vmin=vmin, vmax=vmax,
                               cmap='jet')
    for ivar in ['cov_uwdr', 'cov_ur']:
        if ivar in cov.keys() and isvar is True:
            # Older covariance pickles may not carry the grid; fall back on
            # the other diagnostics when available.
            lon = cov.get('lon', snr.get('lon', rms.get('lon',
                                                        std.get('lon'))))
            lat = cov.get('lat', snr.get('lat', rms.get('lat',
                                                        std.get('lat'))))
            var = cov[ivar]
            vmin = numpy.nanpercentile(var, 5)
            vmax = numpy.nanpercentile(var, 95)
            # BUG fix: these files were written with a 'snr_' prefix,
            # silently overwriting the SNR maps.
            _outfile = 'cov_{}_{}.png'.format(ivar, outfile)
            mod_plot.plot_diag(lon, lat, var, _outfile, vmin=vmin, vmax=vmax,
                               cmap='jet')
    return None
| 19,253 | 38.374233 | 94 | py |
skimulator | skimulator-master/test/mod_plot.py | import numpy
import netCDF4
import os
import sys
import glob
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import cartopy
import skimulator.rw_data as rw
# Initialize color
listcolor = ['c', 'y', 'b', 'g', 'k', 'r', 'c', 'y']
projection = cartopy.crs.PlateCarree()
transform = cartopy.crs.PlateCarree()
def plot_all(listfile, modelbox, output):
    """Plot the nadir track and the first-beam track of every file.

    A PlateCarree view limited to *modelbox* is used when a box is given,
    otherwise a global orthographic view.  The figure is saved to *output*
    and returned.
    """
    fig = pyplot.figure(figsize=(12, 12))
    data_crs = cartopy.crs.PlateCarree()
    # Choose the map projection from the presence of a bounding box.
    if modelbox is None:
        map_crs = cartopy.crs.Orthographic(0, 0)
    else:
        map_crs = cartopy.crs.PlateCarree()
    ax = pyplot.axes(projection=map_crs)
    if modelbox is not None:
        ax.set_extent([modelbox[0], modelbox[1], modelbox[2], modelbox[3]],
                      crs=data_crs)
    else:
        ax.set_global()
    gl = ax.gridlines(crs=data_crs, draw_labels=True, color='gray',
                      linestyle='--', alpha=0.5)
    gl.xlabels_top = False
    gl.ylabels_left = False
    # One scatter of beam-0 positions and one of nadir positions per file.
    for fname in listfile:
        handler = netCDF4.Dataset(fname, 'r')
        lon = numpy.mod(handler['lon'][:] + 180.0, 360.0) - 180.0
        lat = handler['lat'][:]
        lon_nadir = numpy.mod(handler['lon_nadir'][:] + 180.0, 360) - 180.0
        lat_nadir = handler['lat_nadir'][:]
        handler.close()
        pyplot.plot(lon[:, 0], lat[:, 0], '.', color=listcolor[0],
                    markersize=1, transform=data_crs)
        pyplot.plot(lon_nadir, lat_nadir, '.', color=listcolor[4],
                    markersize=1, transform=data_crs)
    pyplot.savefig(output)
    return fig
def plot_variable(listfile, nvar, modelbox, output):
    """Scatter-plot variable *nvar* of every L2B file on a map.

    Note: the figure is returned but not saved (the ``savefig`` call is
    deliberately disabled); *output* is kept for interface compatibility
    with the other plotting helpers.
    """
    projection = cartopy.crs.PlateCarree()
    transform = cartopy.crs.PlateCarree()
    fig = pyplot.figure(figsize=(12, 12))
    if modelbox is None:
        projection = cartopy.crs.Orthographic(0, 0)
    ax = pyplot.axes(projection=projection)
    if modelbox is not None:
        ax.set_extent([modelbox[0], modelbox[1], modelbox[2], modelbox[3]],
                      crs=transform)
        norder = 6
    else:
        ax.set_global()
        norder = 1
    ax.add_feature(cartopy.feature.LAND, zorder=norder, edgecolor='black')
    gl = ax.gridlines(crs=transform, draw_labels=True, color='gray',
                      linestyle='--', alpha=0.5)
    gl.xlabels_top = False
    gl.ylabels_left = False
    for ifile in listfile:
        data = netCDF4.Dataset(ifile, 'r')
        lon = data['lon'][:]
        lat = data['lat'][:]
        lon = numpy.mod(lon + 180.0, 360.0) - 180.0
        lon_nadir = data['lon_nadir'][:]
        lon_nadir = numpy.mod(lon_nadir + 180.0, 360) - 180.0
        lat_nadir = data['lat_nadir'][:]
        var = data[nvar][:]
        # NOTE: the unused read of 'radial_angle' was removed; it could also
        # fail on files lacking that variable.
        data.close()
        for i in range(numpy.shape(lon)[1]):
            # Skip fully-masked beams; getmaskarray also works when the
            # variable comes back unmasked (the original crashed there).
            if numpy.ma.getmaskarray(var[:, i]).all():
                continue
            try:
                pyplot.scatter(lon[:, i], lat[:, i], c=var[:, i], cmap='jet',
                               vmin=-1, vmax=1, s=1,
                               transform=transform)
            except Exception:
                # Best effort: an unplottable beam must not abort the figure.
                continue
        pyplot.plot(lon_nadir, lat_nadir, '.', color=listcolor[4], markersize=1,
                    transform=transform)
    try:
        pyplot.colorbar()
    except Exception:
        # No mappable was drawn (every beam masked): nothing to label.
        pass
    return fig
def plot_vectors(listfile, nvar, modelbox, output, scale=20):
    """Quiver-plot the radial variable *nvar* (red) against the true current
    (green) for every file, then save the figure to *output*."""
    transform = cartopy.crs.PlateCarree()
    if modelbox is None:
        projection = cartopy.crs.Orthographic(0, 0)
    else:
        projection = cartopy.crs.PlateCarree()
    pyplot.figure(figsize=(12, 12))
    ax = pyplot.axes(projection=projection)
    if modelbox is not None:
        ax.set_extent([modelbox[0], modelbox[1], modelbox[2], modelbox[3]],
                      crs=transform)
        norder = 6
    else:
        ax.set_global()
        norder = 1
    ax.add_feature(cartopy.feature.LAND, zorder=norder, edgecolor='black')
    grid_lines = ax.gridlines(crs=transform, draw_labels=True, color='gray',
                              linestyle='--', alpha=0.5)
    grid_lines.xlabels_top = False
    grid_lines.ylabels_left = False
    for fname in listfile:
        handler = netCDF4.Dataset(fname, 'r')
        lon = numpy.mod(handler['lon'][:] + 180.0, 360.0) - 180.0
        lat = handler['lat'][:]
        lon_nadir = numpy.mod(handler['lon_nadir'][:] + 180.0, 360) - 180.0
        lat_nadir = handler['lat_nadir'][:]
        var = handler[nvar][:]
        radial_angle = handler['radial_angle'][:]
        u_true = handler['ucur'][:]
        v_true = handler['vcur'][:]
        handler.close()
        # Project the radial component back onto east/north axes.
        u_radial = var * numpy.cos(radial_angle)
        v_radial = var * numpy.sin(radial_angle)
        for beam in range(numpy.shape(lon)[1]):
            pyplot.quiver(lon[:, beam], lat[:, beam], u_true[:, beam],
                          v_true[:, beam], transform=transform,
                          color='green', scale=scale)
            pyplot.quiver(lon[:, beam], lat[:, beam], u_radial[:, beam],
                          v_radial[:, beam], transform=transform,
                          color='red', scale=scale)
        pyplot.plot(lon_nadir, lat_nadir, '.', color=listcolor[4],
                    markersize=1, transform=transform)
    pyplot.savefig(output)
def plot_diag(lon, lat, var, outfile, vmin=None, vmax=None, cmap='jet'):
    """Draw *var* on a lon/lat map with pcolormesh and save it to *outfile*.

    When *vmin*/*vmax* are omitted they default to the 1st and 99th
    percentiles of *var*.
    """
    lon = numpy.mod(lon + 180.0, 360.0) - 180.0
    if vmin is None:
        vmin = numpy.nanpercentile(var, 1)
    if vmax is None:
        vmax = numpy.nanpercentile(var, 99)
    transform = cartopy.crs.PlateCarree()
    pyplot.figure(figsize=(12, 12))
    ax = pyplot.axes(projection=cartopy.crs.PlateCarree())
    # The view is always clipped to the data footprint.
    extent = [numpy.min(lon), numpy.max(lon), numpy.min(lat), numpy.max(lat)]
    ax.set_extent(extent, crs=transform)
    ax.add_feature(cartopy.feature.LAND, zorder=6, edgecolor='black')
    gl = ax.gridlines(crs=transform, draw_labels=True, color='gray',
                      linestyle='--', alpha=0.5)
    gl.xlabels_top = False
    gl.ylabels_left = False
    mesh = ax.pcolormesh(lon, lat, var, vmin=vmin, vmax=vmax, cmap=cmap)
    pyplot.colorbar(mesh)
    pyplot.savefig(outfile)
    return None
| 6,884 | 35.428571 | 80 | py |
skimulator | skimulator-master/test/plot_grid.py | import mod_plot
import params as p
import numpy
import glob
import os
# Gather the run parameters and normalise the box longitudes to [-180, 180).
indatadir = p.outdatadir
config = p.config
modelbox = p.modelbox
for _k in (0, 1):
    modelbox[_k] = numpy.mod(modelbox[_k] + 180.0, 360.0) - 180.0
filesgrid = os.path.join(indatadir, '{}_grid'.format(config))
listfile = glob.glob('{}*.nc'.format(filesgrid))

# Plot every grid file on a single map.
output = os.path.join(indatadir, 'Grid_{}.png'.format(config))
mod_plot.plot_all(listfile, modelbox, output)
print('Plot saved in {}'.format(output))

# Plot only the first six passes.
listfile = sorted(listfile)
output = os.path.join(indatadir, 'Grid_6{}.png'.format(config))
mod_plot.plot_all(listfile[:6], modelbox, output)
print('Plot saved in {}'.format(output))
| 828 | 26.633333 | 64 | py |
skimulator | skimulator-master/test/diag_l2b.py | import numpy
import mod_diag
import glob
import os
import matplotlib
import json
import sys
# A JSON parameter file must be given on the command line.
# BUG fix: sys.argv always contains at least the script name, so the
# original test ``len(sys.argv) < 1`` could never trigger; require two
# elements before reading sys.argv[1].
if len(sys.argv) < 2:
    print('Provide json file for diagnostics')
    sys.exit(1)
file_param = sys.argv[1]
with open(file_param, 'r') as f:
    params = json.load(f)
# L2B diagnostic parameters.
modelbox = params['l2b']['modelbox']
config = params['l2b']['config']
indatadir = params['l2b']['indatadir']
outdatadir = params['l2b']['outdatadir']
list_angle = params['l2b']['list_angle']
# indatadir = params['/tmp/key/data/skim_eq_output/{}'.format(config)
filesgrid = os.path.join(indatadir, '{}_grid'.format(config))
listfile = glob.glob('{}*.nc'.format(filesgrid))
files = os.path.join(indatadir, '{}_c01'.format(config))
listfiles = glob.glob('{}*.nc'.format(files))
listfiles = sorted(listfiles)
output = os.path.join(outdatadir, 'Grid_{}.png'.format(config))
output2 = os.path.join(outdatadir, 'c_{}.png'.format(config))
print(config)
# listfiles.remove('/tmp/key/data/skim_eq_output/WW3_EQ_metop_2018_8a_c01_p022.nc')
mod_diag.diag_rms(listfiles[:], modelbox, output, list_angle)
listvar = ['ur_true', 'instr', 'ur_obs', 'uwd', 'uwd_est']
listvar = ['ur_true', 'instr', 'ur_obs', 'uwd']  # , 'uwd_est']
modelbox2 = params['l2b']['modelbox_bin']
bin_file = os.path.join(outdatadir, '{}.pyo'.format(config))
# Optional binning / map steps, kept disabled as in the original workflow:
#mod_diag.bin_variables(listfiles[:], listvar, bin_file, modelbox2)
bin_file2 = '{}.pyo'.format(config)
#mod_diag.compute_rms(bin_file, bin_file2, listvar, modelbox2)
#mod_diag.plot_rms(bin_file2, listvar, config)
| 1,499 | 32.333333 | 83 | py |
skimulator | skimulator-master/test/diags_l2d.py | import numpy
from matplotlib import pyplot
import netCDF4
import glob
import os
import scipy.signal
from scipy.fftpack import fft
def cpsd1d(hh1=None, hh2=None, dx=1., tap=0.05, detrend=True):
    """Return (frequencies, phase coherence) between two 1-D signals.

    Both signals are demeaned, optionally linearly detrended and tapered at
    both ends with a cosine ramp covering a fraction *tap* of the samples.
    ``C = cos(angle(fft(hh1)) - angle(fft(hh2)))`` is returned on the
    positive frequencies ``ff`` (cycles per unit of *dx*).
    """
    hh1 = hh1 - numpy.mean(hh1)
    hh2 = hh2 - numpy.mean(hh2)
    nx = numpy.shape(hh1)[0]
    if detrend:
        hh1 = scipy.signal.detrend(hh1)
        hh2 = scipy.signal.detrend(hh2)
    if tap > 0:
        # BUG fix: numpy.int was removed in NumPy 1.24; use the builtin.
        ntaper = int(tap * nx + 0.5)
        taper = numpy.zeros(nx) + 1.
        taper[:ntaper] = numpy.cos(numpy.arange(ntaper) / (ntaper - 1.)
                                   * numpy.pi / 2 + 3 * numpy.pi / 2)
        taper[-ntaper:] = numpy.cos(-numpy.arange(-ntaper + 1, 1)
                                    / (ntaper - 1.)
                                    * numpy.pi / 2 + 3 * numpy.pi / 2)
        hh1 = hh1 * taper
        hh2 = hh2 * taper
    ss1 = fft(hh1)
    ss2 = fft(hh2)
    ff = numpy.arange(1, nx / 2 - 1) / (nx * dx)
    C = numpy.cos(numpy.angle(ss1[1: int(nx / 2) - 1])
                  - numpy.angle(ss2[1: int(nx / 2) - 1]))
    return ff, C
def coherency_l2d(datadir_input, config, var, nal_min, posting, basename=None,
                  outfile='coherency'):
    """Plot spectral coherency between true and reconstructed L2D currents.

    For each (directory, config, variable-suffix) triplet, the
    ``<config>_l2d_*.nc`` files are read, along-track chunks of at least
    *nal_min* valid points are extracted, and the coherency between the
    true field and the ``ux_<suffix>``/``uy_<suffix>`` reconstruction is
    averaged.  One curve per triplet is drawn and the figure is saved as
    ``coherency_<outfile>.png``.

    *basename* is currently unused (kept for interface compatibility) and
    now defaults to None.
    """
    pyplot.figure()
    for indir, iconfig, ivar in zip(datadir_input, config, var):
        print(indir, iconfig, ivar)
        list_files = glob.glob(os.path.join(indir,
                                            '{}_l2d_*.nc'.format(iconfig)))
        # BUG fix: the variable names must be built from the loop item
        # (ivar), not from the whole sequence (var).
        varx = 'ux_{}'.format(ivar)
        vary = 'uy_{}'.format(ivar)
        fid = netCDF4.Dataset(list_files[0], 'r')
        _tmp = numpy.array(fid.variables[varx][:])
        _tmp[_tmp < -10] = numpy.nan
        fid.close()
        nac = _tmp.shape[1]
        dal = posting  # km
        # Accumulators start at 0 so the first chunk adds element-wise
        # (the original list-initialised accumulator grew by extension).
        MCuac = 0
        countuac = 0
        MCual = 0
        countual = 0
        for ifile in list_files:
            ref = {}
            skim = {}
            fid = netCDF4.Dataset(ifile, 'r')
            skim[varx] = numpy.array(fid.variables[varx][:])
            # BUG fix: mask the reconstruction with its own fill values;
            # the original indexed an undefined ref['ux_obs'] and then
            # masked the wrong dictionary.
            skim[varx][skim[varx] < -10] = numpy.nan
            skim[varx] = numpy.ma.masked_invalid(skim[varx])
            ref['ux_true'] = numpy.array(fid.variables['ux_true'][:])
            ref['ux_true'][ref['ux_true'] < -10] = numpy.nan
            ref['ux_true'] = numpy.ma.masked_invalid(ref['ux_true'])
            skim[vary] = numpy.array(fid.variables[vary][:])
            skim[vary][skim[vary] < -10] = numpy.nan
            skim[vary] = numpy.ma.masked_invalid(skim[vary])
            ref['uy_true'] = numpy.array(fid.variables['uy_true'][:])
            ref['uy_true'][ref['uy_true'] < -10] = numpy.nan
            ref['uy_true'] = numpy.ma.masked_invalid(ref['uy_true'])
            fid.close()
            if numpy.shape(skim[varx])[1] < nac:
                continue
            idate = ifile[-18:-9]
            print(idate)
            for i in range(nac):
                checknanref = + ref['ux_true'][:, i]
                checknanobs = + skim[varx][:, i]
                merged_mask = (checknanref.mask | checknanobs.mask)
                # Split the valid indices into contiguous chunks.
                indok = numpy.where(~merged_mask)[0]
                _ind = numpy.where(indok[1:] - indok[:-1] > 1)
                ensind = numpy.split(indok, numpy.cumsum(_ind) + 1)
                for chunk in range(len(ensind)):
                    if len(ensind[chunk]) > nal_min:
                        s1 = ref['ux_true'][ensind[chunk][:nal_min], i]
                        s2 = skim[varx][ensind[chunk][:nal_min], i]
                        ffac, C = cpsd1d(hh1=s1, hh2=s2, dx=dal,
                                         tap=0.5, detrend=True)
                        countuac += 1
                        MCuac = MCuac + C
                        s1 = ref['uy_true'][ensind[chunk][:nal_min], i]
                        s2 = skim[vary][ensind[chunk][:nal_min], i]
                        try:
                            ffal, C = cpsd1d(hh1=s1, hh2=s2, dx=dal,
                                             tap=0.5, detrend=True)
                            countual += 1
                        except Exception:
                            continue
                        MCual = MCual + C
        MCuac = numpy.array(MCuac) / countuac
        MCual = numpy.array(MCual) / countual
        pyplot.semilogx(ffac, MCuac, label='{} {}'.format(ivar, iconfig))
    pyplot.grid()
    pyplot.legend()
    pyplot.xlabel('/cy/km')
    pyplot.ylabel('coherency')
    pyplot.savefig('coherency_{}.png'.format(outfile))
def rms_l2d(datadir_input, config):
    """Accumulate and plot the STD of L2D current errors across the swath.

    Reads every ``<config>_l2d_*.nc`` file in *datadir_input*, accumulates
    the weighted standard deviation of (reconstruction - truth) per
    across-swath column for both the observed and error-free fields, and
    saves the two-panel figure as ``std_<config>.png``.

    NOTE(review): the swath coordinate uses ``p.posting`` below, so this
    function relies on the module-global ``p`` imported in the
    ``__main__`` guard — calling it from an importing module would raise
    NameError.  TODO confirm before reuse.
    """
    datadir_output = './'
    glob_files = os.path.join(datadir_input, '{}_l2d_*.nc'.format(config))
    list_files = glob.glob(glob_files)
    ref = {}
    skim = {}
    # Read the first file only to discover the across-swath size.
    fid = netCDF4.Dataset(list_files[0], 'r')
    _tmp = numpy.array(fid.variables['ux_true'][:])
    #_tmp = numpy.array(fid.variables['u_ac_true'][:])
    _tmp[_tmp < -10] = numpy.nan
    fid.close()
    nac = _tmp.shape[1]
    nal = nac
    # Weighted-STD accumulators (std_*) and their total weights (ntot_*);
    # the 'm' suffix denotes the error-free ("model"/noerr) fields.
    std_uac = numpy.zeros(nac)
    std_ual = numpy.zeros(nal)
    ntot_ac = numpy.zeros(nac)
    ntot_al = numpy.zeros(nal)
    std_uacm = numpy.zeros(nac)
    std_ualm = numpy.zeros(nal)
    ntot_acm = numpy.zeros(nac)
    ntot_alm = numpy.zeros(nal)
    for filev in list_files:
        fid = netCDF4.Dataset(filev, 'r')
        # Pass number parsed from the file name suffix.
        ipath = int(filev[-6:-3])
        if ipath > 400:
            # NOTE(review): 'fid' is not closed on this early skip (leak).
            continue
       # if ipath%2==0:
       #     continue
        # Truth and reconstructions; fill values (< -10) become NaN.
        ref['uac'] = numpy.array(fid.variables['uy_true'][:])
        ref['uac'][ref['uac'] < -10] = numpy.nan
        ref['ual'] = numpy.array(fid.variables['ux_true'][:])
        ref['ual'][ref['ual'] < -10] = numpy.nan
        skim['uacm'] = numpy.array(fid.variables['uy_noerr'][:])
        skim['uacm'][skim['uacm'] < -10] = numpy.nan
        skim['ualm'] = numpy.array(fid.variables['ux_noerr'][:])
        skim['ualm'][skim['ualm'] < -10] = numpy.nan
        skim['uac'] = numpy.array(fid.variables['uy_obs'][:])
        skim['uac'][skim['uac'] < -10] = numpy.nan
        skim['ual'] = numpy.array(fid.variables['ux_obs'][:])
        skim['ual'][skim['ual'] < -10] = numpy.nan
        nuac = ref['uac'].shape[1]
        fid.close()
        if nuac < 61:
            continue
        # Per-column weighted accumulation; a column only contributes when
        # it has at least 62 valid samples.
        for i in range(nuac):
            it_ac = len(numpy.where(numpy.isnan(skim['uac'][:, i]) == False)[0])
            if it_ac >= 62:
                std_uac[i] += numpy.nanstd(skim['uac'][:, i]
                                           - ref['uac'][:, i])*it_ac
                ntot_ac[i] += it_ac
            it_al = len(numpy.where(numpy.isnan(skim['ual'][:, i]) == False)[0])
            if it_al >= 62:
                std_ual[i] += numpy.nanstd(skim['ual'][:, i]
                                           - ref['ual'][:, i])*it_al
                ntot_al[i] += it_al
            it_acm = len(numpy.where(numpy.isnan(skim['uacm'][:, i]) == False)[0])
            if it_acm >= 62:
                std_uacm[i] += numpy.nanstd(skim['uacm'][:, i]
                                            - ref['uac'][:, i])*it_acm
                ntot_acm[i] += it_acm
            it_alm = len(numpy.where(numpy.isnan(skim['ualm'][:, i]) == False)[0])
            if it_alm >= 62:
                std_ualm[i] += numpy.nanstd(skim['ualm'][:, i]
                                            - ref['ual'][:, i])*it_alm
                ntot_alm[i] += it_alm
    # Normalise the weighted sums into mean STDs per column.
    std_uac = std_uac/ntot_ac
    std_ual = std_ual/ntot_al
    std_uacm = std_uacm/ntot_acm
    std_ualm = std_ualm/ntot_alm
    f, (ax1, ax2) = pyplot.subplots(1, 2, sharey= True, figsize=(12,5 ))
    # Across-swath coordinate in km, centred on the nadir track.
    xac = numpy.arange(-(nac - 1) * p.posting/2, (nac + 1)* p.posting/2, p.posting)
    _ind = numpy.where(numpy.abs(xac)>40)
    print(config, 'uac', numpy.nanmean(std_uac[_ind]))
    print(config, 'ual', numpy.nanmean(std_ual[_ind]))
    print(config, 'uacm', numpy.nanmean(std_uacm[_ind]))
    print(config, 'ualm', numpy.nanmean(std_ualm[_ind]))
    ax1.plot(xac, std_uac, 'r', label='eastward')
    ax1.plot(xac, std_ual, 'b', label='northward')
    ax1.set_title('Observation {}'.format(config))
    ax1.set_ylim([0.00, 0.18])
    ax1.legend()
    ax2.plot(xac, std_uacm, 'r', label='eastward')
    ax2.plot(xac, std_ualm, 'b', label='northward')
    ax2.set_title('Error-free {}'.format(config))
    ax2.legend()
    pyplot.savefig('std_{}.png'.format(config))
if '__main__' == __name__:
    import params as p
    # Minimum along-track chunk length (in points) for spectral estimates.
    length_al = 200
    # BUG fix: the original called rms_l2c/coherency_l2c, which do not
    # exist in this module; the local functions are rms_l2d/coherency_l2d.
    # coherency_l2d also takes 'basename' before 'outfile' (unused: None).
    list_config = ('WW3_AT_metop_2018_8a', 'WW3_AT_metop_2018_8b',
                   'WW3_AT_metop_2018_8c', 'WW3_AT_metop_2018_6a')
    list_config = ('WW3_EQ_metop_2018_8a', 'WW3_EQ_metop_2018_8b',
                   'WW3_EQ_metop_2018_8c', 'WW3_EQ_metop_2018_6a')
    #list_config = ('WW3_FR_metop_2018_8a', 'WW3_FR_metop_2018_8b',
    #               'WW3_FR_metop_2018_8c', 'WW3_FR_metop_2018_6a')
    list_dir = []
    for iconfig in list_config:
        outdatadir = os.path.join('/tmp/key/data/skim_at_output', iconfig)
        outdatadir = os.path.join('/tmp/key/data/skim_eq_output', iconfig)
        # outdatadir = os.path.join('/tmp/key/data/skim_fr_output', iconfig)
        list_dir.append(outdatadir)
        rms_l2d(outdatadir, iconfig)
        coherency_l2d((outdatadir, outdatadir), (iconfig, iconfig),
                      ('obs', 'model'), length_al, p.posting, None,
                      outfile='{}_obs_model'.format(iconfig))
    list_var = ('obs', 'obs', 'obs', 'obs')
    nal_min = 200
    print(list_dir, list_config)
    coherency_l2d(list_dir, list_config, list_var, nal_min, p.posting, None,
                  outfile=list_config[0][:-8])
| 10,114 | 38.666667 | 90 | py |
skimulator | skimulator-master/test/diags_l2c.py | import numpy
from matplotlib import pyplot
import netCDF4
import glob
import os
import sys
import json
import scipy.signal
from scipy.fftpack import fft
def cpsd1d(hh1=None, hh2=None, dx=1., tap=0.05, detrend=True):
    """Phase coherence of two 1-D signals on the positive frequencies.

    The inputs are demeaned, optionally linearly detrended and cosine
    tapered on a fraction *tap* of each end before the FFT.  Returns the
    frequency axis ``ff`` (cycles per unit of *dx*) and
    ``C = cos(angle(fft(hh1)) - angle(fft(hh2)))``.
    """
    hh1 = hh1 - numpy.mean(hh1)
    hh2 = hh2 - numpy.mean(hh2)
    nx = numpy.shape(hh1)[0]
    if detrend:
        hh1 = scipy.signal.detrend(hh1)
        hh2 = scipy.signal.detrend(hh2)
    if tap > 0:
        # BUG fix: numpy.int was removed in NumPy 1.24; use the builtin.
        ntaper = int(tap * nx + 0.5)
        taper = numpy.zeros(nx) + 1.
        taper[:ntaper] = numpy.cos(numpy.arange(ntaper) / (ntaper - 1.)
                                   * numpy.pi / 2 + 3 * numpy.pi / 2)
        taper[-ntaper:] = numpy.cos(-numpy.arange(-ntaper + 1, 1)
                                    / (ntaper - 1.)
                                    * numpy.pi / 2 + 3 * numpy.pi / 2)
        hh1 = hh1 * taper
        hh2 = hh2 * taper
    ss1 = fft(hh1)
    ss2 = fft(hh2)
    ff = numpy.arange(1, nx / 2 - 1) / (nx * dx)
    C = numpy.cos(numpy.angle(ss1[1: int(nx / 2) - 1])
                  - numpy.angle(ss2[1: int(nx / 2) - 1]))
    return ff, C
def coherency_l2c(datadir_input, config, var, nal_min,
                  posting, outfile='coherency', fsize=1):
    """Plot averaged spectral coherency between true and *var* L2C currents.

    For each (directory, config, suffix) triplet, the
    ``<config>_l2c_c*p*.nc`` files are read, the true across/along-track
    currents are smoothed with a boxcar of *fsize* points, and the coherency
    with ``u_ac_<suffix>`` / ``u_al_<suffix>`` is averaged over every
    along-track chunk of at least *nal_min* valid points.  The figure is
    saved as ``<outfile>.png``.
    """
    # Only scipy.signal is imported at module level.
    import scipy.ndimage
    pyplot.figure()
    for indir, iconfig, ivar in zip(datadir_input, config, var):
        print(indir, iconfig, ivar)
        list_files = glob.glob(os.path.join(indir,
                                            '{}_l2c_c*p*.nc'.format(iconfig)))
        fid = netCDF4.Dataset(list_files[0], 'r')
        try:
            _tmp = numpy.array(fid.variables['u_ac_true'][:])
        except Exception:
            continue
        _tmp[_tmp < -10] = numpy.nan
        fid.close()
        nac = _tmp.shape[1]
        dal = posting  # km
        # Accumulators start at 0 so the first chunk adds element-wise
        # (the original list-initialised accumulator grew by extension).
        MCuac = 0
        countuac = 0
        MCual = 0
        countual = 0
        for ifile in list_files:
            ref = {}
            skim = {}
            fid = netCDF4.Dataset(ifile, 'r')
            try:
                ref['uac'] = numpy.array(fid.variables['u_ac_true'][:])
                # BUG fix: the 'fisze' typo raised NameError here, so every
                # single file was printed and skipped.
                ref['uac'] = scipy.ndimage.uniform_filter(ref['uac'],
                                                          size=fsize)
            except Exception:
                print(ifile)
                continue
            ref['uac'][ref['uac'] < -10] = numpy.nan
            ref['uac'] = numpy.ma.masked_invalid(ref['uac'])
            ref['ual'] = numpy.array(fid.variables['u_al_true'][:])
            ref['ual'] = scipy.ndimage.uniform_filter(ref['ual'], size=fsize)
            ref['ual'][ref['ual'] < -10] = numpy.nan
            ref['ual'] = numpy.ma.masked_invalid(ref['ual'])
            skim['uac'] = numpy.array(fid.variables['u_ac_{}'.format(ivar)][:])
            skim['uac'][numpy.abs(skim['uac']) > 10] = numpy.nan
            skim['uac'] = numpy.ma.masked_invalid(skim['uac'])
            skim['ual'] = numpy.array(fid.variables['u_al_{}'.format(ivar)][:])
            skim['ual'][numpy.abs(skim['ual']) > 10] = numpy.nan
            skim['ual'] = numpy.ma.masked_invalid(skim['ual'])
            fid.close()
            if numpy.shape(skim['ual'])[1] < nac:
                continue
            # Skip the three edge columns on both swath sides.
            for i in range(3, nac - 3):
                checknanref = + ref['uac'][:, i]
                checknanobs = + skim['uac'][:, i]
                merged_mask = (checknanref.mask | checknanobs.mask)
                # Split valid indices into contiguous along-track chunks.
                indok = numpy.where(~merged_mask)[0]
                _ind = numpy.where(indok[1:] - indok[:-1] > 1)
                ensind = numpy.split(indok, numpy.cumsum(_ind) + 1)
                for chunk in range(len(ensind)):
                    if len(ensind[chunk]) > nal_min:
                        s1 = ref['uac'][ensind[chunk][:nal_min], i]
                        s2 = skim['uac'][ensind[chunk][:nal_min], i]
                        ffac, C = cpsd1d(hh1=s1, hh2=s2, dx=dal,
                                         tap=0.5, detrend=True)
                        countuac += 1
                        MCuac = MCuac + C
                        s1 = ref['ual'][ensind[chunk][:nal_min], i]
                        s2 = skim['ual'][ensind[chunk][:nal_min], i]
                        try:
                            ffal, C = cpsd1d(hh1=s1, hh2=s2, dx=dal,
                                             tap=0.5, detrend=True)
                            countual += 1
                        except Exception:
                            continue
                        MCual = MCual + C
        MCuac = numpy.array(MCuac) / countuac
        MCual = numpy.array(MCual) / countual
        pyplot.semilogx(ffac, MCuac, label='{} {}'.format(ivar, iconfig))
    pyplot.grid()
    pyplot.legend()
    pyplot.xlabel('/cy/km')
    pyplot.ylabel('coherency')
    pyplot.savefig('{}.png'.format(outfile))
def rms_l2c(datadir_input, config, output, threshold=0.1, fsize=1):
    """Accumulate and plot the STD of L2C current errors across the swath.

    Reads the ``<config>_l2c_c01*p*.nc`` files, accumulates the weighted
    standard deviation of (reconstruction - truth) per across-track column
    for the observed and error-free fields plus each error term in
    ``list_key``, and saves a three-panel figure to ``<output>.pdf``.

    NOTE(review): this function uses the name ``posting`` (below) and the
    module ``scipy.ndimage`` without defining/importing them itself — it
    only works when ``posting`` exists as a module global (set in the
    ``__main__`` guard) and ``scipy.ndimage`` is importable.  It also
    assumes at least six input files (``list_files[5]``), and the bare
    ``except`` around the first read leaves ``_tmp`` undefined on failure.
    TODO confirm before reuse from another module.
    """
    datadir_output = output
    glob_files = os.path.join(datadir_input, '{}_l2c_c01*p*.nc'.format(config))
    list_files = glob.glob(glob_files)
    ref = {}
    skim = {}
    print(datadir_input, config)
    # Read one file only to discover the across-track size.
    fid = netCDF4.Dataset(list_files[5], 'r')
    try:
        _tmp = numpy.array(fid.variables['u_ac_true'][:])
    except:
        print(list_files[0])
    #_tmp = numpy.array(fid.variables['u_ac_true'][:])
    _tmp[_tmp < -10] = numpy.nan
    fid.close()
    nac = _tmp.shape[1]
    nal = nac
    # Weighted-STD accumulators (std_*) and their weights (ntot_*);
    # the 'm' suffix denotes the error-free ("noerr") fields.
    std_uac = numpy.zeros(nac)
    std_ual = numpy.zeros(nal)
    ntot_ac = numpy.zeros(nac)
    ntot_al = numpy.zeros(nal)
    std_uacm = numpy.zeros(nac)
    std_ualm = numpy.zeros(nal)
    ntot_acm = numpy.zeros(nac)
    ntot_alm = numpy.zeros(nal)
    print(nac)
    # Individual error terms to accumulate; other terms are kept disabled.
    list_key = [#'u_al_wd', 'u_ac_wd',# 'u_al_wdrem', 'u_ac_wdrem',
                'u_ac_instr', 'u_al_instr',# 'u_ac_dsigma', 'u_al_dsigma',
                'u_ac_true', 'u_al_true']
    list_fkey = ['uwnd', 'vwnd'] #, 'rain', 'mssu', 'mssc']
    std_err = {}
    ntot_err = {}
    for ikey in list_key:
        std_err[ikey] = numpy.zeros(nac)
        ntot_err[ikey] = numpy.zeros(nac)
    for filev in list_files:
        fid = netCDF4.Dataset(filev, 'r')
        # Pass number parsed from the file name suffix.
        ipath = int(filev[-6:-3])
        #if ipath > 400:
        #    continue
       # if ipath%2==0:
       #     continue
        try:
            ref['uac'] = numpy.array(fid.variables['u_ac_true'][:])
            # Boxcar smoothing of the truth at the comparison scale.
            ref['uac'] = scipy.ndimage.uniform_filter(ref['uac'], size=fsize)
        except:
            print(filev)
            continue
        # Fill values (|u| > 10 m/s) become NaN.
        ref['uac'][numpy.abs(ref['uac']) > 10] = numpy.nan
        ref['ual'] = numpy.array(fid.variables['u_al_true'][:])
        ref['ual'] = scipy.ndimage.uniform_filter(ref['ual'], size=fsize)
        ref['ual'][numpy.abs(ref['ual']) > 10] = numpy.nan
        skim['uacm'] = numpy.array(fid.variables['u_ac_noerr'][:])
        skim['uacm'][numpy.abs(skim['uacm']) > 10] = numpy.nan
        skim['ualm'] = numpy.array(fid.variables['u_al_noerr'][:])
        skim['ualm'][numpy.abs(skim['ualm']) > 10] = numpy.nan
        skim['uac'] = numpy.array(fid.variables['u_ac_obs'][:])
        skim['uac'][numpy.abs(skim['uac']) > 10] = numpy.nan
        skim['ual'] = numpy.array(fid.variables['u_al_obs'][:])
        skim['ual'][numpy.abs(skim['ual']) > 10] = numpy.nan
        for ikey in list_fkey:
            skim[ikey] = numpy.array(fid.variables[ikey][:])
        wnd = numpy.sqrt(skim['uwnd']**2 + skim['vwnd']**2)
        # Mask low-wind (< 3 m/s) samples in both the error terms and the
        # observed currents, and propagate each term's NaNs to the obs.
        for ikey in list_key:
            skim[ikey] = numpy.array(fid.variables[ikey][:])
            skim[ikey][numpy.abs(skim[ikey]) > 100] = numpy.nan
            skim['uac'][numpy.isnan(skim[ikey])] = numpy.nan
            skim['ual'][numpy.isnan(skim[ikey])] = numpy.nan
            skim[ikey][numpy.where((wnd < 3) & (wnd < 3))] = numpy.nan
           # skim[ikey][numpy.where((skim['rain'] > 0.1))] = numpy.nan
        skim['uac'][numpy.where((wnd < 3) & (wnd < 3))] = numpy.nan
        skim['ual'][numpy.where((wnd <3) & (wnd < 3))] = numpy.nan
        #skim['uac'][numpy.where((skim['rain'] > 0.1))] = numpy.nan
        #skim['ual'][numpy.where((skim['rain'] > 0.1))] = numpy.nan
        #_indwdal = numpy.where(abs(skim['u_al_wdrem']) > 1)
        #_indwdac = numpy.where(abs(skim['u_ac_wdrem']) > 1)
        #skim['uac'][_indwdac] = numpy.nan
        #skim['ual'][_indwdal] = numpy.nan
        #skim['u_ac_wdrem'][_indwdac] = numpy.nan
        #skim['u_al_wdrem'][_indwdal] = numpy.nan
       # skim['ual'][numpy.isnan(skim[ikey])] = numpy.nan
        nuac = ref['uac'].shape[1]
        fid.close()
        if nuac < 61:
            continue
        # Drop 50 along-track samples at each end of the pass.
        ind0 = 50
        ind1 = -50
       # skim['ual'] = skim['ualm'] + skim['u_al_instr'] + skim['u_al_dsigma'] + skim['u_al_wdrem']
       # skim['uac'] = skim['uacm'] + skim['u_ac_instr'] + skim['u_ac_dsigma'] + skim['u_ac_wdrem']
        # A column contributes only with at least 100 valid samples and a
        # finite STD; the weight is the valid-sample count.
        for i in range(nuac):
            it_ac = len(numpy.where(numpy.isnan(skim['uac'][ind0:ind1, i]) == False)[0])
            if it_ac >= 100:
                _std = numpy.nanstd(skim['uac'][ind0:ind1, i] - ref['uac'][ind0:ind1, i])*it_ac
                if numpy.isfinite(_std):
                    std_uac[i] += _std
                    ntot_ac[i] += it_ac
                else:
                    std_uac[i] += 0
                    ntot_ac[i] += 0
            it_al = len(numpy.where(numpy.isnan(skim['ual'][ind0:ind1, i]) == False)[0])
            if it_al >= 100:
                _std = numpy.nanstd(skim['ual'][ind0:ind1, i]
                                    - ref['ual'][ind0:ind1, i])*it_al
                if numpy.isfinite(_std):
                    std_ual[i] += _std
                    ntot_al[i] += it_al
                else:
                    std_ual[i] += 0
                    ntot_al[i] += 0
            it_acm = len(numpy.where(numpy.isnan(skim['uacm'][ind0:ind1, i]) == False)[0])
            if it_acm >= 100:
                _std = numpy.nanstd(skim['uacm'][ind0:ind1, i]
                                    - ref['uac'][ind0:ind1, i])*it_acm
                if numpy.isfinite(_std):
                    std_uacm[i] += _std
                    ntot_acm[i] += it_acm
                else:
                    std_uacm[i] += 0
                    ntot_acm[i] += 0
            it_alm = len(numpy.where(numpy.isnan(skim['ualm'][ind0:ind1, i]) == False)[0])
            if it_alm >= 100:
                _std = numpy.nanstd(skim['ualm'][ind0:ind1, i]
                                    - ref['ual'][ind0:ind1, i])*it_alm
                if numpy.isfinite(_std):
                    std_ualm[i] += _std
                    ntot_alm[i] += it_alm
                else:
                    std_ualm[i] += 0
                    ntot_alm[i] += 0
            for ikey in list_key:
                it_alm = len(numpy.where(numpy.isnan(skim[ikey][ind0:ind1, i]) == False)[0])
                if it_alm >= 100:
                    _std = numpy.nanstd(skim[ikey][ind0:ind1, i]) * it_alm
                    if numpy.isfinite(_std):
                        std_err[ikey][i] += _std
                        ntot_err[ikey][i] += it_alm
                    else:
                        std_err[ikey][i] += 0
                        ntot_err[ikey][i] += 0
    # Normalise the weighted sums into mean STDs per column.
    std_uac = std_uac/ntot_ac
    std_ual = std_ual/ntot_al
    std_uacm = std_uacm/ntot_acm
    std_ualm = std_ualm/ntot_alm
    f, (ax1, ax2, ax3) = pyplot.subplots(1, 3, sharey= True, figsize=(20,5 ))
    # Across-track coordinate in km, centred on the nadir track.
    xac = numpy.arange(-(nac - 1) * posting/2, (nac + 1)* posting/2, posting)
    indx = numpy.where(abs(xac)<= 40)
    #std_uac[indx] = numpy.nan
    #std_ual[indx] = numpy.nan
    # Report the columns whose error exceeds the requirement threshold.
    print(xac[numpy.where(std_uac > threshold)])
    print(xac[numpy.where(std_ual > threshold)])
    _ind = numpy.where((numpy.abs(xac)>50) & (numpy.abs(xac)<400))
    print(config, 'uac', numpy.nanmean(std_uac[_ind]))
    print(config, 'ual', numpy.nanmean(std_ual[_ind]))
    print(config, 'uacm', numpy.nanmean(std_uacm[_ind]))
    print(config, 'ualm', numpy.nanmean(std_ualm[_ind]))
    for ikey in list_key:
        std_err[ikey] = std_err[ikey] / ntot_err[ikey]
        print(config, ikey, numpy.nanmean(std_err[ikey][_ind]))
    ax1.plot(xac, std_uac, 'r', label='across track')
    ax1.plot(xac, std_ual, 'b', label='along track')
    ax1.axhline(y=0.15, color="0.5")
    ax1.set_title('Observation {}'.format(config))
    ax1.set_ylim([0.00, 0.4])
    ax1.legend()
    ax2.plot(xac, std_uacm, 'r', label='across track')
    ax2.plot(xac, std_ualm, 'b', label='along track')
    ax2.axhline(y=0.15, color="0.5")
    ax2.set_title('Error-free {}'.format(config))
    ax2.legend()
    ax3.plot(xac, std_uacm, 'r', label='across track regridding')
    ax3.plot(xac, std_ualm, 'b', label='along track regridding')
    for ikey in list_key:
        # Blank the near-nadir band before plotting the error terms.
        std_err[ikey][indx] = numpy.nan
        ax3.plot(xac, std_err[ikey], label=ikey)
    ax3.axhline(y=0.15, color="0.5")
    ax3.set_title('Error decomposition {}'.format(config))
    ax3.legend()
    pyplot.savefig('{}.pdf'.format(datadir_output))
def bin_variables(listfile, listvar, bin_in, modelbox):
    """Bin L2C samples (obs/noerr/true and their differences) on a grid.

    For every variable in *listvar*, each grid cell receives the samples
    lying within one grid-resolution radius of the cell centre.  The
    resulting dictionary, keyed by ``10000 * ilat + ilon``, is pickled to
    *bin_in*.  The pseudo-variable ``'norm'`` bins the current magnitude.

    modelbox : [lon0, lon1, dlon, lat0, lat1, dlat]
    """
    lonp = numpy.arange(modelbox[0], modelbox[1] + modelbox[2], modelbox[2])
    latp = numpy.arange(modelbox[3], modelbox[4] + modelbox[5], modelbox[5])
    # Search radius per latitude row (grid-cell diagonal, in degrees).
    resol = numpy.sqrt((modelbox[2] * numpy.cos(numpy.deg2rad(latp))) **2
                       + modelbox[5]**2)
    dic_v = {}
    for ifile in listfile:
        print(ifile)
        data = netCDF4.Dataset(ifile, 'r')
        lon = data['lon'][:]
        lat = data['lat'][:]
        for j in range(len(lonp)):
            for i in range(len(latp)):
                ind_key = 10000 * int(i) + int(j)
                lon = numpy.mod(lon + 180, 360) - 180
                dist = numpy.sqrt(((lonp[j] - lon)
                                   *numpy.cos(numpy.deg2rad(lat)))**2
                                  + (latp[i] - lat)**2)
                iiobs = numpy.where(dist < resol[i])
                if iiobs[0].shape == (0, ):
                    continue
                if ind_key not in dic_v.keys():
                    dic_v[ind_key] = {}
                for ivar in listvar:
                    ivaro = '{}_obs'.format(ivar)
                    ivart = '{}_noerr'.format(ivar)
                    ivarr = '{}_true'.format(ivar)
                    idvaro = '{}_diff_obs'.format(ivar)
                    idvart = '{}_diff_noerr'.format(ivar)
                    if ivar == 'norm':
                        varo = numpy.sqrt(data['u_ac_obs'][iiobs]**2
                                          + data['u_al_obs'][iiobs]**2)
                        vart = numpy.sqrt(data['u_ac_noerr'][iiobs]**2
                                          + data['u_al_noerr'][iiobs]**2)
                        varr = numpy.sqrt(data['u_ac_true'][iiobs]**2
                                          + data['u_al_true'][iiobs]**2)
                    else:
                        try:
                            varo = numpy.ma.array(data[ivaro][iiobs])
                        except (KeyError, IndexError):
                            # BUG fix: skip missing variables instead of
                            # falling through and using an undefined 'varo'.
                            print(ifile, ivar, iiobs)
                            continue
                        vart = data[ivart][iiobs]
                        varr = data[ivarr][iiobs]
                    mask = varo.mask
                    dvaro = numpy.array(abs(varo.data - varr.data))
                    dvart = numpy.array(abs(vart.data - varr.data))
                    if varo.any():
                        if ivaro not in dic_v[ind_key].keys():
                            dic_v[ind_key][ivaro] = []
                        if ivart not in dic_v[ind_key].keys():
                            dic_v[ind_key][ivart] = []
                        if ivarr not in dic_v[ind_key].keys():
                            dic_v[ind_key][ivarr] = []
                        if idvaro not in dic_v[ind_key].keys():
                            dic_v[ind_key][idvaro] = []
                        if idvart not in dic_v[ind_key].keys():
                            dic_v[ind_key][idvart] = []
                        # BUG fix: the original appended 'varo' three extra
                        # times into the '_noerr' list, corrupting every
                        # statistic computed from it downstream.
                        dic_v[ind_key][ivaro].append(varo[~mask])
                        dic_v[ind_key][ivart].append(vart[~mask])
                        dic_v[ind_key][ivarr].append(varr[~mask])
                        dic_v[ind_key][idvart].append(dvart[~mask])
                        dic_v[ind_key][idvaro].append(dvaro[~mask])
        data.close()
    with open(bin_in, 'wb') as f:
        pickle.dump(dic_v, f)
def compute_rms(bin_in, bin_out, listvar, modelbox):
    """Reduce the cell dictionary of ``bin_variables`` to RMS/STD/SNR maps.

    For each variable ``v`` in *listvar*, the binned samples ``v_obs``,
    ``v_noerr``, ``v_true``, ``v_diff_obs`` and ``v_diff_noerr`` are reduced
    per grid cell.  Three pickles are written in the current directory:
    ``rms_<bin_out>``, ``std_<bin_out>`` and ``snr_<bin_out>``.
    """
    with open(bin_in, 'rb') as f:
        dic_v = pickle.load(f)
    lonp = numpy.arange(modelbox[0], modelbox[1] + modelbox[2], modelbox[2])
    latp = numpy.arange(modelbox[3], modelbox[4] + modelbox[5], modelbox[5])
    rms = {}
    std = {}
    snr = {}
    rms['lon'] = lonp
    rms['lat'] = latp
    std['lon'] = lonp
    std['lat'] = latp
    snr['lon'] = lonp
    snr['lat'] = latp
    # BUG fix: the last suffix was 'diff_noerr' (missing underscore), so the
    # '<var>_diff_noerr' arrays were never initialised and reading them
    # below raised KeyError.
    _list = ['_obs', '_noerr', '_true', '_diff_obs', '_diff_noerr']
    for ivar in listvar:
        for suffix in _list:
            ivar3 = '{}{}'.format(ivar, suffix)
            rms[ivar3] = numpy.full((len(latp), len(lonp)), numpy.nan)
            std[ivar3] = numpy.full((len(latp), len(lonp)), numpy.nan)
            snr[ivar3] = numpy.full((len(latp), len(lonp)), numpy.nan)
    for j in range(len(rms['lon'])):
        for i in range(len(rms['lat'])):
            # Cell key convention shared with the binning step.
            ind_key = 10000 * int(i) + int(j)
            if ind_key not in dic_v.keys():
                continue
            for ivar in listvar:
                ivaro = '{}_obs'.format(ivar)
                ivart = '{}_noerr'.format(ivar)
                ivarr = '{}_true'.format(ivar)
                idvaro = '{}_diff_obs'.format(ivar)
                idvart = '{}_diff_noerr'.format(ivar)
                if ivaro not in dic_v[ind_key].keys():
                    continue
                # BUG fix: the samples are stored under the suffixed keys,
                # not under the bare variable name.
                varo = numpy.concatenate(dic_v[ind_key][ivaro]).ravel()
                vart = numpy.concatenate(dic_v[ind_key][ivart]).ravel()
                varr = numpy.concatenate(dic_v[ind_key][ivarr]).ravel()
                dvaro = numpy.concatenate(dic_v[ind_key][idvaro]).ravel()
                dvart = numpy.concatenate(dic_v[ind_key][idvart]).ravel()
                # Reference signal level, numerator of the SNR.
                rms_true = numpy.sqrt(numpy.nanmean(varr**2))
                # BUG fix: the error spreads were never computed before
                # being used as SNR denominators.
                std[idvaro][i, j] = numpy.nanstd(dvaro)
                std[idvart][i, j] = numpy.nanstd(dvart)
                rms[idvaro][i, j] = numpy.sqrt(numpy.nanmean(dvaro**2))
                rms[idvart][i, j] = numpy.sqrt(numpy.nanmean(dvart**2))
                rms[ivaro][i, j] = numpy.sqrt(numpy.nanmean(varo**2))
                std[ivaro][i, j] = numpy.nanstd(varo)
                snr[ivaro][i, j] = rms_true / std[idvaro][i, j]
                rms[ivart][i, j] = numpy.sqrt(numpy.nanmean(vart**2))
                std[ivart][i, j] = numpy.nanstd(vart)
                snr[ivart][i, j] = rms_true / std[idvart][i, j]
                rms[ivarr][i, j] = numpy.sqrt(numpy.nanmean(varr**2))
                std[ivarr][i, j] = numpy.nanstd(varr)
    with open('rms_{}'.format(bin_out), 'wb') as f:
        pickle.dump(rms, f)
    with open('std_{}'.format(bin_out), 'wb') as f:
        pickle.dump(std, f)
    with open('snr_{}'.format(bin_out), 'wb') as f:
        pickle.dump(snr, f)
def plot_rms(pfile, list_var, outfile, isrms=True, isstd=True, issnr=True,
             isvar=True):
    """Plot the binned rms / std / snr maps produced by compute_rms.

    Parameters:
        pfile: suffix of the pickles ('rms_<pfile>', 'std_<pfile>', ...).
        list_var: variables to plot; ignored (overridden by the keys of the
            rms pickle) whenever isrms is True.
        outfile: suffix appended to every output png name.
        isrms, isstd, issnr: enable plotting of each diagnostic.
        isvar: if True, also load the 'cov_<pfile>' pickle (currently the
            loaded covariance is not plotted).
    """
    import mod_plot
    # Default to empty maps so that disabled diagnostics do not raise a
    # NameError when their dict is referenced in the loop below.
    rms, std, snr = {}, {}, {}
    if isrms is True:
        with open('rms_{}'.format(pfile), 'rb') as f:
            rms = pickle.load(f)
        # Plot every variable found in the rms file (original behaviour).
        list_var = rms.keys()
    if isstd is True:
        with open('std_{}'.format(pfile), 'rb') as f:
            std = pickle.load(f)
    if issnr is True:
        with open('snr_{}'.format(pfile), 'rb') as f:
            snr = pickle.load(f)
    if isvar is True:
        with open('cov_{}'.format(pfile), 'rb') as f:
            cov = pickle.load(f)
    for ivar in list_var:
        if ivar in ('lon', 'lat'):
            # Coordinate axes stored alongside the maps, not data variables.
            continue
        if ivar in rms.keys() and isrms is True:
            lon = rms['lon']
            lat = rms['lat']
            var = rms[ivar]
            _outfile = 'rms_{}_{}.png'.format(ivar, outfile)
            # Clip the colorbar to the 5th-95th percentile to avoid outliers.
            vmin = numpy.nanpercentile(var, 5)
            vmax = numpy.nanpercentile(var, 95)
            mod_plot.plot_diag(lon, lat, var, _outfile, vmin=vmin, vmax=vmax, cmap='jet')
        if ivar in std.keys() and isstd is True:
            lon = std['lon']
            lat = std['lat']
            var = std[ivar]
            vmin = numpy.nanpercentile(var, 5)
            vmax = numpy.nanpercentile(var, 95)
            _outfile = 'std_{}_{}.png'.format(ivar, outfile)
            mod_plot.plot_diag(lon, lat, var, _outfile, vmin=vmin, vmax=vmax, cmap='jet')
        # The original snr branch was missing the issnr guard, which raised
        # a NameError when issnr was False.
        if ivar in snr.keys() and issnr is True:
            lon = snr['lon']
            lat = snr['lat']
            var = snr[ivar]
            vmin = numpy.nanpercentile(var, 5)
            vmax = numpy.nanpercentile(var, 95)
            _outfile = 'snr_{}_{}.png'.format(ivar, outfile)
            mod_plot.plot_diag(lon, lat, var, _outfile, vmin=vmin, vmax=vmax, cmap='jet')
    return None
if '__main__' == __name__:
    # The script expects the path of a json parameter file as its first
    # command-line argument.  sys.argv always contains at least the script
    # name, so the guard must require two entries ('< 1' was always false
    # and the missing-argument case crashed with an IndexError below).
    if len(sys.argv) < 2:
        print('Provide json file for diagnostics')
        sys.exit(1)
    file_param = sys.argv[1]
    with open(file_param, 'r') as f:
        params = json.load(f)
    # rms_l2c(p.outdatadir, p.config)
    # Unpack the l2c diagnostics section of the parameter file.
    pl2c = params['l2c']
    length_al = pl2c['alongtrack_length']
    list_config = pl2c['list_config']
    list_dir = pl2c['indatadir']
    posting = pl2c['posting']
    outdir = pl2c['outdatadir']
    list_filter_size = pl2c['filter_size']
    # Along-track std diagnostics, one per configuration.
    for i, iconfig in enumerate(list_config):
        filter_size = list_filter_size[i]
        indatadir = list_dir[i]
        # print(indatadir, iconfig)
        outfile = os.path.join(outdir, 'std_{}'.format(iconfig))
        rms_l2c(indatadir, iconfig, outfile, threshold=0.15, fsize=filter_size)
        outfile = os.path.join(outdir, 'coherency_{}_obs_model'.format(iconfig))
        #coherency_l2c((indatadir, indatadir), (iconfig, iconfig),
        #              ('obs','noerr'), length_al,
        #              posting, outfile=outfile, fsize=filter_size)
    list_var = ('obs', 'obs', 'obs', 'obs') #, 'obs')
    nal_min = length_al
    # print(list_dir, list_config)
    outfile = os.path.join(outdir, 'coherency_obs_{}'.format(list_config[0][:-8]))
    #coherency_l2c(list_dir, list_config, list_var, nal_min,
    #              posting, outfile=outfile, fsize=filter_size)
    list_var = ('noerr', 'noerr', 'noerr', 'noerr') #, 'obs')
    print(list_dir, list_config)
    outfile = os.path.join(outdir, 'coherency_noerr_{}'.format(list_config[0][:-8]))
    #coherency_l2c(list_dir, list_config, list_var, nal_min,
    #              posting, outfile=outfile, fsize=filter_size)
    # Binned rms diagnostics on the l2c grids.
    listvar = ['u_ac', 'u_al', 'ux', 'uy', 'norm']
    modelbox2 = params['l2b']['modelbox_bin']
    for i, iconfig in enumerate(list_config):
        filter_size = list_filter_size[i]
        indatadir = list_dir[i]
        print(indatadir, iconfig)
        files = os.path.join(indatadir, '{}_l2c_c01'.format(iconfig))
        listfiles = glob.glob('{}*.nc'.format(files))
        bin_file = os.path.join(outdir, '{}_l2c.pyo'.format(iconfig))
        # bin_variables(listfiles[:10], listvar, bin_file, modelbox2)
        # bin_file2 = '{}_l2c.pyo'.format(iconfig)
        # compute_rms(bin_file, bin_file2, listvar, modelbox2)
        # plot_rms(bin_file2, listvar, iconfig)
| 23,170 | 41.282847 | 101 | py |
skimulator | skimulator-master/example/params_example_8beams.py | # -----------------------#
# Files and directories
# -----------------------#
## -- Get the user home directory
from os.path import expanduser
import os
import math
home = expanduser("~")
# ------ Name of the configuration (to build output files names)
# 8 beams, 45 azimuths, 1024 pulses and cycle length of 37 ms
config = "WW3_23W_metop_2018_8a"
# 8 beams, 72 azimuths, 0512 pulses and cycle length of 18 ms
# config = "WW3_23W_metop_2018_8b"
# 8 beams, 108 azimuths, 1024 pulses and cycle length of 37 ms
# config = "WW3_23W_metop_2018_8c"
# ------ Directory that contains orbit file:
dir_setup = os.path.join(home, 'skimulator', 'data')
# ------ Directory that contains your own inputs:
indatadir = os.path.join(home, 'skimulator', 'example', 'input_fields')
# ------ Directory that contains your outputs:
outdatadir = os.path.join(home, 'skimulator', 'example', 'skim_output')
# ------ Orbit file:
#filesat = os.path.join(dir_setup,'orbs1a.txt')
filesat = os.path.join(dir_setup,'orbmetop_skim.txt')
# ------ Number of days in orbit (optional if specified in orbit file)
satcycle = 29
# ------ Satellite elevation (optional if specified in orbit file)
sat_elev = 817 * 10**3
# ------ Order of columns (lon, lat, time) in orbit file
#        (default is (0, 1, 2) with order_orbit_col = None)
order_orbit_col = None
# , dir_setup+os.sep+'orbjason.txt', dir_setup+os.sep+'orbaltika.txt' ]
# ------ Number of processor for parallelisation
proc_number = 1
# ------ Deactivate printing of progress bar to avoid huge log
progress_bar = True

# -----------------------#
# SKIM swath parameters
# -----------------------#
# ------ Satellite grid file root name:
#        (Final file name is root_name_[numberofpass].nc)
filesgrid = os.path.join(outdatadir, '{}_grid'.format(config))
# ------ Force the computation of the satellite grid:
makesgrid = True
# ------ Give a subdomain if only part of the model is needed:
#        (modelbox=[lon_min, lon_max, lat_min, lat_max])
#        (If modelbox is None, the whole domain of the model is considered)
modelbox = None  # [329.,347., -8.,8.]
# ------ Rotation speed of the antenna (in tr/min)
if '2018_8a' in config:
    rotation_speed = 4.52
if '2018_8b' in config:
    rotation_speed = 5.66
if '2018_8c' in config:
    rotation_speed = 1.89
# ------ Cycle duration
cycle = 0.0368
if '2018_8b' in config:
    cycle = 0.0368 / 2.
# cycle = 0.0368 / 2.  # for 512 pulses configuration
# ------ List of position of beams:
list_pos = (0, 72*math.pi/180., 144*math.pi/180., 216*math.pi / 180.,
            288*math.pi/180., 0, math.pi)
# ------ List of angle of beams in degrees:
list_angle = (12, 12, 12, 12, 12, 6, 6)
# ------ List of timeshift as regard to nadir for 12 degree beams:
list_shift = (1, 2, 4, 5, 7, 3, 6)
# ------ Shift longitude of the orbit file if no pass is in the domain
#        (in degree): Default value is None (no shift)
shift_lon = 0
# ------ Shift time of the satellite pass (in day):
#        Default value is None (no shift)
shift_time = None

# -----------------------#
# Model input parameters
# -----------------------#
# ------ List of model files:
#        (The first file contains the grid and is not considered as model data)
#        To generate the noise alone, file_input=None and specify region
#        in modelbox
file_input = os.path.join(indatadir, 'list_of_file.txt')
# ------ Type of model data:
#        (Optional, default is NETCDF_MODEL and reads netcdf3 and netcdf4 files)
#        (Other option is WW3)
model = 'WW3'
# ------ First time of the model
first_time = '2011-11-15T00:00:00Z'
# ------ Grid file name
file_grid_model = (os.path.join(indatadir, 'ww3.20111115_cur.nc'),)
# ------ Specify if there is a ice mask for high latitudes
#        (if true, mask is recomputed at each cycle)
ice_mask = False
# ------ Type of grid:
#        'regular' or 'irregular', if 'regular' only 1d coordinates
#        are extracted from model
grid = 'regular'
# ------ Specify list of variable:
list_input_var = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 0],
                  'uuss': ['uuss', 'uss', 0], 'vuss': ['vuss', 'uss', 0],
                  'ice': ['ice', 'ice', 0], 'mssd': ['mssd', 'msd', 0],
                  'mssx': ['mssu', 'mss', 0], 'mssy': ['mssc', 'mss', 0],
                  'ssh': ['wlv', 'wlv', 0], 'hs': ['hs', 'hs', 0],
                  'uwnd': ['uwnd', 'wnd', 0], 'vwnd': ['vwnd', 'wnd', 0]}
# ------ Specify longitude variable:
lon = ('longitude',)
# ------ Specify latitude variable:
lat = ('latitude',)
# ------ Specify number of time in file:
dim_time = 24
# ------ Time step between two model outputs (in days):
timestep = 1/24.
# ------ Number of outputs to consider:
#        (timestep*nstep=total number of days)
nstep = 35*24
# ------ Not a number value:
model_nan = -32767.

# -----------------------#
# SKIM output files
# -----------------------#
# ------ Output file root name:
#        (Final file name is root_name_c[cycle].nc
file_output = os.path.join(outdatadir, config)
# ------ Interpolation of the SSH from the model (if grid is irregular and
#        pyresample is not installed:
#        (either 'linear' or 'nearest', use 'nearest' for large region
#        as it is faster and use less memory.)
interpolation = 'linear'

# -----------------------#
# SKIM error parameters
# -----------------------#
# List of errors to compute
noise = ['Altimeter', 'Instrument', 'Attitude', 'DSigma', 'Rain',
         'WaveDoppler']
# Number of seed for automatic computation
nseed = 0
# Length of repeat for altimetric noise
len_repeat = 20000
# Todo compute automatically with cycle, number of beams and satellite velocity
delta_al = 2
# ------ Number of random realisations for instrumental and geophysical error
#        (recommended ncomp=2000), ncomp1d is used for 1D spectrum, and ncomp2d
#        is used for 2D spectrum (wet troposphere computation):
ncomp1d = 3000
ncomp2d = 2000
# ------- Choice of instrument configuration
instr_configuration = 'A'
# ------- Coefficient SNR to retrieve instrumental noise from sigma,
#         Recommanded value for 1024 pulses: 3e-2, for 512 pulses: 3sqrt(2)e-3
snr_coeff = 6e-3
if '2018_8b' in config:
    snr_coeff = 1.4142*6e-3
# ------- File which provide the AOCS error:
#         (this was previously assigned twice with the same value; the
#         duplicate assignment has been removed)
yaw_file = os.path.join(dir_setup, 'sample_req1.nc')

## -- Geophysical error
## ----------------------
# ------ Consider ice in sigma0 computation
ice = True
# ------ Rain file containing scenarii (python file):
rain_file = os.path.join(dir_setup, 'rain_eq_atl.pyo')
# ------ Threshold to flag data:
rain_threshold = 0.15
wet_tropo = False

# -----------------------#
# L2C computation
# -----------------------#
# config name for L2d:
config_l2c = ''
# Length resolution to select neighbors (in km):
resol = 40
# Grid resolution for l2c (alongtrack, acrosstrack) grid (in km):
posting = 5
# Remove noisy data around nadir (in km):
ac_threshold = 20
# List of variables to be interpolated on the swath:
list_input_var_l2c = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 0]}

# -----------------------#
# L2D computation
# -----------------------#
# config name for L2d:
config_l2d = ''
# Length resolution to select neighbors (multiplication factor):
resol_spatial_l2d = 1
# Temporal resolution to select neighbors (multiplication factor):
resol_temporal_l2d = 1
# Grid resolution for l2d (lat, lon) grid (in degrees):
posting_l2d = (0.1, 0.1)
# Time domain: (start_time, end_time, dtime) in days:
time_domain = (7, 23, 1)
# Spatial domain (lon_min, lon_max, lat_min, lat_max):
spatial_domain = [0, 360, -90, 90]
# List of variables to be interpolated on the grid:
list_input_var_l2d = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 0]}
| 7,726 | 36.692683 | 80 | py |
skimulator | skimulator-master/example/params_example_6beams.py | # -----------------------#
# Files and directories
# -----------------------#
## -- Get the user home directory
from os.path import expanduser
import os
import math
home = expanduser("~")
# ------ Name of the configuration (to build output files names)
# config = "WW3_EQ_metop_2018_6a"
# 6 beams, 60 azimuths, 512 pulses and cycle length of 37/2 ms
# config = "WW3_EQ_metop_2019_6a"
# NOTE: the 6b assignment below was dead code (immediately overwritten by
# the 6c one); it is kept as a commented alternative.
# config = "WW3_EQ_metop_2019_6b"
config = "WW3_EQ_metop_2019_6c"
# 6 beams, ?? azimuths, 1024 pulses and cycle length of 37 ms
# config = "WW3_EQ_metop_2019_6b"
# ------ Directory that contains orbit file:
dir_setup = os.path.join(home, 'skimulator', 'data')
# ------ Directory that contains your own inputs:
indatadir = os.path.join(home, 'skimulator', 'example', 'input_fields')
# ------ Directory that contains your outputs:
outdatadir = os.path.join(home, 'skimulator', 'example', 'skim_output')
# ------ Orbit file:
#filesat = os.path.join(dir_setup,'orbs1a.txt')
filesat = os.path.join(dir_setup,'orbmetop_skim.txt')
# ------ Number of days in orbit (optional if specified in orbit file)
satcycle = 29
# ------ Satellite elevation (optional if specified in orbit file)
sat_elev = 817 * 10**3
# ------ Order of columns (lon, lat, time) in orbit file
#        (default is (0, 1, 2) with order_orbit_col = None)
order_orbit_col = None
# , dir_setup+os.sep+'orbjason.txt', dir_setup+os.sep+'orbaltika.txt' ]
# ------ Number of processor for parallelisation
proc_number = 1
# ------ Deactivate printing of progress bar to avoid huge log
progress_bar = True

# -----------------------#
# SKIM swath parameters
# -----------------------#
# ------ Satellite grid file root name:
#        (Final file name is root_name_[numberofpass].nc)
filesgrid = os.path.join(outdatadir, '{}_grid'.format(config))
# ------ Force the computation of the satellite grid:
makesgrid = True
# ------ Give a subdomain if only part of the model is needed:
#        (modelbox=[lon_min, lon_max, lat_min, lat_max])
#        (If modelbox is None, the whole domain of the model is considered)
modelbox = None  # [329.,347., -8.,8.]
# ------ Rotation speed of the antenna (in tr/min)
if '2019_6a' in config:
    rotation_speed = 6.0
elif '2019_6b' in config:
    rotation_speed = 6
elif '2019_6c' in config:
    rotation_speed = 6
else:
    rotation_speed = 9.26
# ------ Cycle duration
cycle = 0.0368
# ------ List of position of beams:
if '2019_6a' in config:
    list_pos = (0, 90 * math.pi/180., 180*math.pi/180, 270*math.pi/180, 0)
elif '2019_6b' in config:
    list_pos = (0, 120 * math.pi/180, 240 * math.pi/180, 0, 180 * math.pi / 180)
elif '2019_6c' in config:
    list_pos = (0, 120 * math.pi/180, 240 * math.pi/180, 0, 180 * math.pi / 180)
else:
    list_pos = (0, 120*math.pi/180., 240*math.pi/180.,
                0, math.pi)
# ------ List of angle of beams in degrees:
if '2019_6a' in config:
    list_angle = (12, 12, 12, 12, 6)
else:
    list_angle = (12, 12, 12, 6, 6)
# ------ List of timeshift as regard to nadir for 12 degree beams:
if '2019_6a' in config:
    list_shift = (1, 2, 5, 3, 4)
elif '2019_6b' in config:
    list_shift = (1, 5, 2, 4, 3)
elif '2019_6c' in config:
    list_shift = (1, 4, 2, 5, 3)
else:
    list_shift = (5, 2, 3, 1, 4)
# ------ Shift longitude of the orbit file if no pass is in the domain
#        (in degree): Default value is None (no shift)
shift_lon = 0
# ------ Shift time of the satellite pass (in day):
#        Default value is None (no shift)
shift_time = None

# -----------------------#
# Model input parameters
# -----------------------#
# ------ List of model files:
#        (The first file contains the grid and is not considered as model data)
#        To generate the noise alone, file_input=None and specify region
#        in modelbox
file_input = os.path.join(indatadir, 'list_of_file.txt')
# ------ Type of model data:
#        (Optional, default is NETCDF_MODEL and reads netcdf3 and netcdf4 files)
#        (Other option is WW3)
model = 'WW3'
# ------ First time of the model
first_time = '2011-11-15T00:00:00Z'
# ------ Grid file name
file_grid_model = (os.path.join(indatadir, 'ww3.20111115_cur.nc'),)
# ------ Specify if there is a ice mask for high latitudes
#        (if true, mask is recomputed at each cycle)
ice_mask = False
# ------ Type of grid:
#        'regular' or 'irregular', if 'regular' only 1d coordinates
#        are extracted from model
grid = 'regular'
# ------ Specify list of variable:
list_input_var = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 0],
                  'uuss': ['uuss', 'uss', 0], 'vuss': ['vuss', 'uss', 0],
                  'ice': ['ice', 'ice', 0], 'mssd': ['mssd', 'msd', 0],
                  'mssx': ['mssu', 'mss', 0], 'mssy': ['mssc', 'mss', 0],
                  'ssh': ['wlv', 'wlv', 0], 'hs': ['hs', 'hs', 0],
                  'uwnd': ['uwnd', 'wnd', 0], 'vwnd': ['vwnd', 'wnd', 0]}
# ------ Specify longitude variable:
lon = ('longitude',)
# ------ Specify latitude variable:
lat = ('latitude',)
# ------ Specify number of time in file:
dim_time = 24
# ------ Time step between two model outputs (in days):
timestep = 1/24.
# ------ Number of outputs to consider:
#        (timestep*nstep=total number of days)
nstep = 35*24
# ------ Not a number value:
model_nan = -32767.

# -----------------------#
# SKIM output files
# -----------------------#
# ------ Output file root name:
#        (Final file name is root_name_c[cycle].nc
file_output = os.path.join(outdatadir, config)
# ------ Interpolation of the SSH from the model (if grid is irregular and
#        pyresample is not installed:
#        (either 'linear' or 'nearest', use 'nearest' for large region
#        as it is faster and use less memory.)
interpolation = 'linear'

# -----------------------#
# SKIM error parameters
# -----------------------#
# List of errors to compute
noise = ['Altimeter', 'Instrument', 'Attitude', 'DSigma', 'Rain',
         'WaveDoppler']
# Number of seed for automatic computation
nseed = 0
# Length of repeat for altimetric noise
len_repeat = 20000
# Todo compute automatically with cycle, number of beams and satellite velocity
delta_al = 2
# ------ Number of random realisations for instrumental and geophysical error
#        (recommended ncomp=2000), ncomp1d is used for 1D spectrum, and ncomp2d
#        is used for 2D spectrum (wet troposphere computation):
ncomp1d = 3000
ncomp2d = 2000
# ------- Choice of instrument configuration
instr_configuration = 'A'
# ------- Coefficient SNR to retrieve instrumental noise from sigma,
#         Recommanded value for 1024 pulses: 3e-2, for 512 pulses: 3sqrt(2)e-3
snr_coeff = 1.4142*6e-3
# ------- File which provide the AOCS error:
#         (this was previously assigned twice with the same value; the
#         duplicate assignment has been removed)
yaw_file = os.path.join(dir_setup, 'sample_req1.nc')

## -- Geophysical error
## ----------------------
# ------ Consider ice in sigma0 computation
ice = True
# ------ Rain file containing scenarii (python file):
rain_file = os.path.join(dir_setup, 'rain_eq_atl.pyo')
# ------ Threshold to flag data:
rain_threshold = 0.15

# -----------------------#
# L2C computation
# -----------------------#
# config name for L2d:
config_l2c = ''
# Length resolution to select neighbors (in km):
resol = 40
# Grid resolution for l2c (alongtrack, acrosstrack) grid (in km):
posting = 5
# Remove noisy data around nadir (in km):
ac_threshold = 20
# List of variables to be interpolated on the swath:
list_input_var_l2c = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 0]}

# -----------------------#
# L2D computation
# -----------------------#
# config name for L2d:
config_l2d = ''
# Length resolution to select neighbors (multiplication factor):
resol_spatial_l2d = 1
# Temporal resolution to select neighbors (multiplication factor):
resol_temporal_l2d = 1
# Grid resolution for l2d (lat, lon) grid (in degrees):
posting_l2d = (0.1, 0.1)
# Time domain: (start_time, end_time, dtime) in days:
time_domain = (7, 23, 1)
# Spatial domain (lon_min, lon_max, lat_min, lat_max):
spatial_domain = [0, 360, -90, 90]
# List of variables to be interpolated on the grid:
list_input_var_l2d = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 0]}
| 8,115 | 36.229358 | 80 | py |
skimulator | skimulator-master/example/params_example_8beams_ogcm.py | # -----------------------#
# Files and directories
# -----------------------#
## -- Get the user home directory
from os.path import expanduser
import os
import math
home = expanduser("~")
# ------ Name of the configuration (to build output files names)
# 8 beams, 45 azimuths, 1024 pulses and cycle length of 37 ms
config = "OGCM_metop_2018_8a"
# 8 beams, 72 azimuths, 0512 pulses and cycle length of 18 ms
# config = "WW3_23W_metop_2018_8b"
# 8 beams, 108 azimuths, 1024 pulses and cycle length of 37 ms
# config = "WW3_23W_metop_2018_8c"
# ------ Directory that contains orbit file:
dir_setup = os.path.join(home, 'skimulator', 'data')
# ------ Directory that contains your own inputs:
indatadir = os.path.join(home, 'skimulator', 'example', 'input_fields')
# ------ Directory that contains your outputs:
outdatadir = os.path.join(home, 'skimulator', 'example', 'skim_output')
# ------ Orbit file:
#filesat = os.path.join(dir_setup,'orbs1a.txt')
filesat = os.path.join(dir_setup,'orbmetop_skim.txt')
# ------ Number of days in orbit (optional if specified in orbit file)
satcycle = 29
# ------ Satellite elevation (optional if specified in orbit file)
sat_elev = 817 * 10**3
# ------ Order of columns (lon, lat, time) in orbit file
#        (default is (0, 1, 2) with order_orbit_col = None)
order_orbit_col = None
# , dir_setup+os.sep+'orbjason.txt', dir_setup+os.sep+'orbaltika.txt' ]
# ------ Number of processor for parallelisation
proc_number = 1
# ------ Deactivate printing of progress bar to avoid huge log
progress_bar = True

# -----------------------#
# SKIM swath parameters
# -----------------------#
# ------ Satellite grid file root name:
#        (Final file name is root_name_[numberofpass].nc)
filesgrid = os.path.join(outdatadir, '{}_grid'.format(config))
# ------ Force the computation of the satellite grid:
makesgrid = True
# ------ Give a subdomain if only part of the model is needed:
#        (modelbox=[lon_min, lon_max, lat_min, lat_max])
#        (If modelbox is None, the whole domain of the model is considered)
modelbox = [300, 310, 30, 40]
# ------ Rotation speed of the antenna (in tr/min)
if '2018_8a' in config:
    rotation_speed = 4.52
if '2018_8b' in config:
    rotation_speed = 5.66
if '2018_8c' in config:
    rotation_speed = 1.89
# ------ Cycle duration
cycle = 0.0368
if '2018_8b' in config:
    cycle = 0.0368 / 2.
# cycle = 0.0368 / 2.  # for 512 pulses configuration
# ------ List of position of beams:
list_pos = (0, 72*math.pi/180., 144*math.pi/180., 216*math.pi / 180.,
            288*math.pi/180., 0, math.pi)
# ------ List of angle of beams in degrees:
list_angle = (12, 12, 12, 12, 12, 6, 6)
# ------ List of timeshift as regard to nadir for 12 degree beams:
list_shift = (1, 2, 4, 5, 7, 3, 6)
# ------ Shift longitude of the orbit file if no pass is in the domain
#        (in degree): Default value is None (no shift)
shift_lon = 0
# ------ Shift time of the satellite pass (in day):
#        Default value is None (no shift)
shift_time = None

# -----------------------#
# Model input parameters
# -----------------------#
# ------ List of model files:
#        (The first file contains the grid and is not considered as model data)
#        To generate the noise alone, file_input=None and specify region
#        in modelbox
file_input = os.path.join(indatadir, 'list_of_file.txt')
# ------ Type of model data:
#        (Optional, default is NETCDF_MODEL and reads netcdf3 and netcdf4 files)
#        (Other option is WW3)
model = 'NETCDF_MODEL'
# ------ First time of the model
first_time = '2011-11-15T00:00:00Z'
# ------ Grid file name
file_grid_model = (os.path.join(indatadir, 'model.20111115_grid.nc'),)
# ------ Specify if there is a ice mask for high latitudes
#        (if true, mask is recomputed at each cycle)
ice_mask = False
# ------ Type of grid:
#        'regular' or 'irregular', if 'regular' only 1d coordinates
#        are extracted from model
grid = 'regular'
# ------ Specify list of variable:
list_input_var = {'ucur': ['uo', '', 0], 'vcur': ['vo', '', 0],
                  'ssh': ['sla', '', 0]
                  }
# ------ Specify longitude variable:
lon = ('longitude', )
# ------ Specify latitude variable:
lat = ('latitude', )
# ------ Specify number of time in file:
dim_time = 1
# ------ Time step between two model outputs (in days):
timestep = 1
# ------ Number of outputs to consider:
#        (timestep*nstep=total number of days)
nstep = 20
# ------ Not a number value:
model_nan = -32767.

# -----------------------#
# SKIM output files
# -----------------------#
# ------ Output file root name:
#        (Final file name is root_name_c[cycle].nc
file_output = os.path.join(outdatadir, config)
# ------ Interpolation of the SSH from the model (if grid is irregular and
#        pyresample is not installed:
#        (either 'linear' or 'nearest', use 'nearest' for large region
#        as it is faster and use less memory.)
interpolation = 'linear'

# -----------------------#
# SKIM error parameters
# -----------------------#
# List of errors to compute
noise = ['Altimeter', 'Instrument', 'Attitude', 'DSigma', 'Rain',
         'WaveDoppler']
# Number of seed for automatic computation
nseed = 0
# Length of repeat for altimetric noise
len_repeat = 20000
# Todo compute automatically with cycle, number of beams and satellite velocity
delta_al = 2
# ------ Number of random realisations for instrumental and geophysical error
#        (recommended ncomp=2000), ncomp1d is used for 1D spectrum, and ncomp2d
#        is used for 2D spectrum (wet troposphere computation):
ncomp1d = 3000
ncomp2d = 2000
# ------- Choice of instrument configuration
instr_configuration = 'A'
# ------- Coefficient SNR to retrieve instrumental noise from sigma,
#         Recommanded value for 1024 pulses: 3e-2, for 512 pulses: 3sqrt(2)e-3
snr_coeff = 6e-3
if '2018_8b' in config:
    snr_coeff = 1.4142*6e-3
# ------- File which provide the AOCS error:
#         (this was previously assigned twice with the same value; the
#         duplicate assignment has been removed)
yaw_file = os.path.join(dir_setup, 'sample_req1.nc')

## -- Geophysical error
## ----------------------
# ------ Consider ice in sigma0 computation
ice = True
# ------ Rain file containing scenarii (python file):
rain_file = os.path.join(dir_setup, 'rain_eq_atl.pyo')
# ------ Threshold to flag data:
rain_threshold = 0.15
wet_tropo = False

# -----------------------#
# L2C computation
# -----------------------#
# config name for L2d:
config_l2c = ''
# Length resolution to select neighbors (in km):
resol = 40
# Grid resolution for l2c (alongtrack, acrosstrack) grid (in km):
posting = 5
# Remove noisy data around nadir (in km):
ac_threshold = 20
# List of variables to be interpolated on the swath:
list_input_var_l2c = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 0]}

# -----------------------#
# L2D computation
# -----------------------#
# config name for L2d:
config_l2d = ''
# Length resolution to select neighbors (multiplication factor):
resol_spatial_l2d = 1
# Temporal resolution to select neighbors (multiplication factor):
resol_temporal_l2d = 1
# Grid resolution for l2d (lat, lon) grid (in degrees):
posting_l2d = (0.1, 0.1)
# Time domain: (start_time, end_time, dtime) in days:
time_domain = (7, 23, 1)
# Spatial domain (lon_min, lon_max, lat_min, lat_max):
spatial_domain = [0, 360, -90, 90]
# List of variables to be interpolated on the grid:
list_input_var_l2d = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 0]}
| 7,411 | 35.875622 | 80 | py |
skimulator | skimulator-master/example/params_stream.py | # -----------------------#
# Files and directories
# -----------------------#
## -- Get the user home directory
from os.path import expanduser
import os
import math
home = expanduser("~") + '/src/'
# ------ Name of the configuration (to build output files names)
# 8 beams, 45 azimuths, 1024 pulses and cycle length of 37 ms
rp = 35
config = f'WW3_STREAM_{rp}'
# 8 beams, 72 azimuths, 0512 pulses and cycle length of 18 ms
# config = "WW3_23W_metop_2018_8b"
# 8 beams, 108 azimuths, 1024 pulses and cycle length of 37 ms
# config = "WW3_23W_metop_2018_8c"
# ------ Directory that contains orbit file:
dir_setup = os.path.join(home, 'skimsimulator', 'data')
# Local override of the default setup directory above:
dir_setup = '/mnt/data/data_skim/'
# ------ Directory that contains your own inputs:
indatadir = os.path.join(home, 'skimsimulator', 'example', 'input_fields')
# ------ Directory that contains your outputs:
outdatadir = os.path.join(home, 'skimsimulator', 'example', 'skim_output')
# Local override of the default output directory above:
outdatadir = '/mnt/data/skim_output'
# ------ Orbit file:
#filesat = os.path.join(dir_setup,'orbs1a.txt')
filesat = os.path.join(dir_setup,'orbmetop_skim.txt')
# ------ Number of days in orbit (optional if specified in orbit file)
satcycle = 29
# ------ Satellite elevation (optional if specified in orbit file)
sat_elev = 817 * 10**3
# ------ Order of columns (lon, lat, time) in orbit file
#        (default is (0, 1, 2) with order_orbit_col = None)
order_orbit_col = None
# , dir_setup+os.sep+'orbjason.txt', dir_setup+os.sep+'orbaltika.txt' ]
# ------ Number of processor for parallelisation
proc_number = 1
# ------ Deactivate printing of progress bar to avoid huge log
progress_bar = True

# -----------------------#
# SKIM swath parameters
# -----------------------#
# ------ Satellite grid file root name:
#        (Final file name is root_name_[numberofpass].nc)
filesgrid = os.path.join(outdatadir, '{}_grid'.format(config))
# ------ Force the computation of the satellite grid:
makesgrid = True
# ------ Give a subdomain if only part of the model is needed:
#        (modelbox=[lon_min, lon_max, lat_min, lat_max])
#        (If modelbox is None, the whole domain of the model is considered)
modelbox = [300, 359, 20, 50]  # None  # [329.,347., -8.,8.]
# ------ Rotation speed of the antenna (in tr/min)
rotation_speed = rp
# ------ Cycle duration
#        PRF 12KHz, 64 pulses / cycle
cycle = 64/12000
# cycle = 0.0368 / 2.  # for 512 pulses configuration
# ------ List of position of beams:
list_pos = (0,)
# ------ List of angle of beams in degrees:
list_angle = (35,)
# ------ List of timeshift as regard to nadir for 12 degree beams:
list_shift = (1,)
# ------ Shift longitude of the orbit file if no pass is in the domain
#        (in degree): Default value is None (no shift)
shift_lon = 0
# ------ Shift time of the satellite pass (in day):
#        Default value is None (no shift)
shift_time = None

# -----------------------#
# Model input parameters
# -----------------------#
# ------ List of model files:
#        (The first file contains the grid and is not considered as model data)
#        To generate the noise alone, file_input=None and specify region
#        in modelbox
file_input = os.path.join(indatadir, 'list_of_file.txt')
# ------ Type of model data:
#        (Optional, default is NETCDF_MODEL and reads netcdf3 and netcdf4 files)
#        (Other option is WW3)
model = 'WW3'
# ------ First time of the model
first_time = '2011-11-15T00:00:00Z'
# ------ Grid file name
file_grid_model = (os.path.join(indatadir, 'ww3.20111115_hs.nc'),
                   os.path.join(indatadir, 'ww3.20111115_cur12.nc'),
                   os.path.join(indatadir, 'ww3.20111115_rain.nc'))
# ------ Specify if there is a ice mask for high latitudes
#        (if true, mask is recomputed at each cycle)
ice_mask = False
# ------ Type of grid:
#        'regular' or 'irregular', if 'regular' only 1d coordinates
#        are extracted from model
grid = 'regular'
# ------ Specify list of variable:
list_input_var = {'ucur': ['ucur', 'cur12', 1], 'vcur': ['vcur', 'cur12', 1],
                  'uuss': ['uuss', 'uss', 0], 'vuss': ['vuss', 'uss', 0],
                  'ice': ['ice', 'ice', 0], 'mssd': ['mssd', 'msd', 0],
                  'mssx': ['mssu', 'mss', 0], 'mssy': ['mssc', 'mss', 0],
                  'ssh': ['wlv', 'wlv', 0], 'hs': ['hs', 'hs', 0],
                  'uwnd': ['uwnd', 'wnd', 0], 'vwnd': ['vwnd', 'wnd', 0]}
# ------ Specify longitude variable (one name per grid file):
lon = ('longitude', 'longitude', 'lon')
# ------ Specify latitude variable (one name per grid file):
lat = ('latitude', 'latitude', 'lat')
# ------ Specify number of time in file:
dim_time = 24
# ------ Time step between two model outputs (in days):
timestep = 1/24.
# ------ Number of outputs to consider:
#        (timestep*nstep=total number of days)
nstep = 35*24
# ------ Not a number value:
model_nan = 0.

# -----------------------#
# SKIM output files
# -----------------------#
# ------ Output file root name:
#        (Final file name is root_name_c[cycle].nc
file_output = os.path.join(outdatadir, config)
# ------ Interpolation of the SSH from the model (if grid is irregular and
#        pyresample is not installed:
#        (either 'linear' or 'nearest', use 'nearest' for large region
#        as it is faster and use less memory.)
interpolation = 'linear'

# -----------------------#
# SKIM error parameters
# -----------------------#
# List of errors to compute
noise = ['Altimeter', 'Instrument', 'Attitude', 'DSigma', 'Rain',
         'WaveDoppler']
# Number of seed for automatic computation
nseed = 0
# Length of repeat for altimetric noise
len_repeat = 20000
# Todo compute automatically with cycle, number of beams and satellite velocity
delta_al = 2
# ------ Number of random realisations for instrumental and geophysical error
#        (recommended ncomp=2000), ncomp1d is used for 1D spectrum, and ncomp2d
#        is used for 2D spectrum (wet troposphere computation):
ncomp1d = 3000
ncomp2d = 2000
# ------- Choice of instrument configuration
instr_configuration = 'A'
# ------- Coefficient SNR to retrieve instrumental noise from sigma,
#         Recommanded value for 1024 pulses: 3e-2, for 512 pulses: 3sqrt(2)e-3
snr_coeff = 6e-3
if '2018_8b' in config:
    snr_coeff = 1.4142*6e-3
# ------- File which provide the AOCS error:
#         (this was previously assigned twice with the same value; the
#         duplicate assignment has been removed)
yaw_file = os.path.join(dir_setup, 'sample_req1.nc')

## -- Geophysical error
## ----------------------
# ------ Consider ice in sigma0 computation
ice = True
# ------ Rain file containing scenarii (python file):
rain_file = None  # os.path.join(dir_setup, 'rain_eq_atl.pyo')
# ------ Threshold to flag data:
rain_threshold = 0.15
wet_tropo = False

# -----------------------#
# L2C computation
# -----------------------#
# config name for L2d:
config_l2c = 'l40'
# Length resolution to select neighbors (in km):
resol = 40
# Grid resolution for l2c (alongtrack, acrosstrack) grid (in km):
posting = 5
# Remove noisy data around nadir (in km):
ac_threshold = 20
# List of variables to be interpolated on the swath:
list_input_var_l2c = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 0]}

# -----------------------#
# L2D computation
# -----------------------#
# config name for L2d:
config_l2d = ''
# Length resolution to select neighbors (multiplication factor):
resol_spatial_l2d = 1
# Temporal resolution to select neighbors (multiplication factor):
resol_temporal_l2d = 1
# Grid resolution for l2d (lat, lon) grid (in degrees):
posting_l2d = (0.1, 0.1)
# Time domain: (start_time, end_time, dtime) in days:
time_domain = (7, 23, 1)
# Spatial domain (lon_min, lon_max, lat_min, lat_max):
spatial_domain = [0, 360, -90, 90]
# List of variables to be interpolated on the grid:
list_input_var_l2d = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 0]}
| 7,742 | 37.142857 | 80 | py |
skimulator | skimulator-master/skimulator/const.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
'''Constants that are defined for SKIM simulator. \n
It contains Earth parameters as well as SKIM instrument
and satellite caracteristics. '''
# ################################
# #        EARTH CONSTANTS      ##
# ################################
# - Earth radius (m)
Rearth = 6378. * 10**3
# - Conversion factor from degrees of great-circle arc to km
deg2km = 111.11
# - Seconds in a day
secinday = 86400.
# - Speed of light (m/s)
C = 2.998*10**8
# ###################################
# # SKIM INSTRUMENT CARACTERISTICS ##
# ###################################
# - Satellite elevation (m); used as the default when the orbit file does
#   not provide one (see build_swath)
sat_elev = 817*10**3
# - Baseline (m)
B = 10
# - Radar carrier frequency (in Hz); 35.75 GHz, i.e. Ka band
Fka = 35.75 * 10**9
# Satellite cycle (S1) in days; default orbit repeat cycle
satcycle = 29.
# Satellite velocity (m/s)
vsat = 6657.
# ###################################
# #        OTHER PARAMETERS        ##
# ###################################
# - Radius to interpolate locally model data on the swath (in km)
#   data are selected every xal_step points and on a radius of radius_interp
radius_interp = 100.
# - Sampling to interpolate locally model data on the swath (in km)
#   Data are selected every xal_step points and on a radius of radius_interp
xal_step = 20.
| 1,840 | 29.683333 | 75 | py |
skimulator | skimulator-master/skimulator/mod_parallel.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
@author <sylvain.herledan@oceandatalab.com>
@date 2018-12-26
"""
import os
import sys
import math
import time
import logging
import traceback
import collections
import multiprocessing
logger = logging.getLogger(__name__)
#logger = multiprocessing.log_to_stderr()
#logger.setLevel(logging.DEBUG)
class DyingOnError(Exception):
    """Raised by JobsManager.handle_message to abort the whole run after a
    worker reported an error and die_on_error is enabled."""
    pass
class MultiprocessingError(Exception):
    """Wrapper for an exception reported by the multiprocessing pool's
    error callback (see JobsManager._error_callback)."""
    def __init__(self, exc):
        """Store the original exception object in self.exc."""
        self.exc = exc
class JobsManager():
    """Dispatch jobs on a pool of worker processes.

    Progress, result and error messages are exchanged with the workers
    through manager queues and rendered in the main process with the
    callbacks supplied at construction time.
    """
    def __init__(self, pool_size, status_updater, exc_fmt, err_fmt,
                 init_method=None, init_args=None):
        """Create the message/result/error queues and the process pool.

        pool_size: number of worker processes (spawn start method).
        status_updater: callable rendering progress messages, or None.
        exc_fmt: callable formatting a sys.exc_info() triple, or None.
        err_fmt: callable building a readable error report, or None.
        init_method, init_args: optional pool initializer and its args.
        """
        self._pool_size = pool_size
        self.manager = multiprocessing.Manager()
        self.msg_queue = self.manager.Queue()
        self.res_queue = self.manager.Queue()
        self.errors_queue = self.manager.Queue()
        ctx = multiprocessing.get_context('spawn')
        if init_method is None:
            self.pool = ctx.Pool(pool_size)
        else:
            self.pool = ctx.Pool(pool_size, initializer=init_method,
                                 initargs=init_args)
        # Callbacks are wrapped in staticmethod objects stored on the
        # instance and always invoked through .__func__ below.
        # NOTE(review): submit_jobs dereferences
        # self.format_exception.__func__ unconditionally, so exc_fmt=None
        # would raise AttributeError there -- confirm callers always
        # provide it.
        self.format_exception = None
        if exc_fmt is not None:
            self.format_exception = staticmethod(exc_fmt)
        self.update_status = None
        if status_updater is not None:
            self.update_status = staticmethod(status_updater)
        self.format_error = None
        if err_fmt is not None:
            self.format_error = staticmethod(err_fmt)
    def show_errors(self):
        """Pop every stored error from the errors queue and log it."""
        if self.format_error is None:
            return
        while not self.errors_queue.empty():
            (pid, job_id, extra, exc) = self.errors_queue.get()
            error_str = self.format_error.__func__(pid, job_id, extra, exc)
            logger.error(error_str)
            logger.error(' {}'.format(' \n'.join(exc)))
    def _error_callback(self, exc):
        """Relay a low-level pool failure as a MultiprocessingError."""
        logger.error(exc)
        raise MultiprocessingError(exc)
    def handle_message(self, status, msg, die_on_error, progress_bar):
        """Process one worker message.

        msg is (pid, job_id, progress, error); error is None on success.
        Returns False when the message reported a failure; when
        die_on_error is True a failure terminates the pool and raises
        DyingOnError.
        """
        _ok = (msg[3] is None)
        if (progress_bar is True) and (self.update_status is not None):
            _ok = self.update_status.__func__(status, msg)
        if (_ok is False) and (die_on_error is True):
            # Kill all workers, show error and exit with status 1
            self.pool.terminate()
            self.show_errors()
            raise DyingOnError
        return _ok
    def submit_jobs(self, operation, jobs, die_on_error, progress_bar,
                    delay=0.5, results=None):
        """Run `operation` on every job list and wait for completion.

        Each job list gets the queues, the exception formatter and the
        operation appended (consumed again by _operation_wrapper in the
        worker).  `results`, when given, is either a callback or an
        iterable (callback, *extra_args) applied to every item taken
        from the results queue.  Returns True when no error occurred.
        """
        results_callback = None
        results_args = None
        if results is not None:
            # Fix: collections.Iterable was a deprecated alias removed in
            # Python 3.10; the ABC lives in collections.abc.
            if (isinstance(results, collections.abc.Iterable)
                    and 1 < len(results)):
                results_callback, results_args = results[0], results[1:]
            else:
                results_callback = results
        for j in jobs:
            j.append(self.errors_queue)
            j.append(self.msg_queue)
            j.append(self.format_exception.__func__)
            j.append(operation)
        # Distribute jobs between workers
        chunk_size = int(math.ceil(len(jobs) / self._pool_size))
        status = {}
        for n, w in enumerate(self.pool._pool):
            status[w.pid] = {'done': 0, 'total': 0, 'jobs': None, 'extra': ''}
            proc_jobs = jobs[n::self._pool_size]
            status[w.pid]['jobs'] = [j[0] for j in proc_jobs]
            status[w.pid]['total'] = len(proc_jobs)
        # Make some room for the progress bars
        if progress_bar is True:
            sys.stdout.write('\n' * self._pool_size)
            sys.stdout.flush()
        # Start jobs processing
        # NOTE(review): this branch is only taken when results is the
        # literal True, in which case results_callback is True as well and
        # would not be callable -- looks like dead/legacy code; confirm.
        if results is True:
            res = []
            tasks = self.pool.map_async(_operation_wrapper, jobs,
                                        chunksize=chunk_size,
                                        error_callback=self._error_callback,
                                        callback=res.append)
        else:
            tasks = self.pool.map_async(_operation_wrapper, jobs,
                                        chunksize=chunk_size,
                                        error_callback=self._error_callback)
        # Wait until all jobs have been executed
        ok = True
        while not tasks.ready():
            # Handle progress and error messages
            if not self.msg_queue.empty():
                msg = self.msg_queue.get()
                _ok = self.handle_message(status, msg, die_on_error,
                                          progress_bar)
                ok = ok and _ok
            # Handle results from processed jobs on the fly
            if (results_callback is not None) and (not self.res_queue.empty()):
                job_res = self.res_queue.get()
                if results_args is not None:
                    results_callback(job_res, *results_args)
                else:
                    results_callback(job_res)
            time.sleep(delay)
        # Make sure all messages have been processed
        while not self.msg_queue.empty():
            msg = self.msg_queue.get()
            _ok = self.handle_message(status, msg, die_on_error, progress_bar)
            ok = ok and _ok
        # Flush stdout buffer to avoid partial output issues
        sys.stdout.flush()
        # Make sure all results have been processed
        while ((results is not None) and (not self.res_queue.empty())):
            job_res = self.res_queue.get()
            if results_args is not None:
                results_callback(job_res, *results_args)
            else:
                results_callback(job_res)
        # Wait for workers to release resources and synchronize with the main
        # process
        self.pool.close()
        self.pool.join()
        return ok
def _operation_wrapper(*args, **kwargs):
""""""
_args = args[0]
operation = _args.pop()
format_exc = _args.pop()
msg_queue = _args.pop()
errors_queue = _args.pop()
try:
job_id = _args[0]
res = operation(msg_queue, *_args, **kwargs)
except:
# Error sink
exc = sys.exc_info()
if format_exc is None:
error_msg = traceback.format_exception(exc[0], exc[1], exc[2])
else:
error_msg = format_exc(exc)
# Pass the error message to both the messages queue and the
# errors queue
msg_queue.put((os.getpid(), job_id, -1, error_msg))
errors_queue.put((os.getpid(), job_id, -1, error_msg))
return False
if res is None:
return True
else:
return res
| 7,515 | 31.257511 | 79 | py |
skimulator | skimulator-master/skimulator/fitspline2d.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
from . import spline
import scipy.sparse.linalg as linalg
class fitspline2d:
    """Least-squares fit of a 2D field on a tensor-product cubic B-spline
    basis, with optionally periodic (circular) x and/or y directions.

    The data are assumed to live on a regular nx x ny grid; sample
    positions are taken at cell centres (index + 0.5).  The basis values
    at those positions are precomputed once in __init__.
    """
    def __init__(self, nx, ny, nxspline, nyspline, xcircle=True, ycircle=True):
        """Precompute the B-spline basis values at the grid points.

        nx, ny: size of the data grid.
        nxspline, nyspline: number of spline coefficients per direction.
        xcircle, ycircle: use a periodic spline along that direction.
        """
        self.xcircle = xcircle
        self.ycircle = ycircle
        self.nx = nx
        self.ny = ny
        self.nxspline = nxspline
        self.nyspline = nyspline
        # Sample positions at cell centres.
        x = numpy.arange(nx) + 0.5
        y = numpy.arange(ny) + 0.5
        # Cubic (order 3) splines from the project-local spline module;
        # non-periodic splines use two fewer interior nodes.
        if self.xcircle == False:
            self.spx = spline.spline(3, nxspline - 2)
        else:
            self.spx = spline.spline_circular(3, nxspline)
        if self.ycircle == False:
            self.spy = spline.spline(3, nyspline - 2)
        else:
            self.spy = spline.spline_circular(3,nyspline)
        self.spx.init_uniform(0, nx)
        self.spy.init_uniform(0, ny)
        # vx/vy: basis values (4 per point for a cubic spline); ix/iy:
        # index of the first spline affected by each point.  jx/jy are
        # stored but not used in this class.
        self.vx, self.ix, self.jx = self.spx.bspline_value(x)
        self.vy, self.iy, self.jy = self.spy.bspline_value(y)
        self.nspline=self.nxspline*self.nyspline
    def init_fit(self, doinvert=True):
        """Build the normal-equation matrix of the least-squares fit.

        When doinvert is True the pseudo-inverse is stored in self.imat
        and fit() solves directly; otherwise the matrix itself is stored
        and fit() solves iteratively with conjugate gradients.
        """
        mat=numpy.zeros([self.nspline, self.nspline])
        # Accumulate basis-function cross products over every grid point
        # (4x4 splines overlap each point in each direction).
        for ii in range(self.nx):
            for jj in range(self.ny):
                for jjx in range(4):
                    for jjy in range(4):
                        for kkx in range(4):
                            for kky in range(4):
                                ixj = numpy.mod(self.ix[ii] + jjx, self.nxspline)
                                ixk = numpy.mod(self.ix[ii] + kkx, self.nxspline)
                                iyj = numpy.mod(self.iy[jj] + jjy, self.nyspline)
                                iyk = numpy.mod(self.iy[jj] + kky, self.nyspline)
                                ind0 = ixj + self.nxspline * iyj
                                ind1 = ixk + self.nxspline * iyk
                                mat[ind0, ind1] += (self.vx[kkx,ii]
                                                    * self.vy[kky,jj]
                                                    * self.vx[jjx,ii]
                                                    * self.vy[jjy,jj])
        # For non-periodic directions, add boundary terms matching the
        # extra data terms added in fit().
        if self.xcircle == False:
            for ii in range(self.nyspline):
                mat[ii * self.nxspline, ii * self.nxspline] += 1
            for ii in range(self.nyspline):
                mat[self.nxspline-1+ii*self.nxspline, ii*self.nxspline] += 1
        if self.ycircle == False:
            for ii in range(self.nxspline):
                mat[ii, ii] += 1
            for ii in range(self.nxspline):
                ind0 = ii+(self.nyspline-1)*self.nxspline
                ind1 = (self.nyspline-1)*self.nxspline
                mat[ind0, ind1] += 1
        if doinvert is True:
            self.imat=numpy.linalg.pinv(mat)
            self.inv = True
        else:
            self.imat = mat
            self.inv = False
    def fit(self,data):
        """Fit the spline coefficients to `data` (an nx x ny array).

        The result is stored in self.wres (flat coefficient vector);
        init_fit() must have been called first.
        """
        nspline=self.nspline
        # Right-hand-side vector: projection of the data on the basis.
        vec=numpy.zeros([nspline])
        for ii in range(self.nx):
            for jj in range(self.ny):
                for jjx in range(4):
                    for jjy in range(4):
                        ixj = numpy.mod(self.ix[ii] + jjx, self.nxspline)
                        iyj = numpy.mod(self.iy[jj] + jjy, self.nyspline)
                        vec[ixj + self.nxspline * iyj] += self.vx[jjx, ii]*self.vy[jjy, jj] * data[ii, jj]
        # Boundary terms for non-periodic directions (mirrors init_fit).
        if self.xcircle == False:
            for ii in range(self.nyspline):
                vec[ii*self.nxspline]+=data[0,ii]
            for ii in range(self.nyspline):
                vec[self.nxspline-1+ii*self.nxspline]+=data[self.nx-1,ii]
        if self.ycircle == False:
            for ii in range(self.nxspline):
                vec[ii]+=data[ii,0]
            for ii in range(self.nxspline):
                vec[ii+(self.nyspline-1)*self.nxspline]+=data[ii,self.ny-1]
        # Solve: conjugate gradients when only the matrix is stored,
        # direct product when the pseudo-inverse is available.
        if self.inv == False:
            self.wres,infoinv=linalg.cg(self.imat,vec)
        else:
            self.wres=numpy.dot(self.imat, vec)
    def getparam(self):
        """Return the fitted coefficient vector."""
        return self.wres
    def setparam(self,wres):
        """Install externally computed coefficients (see ted_tas)."""
        self.wres=wres
    def transform(self, x, y):
        """Evaluate the fitted surface at positions (x, y).

        x, y: 1D arrays expressed in grid-index coordinates.
        Returns a 1D array with one value per (x, y) pair.
        """
        n = x.shape[0]
        res = numpy.zeros([n])
        vx, ix, jx = self.spx.bspline_value(x)
        vy, iy, jy = self.spy.bspline_value(y)
        # NOTE(review): this reshape also mutates the stored coefficients'
        # shape; idempotent for repeated calls.
        self.wres=self.wres.reshape(self.nyspline, self.nxspline)
        for jj in range(4):
            for ii in range(4):
                iyj = numpy.mod(iy + jj, self.nyspline)
                ixi = numpy.mod(ix + ii, self.nxspline)
                res[:] += vy[jj, :] * vx[ii, :] * self.wres[iyj, ixi]
        return res
class ted_tas():
    """Evaluate a quantity as a function of orbital position, antenna
    azimuth and date in the year, from three fitted periodic 2D splines.

    The first spline provides the annual-mean term, the second and third
    the annual cosine and sine modulations (combined in transform).
    """
    def __init__(self, wres, nxspline, nyspline, thedeg):
        """Build the three spline evaluators from fitted coefficients.

        wres: (3, n) array of spline coefficients, one row per component.
        nxspline, nyspline: number of spline nodes in each direction.
        thedeg: unused here -- presumably kept for interface
        compatibility; confirm against callers.
        """
        self.mysp2={}
        # Size of the (orbital time x azimuth) grid used for the fit.
        self.n_time = 1200
        self.n_az = 120
        for i in range(3):
            self.mysp2[i]=fitspline2d(self.n_time, self.n_az, nxspline,
                                      nyspline, xcircle=True, ycircle=True)
            self.mysp2[i].setparam(wres[i,:])
    def transform(self, t_orbit, az, t_year):
        '''
        #====================================================================
        # t_orbit : orbital time inside [0,1]; 0 beginning of the orbit,
        1 end of the orbit (if t_orbit < 0 or t_orbit > 1 then BUG!)
        # az : azimuth in degrees
        # t_year : date in year inside [0,1]; 0 beginning of the year,
        1 end of the year
        Returns mean + cos/sin annual modulation evaluated at the inputs.
        '''
        # Number of points for the time dimension
        n_time = self.n_time
        az = numpy.mod(az, 360)
        # Discretisation step for azimuth
        delta_az = 360 / self.n_az
        fit_1 = self.mysp2[0].transform(t_orbit * n_time, az / delta_az)
        # NOTE(review): cos_year is computed but never used below.
        cos_year = numpy.cos(t_year * 2 * numpy.pi)
        t_orbit = numpy.fmod((t_orbit * n_time), n_time)
        azd = numpy.fmod(az, 360) / delta_az
        fit_t2 = self.mysp2[1].transform(t_orbit, azd)
        fit_t3 = self.mysp2[2].transform(t_orbit, azd)
        fit_2 = fit_t2 * numpy.cos(t_year * 2 * numpy.pi)
        fit_3 = fit_t3 * numpy.sin(t_year * 2 * numpy.pi)
        return (fit_1 + fit_2 + fit_3)
| 6,754 | 37.6 | 106 | py |
skimulator | skimulator-master/skimulator/build_swath.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import numpy
import math
from scipy import interpolate
import skimulator.mod_tools as mod_tools
import skimulator.const as const
import skimulator.rw_data as rw_data
import skimulator.mod_parallel
import time
import logging
import traceback
# Define logger level for debug purposes
logger = logging.getLogger(__name__)
def makeorbit(modelbox, p, orbitfile='orbit_292.txt', filealtimeter=None):
    '''Computes the swath of SKIM satellites on a subdomain.
    The path of the satellite is given by the orbit file and the subdomain
    corresponds to the one in the model. Note that a subdomain can be manually
    added in the parameters file. \n
    Inputs are satellite orbit (p.filesat), subdomain (modelbox), List of
    postion of six degree beams, list of position of twelve degree beams,
    rotation speed. \n
    Outputs are netcdf files containing SKIM position (lon, lat number of days
    in a cycle, distance crossed in a cycle, time). \n
    Note: filealtimeter is unused and kept for interface compatibility.
    '''
    # - Load SKIM orbit ground track (longitude, latitude, time columns)
    logger.info('Load data from orbit file')
    if p.order_orbit_col is None:
        volon, volat, votime = numpy.loadtxt(orbitfile, usecols=(0, 1, 2),
                                             comments='#', unpack=True)
    else:
        ncols = p.order_orbit_col
        volon, volat, votime = numpy.loadtxt(orbitfile, usecols=ncols,
                                             comments='#', unpack=True)
    # Parse "# key = value" header lines until the first data line.
    dic_sat = {}
    with open(orbitfile, 'r') as fh:
        for i, line in enumerate(fh):
            if line.strip().startswith('#'):
                # Robustness: skip comment lines without '=' instead of
                # crashing; split on the first '=' only.
                if '=' not in line:
                    continue
                key, value = line.strip().split('=', 1)
                dic_sat[key[1:].strip()] = float(value.strip())
            else:
                break
    if 'cycle' in dic_sat and 'elevation' in dic_sat:
        p.satcycle = dic_sat['cycle']
        p.sat_elev = dic_sat['elevation']
    # - If orbit is at low resolution, interpolate at cycle (s) resolution
    cycle = p.cycle
    votime = votime * const.secinday
    logger.info('Interpolate orbit at {} seconds'.format(cycle))
    # - If orbit is at low resolution, interpolate at 0.5 s resolution,
    #   going through cartesian coordinates to interpolate positions safely
    #   across the dateline.
    hr_step = 0.5
    if numpy.mean(votime[1:] - votime[:-1]) > hr_step:
        x, y, z = mod_tools.spher2cart(volon, volat)
        time_hr = numpy.arange(0., votime[-1], hr_step)
        f = interpolate.interp1d(votime, x)
        x_hr = f(time_hr)
        f = interpolate.interp1d(votime, y)
        y_hr = f(time_hr)
        f = interpolate.interp1d(votime, z)
        z_hr = f(time_hr)
        lon_hr = numpy.zeros(len(x_hr)) + numpy.nan
        lat_hr = numpy.zeros(len(x_hr)) + numpy.nan
        lon_hr, lat_hr = mod_tools.cart2sphervect(x_hr, y_hr, z_hr)
        # Cut orbit if more than an orbit cycle
        if p.satcycle is None:
            p.satcycle = const.satcycle
        ind = numpy.where((time_hr < p.satcycle * const.secinday))
        volon = lon_hr[ind]
        volat = lat_hr[ind]
        votime = time_hr[ind]
    # - Get number of points in orbit
    nop = numpy.shape(votime)[0]
    # - Get cycle period.
    tcycle = votime[nop-1] + votime[1] - votime[0]
    # shift time if the user needs to shift the time of the orbit
    if p.shift_time is not None:
        shift_time = p.shift_time * const.secinday
        shift_index = numpy.where(votime >= shift_time)[0]
        volon = numpy.hstack([volon[shift_index[0]:],
                              volon[:shift_index[0]]])
        volat = numpy.hstack([volat[shift_index[0]:],
                              volat[:shift_index[0]]])
    # shift lon if the user needs to shift the localisation of the orbit
    if p.shift_lon is not None:
        volon = volon + p.shift_lon
    volon = (volon + 360) % 360
    # - Rearrange orbit starting from pass 1
    # Detect the beginning of pass 1 in orbit txt file. By definition, it is
    # the first passage at southernmost latitude.
    dlat = numpy.roll(volat, 1) - volat
    ind = numpy.where((dlat < 0) & (numpy.roll(dlat, 1) >= 0))
    # Shift coordinates, so that the first point of the orbit is the beginning
    # of pass 1
    decal = ind[0][-1]
    volon = numpy.hstack([volon[decal:], volon[:decal]])
    volat = numpy.hstack([volat[decal:], volat[:decal]])
    votime = numpy.hstack([votime[decal:], votime[:decal]])
    votime = (votime - votime[0]) % tcycle
    # Fix: the original truth test on an indexed array raised ValueError as
    # soon as more than one negative time existed; any() is the intended
    # check.
    if numpy.any(votime < 0):
        logger.warning('WARNING: there are negative times in your orbit')
    del ind
    # Compute the initial time of each pass (latitude extrema)
    dlat = numpy.roll(volat, 1) - volat
    ind = numpy.where(((dlat < 0) & (numpy.roll(dlat, 1) >= 0)) | ((dlat > 0)
                      & (numpy.roll(dlat, 1) <= 0)))
    index = ind[0]
    passtime = votime[index]  # Times of pass
    # - Extract region: mark orbit points inside modelbox (with a 1 degree
    #   margin); the first branch handles a box crossing the dateline.
    matnpbox = numpy.zeros((nop))
    if modelbox[0] > modelbox[1]:
        matnpbox[numpy.where((((modelbox[0] - 1) <= volon)
                             | (volon <= (modelbox[1] + 1)))
                             & ((modelbox[2] - 1) <= volat)
                             & ((modelbox[3] + 1) >= volat))] = 1
    else:
        matnpbox[numpy.where(((modelbox[0] - 1) <= volon)
                             & (volon <= (modelbox[1] + 1))
                             & ((modelbox[2] - 1) <= volat)
                             & ((modelbox[3] + 1) >= volat))] = 1
    norp = int(numpy.sum(matnpbox))
    # Initialize total distance travelled by the satellite since the first
    # point of the cycle in the subdomain at low (orbital file) resolution
    x_al_lr = numpy.zeros((norp))
    lon_lr = numpy.zeros((norp))
    lat_lr = numpy.zeros((norp))
    stime_lr = numpy.zeros((norp))
    # Initialize vector with accumulated distance travelled by the satellite
    indp = 0
    distance = numpy.zeros((nop))
    # Compute and store distances and coordinates that are in the defined
    # subdomain
    logger.info('Compute SKIM nadir coordinate in the new domain')
    for i in range(0, nop - 1):
        if p.progress_bar is True:
            mod_tools.update_progress(float(i) / float(nop-1), None, None)
        # Unwrap the longitude jump across the dateline before measuring
        # the step length.
        if abs(volon[i + 1] - volon[i]) > 1:
            if volon[i + 1] > 180.:
                volon[i + 1] = volon[i + 1] - 360
            if volon[i] > 180.:
                volon[i] = volon[i] - 360
        distance[i+1] = (distance[i] + numpy.sqrt(((volon[i+1]-volon[i])
                         * const.deg2km*numpy.cos(volat[i+1]
                         * 2*math.pi/360.))**2 + ((volat[i+1] - volat[i])
                         * const.deg2km)**2))
        volon[i + 1] = (volon[i + 1] + 360) % 360
        if matnpbox[i]:
            x_al_lr[indp] = distance[i]
            lon_lr[indp] = (volon[i] + 360) % 360
            lat_lr[indp] = volat[i]
            stime_lr[indp] = votime[i]
            indp += 1
    # - Interpolate orbit at cycle resolution (default is cycle=0.0096)
    # Detect gap in time in stime (to detect step in x_al, lon and lat)
    dstime = stime_lr[:] - numpy.roll(stime_lr[:], 1)
    ind = numpy.where(dstime > 3*(votime[1] - votime[0]))
    index = numpy.hstack([0, ind[0]])
    nindex = numpy.shape(index)[0]
    # Initialize along track distance, time and coordinates at cycle
    # resolution; the nindex > 1 branch interpolates each contiguous
    # segment between gaps separately.
    if nindex > 1:
        dgap = numpy.zeros((nindex))
        for i in range(1, nindex):
            dgap[i] = stime_lr[index[i]] - stime_lr[max(index[i] - 1, 0)]
        Ninterp = int(((stime_lr[-1] - stime_lr[0] - sum(dgap))
                      / float(p.cycle)) + 1)
        x_al = numpy.zeros((Ninterp))
        stime = numpy.zeros((Ninterp))
        lon = numpy.zeros((Ninterp))
        lat = numpy.zeros((Ninterp))
        imin = 0
        imax = 0
        substract_point = 0
        for i in range(0, nindex - 1):
            imax = imin + int((stime_lr[index[i+1]-1] - stime_lr[index[i]])
                              / float(p.cycle)) + 1
            if imax <= (imin + 1):
                x_al[imin] = x_al_lr[index[i]]
                stime[imin] = stime_lr[index[i]]
                lon[imin] = lon_lr[index[i]]
                lat[imin] = lat_lr[index[i]]
            else:
                slicei = slice(index[i], index[i + 1])
                nofp = (stime_lr[index[i+1] - 1]-stime_lr[index[i]]) / p.cycle
                if (nofp).is_integer():
                    imax = imax - 1
                    substract_point += 1
                stime[imin: imax] = numpy.arange(stime_lr[index[i]],
                                                 stime_lr[index[i+1] - 1],
                                                 p.cycle)
                x_al[imin: imax] = numpy.interp(stime[imin: imax],
                                                stime_lr[slicei],
                                                x_al_lr[slicei])
                # Unwrap longitudes before interpolation to avoid dateline
                # artefacts.
                loncirc = numpy.rad2deg(numpy.unwrap(numpy.deg2rad(
                                                     lon_lr[slicei])))
                lon[imin: imax] = numpy.interp(stime[imin: imax],
                                               stime_lr[slicei],
                                               loncirc)
                lat[imin: imax] = numpy.interp(stime[imin: imax],
                                               stime_lr[slicei],
                                               lat_lr[slicei])
            imin = imax
        if substract_point > 0:
            substract_point = 1
        # Last segment: build the remaining time axis, adjusting its length
        # to exactly fill the preallocated arrays.
        _tmp = numpy.arange(stime_lr[index[-1]], stime_lr[index[-1]]
                            + (Ninterp - imin)*p.cycle
                            - p.cycle * substract_point, p.cycle)
        if len(_tmp) > len(stime[imin:]):
            _tmp2 = _tmp[:-1]
        elif len(_tmp) < len(stime[imin:]):
            _tmp2 = numpy.arange(stime_lr[index[-1]], stime_lr[index[-1]]
                                 + (Ninterp - imin)*p.cycle, p.cycle)
        else:
            _tmp2 = _tmp
        stime[imin:] = _tmp2
        x_al[imin:] = numpy.interp(stime[imin:], stime_lr[index[-1]:],
                                   x_al_lr[index[-1]:])
        loncirc = numpy.rad2deg(numpy.unwrap(numpy.deg2rad(
                                             lon_lr[index[-1]:])))
        lon[imin:] = numpy.interp(stime[imin:], stime_lr[index[-1]:], loncirc)
        lat[imin:] = numpy.interp(stime[imin:], stime_lr[index[-1]:],
                                  lat_lr[index[-1]:])
    else:
        Ninterp = int(((stime_lr[-2] - stime_lr[0]) / float(p.cycle)) + 1)
        stime = numpy.arange(stime_lr[0], stime_lr[-2], p.cycle)
        x_al = numpy.interp(stime, stime_lr[:-1], x_al_lr[:-1])
        loncirc = numpy.rad2deg(numpy.unwrap(numpy.deg2rad(lon_lr[:-1])))
        lon = numpy.interp(stime, stime_lr[:-1], loncirc)
        lat = numpy.interp(stime, stime_lr[:-1], lat_lr[:-1])
    lon = lon % 360
    # Save orbit data in Sat_SKIM object
    orb = rw_data.Sat_SKIM(ifile='{}.nc'.format(os.path.basename(orbitfile)))
    orb.x_al = x_al
    orb.time = stime
    orb.lon = lon
    orb.lat = lat
    orb.cycle = tcycle
    orb.al_cycle = distance[-1]
    orb.passtime = numpy.sort(passtime)
    orb.timeshift = p.timeshift
    return orb
def orbit2swath(modelbox, p, orb, die_on_error):
    '''Computes the swath of SKIM satellites on a subdomain from an orbit.
    The path of the satellite is given by the orbit file and the subdomain
    corresponds to the one in the model. Note that a subdomain can be manually
    added in the parameters file. \n
    Inputs are satellite orbit (p.filesat), subdomain (modelbox), Swath
    parameters (half gap distance p.halfgap, half swath distance p.halfswath,
    along track
    resolution p.delta_al, across track resolution p.delta_ac). \n
    Outputs are netcdf files containing SKIM grid (along track distance x_al,
    radial angle, longitude lon and latitude lat,
    number of days in a cycle cycle, distance crossed in a cycle cycle_al,
    time'''
    ''' Compute orbit from Swath '''
    # - Load altimeter orbit (as produced by makeorbit)
    x_al = orb.x_al
    stime = orb.time
    lon = orb.lon
    lat = orb.lat
    tcycle = orb.cycle
    al_cycle = orb.al_cycle
    passtime = orb.passtime
    # - Computation of SKIM grid and storage by passes
    logger.info('\n Compute SKIM grid')
    # Detect first pass that is in the subdomain
    ipass0 = 0
    # Check that list of position, shift and angle have the same
    # dimension
    if len(p.list_pos) != len(p.list_shift) or \
       len(p.list_pos) != len(p.list_angle) or \
       len(p.list_angle) != len(p.list_shift):
        # NOTE(review): the implicit string concatenation below produces
        # "list_shiftor list_angle" (missing space) -- fix separately.
        logger.error('Wrong length in list_pos, list_shift'
                     'or list_angle')
        sys.exit(1)
    # Loop on all passes after the first pass detected (note that ipass is
    # actually stored as ipass + 1 to have the first pass at 1 and ascending
    jobs = []
    # Parameters are serialized to a dict so they can be pickled and sent
    # to worker processes.
    p2 = mod_tools.todict(p)
    for ipass in range(ipass0, numpy.shape(passtime)[0]):
        jobs.append([ipass, p2, passtime, stime, x_al, tcycle, al_cycle, lon,
                     lat, orb.timeshift])
    ok = make_skim_grid(p.proc_count, jobs, die_on_error, p.progress_bar)
    if p.progress_bar is True:
        mod_tools.update_progress(1, 'All swaths have been processed', ' ')
    else:
        logger.info('All swaths have been processed')
    return ok
def make_skim_grid(_proc_count, jobs, die_on_error, progress_bar):
    """Run worker_method_grid on every pass job across a pool of worker
    processes and return True when all of them succeeded."""
    workers = min(len(jobs), _proc_count)
    manager = skimulator.mod_parallel.JobsManager(
        workers, mod_tools.update_progress_multiproc, exc_formatter,
        err_formatter)
    ok = manager.submit_jobs(worker_method_grid, jobs, die_on_error,
                             progress_bar)
    if not ok:
        # Display errors once the processing is done
        manager.show_errors()
    return ok
def exc_formatter(exc):
    """Render a sys.exc_info() triple as a list of traceback strings.

    The string list is picklable, so it can safely travel through the
    JobsManager queues."""
    return traceback.format_exception(*exc)
def err_formatter(pid, ipass, cycle, exc):
    """Build a human-readable report for an error stored by JobsManager.

    A negative cycle means the failure is not tied to a specific cycle.
    pid and exc are part of the queue payload but do not appear in the
    message itself."""
    if cycle < 0:
        return '/!\ Error occurred while processing pass {}'.format(ipass)
    template = ('/!\ Error occurred while processing cycle {} on '
                'pass {}')
    return template.format(cycle, ipass)
def worker_method_grid(*args, **kwargs):
    """Compute and write the SKIM grid netcdf file for one pass.

    Positional args (packed by orbit2swath): msg_queue, ipass, p2 (params
    as dict), passtime, stime, x_al, tcycle, al_cycle, lon, lat,
    timeshift.  A (pid, ipass, None, None) success message is put on
    msg_queue when the pass has been processed (or skipped because it has
    fewer than 6 points in the domain).
    """
    msg_queue, ipass, p2 = args[:3]
    passtime, stime, x_al, tcycle, al_cycle, lon, lat, timeshift = args[3:]
    p = mod_tools.fromdict(p2)
    # Antenna rotation speed in rad/s (p.rotation_speed is in rpm)
    omega = p.rotation_speed * 2 * math.pi / 60.
    # Number of beams including nadir; samples are interleaved every nbeam
    # points along the orbit
    nbeam = len(p.list_shift) + 1
    sat_elev= p.sat_elev
    if sat_elev is None:
        sat_elev = const.sat_elev
    # Detect indices corresponding to the pass
    if ipass == numpy.shape(passtime)[0]-1:
        ind = numpy.where((stime >= passtime[ipass]))[0]
    else:
        ind = numpy.where((stime >= passtime[ipass])
                          & (stime < passtime[ipass+1]))[0]
    nind = numpy.shape(ind)[0]
    # Compute swath grid if pass is in the subdomain
    if nind > 5:
        # Initialize SKIM grid, grid variables
        filesgrid = '{}_p{:03d}.nc'.format(p.filesgrid, ipass + 1)
        sgrid = rw_data.Sat_SKIM(ifile=filesgrid)
        # NOTE(review): sgrid.x_al is first set to the full pass array and
        # later overwritten with the per-beam list (xal_beam).
        sgrid.x_al = x_al[ind]
        x_al_nad = x_al[ind]
        x_al_nad = x_al_nad[0::nbeam]
        sgrid.cycle = tcycle
        sgrid.al_cycle = al_cycle
        sgrid.ipass = ipass + 1
        # Compute nadir coordinate and initialize angles; beam 0 is nadir
        lonnad = (lon[ind] + 360) % 360
        latnad = + lat[ind]
        timenad = + stime[ind]
        lon_beam = [lonnad[0::nbeam]]
        lat_beam = [latnad[0::nbeam]]
        time_beam = [timenad[0::nbeam]]
        angle_beam = [numpy.zeros(numpy.shape(lon_beam[0]))]
        radial_angle_tot = [numpy.zeros(numpy.shape(lon_beam[0]))]
        xal_beam = [x_al_nad]
        xac_beam = [x_al_nad * 0]
        xal_beam_tot = [x_al_nad]
        # Local inclination of the ground track from consecutive nadir
        # positions.  NOTE(review): the division can hit lonnad[i+1] ==
        # lonnad[i] (zero denominator) -- confirm inputs preclude it.
        inclination_angle = numpy.zeros(numpy.shape(lonnad))
        inclination_angle[1:] = numpy.arctan((latnad[1:] - latnad[:-1])
                                             / numpy.cos(latnad[1:]
                                             * math.pi / 180.)
                                             / (lonnad[1:] - lonnad[:-1]))
        inclination_angle[0] = inclination_angle[1]
        # Loop on beam to construct cycloid
        for angle, shift, beam in zip(p.list_pos, p.list_shift,
                                      p.list_angle):
            # Radius (km) of the beam circle projected on the earth for
            # this beam elevation angle
            rc = (const.Rearth * (beam * math.pi/180
                  - numpy.arcsin(const.Rearth * numpy.sin(math.pi - beam
                  * math.pi/180) / (const.Rearth + sat_elev)))
                  * 10**(-3))
            timebeamshift = timenad[shift::nbeam]
            # Azimuth of the rotating antenna for each sample of this beam
            beam_angle = omega * timebeamshift + angle
            xal = -(rc * numpy.sin(beam_angle)) / const.deg2km
            xac = (rc * numpy.cos(beam_angle)) / const.deg2km
            sign_ac = 1
            # Even pass: descending
            if ((ipass + 1) % 2 == 0):
                inclination = inclination_angle[shift::nbeam]
                inclination_save = inclination_angle[0::nbeam]
                radial_angle = -beam_angle + inclination - math.pi/2.
                sign_ac = -1
            # Odd pass: ascending
            else:
                inclination = math.pi + inclination_angle[shift::nbeam]
                inclination_save = inclination_angle[0::nbeam]
                radial_angle = -beam_angle + inclination - math.pi / 2.
            # Rotate the (along, across) offsets into lon/lat around the
            # nadir position
            lon_tmp = (lonnad[shift::nbeam] + (xal * numpy.cos(inclination)
                       + xac * numpy.sin(inclination))
                       / numpy.cos(latnad[shift::nbeam] * math.pi / 180.))
            lat_tmp = (latnad[shift::nbeam] + (xal * numpy.sin(inclination)
                       - xac * numpy.cos(inclination)))
            lon_tmp = (lon_tmp + 360) % 360
            # Concatenate list for each beam angle
            lon_beam.append(lon_tmp)
            lat_beam.append(lat_tmp)
            xal_beam.append(xal * const.deg2km * sign_ac)
            xac_beam.append(xac * const.deg2km * sign_ac)
            xal_beam_tot.append(sgrid.x_al[shift::nbeam])
            time_beam.append(timebeamshift)
            angle_beam.append(beam_angle)
            radial_angle_tot.append(radial_angle)
        # Save Sgrid object
        sgrid.timeshift = timeshift
        sgrid.lon = lon_beam
        sgrid.lat = lat_beam
        sgrid.time = time_beam
        sgrid.x_al = xal_beam
        sgrid.x_al_tot = xal_beam_tot
        sgrid.x_ac = xac_beam
        sgrid.list_angle = p.list_angle
        sgrid.list_pos = p.list_pos
        sgrid.beam_angle = angle_beam
        sgrid.radial_angle = radial_angle_tot
        sgrid.angle = inclination_save
        # Remove grid file if it exists and save it
        if os.path.exists(filesgrid):
            os.remove(filesgrid)
        sgrid.write_swath(p)
    msg_queue.put((os.getpid(), ipass, None, None))
    return None
| 21,162 | 42.455852 | 83 | py |
skimulator | skimulator-master/skimulator/run_simulator.py | '''Main program:
Usage: run_simulator(file_param) \n
If no param file is specified, the default one is exemple/params_exemple.txt \n
In the first part of the program, model coordinates are read and the
SKIM swath is computing accordingly. \n
The SKIM grid parameters are saved in netcdf files, if you don't want to
recompute them, set maksgrid (in params file) to False.\n
In the second part of the program, errors are computed on SKIM grid for
each pass, for each cycle. The error free velocity is the velocity interpolated
from the model at each timestep. Note that there is no temporal interpolation
between model files and thus if several files are used in the velocity
interpolation, some discontinuities may be visible. \n
OUTPUTS are netcdf files containing the requested errors, the error free
radial velocity and the radial velocity with errors. There is one file every
pass and every cycle.
\n
\n
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import numpy
import glob
import sys
import time
import datetime
import logging
import traceback
import pkg_resources
from . import build_swath
from . import rw_data
#import skimulator.build_error as build_error
#import skimulator.build_error2 as build_error
from .error import generator
from . import mod_tools
from . import mod_run as run
from . import mod_parallel
# Define logger level for debug purposes
logger = logging.getLogger(__name__)
#logger = multiprocessing.log_to_stderr()
#logger.setLevel(logging.DEBUG)
# - Define global variables for progress bars
istep = 0
ntot = 1
ifile = 0
def run_simulator(p, die_on_error=False):
    '''Run the full SKIM simulation driven by the imported parameter file.

    Nothing is returned: SKIM grid files and one netcdf data file per pass
    and per cycle are written to ``p.outdatadir``, together with a
    ``skim_simulator.output`` file recording every parameter used.

    Parameters:
    p: imported parameter module (see the params example file)
    die_on_error: when True, the first worker error aborts the whole run
    '''
    # - Initialize missing parameters with defaults and check output paths
    timestart = datetime.datetime.now()
    mod_tools.initialize_parameters(p)
    mod_tools.check_path(p)
    model = p.model
    '''
    p.timeshift = getattr(p, 'timeshift', 0)
    if p.shift_time is None:
        p.timeshift = 0
    '''
    # - Read the coordinates from the list of user model files
    if p.file_input is not None:
        model_data, list_file = run.load_coordinate_model(p)
    # if no modelbox is specified (modelbox=None), the domain of the input
    # data is taken as a modelbox
    # coordinates from the region defined by modelbox are selected
    if p.modelbox is not None:
        modelbox = numpy.array(p.modelbox, dtype='float')
        # Convert longitudes to the [0, 360] convention
        modelbox[0] = (modelbox[0] + 360) % 360
        if modelbox[1] != 360:
            modelbox[1] = (modelbox[1] + 360) % 360
    else:
        if p.file_input is not None:
            modelbox = model_data.calc_box(p)
        else:
            logger.error('modelbox should be provided if no model file is'
                         'provided')
            sys.exit(1)
    # - Extract data on modelbox
    # TODO: do only this step if modelbox is defined? Do it later?
    if p.file_input is not None:
        model_data.read_coordinates(p)
        # Select model data in the region modelbox; indices are stored per
        # coordinate key so that several grids can coexist
        model_data.model_index_lon = {}
        model_data.model_index_lat = {}
        model_data.model_index = {}
        model_data.vloncirc = {}
        for key in model_data.vlon.keys():
            # unary '+' presumably yields a copy of the coordinate arrays
            # so the originals are not modified in place — TODO confirm
            _lon = + model_data.vlon[key]
            _lat = + model_data.vlat[key]
            if p.grid == 'regular':
                # 1d lon/lat axes: select each axis independently; the
                # '|' branch handles a modelbox crossing the 0 meridian
                if modelbox[0] < modelbox[1]:
                    _tmp = numpy.where(((modelbox[0]-1) <= _lon)
                                       & (_lon <= (modelbox[1]+1)))[0]
                    model_data.model_index_lon[key] = + _tmp
                else:
                    _tmp = numpy.where(((modelbox[0]-1) <= _lon)
                                       | (_lon <= (modelbox[1]+1)))[0]
                    model_data.model_index_lon[key] = + _tmp
                _tmp = numpy.where(((modelbox[2]-1) <= _lat)
                                   & (_lat <= (modelbox[3]+1)))[0]
                model_data.model_index_lat[key] = + _tmp
                model_data.vlon[key] = + _lon[model_data.model_index_lon[key]]
                model_data.vlat[key] = + _lat[model_data.model_index_lat[key]]
            else:
                # 2d curvilinear coordinates: keep a single 2d index
                if modelbox[0] < modelbox[1]:
                    _tmp = numpy.where(((modelbox[0]-1) <= _lon)
                                       & (_lon <= (modelbox[1]+1))
                                       & ((modelbox[2]-1) <= _lat)
                                       & (_lat <= (modelbox[3]+1)))
                    model_data.model_index[key] = + _tmp
                else:
                    _tmp = numpy.where(((modelbox[0]-1) <= _lon)
                                       | (_lon <= (modelbox[1]+1))
                                       & ((modelbox[2]-1) <= _lat)
                                       & (_lat <= (modelbox[3]+1)))
                    model_data.model_index[key] = + _tmp
            # prevent IDL issues
            _wlon = model_data.vlon[key]
            model_data.vloncirc[key] = numpy.rad2deg(numpy.unwrap(_wlon))
        model_data.model = model
    else:
        model_data = []
        list_file = []
    # avoid issue with 0=360 for global modelbox
    if modelbox[1] == 0:
        modelbox[1] = 359.99
    # - Make SKIM grid if necessary
    if p.makesgrid is True:
        logger.info('\n Force creation of SKIM grid')
        # make nadir orbit
        orb = build_swath.makeorbit(modelbox, p, orbitfile=p.filesat)
        # build swath around this orbit
        ok = False
        try:
            ok = build_swath.orbit2swath(modelbox, p, orb, die_on_error)
        except mod_parallel.MultiprocessingError:
            logger.error('An error occurred with the multiprocessing '
                         'framework')
            traceback.print_exception(*sys.exc_info())
            sys.exit(1)
        except mod_parallel.DyingOnError:
            logger.error('An error occurred and all errors are fatal')
            sys.exit(1)
        if not ok:
            logger.error('Errors occurred while generating grid files')
            sys.exit(1)
        logger.info("\n SKIM Grids and nadir tracks have been written in"
                    "{}".format(p.outdatadir))
        logger.info("-----------------------------------------------")
    # - Compute interpolated velocity and errors for each pass, at each
    # cycle
    logger.info('Compute interpolated velocity and errors:')
    # load all SKIM grid files (one for each pass)
    listsgridfile = sorted(glob.glob(p.filesgrid + '_p*.nc'))
    if not listsgridfile:
        logger.error('\n There is no SKIM grid file in {}, run simulator with'
                     'option makesgrid set to true in your params'
                     'file'.format(p.outdatadir))
        sys.exit(1)
    # Build model time steps from parameter file
    modeltime = numpy.arange(0, p.nstep*p.timestep, p.timestep)
    # Remove the grid from the list of model files
    if p.file_input and p.file_grid_model is None:
        logger.info("WARNING: the first file is not used to build data")
        list_file.remove(list_file[0])
    # - Build one job per SKIM grid file (pass); p is serialized to a dict
    #   so it can be sent to worker processes
    jobs = []
    p2 = mod_tools.todict(p)
    #time_yaw = None
    #vac_yaw = None
    error_generator = generator.Generator(p)
    #if p.attitude is True and os.path.isfile(p.yaw_file):
    #    time_yaw, vac_yaw = build_error.load_yaw_aocs(p.yaw_file)
    #    # time_yaw = time_yaw / 86400
    for sgridfile in listsgridfile:
        jobs.append([sgridfile, p2, listsgridfile, list_file,
                     modelbox, model_data, modeltime, error_generator])
    ok = False
    try:
        ok = make_skim_data(p.proc_count, jobs, die_on_error, p.progress_bar)
    except mod_parallel.MultiprocessingError:
        logger.error('An error occurred with the multiprocessing framework')
        traceback.print_exception(*sys.exc_info())
        sys.exit(1)
    except mod_parallel.DyingOnError:
        logger.error('An error occurred and all errors are fatal')
        sys.exit(1)
    # - Write selected parameters in a txt file
    timestop = datetime.datetime.now()
    timestop = timestop.strftime('%Y%m%dT%H%M%SZ')
    timestart = timestart.strftime('%Y%m%dT%H%M%SZ')
    rw_data.write_params(p, os.path.join(p.outdatadir,
                                         'skim_simulator.output'))
    if ok is True:
        if p.progress_bar is True:
            __ = mod_tools.update_progress(1, 'All passes have been processed',
                                           '')
        else:
            __ = logger.info('All passes have been processed')
        logger.info("\n Simulated skim files have been written in "
                    "{}".format(p.outdatadir))
        logger.info(''.join(['-'] * 61))
        sys.exit(0)
    logger.error('\nERROR: At least one of the outputs was not saved.')
    sys.exit(1)
def make_skim_data(_proc_count, jobs, die_on_error, progress_bar):
    """Run the per-pass SKIM data generation jobs in parallel.

    One job per grid file is dispatched to a pool of at most
    ``_proc_count`` worker processes; progress is reported through
    ``mod_tools.update_progress_multiproc``. Returns True when every job
    succeeded, False otherwise (errors are displayed before returning).
    """
    workers = min(_proc_count, len(jobs))
    manager = mod_parallel.JobsManager(workers,
                                       mod_tools.update_progress_multiproc,
                                       exc_formatter,
                                       err_formatter)
    success = manager.submit_jobs(worker_method_skim, jobs, die_on_error,
                                  progress_bar)
    if not success:
        # Display errors once the processing is done
        manager.show_errors()
    return success
def exc_formatter(exc):
    """Render a ``sys.exc_info()`` triple into a list of traceback lines.

    Traceback objects themselves cannot be pickled, so the formatted
    lines are what gets serialized and stored in the JobsManager.
    """
    exc_type, exc_value, exc_tb = exc
    return traceback.format_exception(exc_type, exc_value, exc_tb)
def err_formatter(pid, grid, cycle, exc):
    """Transform errors stored by the JobsManager into a readable message.

    Parameters:
    pid: worker process id (unused, kept for the JobsManager callback API)
    grid: grid file that was being processed when the error occurred
    cycle: cycle number; a negative value means the error happened outside
        any specific cycle
    exc: formatted exception info (unused here, displayed separately)

    Returns a one-line description of where the failure occurred.
    """
    # Raw strings avoid the invalid '\ ' escape sequence (a SyntaxWarning
    # on modern Python, scheduled to become an error); the rendered text
    # is unchanged.
    if cycle < 0:
        return r'/!\ Error occurred while processing grid {}'.format(grid)
    template = r'/!\ Error occurred while processing cycle {} on grid {}'
    return template.format(cycle, grid)
def worker_method_skim(*args, **kwargs):
    """Worker: build SKIM-like data for one grid (pass) over all cycles.

    Positional args (packed by run_simulator):
    (msg_queue, sgridfile, p2, listsgridfile, list_file, modelbox,
    model_data, modeltime, error_generator), where ``p2`` is the parameter
    set serialized as a dict. For each cycle the model fields are
    interpolated on every beam of the swath, errors and corrections are
    generated and the result is saved to a netcdf file. Progress and
    completion are reported through ``msg_queue``.
    """
    msg_queue, sgridfile, p2, listsgridfile = args[:4]
    list_file, modelbox, model_data, modeltime, error_generator = args[4:]
    p = mod_tools.fromdict(p2)
    # Load SKIM grid files (Swath and nadir)
    sgrid = run.load_sgrid(sgridfile, p)
    # duplicate SKIM grids to assure that data are not modified and are
    # saved properly
    sgrid_tmp = run.load_sgrid(sgridfile, p)
    sgrid.gridfile = sgridfile
    # Convert cycle in days
    sgrid.cycle /= 86400.
    sgrid_tmp.cycle /= 86400.
    sgrid_tmp.indi = None
    # Select model data around the swath to reduce interpolation cost in
    # griddata ### TODO comment to be removed?
    # - Generate SKIM like data
    # Compute number of cycles needed to cover all nstep model timesteps
    rcycle = (p.timestep * p.nstep)/float(sgrid.cycle)
    ncycle = int(rcycle)
    error = generator.Generator(p)
    first_time = datetime.datetime.strptime(p.first_time, '%Y-%m-%dT%H:%M:%SZ')
    # Loop on all cycles
    for cycle in range(0, ncycle+1):
        #  if ifile > (p.nstep*p.timestep + 1):
        #      break
        # Report the cycle being processed for this grid
        msg_queue.put((os.getpid(), sgridfile, cycle + 1, None))
        if p.file_input is None:
            model_data = []
        # Initialize all list of variables (each beam is appended to the
        # list)
        # Initialize velocity, indices and mask to empty lists
        output_var = {}
        #for key in p.list_output:
        #    output_var[key] = []
        list_key = list(p.list_input_var.keys())
        list_key.append('vindice')
        for key in list_key:
            output_var[key] = []
        # TODO: proof
        # sigma0 complusory
        # uwnd, vwnd compulsory for Instrument and WaveDoppler
        # hs uwnd vwnd mssu mssc mssclose for WaveDoppler
        # Loop over the beams (nadir + p.list_pos) to interpolate model data
        time_all = []
        for i in range(len(p.list_pos) + 1):
            sgrid_tmp.lon = sgrid.lon[i]
            sgrid_tmp.lat = sgrid.lat[i]
            sgrid_tmp.time = sgrid.time[i]
            # Beam time in days, shifted by the current cycle
            time = sgrid_tmp.time / 86400. + sgrid.cycle * cycle
            # Interpolate observation on SKIM swath
            create = run.interp_on_swath(cycle, list_file,
                                         modelbox, sgrid_tmp,
                                         model_data, modeltime, p)
            output_var_i, time = create
            time_all.append(time)
            for key in output_var_i.keys():
                output_var[key].append(output_var_i[key])
        # Stack per-beam lists into (along-track, beam) arrays
        for key, value in output_var.items():
            output_var[key] = numpy.transpose(numpy.array(value))
        time_arr = numpy.transpose(numpy.array(time_all))
        radial_angle = numpy.transpose(sgrid.radial_angle)
        ac_angle = numpy.array(sgrid.angle)
        radial_angle = numpy.array(sgrid.radial_angle)
        # Project model velocities on the radial directions
        output_var.update(run.compute_geov(output_var, ac_angle, radial_angle,
                                           p))
        dic_grid = {'lon': sgrid.lon, 'lat': sgrid.lat, 'incl': sgrid.incl,
                    'angle': sgrid.angle, 'ipass': sgrid.ipass}
        #ac_angle m
        ## TODO remove sgrid.x_al_nadir variable
        all_error = error.generate(sgrid.x_al, output_var, sgrid.angle,
                                   time_arr, sgrid.cycle, first_time,
                                   radial_angle,
                                   sgrid.x_al[:, 0], sgrid.x_ac)
        # for key, value in output_var.items():
        #     output_var[key] = numpy.transpose(value)
        for k, v in all_error.items():
            output_var[k] = v
        # Compute correction
        p.delta_azim = 15
        all_corr = error.correct(dic_grid, output_var, radial_angle,
                                 p.delta_azim)
        for k, v in all_corr.items():
            output_var[k] = v
        # Add error to true interpolated fields
        obs = error.generate_obs(output_var, all_error, all_corr)
        for k, v in obs.items():
            output_var[k] = v
        # Save outputs in a netcdf file (skipped when every model index is
        # NaN, i.e. the pass never intersects the model domain)
        if ((~numpy.isnan(numpy.array(output_var['vindice']))).any()
                or not p.file_input):
            sgrid.ncycle = cycle
            run.save_SKIM(cycle, sgrid, time_all, output_var, p)
        del time
    # Restore the [0, 360] longitude convention shared with the caller
    if p.file_input is not None:
        for key in model_data.vlon.keys():
            model_data.vlon[key] = (model_data.vlon[key] + 360) % 360
        modelbox[0] = (modelbox[0] + 360) % 360
        modelbox[1] = (modelbox[1] + 360) % 360
    del sgrid
    # Signal completion for this grid
    msg_queue.put((os.getpid(), sgridfile, None, None))
| 15,674 | 39.29563 | 79 | py |
skimulator | skimulator-master/skimulator/cli.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import skimulator.mod_tools as mod_tools
import argparse
import logging
logger = logging.getLogger(__name__)
def run_script():
    """Command-line entry point for the SKIM simulator."""
    from . import run_simulator
    # Route all log records to the console
    root_logger = logging.getLogger()
    root_logger.handlers = []
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('params_file', nargs='?', type=str, default=None,
                        help='Path of the parameters file')
    parser.add_argument('--die-on-error', action='store_true', default=False,
                        help='Force simulation to quit on first error')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Display debug log messages')
    args = parser.parse_args()
    if args.params_file is None:
        logger.error('Please specify a parameter file')
        sys.exit(1)
    if args.debug:
        root_logger.setLevel(logging.DEBUG)
    params = mod_tools.load_python_file(args.params_file)
    try:
        run_simulator.run_simulator(params, args.die_on_error)
    except KeyboardInterrupt:
        logger.error('\nInterrupted by user (Ctrl+C)')
        sys.exit(1)
    sys.exit(0)
def run_l2c():
    """Command-line entry point for the L2C along-track reconstruction."""
    import skimulator.regridding as regridding
    # Route all log records to the console
    root_logger = logging.getLogger()
    root_logger.handlers = []
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('params_file', nargs='?', type=str, default=None,
                        help='Path of the parameters file')
    parser.add_argument('--die-on-error', action='store_true', default=False,
                        help='Force simulation to quit on first error')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Display debug log messages')
    args = parser.parse_args()
    if args.params_file is None:
        logger.error('Please specify a parameter file')
        sys.exit(1)
    if args.debug:
        root_logger.setLevel(logging.DEBUG)
    params = mod_tools.load_python_file(args.params_file)
    try:
        regridding.run_l2c(params, args.die_on_error)
    except KeyboardInterrupt:
        logger.error('\nInterrupted by user (Ctrl+C)')
        sys.exit(1)
    sys.exit(0)
def run_l2d():
    """Command-line entry point for the L2D two-dimensional reconstruction."""
    import skimulator.regridding_l2d as regridding
    # Route all log records to the console
    root_logger = logging.getLogger()
    root_logger.handlers = []
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('params_file', nargs='?', type=str, default=None,
                        help='Path of the parameters file')
    parser.add_argument('--die-on-error', action='store_true', default=False,
                        help='Force simulation to quit on first error')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Display debug log messages')
    args = parser.parse_args()
    if args.params_file is None:
        logger.error('Please specify a parameter file')
        sys.exit(1)
    if args.debug:
        root_logger.setLevel(logging.DEBUG)
    params = mod_tools.load_python_file(args.params_file)
    try:
        regridding.run_l2d(params, args.die_on_error)
    except KeyboardInterrupt:
        logger.error('\nInterrupted by user (Ctrl+C)')
        sys.exit(1)
    sys.exit(0)
def interpolate_l2d():
    """Command-line entry point: interpolate model truth onto existing
    L2D files (offline interpolation)."""
    import skimulator.regridding_l2d as regridding
    # Route all log records to the console
    root_logger = logging.getLogger()
    root_logger.handlers = []
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('params_file', nargs='?', type=str, default=None,
                        help='Path of the parameters file')
    parser.add_argument('--die-on-error', action='store_true', default=False,
                        help='Force simulation to quit on first error')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Display debug log messages')
    args = parser.parse_args()
    if args.params_file is None:
        logger.error('Please specify a parameter file')
        sys.exit(1)
    if args.debug:
        root_logger.setLevel(logging.DEBUG)
    params = mod_tools.load_python_file(args.params_file)
    try:
        regridding.offline_interpolation(params)
    except KeyboardInterrupt:
        logger.error('\nInterrupted by user (Ctrl+C)')
        sys.exit(1)
    sys.exit(0)
| 5,891 | 31.196721 | 77 | py |
skimulator | skimulator-master/skimulator/regridding_l2d.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
'''L2D computation from L2b SKIM like data
'''
import numpy
import os
import sys
import glob
import ctypes
import datetime
import time
import math
import netCDF4
from . import const
from . import rw_data as rw
from . import mod_tools
from . import mod_run as mod
import skimulator.mod_parallel
import logging
import traceback
import multiprocessing
logger = logging.getLogger(__name__)
# Using global variables is discouraged but this one will only be used as a
# simple way to share the observations vector with worker processes (the main
# process writes to this variable, the workers should only read from it)
obs_vector = {}
def build_grid(p) -> dict:
    """Build the regular lon/lat analysis grid for the L2D reconstruction.

    The domain comes from ``p.spatial_domain`` (lon0, lon1, lat0, lat1) and
    the posting from ``p.posting_l2d`` (dlon, dlat). Exits with an error
    when no spatial domain is provided.

    Returns the grid dictionary from :func:`make_grid`, augmented with the
    posting (``dlon``/``dlat`` and their low-resolution twins, currently
    identical), the low-resolution cell counts ``nxlr``/``nylr`` used to
    bin observations, and the original ``modelbox`` bounds.
    """
    if p.spatial_domain is None:
        logger.error('Please provide modelbox_l2d for L2d reconstruction')
        sys.exit(1)
    # NOTE: previous revision computed a 'global_domain' flag and wrapped
    # copies of the bounds; that dead code has been removed.
    modelbox = numpy.array(p.spatial_domain, dtype='float')
    lon0, lon1, lat0, lat1 = modelbox
    dlon, dlat = p.posting_l2d
    # #####################################
    # Spatial grid construction
    # #####################################
    grd = make_grid(lon0, lon1, dlon, lat0, lat1, dlat)
    # Low-resolution posting equals the full posting for now
    grd['dlon'] = dlon
    grd['dlonlr'] = dlon
    grd['dlat'] = dlat
    grd['dlatlr'] = dlat
    grd['nxlr'] = int((lon1 - lon0) / dlon)
    grd['nylr'] = int((lat1 - lat0) / dlat)
    grd['modelbox'] = (lon0, lon1, lat0, lat1)
    return grd
def run_l2d(p, die_on_error=False):
    """Run the L2D two-dimensional current reconstruction.

    Reads all L2B pass files that overlap the requested time/space domain,
    bins the valid observations per low-resolution grid cell in parallel,
    moves them into shared read-only buffers, then performs one optimal
    interpolation analysis per output time step and writes each analysis
    to a netcdf L2D file.

    Parameters:
    p: imported parameter module (must provide the *_l2d options)
    die_on_error: when True, the first worker error aborts the whole run
    """
    config = p.config
    mod_tools.initialize_parameters(p)
    resols = p.resol_spatial_l2d  # km
    resolt = p.resol_temporal_l2d  # days
    grd = build_grid(p)
    lon0, lon1, lat0, lat1 = grd['modelbox']
    # Filtering as a function of latitude (10d at Equator, 14 d at 60º)
    resolt = (-4 * numpy.cos(numpy.deg2rad(grd['lat'])) + 9) * resolt
    # #####################################
    # READ L2B OBS
    # #####################################
    tcycle = p.satcycle
    # Time domain
    time0, time1, dt = p.time_domain
    # TODO change
    resoltmax = numpy.max(resolt)
    # Cycles that can contribute to the requested time window
    c0 = int((max(time0 - resoltmax, 0)) / tcycle) + 1
    c1 = int((time1 + resoltmax) / tcycle) + 1
    obs = {}
    # c0 = 1 ; c1 = 2
    p2 = mod_tools.todict(p)
    jobs = []
    print(datetime.datetime.now())
    for cycle in range(c0, c1 + 1, 1):
        _pat = '{}_c{:02d}_p*'.format(p.config, cycle)
        pat = os.path.join(p.outdatadir, _pat)
        listfile = glob.glob(pat)
        # Create specific funtion to retrieve time_unit
        if not listfile:
            continue
        # NOTE(review): time_unit stays unbound if no cycle has any file;
        # it is read from the first file found and reused for all outputs
        filev = os.path.join(p.outdatadir, listfile[0])
        _, _, time_unit = read_l2b(filev)
        for pattern in listfile:
            jobs.append([p2, pattern, lon0, lon1, lat0, lat1, time0, time1,
                         resoltmax, grd['dlonlr'], grd['dlatlr']])
    ok = False
    task = 0
    results = []
    try:
        ok, results = build_obs(p.proc_count, jobs, die_on_error,
                                p.progress_bar)
    except skimulator.mod_parallel.MultiprocessingError:
        logger.error('An error occurred with the multiprocessing framework')
        traceback.print_exception(*sys.exc_info())
        sys.exit(1)
    except skimulator.mod_parallel.DyingOnError:
        logger.error('An error occurred and all errors are fatal')
        sys.exit(1)
    print()
    print('Aggregating results...', datetime.datetime.now())
    # Merge per-worker observation dictionaries: obs[variable][cell_key]
    # becomes the concatenation of every worker's contribution
    for i in range(len(results)):
        obs_rjobs = results[i]
        #key, ind_key, obs_r = results[i]
        for j in range(len(obs_rjobs)):
            try:
                obs_r = obs_rjobs[j]
            except:
                obs_r = obs_rjobs
            obs_r_keys = list(obs_r.keys())
            for key in obs_r_keys:
                if key not in obs.keys():
                    obs[key] = {}
                for ind_key in obs_r[key].keys():
                    if ind_key not in obs[key].keys():
                        obs[key][ind_key] = []
                    obs[key][ind_key] = numpy.concatenate((obs[key][ind_key],
                                                          obs_r[key][ind_key]))
                del(obs_r[key])
    del(results)
    print('Building shared obs vector...', datetime.datetime.now())
    # Populate the shared observations vector: raw ctypes buffers can be
    # inherited by worker processes without being copied (read-only use)
    global obs_vector
    obs_vector = {}
    for key in obs:
        obs_vector[key] = {}
        obs_ind_keys = list(obs[key].keys())
        for ind_key in obs_ind_keys:
            _buffer = multiprocessing.RawArray(ctypes.c_float,
                                               numpy.size(obs[key][ind_key]))
            narray = numpy.frombuffer(_buffer, dtype='float32')
            numpy.copyto(narray, obs[key][ind_key])
            obs_vector[key][ind_key] = _buffer
            # Release memory from original array to keep the total amount of
            # RAM as low as possible ('time' is still needed below)
            if 'time' != key:
                del(obs[key][ind_key])  # release memory as soon as possible
    print(datetime.datetime.now())
    print('Memory housekeeping...', datetime.datetime.now())
    keys = list(obs.keys())
    for key in keys:
        if 'time' != key:
            del(obs[key])
    print(datetime.datetime.now())
    #import pdb ; pdb.set_trace()
    # #####################################
    # Independant time loop for each analysis
    # #####################################
    enstime = numpy.arange(time0, time1 + dt, dt)
    time_model = numpy.arange(time0, time1, p.timestep)
    # Center at noon
    window = dt / 2.
    enstime = enstime + window
    indice_mask = make_mask(p, 'ucur', grd)
    list_key = {'ucur':'ux_true', 'vcur':'uy_true'} #, 'uwnd':'uwnd',
                #'vwnd': 'vwnd'}
    list_input = {}
    for key in list_key:
        list_input[key] = p.list_input_var_l2d[key]
    for it, timeref in enumerate(enstime):
        print(it, timeref)
        # Select, per cell, the observations inside the temporal window
        iobs = {}
        for ind_key in obs['time'].keys():
            iobs[ind_key] = numpy.where((obs['time'][ind_key] > (timeref
                                         - numpy.max(resoltmax)))
                                        & (obs['time'][ind_key] < (timeref
                                        + numpy.max(resoltmax))))[0]
        make_oi(p, p2, grd, iobs, resols, timeref, resolt, indice_mask,
                die_on_error=die_on_error)
        # Interpolate model data to have a true reference
        #indice_model = it
        #print('read model')
        #model_data, out_var, list_file = read_model(p, it, p.dim_time,
        #                                            list_input)
        #print('interpolate model')
        #if global_domain is False:
        #    interpolate_model(p, model_data, out_var, grd, list_key)
        pattern = os.path.join(p.outdatadir, '{}_{}_l2d_'.format(config,
                                                                 p.config_l2d))
        save_l2d(pattern, timeref, window, time_unit, grd)
    print(datetime.datetime.now())
def offline_interpolation(p):
    """Interpolate model 'truth' fields onto already-written L2D files.

    For every existing L2D netcdf file, reads its grid and time coverage,
    interpolates the model currents at the matching model time step and
    appends the ``ux_true``/``uy_true`` variables to the file in place.

    Fixes over the previous revision: the glob pattern no longer prefixes
    ``p.outdatadir`` twice (which broke relative output directories), the
    bare ``except:`` clauses are narrowed to ``KeyError``, the removed
    ``numpy.PINF`` alias is replaced by ``numpy.inf`` and each dataset is
    explicitly closed.
    """
    pattern = os.path.join(p.outdatadir, '{}_{}_l2d_'.format(p.config,
                                                             p.config_l2d))
    # 'pattern' already contains the output directory, do not join it again
    listfile = glob.glob('{}*.nc'.format(pattern))
    str_format = '%Y-%m-%dT%H:%M:%SZ'
    list_key = {'ucur': 'ux_true', 'vcur': 'uy_true'}
    list_input = {}
    dimtime = 'time'
    dimlon = 'lon'
    dimlat = 'lat'
    longname = { "ux_noerr": "Error-free zonal velocity",
                "uy_noerr": "Error-free meridional velocity",
                "ux_obs": "Observed zonal velocity",
                "uy_obs": "Observed meridional velocity",
                "ux_model": "Error-free zonal velocity",
                "uy_model": "Error-free meridional velocity",
                "ux_true": "True zonal velocity",
                "uy_true": "True meridional velocity",
                "uwnd": "Eastward wind",
                "vwnd": "Northward wind",
                "uuss": "Eastward Stockes drift",
                "vuss": "Northward Stockes drift",
                }
    unit = {"ux_noerr": "m/s", "ux_obs": "m/s",
            "uy_noerr": "m/s", "uy_obs": "m/s",
            "ux_true": "m/s", "uy_true": "m/s",
            "uwnd": "m/s", "vwnd": "m/s",
            "uuss": "m/s", "vuss": "m/s",
            }
    for key in list_key:
        list_input[key] = p.list_input_var_l2d[key]
    for ifile in listfile:
        fid = netCDF4.Dataset(ifile, 'a')
        start_date = datetime.datetime.strptime(fid.time_coverage_start,
                                                str_format)
        model_start = datetime.datetime.strptime(p.first_time, str_format)
        # Model time index matching this file's start date
        it = -(model_start - start_date).days
        print('read l2d coordinates')
        grd = {}
        grd['lon'] = fid.variables['lon'][:]
        grd['lat'] = fid.variables['lat'][:]
        grd['lon2'], grd['lat2'] = numpy.meshgrid(grd['lon'], grd['lat'])
        print('read model')
        print(start_date, model_start)
        model_data, out_var, list_file = read_model(p, it, p.dim_time,
                                                    list_input)
        print('interpolate model')
        interpolate_model(p, model_data, out_var, grd, list_key)
        for key in ('ux_true', 'uy_true'):
            var = fid.createVariable(key, 'f4', (dimtime, dimlat, dimlon),
                                     fill_value=-1.36e9)
            value = grd[key]
            try:
                var.units = unit[str(key)]
            except KeyError:
                var.units = ''
            try:
                var.long_name = longname[str(key)]
            except KeyError:
                var.long_name = str(key)
            if value.any():
                # Replace NaN, infinite and out-of-range values with the
                # fill value before writing
                value[numpy.where(numpy.isnan(value))] = -1.36e9
                value[numpy.where(value < -1e7)] = -1.36e9
                value[numpy.where(value > 1e7)] = -1.36e9
                value[numpy.where(value == numpy.inf)] = -1.36e9
            var[0, :, :] = value
        fid.close()
def worker_build_obs(*args, **kwargs):
    """Worker: read one L2B pass file and bin its valid observations.

    Positional args (packed by build_obs):
    (msg_queue, p2, pattern, lon0, lon1, lat0, lat1, time0, time1, resolt,
    dlonlr, dlatlr, res_queue), where ``p2`` is the parameter set as a
    dict. Observations inside the requested space/time domain are grouped
    per low-resolution grid cell (key ``10000 * i + j``) and the resulting
    ``obs[variable][cell_key]`` dictionary is sent back on ``res_queue``;
    completion is signalled on ``msg_queue``.
    """
    msg_queue, p2, pattern = args[:3]
    p = mod_tools.fromdict(p2)
    lon0, lon1, lat0, lat1, time0, time1, resolt, dlonlr, dlatlr = args[3:12]
    res_queue = args[-1]
    filev = os.path.join(p.outdatadir, pattern)
    obs = {}
    if os.path.isfile(filev):
        obs_i, mask_data, time_unit = read_l2b(filev)
        # Work with longitudes in the [-180, 180] convention
        _lon = numpy.mod(obs_i['lon'] + 180, 360) - 180
        # if lon1o < lon0o:
        #    mask_lon = ((obs_i['lon'] < lon0o) & (obs_i['lon'] > lon1o))
        # else:
        #    mask_lon = ((obs_i['lon'] > lon0o) & (obs_i['lon'] < lon1o))
        # Spatial selection; the first branch handles a domain crossing
        # the dateline (lon1 < lon0)
        if lon1 < lon0:
            mask_lon = ((_lon < lon0) & (_lon > lon1))
        else:
            mask_lon = ((_lon > lon0) & (_lon < lon1))
        mask_lat = ((obs_i['lat'] > lat0) & (obs_i['lat'] < lat1))
        # Temporal selection, widened by the filtering scale resolt
        mask_time = ((obs_i['time'] > time0 - resolt)
                     & (obs_i['time'] < time1 + resolt))
        mask = (mask_time & mask_lat & mask_lon & mask_data)
        #if not numpy.any(mask):
        #    msg_queue.put((os.getpid(), filev, None, None))
        #    return None
        for key in obs_i.keys():
            obs_i[key] = obs_i[key][mask]
        _lon = _lon[mask]
        # Low-resolution cell indices of each observation
        ind_i = numpy.floor((obs_i['lat'] - lat0) / dlatlr)
        ind_j = numpy.floor(numpy.mod(_lon - lon0, 360) / dlonlr)
        unique_i = list(set(ind_i))
        unique_j = list(set(ind_j))
        for key in obs_i.keys():
            if key not in obs.keys():
                obs[key] = {}
        for i in unique_i:
            for j in unique_j:
                mask_ind = numpy.where((ind_i==i) & (ind_j == j))
                if mask_ind[0].any():
                    ind_key = 10000 * int(i) + int(j)  # '{}_{}'.format(int(i), int(j))
                    for key in obs_i.keys():
                        if ind_key not in obs[key].keys():
                            obs[key][ind_key] = []
                        obs[key][ind_key] = numpy.concatenate((obs[key][ind_key],
                                                              obs_i[key][mask_ind]))
    res_queue.put(obs)  # Pass result to parent method
    msg_queue.put((os.getpid(), filev, None, None))
def build_obs(_proc_count, jobs, die_on_error, progress_bar):
    """Read and bin all L2B observation files in parallel.

    Appends the manager's result queue to every job (it is how workers
    return their binned observations) and dispatches them to at most
    ``_proc_count`` processes. Returns ``(ok, results)`` where ``ok`` is
    True when every job succeeded and ``results`` collects the per-worker
    observation dictionaries.
    """
    workers = min(_proc_count, len(jobs))
    manager = skimulator.mod_parallel.JobsManager(
        workers, mod_tools.update_progress_multiproc, exc_formatter,
        err_formatter)
    # Workers push their binned observations through this queue
    for job in jobs:
        job.append(manager.res_queue)
    results = []
    ok = manager.submit_jobs(worker_build_obs, jobs, die_on_error,
                             progress_bar, results=results.append)
    if not ok:
        # Display errors once the processing is done
        manager.show_errors()
    return ok, results
def save_l2d(filenc, timeref, window, time_unit, grd):
    """Write one L2D analysis window to a netcdf file.

    Parameters:
    filenc: output file prefix (directory + base name)
    timeref: center of the analysis window (in ``time_unit`` units)
    window: half-width of the window, in the same units
    time_unit: netcdf time unit string used to decode ``timeref``
    grd: grid dictionary holding the analysed fields

    The file name embeds the window center; an existing file is
    overwritten. The ``*_true`` model fields are only written when they
    have been interpolated onto the grid.
    """
    dateformat = '%Y-%m-%dT%H:%M:%SZ'
    datewformat = '%Y%m%dT%H%M%S'
    time_std = netCDF4.num2date(timeref - window, time_unit)
    time_start = time_std.strftime(format=dateformat)
    time_std = netCDF4.num2date(timeref + window, time_unit)
    time_end = time_std.strftime(format=dateformat)
    time_std = netCDF4.num2date(timeref, time_unit)
    time_middle = time_std.strftime(format=datewformat)
    pattern = '{}{}.nc'.format(filenc, time_middle)
    grd['time'] = timeref
    # 'file' was previously assigned twice; once is enough
    metadata = {'first_time': time_unit,
                'time_coverage_start': time_start,
                'time_coverage_end': time_end,
                'file': pattern}
    if os.path.exists(pattern):
        os.remove(pattern)
    fields = {'ux_noerr': grd['ux_noerr'], 'uy_noerr': grd['uy_noerr'],
              'ux_obs': grd['ux_obs'], 'uy_obs': grd['uy_obs']}
    if 'uy_true' in grd.keys():
        fields['ux_true'] = grd['ux_true']
        fields['uy_true'] = grd['uy_true']
    rw.write_l2d(metadata, grd, **fields)
def read_l2b(nfile, model_nan=0):
    """Read a single L2B pass file and build a validity mask.

    Parameters:
    nfile: path to the L2B netcdf file
    model_nan: fill value used by the model for invalid cells

    Returns ``(obs_i, mask_data, time_unit)`` where ``obs_i`` maps
    variable names to flattened masked arrays, ``mask_data`` is True
    where the observation is usable and ``time_unit`` is the netcdf time
    unit string.

    Fix over the previous revision: the validity mask no longer assumes
    that the optional ``ur_true``/``ur_obs`` variables are present
    (KeyError before), and the ``wd_est`` correction is only applied when
    ``wd`` itself was read.
    """
    fid = netCDF4.Dataset(nfile, 'r')
    obs_i = {}
    obs_i['ux'] = numpy.ma.array(fid.variables['ucur'][:]).flatten()
    obs_i['uy'] = numpy.ma.array(fid.variables['vcur'][:]).flatten()
    # Optional variables: only read those present in the file
    for key in ('ur_true', 'ur_obs', 'ussr', 'ussr_est', 'instr'):
        if key in fid.variables.keys():
            obs_i[key] = numpy.ma.array(fid.variables[key][:]).flatten()
    nwnd = None
    if 'uwnd' in fid.variables.keys():
        vwnd = numpy.ma.array(fid.variables['vwnd'][:]).flatten()
        uwnd = numpy.ma.array(fid.variables['uwnd'][:]).flatten()
        nwnd = numpy.sqrt(uwnd**2 + vwnd**2)
    if 'wd' in fid.variables.keys():
        obs_i['wdre'] = numpy.ma.array(fid.variables['wd'][:]).flatten()
        # Residual wave Doppler: only meaningful when 'wd' exists
        if 'wd_est' in fid.variables.keys():
            obs_i['wdre'] -= numpy.ma.array(fid.variables['wd_est'][:]).flatten()
    obs_i['lon'] = numpy.ma.array(fid.variables['lon'][:]).flatten()
    # Longitudes in the [0, 360] convention
    obs_i['lon'] = numpy.mod(obs_i['lon'] + 360, 360)
    obs_i['lat'] = numpy.ma.array(fid.variables['lat'][:]).flatten()
    obs_i['time'] = numpy.ma.array(fid.variables['time'][:]).flatten()
    angle = numpy.ma.array(fid.variables['radial_angle'][:]).flatten()
    obs_i['angle'] = numpy.mod(angle, 2 * numpy.pi)
    # Invalid where any mandatory velocity is masked or equals the model
    # fill value; extend with the optional variables actually read
    mask_invalid = (numpy.ma.getmaskarray(obs_i['ux'])
                    | numpy.ma.getmaskarray(obs_i['uy'])
                    | (obs_i['ux'] == model_nan)
                    | (obs_i['uy'] == model_nan))
    for key in ('ur_true', 'ur_obs'):
        if key in obs_i:
            mask_invalid = (mask_invalid
                            | numpy.ma.getmaskarray(obs_i[key])
                            | (obs_i[key] == model_nan))
    if nwnd is not None:
        # Discard low-wind cells (< 4 m/s)
        mask_invalid = (mask_invalid | (nwnd < 4))
    if 'wdre' in obs_i:
        # Discard cells with a large residual wave Doppler
        mask_invalid = (mask_invalid | (abs(obs_i['wdre']) > 0.5))
    mask_data = ~mask_invalid
    time_unit = fid.variables['time'].units
    fid.close()
    return obs_i, mask_data, time_unit
def make_grid(lon0, lon1, dlon, lat0, lat1, dlat):
    """Build a regular lon/lat analysis grid as a dictionary.

    Parameters are the longitude bounds/step and latitude bounds/step in
    degrees.  Returns a dict with the 1D 'lon'/'lat' axes (longitudes
    normalized to [0, 360)), the 2D meshgrid arrays 'lon2'/'lat2' and the
    axis sizes 'nx'/'ny'.
    """
    grd = {}
    grd['lon'] = numpy.arange(lon0, lon1 + dlon, dlon)
    # Normalize longitudes to [0, 360) in one step.  The original code first
    # mapped them to [-360, 0) and then immediately re-mapped to [0, 360),
    # which is equivalent to a single modulo operation.
    grd['lon'] = numpy.mod(grd['lon'], 360)
    grd['lat'] = numpy.arange(lat0, lat1 + dlat, dlat)
    grd['lon2'], grd['lat2'] = numpy.meshgrid(grd['lon'], grd['lat'])
    grd['nx'] = len(grd['lon'])
    grd['ny'] = len(grd['lat'])
    return grd
def make_oi(p, p2, grd, iobs, resols, timeref, resolt, index,
            die_on_error=False):
    """Chunk the list of grid cells and run the OI workers in parallel.

    Exits the process with status 1 when the multiprocessing framework
    fails or when errors are fatal.
    """
    cells = list(zip(index[0], index[1]))
    # Empirical heuristic: aim for about 3 chunks per worker process,
    # never letting rounding produce an empty chunk size.
    per_chunk = max(int(len(cells) / (3 * p.proc_count)), 1)
    n_chunks = math.ceil(len(cells) / per_chunk)
    jobs = [[p2, grd, iobs, resols, timeref, resolt, index,
             cells[k * per_chunk: (k + 1) * per_chunk]]
            for k in range(n_chunks)]
    ok = False
    try:
        ok = par_make_oi(grd, p.proc_count, jobs, die_on_error, p.progress_bar)
    except skimulator.mod_parallel.MultiprocessingError:
        logger.error('An error occurred with the multiprocessing framework')
        traceback.print_exception(*sys.exc_info())
        sys.exit(1)
    except skimulator.mod_parallel.DyingOnError:
        logger.error('An error occurred and all errors are fatal')
        sys.exit(1)
def init_oi_worker(obs_dict):
    """Worker initializer: publish the shared observation dictionary.

    The workers only read ``obs_vector``, so handing it over through the
    initializer relies on copy-on-write fork/spawn semantics and should not
    duplicate the data (at least on Linux).
    """
    global obs_vector
    obs_vector = obs_dict
def par_make_oi(grd, _proc_count, jobs, die_on_error, progress_bar):
    """Run the OI workers in parallel and gather the gridded results.

    Each job processes a chunk of (j, i) grid cells; workers push their
    per-cell solutions on a result queue which is drained here to fill the
    output arrays of ``grd``.  Returns True when every job succeeded.
    """
    global obs_vector
    # - Set up parallelisation parameters
    # Never spawn more worker processes than there are jobs.
    proc_count = min(len(jobs), _proc_count)
    status_updater = mod_tools.update_progress_multiproc
    jobs_manager = skimulator.mod_parallel.JobsManager(proc_count,
                                                      status_updater,
                                                      exc_formatter,
                                                      err_formatter,
                                                      init_oi_worker,
                                                      (obs_vector,))
    # Append the manager's result queue to every job so worker_make_oi can
    # send its per-cell solutions back to this process.
    for j in jobs:
        j.append(jobs_manager.res_queue)
    jobs_res = []
    ok = jobs_manager.submit_jobs(worker_make_oi, jobs, die_on_error,
                                  progress_bar, results=jobs_res.append)
    print()
    print('par_make_oi complete', datetime.datetime.now())
    if not ok:
        # Display errors once the processing is done
        jobs_manager.show_errors()
    # Reconstruct results grids (NaN where no solution was computed).
    grd['ux_noerr'] = numpy.full((grd['ny'], grd['nx']), numpy.nan)
    grd['uy_noerr'] = numpy.full((grd['ny'], grd['nx']), numpy.nan)
    grd['ux_obs'] = numpy.full((grd['ny'], grd['nx']), numpy.nan)
    grd['uy_obs'] = numpy.full((grd['ny'], grd['nx']), numpy.nan)
    grd['ux_uss'] = numpy.full((grd['ny'], grd['nx']), numpy.nan)
    grd['uy_uss'] = numpy.full((grd['ny'], grd['nx']), numpy.nan)
    #grd['ux_'] = numpy.full((grd['ny'], grd['nx']), numpy.nan)
    #grd['uy_uwd'] = numpy.full((grd['ny'], grd['nx']), numpy.nan)
    # Scatter each worker result (one tuple per solved cell) back onto the
    # 2D output grids.
    for k in range(0, len(jobs_res)):
        for res in jobs_res[k]:
            j, i, ux_noerr, uy_noerr, ux_obs, uy_obs = res
            grd['ux_noerr'][j, i] = ux_noerr
            grd['uy_noerr'][j, i] = uy_noerr
            grd['ux_obs'][j, i] = ux_obs
            grd['uy_obs'][j, i] = uy_obs
    del(jobs_res)
    return ok
def worker_make_oi(*args, **kwargs):
    """Worker: solve the optimal interpolation for a chunk of grid cells.

    args = (msg_queue, p2, grd, iobs, resols, timeref, resolt, index,
            indices, ..., res_queue).  For each (j, i) cell in ``indices``
    the observations of the surrounding low-resolution tiles are gathered
    from the shared ``obs_vector`` buffers and a weighted least-squares fit
    of the two velocity components is computed; results are pushed on
    ``res_queue`` as (j, i, ux_true, uy_true, ux_obs, uy_obs) tuples.
    """
    msg_queue = args[0]
    p2, grd, iobs, resols, timeref, resolt, index, indices = args[1:9]
    res_queue = args[-1]
    p = mod_tools.fromdict(p2)
    # Shared observation dict installed by init_oi_worker.
    global obs_vector
    jobs_res = []
    for j, i in indices:
        obs = {}
        # Search +/- 1 tile around the cell's low-resolution tile.
        ni = 1
        nj = 1
        dlat = grd['dlat']
        dlatlr = grd['dlatlr']
        dlon = grd['dlon']
        dlonlr = grd['dlonlr']
        resoltij = resolt[j]
        # Filtering as a function of latitude (90km at Eqautor, 40 km at 60º)
        resolsij = (100 * numpy.cos(numpy.deg2rad(grd['lat'][j])) - 10) * resols
        # Low-resolution tile indices of the current cell.
        jlr = int(numpy.floor(j*dlat/dlatlr))
        ilr = int(numpy.floor(i*dlon/dlonlr))
        for jx in range(max(0, jlr-nj), min(jlr + nj + 1, grd['nylr'])):
            for jy in range(max(0, ilr-ni), min(ilr + ni + 1, grd['nxlr'])):
                # ind_key = '{}_{}'.format(jx, jy)
                # Tile key: must match the encoding used when iobs was built.
                ind_key = 10000 * int(jx) + int(jy) # '{}_{}'.format(int(i), int(j))
                if ind_key not in iobs.keys():
                    continue
                if ~iobs[ind_key].any():
                    continue
                for key in obs_vector.keys():
                    if key not in obs.keys():
                        obs[key] = []
                    # Rebuild numpy array from shared memory (no copy)
                    _buffer = obs_vector[key][ind_key]
                    narray = numpy.frombuffer(_buffer, dtype='float32')
                    _obs = narray[numpy.array(iobs[ind_key], dtype='int')]
                    obs[key] = numpy.concatenate(([obs[key], _obs]))
        # TODO: to be optimized, especially for global, ...
        # NOTE(review): flat_ind is computed but never used.
        flat_ind = j + i*grd['nylr']
        if 'lon' not in obs.keys():
            continue
        # Handle IDL and Greenwich line
        _lonobs = numpy.mod(obs['lon'] + 360, 360)
        _longrd = numpy.mod(grd['lon2'][j, i] + 360, 360)
        dlon = 180 - abs(abs(_lonobs - _longrd) - 180)
        # dist = 110. * (numpy.cos(numpy.deg2rad(grd['lat'][j]))**2 * (dlon)**2
        #               + (obs['lat'] - grd['lat2'][j, i])**2)**0.5
        # Anisotropic distance near the Equator (fac stretches latitude).
        fac = 1
        if abs(grd['lat'][j]) < 10:
            fac = fac + (10 - abs(grd['lat'][j])) / 8
        dist = 110. * (1 / fac*numpy.cos(numpy.deg2rad(grd['lat'][j]))**2 * (dlon)**2
                        + fac * (obs['lat'] - grd['lat2'][j, i])**2)**0.5
        iiobs=numpy.where((dist < resolsij))[0]
        if len(iiobs)>=2:
            # Projection of the radial look directions on (x, y).
            H = numpy.zeros((len(iiobs), 2))
            H[:, 0] = numpy.cos(obs['angle'][iiobs])
            H[:, 1] = numpy.sin(obs['angle'][iiobs])
            # Gaussian weights in space and time.
            win_s = numpy.exp(-dist[iiobs]**2/(0.5*resolsij)**2)
            time_cen = obs['time'][iiobs] - timeref
            win_t = numpy.exp(-time_cen**2/(0.5 * resoltij)**2) # exp window
            Ri = win_s * win_t
            RiH = numpy.tile(Ri, (2, 1)).T*H
            M = numpy.dot(H.T, RiH)
            # Skip badly conditioned systems instead of inverting them.
            if numpy.linalg.cond(M) < 1e3:
                Mi = numpy.linalg.inv(M)
                eta_true = numpy.dot(numpy.dot(Mi, RiH.T),
                                     obs['ur_true'][iiobs])
                eta_obs = numpy.dot(numpy.dot(Mi, RiH.T),
                                    obs['ur_obs'][iiobs])
                jobs_res.append((j, i, eta_true[0], eta_true[1], eta_obs[0],
                                 eta_obs[1]))
    # Pass results for all the processed (j, i) to the parent process
    res_queue.put(jobs_res)
    # Notify parent process about job completion
    # NOTE(review): j and i here are the last loop values and would be
    # undefined (NameError) if ``indices`` were empty — confirm chunks are
    # always non-empty (make_oi guarantees chunk_size >= 1).
    msg_queue.put((os.getpid(), j, i, None))
def read_model(p, ifile, dim_time, list_input):
    """Read one model file and average the requested variables over time.

    Parameters
    ----------
    p: parameter object.
    ifile: index of the file in the model file list.
    dim_time: number of time steps to average over.
    list_input: dict of model variables to read.

    Returns the model coordinates object, a dict of time-averaged variables
    and the list of model files.
    """
    model_data, list_file = mod.load_coordinate_model(p)
    model_data.read_coordinates(p)
    nfile = int(ifile)
    filename = os.path.join(p.indatadir, list_file[nfile])
    # Debug traces go through the logger; the original code used bare
    # print() statements, which pollute stdout in library code.
    logger.debug('reading model file %s (index %d)', filename, nfile)
    model_step_ctor = getattr(rw, p.model)
    out_var = {}
    for i in range(dim_time):
        model_step = model_step_ctor(p, ifile=(filename, ),
                                     list_input_var=list_input,
                                     time=i)
        model_step.read_var(p)
        # Accumulate the running time mean of each requested variable.
        for key in list_input.keys():
            if not key in out_var.keys():
                out_var[key] = model_step.input_var[key] / dim_time
            else:
                out_var[key] = (out_var[key]
                                + model_step.input_var[key] / dim_time)
    return model_data, out_var, list_file
def make_mask(p, key, grid):
    """ Return indices of points on the ocean (non masked value in the model)
    """
    # NOTE(review): only the entry ``key`` is read from the parameter list,
    # but the rest of the function hardwires the 'ucur' variable name, so
    # this is only consistent when key == 'ucur' — confirm with the callers.
    list_input = {key: p.list_input_var_l2d[key]}
    # Read a single time step; only the mask of the field matters here.
    model_data, out_var, list_file = read_model(p, 0, 1, list_input)
    mask_ucur = numpy.ma.getmaskarray(out_var['ucur'])
    mask_ucur = (mask_ucur | numpy.isnan(out_var['ucur']))
    out_var['ucur'] = numpy.ma.array(out_var['ucur'], mask=mask_ucur)
    # Interpolate the masked field onto the target grid under the 'mask' key.
    list_key = {'ucur':'mask'}
    interpolate_model(p, model_data, out_var, grid, list_key)
    # Ocean points are the ones that survive the interpolation unmasked.
    mask_index = numpy.where(~numpy.ma.getmaskarray(grid['mask']))
    # & (grid['mask'] != 0)
    # & (~numpy.isnan(grid['mask'])))
    # TODO TO proof if mask_index is empty
    return mask_index
def interpolate_model(p, model_data, model_var, grd, list_key):
    """Interpolate model variables onto the analysis grid with pyresample.

    ``list_key`` maps model variable names to output keys in ``grd``; the
    model fill value is replaced by NaN in the interpolated fields.
    """
    import pyresample as pr
    wrap_lon = pr.utils.wrap_longitudes
    target_lon = wrap_lon(grd['lon2'])
    # The target geometry does not depend on the variable being processed.
    target_def = pr.geometry.SwathDefinition(lons=target_lon,
                                             lats=grd['lat2'])
    for model_key, grid_key in list_key.items():
        # A third entry in the variable description selects the model subgrid.
        entry = p.list_input_var[model_key]
        subgrid = entry[2] if len(entry) > 2 else 0
        src_lon = wrap_lon(model_data.vlon[subgrid])
        src_lat = model_data.vlat[subgrid]
        if len(numpy.shape(src_lon)) <= 1:
            src_lon, src_lat = numpy.meshgrid(src_lon, src_lat)
        source_def = pr.geometry.SwathDefinition(lons=src_lon, lats=src_lat)
        interpolated = mod.interpolate_irregular_pyresample(
            source_def, model_var[model_key], target_def, p.resol,
            interp_type=p.interpolation)
        # Replace the model fill value with NaN.
        interpolated[interpolated == p.model_nan] = numpy.nan
        grd[grid_key] = interpolated
    return grd
def exc_formatter(exc):
    """Serialize a sys.exc_info() triple into a list of text lines.

    The formatted traceback can be pickled and stored in the JobsManager.
    """
    return traceback.format_exception(exc[0], exc[1], exc[2])
def err_formatter(pid, it, cycle, exc):
    """Turn an error record stored by the JobsManager into a readable line."""
    template = '/!\ Error occurred while processing it {}'
    return template.format(it)
| 29,349 | 38.715832 | 87 | py |
skimulator | skimulator-master/skimulator/regridding.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
'''L2C computation from L2b SKIM like data
'''
import numpy
import os
import sys
import glob
import scipy.interpolate
import datetime
import time
import skimulator.const as const
import skimulator.rw_data as rw
import skimulator.mod_tools as mod_tools
import skimulator.mod_run as mod
import skimulator.mod_parallel
import logging
import traceback
logger = logging.getLogger(__name__)
def make_obs(p, data, grid, obs):
    """Extract flattened L2B observations at the valid indices obs['ind'].

    Fills the ``obs`` dictionary with the model velocities, radial
    velocities, optional noise terms, coordinates and along/across-track
    geometry, and returns it.
    """
    ind = obs['ind']

    def _select(values):
        # Flatten an array-like field and keep only the valid samples.
        return numpy.squeeze(numpy.array(values).flatten())[ind]

    obs['vxt'] = _select(data['ucur'])
    obs['vyt'] = _select(data['vcur'])
    obs['vmodr'] = _select(data['ur_true'])
    if 'Instrument' in p.noise:
        obs['instr'] = _select(data['instr'])
    if 'WaveDoppler' in p.noise:
        if 'wd_est' in data.keys():
            _wd = numpy.array(data['wd']) - numpy.array(data['wd_est'])
            obs['wdre'] = numpy.squeeze(_wd.flatten())[ind]
        else:
            obs['wdre'] = _select(data['wd'])
    obs['lon'] = _select(data['lon'])
    obs['lat'] = _select(data['lat'])
    obs['lon_nadir'] = numpy.squeeze(numpy.array(data['lon_nadir'][:]))
    obs['lat_nadir'] = numpy.squeeze(numpy.array(data['lat_nadir'][:]))
    obs['time'] = _select(data['time'])
    obs['time_nadir'] = numpy.array(data['time_nadir'][:])
    obs['vindice'] = _select(data['vindice'])
    # Beam azimuth and radial look angle, both wrapped to [0, 2*pi).
    obs['dir'] = _select(numpy.mod(grid.angle + numpy.pi / 2, 2 * numpy.pi))
    obs['angle'] = _select(numpy.mod(grid.radial_angle, 2 * numpy.pi))
    obs['al_nadir'] = grid.x_al_nadir
    if p.type_satellite == 'stream':
        along = numpy.array(grid.x_al) + obs['al_nadir']
    else:
        along = (numpy.array(grid.x_al)
                 + numpy.tile(obs['al_nadir'], (obs['nbeam'], 1)).transpose())
    obs['al'] = numpy.squeeze(along.flatten())[ind]
    obs['ac'] = _select(grid.x_ac)
    return obs
def across_track(lon, lat, posting, max_ac, desc=False):
    """Build swath coordinates by rotating the nadir track sideways.

    For each nadir point the ground-track direction is estimated from its
    neighbours, then the point is rotated around that direction by
    multiples of ``posting`` km (up to ``max_ac``) to generate the swath
    coordinates on both sides of the track.

    Returns (new_lon, new_lat, ac) where ``ac`` holds the signed
    across-track distances in km.

    NOTE(review): the ``desc`` argument is currently unused.
    """
    npoints = 1
    nind = len(lon)
    SatDir = numpy.zeros((int(nind/npoints), 3))
    SatLoc = numpy.zeros((int((nind)/npoints), 3))
    s2cart = mod_tools.spher2cart(lon[::npoints], lat[::npoints])
    SatLoc[:, 0], SatLoc[:, 1], SatLoc[:, 2] = s2cart
    # Compute satellite direction (SatLoc is periodic)
    SatDir[1: -1, 0] = ((SatLoc[2:, 0] - SatLoc[: -2, 0])
                        / numpy.sqrt(SatLoc[1: -1, 0]**2
                        + SatLoc[1: -1, 1]**2 + SatLoc[1: -1, 2]**2))
    SatDir[1: -1, 1] = ((SatLoc[2:, 1] - SatLoc[: -2, 1])
                        / numpy.sqrt(SatLoc[1: -1, 0]**2
                        + SatLoc[1: -1, 1]**2 + SatLoc[1: -1, 2]**2))
    SatDir[1: -1, 2] = ((SatLoc[2:, 2] - SatLoc[: -2, 2])
                        / numpy.sqrt(SatLoc[1: -1, 0]**2
                        + SatLoc[1: -1, 1]**2 + SatLoc[1: -1, 2]**2))
    # Duplicate the direction at both ends of the track (no neighbours).
    SatDir[-1, :] = SatDir[-2, :]
    SatDir[0, :] = SatDir[1, :]
    # Rotate from earth center around satellite direction to compute
    # swath points of angles between the borders of the swath in left
    # and right swath
    nhalfswath = int(max_ac/posting)
    new_lon = numpy.zeros((nind, 2*nhalfswath+1))
    new_lat = numpy.zeros((nind, 2*nhalfswath+1))
    ac = numpy.zeros((2*nhalfswath+1))
    for i in range(0, nind, npoints):
        for j in range(0, int(nhalfswath)+1):
            #j = int(nhalfswath) - j
            ac[nhalfswath + j] = j*posting
            ac[nhalfswath - j] = -j*posting
            # Rotation angle: across-track distance over Earth radius (km).
            R = mod_tools.rotationmat3D(float((j*posting)
                                        / (const.Rearth*10**-3)),
                                        SatDir[int(i/npoints), :])
            ObsLoc = numpy.dot(R, SatLoc[int(i/npoints)])
            cs = mod_tools.cart2spher(ObsLoc[0], ObsLoc[1], ObsLoc[2])
            new_lon[i, nhalfswath+j], new_lat[i, nhalfswath+j] = cs
            # The transposed (inverse) rotation gives the opposite side.
            ObsLoc = numpy.dot(numpy.transpose(R), SatLoc[int(i/npoints)])
            cs = mod_tools.cart2spher(ObsLoc[0], ObsLoc[1], ObsLoc[2])
            new_lon[i, nhalfswath-j], new_lat[i, nhalfswath-j] = cs
    # Reverse the across-track axis orientation.
    new_lon = new_lon[:, ::-1]
    new_lat = new_lat[:, ::-1]
    return new_lon, new_lat, ac
def make_grid(grid, obs, posting, desc=False):
    """Build the regular along/across-track L2C grid from the observations.

    The nadir track is resampled at ``posting`` km along track, the swath
    coordinates are generated with across_track() and the local track
    angle is derived from finite differences of the swath positions.
    Returns the grid dictionary, or None when there is no valid
    across-track extent.
    """
    grd = {}
    if obs['ac'].max() < 0:
        return None
    max_ac = abs(obs['ac']).max()
    # OI grid
    grd['dal'] = posting
    grd['al'] = numpy.arange(obs['al_nadir'].min(), obs['al_nadir'].max(),
                             grd['dal'])
    # (a dead normalization of lon_nadir to [-180, 180) was removed here:
    # its result was never used)
    lon = scipy.interpolate.griddata(obs['al_nadir'], obs['lon_nadir'],
                                     (grd['al']), method='linear')
    lat = scipy.interpolate.griddata(obs['al_nadir'], obs['lat_nadir'],
                                     (grd['al']), method='linear')
    grd['lon'], grd['lat'], grd['ac'] = across_track(lon, lat, posting,
                                                     max_ac + grd['dal'],
                                                     desc=desc)
    grd['ac2'], grd['al2'] = numpy.meshgrid(grd['ac'], grd['al'])
    grd['nal'], grd['nac'] = numpy.shape(grd['ac2'])
    grd['time'] = scipy.interpolate.griddata(obs['al_nadir'], obs['time_nadir'],
                                             (grd['al']), method='linear')
    grd['time_nadir'] = obs['time_nadir']
    # Local track angle from finite differences of the swath coordinates,
    # with the longitude step scaled by cos(lat).
    grd['angle'] = numpy.full((grd['nal'], grd['nac']), numpy.nan)
    grd['angle'][:, :-1] = numpy.angle((grd['lon'][:, 1:] - grd['lon'][:, :-1])
                              * (numpy.cos(numpy.deg2rad(grd['lat'][:, 1:])))
                              + 1j * (grd['lat'][:, 1:] - grd['lat'][:, :-1]))
    # Duplicate the last column so the angle array is fully defined.
    grd['angle'][:, -1]= grd['angle'][:, -2]
    return grd
def perform_oi_1(grd, obs, resol, desc=False):
    """Optimal interpolation of radial velocities on the L2C grid.

    For each grid node, the radial observations closer than ``resol`` km
    are combined through a distance-weighted least-squares fit of the two
    velocity components.  Returns the along-track and across-track gridded
    components (NaN where fewer than 4 observations are available).
    """
    obsal = numpy.full((grd['nal'], grd['nac']), numpy.nan)
    obsac = numpy.full((grd['nal'], grd['nac']), numpy.nan)
    for j in range(grd['nac']):
        for i in range(grd['nal']):
            dist = numpy.sqrt((obs['ac'] - grd['ac2'][i, j])**2
                              + (obs['al'] - grd['al2'][i, j])**2)
            ios = numpy.where((dist < resol))[0]
            if len(ios) >= 4:
                # Projection of the radial look directions on (al, ac).
                H = numpy.zeros((len(ios), 2))
                H[:, 0] = numpy.cos(obs['dir'][ios])
                H[:, 1] = numpy.sin(obs['dir'][ios])
                # Gaussian distance weighting (exp window).
                Ri = numpy.exp(-dist[ios]**2 / (0.5 * resol)**2)
                RiH = numpy.tile(Ri, (2, 1)).T * H
                M = numpy.dot(H.T, RiH)
                # NOTE(review): M is inverted without a conditioning check
                # (unlike worker_make_oi) and can raise LinAlgError when all
                # look angles are nearly parallel — confirm acceptable.
                Mi = numpy.linalg.inv(M)
                eta_obs = numpy.dot(numpy.dot(Mi, RiH.T), obs['vobsr'][ios])
                obsal[i, j] = eta_obs[0]
                obsac[i, j] = eta_obs[1]
    return obsal, obsac
"""
def perform_oi_2(grd, obs, resol):
# - In parameter file ## TODO -
# Number of pixel (resolution for healpix)
nside = 256
# Number of diamonds for healpix
ndiam = 12
ntotpixel = nside * nside * ndiam
# Conditionning threshold
thresh_cond = 10
ph = 2 * numpy.pi - numpy.deg2rad(lon)
th = numpy.pi / 2 - numpy.deg2rad(lat)
pidx = heal.ang2pix(nside, th, ph)
for i in range(nbeam):
for j in range(ndata):
if ur[j, i] > -1E9:
ip = pidx[j,i]
# compute imulated model
im[ip, 1] += u[j, i]
im[ip, 2] += v[j,i]
nim[ip] += 1
# compute covariance(s) model
co = numpy.cos(rangle[j,i])
si = numpy.sin(rangle[j,i])
w = ww[j,i]
cov[ip, 0, 0] += co * co
cov[ip, 1, 0] += si * co
cov[ip, 0, 1] += si * co
cov[ip, 1, 1] += si * si
cov2[ip, 0, 0] += w * co * co
cov2[ip, 1, 0] += w * si * co
cov2[ip, 0, 1] += w * si * co
cov2[ip, 1, 1] += w * si * si
# compute data vector model
vec[ip, 0] += co * ur[j,i]
vec[ip, 1] += si * ur[j,i]
# compute data noise vector model
vec2[ip, 0] += w* co * uro[j,i]
vec2[ip, 1] += w * si * uro[j,i]
# compute doppler projection
for k in range(3):
vecdop[k, ip, 0] += w * co * tdop[j,i,k]
vecdop[k, ip, 1] += w * si * tdop[j,i,k]
"""
def read_model(p, indice):
    """Load the model steps matching the global time indices in ``indice``."""
    model_data, list_file = mod.load_coordinate_model(p)
    model_data.read_coordinates(p)
    # The reader class is the same for every step; resolve it once.
    reader_ctor = getattr(rw, p.model)
    steps = []
    for global_index in indice:
        # Split the global index into (file number, time step within file).
        file_number = int(global_index / p.dim_time)
        step_in_file = global_index - file_number * p.dim_time
        path = os.path.join(p.indatadir, list_file[file_number])
        step = reader_ctor(p, ifile=(path, ),
                           list_input_var=p.list_input_var,
                           time=step_in_file)
        step.read_var(p)
        steps.append(step)
    return model_data, steps, list_file
def interpolate_model(p, model_data, list_model_step, grd, list_obs, list_key,
                      desc=False):
    """Interpolate model variables onto the L2C swath grid with pyresample.

    ``list_model_step`` holds one model step per segment of the pass and
    ``list_obs`` the matching latitude thresholds: on an ascending pass
    (desc=False) step i fills the grid points whose latitude is above
    list_obs[i], and below it on a descending pass.  ``list_key`` maps
    model variable names to output keys in ``grd``.
    """
    import pyresample as pr
    wrap_lon = pr.utils.wrap_longitudes
    geom = pr.geometry.SwathDefinition
    interp = mod.interpolate_irregular_pyresample
    lon = wrap_lon(grd['lon'])
    # list_key = {'ucur':'u_model', 'vcur':'v_model'}
    for ikey, okey in list_key.items():
        # Initialize the output field with NaN even when the model variable
        # is not available.
        grd[okey] = numpy.full(numpy.shape(grd['lat']), numpy.nan)
        if ikey not in p.list_input_var.keys():
            continue
        # A third entry in the variable description selects the model subgrid.
        if len(p.list_input_var[ikey]) > 2:
            grid_number = p.list_input_var[ikey][2]
        else:
            grid_number = 0
        _lon = wrap_lon(model_data.vlon[grid_number])
        _lat = model_data.vlat[grid_number]
        if len(numpy.shape(_lon)) <= 1:
            _lon, _lat = numpy.meshgrid(_lon, _lat)
        swath_def = geom(lons=_lon, lats=_lat)
        for i in range(len(list_model_step)):
            model_step = list_model_step[i]
            # Latitude-based selection of the grid segment this step covers.
            if desc is False:
                ind_lat = numpy.where(grd['lat']> list_obs[i])
            else:
                ind_lat = numpy.where(grd['lat']< list_obs[i])
            model_step.read_var(p)
            grid_def = geom(lons=lon[ind_lat], lats=grd['lat'][ind_lat])
            #grid_def = pr.geometry.SwathDefinition(lons=lon, #[ind_lat],
            #                                       lats=grd['lat']) #[ind_lat])
            var = model_step.input_var[ikey]
            _tmp = interp(swath_def, var, grid_def, 4*p.resol,
                          interp_type=p.interpolation)
            grd[okey][ind_lat] = _tmp
    return grd
def write_l2(outfile, grd, obs, cycle, passn, firsttime):
    """Write one L2C pass file, overwriting ``outfile`` if it exists.

    Nadir times are stored as fractional days since ``firsttime``; they are
    converted here to ISO strings for the time_coverage_* metadata.  The
    commented-out keyword arguments correspond to currently disabled
    variables (wave doppler, Stokes drift, mss, dsigma).
    """
    if os.path.exists(outfile):
        os.remove(outfile)
    metadata = {}
    metadata['file'] = outfile
    dateformat = '%Y-%m-%dT%H:%M:%SZ'
    time_model = datetime.datetime.strptime(firsttime, '%Y-%m-%dT%H:%M:%SZ')
    grdtime0 = numpy.nanmin(grd['time_nadir'])
    grdtime1 = numpy.nanmax(grd['time_nadir'])
    # Guard against an all-NaN nadir time vector.
    if numpy.isnan(grdtime0):
        grdtime0 = 0
    if numpy.isnan(grdtime1):
        grdtime1 = 0
    # Convert fractional days into (days, seconds) offsets from first_time.
    day = numpy.floor(grdtime0)
    seconds = (grdtime0 - day) * 86400
    time0 = time_model + datetime.timedelta(day, seconds)
    day = numpy.floor(grdtime1)
    seconds = (grdtime1 - day) * 86400
    time1 = time_model + datetime.timedelta(day, seconds)
    metadata['time_coverage_start'] = time0.strftime(format=dateformat)
    metadata['time_coverage_end'] = time1.strftime(format=dateformat)
    metadata['cycle'] = cycle
    metadata['pass'] = passn
    metadata['first_time'] = firsttime
    #geolocation = {}
    #geolocation['lon']
    rw.write_l2c(metadata, grd, u_ac_obs=obs['ur_obs']['ac'],
                 u_al_obs=obs['ur_obs']['al'],
                 u_ac_noerr=obs['ur_true']['ac'], u_al_noerr=obs['ur_true']['al'],
                 angle=grd['angle'], ux_obs=obs['ur_obs']['x'],
                 uy_obs=obs['ur_obs']['y'],
                 ux_noerr=obs['ur_true']['x'], uy_noerr=obs['ur_true']['y'],
                 ux_true=grd['u_model'], uy_true=grd['v_model'],
                 u_ac_true=grd['vtrueac'], u_al_true=grd['vtrueal'],
                 u_ac_instr=obs['instr']['ac'], u_al_instr=obs['instr']['al'],
                 #u_ac_wdrem=grd['uwdreac'], u_al_wdrem=grd['uwdreal'],
                 #u_ac_uss_obs=grd['ussobsac'], u_al_uss_obs=grd['ussobsal'],
                 #u_ac_uss_oi=grd['ussoiac'], u_al_uss_oi=grd['ussoial'],
                 #u_ac_uss_true=grd['usstrueac'], u_al_uss_true=grd['usstrueal'],
                 uwnd=grd['uwnd'], vwnd=grd['vwnd'], rain=grd['rain'],
                 # mssu=grd['mssu'], mssc=grd['mssc'],
                 #u_ac_wd=obs['wd']['ac'], u_al_wd=obs['wd']['al'],
                 #u_ac_dsigma=grd['dsigmaac'], u_al_dsigma=grd['dsigmaal'],
                 )
def run_l2c(p, die_on_error=False):
    """Compute L2C products from every L2B pass file found in p.outdatadir.

    One parallel job is submitted per L2B pass file.  On success the
    selected parameters are written alongside the products and the process
    exits with status 0; any failure logs an error and exits non-zero.

    (Unused locals of the original implementation — ``iterate`` and the
    start/stop timestamps that were formatted but never used — have been
    removed.)
    """
    pattern = os.path.join(p.outdatadir, '{}_c*'.format(p.config))
    list_file = glob.glob(pattern)
    gpath = os.path.join(p.outdatadir, '{}_grid'.format(p.config))
    mod_tools.initialize_parameters(p)
    # - Loop on SKIM grid files
    jobs = []
    p2 = mod_tools.todict(p)
    for ifile in list_file:
        jobs.append([ifile, p2, gpath])
    ok = False
    try:
        ok = make_skim_l2c(p.proc_count, jobs, die_on_error, p.progress_bar)
    except skimulator.mod_parallel.MultiprocessingError:
        logger.error('An error occurred with the multiprocessing framework')
        traceback.print_exception(*sys.exc_info())
        sys.exit(1)
    except skimulator.mod_parallel.DyingOnError:
        logger.error('An error occurred and all errors are fatal')
        sys.exit(1)
    # - Write Selected parameters in a txt file
    rw.write_params(p, os.path.join(p.outdatadir,
                                    'skim_l2c.output'))
    if ok is True:
        if p.progress_bar is True:
            __ = mod_tools.update_progress(1, 'All passes have been processed',
                                           '')
        else:
            __ = logger.info('All passes have been processed')
        logger.info("\n Simulated skim files have been written in "
                    "{}".format(p.outdatadir))
        logger.info(''.join(['-'] * 61))
        sys.exit(0)
    logger.error('\nERROR: At least one of the outputs was not saved.')
    sys.exit(1)
def make_skim_l2c(_proc_count, jobs, die_on_error, progress_bar):
    """Submit the L2C jobs to a pool of workers; return True on success."""
    # Do not start more processes than there are jobs to run.
    worker_count = min(len(jobs), _proc_count)
    manager = skimulator.mod_parallel.JobsManager(
        worker_count, mod_tools.update_progress_multiproc,
        exc_formatter, err_formatter)
    ok = manager.submit_jobs(worker_method_l2c, jobs, die_on_error,
                             progress_bar)
    if not ok:
        # Display errors once the processing is done
        manager.show_errors()
    return ok
def exc_formatter(exc):
    """Serialize a sys.exc_info() triple into a list of text lines.

    The formatted traceback can be pickled and stored in the JobsManager.
    """
    return traceback.format_exception(exc[0], exc[1], exc[2])
def err_formatter(pid, grid, cycle, exc):
    """Turn an error record stored by the JobsManager into a readable line.

    A negative cycle means the error happened while processing the grid
    itself rather than a specific cycle.
    """
    if cycle < 0:
        return '/!\ Error occurred while processing grid {}'.format(grid)
    template = '/!\ Error occurred while processing cycle {} on grid {}'
    return template.format(cycle, grid)
def worker_method_l2c(*args, **kwargs):
    """Worker: build and write the L2C product for one L2B pass file.

    args = (msg_queue, l2b file path, parameters as dict, grid file prefix).
    Reads the L2B radial data and the swath geometry, performs the optimal
    interpolation on a regular along/across-track grid, interpolates the
    model truth onto the same grid and writes the L2C netCDF file.

    Fixes: the bare ``except:`` around the OI step (which also swallowed
    SystemExit/KeyboardInterrupt and hid the traceback) is now
    ``except Exception:`` with logger.exception; the shadowed first
    assignment of list_var_l2c and the unused locals ``onebeam`` and
    ``sbeam_incid`` were removed.
    """
    msg_queue, ifile, p2, gpath = args[:]
    p = mod_tools.fromdict(p2)
    # Pass number and cycle are encoded in the L2B file name.
    passn = int(ifile[-6:-3])
    if passn % 2 == 0:
        desc = True
    else:
        desc = False
    cycle = int(ifile[-10:-8])
    fileg = '{}_p{:03d}.nc'.format(gpath, passn)
    data = rw.Sat_SKIM(ifile=ifile)
    grid = rw.Sat_SKIM(ifile=fileg)
    # Variables to be gridded (reduced set; 'wdre', 'dsigma', 'ussr',
    # 'ussr_est' are currently disabled).
    list_var_l2c = ['ur_obs', 'ur_true', 'instr', 'wd']
    l2b_dic = data.load_data(p, list_var_l2c, ucur=[],
                             vcur=[], time=[], lon_nadir=[], lat_nadir=[],
                             lon=[], lat=[], time_nadir=[], vindice=[],
                             uwnd=[], vwnd=[])
    #instr=[], uwd=[], uwd_est=[], dsigma=[], rain=[], ussr=[], ussr_est=[], uwnd=[], vwnd=[])
    grid.load_swath(p, radial_angle=[], angle=[], x_al=[], x_al_nadir=[],
                    x_ac=[])
    dic = {}
    l2c_dic = {}
    for key in list_var_l2c:
        dic[key] = {}
        l2c_dic[key] = {}
    # Infer the number of samples and beams from the radial truth.
    test = numpy.array(l2b_dic['ur_true'])
    if len(numpy.shape(test)) == 2:
        nil, nbeams = numpy.shape(test)
    else:
        nil = numpy.shape(test)[0]
        nbeams = 1
    ### TODO Change this
    nwnd = numpy.sqrt(numpy.array(data.uwnd)**2 + numpy.array(data.vwnd)**2)
    for key in list_var_l2c:
        dic[key]['vobsr'] = numpy.squeeze(numpy.array(l2b_dic[key]).flatten())
        dic[key] = flag_data(dic[key], p.rain_threshold)
        if key == 'ur_true':
            dic[key]['vmodr'] = numpy.squeeze(numpy.array(l2b_dic[key]).flatten())
        dic[key]['nsamp'], dic[key]['nbeam'] = nil, nbeams
    dic = select_valid_data(dic)
    # Need a minimum of valid radial samples and nadir points to grid.
    if len(dic['ur_obs']['vobsr'][:]) > 2 and len(data.lon_nadir) > 2:
        try:
            for key in list_var_l2c:
                dic[key] = make_obs(p, l2b_dic, grid, dic[key])
            grd = make_grid(grid, dic[list_var_l2c[0]], p.posting, desc=desc)
            grdnoerr = make_grid(grid, dic['ur_true'], p.posting, desc=desc)
            if grd is None:
                logger.info('no grid found')
                return
            if grdnoerr is None:
                logger.info('no grid found')
                return
            # OI
            for key in list_var_l2c:
                if key == 'ur_true':
                    _grd = grdnoerr.copy()
                else:
                    _grd = grd.copy()
                l2c_dic[key]['al'], l2c_dic[key]['ac'] = perform_oi_1(_grd, dic[key], p.resol, desc=desc)
        except Exception:
            # Log the full traceback instead of silently dropping it.
            logger.exception(f'pass {passn}')
            return
        # Detect a change of model file along the pass.
        vindice = l2b_dic['vindice']
        diff_indice = vindice[1:] - vindice[:-1]
        ind = numpy.where(diff_indice != 0)[0]
        first_lat = numpy.min(grd['lat'])
        sign_uv = 1
        for key in list_var_l2c:
            grd[key] = l2c_dic[key]
        #grd['vmodac'] = + grdnoerr['vobsac'][:]
        #grd['vmodal'] = + grdnoerr['vobsal'][:]
        if desc is True:
            first_lat = numpy.max(grd['lat'])
            sign_uv = -1
        if ind.any():
            vindice = [dic['ur_obs']['vindice'][0], dic['ur_obs']['vindice'][ind[0] + 1]]
            ind_lat = [first_lat, dic['ur_obs']['lat'][ind[0] + 1]]
        else:
            vindice = [dic['ur_obs']['vindice'][0], ]
            ind_lat = [first_lat, ]
        vindice = numpy.array(vindice)
        # Negative indices mean "no model step": clamp to the first one.
        vmask = (vindice < 0)
        if numpy.any(vmask):
            vindice[numpy.where(vmask)] = 0
        model_data, model_step, list_file2 = read_model(p, vindice)
        list_model_key = {'ucur':'u_model', 'vcur':'v_model', 'uwnd': 'uwnd',
                          'vwnd': 'vwnd','mssx':'mssu', 'mssy': 'mssc', 'uuss': 'uuss', 'vuss': 'vuss' }
        if (p.rain is True) and (p.rain_file is None):
            list_model_key['rain'] = 'rain'
        else:
            grd['rain'] = numpy.array([])
        grd = interpolate_model(p, model_data, model_step, grd, ind_lat,
                                list_model_key, desc=desc)
        ac_thresh = p.ac_threshold
        grd, l2c_dic = alac2xy(grd, l2c_dic, ac_thresh, sign_uv)
        grd, l2c_dic = mask_from_model(grd, l2c_dic, list_model_key)
        pattern_out = '{}{}_l2c_c{:02d}_p{:03d}.nc'.format(p.config,
                                                           p.config_l2c, cycle,
                                                           passn)
        outfile = os.path.join(p.outdatadir, pattern_out)
        write_l2(outfile, grd, l2c_dic, cycle, passn, p.first_time)
    msg_queue.put((os.getpid(), ifile, None, None))
def alac2xy(grd: dict, l2c_dic: dict, ac_thresh: float, sign_uv: float
            )-> (dict, dict):
    """Rotate (along, across)-track components into (x, y) components.

    Cells closer to nadir than ``ac_thresh`` are blanked in the
    across-track component, and ``sign_uv`` flips the sign for descending
    passes.  Also projects the model truth onto (across, along).
    """
    near_nadir = numpy.abs(grd['ac2']) < ac_thresh
    for key in l2c_dic.keys():
        comp = l2c_dic[key]
        comp['ac'][near_nadir] = numpy.nan
        comp['x'] = sign_uv * (comp['ac'] * numpy.cos(grd['angle'])
                               + comp['al'] * numpy.cos(grd['angle'] + numpy.pi / 2))
        comp['y'] = sign_uv * (comp['ac'] * numpy.sin(grd['angle'])
                               + comp['al'] * numpy.sin(grd['angle'] + numpy.pi / 2))
    grd['vtrueac'] = sign_uv * (grd['u_model'] * numpy.cos(grd['angle'])
                                + grd['v_model'] * numpy.sin(grd['angle']))
    grd['vtrueal'] = sign_uv * (-grd['u_model'] * numpy.sin(grd['angle'])
                                + grd['v_model'] * numpy.cos(grd['angle']))
    return grd, l2c_dic
def mask_from_model(grd: dict, l2c_dic: dict, list_model_key: dict
                    ) -> (dict, dict):
    """Blank every gridded field where the model currents are invalid.

    A cell is invalid when either model component is exactly zero, masked,
    or unrealistically large (> 10 m/s).  ``list_model_key`` maps model
    variable names to the grid keys that hold their interpolated fields
    (the annotation previously said ``list``, which was wrong).
    """
    _masku = numpy.ma.getmaskarray(grd['u_model'])
    _maskv = numpy.ma.getmaskarray(grd['v_model'])
    mask = ((grd['u_model'] == 0) | (grd['v_model'] == 0) | _masku | _maskv
            | (abs(grd['u_model']) > 10) | (abs(grd['v_model']) > 10))
    # Blank the interpolated model fields themselves...
    for key in list_model_key.values():
        grd[key][mask] = numpy.nan
    grd['vtrueac'][mask] = numpy.nan
    grd['vtrueal'][mask] = numpy.nan
    # ...and every OI output field.
    for key in l2c_dic.keys():
        for key2 in l2c_dic[key].keys():
            l2c_dic[key][key2][mask] = numpy.nan
    return grd, l2c_dic
def select_valid_data(dic: dict) -> dict:
    """Filter the per-variable observation dictionaries to valid samples.

    The selection index is driven by the model truth ('ur_true': value
    above the fill threshold -100) and, when present, by the wave-doppler
    residual ('wdre': finite and |value| < 100).  The selected index array
    is stored under 'ind' in each sub-dictionary.
    """
    if 'wdre' in dic.keys() and 'ur_true' in dic.keys():
        ind_mod = numpy.where((dic['ur_true']['vmodr'] > -100)
                              & (abs(dic['wdre']['vobsr']) < 100)
                              & (numpy.isfinite(dic['wdre']['vobsr'])))[0]
        # BUGFIX: this previously read dic['noerr'], a key that never
        # exists, and raised a KeyError; the values live under 'ur_true'.
        dic['ur_true']['vobsr'] = dic['ur_true']['vobsr'][ind_mod]
        dic['ur_true']['ind'] = ind_mod
    elif 'ur_true' in dic.keys():
        ind_mod = numpy.where((dic['ur_true']['vmodr'] > -100))[0]
        dic['ur_true']['vobsr'] = dic['ur_true']['vobsr'][ind_mod]
        dic['ur_true']['ind'] = ind_mod
    else:
        ind_mod = None
    if 'ur_obs' in dic.keys():
        # NOTE(review): ind_obs is computed but ind_mod is applied below so
        # that every field keeps the same sample selection — confirm intent.
        ind_obs = numpy.where((abs(dic['ur_obs']['vobsr']) < 100)
                              & (numpy.isfinite(dic['ur_obs']['vobsr'])))[0]
        dic['ur_obs']['vobsr'] = dic['ur_obs']['vobsr'][ind_mod]
        dic['ur_obs']['ind'] = ind_mod
    else:
        # NOTE(review): this clobbers a valid ind_mod when 'ur_obs' is
        # absent; harmless in the current caller, which always provides it.
        ind_mod = None
    if 'wdre' in dic.keys():
        indwd = numpy.where((abs(dic['wdre']['vobsr']) < 100)
                            & (numpy.isfinite(dic['wdre']['vobsr'])))[0]
        dic['wdre']['vobsr'] = dic['wdre']['vobsr'][indwd]
        dic['wd']['vobsr'] = dic['wd']['vobsr'][indwd]
        dic['wdre']['ind'] = indwd
    elif 'wd' in dic.keys():
        dic['wd']['vobsr'] = dic['wd']['vobsr'][ind_mod]
        dic['wd']['ind'] = ind_mod
    # All remaining variables follow the model-driven selection.
    for key in dic.keys():
        if key != 'ur_true' and key != 'ur_obs' and key != 'wdre' and key != 'wd':
            dic[key]['vobsr'] = dic[key]['vobsr'][ind_mod]
            dic[key]['ind'] = ind_mod
    return dic
def flag_data(dic: dict, rain_threshold: float) -> dict:
    """Blank observations flagged by rain / dsigma / wave-doppler checks.

    NOTE(review): the branches test keys ('rain', 'dsigma', 'wdre', 'nwnd')
    on the dictionary they receive, but the caller in worker_method_l2c
    passes per-variable sub-dicts that only carry 'vobsr', so in practice
    none of these branches fire; the 'rain' branch would also raise a
    KeyError on dic['obs'] if it ever ran — confirm intended wiring.
    """
    if 'rain' in dic.keys():
        dic['obs']['vobsr'][dic['obs']['rain'] > rain_threshold] = numpy.nan
        #obs['vobsr'][obs['rain']>p.rain_threshold] = numpy.nan
    if 'dsigma' in dic.keys():
        dic['dsigma'][dic['dsigma']>0.5] = numpy.nan
    if 'wdre' in dic.keys():
        dic['wdre']['vobsr'][abs(dic['wdre']['vobsr'])>1] = numpy.nan
        # Discard wave-doppler residuals at low wind speed (< 4 m/s).
        dic['wdre']['vobsr'][dic['nwnd'].flatten() < 4] = numpy.nan
    return dic
| 26,773 | 41.633758 | 109 | py |
skimulator | skimulator-master/skimulator/mod_run.py | '''Module to create one beam data:
\n
\n
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
'''
import os
from scipy import interpolate
import numpy
import math
import glob
import sys
import time
import pickle
import datetime
import logging
from . import build_swath
from . import rw_data
from . import mod_tools
from . import const
from .error import utils
from typing import Tuple, Dict
import multiprocessing
# Define logger level for debug purposes
logger = logging.getLogger(__name__)
#logger = multiprocessing.log_to_stderr()
#logger.setLevel(logging.DEBUG)
def load_sgrid(sgridfile, p):
    '''Load the SKIM swath and nadir geometry stored in ``sgridfile``.'''
    sgrid = rw_data.Sat_SKIM(ifile=sgridfile)
    # Ask the reader to populate these attributes from the grid file.
    sgrid.load_swath(p, cycle=0, x_al=[], x_al_nadir=[], x_ac=[],
                     al_cycle=0, timeshift=0)
    # The pass number is encoded in the file name: ..._pXXX.nc
    sgrid.ipass = int(sgridfile[-6: -3])
    return sgrid
def load_coordinate_model(p):
    """Return (model coordinate reader, list of model files).

    The reader is only constructed when a list of model files is provided
    through ``p.file_input``.
    """
    if p.file_input is not None:
        list_file = [line.strip() for line in open(p.file_input)]
    else:
        list_file = None
    # - Read model input coordinates '''
    # If a list of model files are specified, read model file coordinates
    if p.file_input is not None:
        model_data_ctor = getattr(rw_data, p.model)
        if p.file_grid_model is not None:
            _filename = list(p.file_grid_model)
        else:
            logger.info("WARNING: First file of list of files is used for"
                        "coordinates only")
            _filename = list_file[0].split(',')
        filename = [os.path.join(p.indatadir, ifile) for ifile in _filename]
        model_data = model_data_ctor(p, ifile=filename,
                                     lon=list(p.lon), lat=list(p.lat))
    return model_data, list_file
def interpolate_regular_1D(lon_in, lat_in, var, lon_out, lat_out, Teval=None):
    '''Interpolate ``var`` from a regular grid with 1D coordinates onto
    scattered output points.

    A bilinear RectBivariateSpline (kx=ky=1, no smoothing) is used.  The
    input mask is interpolated alongside the data; any output point whose
    interpolated mask value is positive is set to NaN.  When the input
    longitudes wrap around 0/360 degrees, the grid is split at 180 degrees
    and each half is interpolated separately.

    Returns (var_out, Teval) so Teval can be reused for further variables
    on the same grid.
    '''
    # To correct for IDL issues: the spline requires strictly increasing
    # longitudes, so sort the input grid (and the data columns) first.
    ind_sort = numpy.argsort(lon_in)
    lon_in = lon_in[ind_sort]
    var = var[:, ind_sort]
    # Grid crossing the 0/360 meridian: split at 180 degrees.
    if numpy.max(lon_in) > 359 and numpy.min(lon_in) < 1:
        ind_in1 = numpy.where(lon_in <= 180)
        ind_in2 = numpy.where(lon_in > 180)
        # lon_in[lon_in > 180] = lon_in[lon_in > 180] - 360
        # lon_in = np.mod(lon_in - (lref - 180), 360) + (lref - 180)
        # lon_in = numpy.rad2deg(numpy.unwrap(numpy.deg2rad(lon_in)))
        ind_out1 = numpy.where(lon_out <= 180)
        ind_out2 = numpy.where(lon_out > 180)
        # lon_out[lon_out > 180] = lon_out[lon_out > 180] - 360
        # lon_out = numpy.rad2deg(numpy.unwrap(numpy.deg2rad(lon_out)))
        interp = interpolate.RectBivariateSpline
        mask_teval = (numpy.isnan(var) | numpy.ma.getmaskarray(var))
        # Interpolate the mask only when the caller did not supply one.
        if Teval is None:
            Teval = numpy.zeros(numpy.shape(lon_out))
            if ind_out1[0].any():
                _tmp = interp(lat_in, lon_in[ind_in1],
                              mask_teval[:, ind_in1[0]], kx=1, ky=1,
                              s=0)
                Teval[ind_out1] = _tmp.ev(lat_out[ind_out1], lon_out[ind_out1])
            if ind_out2[0].any():
                _tmp = interp(lat_in, lon_in[ind_in2],
                              mask_teval[:, ind_in2[0]], kx=1, ky=1,
                              s=0)
                Teval[ind_out2] = _tmp.ev(lat_out[ind_out2], lon_out[ind_out2])
        # Trick to avoid nan in interpolation: zero-fill masked values, the
        # interpolated mask (Teval) flags them again afterwards.
        var_mask = + var
        var_mask[numpy.isnan(var_mask) | numpy.ma.getmaskarray(var_mask)] = 0.
        # Interpolate variable on each longitude half independently.
        var_out = numpy.full(numpy.shape(lon_out), numpy.nan)
        if ind_out1[0].any():
            _tmp = interp(lat_in, lon_in[ind_in1], var_mask[:, ind_in1[0]],
                          kx=1, ky=1, s=0)
            var_out[ind_out1] = _tmp.ev(lat_out[ind_out1], lon_out[ind_out1])
        if ind_out2[0].any():
            _tmp = interp(lat_in, lon_in[ind_in2], var_mask[:, ind_in2[0]],
                          kx=1, ky=1, s=0)
            var_out[ind_out2] = _tmp.ev(lat_out[ind_out2], lon_out[ind_out2])
    else:
        mask_teval = (numpy.isnan(var) | numpy.ma.getmaskarray(var))
        # Interpolate mask if it has not been done (Teval is None)
        interp = interpolate.RectBivariateSpline
        if Teval is None:
            _Teval = interp(lat_in, lon_in, mask_teval, kx=1, ky=1, s=0)
            Teval = _Teval.ev(lat_out, lon_out)
        # Trick to avoid nan in interpolation
        var_mask = + var
        var_mask[numpy.isnan(var_mask) | numpy.ma.getmaskarray(var_mask)] = 0.
        # Interpolate variable
        _var_out = interp(lat_in, lon_in, var_mask, kx=1, ky=1, s=0)
        var_out = _var_out.ev(lat_out, lon_out)
    # Mask variable with Teval: points contaminated by masked input cells.
    var_out[Teval > 0] = numpy.nan
    #var_out[Teval > 0] = numpy.nan
    #var_out[abs(var_out) > 1000] = numpy.nan
    return var_out, Teval
def interpolate_irregular_pyresample(swath_in, var, grid_out, radius,
                                     interp_type='nearest'):
    """Resample ``var`` from an irregular grid onto ``grid_out`` with
    pyresample.

    ``radius`` is given in kilometres; nearest-neighbour resampling is the
    default, any other ``interp_type`` selects Gaussian weighting.
    """
    import pyresample as pr
    if interp_type == 'nearest':
        var_out = pr.kd_tree.resample_nearest(
            swath_in, var, grid_out, radius_of_influence=radius * 10**3,
            epsilon=100)
    else:
        var_out = pr.kd_tree.resample_gauss(
            swath_in, var, grid_out, radius_of_influence=radius * 3 * 10**3,
            sigmas=radius * 10**3, fill_value=None)
    return var_out
def interp_on_swath(cycle, list_file, modelbox,
                    sgrid, model_data, modeltime,
                    p):
    '''Interpolate model variables on the SKIM swath for one cycle.

    For each model time step overlapping the pass, the corresponding model
    file is read and every variable listed in p.list_input_var is
    interpolated onto the part of the swath observed around that time
    (+- p.timestep / 2).  Points outside ``modelbox`` are set to NaN.

    Returns (output_var_i, time): the dictionary of interpolated variables
    (plus 'vindice', the model file index used for each sample) and the
    observation time in days since the start of the simulation.
    '''
    # Initialize output variables, all NaN, one value per swath sample.
    output_var_i = {}
    shape_i = numpy.shape(sgrid.lon)[0]
    for key in p.list_input_var.keys():
        output_var_i[key] = numpy.full(shape_i, numpy.nan)
    output_var_i['vindice'] = numpy.full(shape_i, numpy.nan)
    date1 = cycle * sgrid.cycle
    # Definition of the time in the model
    time = sgrid.time / 86400. + date1 # convert seconds in days
    lon = sgrid.lon
    lat = sgrid.lat
    timeshift = sgrid.timeshift / 86400. # in days
    # Look for satellite data that are beween step-p.timestep/2 and
    # step+p.timestep/2
    if p.file_input is not None:
        lon2D = {}
        lat2D = {}
        # meshgrid in 2D for interpolation purposes (irregular grids only)
        for key in model_data.vlon.keys():
            if p.grid == 'irregular':
                lon2D[key], lat2D[key] = numpy.meshgrid(model_data.vlon[key],
                                                        model_data.vlat[key])
    # Indices of the model time steps that overlap this pass.
    index_filemodel = numpy.where(((time[-1] - timeshift) >=
                                   (modeltime-p.timestep/2.))
                                  & ((time[0] - timeshift) <
                                     (modeltime+p.timestep/2.)))
    # local variable to find time record in file for WW3
    nfile = 0
    time_offset = 0
    # At each step, look for the corresponding time in the satellite data
    for ifile in index_filemodel[0]:
        # If there are satellite data, Get true velcoity from model
        if numpy.shape(index_filemodel)[1] > 0:
            # Select part of the track that corresponds to the time of the
            # model (+-timestep/2)
            ind_time = numpy.where(((time-timeshift) >=
                                    (modeltime[ifile]-p.timestep/2.))
                                   & ((time-timeshift) <
                                      (modeltime[ifile]+p.timestep/2.)))
        else:
            logger.error('No model file is found around time')
            sys.exit(1)
        # Load data from this model file
        # if output from ww3, time dimension is >1 (hourly outputs,
        # one file per month): conversion of ifile into file number
        # and record number
        ## TODO : Clean
        # NOTE(review): filetime/nfile are recomputed unconditionally three
        # lines below, so this first computation and the if-branch appear to
        # be dead code kept from an earlier version -- confirm before removal.
        filetime = (ifile - time_offset)%p.dim_time
        # read next file when the time dimension is reached
        if filetime >= (time_offset + p.dim_time):
            time_offset += p.dim_time
            nfile += 1
            filetime = (ifile - time_offset)%p.dim_time
        nfile = int(ifile /p.dim_time)
        filetime = ifile - nfile * p.dim_time
        _tmpfilename = list_file[nfile]
        filename = os.path.join(p.indatadir, _tmpfilename)
        model_step_ctor = getattr(rw_data, model_data.model)
        model_step = model_step_ctor(p, ifile=(filename, ),
                                     list_input_var=p.list_input_var,
                                     time=filetime)
        input_var_i = {}
        # Read the model variables, restricted to the model box indices.
        if p.grid == 'regular':
            model_step.read_var(p, ind_lon=model_data.ind_lon)
            for key in model_step.input_var.keys():
                grid_key = model_step.numgrid[key]
                _indlat = model_data.model_index_lat[grid_key]
                _tmp = model_step.input_var[key][_indlat, :]
                _indlon = model_data.model_index_lon[grid_key]
                input_var_i[key] = +_tmp[:, _indlon]
        else:
            model_step.read_var(p, index=None)
            for key in model_step.input_var.keys():
                _ind = model_data.model_index[model_step.numgrid[key]]
                input_var_i[key] = + model_step.input_var[key][_ind]
        # - Interpolate Model data on a SKIM grid and/or along the
        #   nadir track
        # if grid is regular, use interpolate.RectBivariateSpline to
        # interpolate
        if p.grid == 'regular' and \
           len(numpy.shape(model_data.vlon[0])) == 1:
            # Flatten satellite grid and select part of the track
            # corresponding to the model time
            for key in model_step.input_var.keys():
                mlon = model_data.vlon[model_step.numgrid[key]]
                mlat = model_data.vlat[model_step.numgrid[key]]
                _tmp, Teval_u = interpolate_regular_1D(mlon, mlat,
                                                       input_var_i[key],
                                                       lon[ind_time[0]],
                                                       lat[ind_time[0]])
                output_var_i[key][ind_time[0]] = + _tmp
        else:
            # Grid is irregular, interpolation can be done using
            # pyresample module if it is installed or griddata
            # function from scipy.
            # Note that griddata is slower than pyresample functions.
            try:
                import pyresample as pr
                lon_wrap = pr.utils.wrap_longitudes
                geom = pr.geometry.SwathDefinition
                lon = lon_wrap(lon)
                grid_def = geom(lons=lon, lats=lat)
                for key in model_step.input_var.keys():
                    grid_key = model_step.numgrid[key]
                    _ind = model_data.model_index[grid_key]
                    _mlon = lon_wrap(model_data.vlon[grid_key])
                    _mlat = model_data.vlat[grid_key]
                    # 1D coordinates: fall back to the 2D meshgrid built
                    # before the loop.
                    if len(_mlon[0]) <= 1:
                        _mlon = lon_wrap(lon2D[grid_key])
                        _mlat = lat2D[grid_key]
                    swath_def = geom(lons=_mlon[_ind], lats=_mlat[_ind])
                    _tmp = interpolate_irregular_pyresample(
                        swath_def, input_var_i[key],
                        grid_def,
                        p.posting,
                        interp_type=p.interpolation)
                    output_var_i[key][ind_time[0]] = + _tmp
            except ImportError:
                # pyresample not installed: slower scipy.griddata fallback.
                for key in model_step.input_var.keys():
                    grid_key = model_step.numgrid[key]
                    _ind = model_data.model_index[grid_key]
                    _mlon = model_data.vlon[grid_key]
                    _mlat = model_data.vlat[grid_key]
                    if len(_mlon) <= 1:
                        _mlon = lon2D[grid_key]
                        _mlat = lat2D[grid_key]
                    lonravel = + _mlon[_ind].ravel()
                    latravel = + _mlat[_ind].ravel()
                    _tmp = + input_var_i[key].ravel()
                    interp = interpolate.griddata((lonravel, latravel),
                                                  _tmp, (lon[ind_time[0]],
                                                  lat[ind_time[0]]),
                                                  method=p.interpolation)
                    output_var_i[key][ind_time[0]] = + interp
        # Force value outside modelbox at nan (first branch handles a box
        # that straddles the 0/360 meridian, i.e. lonmin > lonmax).
        if modelbox[0] > modelbox[1]:
            for key in model_step.input_var.keys():
                _ind = numpy.where(((lon > modelbox[0])
                                    & (lon < modelbox[1]))
                                   | (lat < modelbox[2])
                                   | (lat > modelbox[3]))
                output_var_i[key][_ind] = numpy.nan
        else:
            for key in model_step.input_var.keys():
                _ind = numpy.where((lon < modelbox[0])
                                   | (lon > modelbox[1])
                                   | (lat < modelbox[2])
                                   | (lat > modelbox[3]))
                output_var_i[key][_ind] = numpy.nan
        output_var_i['vindice'][ind_time[0]] = ifile
    else:
        pass
    # TODO to proof: creation of empty mss if no mss and p.instr is True
    return output_var_i, time
def compute_geov(input_dic:dict, ac_angle: numpy.array,
                 radial_angle, p, fc: int=1)-> Dict[str, numpy.ndarray]:
    """Build the dictionary of 'true' geophysical variables for one pass.

    Requires 'ucur', 'vcur' and 'ssh' in ``input_dic`` (plus everything
    compute_sigma_water needs); the radial velocity is the projection of
    the current on ``radial_angle``.
    """
    sigma0 = compute_sigma(input_dic, p.list_angle, ac_angle, p, fc=fc)
    ur_true = utils.proj_radial(input_dic['ucur'][:, fc:],
                                input_dic['vcur'][:, fc:],
                                radial_angle)
    return {'sigma0': sigma0,
            'ur_true': ur_true,
            'ssh_true': input_dic['ssh'],
            'radial_angle': radial_angle}
def compute_sigma_water(input_var, beam_angle, radial_angle, fc: int=1):
    """Compute the radar backscatter coefficient (sigma0) over open water.

    Parameters
    ----------
    input_var : dict
        Model variables; must contain the mean square slope components
        ('mssx', 'mssy', 'mssd'), the wind ('uwnd', 'vwnd') and the current
        ('ucur', 'vcur').  Modified in place: 'mssx', 'mssy', 'mssxy',
        'mssc', 'mssu' are overwritten with the combined long+short wave
        values.
    beam_angle : sequence
        Beam incidence angle(s) in degrees.
    radial_angle : numpy.ndarray
        Azimuth of each radial sample (radians); its first dimension is the
        number of samples.
    fc : int, optional
        Number of leading columns discarded from the 2D mss arrays.

    Returns
    -------
    numpy.ndarray or None
        sigma0 over water, or None when a required variable is missing.
    """
    # Bug fix: 'uwnd' was listed twice in the original tuple (deduplicated,
    # same membership semantics).
    required = ('mssx', 'mssy', 'mssd', 'uwnd', 'vwnd', 'ucur', 'vcur')
    missing = [_ for _ in required if _ not in input_var.keys()]
    if 0 < len(missing):
        logger.info('Missing file to compute sigma, instrumental error not'
                    ' computed')
        logger.info('Missing parameters: {}'.format(', '.join(missing)))
        return None
    else:
        mssd = numpy.deg2rad(input_var['mssd'])
        # Unary + copies the arrays so the inputs are not mutated below.
        mssu = + input_var['mssx']
        mssc = + input_var['mssy']
        uwnd = + input_var['uwnd']
        vwnd = + input_var['vwnd']
        ucur = + input_var['ucur']
        vcur = + input_var['vcur']
        # Long-wave mss rotated from the (up/cross) frame to the (x, y) axes.
        mssxl = mssu * numpy.cos(mssd)**2 + mssc * numpy.sin(mssd)**2
        mssyl = mssu * numpy.sin(mssd)**2 + mssc * numpy.cos(mssd)**2
        mssxyl = (mssu - mssc) * numpy.sin(2 * mssd) / 2
        # Wind speed relative to the surface current; zero-wind points are
        # masked with NaN to avoid a division by zero in arctan2/log.
        nwr = numpy.sqrt((uwnd - ucur)**2 + (vwnd - vcur)**2)
        mask = (nwr == 0)
        nwr[mask] = numpy.nan
        uwnd[mask] = numpy.nan
        vwnd[mask] = numpy.nan
        ucur[mask] = numpy.nan
        vcur[mask] = numpy.nan
        wrd = numpy.pi / 2 - numpy.arctan2(vwnd - vcur, uwnd - ucur)
        mssshort = numpy.log(nwr + 0.7) * 0.009
        # Replace nan values by 0 to avoid a runtime warning (nan values
        # will be restored afterwards)
        mssshort_nanmask_ind = numpy.where(numpy.isnan(mssshort))
        mssshort[mssshort_nanmask_ind] = 0
        mssshort[mssshort < 0] = 0
        mssshort[mssshort_nanmask_ind] = numpy.nan # restore nan values
        # Directionality for short wave mss (if 0.5: isotrophic)
        facssdw = 0.6
        mssds = facssdw * mssshort
        msscs = mssshort - mssds
        mssxs = msscs * numpy.sin(wrd)**2 + mssds * numpy.cos(wrd)**2
        mssys = mssds * numpy.sin(wrd)**2 + msscs * numpy.cos(wrd)**2
        mssxys = abs(mssds - msscs) * numpy.sin(2* wrd) / 2
        # Store the combined (long + short wave) slopes back in the dict.
        input_var['mssx'] = mssxs + mssxl
        input_var['mssy'] = mssys + mssyl
        input_var['mssxy'] = mssxys + mssxyl
        input_var['mssc'] = mssc
        input_var['mssu'] = mssu
        R2 = 0.55
        nsample = numpy.shape(radial_angle)[0]
        rbeam_angle = [numpy.deg2rad(beam_angle), ]*nsample
        mssx = (mssxs + mssxl)[:, fc:]
        mssy = (mssys + mssyl)[:, fc:]
        mssxy = (mssxys + mssxyl)[:, fc:]
        # Degenerate (zero) slopes would divide by zero: mask them.
        mask = ((mssx == 0) | (mssy == 0))
        mssx[mask] = numpy.nan
        mssy[mask] = numpy.nan
        expo = (-0.5 * numpy.tan(rbeam_angle)**2 * (numpy.cos(radial_angle)**2
                * mssy + numpy.sin(radial_angle)**2 * mssx
                - numpy.sin(2 * radial_angle) * mssxy) / (mssx * mssy))
        coeff = R2 / (2 * numpy.cos(rbeam_angle)**4 * numpy.sqrt(mssx * mssy))
        sigma_water = coeff * numpy.exp(expo)
        # TODO sigma_water[:, 0] = numpy.nan
        #del input_var['mssd']
        return sigma_water
def compute_sigma(output_var_i, beam_angle, radial_angle, p, fc: int=1):
    """Combine the water and ice backscatter into a single sigma0 field.

    When sea ice is enabled and present in ``output_var_i``, sigma0 is the
    concentration-weighted mix of the water value and a constant ice value
    (2.5 for 6-degree beams, 1 otherwise); NaN concentrations count as
    open water.
    """
    sigma_water = compute_sigma_water(output_var_i, beam_angle, radial_angle,
                                      fc=fc)
    if (p.ice is not True) or ('ice' not in output_var_i.keys()):
        return sigma_water
    vice = output_var_i['ice'][:, fc:]
    beam_matrix = numpy.array([beam_angle, ] * numpy.shape(vice)[0])
    sigma_ice = numpy.ones(numpy.shape(vice))
    sigma_ice[beam_matrix == 6] = 2.5
    # TODO proof when angle is different from 6 and 12
    #if beam_angle == 6:
    #    sigma_ice = 2.5
    #elif beam_angle == 12:
    #    sigma_ice = 1
    # NaN ice concentrations are treated as zero (open water).
    nan_ice = numpy.isnan(vice)
    c_ice = numpy.ma.MaskedArray(vice, mask=nan_ice)
    c_ice[c_ice.mask] = 0
    return (1 - c_ice) * sigma_water + c_ice * sigma_ice
def save_SKIM(cycle, sgrid, time, outdata, p):
    """Write one simulated cycle/pass of SKIM data to a netCDF file.

    The output name is built from p.file_output, the (1-based) cycle
    number and the pass number.
    """
    fname = '{}_c{:02d}_p{:03d}.nc'.format(p.file_output, cycle + 1,
                                           sgrid.ipass)
    output = rw_data.Sat_SKIM(ifile=fname, lon=sgrid.lon, lat=sgrid.lat,
                              time=time, x_al=sgrid.x_al, cycle=sgrid.cycle)
    output.gridfile = sgrid.gridfile
    output.ipass = sgrid.ipass
    output.ncycle = sgrid.ncycle
    output.write_data(p, outdata)
    return None
| 20,831 | 43.8 | 79 | py |
skimulator | skimulator-master/skimulator/grid_check.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import zlib
import base64
import struct
import hashlib
import logging
logger = logging.getLogger(__name__)


def _text_file_md5(file_path, block_size=1024*1024):
    """Return the MD5 hex digest of the text file at *file_path*.

    The file is read in text mode in chunks of *block_size* characters and
    re-encoded as UTF-8 before hashing.

    Raises
    ------
    FileNotFoundError
        If *file_path* does not exist.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(file_path)
    md5 = hashlib.md5()
    with open(file_path, 'r', encoding='utf-8') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data.encode('utf-8'))
    return md5.hexdigest()


def get_hash_from_grid_params(p):
    """Serialize the grid-defining parameters of *p* into a binary blob.

    Layout: a flags byte (bit set when the corresponding optional parameter
    is ABSENT), the MD5 of the orbit file, the rotation speed, the cycle
    duration and the number of beams, followed by the per-beam position,
    angle and shift lists, then the optional parameters that are present
    (column order, modelbox, shift_lon, shift_time).
    """
    orbit_path = p.filesat  # Path to a text file containing the elevation, the cycle and the track of the satellite
    orbit_cols = p.order_orbit_col  # None|int[3] (order of columns in orbit file)
    modelbox = p.modelbox  # None|float[4] (lonmin, lonmax, latmin, latmax)
    rotation_speed = p.rotation_speed  # float (rotation speed of the antenna in tr/min)
    cycle = p.cycle  # float (beam cycle duration)
    list_pos = p.list_pos  # float[N] (position of each beam)
    list_angle = p.list_angle  # float[N] (inclination of each beam)
    list_shift = p.list_shift  # int[N] (time shift wrt nadir for each beam)
    shift_lon = p.shift_lon  # None|float (longitude shift in degrees)
    shift_time = p.shift_time  # None|float (time shift in days)
    orbit_file_hash = _text_file_md5(orbit_path)
    orbit_md5 = orbit_file_hash.encode('utf-8')
    logger.debug('Orbit file MD5: {}'.format(orbit_file_hash))
    # Flags for optional parameters
    has_cols = orbit_cols is not None
    has_modelbox = modelbox is not None
    has_shift_lon = shift_lon is not None
    has_shift_time = shift_time is not None
    fmt = '!B32sffI{}{}{}'.format('f' * len(list_pos),
                                  'f' * len(list_angle),
                                  'i' * len(list_shift))
    logger.debug('Mandatory params serialization format: {}'.format(fmt))
    flags = 0b00000000
    opts = []
    if has_cols:
        # Bug fix: struct.pack needs the three column indices as separate
        # arguments; the original passed the sequence itself, which raised
        # struct.error at runtime.
        opts.append(struct.pack('!BBB', *orbit_cols))
    else:
        flags = flags + 0b00000001
    if has_modelbox:
        # Bug fix: same unpacking issue as for orbit_cols above.
        opts.append(struct.pack('!ffff', *modelbox))
    else:
        flags = flags + 0b00000010
    if has_shift_lon:
        opts.append(struct.pack('!f', shift_lon))
    else:
        flags = flags + 0b00000100
    if has_shift_time:
        opts.append(struct.pack('!f', shift_time))
    else:
        flags = flags + 0b00001000
    h = struct.pack(fmt, flags, orbit_md5, rotation_speed, cycle,
                    len(list_pos), *list_pos, *list_angle, *list_shift)
    for d in opts:
        h = h + d
    return h
def get_grid_params_from_hash(h):
    """Rebuild the grid-parameter dictionary from a binary blob produced by
    get_hash_from_grid_params.

    The fixed header is 45 bytes (flags, orbit-file MD5, rotation speed,
    cycle, number of beams); the rest is decoded sequentially.  A flag bit
    SET means the corresponding optional parameter was absent.
    """
    flags, orbit_md5, rot, cycle, nbeams = struct.unpack('!B32sffI', h[:45])
    # Flag bit cleared (== 0) means the optional value is present.
    has_cols = 0 >= (flags & 0b00000001)
    has_modelbox = 0 >= (flags & 0b00000010)
    has_shift_lon = 0 >= (flags & 0b00000100)
    has_shift_time = 0 >= (flags & 0b00001000)
    istart = 45
    iend = 45 + 4 * nbeams # assuming 1 float <=> 4 bytes
    list_pos = struct.unpack('!{}f'.format(nbeams), h[istart:iend])
    istart = iend
    iend = istart + 4 * nbeams # assuming 1 float <=> 4 bytes
    list_angle = struct.unpack('!{}f'.format(nbeams), h[istart:iend])
    istart = iend
    iend = istart + 4 * nbeams # assuming 1 int <=> 4 bytes
    list_shift = struct.unpack('!{}i'.format(nbeams), h[istart:iend])
    istart = iend
    if has_cols:
        iend = istart + 3
        orbit_cols = struct.unpack('!BBB', h[istart:iend])
        istart = iend
    else:
        orbit_cols = None
    if has_modelbox:
        iend = istart + 4 * 4 # assuming 1 float <=> 4 bytes
        modelbox = struct.unpack('!ffff', h[istart:iend])
        istart = iend
    else:
        modelbox = None
    if has_shift_lon:
        iend = istart + 4 # assuming 1 float <=> 4 bytes
        shift_lon = struct.unpack('!f', h[istart:iend])[0]
        istart = iend
    else:
        shift_lon = None
    if has_shift_time:
        iend = istart + 4 # assuming 1 float <=> 4 bytes
        shift_time = struct.unpack('!f', h[istart:iend])[0]
        istart = iend
    else:
        shift_time = None
    result = {'order_orbit_col': orbit_cols,
              'rotation_speed': rot,
              'cycle': cycle,
              'list_pos': list_pos,
              'list_angle': list_angle,
              'list_shift': list_shift,
              'modelbox': modelbox,
              'shift_lon': shift_lon,
              'shift_time': shift_time,
              'orbit_file_md5': orbit_md5}
    logger.debug('')
    logger.debug('=== Reconstructed parameters ===')
    logger.debug('orbit file MD5: {}'.format(orbit_md5.decode('utf-8')))
    logger.debug('rotation_speed: {}'.format(rot))
    logger.debug('cycle: {}'.format(cycle))
    logger.debug('nbeams: {}'.format(nbeams))
    logger.debug('list_pos: {}'.format(list_pos))
    logger.debug('list_angle: {}'.format(list_angle))
    logger.debug('list_shift: {}'.format(list_shift))
    logger.debug('order_orbit_col: {}'.format(orbit_cols))
    logger.debug('modelbox: {}'.format(modelbox))
    logger.debug('shift_lon: {}'.format(shift_lon))
    logger.debug('shift_time: {}'.format(shift_time))
    return result
def get_b64_gzipped_hash(p):
    """Return the grid-parameter blob of *p*, zlib-compressed (level 9)
    and base64-encoded as a str."""
    raw = get_hash_from_grid_params(p)
    return base64.b64encode(zlib.compress(raw, 9)).decode('utf-8')
def revert_b64_gzipped_hash(b64_str):
    """Decode a string produced by get_b64_gzipped_hash back into the
    grid-parameter dictionary."""
    raw = zlib.decompress(base64.b64decode(b64_str.encode('utf-8')))
    return get_grid_params_from_hash(raw)
if '__main__' == __name__:
    # Quick manual check: serialize the parameters of a sample params
    # module, print the compressed hash and its CRC32, then decode it back.
    import sys
    import logging
    import params_8beams_fram as p
    main_logger = logging.getLogger()
    main_logger.handlers = []
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    main_logger.addHandler(handler)
    main_logger.setLevel(logging.DEBUG)
    try:
        # Bug fix: the original called compute_grid_hash(), which does not
        # exist in this module; the intended function is
        # get_hash_from_grid_params().
        h = get_hash_from_grid_params(p)
    except FileNotFoundError:
        _, e, _ = sys.exc_info()
        logger.error('Orbit file "{}" not found'.format(e.args[0]))
        # Bug fix: without the exit, h was unbound below -> NameError.
        sys.exit(1)
    # Compress information as much as possible...
    print(base64.b64encode(zlib.compress(h, 9)).decode('utf-8'))
    # Maybe use the CRC32 in the filename and include the hash in the NetCDF
    # attributes
    print(zlib.crc32(h))
    # Bug fix: the original called read_grid_params_from_hash(), which does
    # not exist; the intended function is get_grid_params_from_hash().
    get_grid_params_from_hash(h)
| 7,140 | 30.879464 | 116 | py |
skimulator | skimulator-master/skimulator/__init__.py | # =======================================================================
# General Documentation
"""Utilities for SKIM Science Simulator for the ocean
Some useful online help commands for the package:
* help(skimulator): Help for the package. A list of all modules in
this package is found in the "Package Contents" section of the
help output.
* help(skimulator.M): Details of each module "M", where "M" is the
module's name.
#-----------------------------------------------------------------------
# Additional Documentation
# Authors: Lucile Gaultier
#
# Modification History:
# - Jan 2021: V4.0
# - Jul 2019: V3.0
# - Oct 2018: V2.0
# - Dec 2017: V1.0
# - Mar 2017: Original by Lucile Gaultier, ODL
#
# Notes:
# - Written for Python 3.9, tested with Python 3.7, 3.9
#
#-----------------------------------------------------------------------
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
# -----------------------------------------------------------------------
# ---------------- Module General Import and Declarations ---------------
# - Set module version to package version:
__version__ = '4.0'
__author__ = 'Lucile Gaultier <lucile.gaultier@oceandatalab.com>'
__date__ = '2021-01-01'
__email__ = 'lucile.gaultier@oceandatalab.com'
__url__ = ''
__description__ = ('SKIM Simulator')
__author_email__ = ('lucile.gaultier@oceandatalab.com')
__keywords__ = ()
import os
# Try to improve accuracy of the version number by reading the build number in
# the share/VERSION.txt file if it is available
# Refine __version__ with the build number stored in share/VERSION.txt when
# the installed distribution ships that file; silently keep the static
# version otherwise.
try:
    import pkg_resources
    _version_path = pkg_resources.resource_filename('skimulator',
                                                    'share/VERSION.txt')
    if os.path.exists(_version_path):
        with open(_version_path, 'rt') as f:
            # First line: number of commits, appended to the version string.
            _commits = f.readline().strip()
            __version__ = '{}.{}'.format(__version__, _commits)
            _ = f.readline() # Commit hash, useful for debug
            # Third line: build date, replaces the static __date__.
            __date__ = f.readline().strip()
except ImportError:
    pass
except KeyError:
    # It seems that some versions of setuptools/pkg_resources try to read the
    # file once its path has been rebuilt, which causes an error when the file
    # does not exist.
    pass
# - If you're importing this module in testing mode, or you're running
# pydoc on this module via the command line, import user-specific
# settings to make sure any non-standard libraries are found:
import sys
if (__name__ == "__main__") or \
("pydoc" in os.path.basename(sys.argv[0])):
import user
# - Find python version number
__python_version__ = sys.version[:3]
# - Import numerical array formats
try:
import numpy
except ImportError:
print(''' Numpy is not available on this platform,
''')
# - Import scientific librairies
try:
import scipy
except ImportError:
print("""Scipy is not available on this platform,
""")
# - Import netcdf reading librairies
try:
import netCDF4
except ImportError:
print(''' netCDF4 is not available on this machine,
''')
# reading and writing netcdf functions in rw_data.py won't work'''
| 3,814 | 31.887931 | 78 | py |
skimulator | skimulator-master/skimulator/spline.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
#/********************************************************
#* bplsine_init_uniform *
#* *
#* initialize an allocated bspline struct with *
#* uniform knots between a and b *
#* *
#* - bspline : a pointer to an allocated bspline *
#* - a, b : interval of the bspline *
#* */
class spline:
    """Uniform B-spline of degree ``k`` on a bounded interval [a, b].

    ``gp2`` is the number of interior knots plus 2.  Knots are clamped at
    both ends (each boundary knot repeated k times).  Evaluation follows
    the de Boor-Cox recurrence implemented in ``bspline_value``.
    """
    def __init__(self, k, gp2):
        """Allocate knot and coefficient arrays for degree k and gp2-2
        interior knots."""
        self.k = k
        self.g = gp2 - 2 # number of interior knots
        self.Nknots = self.g + 2 + 2 * self.k # total number of knots
        self.knots = numpy.zeros([self.Nknots])
        self.Nvals = self.g + self.k + 1
        self.Ncoefs = self.Nvals
        self.coefs = numpy.zeros([self.Ncoefs])
        self.dp = numpy.zeros([self.k + 1])
        self.dm = numpy.zeros([self.k + 1])
    def init_uniform(self, a, b):
        """Fill the knot vector with uniform knots on [a, b], clamped at
        both boundaries."""
        self.a = a
        self.b = b
        k = self.k
        g = self.g
        Nknots = self.Nknots
        for i in range(k):
            self.knots[i] = a
        for i in numpy.arange(k,k + g+1,1):
            self.knots[i] = a + (i - k) / (g + 1.) * (b - a)
        for i in numpy.arange(k + g + 1,Nknots,1):
            self.knots[i] = b
    def find_interval_uniform(self,x):
        """Return the knot-interval index for each value of x; -1 flags
        values outside [a, b]."""
        k = self.k
        g = self.g
        a = self.a
        b = self.b
        j = numpy.floor(((x - a) / (b - a) * (g + 1.)) + k).astype('int')
        if (x < a).any():
            j[numpy.where(x < a)] = -1
        if (x > b).any():
            j[numpy.where(x > b)] = -1
        if (j > g + k).any():
            j[numpy.where(j > g + k)] = g+k
        return(j)
    def bspline_value (self, x):
        """Evaluate the k+1 non-zero basis functions at each x.

        Returns (vals, istart, iend): vals has shape (k+1, len(x)) and
        istart/iend bound the coefficients affected by each point.  When
        any x lies outside [a, b] the sentinel ([0, 0, 0, 0], -1, -2) is
        returned instead.
        """
        k = self.k
        g = self.g
        a = self.a
        b = self.b
        dp = self.dp
        dm = self.dm
        n=len(x)
        # Find interval
        i=self.find_interval_uniform (x)
        if (i < 0).any():
            return([0, 0, 0, 0], -1, -2)
        # interval of vals affected
        istart = i - k
        iend = i
        vals = numpy.zeros([k + 1, n])
        dp = numpy.zeros([self.k + 1, n])
        dm = numpy.zeros([self.k + 1, n])
        vals[0, :] = 1.
        # de Boor-Cox triangular recurrence on the spline degree s.
        for s in numpy.arange(1, k + 1, 1):
            dp[s - 1,:] = self.knots[i + s] - x
            dm[s - 1,:] = x - self.knots[i + 1 - s]
            prev = numpy.zeros([n])
            old = vals[0 ,:]
            for r in range(1, s + 1, 1):
                M = old[:] / (dp[r - 1, :] + dm[s + 1 - r - 1, :])
                old = 1*vals[r , :]
                vals[r - 1 , :] = prev[:] + dp[r - 1, :] * M[:]
                vals[r , :] = dm[s + 1 - r - 1, :] * M[:]
                prev = 1*vals[r , :]
        return (vals,istart,iend)
    def init_val(self,yy):
        """Set the spline coefficients."""
        self.coefs=yy
    def get_val(self,xx):
        """Evaluate the spline at the points xx.

        NOTE(review): the loop uses the literal 4 (= k+1 for a cubic
        spline), so this method presumably assumes k == 3 -- confirm.
        """
        vals,istart,iend=self.bspline_value(xx)
        sy=0*xx
        # print(istart.min(),istart.max())
        # print(iend.min(),iend.max())
        for i in range(4):
            # print((istart[:] + i).max())
            sy+=vals[i, :]*self.coefs[istart[:]+i]
        return(sy)
class spline_circular:
    """Uniform B-spline on a periodic (circular) domain [a, b].

    Same de Boor-Cox evaluation as ``spline``, but the knot vector extends
    uniformly past both ends and coefficient indices are wrapped modulo
    g+1 so the spline is periodic.
    """
    def __init__(self,k,gp2):
        """Allocate knot and coefficient arrays for degree k and gp2-1
        interior knots."""
        self.k = k
        self.g = gp2 - 1 # number of interior knots
        self.Nknots = self.g + 2 + 2 * (self.k+1) # total number of knots
        self.knots = numpy.zeros([self.Nknots])
        self.Nvals = self.g + self.k + 1
        self.Ncoefs = self.Nvals
        self.coefs = numpy.zeros([self.Ncoefs])
        self.dp = numpy.zeros([self.k + 1])
        self.dm = numpy.zeros([self.k + 1])
    def init_uniform(self,a,b):
        """Fill the knot vector with uniformly spaced knots covering [a, b]
        plus k extra knots on each side (no clamping: periodic domain)."""
        self.a = a
        self.b = b
        k = self.k
        g = self.g
        Nknots = self.Nknots
        self.knots = (numpy.arange(Nknots)-k)*(b-a)/(g+1)+a
    def find_interval_uniform(self,x):
        """Return the knot-interval index for each value of x; -1 flags
        values outside [a, b]."""
        k = self.k
        g = self.g
        a = self.a
        b = self.b
        j = numpy.floor(((x - a) / (b - a) * (g + 1.)) + k).astype('int')
        if (x < a).any():
            j[numpy.where(x < a)] = -1
        if (x > b).any():
            j[numpy.where(x > b)] = -1
        if (j > g + k).any():
            j[numpy.where(j > g + k)] = g+k
        return(j)
    def bspline_value (self, x):
        """Evaluate the k+1 non-zero basis functions at each x.

        Returns (vals, istart, iend) with istart wrapped modulo g+1 for
        periodicity.  When any x lies outside [a, b] the sentinel
        ([0, 0, 0, 0], -1, -2) is returned instead.
        """
        k = self.k
        g = self.g
        a = self.a
        b = self.b
        dp = self.dp
        dm = self.dm
        n=len(x)
        # Find interval
        i=self.find_interval_uniform (x)
        if (i < 0).any():
            return([0,0,0,0],-1,-2)
        # interval of vals affected
        istart = i - k
        iend = i
        vals = numpy.zeros([k+1,n])
        dp = numpy.zeros([self.k + 1,n])
        dm = numpy.zeros([self.k + 1,n])
        vals[0,:] = 1.
        # de Boor-Cox triangular recurrence on the spline degree s.
        for s in numpy.arange(1,k+1,1):
            dp[s - 1,:] = self.knots[i + s] - x
            dm[s - 1,:] = x - self.knots[i + 1 - s]
            prev = numpy.zeros([n])
            old = vals[0 ,:]
            for r in range(1,s+1,1):
                M = old[:] / (dp[r - 1,:] + dm[s + 1 - r - 1,:])
                old = 1*vals[r ,:]
                vals[r - 1 ,:] = prev[:] + dp[r - 1,:] * M[:]
                vals[r ,:] = dm[s + 1 - r - 1,:] * M[:]
                prev = 1*vals[r ,:]
        return (vals,istart%(g+1),iend)
    def init_val(self,yy):
        """Set the spline coefficients."""
        self.coefs=yy
    def get_val(self,xx):
        """Evaluate the periodic spline at the points xx.

        NOTE(review): the loop uses the literal 4 (= k+1 for a cubic
        spline), so this method presumably assumes k == 3 -- confirm.
        """
        vals,istart,iend=self.bspline_value(xx)
        sy=0*xx
        for i in range(4):
            sy+=vals[i,:]*self.coefs[istart[:]+i]
        return(sy)
| 6,038 | 25.603524 | 74 | py |
skimulator | skimulator-master/skimulator/rw_data.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
'''
Module to read and write data
Contains functions to read variables and coordinates from a netcdf files. \n
Contains model classes: \n
-ROMS \n
-NEMO \n
-NETCDF_MODEL \n
-WW3 \n
Contains satellite class: Sat_SKIM \n
Contains file instrumentation class: file_instr \n
'''
import skimulator
import skimulator.grid_check
from netCDF4 import Dataset
import numpy
import sys
import time as ti
import logging
import datetime
import os
version = skimulator.__version__
logger = logging.getLogger(__name__)
class IncompatibleGridError(Exception):
    """Raised when a grid file does not match the current grid parameters.

    Attributes:
        path: path of the offending grid file.
        grid, p: placeholders (0).  The original intent, kept as comments
            below, was to store the decoded grid and parameter hashes.
    """
    def __init__(self, path, grid_hash, params_hash, *args, **kwargs):
        """Store the offending path; grid_hash/params_hash are currently
        unused placeholders."""
        # Bug fix: forward the message to Exception.__init__ so that
        # str(err) / err.args are informative (the original never called it).
        super().__init__(path, *args)
        self.path = path
        self.grid = 0 #skimulator.grid_check.revert_b64_gzipped_hash(grid_hash)
        self.p = 0
        #self.p = skimulator.grid_check.revert_b64_gzipped_hash(params_hash)
def write_params(params, pfile):
    """Write the parameters selected to run the simulator into *pfile*.

    One 'name = value' line per non-dunder attribute of *params* (typically
    a parameters module), so the run configuration is recorded alongside
    the outputs.
    """
    with open(pfile, 'w') as f:
        for key in dir(params):
            if not key[0:2] == '__':
                # Bug fix: getattr() resolves every name that dir() reports;
                # the original params.__dict__[key] raised KeyError for
                # attributes not stored directly in __dict__ (e.g.
                # inherited ones).
                f.write('{} = {}\n'.format(key, getattr(params, key)))
def read_coordinates(nfile, nlon, nlat, twoD=True):
    ''' General routine to read coordinates in a netcdf file. \n
    Inputs are file name, longitude name, latitude name. \n
    Outputs are longitudes and latitudes (2D arrays by default, 1D when
    twoD is False). Exits the program on any read error.'''
    # - Open Netcdf file
    try:
        fid = Dataset(nfile, 'r')
    except IOError:
        logger.error('There was an error opening the file {}'.format(nfile))
        sys.exit(1)
    # - Check that both coordinate variables exist.  Bug fix: the bare
    #   'except:' clauses used here before also swallowed unrelated errors
    #   (including KeyboardInterrupt); a missing variable raises KeyError.
    try:
        vartmp = fid.variables[nlat]
    except KeyError:
        logger.error('Coordinates {} not found in file {}'.format(nlat, nfile))
        sys.exit(1)
    try:
        vartmp = fid.variables[nlon]
    except KeyError:
        logger.error('Coordinates {} not found in file {}'.format(nlon, nfile))
        sys.exit(1)
    # - Read 1d or 2d coordinates
    if len(vartmp.shape) == 1:
        lon_tmp = numpy.array(fid.variables[nlon][:]).squeeze()
        lat_tmp = numpy.array(fid.variables[nlat][:]).squeeze()
        if twoD:
            lon, lat = numpy.meshgrid(lon_tmp, lat_tmp)
        else:
            lon = lon_tmp
            lat = lat_tmp
    elif len(vartmp.shape) == 2:
        lon = numpy.array(fid.variables[nlon][:, :]).squeeze()
        lat = numpy.array(fid.variables[nlat][:, :]).squeeze()
        if not twoD:
            lon = lon[0, :]
            lat = lat[:, 0]
    else:
        # Bug fix: the original fell through to 'return lon, lat' with both
        # names unbound (NameError); fail fast like the other error paths.
        logger.warning('unknown dimension for lon and lat')
        fid.close()
        sys.exit(1)
    fid.close()
    return lon, lat
def read_var(nfile, var, index=None, time=0, depth=0, model_nan=None):
    ''' General routine to read variables in a netcdf file. \n
    Inputs are file name, variable name, index=index to read part
    of the variables, time=time to read a specific time, depth=depth to read a
    specific depth, model_nan=nan value '''
    # Returns a numpy masked array. When time/depth is None the full axis
    # is kept; otherwise only the requested slice is read. `index` selects
    # a subset along the spatial axes after reading.
    # - Open Netcdf file
    try:
        fid = Dataset(nfile, 'r')
    except IOError:
        logger.error('There was an error opening the file {}'.format(nfile))
        sys.exit(1)
    # - Check dimension of variable
    try:
        vartmp = fid.variables[var]
    except:
        logger.error('Variable {} not found in file {}'.format(var, nfile))
        sys.exit(1)
    # - Read variable
    # Branch on the variable rank (1 to 4 dimensions); 3-D data is assumed
    # to be (time, y, x) and 4-D data (time, depth, y, x).
    if index is None:
        if len(vartmp.shape) == 1:
            T = numpy.ma.array(fid.variables[var][:]).squeeze()
        elif len(vartmp.shape) == 2:
            T = numpy.ma.array(fid.variables[var][:, :]).squeeze()
        elif len(vartmp.shape) == 3:
            if time is None:
                T = numpy.ma.array(fid.variables[var][:, :, :]).squeeze()
            else:
                T = numpy.ma.array(fid.variables[var][time, :, :]).squeeze()
        elif len(vartmp.shape) == 4:
            if time is None:
                if depth is None:
                    T = numpy.ma.array(fid.variables[var][:, :, :, :]).squeeze()
                else:
                    T = numpy.ma.array(fid.variables[var][:, depth,
                                       :, :]).squeeze()
            elif depth is None:
                T = numpy.ma.array(fid.variables[var][time, :, :, :]).squeeze()
            else:
                T = numpy.ma.array(fid.variables[var][time,depth, :, :]).squeeze()
        else:
            logger.error('wrong dimension in variables {}'.format(var))
            sys.exit(1)
    else:
        # Same rank-dependent logic, but the spatial dimensions are
        # subsetted with `index` after the read.
        if len(vartmp.shape) == 1:
            Ttmp = numpy.ma.array(fid.variables[var][:]).squeeze()
            T = Ttmp[index]
        elif len(vartmp.shape) == 2:
            Ttmp = numpy.ma.array(fid.variables[var][:, :]).squeeze()
            T = Ttmp[index]
        elif len(vartmp.shape) == 3:
            if time is None:
                U = numpy.ma.array(fid.variables[var][:, :, :]).squeeze()
                T = U[:, index]
            else:
                U = numpy.ma.array(fid.variables[var][time, :, :]).squeeze()
                T = U[index]
        elif len(vartmp.shape) == 4:
            if time is None:
                if depth is None:
                    U = numpy.ma.array(fid.variables[var][:, :, :,
                                       :]).squeeze()
                    T = U[:, :, index]
                else:
                    U = numpy.ma.array(fid.variables[var][:, depth, :,
                                       :]).squeeze()
                    T = U[:, index]
            elif depth is None:
                U = numpy.ma.array(fid.variables[var][time, :, :,
                                   :]).squeeze()
                T = U[:, index]
            else:
                U = numpy.ma.array(fid.variables[var][time, depth, :,
                                   :]).squeeze()
                T = U[index]
        else:
            logger.error('Wrong dimension')
            sys.exit(1)
    fid.close()
    # - Mask value that are NaN
    #try:
    #    mask = (fid.variables[var]._fill_value == fill_value)
    #except:
    #    mask = (T == model_nan)
    #   else:
    #       mask = (T == numpy.nan)
    if model_nan is not None:
        # Extend the existing mask with the model fill value and with
        # unreasonably large magnitudes.
        _mask = numpy.ma.getmaskarray(T)
        # TODO remove hard coded value?
        _mask = (_mask | (T == model_nan) | (abs(T) > 1000))
        T = numpy.ma.array(T, mask=_mask)
        #T[numpy.where(T == model_nan)] = numpy.nan
    # NOTE(review): _sharedmask is the public numpy attribute; _shared_mask
    # just sets an unused attribute here — confirm the intent.
    T._shared_mask = False
    return T #numpy.ma.MaskedArray(T, mask=mask)
def global_attributes(fid, level):
    """Set the global metadata attributes shared by every product level.

    Parameters:
      fid: open writable netCDF dataset; attributes are set on it.
      level: product level string (e.g. 'swath', 'L2B', 'L2C', 'L2D')
             used in the title, history and processing_level attributes.
    """
    # Attribute name/value pairs, applied in order below.
    attributes = (
        ('title', 'SKIM {} simulated by SKIM simulator'.format(level)),
        ('keywords', 'SKIM, Doppler'),  # Check keywords
        ('Conventions', "CF-1.6"),
        ('summary', 'SKIM data produced'),
        ('description', "SKIM fixed swath"),
        ('Metadata_Conventions', "Unidata Dataset Discovery v1.0"),
        ('history', '{} File created by skimulator version {}'.format(level,
                                                                      version)),
        ('processing_level', level),
        ('standard_name_vocabulary', "CF-1.6"),
        ('creator_name', "Lucile Gaultier"),
        ('creator_email', "lucile.gaultier@gmail.com"),
        ('publisher_url', ""),
        ('project', "SKIM"),
        ('date_created', ti.strftime("%Y-%m-%dT%H:%M:%SZ")),
        ('date_modified', ti.strftime("%Y-%m-%dT%H:%M:%SZ")),
        ('keywords_vocabulary', ""),
        ('references', ""),
    )
    for name, value in attributes:
        setattr(fid, name, value)
def write_l2c(metadata, geolocation, **kwargs):
    """Write an L2C product (regular along/across-track grid) to netCDF.

    Parameters:
      metadata: dict with keys 'file', 'time_coverage_start',
                'time_coverage_end', 'cycle', 'pass', 'first_time'.
      geolocation: dict with 'lat', 'lon', 'time', 'al', 'ac' arrays.
      kwargs: data arrays of shape (time, across-track) written as
              variables; None values are skipped.
    """
    # NOTE(review): this local `ti` shadows the module-level
    # `import time as ti` alias for the rest of the function.
    ti = datetime.datetime.now()
    lat = geolocation['lat']
    lon = geolocation['lon']
    # Wrap longitudes into [-180, 180[.
    lon = numpy.mod(lon + 180, 360) - 180
    tim = geolocation['time']
    # - Open Netcdf file in write mode
    fid = Dataset(metadata['file'], 'w', format='NETCDF4_CLASSIC')
    # - Create Global attribute
    global_attributes(fid, 'L2C')
    fid.description = "SKIM fixed swath"
    fid.time_coverage_start = metadata['time_coverage_start']
    # p.date0+"YYYY-MM-DDThh:mmZ" #tim0 converted to format
    fid.time_coverage_end = metadata['time_coverage_end']
    # p.date0 +"YYYY-MM-DDThh:mmZ" #tim0 converted to format
    fid.geospatial_lat_min = "{:.2f}".format(numpy.min(lat))
    fid.geospatial_lat_max = "{:.2f}".format(numpy.max(lat))
    fid.geospatial_lat_units = "degrees_north"
    fid.geospatial_lon_max = "{:.2f}".format(numpy.max(lon))
    fid.geospatial_lon_min = "{:.2f}".format(numpy.min(lon))
    fid.geospatial_lon_units = "degrees_east"
    fid.project = "SKIM"
    fid.date_created = ti.strftime("%Y-%m-%dT%H:%M:%SZ")
    fid.date_modified = ti.strftime("%Y-%m-%dT%H:%M:%SZ")
    fid.cycle = "{0:d}".format(int(metadata['cycle']))
    fid.track = "{} th pass".format(metadata['pass'])
    # - Create dimensions
    # if (not os.path.isfile(self.file)):
    # NOTE(review): dimlon and nlon are computed but never used below.
    dimlon = 'al'
    dimlat = 'ac'
    dimtime = 'time'
    nlon = numpy.shape(lon)[0] - 1
    nlat = numpy.shape(lat)[1]
    ntime = None
    fid.createDimension(dimlat, nlat)
    # Unlimited time dimension (size None).
    fid.createDimension(dimtime, ntime)
    # - Create and write Variables
    vtime = fid.createVariable('time', 'f8', (dimtime))
    vtime.axis = "T"
    vtime.units = "days since {}".format(metadata['first_time'])
    vtime.long_name = "Time"
    vtime.standard_name = "time"
    vtime.calendar = "gregorian"
    vtime[:] = tim
    vlon = fid.createVariable('lon', 'f4', (dimtime, dimlat))
    vlon.axis = "X"
    vlon.long_name = "Longitude"
    vlon.standard_name = "longitude"
    vlon.units = "degrees_east"
    vlon[:, :] = lon
    val = fid.createVariable('x_al', 'f4', (dimtime))
    val.axis = "Y"
    val.long_name = "Along track distance from beginning of cycle"
    val.standard_name = "Along track distance"
    val.units = "km"
    val[:] = geolocation['al']
    vac = fid.createVariable('x_ac', 'f4', (dimlat))
    vac.axis = "Y"
    vac.long_name = "Along track distance from nadir"
    vac.standard_name = "Across track distance"
    vac.units = "km"
    vac[:] = geolocation['ac']
    vlat = fid.createVariable('lat', 'f4', (dimtime, dimlat))
    vlat.axis = "Y"
    vlat.long_name = "Latitude"
    vlat.standard_name = "latitude"
    vlat.units = "degrees_north"
    vlat[:, :] = lat
    # Human-readable long_name for each known kwarg variable; unknown keys
    # fall back to the key itself in the loop below.
    longname = { "ux_noerr": "Error-free zonal velocity",
                "uy_noerr": "Error-free meridional velocity",
                "ux_obs": "Observed zonal velocity",
                "uy_obs": "Observed meridional velocity",
                "u_ac_obs": "Observed across track velocity",
                "u_al_obs": "Observed along track velocity",
                "ux_model": "Error-free zonal velocity",
                "uy_model": "Error-free meridional velocity",
                "u_ac_noerr": "Error-free across track velocity",
                "u_al_noerr": "Error-free along track velocity",
                "angle": "angle of xac with eastward vector",
                "ux_true": "True zonal velocity",
                "uy_true": "True meridional velocity",
                "x_al": "Along track distance from beginning of cycle",
                "x_ac": "Across track distance from nadir",
                "u_ac_true": "True across track velocity",
                "u_al_true": "True along track velocity",
                "u_ac_instr": "Intrumental across track error",
                "u_al_instr": "Intrumental along track error",
                "u_al_wdrem": "Along track error in the wave doppler estimation",
                "u_ac_wdrem": "Across track error in the wave doppler estimation",
                "u_al_wd": "Along track wave doppler",
                "u_ac_wd": "Across track wave doppler",
                "u_ac_uss_obs": "Estimated across track Stokes drift",
                "u_al_uss_obs": "Estimated along track Stokes drift",
                "u_ac_uss_oi": "Error-free across track Stokes drift",
                "u_al_uss_oi": "Error-free along track Stokes drift",
                "u_ac_uss_true": "True across track Stokes drift",
                "u_al_uss_true": "True along track Stokes drift",
                "u_ac_dsigma":"Across track error due to dsigma",
                "u_al_dsigma":"Along track error due to dsigma",
                "uwnd": "Zonal wind at 10m",
                "vwnd": "Merdional wind at 10m",
                "rain": "Rain"
                }
    # Units for each known kwarg variable (fallback: empty string).
    unit = {"ux_noerr": "m/s", "ux_obs": "m/s", "u_ac_obs": "m/s",
            "uy_noerr": "m/s", "uy_obs": "m/s", "u_al_obs": "m/s",
            "angle": "rad", "ux_model": "m/s", "uy_model": "m/s",
            "u_ac_noerr": "m/s", "u_al_noerr": "m/s",
            "u_ac_true": "m/s", "u_al_true": "m/s",
            "ux_true": "m/s", "uy_true": "m/s", "x_al": "km", "x_ac": "km",
            "u_ac_instr": "m/s", "u_al_instr": "m/s", "u_ac_wdrem": "m/s",
            "u_al_wdrem":"m/s", "u_ac_wd": "m/s", "u_al_wd": "m/s",
            "u_ac_uss_obs":"m/s", "u_al_uss_obs":"m/s",
            "u_ac_uss_oi": "m/s", "u_al_uss_oi":"m/s",
            "u_ac_uss_true":"m/s", "u_al_uss_true":"m/s",
            "u_ac_dsigma":"m/s", "u_al_dsigma":"m/s","uwnd":"m/s",
            "vwnd":"m/s", "rain":"mm/h"
            }
    for key, value in kwargs.items():
        if value is not None:
            nvar = '{}'.format(key)
            var = fid.createVariable(nvar, 'f4', (dimtime, dimlat),
                                     fill_value=-1.36e9)
            try:
                var.units = unit[str(key)]
            except:
                var.units = ''
            try:
                var.long_name = longname[str(key)]
            except:
                var.long_name = str(key)
            if value.any():
                # Replace NaN, out-of-range and infinite values with the
                # fill value before writing.
                # NOTE(review): this mutates the caller's array in place,
                # and numpy.PINF was removed in NumPy 2.0 (use numpy.inf).
                mask = numpy.isnan(value)
                value[numpy.where(mask)] = -1.36e9
                mask_ind = numpy.where(value < -1e7)
                value[mask_ind] = -1.36e9
                mask_ind = numpy.where(value > 1e7)
                value[mask_ind] = -1.36e9
                mask_ind = numpy.where(value == numpy.PINF)
                value[mask_ind] = -1.36e9
            var[:, :] = value
    fid.close()
def write_l2d(metadata, geolocation, **kwargs):
    """Write an L2D product (regular lon/lat grid) to netCDF.

    Parameters:
      metadata: dict with keys 'file', 'time_coverage_start',
                'time_coverage_end', 'first_time'.
      geolocation: dict with 1-D 'lat', 'lon' axes and 'time'.
      kwargs: 2-D data arrays (lat, lon) written as variables at the
              single time step; None values are skipped.
    """
    # NOTE(review): this local `ti` shadows the module-level
    # `import time as ti` alias for the rest of the function.
    ti = datetime.datetime.now()
    lat = geolocation['lat']
    lon = geolocation['lon']
    tim = geolocation['time']
    # - Open Netcdf file in write mode
    fid = Dataset(metadata['file'], 'w', format='NETCDF4_CLASSIC')
    # - Create Global attribute
    global_attributes(fid, 'L2D')
    fid.description = "SKIM fixed grid"
    fid.time_coverage_start = metadata['time_coverage_start']
    # p.date0+"YYYY-MM-DDThh:mmZ" #tim0 converted to format
    fid.time_coverage_end = metadata['time_coverage_end']
    # p.date0 +"YYYY-MM-DDThh:mmZ" #tim0 converted to format
    fid.geospatial_lat_min = "{:.2f}".format(numpy.min(lat))
    fid.geospatial_lat_max = "{:.2f}".format(numpy.max(lat))
    fid.geospatial_lat_units = "degrees_north"
    # Longitude extent is reported in the [0, 360[ convention.
    _lon_max = numpy.mod(lon + 360, 360)
    fid.geospatial_lon_max = "{:.2f}".format(numpy.max(_lon_max))
    fid.geospatial_lon_min = "{:.2f}".format(numpy.min(_lon_max))
    fid.geospatial_lon_units = "degrees_east"
    # - Create dimensions
    # if (not os.path.isfile(self.file)):
    dimlon = 'lon'
    dimlat = 'lat'
    dimtime = 'time'
    nlon = numpy.shape(lon)[0]
    nlat = numpy.shape(lat)[0]
    ntime = None
    fid.createDimension(dimlat, nlat)
    fid.createDimension(dimlon, nlon)
    # Unlimited time dimension (size None).
    fid.createDimension(dimtime, ntime)
    # - Create and write Variables
    vtime = fid.createVariable('time', 'f8', (dimtime))
    vtime.axis = "T"
    vtime.units = metadata['first_time']
    vtime.long_name = "Time"
    vtime.standard_name = "time"
    vtime.calendar = "gregorian"
    vtime[:] = tim
    vlon = fid.createVariable('lon', 'f4', (dimlon, ))
    vlon.axis = "X"
    vlon.long_name = "Longitude"
    vlon.standard_name = "longitude"
    vlon.units = "degrees_east"
    vlon[:] = lon
    vlat = fid.createVariable('lat', 'f4', (dimlat, ))
    vlat.axis = "Y"
    vlat.long_name = "Latitude"
    vlat.standard_name = "latitude"
    vlat.units = "degrees_north"
    vlat[:] = lat
    # Human-readable long_name per known variable (fallback: the key).
    longname = { "ux_noerr": "Error-free zonal velocity",
                "uy_noerr": "Error-free meridional velocity",
                "ux_obs": "Observed zonal velocity",
                "uy_obs": "Observed meridional velocity",
                "ux_model": "Error-free zonal velocity",
                "uy_model": "Error-free meridional velocity",
                "ux_true": "True zonal velocity",
                "uy_true": "True meridional velocity",
                }
    # Units per known variable (fallback: empty string).
    unit = {"ux_noerr": "m/s", "ux_obs": "m/s",
            "uy_noerr": "m/s", "uy_obs": "m/s",
            "ux_true": "m/s", "uy_true": "m/s"
            }
    for key, value in kwargs.items():
        if value is not None:
            nvar = '{}'.format(key)
            var = fid.createVariable(nvar, 'f4', (dimtime, dimlat, dimlon),
                                     fill_value=-1.36e9)
            try:
                var.units = unit[str(key)]
            except:
                var.units = ''
            try:
                var.long_name = longname[str(key)]
            except:
                var.long_name = str(key)
            if value.any():
                # Replace NaN, out-of-range and infinite values with the
                # fill value before writing.
                # NOTE(review): mutates the caller's array in place, and
                # numpy.PINF was removed in NumPy 2.0 (use numpy.inf).
                mask = numpy.isnan(value)
                value[numpy.where(mask)] = -1.36e9
                mask_ind = numpy.where(value < -1e7)
                value[mask_ind] = -1.36e9
                mask_ind = numpy.where(value > 1e7)
                value[mask_ind] = -1.36e9
                mask_ind = numpy.where(value == numpy.PINF)
                value[mask_ind] = -1.36e9
            var[0, :, :] = value
    fid.close()
class Sat_SKIM():
    ''' Sat_SKIM class: to read and write data that has been
    created by SKIM simulator '''
    def __init__(self, ifile=None, lon=None, lat=None, lon_nadir=None,
                 lat_nadir=None, time=None, cycle=None, al_cycle=None,
                 x_al=None, timeshift=None):
        # Path of the netCDF grid / data file handled by this object.
        self.file = ifile
        # Per-beam coordinate lists: in write_swath/write_data, element 0
        # is the nadir beam and elements 1..n are the off-nadir beams.
        self.lon = lon
        self.lat = lat
        self.lon_nadir = lon_nadir
        self.lat_nadir = lat_nadir
        self.time = time
        # Cycle duration, distance travelled per cycle, and time shift
        # applied to match the model time axis.
        self.cycle = cycle
        self.x_al = x_al
        self.al_cycle = al_cycle
        self.timeshift = timeshift

    def write_swath(self, p, **kwargs):
        '''Write swath location in Satellite grid file sgridfile.\n
        Dimensions are time (i.e. along track), x_ac (across
        track) and cycle (1). \n
        Variables are longitude, latitude, number of days in a cycle,
        distance crossed in a cycle, time, along track and across track
        distances are stored.'''
        # NOTE(review): this method also reads self.ipass, self.list_pos,
        # self.list_angle, self.beam_angle, self.radial_angle, self.x_ac
        # and self.angle, which are not set in __init__ — presumably they
        # are assigned by the orbit-building code before calling this.
        #grid_params_hash = skimulator.grid_check.get_b64_gzipped_hash(p)
        grid_params_hash = 0 #skimulator.grid_check.get_b64_gzipped_hash(p)
        # - Open Netcdf file in write mode
        fid = Dataset(self.file, 'w', format='NETCDF4_CLASSIC')
        # - Create Global attribute
        global_attributes(fid, 'swath')
        fid.description = "SKIM swath"
        fid.time_coverage_start = self.time[0][0]
        # p.date0+"YYYY-MM-DDThh:mmZ" #tim0 converted to format
        fid.time_coverage_end = self.time[-1][-1]
        # p.date0 +"YYYY-MM-DDThh:mmZ" #tim0 converted to format
        fid.geospatial_lat_min = "{:.2f}".format(numpy.min(self.lat[0]))
        fid.geospatial_lat_max = "{:.2f}".format(numpy.max(self.lat[0]))
        fid.geospatial_lat_units = "degrees_north"
        fid.geospatial_lon_max = "{:.2f}".format(numpy.max(self.lon[0]))
        fid.geospatial_lon_min = "{:.2f}".format(numpy.min(self.lon[0]))
        fid.geospatial_lon_units = "degrees_east"
        fid.cycle = "{0:d}".format(int(self.al_cycle))
        fid.track = "{} th pass".format(self.ipass)
        fid.grid_params_hash = grid_params_hash
        # - Create dimensions
        # if (not os.path.isfile(self.file)):
        dimsample = 'sample'
        # The number of samples is taken from the beam with the largest
        # time shift (hence the fewest wasted samples).
        maxpos = numpy.argmax(numpy.array(p.list_shift))
        nsample = numpy.shape(self.lon[maxpos + 1])[0]
        fid.createDimension(dimsample, nsample)
        # fid.createDimension('time_nadir', numpy.shape(self.lon)[0])
        dimcycle = 'cycle'
        fid.createDimension(dimcycle, 1)
        nbeam = len(self.list_pos)
        dimnbeam = 'nbeam'
        fid.createDimension(dimnbeam, nbeam)
        # - Create and write Variables
        vtime = fid.createVariable('time', 'f8', (dimsample, dimnbeam))
        vtime.axis = "T"
        vtime.units = "seconds since {}".format(p.first_time)
        vtime.long_name = "Time"
        vtime.standard_name = "time"
        vtime.calendar = "gregorian"
        vtime_nadir = fid.createVariable('time_nadir', 'f8', (dimsample,))
        vtime_nadir.axis = "T"
        vtime_nadir.units = "seconds since {}".format(p.first_time)
        vtime_nadir.long_name = "Time at nadir"
        vtime_nadir.standard_name = "time"
        vtime_nadir.calendar = "gregorian"
        vlon = fid.createVariable('lon', 'f4', (dimsample, dimnbeam))
        vlon.axis = "X"
        vlon.long_name = "Longitude"
        vlon.standard_name = "longitude"
        vlon.units = "degrees_east"
        vlon_nadir = fid.createVariable('lon_nadir', 'f4', (dimsample,))
        vlon_nadir.axis = "X"
        vlon_nadir.long_name = "Longitude at nadir"
        vlon_nadir.standard_name = "longitude"
        vlon_nadir.units = "degrees_east"
        vlat = fid.createVariable('lat', 'f4', (dimsample, dimnbeam))
        vlat.axis = "Y"
        vlat.long_name = "Latitude"
        vlat.standard_name = "latitude"
        vlat.units = "degrees_north"
        vlat_nadir = fid.createVariable('lat_nadir', 'f4', (dimsample,))
        vlat_nadir.axis = "Y"
        vlat_nadir.long_name = "Latitude at nadir"
        vlat_nadir.standard_name = "latitude"
        vlat_nadir.units = "degrees_north"
        vx_al = fid.createVariable('x_al', 'f4', (dimsample, dimnbeam))
        vx_al.units = "km"
        vx_al.long_name = "Along track distance from the nadir"
        # vx_al_tot = fid.createVariable('x_al_total', 'f4', (dimsample,
        #                                dimnbeam))
        # vx_al_tot.units = "km"
        # vx_al_tot.long_name = "Along track distance from the beginning of "\
        #                       "the pass projected on nadir"
        vx_ac = fid.createVariable('x_ac', 'f4', (dimsample, dimnbeam))
        vx_ac.units = "km"
        vx_ac.long_name = "Across track distance from the nadir"
        vx_al_nadir = fid.createVariable('x_al_nadir', 'f4', (dimsample,))
        vx_al_nadir.units = "km"
        vx_al_nadir.long_name = "Nadir along track distance from the"\
                                "beginning of the cycle"
        vangle = fid.createVariable('angle', 'f8', (dimsample, dimnbeam))
        vangle.units = "rad"
        vangle.long_name = "Angle of the beam refered to the across track"\
                           " direction"
        vrangle = fid.createVariable('radial_angle', 'f8',
                                     (dimsample, dimnbeam))
        vrangle.units = "rad"
        vrangle.long_name = "Radial angle refered to (longitude towards east,"\
                            " latitude toward north)"
        # Beam index 0 holds the nadir track; beams 1..nbeam are stored in
        # the 2-D (sample, nbeam) variables at column i - 1.
        for i in range(nbeam + 1):
            if i == 0:
                vtime_nadir[:] = self.time[i][:nsample]
                vlon_nadir[:] = self.lon[i][:nsample]
                vlat_nadir[:] = self.lat[i][:nsample]
                vx_al_nadir[:] = self.x_al[i][:nsample]
            else:
                vtime[:, i - 1] = self.time[i][:nsample]
                vlon[:, i - 1] = self.lon[i][:nsample]
                vlat[:, i - 1] = self.lat[i][:nsample]
                vx_al[:, i - 1] = self.x_al[i][:nsample]
                vx_ac[:, i - 1] = self.x_ac[i][:nsample]
                vangle[:, i - 1] = self.beam_angle[i][:nsample]
                vrangle[:, i - 1] = self.radial_angle[i][:nsample]
        vcycle = fid.createVariable('cycle', 'f4', (dimcycle, ))
        valcycle = fid.createVariable('al_cycle', 'f4', (dimcycle, ))
        vtimeshift = fid.createVariable('timeshift', 'f4', (dimcycle, ))
        vcycle[:] = self.cycle
        vcycle.units = "second"
        vcycle.long_name = "seconds during a cycle"
        valcycle[:] = self.al_cycle
        valcycle.units = "km"
        valcycle.long_name = " Distance travelled during the pass"
        vtimeshift[:] = self.timeshift
        vtimeshift.units = "day"
        vtimeshift.long_name = "Shift time to match model time"
        vlistpos = fid.createVariable('beam_position', 'f4', (dimnbeam, ))
        vlistpos[:] = self.list_pos
        vlistpos.units = ""
        vlistpos.long_name = "Beam position"
        vlistangle = fid.createVariable('beam_angle', 'f8', (dimnbeam, ))
        vlistangle[:] = self.list_angle
        vlistangle.units = ""
        vlistangle.long_name = "Beam angle"
        vincl = fid.createVariable('inclination', 'f4', (dimsample, ))
        vincl.units = "rad"
        vincl.long_name = "Track inclination at nadir"
        # vincl[:] = numpy.array(self.angle)[0,:nsample]
        vincl[:] = self.angle[:nsample]
        fid.close()
        return None

    def write_data(self, p, outdata):  # **kwargs):
        '''Write SKIM data in output file file_output
        Dimensions are x_al (along track distance), x_ac (across
        track distance). \n
        Variables are longitude, latitude, index (file number),
        error-free radial velocity (velocity interpolated from the model and
        projected with the radial angle), selected
        errors (instrument, uss bias, radial uss) and velocity with errors. \n
        '''
        # NOTE(review): also reads self.ncycle and self.ipass, which are
        # not set in __init__ — presumably assigned by the caller.
        # - Open netcdf file in write mode
        fid = Dataset(self.file, 'w', format='NETCDF4_CLASSIC')
        global_attributes(fid, 'L2B')
        fid.description = "SKIM L2B from SKIMulator"
        try:
            fid.corresponding_grid = self.gridfile
        except:
            pass
        dateformat = '%Y-%m-%dT%H:%M:%SZ'
        time_model = datetime.datetime.strptime(p.first_time, '%Y-%m-%dT%H:%M:%SZ')
        # Convert the min/max fractional-day times to absolute dates for
        # the coverage attributes.
        mintime = numpy.nanmin(self.time)
        maxtime = numpy.nanmax(self.time)
        day = numpy.floor(mintime)
        seconds = (mintime - day) * 86400
        time0 = time_model + datetime.timedelta(day, seconds)
        fid.time_coverage_start = time0.strftime(format=dateformat)
        # p.date0+"YYYY-MM-DDThh:mmZ" #tim0 converted to format
        day = numpy.floor(maxtime)
        seconds = (maxtime - day) * 86400
        time0 = time_model + datetime.timedelta(day, seconds)
        fid.time_coverage_end = time0.strftime(format=dateformat)
        # p.date0 +"YYYY-MM-DDThh:mmZ" #tim0 converted to format
        fid.geospatial_lat_min = "{:.2f}".format(numpy.min(self.lat))
        fid.geospatial_lat_max = "{:.2f}".format(numpy.max(self.lat))
        fid.geospatial_lat_units = "degrees_north"
        fid.geospatial_lon_max = "{:.2f}".format(numpy.max(self.lon))
        fid.geospatial_lon_min = "{:.2f}".format(numpy.min(self.lon))
        fid.geospatial_lon_units = "degrees_east"
        fid.cycle = "{} th cycle".format(self.ncycle)
        fid.track = "{} th pass".format(self.ipass)
        dimsample = 'sample'
        fid.createDimension(dimsample, numpy.shape(self.lon[0])[0])
        # fid.createDimension('time_nadir', numpy.shape(self.lon)[0])
        dimcycle = 'cycle'
        fid.createDimension(dimcycle, 1)
        nbeam = len(p.list_pos)
        dimnbeam = 'nbeam'
        fid.createDimension(dimnbeam, nbeam)
        # - Create and write Variables
        vtime = fid.createVariable('time', 'f8', (dimsample, dimnbeam))
        vtime.axis = "T"
        vtime.units = "days since {}".format(p.first_time)
        vtime.long_name = "Time"
        vtime.standard_name = "time"
        vtime.calendar = "gregorian"
        vtime_nadir = fid.createVariable('time_nadir', 'f8', (dimsample,))
        vtime_nadir.axis = "T"
        vtime_nadir.units = "days since {}".format(p.first_time)
        vtime_nadir.long_name = "Time at nadir"
        vtime_nadir.standard_name = "time"
        vtime_nadir.calendar = "gregorian"
        vlon = fid.createVariable('lon', 'f4', (dimsample, dimnbeam))
        vlon.axis = "X"
        vlon.long_name = "Longitude"
        vlon.standard_name = "longitude"
        vlon.units = "degrees_east"
        vlon_nadir = fid.createVariable('lon_nadir', 'f4', (dimsample,))
        vlon_nadir.axis = "X"
        vlon_nadir.long_name = "Longitude at nadir"
        vlon_nadir.standard_name = "longitude"
        vlon_nadir.units = "degrees_east"
        vlat = fid.createVariable('lat', 'f4', (dimsample, dimnbeam))
        vlat.axis = "Y"
        vlat.long_name = "Latitude"
        vlat.standard_name = "latitude"
        vlat.units = "degrees_north"
        vlat_nadir = fid.createVariable('lat_nadir', 'f4', (dimsample,))
        vlat_nadir.axis = "Y"
        vlat_nadir.long_name = "Latitude at nadir"
        vlat_nadir.standard_name = "latitude"
        vlat_nadir.units = "degrees_north"
        # Beam 0 is the nadir track; beams 1..nbeam fill the 2-D variables.
        for i in range(nbeam + 1):
            if i == 0:
                vtime_nadir[:] = self.time[i][:]
                vlon_nadir[:] = self.lon[i][:]
                vlat_nadir[:] = self.lat[i][:]
            else:
                vtime[:, i - 1] = self.time[i][:]
                vlon[:, i - 1] = self.lon[i][:]
                vlat[:, i - 1] = self.lat[i][:]
        # Human-readable long_name per output key (fallback: the key).
        # NOTE(review): the "sigma0" key is duplicated below (the later
        # entry wins), and "dsigm-atm"/"yaw_oacs" here do not match the
        # "disgm-atm"/"yaw_aocs" spellings in the unit dict — verify.
        longname = {"sigma0": "sigma0",
                    "ur_true": "Radial velocity interpolated from model",
                    "ucur": "Zonal velocity interpolated from model",
                    "vcur": "Meridional velocity interpolated from model",
                    "ur_obs": "Observed radial velocity (Ur_model+errors)",
                    "index": "Equivalent model output number in list of file",
                    "ur_uss": "Stokes drift radial velocity bias",
                    "uwd": "True Radial Wave doppler componant",
                    "uwd_est": "Estimated radial wave doppler componant",
                    "uwnd": "Eastward wind at 10m ",
                    "vwnd": "Northward wind at 10m ",
                    "altimeter": "Nadir error",
                    "wlv": "SSH interpolated from model",
                    "ssh_obs": "Observed SSH",
                    "ice": "Sea ice concentration",
                    "uuss": "Meridional Stokes drift",
                    "vuss": "Zonal Stokes drift",
                    "instr": "instrumental error velocity",
                    "radial_angle": "Azimuthal angle, trigonometric from East",
                    "sigma0" : "NRCS",
                    "ussr": "Radial Stokes drift",
                    "ussr_est": "Reconstructed radial Stokes drift",
                    "dsigma0": "Velocity due to the azimuthal sigma0 gradient",
                    "dsigm-atm": "Velocity due to atmospheric sigma0 gradient",
                    "yaw_oacs": "Velocity due to uncertainty in gyro",
                    "yaw_ted": "Velocity due to TED",
                    "yaw": "Total velocity due to mispointing",
                    "yaw_rem": "Remaining yaw after offline correction",
                    }
        # Units per output key (fallback: empty string).
        unit = {"sigma0": "", "ur_true": "m/s", "ur_obs": "m/s",
                "index": " ", "ur_uss": "m/s", "uwnd": "m/s",
                "vwnd": "m/s", "uwd": "m/s", "ucur": "m/s",
                "vcur": "m/s", "ssh_obs": "m", "wlv": "m",
                "altimeter": "m", "ssh_obs":"m", "uwd_est": "m/s",
                "uuss": "m/s", "vuss": "m/s", "radial_angle": "rad",
                "ussr": "m/s", "ussr_est": "m/s", "dsigma0": "m/s",
                "disgm-atm":"m/s", "yaw_aocs": "m/s", "yaw_ted": "m/s",
                "yaw": "m/s", "yaw_rem": "m/s"
                }
        # Variables only defined at nadir, and variables that also have a
        # nadir counterpart written as "<key>_nadir".
        list_nadir_only = ("altimeter", )
        list_nadir = ("ssh_true", "ssh_obs", "ice", "sigma0", "vindice",
                      "mssu", "mssc", "uwnd", "vwnd")
        for key, value in outdata.items():
            if numpy.shape(value)[1] > nbeam + 1:
                print(key, 'wrong value dimension', numpy.shape(value))
            if value is not None:
                if key in list_nadir or key in list_nadir_only:
                    if key not in list_nadir_only:
                        nvar_nadir = '{}_nadir'.format(key)
                    else:
                        nvar_nadir = key
                    var_nadir = fid.createVariable(nvar_nadir, 'f4',
                                                   (dimsample, ),
                                                   fill_value=-1.36e9)
                    try:
                        var_nadir.units = unit[str(key)]
                    except:
                        var_nadir.units = ''
                    try:
                        var_nadir.long_name = longname[str(key)]
                    except:
                        var_nadir.long_name = str(key)
                if key not in list_nadir_only:
                    nvar = '{}'.format(key)
                    if key == 'radial_angle':
                        ntype = 'f8'
                    else:
                        ntype = 'f4'
                    var = fid.createVariable(nvar, ntype, (dimsample, dimnbeam),
                                             fill_value=-1.36e9)
                    try:
                        var.units = unit[str(key)]
                    except:
                        var.units = ''
                    try:
                        var.long_name = longname[str(key)]
                    except:
                        var.long_name = str(key)
                # Replace NaN, out-of-range, infinite and model fill values
                # with the netCDF fill value before writing.
                # NOTE(review): mutates the caller's array in place, and
                # numpy.PINF was removed in NumPy 2.0 (use numpy.inf).
                mask = numpy.ma.getdata(numpy.isnan(value))
                value[mask] = -1.36e9
                mask_ind = numpy.where(value < -1e7)
                value[mask_ind] = -1.36e9
                mask_ind = numpy.where(value > 1e7)
                value[mask_ind] = -1.36e9
                mask_ind = numpy.where(value == numpy.PINF)
                value[mask_ind] = -1.36e9
                mask_ind = numpy.where(value == p.model_nan)
                value[mask_ind] = -1.36e9
                # Column 0 holds nadir data when nbeam + 1 columns are given.
                if numpy.shape(value)[1] == 1:
                    var_nadir[:] = value[:, 0]
                elif numpy.shape(value)[1] == nbeam:
                    var[:] = value[:, :]
                elif numpy.shape(value)[1] == nbeam + 1:
                    var[:] = value[:, 1:]
                    if key in list_nadir:
                        var_nadir[:] = value[:, 0]
                else:
                    print(key, 'wrong value dimension', numpy.shape(value))
        # TODO set range values
        fid.close()
        return None

    def load_swath(self, p, **kwargs):
        '''Load swath variables stored in Satellite grid file sgridfile. \n
        (longitude, latitude, number of days in a cycle, crossed distance
        during a cycle, time, along track and across track position).'''
        # - Open Netcdf file
        try:
            fid = Dataset(self.file, 'r')
        except IOError:
            logger.error('There was an error opening the file '
                         '{}'.format(self.file))
            sys.exit(1)
        # The stored grid hash check is currently disabled.
        if 'grid_params_hash' in fid.ncattrs():
            grid_params_hash = 0 #skimulator.grid_check.get_b64_gzipped_hash(p)
            #if fid.grid_params_hash != grid_params_hash:
            #    raise IncompatibleGridError(self.file, fid.grid_params_hash,
            #                                grid_params_hash)
        # fid = Dataset(self.file, 'r')
        time = []
        lon = []
        lat = []
        # cycle = []
        # x_al = []
        listvar = {'time': time, 'lon': lon, 'lat': lat, }
        self.lon = []
        self.lat = []
        self.time = []
        # - Read variables in listvar and return them
        # Each attribute becomes a list: element 0 is the nadir variable
        # ("<name>_nadir"), elements 1..n are the beam columns.
        for stringvar in listvar:
            var = fid.variables['{}{}'.format(stringvar, '_nadir')]
            listvar[stringvar].append(numpy.array(var[:]).squeeze())
            var = fid.variables[stringvar]
            for i in range(len(p.list_pos)):
                listvar[stringvar].append(numpy.array(var[:, i]).squeeze())
            setattr(self, stringvar, listvar[stringvar])
        # - Read variables in arguments
        for key, value in kwargs.items():
            var = fid.variables[key]
            value = numpy.array(fid.variables[key][:]).squeeze()
            # value[value == var.fill_value] = numpy.nan
            setattr(self, key, value)
        # Consistency check: the beam positions/angles stored in the grid
        # must match the current parameter file.
        self.pos = numpy.array(fid.variables['beam_position'][:])
        _dpos = numpy.abs(self.pos - numpy.array(p.list_pos,
                                                 dtype=numpy.float32))
        # NOTE(review): numpy.all(_dpos) > 0.0001 compares a boolean to a
        # float; the intent was probably numpy.any(_dpos > 0.0001) — verify.
        if (numpy.all(_dpos) > 0.0001):
            logger.error('List of beam positions has changed,'
                         ' reprocess the grids')
            sys.exit(1)
        self.angle = numpy.array(fid.variables['beam_angle'][:])
        _dangle = numpy.abs(self.angle - numpy.array(p.list_angle,
                                                     dtype=numpy.float32))
        # NOTE(review): same suspicious condition as above.
        if (numpy.all(_dangle) > 0.0001):
            logger.error('List of beam angles has changed,'
                         ' reprocess the grids')
            sys.exit(1)
        self.radial_angle = numpy.array(fid.variables['radial_angle'][:])
        # self.angle is overwritten here: it now holds the per-sample
        # 'angle' variable, no longer the beam_angle list read above.
        self.angle = numpy.array(fid.variables['angle'][:])
        try:
            self.corresponding_grid = fid.corresponding_grid
        except AttributeError:
            pass
        try:
            self.incl = numpy.array(fid.variables['inclination'][:]).squeeze()
        except:
            logger.info('inclination variable not found')
        fid.close()
        return None

    def load_data(self, p, list_var, **kwargs):
        '''Load swath variables stored in Satellite grid file sgridfile. \n
        (longitude, latitude, number of days in a cycle, crossed distance
        during a cycle, time, along track and across track position).'''
        # Reads each variable named in list_var (and in kwargs), stores it
        # both as an attribute on self and in the returned dict.
        # - Open Netcdf file
        try:
            fid = Dataset(self.file, 'r')
        except IOError:
            logger.error('There was an error opening the file '
                         '{}'.format(self.file))
            sys.exit(1)
        # fid = Dataset(self.file, 'r')
        time = []
        lon = []
        lat = []
        # cycle = []
        # x_al = []
        #listvar = {'time': time, 'lon': lon, 'lat': lat, }
        #self.lon = []
        #self.lat = []
        #self.time = []
        # - Read variables in listvar and return them
        #for stringvar in listvar:
        #    var = fid.variables['{}{}'.format(stringvar, '_nadir')]
        #    listvar[stringvar].append(numpy.array(var[:]).squeeze())
        #    var = fid.variables[stringvar]
        #    for i in range(len(p.list_pos)):
        #        listvar[stringvar].append(numpy.array(var[:, i]).squeeze())
        #    setattr(self, stringvar, listvar[stringvar])
        # - Read variables in arguments
        dic = {}
        for key in list_var:
            var = fid.variables[key]
            value = numpy.array(fid.variables[key][:]).squeeze()
            # value[value == var.fill_value] = numpy.nan
            setattr(self, key, value)
            dic[key] = value
        for key, value in kwargs.items():
            var = fid.variables[key]
            value = numpy.array(fid.variables[key][:]).squeeze()
            # value[value == var.fill_value] = numpy.nan
            setattr(self, key, value)
            dic[key] = value
        try:
            self.corresponding_grid = fid.corresponding_grid
        except AttributeError:
            pass
        fid.close()
        return dic
class NETCDF_MODEL():
    '''Class to read any netcdf data.\n
    USAGE is NETCDF_MODEL(file=name of file ,var= variable name,
    lon=variable longitude, lat=variable latitude, units=).\n
    Argument file is mandatory, arguments var, lon, lat
    are specified in params file. \n
    '''
    def __init__(self, p, ifile=None, list_input_var=None, lon=('longitude', ),
                 lat=('latitude', ), depth=0, time=0):
        """Store model file names, coordinate variable names and options.

        Parameters:
          p: parameter module; must provide list_input_var and may
             provide model_nan.
          ifile: list of input file prefixes.
          list_input_var: kept for interface compatibility; the list is
                          actually read from p.list_input_var.
          lon, lat: tuples of longitude / latitude variable names.
          depth, time: index of the depth / time slice to read.
        """
        if p.list_input_var is None:
            # A missing variable list is a fatal configuration error.
            # (The fallback dict that used to follow sys.exit() was
            # unreachable dead code and has been removed.)
            logger.error('Specify your list_input_var in parameter file')
            sys.exit(1)
        self.input_var_list = p.list_input_var
        self.input_var = {}
        self.numgrid = {}
        self.nlon = list(lon)
        self.nlat = list(lat)
        self.nfile = ifile
        self.depth = depth
        self.time = time
        # Value flagging invalid data in the model files (default 0).
        self.model_nan = getattr(p, 'model_nan', 0)
        p.model_nan = self.model_nan
        logger.debug('Nan Values {}, {}'.format(p.model_nan, self.model_nan))

    def read_var(self, p, ind_lon=None, index=None):
        '''Read variables from netcdf file \n
        Argument is index=index to load part of the variable.'''
        # Longitude reordering is disabled: ind_lon is forced to None
        # regardless of what the caller passes (kept for interface
        # compatibility).
        ind_lon = None
        for key, value in self.input_var_list.items():
            nfile0 = self.nfile[0]
            # Each entry maps key -> [netcdf variable name, file suffix,
            # optional grid number].
            _nfile = '{}{}.nc'.format(nfile0, value[1])
            if os.path.exists(_nfile):
                _tmp = read_var(_nfile, value[0], index=index, time=self.time,
                                depth=self.depth, model_nan=self.model_nan)
                self.input_var[key] = _tmp
                if ind_lon is not None:
                    self.input_var[key] = self.input_var[key][:, ind_lon]
                if len(value) == 3:
                    self.numgrid[key] = value[2]
                else:
                    self.numgrid[key] = 0
            else:
                logger.info('{} not found'.format(_nfile))
        # self.vvar[numpy.where(numpy.isnan(self.vvar))]=0
        return None

    def read_coordinates(self, p, index=None):
        '''Read coordinates from netcdf file \n
        Argument is index=index to load part of the variable.'''
        self.vlon = {}
        self.ind_lon = {}
        self.vlat = {}
        for ikey in range(len(list(self.nfile))):
            ifile = self.nfile[ikey]
            # Initialise before branching: the original code left ind_lon
            # unbound for irregular grids, raising UnboundLocalError below.
            ind_lon = None
            if p.grid == 'regular':
                lon, lat = read_coordinates(ifile, self.nlon[ikey],
                                            self.nlat[ikey], twoD=False)
                # Wrap longitudes into [0, 360[.
                lon = numpy.mod(lon + 360, 360)
                ind_lon = None  # numpy.argsort(lon)
            else:
                lon, lat = read_coordinates(ifile, self.nlon[ikey],
                                            self.nlat[ikey])
            self.vlat[ikey] = lat
            if ind_lon is not None:
                self.vlon[ikey] = lon[ind_lon]  # numpy.mod(lon + 360, 360)
                self.ind_lon[ikey] = ind_lon
            else:
                self.vlon[ikey] = lon
                self.ind_lon[ikey] = None
        return None

    def calc_box(self, p):
        '''Calculate subdomain coordinates from netcdf file
        Return minimum, maximum longitude and minimum, maximum latitude'''
        self.read_coordinates(p)
        # If the grid spans the full 0-360 range, recentre longitudes to
        # [-180, 180[ first (this mutates self.vlon[0] in place).
        if (numpy.min(self.vlon[0]) < 1.) and (numpy.max(self.vlon[0]) > 359.):
            _ind = numpy.where(self.vlon[0] > 180.)
            self.vlon[0][_ind] = self.vlon[0][_ind] - 360
            lon1 = (numpy.min(self.vlon[0]) + 360) % 360
            lon2 = (numpy.max(self.vlon[0]) + 360) % 360
        else:
            lon1 = numpy.min(self.vlon[0])
            lon2 = numpy.max(self.vlon[0])
        return [lon1, lon2, numpy.min(self.vlat[0]), numpy.max(self.vlat[0])]
class WW3():
    '''Reader for WW3 (WaveWatch III) netcdf model outputs.\n
    USAGE is WW3(p, ifile=name of file, lon=variable longitude,
    lat=variable latitude).\n
    Argument file is mandatory; the list of variables to read comes from
    the parameter module p (or a built-in default). \n
    '''
    def __init__(self, p, ifile=None, list_input_var=None, lon=('longitude',),
                 lat=('latitude',), depth=0, time=0):
        """Record file names, coordinate names and slicing options."""
        self.nlon = list(lon)
        self.nlat = list(lat)
        self.nfile = ifile
        self.depth = depth
        self.time = time
        if list_input_var is not None:
            # When a list is passed explicitly, the one from the parameter
            # module is used (historical behavior, kept as-is).
            self.input_var_list = p.list_input_var
        else:
            # Default mapping: key -> [netcdf variable, file suffix,
            # grid number].
            self.input_var_list = {'ucur': ['ucur', 'cur', 0],
                                   'vcur': ['vcur', 'cur', 0],
                                   'uuss': ['uuss', 'uss', 0],
                                   'vuss': ['vuss', 'uss', 0],
                                   'ice': ['ice', 'ice', 0],
                                   'mssd': ['mssd', 'msd', 0],
                                   'mssx': ['mssx', 'mss', 0],
                                   'mssy': ['mssy', 'mss', 0],
                                   'ssh': ['wlv', 'wlv', 0],
                                   'uwnd': ['uwnd', 'wnd', 0],
                                   'vwnd': ['vwnd', 'wnd', 0]}
        self.input_var = {}
        self.numgrid = {}
        # Value flagging invalid data in the model files (default 0.).
        self.model_nan = getattr(p, 'model_nan', 0.)
        p.model_nan = self.model_nan
        logger.debug('Nan Values {}, {}'.format(p.model_nan, self.model_nan))

    def read_var(self, p, ind_lon=None, index=None):
        '''Read variables from netcdf file \n
        Argument is index=index to load part of the variable.'''
        basename = self.nfile[0]
        for varname, spec in self.input_var_list.items():
            path = '{}{}.nc'.format(basename, spec[1])
            if not os.path.exists(path):
                logger.info('{} not found'.format(path))
                continue
            data = read_var(path, spec[0], index=index, time=self.time,
                            depth=self.depth, model_nan=self.model_nan)
            self.input_var[varname] = data
            # Optional third element of the spec gives the grid number.
            self.numgrid[varname] = spec[2] if len(spec) == 3 else 0
        return None

    def read_coordinates(self, p, index=None):
        '''Read coordinates from netcdf file \n
        Argument is index=index to load part of the variable.'''
        self.vlon = {}
        self.ind_lon = {}
        self.vlat = {}
        for num, fname in enumerate(self.nfile):
            if p.grid == 'regular':
                lon, lat = read_coordinates(fname, self.nlon[num],
                                            self.nlat[num], twoD=False)
            else:
                lon, lat = read_coordinates(fname, self.nlon[num],
                                            self.nlat[num])
            self.vlat[num] = lat
            self.ind_lon[num] = None
            # Wrap longitudes into [0, 360[.
            self.vlon[num] = (lon + 360) % 360
        return None

    def calc_box(self, p):
        '''Calculate subdomain coordinates from netcdf file
        Return minimum, maximum longitude and minimum, maximum latitude'''
        self.read_coordinates(p)
        lon0 = self.vlon[0]
        # If the grid spans the full 0-360 range, recentre longitudes to
        # [-180, 180[ first (this mutates self.vlon[0] in place).
        if (numpy.min(lon0) < 1.) and (numpy.max(lon0) > 359.):
            west = numpy.where(lon0 > 180.)
            lon0[west] = lon0[west] - 360
            lon_min = (numpy.min(lon0) + 360) % 360
            lon_max = (numpy.max(lon0) + 360) % 360
        else:
            lon_min = numpy.min(lon0)
            lon_max = numpy.max(lon0)
        return [lon_min, lon_max, numpy.min(self.vlat[0]),
                numpy.max(self.vlat[0])]
| 48,046 | 42.246625 | 83 | py |
skimulator | skimulator-master/skimulator/mod_tools.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
''' Spectral and algebra tools for SKIM simulator. \n
Contains the following functions:
- load_python_file: load and parse parameter file \n
- rotationmat3d: rotate data \n
- spher2cart: convert spherical to cartesian coordinates \n
- cart2spher: convert cartesian to spherical coordinates \n
- cart2spher: convert cartesian to spherical coordinates for vectors \n
- update_progress: Progress bar'''
import numpy
import math
import logging
import sys
import os
import types
import datetime
from . import const
# Define logger level for debug purposes
logger = logging.getLogger(__name__)
def load_python_file(file_path):
    """Import the Python file at *file_path* and return it as a module.

    The directory containing the file is appended to ``sys.path`` (once)
    so that the import machinery can locate it.

    Raises:
        IOError: if *file_path* does not exist.
    """
    if not os.path.exists(file_path):
        raise IOError('File not found: {}'.format(file_path))
    abs_path = os.path.abspath(file_path)
    mod_name = os.path.splitext(os.path.basename(abs_path))[0]
    mod_dir = os.path.dirname(abs_path)
    if mod_dir not in sys.path:
        sys.path.append(mod_dir)
    return __import__(mod_name, globals(), locals(), [], 0)
def initialize_parameters(p):
    """Fill in default values for every optional simulation parameter.

    Mutates the parameter module/object *p* in place so downstream code can
    rely on every attribute being present, then calls make_list_output to
    add the mandatory output variables.
    """
    p.shift_lon = getattr(p, 'shift_lon', None)
    p.shift_time = getattr(p, 'shift_time', None)
    p.timeshift = getattr(p, 'timeshift', 0)
    # A missing shift_time forces a zero timeshift (the original code
    # performed this reset twice; once after the getattr is sufficient).
    if p.shift_time is None:
        p.timeshift = 0
    model = getattr(p, 'model', 'NETCDF_MODEL')
    p.model = model
    p.model_nan = getattr(p, 'model_nan', 0)
    p.vel_factor = getattr(p, 'vel_factor', 1.)
    p.first_time = getattr(p, 'first_time', '2018-01-01T00:00:00Z')
    p.list_input_var = getattr(p, 'list_input_var', None)
    p.grid = getattr(p, 'grid', 'regular')
    p.cycle = getattr(p, 'cycle', 0.0368)
    p.order_orbit_col = getattr(p, 'order_orbit_col', None)
    p.satcycle = getattr(p, 'satcycle', None)
    p.sat_elev = getattr(p, 'sat_elev', None)
    p.ice_mask = getattr(p, 'ice_mask', True)
    p.vsat = getattr(p, 'vsat', const.vsat)
    # TODO Remove ?
    listo = ['wlv', 'ssh_obs', 'ur_true', 'ucur', 'vcur', 'uuss', 'vuss',
             'radial_angle', 'vwnd', 'mssx', 'mssy', 'mssxy', 'uwb',
             'vindice', 'ur_obs', 'mask', 'uwnd', 'sigma0', 'ice']
    p.list_output = getattr(p, 'list_output', listo)
    p.snr_coeff = getattr(p, 'snr_coeff', 3e-3)
    p.nadir = getattr(p, 'nadir', False)
    p.ice = getattr(p, 'ice', False)
    # Note the attribute rename: the user sets proc_number, code reads
    # proc_count.
    p.proc_count = getattr(p, 'proc_number', 1)
    p.progress_bar = getattr(p, 'progress_bar', True)
    p.ac_threshold = getattr(p, 'ac_threshold', 20)  # in km
    p.delta_al = getattr(p, 'delta_al', 2)
    #p.resol = getattr(p, 'resol_spatial_l2c', 40) # in km
    p.resol = getattr(p, 'resol', 40)  # in km
    p.posting = getattr(p, 'posting_l2c', 5)  # in km
    p.resol_spatial_l2d = getattr(p, 'resol_spatial_l2d', 50)  # in km
    p.resol_temporal_l2d = getattr(p, 'resol_temporal_l2d', 8)  # in days
    p.posting_l2d = getattr(p, 'posting_l2d', (0.1, 0.1))  # in degrees
    # Time domain: (start_time, end_time, dtime) in days
    p.time_domain = getattr(p, 'time_domain', (5, 25, 1))  # in days
    p.list_input_var_l2c = getattr(p, 'list_input_var_l2c', p.list_input_var)
    p.list_input_var_l2d = getattr(p, 'list_input_var_l2d', p.list_input_var)
    p.config_l2d = getattr(p, 'config_l2d', '')
    p.config_l2c = getattr(p, 'config_l2c', '')
    p.config = getattr(p, 'config', 'SKIM')
    p.rain = getattr(p, 'rain', False)
    p.rain_file = getattr(p, 'rain_file', None)
    p.rain_threshold = getattr(p, 'rain_threshold', 0.1)
    p.yaw_file = getattr(p, 'yaw_file', None)
    p.instr_configuration = getattr(p, 'instr_configuration', 'A')
    p.Rearth = const.Rearth
    p.type_satellite = getattr(p, 'type_satellite', 'skim')
    make_list_output(p)
    return None
def make_list_output(p):
    """Append the compulsory output variables to ``p.list_output``.

    The set of required keys depends on whether nadir data are simulated
    and on which noise components are activated in ``p.noise``; existing
    entries are never duplicated.
    """
    required_groups = [('ur_true', 'ur_obs', 'radial_angle', 'vindice',
                        'ur_obs')]
    if p.nadir is True:
        required_groups.append(('ssh_obs',))
    if 'Instrument' in p.noise:
        required_groups.append(('instr', 'mssxy', 'sigma0'))
    if 'WaveDoppler' in p.noise:
        required_groups.append(('mssc', 'mssu'))
    for group in required_groups:
        for key in group:
            if key not in p.list_output:
                p.list_output.append(key)
    return None
def check_path(p):
    """Validate directories and files declared in the parameters.

    Exits the program (status 1) when a mandatory path is missing; the
    output directory is created on the fly when absent.
    """
    if not os.path.isdir(p.dir_setup):
        logger.error('Data directory {} not found'.format(p.dir_setup))
        sys.exit(1)
    if p.file_input is not None and not os.path.isdir(p.indatadir):
        logger.error('Input directory {} not found'.format(p.indatadir))
        sys.exit(1)
    if not os.path.isdir(p.outdatadir):
        logger.warn('Output directory {} did not exist and was '
                    'created'.format(p.outdatadir))
        os.makedirs(p.outdatadir)
    filesat_path = os.path.join(p.dir_setup, p.filesat)
    if not os.path.isfile(filesat_path):
        logger.error('Orbit file {} not found'.format(filesat_path))
        sys.exit(1)
    if p.file_input is not None and not os.path.isfile(p.file_input):
        logger.error('Model file list {} not found'.format(p.file_input))
        sys.exit(1)
    return None
def rotationmat3D(theta, axis):
    ''' Creates a rotation matrix: Slow method. \n
    Inputs are rotation angle theta and rotation axis axis.
    The rotation matrix correspond to a rotation of angle theta
    with respect to axis axis. \n
    Return the rotation matrix.'''
    # Normalise the axis, then build the matrix from the unit quaternion
    # (a, b, c, d) associated with the rotation (Rodrigues formula).
    unit_axis = axis / math.sqrt(numpy.dot(axis, axis))
    half_angle = theta / 2.
    a = math.cos(half_angle)
    b, c, d = -unit_axis * math.sin(half_angle)
    row0 = [a*a + b*b - c*c - d*d, 2*(b*c - a*d), 2*(b*d + a*c)]
    row1 = [2*(b*c + a*d), a*a + c*c - b*b - d*d, 2*(c*d - a*b)]
    row2 = [2*(b*d - a*c), 2*(c*d + a*b), a*a + d*d - b*b - c*c]
    return numpy.array([row0, row1, row2])
def sign(a):
    """Return 1, -1 or 0 according to the sign of *a* (works elementwise
    on comparison-capable inputs, like the original boolean arithmetic)."""
    is_positive = a > 0
    is_negative = a < 0
    return is_positive - is_negative
def spher2cart(lon, lat):
    ''' Convert spherical coordinates to cartesian coordinates.\n
    Inputs are longitude, latitude. \n
    Return x, y, z'''
    # Work on the unit sphere: convert degrees to radians once, reuse the
    # common cos(lat) factor for x and y.
    lon_rad = lon * math.pi / 180.
    lat_rad = lat * math.pi / 180.
    cos_lat = numpy.cos(lat_rad)
    x = numpy.cos(lon_rad) * cos_lat
    y = numpy.sin(lon_rad) * cos_lat
    z = numpy.sin(lat_rad)
    return x, y, z
def cart2sphervect(x, y, z):
    ''' Convert cartesian coordinates to spherical coordinates. \n
    Inputs are cartesian coordinate arrays x, y, z. \n
    Return lon (degrees in [0, 360)), lat (degrees). '''
    norm = numpy.sqrt(x*x + y*y + z*z)
    lat = numpy.arcsin(z/norm) * 180./math.pi
    # BUGFIX: the original computed arctan(y/x) and patched the x < 0
    # quadrant by hand, which divided by zero (warning/nan) wherever
    # x == 0.  arctan2 resolves the quadrant and the x == 0 column
    # directly and yields the same angles elsewhere.
    lon = numpy.arctan2(y, x) % (2 * math.pi)
    lon = lon * 180 / math.pi
    return lon % 360, lat
def cart2spher(x, y, z):
    ''' Convert cartesian coordinates to spherical coordinates. \n
    Inputs are scalar cartesian coordinates x, y, z. \n
    Return lon (degrees in [0, 360)), lat (degrees). '''
    norm = numpy.sqrt(x*x + y*y + z*z)
    lat = numpy.arcsin(z/norm) * 180./math.pi
    # BUGFIX: the original evaluated arctan(y/x), which raised
    # ZeroDivisionError (or produced inf) for x == 0; arctan2 handles
    # every quadrant and the x == 0 case, matching the hand-patched
    # quadrant correction elsewhere.
    lon = numpy.arctan2(y, x) % (2 * math.pi)
    lon = lon * 180 / math.pi
    return lon % 360, lat
def todict(p):
    """Collect the plain attributes of *p* into a dict, skipping modules,
    functions, methods and dunder names."""
    skipped_types = (types.ModuleType, types.MethodType, types.FunctionType)
    out = {}
    for name in dir(p):
        if name.startswith('__'):
            continue
        value = getattr(p, name)
        if isinstance(value, skipped_types):
            continue
        out[name] = value
    return out
def fromdict(result):
    """Inverse of todict: build a parameter holder (a class named
    ``skimparam``) exposing each dict entry as an attribute."""
    return type('skimparam', (object,), result)
def update_progress_multiproc(status, info):
    """Creation of progress bar: print on screen progress of run, optimized
    for parrallelised tasks.

    ``status`` maps worker pid -> {'jobs': ..., 'done': int, 'total': int,
    'extra': str}; ``info`` is (pid, grid_name, cycle, error).  Returns
    False when the worker reported an error, True otherwise.
    """
    if info[3] is not None:
        # There has been an error
        return False
    pid = info[0]
    grid_name = info[1]
    # Derive a 3-character pass label either from the file name or the
    # integer pass number
    if isinstance(grid_name, str):
        ipass = grid_name[-6:-3]
    else:
        ipass = '{:03d}'.format(grid_name)
    cycle = info[2]
    count = len(status.keys())
    # Move the cursor back up over the previously drawn bars (one line
    # per worker) so they are redrawn in place
    sys.stdout.write(_term_move_up() * count)
    now = datetime.datetime.now().strftime('%H:%M:%S')
    # NOTE(review): this loop variable shadows the pid extracted from
    # info[0] above, and updates every worker whose job list contains
    # grid_name — presumably only one does; confirm.
    for pid in status:
        if grid_name in status[pid]['jobs']:
            if cycle is None:
                # Grid has been completely processed
                status[pid]['done'] += 1
                status[pid]['extra'] = '{}|> pass: {} ....... DONE'.format(
                    now, ipass)
            else:
                # Just update extra info
                status[pid]['extra'] = '{}|> pass: {}, cycle: {:04d}'.format(
                    now, ipass, cycle)
    bar_size = 20
    # Redraw one progress line per worker
    for pid, proc_state in status.items():
        done = math.floor(bar_size * proc_state['done'] / proc_state['total'])
        todo = bar_size - done
        proc_elems = ['\n[']
        proc_elems.extend(['#'] * int(math.ceil(done)))
        proc_elems.extend([' '] * int(math.ceil(todo)))
        proc_elems.extend(['] {}'.format(proc_state['extra'])])
        sys.stdout.write(''.join(proc_elems))
    sys.stdout.flush()
    return True
def update_progress(progress, arg1, arg2):
    '''Creation of a progress bar: print on screen the progress of the run'''
    bar_length = 20  # Modify this to change the length of the progress bar
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
        status = "error: progress var must be float\r\n"
    if progress < 0:
        progress = 0
        status = "Halt...\r\n"
    if progress >= 1:
        progress = 1
        status = "Done...\r\n"
    filled = int(round(bar_length * progress))
    bar = "#" * filled + "-" * (bar_length - filled)
    percent = "%.2f" % (progress * 100)
    # Up to two extra info strings are appended after the percentage
    if arg1 and arg2:
        text = "\r[{0}] {1}%, {2}, {3}".format(bar, percent,
                                               arg1 + ', ' + arg2, status)
    elif arg1:
        text = "\r[{0}] {1}%, {2}, {3}".format(bar, percent, arg1, status)
    else:
        text = "\r[{0}] {1}%, {2} ".format(bar, percent, status)
    sys.stdout.write(text)
    sys.stdout.flush()
    return progress
def _term_move_up(): # pragma: no cover
"""Borrowed from https://github.com/tqdm/tqdm/blob/master/tqdm/_tqdm.py
MIT 2016 (c) [PR #96] on behalf of Google Inc.
MIT 2013 (c) Noam Yorav-Raphael, original author."""
colorama = None
return '' if (os.name == 'nt') and (colorama is None) else '\x1b[A'
| 11,664 | 36.149682 | 134 | py |
skimulator | skimulator-master/skimulator/error/wet_troposphere.py | # Copyright (c) 2020 CNES/JPL
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Wet troposphere errors
----------------------
"""
from typing import Dict, List, Tuple
try:
import numba as nb
except ImportError:
nb = None
import numpy as np
import scipy.ndimage.filters
from . import utils
#from .. import F_KA, VOLUMETRIC_MEAN_RADIUS, CELERITY, BASELINE
def meshgrid(x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Build 2-D coordinate arrays from the 1-D vectors *x* and *y*
    (numba-friendly replacement for ``numpy.meshgrid``): row *i* of the
    first output repeats *x*, row *i* of the second is filled with
    ``y[i]``."""
    n_rows = y.size
    n_cols = x.size
    grid_x = np.empty((n_rows, n_cols))
    grid_y = np.empty((n_rows, n_cols))
    for row in range(n_rows):
        grid_x[row, :] = x
        grid_y[row, :] = y[row]
    return grid_x, grid_y
def calculate_path_delay_lr(beam_positions: List[float], sigma: float,
                            radio_r: np.ndarray, radio_l: np.ndarray,
                            x_al: np.ndarray, x_ac_large: np.ndarray,
                            wt_large: np.ndarray):
    """Path delay seen by the right and left radiometer beams.

    For each along-track position, average the wet-tropo field
    ``wt_large`` under a gaussian footprint of width ``sigma`` centred on
    each beam position, and add the radiometer noise ``radio_r`` /
    ``radio_l``.  Returns the (beam_r, beam_l) along-track series.
    """
    beam_r = np.empty((x_al.shape[0], ))
    beam_l = np.empty((x_al.shape[0], ))
    # Find righ and leftacross track indices in the gaussian
    # footprint of 2.*p.sigma
    ind_r = x_ac_large + beam_positions[1]
    indac_r = np.where((ind_r < 2 * sigma) & (ind_r > -2 * sigma))[0]
    ind_l = x_ac_large + beam_positions[0]
    indac_l = np.where((ind_l < 2 * sigma) & (ind_l > -2 * sigma))[0]
    # Normalisation of the 2-D gaussian weight
    factor = 1 / (2 * np.pi * sigma**2)
    for idx, xal in enumerate(x_al):
        # Find along track indices in the gaussian footprint
        # of 2.*p.sigma
        delta_x_al = x_al - xal
        indal = np.where((delta_x_al <= (2 * sigma))
                         & (delta_x_al > (-2 * sigma)))[0]
        slice_al = slice(indal[0], indal[-1] + 1)
        slice_acr = slice(indac_r[0], indac_r[-1] + 1)
        slice_acl = slice(indac_l[0], indac_l[-1] + 1)
        x, y = _meshgrid(x_ac_large[slice_acr], x_al[slice_al] - xal)
        # Compute path delay on left and right gaussian footprint
        g = factor * np.exp(-(x**2 + y**2) / (2 * sigma**2))
        beam_r[idx] = np.sum(
            g * wt_large[slice_al, slice_acr]) / np.sum(g) + radio_r[idx]
        x, y = _meshgrid(x_ac_large[slice_acl], x_al[slice_al] - xal)
        g = factor * np.exp(-(x**2 + y**2) / (2 * sigma**2))
        beam_l[idx] = np.sum(
            g * wt_large[slice_al, slice_acl]) / np.sum(g) + radio_l[idx]
    return beam_r, beam_l
def calculate_path_delay(sigma: float, radio: np.ndarray, x_al: np.ndarray,
                         x_ac_large: np.ndarray, wt_large: np.ndarray):
    """Path delay seen by a single nadir radiometer beam.

    Same gaussian-footprint averaging as calculate_path_delay_lr, but for
    one beam centred across track; returns the along-track series.
    """
    beam = np.empty((x_al.shape[0], ))
    # Find across track indices in the gaussian footprint of
    # 2. * sigma
    indac = np.where((x_ac_large < 2 * sigma) & (x_ac_large > -2 * sigma))[0]
    # Normalisation of the 2-D gaussian weight
    factor = 1 / (2 * np.pi * sigma**2)
    for idx, xal in enumerate(x_al):
        delta_x_al = x_al[:] - xal
        indal = np.where((delta_x_al <= (2 * sigma))
                         & (delta_x_al > (-2 * sigma)))[0]
        slice_al = slice(indal[0], indal[-1] + 1)
        slice_ac = slice(indac[0], indac[-1] + 1)
        x, y = _meshgrid(x_ac_large[slice_ac], x_al[slice_al] - xal)
        # Compute path delay on gaussian footprint
        g = factor * np.exp(-(x**2 + y**2) / (2 * sigma**2))
        beam[idx] = np.sum(
            g * wt_large[slice_al, slice_ac]) / np.sum(g) + radio[idx]
    return beam
class WetTroposphere:
    """Wet troposphere errors

    Generates the residual wet-troposphere path-delay error after a 1- or
    2-beam radiometer correction.

    Args:
        parameters (settings.Parameters): Simulation settings
    """
    # Spectral-generation constants: spectrum slope, largest correlation
    # length (km) and highest generated frequency (cycle/km)
    ALPHA = 10
    LC_MAX = 500
    F_MAX = 0.05

    # BUGFIX: the annotation is quoted because ``settings`` is not imported
    # in this file (no ``from __future__ import annotations`` either), so
    # an unquoted ``settings.Parameters`` raised NameError as soon as the
    # class body was executed.
    def __init__(self, parameters: "settings.Parameters") -> None:
        # Store the generation parameters of the random signal.
        self.beam_positions = parameters.beam_position
        self.delta_ac = parameters.delta_ac
        self.delta_al = parameters.delta_al
        self.len_repeat = parameters.len_repeat
        self.nbeam = parameters.nbeam
        self.nseed = parameters.nseed + 4
        self.sigma = parameters.sigma
        # TODO
        # NOTE(review): F_KA, CELERITY, BASELINE and VOLUMETRIC_MEAN_RADIUS
        # come from the ``from .. import ...`` statement that is commented
        # out at the top of this file; they are currently undefined and
        # this line raises NameError — confirm the import should be
        # restored.
        self.conversion_factor = (
            1 / (F_KA * 2 * np.pi / CELERITY * BASELINE) *
            (1 + (parameters.height * 1e-3) / VOLUMETRIC_MEAN_RADIUS) * np.pi /
            180 * 1e3)
        # Define power spectrum of error in path delay due to wet tropo
        freq = np.arange(1 / 3000, 1 / (2 * self.delta_al), 1 / 3000)
        # Global mean wet tropo power spectrum in cm**2/(cycle/km)
        # for L >= 100 km
        pswt = 3.156 * 1e-05 * freq**(-8 / 3)
        # Wet tropo power spectrum in cm**2/(cycle/km) for L < 100 km
        mask = freq > 1e-2
        pswt[mask] = 1.4875 * 1e-4 * freq[mask]**(-2.33)
        self.pswt = pswt
        self.freq = freq
        self.fminx = 1 / self.len_repeat
        self.ps2d, self.f = utils.gen_ps2d(freq,
                                           pswt,
                                           fminx=self.fminx,
                                           fminy=1 / self.LC_MAX,
                                           fmax=self.F_MAX,
                                           alpha=self.ALPHA,
                                           lf_extpl=True,
                                           hf_extpl=True)

    def _radiometer_error(self,
                          x_al: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Random 1-D radiometer noise (in m) for the right and left beams
        along track, drawn from the empirical radiometer power spectrum."""
        # Define radiometer error power spectrum for a beam
        # High frequencies are cut to filter the associated error:
        # during the reconstruction of the wet trop signal
        psradio = 9.5 * 1e-5 * self.freq**-1.79
        psradio[self.freq < 1e-3] = 9.5 * 1e-5 * (1e-3)**-1.79
        mask = (self.freq > 0.0023) & (self.freq <= 0.0683)
        psradio[mask] = 0.036 * self.freq[mask]**-0.814
        psradio[self.freq > 0.0683] = 0.32
        # Compute random coefficients (1D) for the radiometer error
        # power spectrum for right and left beams
        hrad = utils.gen_signal_1d(self.freq,
                                   psradio,
                                   x_al,
                                   fmin=1 / self.len_repeat,
                                   fmax=1 / (2 * self.delta_al),
                                   alpha=10,
                                   nseed=self.nseed + 100,
                                   hf_extpl=True,
                                   lf_extpl=True)
        # cm -> m
        radio_r = hrad * 1e-2
        hrad = utils.gen_signal_1d(self.freq,
                                   psradio,
                                   x_al,
                                   fmin=1 / self.len_repeat,
                                   fmax=1 / (2 * self.delta_al),
                                   alpha=10,
                                   nseed=self.nseed + 200,
                                   hf_extpl=True,
                                   lf_extpl=True)
        radio_l = hrad * 1e-2
        return radio_r, radio_l

    def generate(self, x_al: np.array,
                 x_ac: np.array) -> Dict[str, np.ndarray]:
        """
        Generate wet troposphere errors

        Args:
            x_al (numpy.ndarray): Along track distance
            x_ac (numpy.ndarray): Across track distance
        Returns:
            dict: variable name and errors simulated.
        Raises:
            ValueError: if the configured number of radiometer beams is
                not 1 or 2.
        """
        num_lines = x_al.shape[0]
        num_pixels = x_ac.shape[0]
        # Initialization of radiometer error in right and left beam
        radio_r, radio_l = self._radiometer_error(x_al)
        # Initialization of swath matrices and large swath matrices (which
        # include wet tropo data around the nadir and outside the swath)
        start_x = -2 * self.sigma / self.delta_ac + x_ac[0]
        stop_x = (2 * self.sigma / self.delta_ac + x_ac[-1] + self.delta_ac)
        # x_ac_large and wt_large are necessary to compute the gaussian
        # footprint of a beam on the nadir or near the edge of the swath
        x_ac_large = np.arange(start_x, stop_x, self.delta_ac)
        naclarge = np.shape(x_ac_large)[0]
        # Compute path delay error due to wet tropo and radiometer error
        # using random coefficient initialized with power spectrums
        wt = utils.gen_signal_2d_rectangle(self.ps2d,
                                           self.f,
                                           x_al,
                                           x_ac,
                                           fminx=self.fminx,
                                           fminy=1 / self.LC_MAX,
                                           fmax=self.F_MAX,
                                           alpha=self.ALPHA,
                                           nseed=self.nseed)
        wt = wt.T * 1e-2
        wt_large = utils.gen_signal_2d_rectangle(self.ps2d,
                                                 self.f,
                                                 x_al,
                                                 x_ac_large,
                                                 fminx=self.fminx,
                                                 fminy=1 / self.LC_MAX,
                                                 fmax=self.F_MAX,
                                                 alpha=self.ALPHA,
                                                 nseed=self.nseed)
        wt_large = wt_large.T * 1e-2
        # Compute Residual path delay error after a 1-beam radiometer
        # correction
        if self.nbeam == 1:
            beam = _calculate_path_delay(self.sigma, radio_l, x_al, x_ac_large,
                                         wt_large)
            # Filtering beam signal to cut frequencies higher than 125 km
            beam = scipy.ndimage.filters.gaussian_filter(
                beam, 30. / self.delta_al)
            beam2d = np.vstack(num_pixels * (beam, )).T
            # Compute residual path delay
            wet_tropo = wt - beam2d
            wet_tropo_nadir = wt_large[:, naclarge // 2] - beam
        # Compute Residual path delay error after a 2-beams radiometer
        # correction
        elif self.nbeam == 2:
            beam_r, beam_l = _calculate_path_delay_lr(self.beam_positions,
                                                      self.sigma, radio_r,
                                                      radio_l, x_al,
                                                      x_ac_large, wt_large)
            # Filtering beam signal to cut frequencies higher than 125 km
            beam_r = scipy.ndimage.filters.gaussian_filter(
                beam_r, 30 / self.delta_al)
            beam_l = scipy.ndimage.filters.gaussian_filter(
                beam_l, 30 / self.delta_al)
            # Compute residual path delay (linear combination of left
            # and right path delay)
            polyfit = np.polynomial.polynomial.polyfit
            pol = polyfit([self.beam_positions[0], self.beam_positions[1]],
                          [beam_l, beam_r], 1)
            beam = (np.array(num_pixels * [pol[0]]).T +
                    np.array(num_lines * [x_ac]) *
                    np.array(num_pixels * [pol[1]]).T)
            wet_tropo = wt - beam
            wet_tropo_nadir = wt_large[:, naclarge //
                                       2] - beam[:, num_pixels // 2]
        else:
            raise ValueError("nbeam must be in [1, 2]")
        # wt_nadir = wt_large[:, naclarge // 2]
        return {
            "wet_troposphere": wet_tropo,
            "wet_troposphere_nadir": wet_tropo_nadir
        }
# Bind the names used by WetTroposphere.generate to the numba-compiled
# kernels when numba is importable; otherwise fall back to the pure-python
# implementations (identical results, slower).
if nb is not None:
    _calculate_path_delay = nb.njit(cache=True)(calculate_path_delay)
    _calculate_path_delay_lr = nb.njit(cache=True)(calculate_path_delay_lr)
    _meshgrid = nb.njit(cache=True)(meshgrid)
else:
    _calculate_path_delay = calculate_path_delay
    _calculate_path_delay_lr = calculate_path_delay_lr
    _meshgrid = meshgrid
| 12,414 | 41.958478 | 79 | py |
skimulator | skimulator-master/skimulator/error/rain.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import numpy
import pickle
from typing import Dict, Tuple
from scipy import interpolate
from . import utils
import logging
logger = logging.getLogger(__name__)
def compute_rain(time: float, sx_al: numpy.ndarray, sx_al_nadir: numpy.ndarray,
                 sx_ac: numpy.ndarray, dic: dict, nseed: int
                 )-> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray,
                           numpy.ndarray]:
    '''Pick a random precomputed rain scene for the hour of *time* and
    interpolate its rain rate and PIA onto the swath geometry.

    Args:
        time: time in days (fractional part selects the hour of day)
        sx_al: along-track distance of each beam sample, shape (n, nbeam)
        sx_al_nadir: along-track distance of the nadir samples, shape (n,)
        sx_ac: across-track distance of each beam sample, shape (n, nbeam)
        dic: rain scenes, keys 'xal', 'xac', 'rr', 'pia' -> {hour: [scene]}
        nseed: random seed used to pick the scene (reproducible)
    Returns:
        (rain on swath, rain at nadir, pia on swath, pia at nadir);
        nadir arrays are currently zero-filled.
    '''
    hour = int((time - numpy.floor(time))*24)
    size_dic = len(dic['xal'][hour])
    numpy.random.seed(nseed)
    rr_ind = int(numpy.random.random_sample() * size_dic)
    xal = dic['xal'][hour][rr_ind]
    var = dic['rr'][hour][rr_ind]
    var2 = dic['pia'][hour][rr_ind]
    xac = dic['xac'][hour][rr_ind]
    # BUGFIX: the original referenced the undefined names ``sgrid`` and
    # ``x_al`` (NameError on every call); the swath along-track distances
    # are the ``sx_al`` argument.
    x_al_g_tot = + sx_al
    for i in range(numpy.shape(sx_al)[1]):
        x_al_g_tot[:, i] = sx_al[:, i] + sx_al_nadir
    # Wrap the swath distances into the along-track extent of the scene
    xal_g = numpy.mod(x_al_g_tot - numpy.min(x_al_g_tot), numpy.max(xal))
    interp = interpolate.RectBivariateSpline
    # Interpolate the nan-mask separately so interpolated values touched
    # by any nan can be re-masked afterwards
    _Teval = interp(xal, xac, numpy.isnan(var), kx=1, ky=1, s=0)
    Teval = _Teval.ev(xal_g, sx_ac)
    # Trick to avoid nan in interpolation
    var_mask = + var
    var_mask[numpy.isnan(var_mask)] = 0.
    var2_mask = + var2
    var2_mask[numpy.isnan(var2_mask)] = 0.
    # Interpolate variable
    _var_out = interp(xal, xac, var_mask, kx=1, ky=1, s=0)
    var_out = _var_out.ev(xal_g, sx_ac)
    _var2_out = interp(xal, xac, var2_mask, kx=1, ky=1, s=0)
    var2_out = _var2_out.ev(xal_g, sx_ac)
    # Mask variable with Teval
    var2_out[Teval > 0] = numpy.nan
    var_out[Teval > 0] = numpy.nan
    xal_n = numpy.mod(sx_al_nadir - numpy.min(sx_al_nadir),
                      numpy.max(xal))
    # Nadir rain / pia are currently not simulated (zeros)
    var_nad = numpy.zeros(numpy.shape(xal_n))
    return var_out, var_nad, var2_out, var_nad
def load_rain(rain_file: str) -> dict:
    '''Unpickle and return the rain-scene dictionary stored in
    *rain_file*.'''
    with open(rain_file, 'rb') as handler:
        rain_dic = pickle.load(handler)
    return rain_dic
class Rain():
    '''Class that simulates random rain and pia from pyo scenes for all
    beams.

    Args:
        parameters: simulation settings providing nseed, list_angle,
            rain_file and vsat.
    '''
    def __init__(self, parameters):
        # Load the precomputed rain scenes once; an empty dict disables
        # the rain simulation entirely (generate then returns {}).
        if parameters.rain_file is not None:
            self.rain_dic = load_rain(parameters.rain_file)
        else:
            self.rain_dic = {}
        # Offset the seed so this noise source is decorrelated from others
        self.nseed = parameters.nseed + 3
        self.beam_angle = parameters.list_angle
        self.nbeam = len(self.beam_angle)
        self.vsat = parameters.vsat

    def generate(self, time: numpy.ndarray, x_al: numpy.ndarray,
                 x_al_nadir: numpy.ndarray, x_ac: numpy.ndarray,
                 ac_angle: numpy.ndarray) -> Dict[str, numpy.ndarray]:
        ''' Generate rain and gpia for an ensemble of beams as well as nadir

        Args:
            time: time # TODO check format
            x_al: along track distance from nadir
            x_al_nadir: along track distance since beginning of pass
            x_ac: across track distance from nadir
            ac_angle: Angle from the across track direction
        Returns:
            dict with 'rain' and 'gsig_atm_err' arrays (nadir column
            first, then one column per beam), or {} when no rain scenes
            were loaded.
        '''
        mean_time = numpy.mean(time)
        rain_all = []
        gpia_err_all = []
        if self.rain_dic:
            rain, rain_nad, gpia, gpia_nad = compute_rain(mean_time, x_al,
                                                          x_al_nadir, x_ac,
                                                          self.rain_dic,
                                                          self.nseed)
            rain_all.append(rain_nad)
            gpia_err_all.append(gpia_nad)
            for i in range(self.nbeam):
                # BUGFIX: the attribute set in __init__ is ``beam_angle``;
                # the original read ``self.list_angle`` which does not
                # exist and raised AttributeError.
                gpia_err = utils.convert_dbkm2ms(gpia, ac_angle[i],
                                                 self.beam_angle[i],
                                                 self.vsat)
                rain_all.append(rain[:, i])
                gpia_err_all.append(gpia_err)
            rain_all = numpy.transpose(numpy.array(rain_all))
            gpia_err_all = numpy.transpose(numpy.array(gpia_err_all))
            return {'rain': rain_all, 'gsig_atm_err': gpia_err_all}
        else:
            return {}
def mask(var: numpy.ndarray, rain: numpy.ndarray, gpia: numpy.ndarray,
         rain_thresh: float) -> numpy.ndarray:
    '''Blank out (set to nan, in place) the samples where the rain rate
    exceeds *rain_thresh* or where |gpia| exceeds 1; returns *var*.'''
    gpia_thresh = 1
    rainy = rain > rain_thresh
    attenuated = abs(gpia) > gpia_thresh
    var[rainy] = numpy.nan
    var[attenuated] = numpy.nan
    return var
| 4,964 | 38.094488 | 79 | py |
skimulator | skimulator-master/skimulator/error/instrument.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
import logging
from typing import Dict
logger = logging.getLogger(__name__)
def instr_stream(radial_angle: numpy.ndarray, ac_angle: numpy.ndarray,
                 wind_norm: numpy.ndarray, wind_dir:numpy.ndarray,
                 cycle: float)-> numpy.ndarray:
    """Per-sample instrumental noise std for the STREAM concept,
    parametrised from the NRCS inferred from wind speed/direction and
    from the along-track viewing geometry."""
    # - Compute NRCS from wind:
    rel_dir = radial_angle - wind_dir
    nrcs = (-47.5 + 17.5 * (wind_norm - 1)**0.3
            - numpy.sqrt(2) / 2 * (1 - abs(numpy.cos(2 * rel_dir)))
            * (4 + 0.1 * wind_norm))
    # NRCS at 0db is -29 db:
    snr = nrcs + 29
    # Keep the raw SNR: exact zeros select a dedicated formula below,
    # but are replaced by 1 to keep the generic formula well defined.
    snr_raw = + snr
    snr[snr == 0] = 1
    exponent = 24 / (snr * 3.7 + 12)
    al_angle = ac_angle + numpy.pi / 2
    sin_al = abs(numpy.sin(al_angle))
    damping = 1 - numpy.tanh(snr / 6)
    # Generic low/medium-SNR parametrisation (result in m/s, the fitted
    # coefficients are in cm/s, hence the 10**(-2))
    instr = (sin_al * (10 + 4 * numpy.tanh(snr / 6))
             + (4 + 12 * damping) * sin_al**exponent
             + 17 * damping) * 10**(-2)
    # High-SNR branch
    high = snr > 19
    instr[high] = (14.5 * sin_al[high]
                   + 4 * sin_al[high]**0.2
                   + 0.4) * 10**(-2)
    # Zero-SNR branch
    zero = snr_raw == 0
    instr[zero] = (sin_al[zero] * 10
                   + 16 * sin_al[zero]**2
                   + 17) * 10**(-2)
    # - Knowledge of HH on top of VV will decrease error (about 20%)
    # Formulaes have been computed for 64 pulses, rms_instr factor will adapt
    # noise if number of pulses changes (sqrt(factor_of_pulses))
    instr = 0.8 * instr * numpy.sqrt(64 / 12000 / 2 / cycle)
    return instr
def instr_skim_old(file_rms_instr: str, radial_angle: numpy.ndarray,
                   rms_instr_factor:float):
    '''Legacy SKIM instrumental noise: interpolate a tabulated rms(theta)
    law at each radial look angle.

    Args:
        file_rms_instr: two-column text file (theta in degrees, rms in cm),
            expected to have a constant theta step
        radial_angle: radial look angles in radians, referenced to East
        rms_instr_factor: multiplicative tuning factor
    Returns:
        instrumental rms per sample (the tabulated values are in cm,
        hence the final 10**(-2) conversion)
    '''
    import sys  # local import: ``sys`` is not imported at module level
    # - Load instrumental rms file into dictionnary
    theta, rms = numpy.loadtxt(file_rms_instr, usecols=(0, 1), unpack=True)
    dic_rms = {}
    with open(file_rms_instr) as f:
        for line in f:
            (key, val) = line.split()
            dic_rms[int(key)] = float(val)
    # - Compute rms corresponding to each radial angle
    # Refer radial angle to along track direction
    # BUGFIX: ``math`` is not imported in this module (math.pi raised
    # NameError); numpy.pi is identical.
    radial_angle_along = numpy.degrees((radial_angle + numpy.pi / 2)
                                       % numpy.pi)
    # Interpolate radial_angle with theta values
    step = theta[1]-theta[0]
    if((theta[1:]-theta[:-1]) != step).any():
        logger.error('instrumental rms file noise has not a constant step')
        sys.exit(1)
    thetaprev = numpy.floor(radial_angle_along/step)
    # NOTE(review): for step != 1 this looks like it should be
    # ``radial_angle_along - thetaprev * step`` and the two interpolation
    # weights below look swapped; kept as-is to preserve the historical
    # behaviour of this legacy routine (exact for step == 1, symmetric
    # tables).
    fac = radial_angle_along - thetaprev
    rms_theta = radial_angle * 0
    i = 0
    for itheta, ifac in zip(thetaprev, fac):
        keytheta = list(dic_rms.keys())[int(itheta)]
        keythetanext = list(dic_rms.keys())[int(itheta + step) % 180]
        rms_theta[i] = (dic_rms[keytheta] * ifac / step
                        + (step - ifac) / step * dic_rms[keythetanext])
        i += 1
    # - Errors array are the same size as the swath size
    instr = rms_theta * rms_instr_factor * 10**(-2)
    return instr
def instr_skim(beam_angle:float, ac_angle: numpy.ndarray, sigma0: numpy.array,
               cycle: float, instr_configuration: str)-> numpy.ndarray:
    '''SKIM instrumental noise std per sample.

    Args:
        beam_angle: beam incidence angle in degrees, must be 6 or 12
        ac_angle: angle from the across-track direction (radians)
        sigma0: backscatter coefficient per sample (linear units)
        cycle: cycle length in seconds (formulas fitted for 0.0368 s)
        instr_configuration: 'A' or 'B' coefficient set
    Returns:
        noise std per sample; nan where sigma0 is essentially zero
    Raises:
        ValueError: for an unsupported beam angle
    '''
    if beam_angle == 12:
        if instr_configuration == 'A':
            _co = (-15.998, 0.657, -4.174, 15.260)
        if instr_configuration == 'B':
            _co = (16.409, 0.963, -6.232, 2.823)
        coeff = _co[0] * numpy.sin(ac_angle * _co[1] + _co[2])**2 + _co[3]
        sigma_ref = 10**(0.66)
    elif beam_angle == 6:
        if instr_configuration == 'A':
            _co = (17.272, -1.105, -2.977, 6.108)
        if instr_configuration == 'B':
            _co = (12.299, 1.667, -1.045, 14.071)
        coeff = _co[0] * numpy.sin(ac_angle * _co[1] + _co[2]) + _co[3]
        #igma_ref = 9.9
        sigma_ref = 10**(0.99)
    else:
        logger.error('Unknown instrumental parametrisation for {}'
                     ' angle'.format(beam_angle))
        # BUGFIX: ``coeff`` was left undefined here, which surfaced as a
        # confusing NameError below; fail explicitly instead.
        raise ValueError('Unknown instrumental parametrisation for {}'
                         ' angle'.format(beam_angle))
    # Scale the fitted coefficients (percent) by the sigma0 ratio
    coeff_random = (coeff * 10**(-2) * sigma0 / sigma_ref)
    # Formulas were fitted for a 0.0368 s cycle; rescale faster cycles
    if cycle < 0.0368:
        coeff_random = coeff_random * numpy.sqrt(0.0368/cycle)
    # No usable signal where sigma0 is essentially zero
    coeff_random[sigma0 < 10**-5] = numpy.nan
    return coeff_random
class Instrument():
    '''Class instrument defines the instrumental error for STREAM or SKIM
    concepts

    Args:
        parameters: Simulation settings (nseed, type_satellite,
                    instr_configuration and cycle parameters)
    '''
    def __init__(self, parameters):
        # Offset the seed so this noise source is decorrelated from others
        self.nseed = parameters.nseed + 1
        self.type_satellite = parameters.type_satellite
        self.cycle = parameters.cycle
        self.instr_configuration = parameters.instr_configuration
        self.beam_angle = parameters.list_angle

    def generate(self, radial_angle: numpy.array, angle: numpy.array,
                 wind_norm: numpy.array, wind_dir: numpy.array,
                 sigma0: numpy.array) -> Dict[str, numpy.ndarray]:
        ''' Build errors corresponding to the instrument on the satellite
        swath.

        Args:
            radial_angle: Angle refered to the East
            angle: Angle refered to the across track direction
            wind_norm: wind velocity
            wind_dir: wind direction refered to the East
            sigma0: backscatter coefficient, one column per beam
        Returns:
            dict with key "instr": a gaussian noise realisation whose
            local std follows the instrumental parametrisation, shape
            (nsample, nbeam).
        '''
        import sys  # BUGFIX: ``sys`` is used below but not imported at module level
        instr_all = []
        for b in range(len(self.beam_angle)):
            if self.type_satellite == 'stream':
                instr = instr_stream(radial_angle[:, b], angle[:, b],
                                     wind_norm[:, b], wind_dir[:, b],
                                     self.cycle)
            elif self.type_satellite == 'skim':
                instr = instr_skim(self.beam_angle[b], angle[:, b],
                                   sigma0[:, b],
                                   self.cycle, self.instr_configuration)
            else:
                logger.error(f'Unknown concept {self.type_satellite}')
                sys.exit(1)
            # Seed to make reproducible noise
            # NOTE(review): the same seed is reset for every beam, so all
            # beams draw the same underlying noise pattern (scaled by
            # their own std) — confirm this is intended.
            numpy.random.seed(self.nseed)
            error = numpy.random.normal(0.0, abs(instr), numpy.shape(instr)[0])
            instr_all.append(error)
        return{"instr": numpy.transpose(numpy.array(instr_all))}
| 6,800 | 39.96988 | 79 | py |
skimulator | skimulator-master/skimulator/error/dsigma.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
""" Error due to Sigma gradient within a beam simulation"""
import sys
import numpy
from typing import Dict, Tuple
from . import utils
import logging
logger = logging.getLogger(__name__)
def compute_pdf_dsigma(beam_angle: float)-> Tuple[numpy.ndarray, numpy.ndarray]:
    """Build the stretched pdf of the relative sigma0 fluctuation within a
    footprint, derived from AltiKa observations.

    Args:
        beam_angle: Beam incidence angle in degrees.

    Returns:
        tuple: (xe, stretched_pdf) where xe is the 1001-point support of the
        relative fluctuation (from -0.02 to 0.02) and stretched_pdf has one
        column per AltiKa sigma0 bin (7..14 dB).
    """
    inc = numpy.deg2rad(beam_angle)
    # AltiKa sigma0 bins [dB], their linear value and the associated
    # mean square slope (mss) for an effective reflectivity R.
    sig_altika = numpy.arange(7, 15)
    siglin = 10.**(sig_altika / 10)
    R = 0.55
    mss = R**2 / siglin
    dmssdsig0 = -R**2 / siglin**2
    xe = numpy.linspace(-0.02, 0.02, 1001)
    # Parameters of the pdf model fitted on AltiKa data.
    a = 19.5
    nu = 4.635 * numpy.exp(-0.03 * (sig_altika - 12)**2)
    nu_m = numpy.array([nu, ]* len(xe))
    c = 0.000045 / (10**((sig_altika + 1.5)**2 / 100)) + 4.9e-7 * sig_altika
    c_m = numpy.array([c, ]*len(xe))
    xe_m = numpy.array([xe, ] * len(sig_altika)).transpose()
    # Sigma0 seen at the beam incidence and its sensitivity to mss; the
    # ratio gives the stretching factor of the nadir pdf at this incidence.
    sigbeam = (R**2.*numpy.exp(-(numpy.tan(inc)**2 / mss))
               / (mss * numpy.cos(inc)**4))
    dmss = 0.0001
    dsigdbeam = (R**2*numpy.exp(-(numpy.tan(inc)**2./(mss + dmss)))
                 / ((mss + dmss)*numpy.cos(inc)**4) - sigbeam) / dmss
    stretched_fac = siglin * dmssdsig0 * dsigdbeam / sigbeam
    # Stretch the pdf (broadcast over the sigma0 bins); the 1/|fac| factor
    # conserves the integral of the pdf.
    stretched_pdf = abs(1/stretched_fac)*10**((a - (3 + nu_m) * numpy.log(1
                                              + (xe_m/stretched_fac)**2
                                              / ((1 + nu_m) * c_m)))/10)
    return xe, stretched_pdf
class DSigma():
    '''Class that simulates variation of sigma within a footprint. It is
    derived from Altika observation
    Args:
        parameters: settings providing nseed, sat_elev, list_angle and vsat
    '''
    def __init__(self, parameters):
        self.nseed = parameters.nseed
        # Satellite elevation converted from m to km
        self.sat_elev = parameters.sat_elev / 10**3
        self.beam_angle = parameters.list_angle
        self.vsat = parameters.vsat

    def generate(self, sigma0: numpy.ndarray,
                 ac_angle: numpy.ndarray)-> Dict[str, numpy.ndarray]:
        '''Generate statistical dsigma error within a footprint for each beam
        Args:
            sigma0: sigma0 per beam (linear units)
            ac_angle: angle from across track direction
        Returns:
            dict: {"dsigma": (nsample, nbeam) array of velocity errors}
        Note: the pdf sampling uses numpy.random.choice without seeding with
        self.nseed, so successive runs are not reproducible — TODO confirm
        whether this is intended.
        '''
        dsigma_all = []
        for b in range(len(self.beam_angle)):
            xe, pdf = compute_pdf_dsigma(self.beam_angle[b])
            inc = numpy.deg2rad(self.beam_angle[b])
            # ratio altika - skim
            r = 0.66
            cshape = numpy.shape(sigma0[:, b])
            dsigma = numpy.full(cshape, numpy.nan)
            # to change, very slow
            for i in range(cshape[0]):
                if not numpy.isfinite(sigma0[i, b]):
                    continue
                siglog = 10 * numpy.log10(sigma0[i, b])
                # Index of the AltiKa sigma0 bin (7..14 dB -> columns 0..7),
                # clamped to the valid range.
                # BUGFIX: the previous clamp applied max(..., 7) followed by
                # min(..., 0), which always selected column 0.
                ind_sig = numpy.floor(siglog) - 7
                ind_sig = max(ind_sig, 0)
                ind_sig = int(min(ind_sig, 7))
                pdf_stretched = pdf[:, ind_sig]
                distribution = pdf_stretched / numpy.sum(pdf_stretched)
                try:
                    res = numpy.random.choice(xe, size=1, p=distribution)
                    dsigma[i] = res[0]
                except ValueError:
                    # Degenerate distribution (e.g. NaN weights)
                    dsigma[i] = numpy.nan
            # Convert the relative sigma0 fluctuation into a dB/km gradient
            # at the beam incidence.
            dsigma = (r * dsigma / (10*numpy.log(10)) * self.sat_elev
                      * numpy.sin(inc))
            # BUGFIX: utils.convert_dbkm2ms expects
            # (var_out, ac_angle, vsat, beam_angle); the vsat and beam_angle
            # arguments were previously swapped.
            dsigma = utils.convert_dbkm2ms(dsigma, ac_angle[:, b],
                                           self.vsat, self.beam_angle[b])
            dsigma_all.append(dsigma)
        return {"dsigma": numpy.transpose(numpy.array(dsigma_all))}
| 4,605 | 36.754098 | 80 | py |
skimulator | skimulator-master/skimulator/error/utils.py | # Copyright (c) 2020 CNES/JPL
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Random signal generation utilities
----------------------------------
"""
from typing import Optional, Tuple
import warnings
try:
import numba as nb
except ImportError:
nb = None
import numpy
import scipy.interpolate
try:
import mkl_fft
IFFT = mkl_fft.ifft
IFFT2 = mkl_fft.ifft2
except ImportError:
IFFT = numpy.fft.ifft
IFFT2 = numpy.fft.ifft2
def gen_signal_1d(fi: numpy.ndarray,
                  psi: numpy.ndarray,
                  x: numpy.ndarray,
                  nseed: int = 0,
                  fmin: Optional[float] = None,
                  fmax: Optional[float] = None,
                  alpha: int = 10,
                  lf_extpl: bool = False,
                  hf_extpl: bool = False) -> numpy.ndarray:
    """Generate 1d random signal using Fourier coefficient.

    The power spectrum (fi, psi) is interpolated on a regular frequency
    axis, random phases are drawn (reproducibly from `nseed`), an inverse
    FFT builds a periodic signal and the result is interpolated at the
    positions `x`.

    Args:
        fi: frequencies of the input spectrum (zero frequency is discarded)
        psi: power spectral density at `fi`
        x: positions (unit of 1/fi) where the signal is sampled
        nseed: seed for the random phases
        fmin: lowest generated frequency (default: fi[0])
        fmax: highest generated frequency (default: fi[-1])
        alpha: oversampling factor of the frequency axis
        lf_extpl: extend psi as a plateau below fi[0] (zeros otherwise)
        hf_extpl: extend psi as a plateau above fi[-1] (zeros otherwise)

    Returns:
        numpy.ndarray: the random signal evaluated at `x`.
    """
    # Make sure fi, PSi does not contain the zero frequency:
    psi = psi[fi > 0]
    fi = fi[fi > 0]
    # Adjust fmin and fmax to fi bounds if not specified
    # NOTE(review): a falsy fmin/fmax (0 or 0.0) is also replaced by the fi
    # bounds because of the `or` idiom — confirm this is intended.
    fmin = fmin or fi[0]
    fmax = fmax or fi[-1]
    # Go alpha times further in frequency to avoid interpolation aliasing.
    fmaxr = alpha * fmax
    # Interpolation of the non-zero part of the spectrum
    f = numpy.arange(fmin, fmaxr + fmin, fmin)
    mask = psi > 0
    ps = numpy.exp(numpy.interp(numpy.log(f), numpy.log(fi[mask]),
                                numpy.log(psi[mask])))
    # lf_extpl=True prolongates the PSi as a plateau below min(fi).
    # Otherwise, we consider zeros values. same for hf
    ps[f < fi[0]] = psi[0] if lf_extpl else 0
    ps[f > fi[-1]] = psi[-1] if hf_extpl else 0
    ps[f > fmax] = 0
    # Detect the sections (if any) where PSi==0 and apply it to PS
    mask = numpy.interp(f, fi, psi)
    ps[mask == 0] = 0
    f_size = f.size
    # Hermitian phase layout: phase[0] for the mean, then the positive
    # frequencies, mirrored with opposite sign for the negative ones so the
    # inverse FFT produces a real signal.
    phase = numpy.empty((2 * f_size + 1))
    numpy.random.seed(nseed)
    phase[1:(f_size + 1)] = numpy.random.random(f_size) * 2 * numpy.pi
    phase[0] = 0
    phase[-f_size:] = -phase[1:(f_size + 1)][::-1]
    fft1a = numpy.concatenate((numpy.array([0]), 0.5 * ps, 0.5 * ps[::-1]),
                              axis=0)
    fft1a = numpy.sqrt(fft1a) * numpy.exp(1j * phase) / fmin**0.5
    yg = 2 * fmaxr * numpy.real(IFFT(fft1a))
    xg = numpy.linspace(0, 0.5 / fmaxr * yg.shape[0], yg.shape[0])
    # Wrap x into the generated period and interpolate.
    return numpy.interp(numpy.mod(x, xg.max()), xg, yg)
def calculate_ps2d(f: numpy.ndarray, f2: numpy.ndarray, ps1d: numpy.ndarray,
                   dfx: numpy.ndarray, dfy: numpy.ndarray) -> numpy.ndarray:
    """Spread the 1d spectrum `ps1d` onto the 2d frequency grid `f2`.

    The frequencies are processed from the highest down to the lowest; for
    each 1d bin, the energy already deposited in the corresponding column is
    subtracted and only the deficit is distributed over the annulus of cells
    whose radial frequency falls inside the bin.
    """
    ps2d = numpy.zeros(f2.shape)
    flat = ps2d.ravel()
    half_bin = dfx * 0.5
    cell_area = dfx * dfy
    for k in range(-1, -f.size - 1, -1):
        freq = f[k]
        in_band = (f2 >= (freq - half_bin)) & (f2 < (freq + half_bin))
        # Energy already written in column k by previous (higher) bins.
        deposited = numpy.sum(ps2d[:, k]) * cell_area
        deficit = ps1d[k] * dfx - deposited
        flat[in_band.ravel()] = 0 if deficit <= 0 else deficit * 0.5 / cell_area
    return ps2d
def calculate_signal(rectangle, x, y, xgmax, ygmax):
    """Tile the `rectangle` pattern periodically over the (y, x) grid.

    The grid is split into tiles of size (ygmax, xgmax) measured from the
    first sample; each tile receives the upper-left corner of `rectangle`
    cropped to the tile extent.
    """
    signal = numpy.zeros((len(y), len(x)))
    off_x = x - x[0]
    off_y = y - y[0]
    n_tiles_x = int((x.max() - x[0]) // xgmax + 1)
    n_tiles_y = int((y.max() - y[0]) // ygmax + 1)
    for tx in range(n_tiles_x):
        cols = numpy.where((off_x >= (tx * xgmax))
                           & (off_x < ((tx + 1) * xgmax)))[0]
        for ty in range(n_tiles_y):
            rows = numpy.where((off_y >= (ty * ygmax))
                               & (off_y < ((ty + 1) * ygmax)))[0]
            signal[rows[0]:rows[-1] + 1,
                   cols[0]:cols[-1] + 1] = rectangle[:len(rows), :len(cols)]
    return signal
def convert_dbkm2ms(var_out: numpy.array, ac_angle: numpy.array,
                    vsat: float, beam_angle: float)-> numpy.ndarray:
    """Convert a sigma0 gradient into an equivalent radial velocity error.

    Args:
        var_out: sigma0 gradient term to convert
        ac_angle: angle from the across track direction [rad]
        vsat: satellite velocity
        beam_angle: beam incidence angle [degrees]

    Returns:
        numpy.ndarray: equivalent velocity error.
    """
    # Antenna lobe width derived from the 0.65 degree 3-dB aperture.
    aperture = numpy.deg2rad(0.65)
    lobe = aperture / numpy.sqrt(4 * numpy.log(2))
    # Equivalent mispointing induced by the sigma0 gradient.
    mispointing = lobe**2 / numpy.sin(numpy.deg2rad(beam_angle))**2 * var_out
    # Project the mispointing on the satellite velocity.
    return mispointing * vsat * numpy.cos(ac_angle)
def proj_radial(u: numpy.array, v: numpy.array, radial_angle: numpy.array
                ) -> numpy.ndarray:
    """Project the eastward/northward vector (u, v) onto the radial look
    direction given by `radial_angle` (refered to the East, in radians)."""
    return u * numpy.cos(radial_angle) + v * numpy.sin(radial_angle)
def dist_sphere(lon1, lon2, lat1, lat2, R):
    """Great-circle distance between (lon1, lat1) and (lon2, lat2).

    Coordinates are in degrees, R is the sphere radius in meters and the
    result is returned in kilometers.
    """
    phi1 = numpy.deg2rad(lat1)
    phi2 = numpy.deg2rad(lat2)
    dlambda = numpy.deg2rad(lon2 - lon1)
    # Spherical law of cosines.
    central_angle = numpy.arccos(numpy.sin(phi2) * numpy.sin(phi1)
                                 + numpy.cos(phi2) * numpy.cos(phi1)
                                 * numpy.cos(dlambda))
    return central_angle * R / 1000.  # in km
def cross_product(mat: numpy.array, ncoeff: int, cshape: numpy.array
                  )-> numpy.ndarray:
    """Augment the first `ncoeff` columns of `mat` with all their pairwise
    products (squares included).

    The output has cshape rows and ncoeff + ncoeff*(ncoeff+1)/2 columns: the
    original columns first, then mat[:, i] * mat[:, j] for i <= j.
    """
    ntotal = sum(range(ncoeff + 1)) + ncoeff
    augmented = numpy.full((cshape, ntotal), numpy.nan)
    augmented[:, 0:ncoeff] = mat[:, :ncoeff]
    col = ncoeff
    for i in range(ncoeff):
        for j in range(i, ncoeff):
            augmented[:, col] = mat[:, i] * mat[:, j]
            col += 1
    return augmented
def reconstruct_var(ncoeff, cshape, f1, b1, cross, xlabel):
    """Reconstruct a variable as the softmax expectation over a set of
    discretized labels.

    Args:
        ncoeff: number of regressors (columns of `cross` that are used)
        cshape: number of samples (rows of `cross`)
        f1: regression weights, shape (ncoeff, nlabels)
        b1: bias vector, shape (nlabels,)
        cross: regressor matrix, shape (cshape, ncoeff)
        xlabel: discretized label values, shape (nlabels,)

    Returns:
        numpy.ndarray: expectation of `xlabel` under the softmax
        distribution, one value per sample.

    Note: a previous version also computed an intermediate `proba`/`var`
    estimate that was immediately overwritten; that dead code was removed.
    """
    # Design matrix: the ncoeff regressors plus a constant (bias) column.
    l_train = numpy.zeros((cshape, ncoeff + 1), dtype='float64')
    l_train[:, : ncoeff] = cross
    l_train[:, ncoeff] = 1.
    # Stack the regression weights and the bias into a single weight matrix.
    train_weight = numpy.concatenate((f1, numpy.tile(b1, (1, 1))), axis=0)
    res = numpy.dot(l_train, train_weight)
    # Softmax expectation over the discretized labels (exp(res) computed
    # once instead of twice).
    xx2d = numpy.tile(xlabel, (cshape, 1))
    weights = numpy.exp(res)
    var = numpy.sum(xx2d * weights, axis=1) / numpy.sum(weights, axis=1)
    return var
# Compile the two generators with numba when it is available.  The
# signatures are declared explicitly so compilation happens at import time;
# nogil allows calls from worker threads and cache persists the compiled
# code between runs.
if nb is not None:
    _calculate_ps2d = nb.njit("(float64[:, ::1]) (float64[::1],"
                              "float64[:, ::1], float64[::1], float64,"
                              "float64)", cache=True,
                              nogil=True)(calculate_ps2d)
    _calculate_signal = nb.njit("(float64[:, ::1]) (float64[:, ::1],"
                                "float64[::1], float64[::1], float64,"
                                "float64)", cache=True,
                                nogil=True)(calculate_signal)
else:
    # Pure-python fallback when numba is not installed.
    _calculate_ps2d = calculate_ps2d
    _calculate_signal = calculate_signal
| 7,169 | 34.147059 | 76 | py |
skimulator | skimulator-master/skimulator/error/altimeter.py | # Copyright (c) 2020 CNES/JPL
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Altimeter instrument error
--------------------------
"""
from typing import Dict
import numpy
from . import utils
class Altimeter:
    """Altimeter (nadir) instrument error.

    Args:
        parameters (settings.Parameters): Simulation settings
    """
    def __init__(self, parameters) -> None:
        # Generation parameters of the random signal.
        self.delta_al = 2 * parameters.delta_al
        self.nseed = parameters.nseed + 4
        self.len_repeat = parameters.len_repeat
        # Spectrum of the nadir instrument error: a white floor plus a red
        # (f**-2.2) component, with a plateau at the lowest frequencies.
        frequency = numpy.arange(1 / 3000, 1 / self.delta_al, 1 / 3000)
        spectrum = 8 + 1.05 * 1e-4 * frequency**(-2.2)
        spectrum[frequency < 0.00023627939582672978] = 1e4
        # Convert the spectrum to m2/cy.
        self.psd = spectrum * 1e-4
        self.freq = frequency

    def generate(self, x_al: numpy.array) -> Dict[str, numpy.ndarray]:
        """Generate altimeter instrument error.

        Args:
            x_al (numpy.ndarray): Along track distance

        Returns:
            dict: variable name and simulated error as a column vector.
        """
        # Random realization of the instrument spectrum along the track,
        # reproducible through the stored seed.
        noise = utils.gen_signal_1d(self.freq, self.psd, x_al,
                                    nseed=self.nseed,
                                    fmin=1 / self.len_repeat,
                                    fmax=1 / self.delta_al, alpha=10)
        return {"altimeter": noise.reshape(noise.shape[0], -1)}
| 2,323 | 33.686567 | 79 | py |
skimulator | skimulator-master/skimulator/error/simulate_spectrum.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
import math
import logging
from typing import Dict, Tuple
import pkg_resources
from . import utils
# Define logger level for debug purposes
logger = logging.getLogger(__name__)
def load_mss_noise() -> Dict[str, numpy.array]:
    """Load the tabulated pdf of the radial mss noise shipped with the
    package (share/noise_pdf_mss1d.npy)."""
    path = pkg_resources.resource_filename('skimulator',
                                           'share/noise_pdf_mss1d.npy')
    return numpy.load(path, allow_pickle=True)[()]
def combine_usr(lon: numpy.array, lat: numpy.array, usr: numpy.array,
                mssx: numpy.array, mssy: numpy.array, mssxy: numpy.array,
                dazi: float, angle: numpy.array, incl: numpy.array,
                wnd_dir: numpy.array, nseed: int=1, Rearth: float=6378. * 10**3
                ) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:
    '''
    PURPOSE : recombine measurements of various azimuths and ambiguity
              removal using wind direction, for every (sample, beam) point
              of the swath.
    INPUT : - lon, lat : coordinates per sample and beam [degrees]
            - usr : radial Stokes drift per sample and beam [m/s]
            - mssx, mssy, mssxy : mss tensor components
            - dazi : azimuth increment [degrees]
            - angle : radial angle refered to the East [radians]
            - incl : inclination of the pass (along track angle) [radians]
            - wnd_dir : direction of the wind [radians], without ambiguity.
              Must be in the [0;2*pi[ interval
            - nseed : seed for the mss noise draw
            - Rearth : Earth radius [m]
    OUTPUT : usr_comb, usp_comb : radial and perpendicular Stokes drift
             estimates [m/s]; mssr_comb : combined radial mss estimate
    '''
    mssnoise = load_mss_noise()
    #Define ensemble of angle where observations are required
    azibin = numpy.arange(0, 180, dazi)
    # Ambiguity from SKIM measurment
    # TOREMOVE
    usrabs = + numpy.abs(usr)
    #usrabs = + usr # numpy.abs(usr)
    nsample, nbeam = numpy.shape(usrabs)
    # From rad to deg and considering pi ambiguity,
    # Radial angle is refered to the East
    degangle = numpy.rad2deg(numpy.mod(angle, numpy.pi))
    # Inclination of pass (along track angle)
    inclination = numpy.transpose(([incl, ]* nbeam))
    angle_al = numpy.mod(inclination - angle - numpy.pi/2, 2*numpy.pi)
    # Radial mss from the tensor components, plus a random noise drawn from
    # the tabulated AltiKa pdf (reproducible through nseed).
    mssr = (numpy.cos(angle_al)**2 * mssx + numpy.sin(angle_al)**2 * mssy
            + numpy.sin(2*angle_al) * mssxy)
    numpy.random.seed(nseed)
    mssr_noise = numpy.random.choice(mssnoise['bins'], numpy.shape(mssr),
                                     p=mssnoise['hist'])
    mssr = mssr + mssr_noise
    az_al = numpy.mod(inclination - numpy.pi, numpy.pi)
    degangle = numpy.mod(numpy.rad2deg(angle_al), 180)
    az_al = numpy.mod(numpy.rad2deg(az_al), 180)
    # Initialize the reconstruction of ussr
    usr_comb = numpy.full((nsample, nbeam), numpy.nan)
    mssr_comb = numpy.full((nsample, nbeam), numpy.nan)
    usp_comb = numpy.full((nsample, nbeam), numpy.nan)
    # Distance max to take neighbours
    dmax = 200
    for ibeam in range(nbeam):
        for isample in range(nsample):
            if not numpy.isfinite(usrabs[isample, ibeam]):
                continue
            plon = lon[isample, ibeam]
            plat = lat[isample, ibeam]
            pangle = degangle[isample, ibeam]
            rangle = angle_al[isample, ibeam]
            iincl = az_al[isample, ibeam]
            # Approximate equirectangular distance [km] to every swath point.
            #dist = mod_tools.dist_sphere(plon, lon, plat, lat)
            dist = 110*numpy.sqrt((numpy.cos(numpy.deg2rad(lat)) * (lon - plon))**2 + (lat-plat)**2)
            Idist0 = numpy.where(dist < dmax)
            azimuthr = angle_al[Idist0].ravel()
            lonr = lon[Idist0].ravel()
            latr = lat[Idist0].ravel()
            usrr = usrabs[Idist0].ravel()
            mssrr = mssr[Idist0].ravel()
            distr = dist[Idist0].ravel()
            ang_azi = numpy.digitize(degangle[Idist0].ravel(), azibin) - 1
            Ifinite = numpy.logical_and(numpy.isfinite(usrr),
                                        numpy.isfinite(mssrr))
            # for each bin, find the closest point and append radial Stokes
            # drift with ambiguity on sign
            usrazi = numpy.full(len(azibin), numpy.nan)
            mssrazi = numpy.full(len(azibin), numpy.nan)
            thazi = numpy.full(len(azibin), numpy.nan)
            for iazi in range(len(azibin)):
                I1 = numpy.where(numpy.logical_and(ang_azi==iazi, Ifinite))
                distazi = utils.dist_sphere(plon, lonr[I1], plat, latr[I1], Rearth)
                # TODO: detect if distazi is empty
                # NOTE(review): the bare except masks the empty-bin case
                # (argmin raises ValueError on an empty array) but also any
                # other failure — narrow it when refactoring.
                try:
                    Idist = numpy.argmin(distazi)
                    usrazi[iazi] = usrr[I1][Idist]
                    mssrazi[iazi] = mssrr[I1][Idist]
                    thazi[iazi] = numpy.mod(azimuthr[I1][Idist], numpy.pi)
                except:
                    usrazi[iazi] = numpy.nan
                    mssrazi[iazi] = numpy.nan
            try:
                iaziat = int(numpy.where(numpy.logical_and(iincl > (azibin),
                                         iincl < (azibin + dazi)))[0])
                Iaziatnot = numpy.logical_not(numpy.arange(len(azibin))==iaziat)
            except:
                iaziat = None
            # NOTE(review): `if iaziat:` is also False when iaziat == 0, so
            # the first azimuth bin is never interpolated — confirm whether
            # `if iaziat is not None:` was intended.
            if iaziat:
                # Interpolate input data in the along track direction (will be to
                # avoid as the noise cnnot be removed in that direction)
                usrazi[iaziat] = numpy.interp(azibin[iaziat],
                                              azibin[Iaziatnot],
                                              usrazi[Iaziatnot], period=180)
                mssrazi[iaziat] = numpy.interp(azibin[iaziat],
                                               azibin[Iaziatnot],
                                               mssrazi[Iaziatnot], period=180)
            # 3. Perform an estimate of the Stokes drift direction and magnitude
            angle_usr = thazi - rangle
            _usr_comb, _usp_comb = proj_uss(usrazi, azibin, angle_usr, dazi,
                                            wnd_dir[isample, ibeam])
            usr_comb[isample, ibeam] = _usr_comb
            usp_comb[isample, ibeam] = _usp_comb
            mssr_comb[isample, ibeam] = numpy.nanmean(mssrazi)
            #mssr_comb[isample, ibeam] = (numpy.nansum(mssrazi * numpy.sin(angle_usr)) * 2 * dazi / 180.)
    return usr_comb, usp_comb, mssr_comb
def proj_uss(usrazi: numpy.array, azibin: numpy.array, angle_usr: numpy.array,
             dazi: float, wnd_dir: numpy.array):
    ''' Project uss on spectrum point
    ARGS:
        usrazi : radial Stokes drift for each azimuth in azibin [m/s]
        azibin: discretized azimuth [degrees]
        angle_usr: angle of measurement
        dazi: increment angle [degrees]
        wnd_dir: wind direction used to remove the pi ambiguity [radians]
    RETURN:
        _usr_comb: Radial component of Stokes drift
        _usp_comb: Perpendicular component of Stokes drift
    '''
    # First azimuthal harmonic gives the Stokes drift direction, with a pi
    # ambiguity.
    c = numpy.fft.fft(usrazi)/len(usrazi)
    rd2 = -0.5 * numpy.angle(c[1])  # estimate of Us direction
    thrbeam = numpy.mod(rd2, 2 * numpy.pi)
    # Resolve the pi ambiguity using closeness to the wind direction.
    if numpy.cos(wnd_dir - thrbeam) < 0.:
        thrbeam = numpy.arctan2(-numpy.sin(thrbeam), -numpy.cos(thrbeam))
    # 4. Ambiguity removal using closeness to estimated direction
    radazibin = numpy.deg2rad(azibin)
    cond = numpy.where(numpy.cos(thrbeam - radazibin) > 0)[0]
    usfull = - usrazi * 1
    # BUGFIX: `cond.any()` tested the *values* of the index array, so when
    # only bin 0 matched (cond == [0]) the sign restoration was skipped.
    # Test for emptiness instead.
    if cond.size > 0:
        usfull[cond] = usrazi[cond]
    _usr_comb = (numpy.nansum(usfull * numpy.cos(angle_usr)) * 2 * dazi / 180.)
    _usp_comb = (numpy.nansum(usfull * numpy.sin(angle_usr)) * 2 * dazi / 180.)
    return _usr_comb, _usp_comb
def find_closest(lon: numpy.array, lat: numpy.array, mss: numpy.array,
                 mss_est: numpy.array, ice: numpy.array, hs: numpy.array,
                 beam_angle: list, Rearth: float=6378. * 10**3
                 ) -> Tuple[numpy.ndarray, numpy.ndarray]:
    ''' Find closest mss at 6 degree or nadir, closest hs at nadir.
    It assumes that there is a nadir measurement (column 0 of each array).
    ARGS:
        lon: longitude for all beams, nadir included
        lat: latitude for all beams, nadir included
        mss: mss measurement for all beams
        mss_est: estimated mss per non-nadir beam; WARNING: mutated in place
            (overwritten with the measured mss where ice is present)
        ice: Percentage of ice for all beams
        hs: significant height at nadir
        beam_angle: list of beam angles (the 6 degree beam is looked up)
        Rearth: Earth radius [m]
    RETURNS: mssclose: closest mss measurement found
             hsclose: closest hs nadir measurement
    '''
    lon_nadir = lon[:, 0]
    lat_nadir = lat[:, 0]
    lon = lon[:, 1:]
    lat = lat[:, 1:]
    # Re-center longitudes around 0 when the pass crosses the dateline.
    if numpy.min(abs(lon)) <1 and numpy.max(abs(lon))>359:
        lon = numpy.mod(lon + 180, 360) - 180
        lon_nadir = numpy.mod(lon_nadir + 180, 360) - 180
    nsample, nbeam = numpy.shape(mss[:, 1:])
    mssclose = numpy.full((nsample, nbeam), numpy.nan)
    hsclose = numpy.full((nsample, nbeam), numpy.nan)
    ind_6 = numpy.where(numpy.array(beam_angle) == 6)[0]
    mss6 = mss[:, (ind_6 + 1)].ravel()
    # NOTE(review): mss_nadir is a view into mss, so the zero-to-NaN
    # replacement inside the loop below mutates the caller's array — confirm
    # this side effect is intended.
    mss_nadir = mss[:, 0]
    ice_nadir = ice[:, 0]
    # Unary + makes element-wise copies of the arrays.
    mss_extr = + mss[:, 1:]
    ice_extr = + ice[:, 1:]
    mss_est[numpy.where(ice_extr>0)] = mss_extr[numpy.where(ice_extr>0)]
    ice6 = ice[:, (ind_6 + 1)].ravel()
    # Longitude arrays masked (NaN) by surface type, used to restrict the
    # nearest-neighbour searches to same-surface points.
    lon_nadir_ocean = +lon_nadir
    lon_nadir_ocean[numpy.where((ice_nadir>0))] = numpy.nan # | ~numpy.isfinite(mss_nadir))] = numpy.nan
    lon_nadir_ice = +lon_nadir
    lon_nadir_ice[numpy.where((ice_nadir==0))] = numpy.nan # | ~numpy.isfinite(mss_nadir))] = numpy.nan
    lon_6_ocean = + lon[:, (ind_6)].ravel()
    lon_6_ocean[numpy.where((ice6 > 0))] = numpy.nan # | ~numpy.isfinite(mss6))] = numpy.nan
    lon_6_ice = + lon[:, (ind_6)].ravel()
    lon_6_ice[numpy.where((ice6 == 0))] = numpy.nan #| ~numpy.isfinite(mss6))] = numpy.nan
    for isample in range(nsample):
        for ibeam in range(nbeam):
            if not numpy.isfinite(mss[isample, ibeam + 1]):
                continue
            plon = lon[isample, ibeam]
            plat = lat[isample, ibeam]
            # Pick the nadir candidates on the same surface (ice vs ocean).
            if ice[isample, ibeam + 1] > 0:
                _lon = lon_nadir_ice
            else:
                _lon = lon_nadir_ocean
            dist_nadir = utils.dist_sphere(plon, _lon, plat, lat_nadir, Rearth)
            #dist_nadir[numpy.where(numpy.isnan(mss_nadir))] = numpy.nan
            # TODO remove ugly try except
            # NOTE(review): nanargmin raises ValueError on all-NaN input;
            # the bare except also hides any other failure.
            try:
                inadir = numpy.nanargmin(dist_nadir)
            except:
                continue
            # Treat exact zeros as missing values.
            mss6[mss6==0] = numpy.nan
            mss_nadir[mss_nadir==0] = numpy.nan
            dnadir = dist_nadir[inadir]
            if ice[isample, ibeam + 1] > 0:
                _lon = lon_6_ice
            else:
                _lon = lon_6_ocean
            dist_6 = utils.dist_sphere(plon, _lon, plat,
                                       lat[:, (ind_6)].ravel(), Rearth)
            try:
                i6 = numpy.nanargmin(dist_6)
            except:
                continue
            d6 = dist_6[i6]
            # No measurement close enough: fall back to the estimate.
            if d6 > 15 and dnadir > 15:
                mssclose[isample, ibeam] = mss_est[isample, ibeam]
            ind_dist_nadir = numpy.where(dist_nadir < 150)
            ind_dist_6 = numpy.where(dist_6 < 50)
            _hs = numpy.nanmean(hs[ind_dist_nadir])
            # if numpy.isnan(_hs):
            #     continue
            # Reject the nadir hs when it differs too much from the local
            # average (outlier rejection).
            if abs(hs[inadir] - _hs) > 1:
                continue
            #if abs(hs[inadir] - _hs) > 0.4:
            #    hsclose[isample, ibeam] = _hs
            #else:
            hsclose[isample, ibeam] = hs[inadir]
            _mss = numpy.nanmean(mss6[ind_dist_6])
            if abs(mss6[i6] - _mss) > 0.01:
                continue
            #if abs(mss6[i6] - _mss) > 0.005:
            #    mssclose[isample, ibeam] = _mss
            #else:
            # Keep whichever of the 6 degree or nadir measurement is closer.
            if d6 < dnadir:
                mssclose[isample, ibeam] = mss6[i6]
            else:
                mssclose[isample, ibeam] = mss_nadir[inadir]
            if mss[isample, ibeam + 1] == 0:
                mssclose[isample, ibeam] = 0
    return mssclose, hsclose
def simulate_spect_skim(sgrid: dict, dic_input: dict, beam_angle: list,
                        radial_angle: numpy.array, delta_azim: float=15,
                        fc: int=1, nseed: int=1
                        )-> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:
    '''Reconstruct the radial Stokes drift, mss and Hs along the swath.

    Args:
        sgrid: grid dictionary with 'lon', 'lat', 'angle', 'incl', 'ipass'
        dic_input: model variables ('uuss', 'vuss', 'mssx', 'mssy', 'mssxy',
            'uwnd', 'vwnd', 'mssu', 'mssc', optionally 'ice' and 'hs')
        beam_angle: list of beam incidence angles
        radial_angle: radial look angle refered to the East
        delta_azim: azimuth discretization [degrees]
        fc: index of the first non-nadir column (nadir is column 0)
        nseed: random seed for the mss noise

    Returns:
        tuple: (usr_comb, mssclose, hsclose)
    '''
    lon = numpy.transpose(numpy.array(sgrid['lon']))
    lat = numpy.transpose(numpy.array(sgrid['lat']))
    _angle = numpy.array(sgrid['angle'])
    # Descending passes look in the opposite direction.
    if (sgrid['ipass'] %2) != 0:
        _angle = _angle + numpy.pi
    incl = numpy.transpose(numpy.array(sgrid['incl']))
    # Wind direction refered to the East, wrapped into [0, 2*pi[.
    wnd_dir = numpy.arctan2(dic_input['vwnd'], dic_input['uwnd'])
    wnd_dir = numpy.mod(wnd_dir[:, fc:], 2*numpy.pi)
    usr = utils.proj_radial(dic_input['uuss'][:, fc:],
                            dic_input['vuss'][:, fc:], radial_angle)
    # 'ice' and 'hs' are optional: default to zeros with the matching shape.
    if 'ice' in dic_input:
        ice = dic_input['ice']
    else:
        ice = dic_input['uuss'] * 0
    if 'hs' in dic_input:
        hs = dic_input['hs'][:, 0]
    else:
        hs = dic_input['uuss'][:, 0] * 0
    mss = dic_input['mssu'] + dic_input['mssc']
    _res = combine_usr(lon[:, fc:], lat[:, fc:], usr, dic_input['mssx'][:, fc:],
                       dic_input['mssy'][:, fc:], dic_input['mssxy'][:, fc:],
                       delta_azim, _angle, incl, wnd_dir, nseed=nseed)
    usr_comb, usp_comb, mssr_comb = _res
    mssclose, hsclose = find_closest(lon, lat, mss, mssr_comb, ice, hs,
                                     beam_angle)
    return usr_comb, mssclose, hsclose
| 13,999 | 43.444444 | 105 | py |
skimulator | skimulator-master/skimulator/error/wave_doppler.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import numpy
from typing import Dict
from . import utils
import logging
import pkg_resources
logger = logging.getLogger(__name__)
def wd_stream(radial_angle: numpy.array, wndir: numpy.array) -> numpy.ndarray:
    """Parametric wave Doppler for the STREAM concept: 6 cm/s scaled by the
    cosine of the angle between the look and wind directions."""
    return 6 * numpy.cos(radial_angle - wndir) * 10**(-2)
class WaveDoppler():
    '''Class wave_doppler defines the computation of the wave doppler that is
    seen by the instrument for STREAM and SKIM concepts
    Args:
        parameters: Simulation settings (nseed, type_satellite, list_angle)
    '''
    def __init__(self, parameters):
        # Seed used by the spectral reconstruction in correct().
        self.nseed = parameters.nseed + 2
        self.type_satellite = parameters.type_satellite
        self.beam_angle = parameters.list_angle

    def generate(self, wind_norm: numpy.array, wind_dir: numpy.array,
                 windr: numpy.array, mss: numpy.array, hs: numpy.array,
                 ice: numpy.array, sigma0: numpy.array,
                 radial_angle: numpy.array, uuss: numpy.array,
                 vuss: numpy.array) -> Dict[str, numpy.array]:
        ''' Build errors corresponding to the wave Doppler seen by the
        instrument on the satellite swath.
        Args:
            wind_norm: Wind velocity
            wind_dir: Wind direction refered to the East
            windr: Radial wind component
            mss: Mean square slope (mssu + mssc or mssclose)
            hs: significant wave height
            ice: ice
            sigma0: sigma0
            radial_angle: angle refered to the East
            uuss: stokes drift Eastward componant
            vuss: stokes drift Northward componant
        Returns:
            dict: {"wd": wave doppler, "ur_uss": radial Stokes drift}
        Note: terminates the process (sys.exit) on an unknown concept.
        '''
        if self.type_satellite == 'stream':
            # BUGFIX: bind the result to uwd_all (returned below); it was
            # previously stored in a dead local and the return raised a
            # NameError for the stream concept.
            uwd_all = wd_stream(radial_angle, wind_dir)
            ussr = numpy.full(numpy.shape(uwd_all), numpy.nan)
        elif self.type_satellite == 'skim':
            ussr = utils.proj_radial(uuss, vuss, radial_angle)
            uwd_all = []
            _comp = compute_wd_ai_par
            for b in range(len(self.beam_angle)):
                uwd = _comp(wind_norm[:, b], windr[:, b], mss[:, b],
                            hs[:, b], ice[:, b], sigma0[:, b],
                            ussr[:, b], radial_angle[:, b],
                            self.beam_angle[b])
                uwd_all.append(uwd)
            uwd_all = numpy.transpose(numpy.array(uwd_all))
        else:
            logger.error(f'Unknown concept {self.type_satellite}')
            sys.exit(1)
        return {"wd": uwd_all, "ur_uss": ussr}

    def correct(self, sgrid:dict, dic_input:dict, wind_norm: numpy.array,
                windr:numpy.array, ice: numpy.array,
                sigma0: numpy.array, radial_angle: numpy.array,
                dazim: float)-> Dict[str, numpy.ndarray]:
        '''Estimate the wave doppler from the simulated azimuthal sampling
        (skim concept only; returns {} for stream, None otherwise).
        '''
        if self.type_satellite == 'stream':
            return {}
        elif self.type_satellite == 'skim':
            from . import simulate_spectrum
            _sim = simulate_spectrum.simulate_spect_skim
            usr_est, mss_est, hs_est = _sim(sgrid, dic_input, self.beam_angle,
                                            radial_angle, delta_azim=dazim,
                                            fc=1, nseed=self.nseed)
            uwd_all = []
            _comp = compute_wd_ai_par
            for b in range(len(self.beam_angle)):
                uwd = _comp(wind_norm[:, b], windr[:, b], mss_est[:, b],
                            hs_est[:, 0], ice[:, b], sigma0[:, b],
                            usr_est[:, b], radial_angle[:, b],
                            self.beam_angle[b])
                uwd_all.append(uwd)
            uwd_all = numpy.transpose(numpy.array(uwd_all))
            return {"wd_est": uwd_all, "ur_uss_est": usr_est}
def compute_wd_old_par(uuss: numpy.array, vuss:numpy.array, ucur: numpy.array,
                       vcur: numpy.array, uwnd: numpy.array, vwnd: numpy.array,
                       radial_angle:numpy.array, beam_angle: float
                       ) -> Dict[str, numpy.ndarray]:
    ''' Compute Wave doppler for SKIM instrument using old parametrisation
    uwd = Gr * ussr and Gr = a * log(b + c/nwr) * (1 - tanh(angle))
    Args:
        uuss: Stokes drift eastward componant
        vuss: Stokes drift northward componant
        ucur: Current eastward componant
        vcur: Current northward componant
        uwnd: Wind eastward componant
        vwnd: Wind northward componant
        radial_angle: Angle refered to the East
        beam_angle: View angle of beam
    Returns:
        dict: radial Stokes drift ("ur_uss"), noise-free wave doppler
        ("wd_noerr") and noisy wave doppler ("wd").  The noise draw is not
        seeded here and is therefore not reproducible.
    '''
    ur_uss = utils.proj_radial(uuss, vuss, radial_angle)
    # Norm of the wind relative to the surface current; zeros would make the
    # gain diverge, so they are masked.
    nwr = numpy.sqrt((uwnd - ucur)**2 + (vwnd - vcur)**2)
    nwr[nwr==0] = numpy.nan
    _angle = numpy.deg2rad((beam_angle - 25) / 10)
    # Empirical radial gain.
    GR = 25 * (0.82 * numpy.log(0.2 + 7/nwr)) * (1 - numpy.tanh(_angle))
    uwd_noerr = GR * ur_uss
    cshape = numpy.shape(uwd_noerr)
    # 25 % multiplicative gaussian noise.
    noise = numpy.random.normal(0, abs(uwd_noerr) * 0.25, cshape[0])
    wd = uwd_noerr + noise
    return {"ur_uss": ur_uss, "wd_noerr": uwd_noerr, "wd": wd}
def compute_wd_ai_par(nwnd: numpy.array, wndr: numpy.array, mss: numpy.array,
                      hs: numpy.array, ice: numpy.array, sigma0: numpy.array,
                      ussr: numpy.array, radial_angle: numpy.array,
                      beam_angle: float) -> numpy.ndarray:
    ''' Compute wave doppler using coefficients learned from ww3 data
    Args:
        nwnd: Wind velocity
        wndr: Wind direction refered to the East
        mss: Mean square slope (mssu + mssc or mssclose);
            WARNING: clipped in place (side effect on the caller's array)
        hs: significant wave height
        ice: ice
        sigma0: sigma0
        ussr: stokes radial componant;
            WARNING: clipped in place (side effect on the caller's array)
        radial_angle: angle refered to the East (unused here)
        beam_angle: beam view incidence angle
    Returns:
        numpy.ndarray: reconstructed radial wave doppler.
    '''
    # Load Coefficents learned from WW3 for this beam angle.
    coeff_path = pkg_resources.resource_filename('skimulator',
                                                 'share/coeffr.npy')
    coeff = numpy.load(coeff_path, allow_pickle=True)[()]
    coeff_f1ur = coeff['f1UWDRglob{:d}deg'.format(int(beam_angle))]
    coeff_b1ur = coeff['b1UWDRglob{:d}deg'.format(int(beam_angle))]
    coeff_m = coeff['x_4dmean_{:d}deg'.format(int(beam_angle))]
    coeff_std = coeff['x_4dstd_{:d}deg'.format(int(beam_angle))]
    xlabel = coeff['xx{:d}'.format(int(beam_angle))]
    # Number of raw regressors of the learned model.
    ncoeffur = 7 #len(coeff_4dm)
    # -- Compute norm --
    # Construct matrix of input data
    cshape = numpy.shape(wndr)
    # Clip the radial Stokes drift within the 5th/95th percentile envelopes
    # tolerated by the learned model.
    ussr_max = Usr_95(wndr)
    ussr_min = Usr_5(wndr)
    diff_max = ussr_max - ussr
    diff_min = ussr_min - ussr
    ussr[numpy.where(diff_max < 0)] = ussr_max[numpy.where(diff_max < 0)]
    ussr[numpy.where(diff_min > 0)] = ussr_min[numpy.where(diff_min > 0)]
    # Saturated copies of the wind inputs (unary + copies the array).
    wndr_min = + wndr
    wndr_min[numpy.where(abs(wndr) >= 8.)] = 8.
    nwnd_min = + nwnd
    nwnd_min[numpy.where(nwnd >= 8.)] = 8.
    # Clip the mss within its tolerated envelope.
    mss_max = mss_95(nwnd)
    mss_min = mss_5(nwnd)
    diff_max = (mss_max - mss)
    diff_min = (mss_min - mss)
    mss[numpy.where(diff_max < 0)] = mss_max[numpy.where(diff_max < 0)]
    mss[numpy.where(diff_min > 0)] = mss_min[numpy.where(diff_min > 0)]
    # noise_nwnd = numpy.random.normal(0, 10, cshape[0])
    # wndr = wndr + noise_nwnd
    # Floor the mss; the corresponding samples are flagged at the end.
    _ind = numpy.where(mss <= 8e-4)
    mss[_ind] = 8e-4
    mat_noerr = numpy.full((cshape[0], ncoeffur), numpy.nan)
    mat_noerr[:, 0] = ussr
    mat_noerr[:, 1] = wndr
    # mat_noerr[:, 2] = numpy.sign(wndr) * wndr_min
    mat_noerr[:, 2] = nwnd
    mat_noerr[:, 3] = nwnd_min
    mat_noerr[:, 4] = hs
    mat_noerr[:, 5] = 1. / mss
    mat_noerr[:, 6] = 1. / (mss + numpy.log(nwnd + 0.7) * 0.009)
    # Compute cross product
    cross = utils.cross_product(mat_noerr, ncoeffur, cshape[0])
    ncoeffcross = len(coeff_m)
    # Normalize with coefficents
    for i in range(ncoeffcross):
        cross[:, i] = (cross[:, i] - coeff_m[i]) / coeff_std[i]
    shape_b1 = numpy.shape(coeff_b1ur)[0]
    Uwd = utils.reconstruct_var(ncoeffcross, cshape[0], coeff_f1ur,
                                coeff_b1ur, cross, xlabel)
    # Attenuation in presence of ice, stronger for the 6 degree beam.
    if beam_angle == 6:
        sigma_ice = 2.5
    else:
        sigma_ice = 1.
    Uwd = Uwd * (1 - sigma_ice * ice / sigma0)
    # Flag the floored-mss and fully iced samples with a small sentinel.
    Uwd[_ind] = 0.001
    Uwd[numpy.where(ice==1)] = 0.001
    return Uwd
def Usr_95(wndr):
    """95th percentile (maximum tolerable value) of the radial Stokes drift
    as a function of the radial wind speed [m/s]."""
    shifted = wndr - 0.75  # middle point to lower bound
    low = numpy.maximum(shifted, -8)
    high = numpy.minimum(shifted, 8)
    return (0.0200072*shifted - 0.0073556*high - 0.00290607*low
            + 0.03929183)
def Usr_5(wndr):
    """5th percentile (minimum tolerable value) of the radial Stokes drift
    as a function of the radial wind speed [m/s]."""
    shifted = wndr - 0.75  # middle point to lower bound
    low = numpy.maximum(shifted, -8)
    high = numpy.minimum(shifted, 8)
    return 0.01896863*shifted - 0.00267442*high - 0.00657048*low - 0.02238212
def mss_5(wnd):
    """5th percentile (minimum tolerable value) of mss as a function of the
    wind speed [m/s]."""
    wnd = wnd - 0.5  # middle point to lower bound
    mawnd = numpy.maximum(wnd, 5)
    # Note: the previous version also computed numpy.minimum(wnd, 20) but
    # never used it; the unused variable was removed.
    res = (0.00152612277*wnd - 4.73687795e-06*mawnd + 2.14390064e-04*wnd**2
           - 2.28370767e-04*wnd*mawnd)
    return res
def mss_95(wnd):
    """95th percentile (maximum tolerable value) of mss as a function of the
    wind speed [m/s]."""
    # The saturation bounds use the raw wind; the polynomial terms use the
    # wind shifted to the bin lower bound.
    upper = numpy.maximum(wnd, 5)
    lower = numpy.minimum(wnd, 20)
    centred = wnd - 0.5  # middle point to lower bound
    return (2.71004327e-03 * centred + 1.27761483e-03*upper
            - 1.68132538e-03*lower - 4.400380528e-5*centred**2
            - 1.35712627e-05)
| 10,280 | 38.694981 | 79 | py |
skimulator | skimulator-master/skimulator/error/__init__.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
from .altimeter import Altimeter
from .wave_doppler import WaveDoppler
from .attitude import Attitude
from .dsigma import DSigma
from .instrument import Instrument
from .rain import Rain
#from .wet_troposphere import WetTroposphere
| 917 | 35.72 | 68 | py |
skimulator | skimulator-master/skimulator/error/attitude.py | """
Copyright (C) 2017-2021 OceanDataLab
This file is part of skiMulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skiMulator. If not, see <http://www.gnu.org/licenses/>.
"""
""" Attitude (yaw and aocs) signal"""
import sys
import numpy
from scipy import interpolate
from typing import Dict, Tuple
import netCDF4
import datetime
from .. import fitspline2d
from .. import const
import logging
logger = logging.getLogger(__name__)
def load_yaw_aocs(yaw_file: str) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """Read the AOCS yaw signal from a netCDF file.

    Args:
        yaw_file: path to a netCDF file holding a 'time' variable
            (seconds) and a 'yaw1' variable (microrad).

    Returns:
        tuple: (time converted to days, yaw signal in microrad).
    """
    fid = netCDF4.Dataset(yaw_file, 'r')
    try:
        # The file stores time in seconds; convert to days.
        time = fid.variables['time'][:] / 86400.
        yaw_microrad = fid.variables['yaw1'][:]
    finally:
        # Always release the handle: the original leaked the open dataset
        # when a variable read raised.
        fid.close()
    return time, yaw_microrad
def make_yaw_aocs(time_yaw: numpy.array, vac_yaw: numpy.array,
                  time: numpy.array) -> numpy.ndarray:
    """Interpolate the AOCS yaw record onto the measurement times.

    The record is treated as periodic: the scaled measurement times are
    wrapped modulo the record length before linear interpolation.
    NOTE(review): ``time`` is multiplied by 86400 (days -> seconds) while
    ``time_yaw`` comes from load_yaw_aocs in days -- confirm the units.
    """
    period = numpy.max(time_yaw)
    wrapped = numpy.mod(time * 86400, period)
    interpolator = interpolate.interp1d(time_yaw, vac_yaw)
    return interpolator(wrapped)
def make_yaw_ted(time: numpy.array, cycle: float, angle: numpy.array,
                 first_time: datetime.datetime, beam: float,
                 instr_conf: str) -> numpy.array:
    """Compute the TED (thermo-elastic distortion) yaw signal in microrad.

    Args:
        time: measurement times in days (wrapped modulo ``cycle``).
        cycle: cycle duration in days.
        angle: azimuth angle of the beam [rad].
        first_time: datetime of the first measurement (seasonal phase).
        beam: beam incidence in degrees, selects the coefficient file.
        instr_conf: instrument configuration; 'A' selects the TAS file,
            anything else the CB file.
    """
    import pkg_resources
    nxspline, nyspline, thedeg = (128, 64, 12)
    bn = f'Spline_{nxspline:d}_{nyspline:d}_TED_'
    if instr_conf == 'A':
        fname = f'{bn}TAS_{beam:d}_DEG.npy'
    else:
        # BUG FIX: the f-string prefix was missing here, so the CB branch
        # built the literal '{bn}CB_{beam:d}_DEG.npy' instead of the
        # actual file name.
        fname = f'{bn}CB_{beam:d}_DEG.npy'
    coeff_path = pkg_resources.resource_filename('skimulator',
                                                 'share/{}'.format(fname))
    wres = numpy.load(coeff_path)
    param = fitspline2d.ted_tas(wres, nxspline, nyspline, thedeg)
    # Convert time in seconds
    time = numpy.mod(time, cycle) * 86400
    # Normalize time between 0 and 1 by dividing by the total seconds in
    # 1 orbit and shift for 0s at 0deglat ascending
    max_time_orbit = 6083.
    tdec = 5.622404310127427501e-02 * 86400
    t_orbit = numpy.mod(time - tdec, max_time_orbit) / max_time_orbit
    # Shift angle to across track, clockwise
    az = numpy.mod(-1 * numpy.rad2deg(angle), 360)
    # Normalize date between 0 and 1 for seasonal cycle
    date_start = datetime.datetime(first_time.year, 1, 1)
    time_d = (first_time - date_start).total_seconds()
    date_end = datetime.datetime(first_time.year, 12, 31, 23, 59, 59)
    time_total = (date_end - date_start).total_seconds()
    time_d = numpy.mod(time_d + time, time_total) / time_total
    # Spline evaluation; result is in arcsec
    yaw_ted = (param.transform(t_orbit, az, time_d)).astype('float32')
    # conversion from arcsec to microrad
    yaw_ted = yaw_ted * numpy.pi * 10**6 / (180 * 3600)
    return yaw_ted
class Attitude():
    '''Attitude error generator combining the AOCS and TED yaw signals.

    Args:
        parameters: simulation settings; must provide ``list_angle``,
            ``instr_configuration`` and ``yaw_file``.
    '''
    def __init__(self, parameters):
        import pkg_resources
        # Paths of the packaged spline-coefficient files.
        # NOTE(review): both "std" paths point to the same AVV_6deg.npy
        # file as the 6-deg average -- confirm the intended file names.
        self.coeff_path_avv6 = pkg_resources.resource_filename(
            'skimulator', 'share/AVV_6deg.npy')
        self.coeff_path_avv12 = pkg_resources.resource_filename(
            'skimulator', 'share/AVV_12deg.npy')
        self.coeff_path_std6 = pkg_resources.resource_filename(
            'skimulator', 'share/AVV_6deg.npy')
        self.coeff_path_std12 = pkg_resources.resource_filename(
            'skimulator', 'share/AVV_6deg.npy')
        # Orbit duration in seconds
        self.DT_ORBIT = 6083
        self.n12deg = parameters.list_angle.count(12)
        self.nbeam = (self.n12deg + parameters.list_angle.count(6))
        self.instr_configuration = parameters.instr_configuration
        # AOCS yaw record: time in days, yaw in microrad
        self.time_yaw, self.vac_yaw = load_yaw_aocs(parameters.yaw_file)
        l_angle = [[0, ], parameters.list_angle]
        # Flatten list; beam 0 is the nadir beam
        self.beam_angle = [item for sublist in l_angle for item in sublist]

    def load_coefficient(self):
        '''Load the spline coefficient files into memory (currently unused).

        Fixed: the original definition was missing ``self`` and referenced
        the path attributes without the ``self.`` prefix, so any call
        raised TypeError/NameError.
        '''
        self.avv6 = numpy.load(self.coeff_path_avv6)
        self.std6 = numpy.load(self.coeff_path_std6)
        self.avv12 = numpy.load(self.coeff_path_avv12)
        self.std12 = numpy.load(self.coeff_path_std12)

    def generate(self, time: numpy.array, ti_cycle: float,
                 ac_angle: numpy.array,
                 first_time: datetime.datetime) -> Dict[str, numpy.ndarray]:
        '''Generate the AOCS and TED attitude (yaw) signals.

        Args:
            time: measurement times in days, one column per beam.
            ti_cycle: cycle duration in days.
            ac_angle: azimuth angle per off-nadir beam [rad].
            first_time: datetime of the first measurement.

        Returns:
            dict with "yaw" (total signal converted to m/s), "yaw_aocs"
            and "yaw_ted" (both in microrad), shaped (nsample, nbeam).
        '''
        yaw_aocs_all = []
        yaw_ted_all = []
        yaw_total_all = []
        for b in range(len(self.beam_angle)):
            yaw_aocs = make_yaw_aocs(self.time_yaw, self.vac_yaw, time[:, b])
            if b != 0:
                yaw_ted = make_yaw_ted(time[:, b], ti_cycle, ac_angle[:, b-1],
                                       first_time, self.beam_angle[b],
                                       self.instr_configuration)
            else:
                # Nadir beam: no TED contribution
                yaw_ted = 0 * yaw_aocs
            # Conversion from microrad to m/s.
            # NOTE(review): for b == 0 this reads ac_angle[:, -1] (the last
            # beam column) -- confirm this is intended for the nadir beam.
            yaw_total = ((yaw_aocs + yaw_ted) * const.vsat * 10**(-6)
                         * numpy.cos(ac_angle[:, b - 1]))
            yaw_aocs_all.append(yaw_aocs)
            yaw_ted_all.append(yaw_ted)
            yaw_total_all.append(yaw_total)
        yaw_aocs_all = numpy.transpose(numpy.array(yaw_aocs_all))
        yaw_ted_all = numpy.transpose(numpy.array(yaw_ted_all))
        yaw_total_all = numpy.transpose(numpy.array(yaw_total_all))
        return {"yaw": yaw_total_all, "yaw_aocs": yaw_aocs_all,
                "yaw_ted": yaw_ted_all}

    def calc(self, radial_angle, time):
        '''Incomplete helper (kept as-is): computes spline lookup indices
        and a noise draw but does not return a value yet.'''
        pidx = ((128 * radial_angle / numpy.pi / 2
                 + 0.49999).astype('int')) % 128
        oidx = ((64*(3600*24*(time)) / self.DT_ORBIT
                 + 0.49999).astype('int')) % 64
        nx, ny = radial_angle.shape
        res = numpy.zeros([nx, ny])
        noise = numpy.random.randn(nx, ny)
| 6,582 | 38.419162 | 80 | py |
skimulator | skimulator-master/skimulator/error/generator.py | # Copyright (c) 2020 CNES/JPL
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Copyright (C) 2017-2021 OceanDataLab
This file is part of skimulator.
skimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
skimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with skimulator. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Generate instrumental errors
----------------------------
"""
from typing import Dict
import numpy
import datetime
from . import (Altimeter, WaveDoppler, Instrument, Rain, Attitude, DSigma)
from . import utils
# WetTroposphere
class Generator:
    """Instrumental and geophysical error generator.

    Instantiates one error generator per entry in ``parameters.noise``
    and dispatches to them to simulate, correct and apply measurement
    errors.

    Args:
        parameters (settings.Parameters): Simulation settings
    """
    def __init__(self, parameters):
        #: The list of user-defined error generators
        self.generators = []
        # Map each configured noise name onto its generator class.
        for item in parameters.noise:
            if item == Altimeter.__name__:
                self.generators.append(Altimeter(parameters))
            elif item == Attitude.__name__:
                self.generators.append(Attitude(parameters))
            elif item == DSigma.__name__:
                self.generators.append(DSigma(parameters))
            elif item == Instrument.__name__:
                self.generators.append(Instrument(parameters))
            elif item == Rain.__name__:
                self.generators.append(Rain(parameters))
            elif item == WaveDoppler.__name__:
                self.generators.append(WaveDoppler(parameters))
            # elif item == WetTroposphere.__name__:
            #     self.generators.append(WetTroposphere(parameters))
            else:
                # A new error generation class has been implemented but it is
                # not handled by this object.
                raise ValueError(f"unknown error generation class: {item}")

    def generate_dask(self, cycle_number: int, curvilinear_distance: float,
                      time: numpy.array, x_al: numpy.array, x_ac: numpy.array,
                      swh: numpy.array) -> Dict[str, numpy.ndarray]:
        """Generate errors

        NOTE(review): this method appears to be dead code inherited from
        another simulator: ``dask``, ``BaselineDilation``,
        ``CorrectedRollPhase``, ``Karin``, ``RollPhase``, ``Timing`` and
        ``WetTroposphere`` are not imported in this module, so calling it
        raises NameError.

        Args:
            cycle_number (int): Cycle number.
            curvilinear_distance (float): Curvilinear distance covered by the
                satellite during a complete cycle.
            time (numpy.ndarray): Date of measurements.
            x_al (numpy.ndarray): Along track distance.
            x_ac (numpy.ndarray): Across track distance.
        Returns:
            dict: Associative array between error variables and simulated
            values.
        """
        result = {}
        # Nothing to compute without generators or measurements.
        if not self.generators or x_al.shape[0] == 0:
            return result
        futures = []
        with dask.distributed.worker_client() as client:
            for item in self.generators:
                if isinstance(item, Altimeter):
                    futures.append(client.submit(item.generate, x_al))
                elif isinstance(item, BaselineDilation):
                    futures.append(client.submit(item.generate, x_al, x_ac))
                elif isinstance(item, CorrectedRollPhase):
                    futures.append(client.submit(item.generate, time, x_ac))
                elif isinstance(item, Karin):
                    futures.append(
                        client.submit(item.generate, x_al, x_ac,
                                      curvilinear_distance, cycle_number, swh))
                elif isinstance(item, RollPhase):
                    futures.append(client.submit(item.generate, x_al, x_ac))
                elif isinstance(item, Timing):
                    futures.append(client.submit(item.generate, x_al, x_ac))
                elif isinstance(item, WetTroposphere):
                    futures.append(client.submit(item.generate, x_al, x_ac))
            for future in dask.distributed.as_completed(futures):
                result.update(future.result())
        return result

    def generate(self, x_al: numpy.array, dic_input: dict,
                 ac_angle: numpy.array, time:numpy.array, ti_cycle: float,
                 first_time: datetime.datetime, radial_angle: numpy.array,
                 x_al_nadir: numpy.array, x_ac: numpy.array
                 ) -> Dict[str, numpy.ndarray]:
        """Run every configured error generator and merge its output fields
        into one dictionary.

        Args:
            x_al: along-track distance per beam.
            dic_input: interpolated model fields ('sigma0', 'uwnd', ...).
            ac_angle: azimuth angle per off-nadir beam.
            time: measurement times.
            ti_cycle: cycle duration.
            first_time: datetime of the first measurement.
            radial_angle: radial look angle per off-nadir beam.
            x_al_nadir: along-track distance of the nadir beam.
            x_ac: across-track distance.

        Returns:
            dict: error variable name -> simulated values.
        """
        res = {}
        # Column 0 holds the nadir beam; fc is the first off-nadir column.
        fc = 1
        for item in self.generators:
            if isinstance(item, Altimeter):
                res.update(item.generate(x_al[:, 0]))
            elif isinstance(item, DSigma):
                sigma0 = dic_input['sigma0']
                res.update(item.generate(sigma0, ac_angle))
            elif isinstance(item, Attitude):
                res.update(item.generate(time, ti_cycle, ac_angle, first_time))
            elif isinstance(item, Instrument):
                sigma0 = dic_input['sigma0']
                # Wind direction and norm from the off-nadir columns.
                wind_dir = numpy.arctan2(dic_input['vwnd'][:, fc:],
                                         dic_input['uwnd'][:, fc:])
                wind_norm = numpy.sqrt(dic_input['uwnd'][:, fc:]**2
                                       + dic_input['vwnd'][:, fc:]**2)
                res.update(item.generate(radial_angle, ac_angle, wind_norm,
                                         wind_dir, sigma0))
            elif isinstance(item, Rain):
                res.update(item.generate(time, x_al, x_al_nadir, x_ac,
                                         ac_angle))
            elif isinstance(item, WaveDoppler):
                sigma0 = dic_input['sigma0']
                wind_norm = numpy.sqrt(dic_input['uwnd'][:, fc:]**2
                                       + dic_input['vwnd'][:, fc:]**2)
                # Wind projected onto the radial look direction.
                windr = utils.proj_radial(dic_input['uwnd'][:, fc:],
                                          dic_input['vwnd'][:, fc:],
                                          radial_angle)
                wind_dir = numpy.arctan2(dic_input['vwnd'][:, fc:],
                                         dic_input['uwnd'][:, fc:])
                if 'mssu' in dic_input.keys():
                    mss = dic_input['mssu'][:, fc:] + dic_input['mssc'][:, fc:]
                else:
                    # NOTE(review): neither ``logger`` nor ``sys`` is imported
                    # in this module, so this branch raises NameError instead
                    # of logging and exiting.
                    logger.error('mss variable not found')
                    sys.exit(1)
                # Optional fields default to zero arrays shaped like sigma0.
                if 'hs' in dic_input.keys():
                    hs = dic_input['hs'][:, fc:]
                else:
                    hs = sigma0 * 0
                if 'uuss' in dic_input.keys():
                    uuss = dic_input['uuss'][:, fc:]
                    vuss = dic_input['vuss'][:, fc:]
                else:
                    uuss = sigma0 * 0
                    vuss = sigma0 * 0
                if 'ice' in dic_input.keys():
                    ice = dic_input['ice'][:, fc:]
                else:
                    ice = sigma0 * 0
                res.update(item.generate(wind_norm, wind_dir, windr, mss, hs,
                                         ice, sigma0, radial_angle, uuss, vuss))
        return res

    def correct(self, grid_dict: dict, dic_input: dict,
                radial_angle: numpy.array, dazim: float, fc: int=1
                ) -> Dict[str, numpy.ndarray]:
        """Estimate corrections for the correctable errors (currently only
        the wave Doppler) and return them as a dictionary.

        Args:
            grid_dict: grid description used by the correction.
            dic_input: interpolated model fields.
            radial_angle: radial look angle per off-nadir beam.
            dazim: azimuthal resolution parameter.
            fc: index of the first off-nadir column (default 1).
        """
        res = {}
        for item in self.generators:
            if isinstance(item, WaveDoppler):
                sigma0 = + dic_input['sigma0']
                wind_norm = numpy.sqrt(dic_input['uwnd'][:, fc:]**2
                                       + dic_input['vwnd'][:, fc:]**2)
                windr = utils.proj_radial(dic_input['uwnd'][:, fc:],
                                          dic_input['vwnd'][:, fc:],
                                          radial_angle)
                if 'ice' in dic_input.keys():
                    ice = dic_input['ice'][:, fc:]
                else:
                    ice = sigma0 * 0
                res.update(item.correct(grid_dict, dic_input, wind_norm, windr,
                                        ice, sigma0, radial_angle, dazim))
        return res

    def generate_obs(self, dic_input: dict, error:dict, corr: dict, fc: int=1,
                     ) -> Dict[str, numpy.ndarray]:
        """Build the observed radial velocity and nadir SSH by adding the
        simulated errors (minus the estimated corrections) to the truth.

        Args:
            dic_input: truth fields ('ur_true', 'ssh_true', ...).
            error: simulated errors from :meth:`generate`.
            corr: estimated corrections from :meth:`correct`.
            fc: index of the first off-nadir column (default 1).

        Returns:
            dict with "ur_obs" and "ssh_obs".
        """
        ur_obs = + dic_input['ur_true']
        nsample = numpy.shape(dic_input['ssh_true'])[0]
        # Nadir SSH column only, kept 2-D.
        ssh_obs = + dic_input['ssh_true'][:, 0].reshape(nsample, -1)
        for item in self.generators:
            if isinstance(item, Altimeter):
                fc = 1
                ssh_obs += error['altimeter']
            elif isinstance(item, DSigma):
                ur_obs += error['dsigma']
            elif isinstance(item, Attitude):
                # TODO add attitude corrected error or yaw total
                att = 1
            elif isinstance(item, Instrument):
                ur_obs += error['instr']
            elif isinstance(item, Rain):
                # TODO mask data
                mask = fc
            elif isinstance(item, WaveDoppler):
                # Subtract the estimated correction when available.
                if 'wd_est' in dic_input.keys():
                    wd_corr = (error['wd'] - corr['wd_est'])
                else:
                    wd_corr = error['wd']
                ur_obs += wd_corr
        return {"ur_obs": ur_obs, "ssh_obs": ssh_obs}
| 9,631 | 42.981735 | 80 | py |
skimulator | skimulator-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# skimulator documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 10 16:54:19 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from os.path import expanduser
# User home directory. NOTE(review): appears unused in this conf.py.
home = expanduser("~")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Make the skimulator package importable so autodoc can find it.
sys.path.insert(0, os.path.abspath('../../'+'skimulator'))
sys.path.insert(0, os.path.abspath('../../'))
#sys.path.append(os.getcwd()+'/../../'+'skimulator')
#sys.path.append(os.getcwd()+'/../../')
#os.path.join(os.path.dirname(__file__), os.path.pardir)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules enabled for this project (autodoc for API
# extraction, plus the todo/mathjax/ifconfig/viewcode helpers).
extensions = ['sphinx.ext.' + name for name in
              ('autodoc', 'todo', 'mathjax', 'ifconfig', 'viewcode')]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'skimulator'
copyright = u'2017, Lucile Gaultier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
# NOTE(review): keep version/release in sync with the package metadata.
release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'skimulator'
# -- Options for LaTeX output ---------------------------------------------
# LaTeX output: all tunables ('papersize', 'pointsize', 'preamble', ...)
# are left at their Sphinx defaults, hence the empty overrides dict.
latex_elements = {}

# Grouping the document tree into LaTeX files: each tuple is
# (source start file, target name, title, author, documentclass).
latex_documents = [('index', 'skimulator.tex',
                    'SKIM Simulator Documentation', 'Lucile Gaultier',
                    'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Manual page description: (source start file, name, description,
# authors, manual section).
man_pages = [('index', 'SKIM Simulator', 'SKIM Simulator Documentation',
              ['Lucile Gaultier'], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Texinfo document tuple: (start file, target name, title, author,
# dir menu entry, description, category).
texinfo_documents = [('index', 'SKIM Simulator',
                      'SKIM Simulator Documentation', 'Lucile Gaultier',
                      'SKIM Simulator', 'Open Source SKIM Simulator.',
                      'Miscellaneous')]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'skimulator'
epub_author = u'Lucile Gaultier'
epub_publisher = u'Lucile Gaultier'
epub_copyright = u'2017, Lucile Gaultier'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'SKIM Simulator'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| 10,718 | 30.342105 | 80 | py |
skimulator | skimulator-master/doc/source/params.py | # -----------------------#
# Files and directories
# -----------------------#
## -- Get the user home directory
from os.path import expanduser
import os
import math
home = expanduser("~")
# ------ Name of the configuration (to build output files names)
config = [yourconfig]
# ------ Directory that contains orbit file:
dir_setup = os.path.join([yourpath], 'skimulator', 'data')
# ------ Directory that contains your own inputs:
indatadir = '[yourpath_to_yourdata]/'
# ------ Directory that contains your outputs:
outdatadir = '[yourpath_to_outputs]/'
# ------ Orbit file:
satname = [chosenorbit]
filesat = os.path.join(dir_setup, [chosenorbit])
# ------ Number of days in orbit (optional if specified in orbit file)
satcycle = 29
# ------ Satellite elevation (optional if specified in orbit file)
sat_elev = 817 * 10**3
# ------ Order of columns (lon, lat, time) in orbit file
# (default is (0, 1, 2) with order_orbit_col = None)
order_orbit_col = None
# , dir_setup+os.sep+'orbjason.txt', dir_setup+os.sep+'orbaltika.txt' ]
# ------ Number of processors for parallelisation purposes
proc_number = [number of processor (integer)]
# ------ Deactivate printing of progress bar to avoid huge log
progress_bar = True or False
# -----------------------#
# SKIM swath parameters
# -----------------------#
# ------ Satellite grid file root name:
# (Final file name is root_name_[numberofpass].nc)
filesgrid = os.path.join(outdatadir, '{}_grid'.format(config))
or filesgrid = os.path.join(outdatadir, '[your_grid_root_name]')
# ------ Force the computation of the satellite grid:
makesgrid = True or False
# ------ Give a subdomain if only part of the model is needed:
# (modelbox=[lon_min, lon_max, lat_min, lat_max])
# (If modelbox is None, the whole domain of the model is considered)
modelbox = None or [yourlon_min, yourlon_max, yourlat_min, yourlat_max]
#------- Rotation speed of the antenna (in tr/min)
rotation_speed = rotation depends on the chosen config
#------- List of position of beams:
list_pos = (0, [angle_in_rad], [angle_in_rad] ...)
#------- List of angle of beams in degrees:
list_angle = ([incidence], [incidence], [incidence] ...)
#------- List of timeshift as regard to nadir for 12 degree beams:
list_shift = (1, 3, 2 ...)
#------- Cycle duration
cycle = 0.0096
# ------ Shift longitude of the orbit file if no pass is in the domain
# (in degree): Default value is None (no shift)
shift_lon = 0
# ------ Shift time of the satellite pass (in day):
# Default value is None (no shift)
shift_time = None
# -----------------------#
# Model input parameters
# -----------------------#
# ------ List of model files:
# (The first file contains the grid and is not considered as model data)
# To generate the noise alone, file_input=None and specify region
# in modelbox
file_input = os.path.join(indatadir, '[your_list_of_file_name.txt]') or None
# ------ Type of model data:
# (Optional, default is NETCDF_MODEL and reads netcdf3 and netcdf4 files)
# (Other option is WW3)
model = 'WW3' or 'NETCDF_MODEL'
# ------ First time of the model
first_time = 'yyyy-mm-ddTHH:MM:SSZ'
# ------ Grid file name
file_grid_model = (os.path.join(indatadir, [yourgridfileu]),
os.path.join(indatadir, [yourgridfilev]),)
# ------ Specify if there is a ice mask for high latitudes
# (if true, mask is recomputed at each cycle)
ice_mask = False or True
# ------ Type of grid:
# 'regular' or 'irregular', if 'regular' only 1d coordinates
# are extracted from model
grid = 'regular'
# ------ Specify list of variable:
list_input_var = {'ucur': [[u_var], [vel_ext], 0], 'vcur': [[v_var], [v_ext], 1],
'uuss': [[uuss_var], [uss_ext], 0], 'vuss': [[vuss_var], [uss_ext], 1],
'ice': [[ice_var], [ice_ext], 0], 'mssd': [[mssd_var], [msd_ext], 0],
                  'mssx': [[mssx_var], [mss_ext], 0], 'mssy': [[mssy_var], [mss_ext], 1],
'ssh': [[ssh_var], [ssh_ext], 0],
'uwnd': [[uwnd_var], [wnd_ext], 0], 'vwnd': [[vwnd_var], [wnd_ext], 1]}
# ------ Specify longitude variable:
lon = ('longitude', 'longitude')
# ------ Specify latitude variable:
lat = ('latitude', 'latitude')
# ------ Specify number of time in file:
dim_time = 24
# ------ Time step between two model outputs (in days):
timestep = 1/24.
# ------ Number of outputs to consider:
# (timestep*nstep=total number of days)
nstep = 35*24
# ------ Not a number value:
model_nan = -32767.
# -----------------------#
# SKIM output files
# -----------------------#
# ------ Output file root name:
# (Final file name is root_name_c[cycle].nc
file_output = os.path.join(outdatadir, config)
# ------ Interpolation of the SSH from the model (if grid is irregular and
# pyresample is not installed:
# (either 'linear' or 'nearest', use 'nearest' for large region
# as it is faster and use less memory.)
interpolation = 'nearest' or 'linear'
# ------ List of output variables:
list_output = ['ssh_obs', 'ur_true', 'ucur', 'vcur', 'uuss', 'vuss', 'instr',
'radial_angle', 'vwnd', 'mssx', 'mssy', 'mssxy', 'uwb',
'ssh_true', 'ssh', 'ice', 'mssd',
'vindice', 'ur_obs', 'uwnd', 'sigma0']
# -----------------------#
# SKIM error parameters
# -----------------------#
# ------ File containing random coefficients to compute and save
# random error coefficients so that runs are reproducible:
# If file_coeff is specified and does not exist, file is created
# If you don't want runs to be reproducible, file_coeff is set to None
file_coeff = None or os.path.join(outdatadir, 'Random_coeff.nc')
# Compute instrumental nadir noise:
nadir = True
# ------ Number of random realisations for instrumental and geophysical error
# (recommended ncomp=2000), ncomp1d is used for 1D spectrum, and ncomp2d
# is used for 2D spectrum (wet troposphere computation):
ncomp1d = 3000
ncomp2d = 2000
# ------- Instrument white noise error
instr = True or False
# ------- Choice of instrument configuration
instr_configuration = 'A' or 'B'
# ------- Attitude error
attitude = True or False
# ------- File which provide the AOCS error:
yaw_file = os.path.join(dir_setup, 'sample_req1.nc')
# ------- Wave bias
uwb = True or False
## -- Geophysical error
## ----------------------
# ------ Consider ice in sigma0 computation
ice = True or False
# ------ Rain error (True to compute it):
rain = True or False
# ------ Rain file containing scenarii (python file):
rain_file = os.path.join(dir_setup, [yourrainscenarii])
# ------ Threshold to flag data:
rain_threshold = 0.15
# -----------------------#
# L2C computation
# -----------------------#
# config name for L2c:
config_l2c = '[yourl2cconfig]'
# Length resolution to select neighbors (in km):
resol = 40
# Grid resolution for l2c (alongtrack, acrosstrack) grid (in km):
posting = 5
# Remove noisy data around nadir (in km):
ac_threshold = 20
# List of variables to be interpolated on the swath:
list_input_var_l2c = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 1]}
# -----------------------#
# L2D computation
# -----------------------#
# config name for L2d:
config_l2d = ''
# Length resolution to select neighbors (multiplication factor):
resol_spatial_l2d = 1
# Temporal resolution to select neighbors (multiplication factor):
resol_temporal_l2d = 1
# Grid resolution for l2d (lat, lon) grid (in degrees):
posting_l2d = (0.1, 0.1)
# Time domain: (start_time, end_time, dtime) in days:
time_domain = (5, 25, 1)
# Spatial domain (lon_min, lon_max, lat_min, lat_max):
spatial_domain = [0, 360, -90, 90]
# List of variables to be interpolated on the grid:
list_input_var_l2d = {'ucur': ['ucur', 'cur', 0], 'vcur': ['vcur', 'cur', 1]}
| 7,779 | 38.492386 | 89 | py |
skimulator | skimulator-master/doc/images/code_image/Fig4.py | '''
FIG. 4: Radial currents and instrumental noise.
'''
import netCDF4
import numpy
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import skimulator.rw_data as rw
import glob
import cartopy
import os
# Initialize color: one marker/quiver color per beam column.
listcolor = ['c', 'y', 'b', 'g', 'k', 'r', 'c', 'y']
# List files
# NOTE(review): indatadir and config are each assigned twice; only the
# second value of each is effective (the first looks like a leftover
# alternate setup).
indatadir = '/mnt/data/project/'
indatadir = '/tmp/key/project/'
indatadir = os.path.join(indatadir, 'skim', 'skim_output')
config="WW3_GS_6b108az"
config="WW3_GS_8b105az"
filesgrid = os.path.join(indatadir, '{}_'.format(config))
ipass = 59
indatapath = '{}c01_p{:03d}.nc'.format(filesgrid, ipass)
# NOTE(review): listfile is built and sorted but never used afterwards.
listfile = glob.glob(indatapath)
listfile = sorted(listfile)
outdatadir = '../'
# Plot domain [lon_min, lon_max, lat_min, lat_max] and quiver scale.
modelbox = [-73, -71.0, 34.0, 36.0]
scale = 11
# Prepare figure: PlateCarree map of the model box with land and gridlines.
pyplot.figure(figsize=(10, 15))
projection = cartopy.crs.PlateCarree()
transform = cartopy.crs.PlateCarree()
projection = transform
ax1 = pyplot.subplot(111, projection=projection)
ax1.add_feature(cartopy.feature.LAND, zorder=1, edgecolor='black')
ax1.set_extent(modelbox, projection)
gl = ax1.gridlines(crs=transform, draw_labels=True, color='gray',
                   linestyle='--', alpha=0.5)
gl.xlabels_top = False
gl.ylabels_left = False
# Read the L2 output file (radial current + instrumental noise) and the
# matching grid file (radial angle).
data = netCDF4.Dataset(indatapath, 'r')
indatapath = '{}grid_p{:03d}.nc'.format(filesgrid, ipass)
datag = netCDF4.Dataset(indatapath, 'r')
lon = data['lon'][:]
# Wrap longitudes to [-180, 180] for the map extent.
lon[lon > 180] = lon[lon > 180] - 360
lat = data['lat'][:]
ur = data['ur_model'][:]
corrangle = datag['radial_angle'][:]
urinstr = data['instr'][:]
urnoise = ur + urinstr
# Project the radial components onto east/north using the radial angle.
uur = ur * numpy.cos(corrangle)
vur = ur * numpy.sin(corrangle)
uurnoise = urnoise * numpy.cos(corrangle)
vurnoise = urnoise * numpy.sin(corrangle)
# One quiver pair per beam column: noisy field (green) over truth (red).
for i in range(numpy.shape(lon)[1]):
    style_color = '{}+'.format(listcolor[i])
    #pyplot.plot(lon[:, i], lat[:, i], style_color,
    #            transform=cartopy.crs.PlateCarree())
    pyplot.quiver(lon[:, i], lat[:, i], uurnoise[:, i], vurnoise[:, i],
                  color='green', scale=scale, transform=transform)
    pyplot.quiver(lon[:, i], lat[:, i], uur[:, i], vur[:, i], color='red',
                  scale=scale, transform=transform, alpha=0.5)
pyplot.savefig(os.path.join(outdatadir, 'Fig4.png'))
| 2,193 | 30.797101 | 74 | py |
skimulator | skimulator-master/doc/images/code_image/Fig6.py | '''
FIG. 6: Radial currents with combined uss_err and instrumental noise.

Same layout as Fig. 4/5, but the green quivers carry both the uss_err
contribution and the instrumental noise.
'''
import netCDF4
import numpy
import matplotlib
# Non-interactive backend so the figure can be produced headless.
matplotlib.use('Agg')
from matplotlib import pyplot
import skimulator.rw_data as rw
import glob
import cartopy
import os
# Initialize color
# One marker colour per beam (up to 8 beams).
listcolor = ['c', 'y', 'b', 'g', 'k', 'r', 'c', 'y']
# List files
# NOTE(review): hard-coded local paths; each later assignment overrides
# the earlier one -- confirm the intended directory and configuration.
indatadir = '/mnt/data/project/'
indatadir = '/tmp/key/project/'
indatadir = os.path.join(indatadir, 'skim', 'skim_output')
config="WW3_GS_6b108az"
config="WW3_GS_8b105az"
filesgrid = os.path.join(indatadir, '{}_'.format(config))
ipass = 59
indatapath = '{}c01_p{:03d}.nc'.format(filesgrid, ipass)
listfile = glob.glob(indatapath)
listfile = sorted(listfile)
outdatadir = '../'
# Plot domain (lon_min, lon_max, lat_min, lat_max) and quiver scale.
modelbox = [-73, -71.0, 34.0, 36.0]
scale = 11
# Prepare figure
pyplot.figure(figsize=(10, 15))
projection = cartopy.crs.PlateCarree()
transform = cartopy.crs.PlateCarree()
projection = transform
ax1 = pyplot.subplot(111, projection=projection)
ax1.add_feature(cartopy.feature.LAND, zorder=1, edgecolor='black')
ax1.set_extent(modelbox, projection)
gl = ax1.gridlines(crs=transform, draw_labels=True, color='gray',
                   linestyle='--', alpha=0.5)
gl.xlabels_top = False
gl.ylabels_left = False
# Read the simulated pass and the matching grid file.
data = netCDF4.Dataset(indatapath, 'r')
indatapath = '{}grid_p{:03d}.nc'.format(filesgrid, ipass)
datag = netCDF4.Dataset(indatapath, 'r')
lon = data['lon'][:]
# Wrap longitudes from [0, 360) to [-180, 180).
lon[lon > 180] = lon[lon > 180] - 360
lat = data['lat'][:]
ur = data['ur_model'][:]
corrangle = datag['radial_angle'][:]
# Total error: uss_err plus instrumental noise.
ur_noise = data['uss_err'][:] + data['instr'][:]
urnoise = ur + ur_noise
# Project the scalar radial velocities to east/north components.
uur = ur * numpy.cos(corrangle)
vur = ur * numpy.sin(corrangle)
uurnoise = urnoise * numpy.cos(corrangle)
vurnoise = urnoise * numpy.sin(corrangle)
# One quiver layer per beam: green = noisy, red = noise-free model.
for i in range(numpy.shape(lon)[1]):
    style_color = '{}+'.format(listcolor[i])
    #pyplot.plot(lon[:, i], lat[:, i], style_color,
    #            transform=cartopy.crs.PlateCarree())
    pyplot.quiver(lon[:, i], lat[:, i], uurnoise[:, i], vurnoise[:, i],
                  color='green', scale=scale, transform=transform)
    pyplot.quiver(lon[:, i], lat[:, i], uur[:, i], vur[:, i], color='red',
                  scale=scale, transform=transform, alpha=0.5)
pyplot.savefig(os.path.join(outdatadir, 'Fig6.png'))
| 2,216 | 31.130435 | 74 | py |
skimulator | skimulator-master/doc/images/code_image/Fig2.py | '''
FIG. 2: scheme of the SKIM geometry with 4 beams at 12 degrees and 1 beam at 6 degree and 5 beams at 12 degrees and 2 beams at 6 degree.
'''
import netCDF4
import numpy
import matplotlib
# Non-interactive backend so the figure can be produced headless.
matplotlib.use('Agg')
from matplotlib import pyplot
import skimulator.rw_data as rw
import glob
import cartopy
import os
# Initialize color
# One marker colour per beam.
listcolor = ['c', 'y', 'b', 'g', 'k', 'r', 'c', 'y']
# List files
config="WW3_AT_metop_2018_8b"
# NOTE(review): hard-coded local paths; the second assignment overrides
# the first -- confirm the intended data directory.
indatadir = '/mnt/data/project/'
indatadir = '/tmp/key/data/skim_at_output/{}'.format(config)
#indatadir = os.path.join(indatadir, 'skim', 'skim_output')
filesgrid = os.path.join(indatadir, '{}_grid'.format(config))
ipass = 58
indatapath = '{}_p{:03d}.nc'.format(filesgrid, ipass)
outdatadir = '../'
modelbox = [-90, -70., 32., 40.]
#modelbox = [270, 290., 32., 40.]
# Prepare figure
pyplot.figure(figsize=(10, 15))
projection = cartopy.crs.Mercator()
transform = cartopy.crs.PlateCarree()
projection = transform
# Left panel: beam ground tracks for the first configuration.
# NOTE(review): this panel loads the "..._8b" config but is titled
# "6 beams configuration" (and vice versa below) -- confirm the labels.
ax1 = pyplot.subplot(121, projection=projection)
#ax.coastlines()
ax1.add_feature(cartopy.feature.OCEAN, zorder=1)
ax1.add_feature(cartopy.feature.LAND, zorder=1, edgecolor='black')
#ax.set_extent([modelbox[0], modelbox[1], modelbox[2], modelbox[3]],
#              projection)
ax1.set_extent([-74., -68., 34, 37], projection)
gl = ax1.gridlines(crs=transform, draw_labels=True, color='gray',
                   linestyle='--', alpha=0.5)
gl.xlabels_top = False
gl.ylabels_left = False
ax1.set_title('(a) 6 beams configuration')
data = netCDF4.Dataset(indatapath, 'r')
lon = data['lon'][:]
# Wrap longitudes from [0, 360) to [-180, 180).
lon[lon > 180] = lon[lon > 180] - 360
lat = data['lat'][:]
lon_nadir = data['lon_nadir'][:]
lon_nadir[lon_nadir > 180] = lon_nadir[lon_nadir > 180] - 360
lat_nadir = data['lat_nadir'][:]
# Nadir track in black, then one colour per beam.
pyplot.plot(lon_nadir, lat_nadir, 'k+', transform=transform)
for i in range(numpy.shape(lon)[1]):
    style_color = '{}+'.format(listcolor[i])
    pyplot.plot(lon[:, i], lat[:, i], style_color,
                transform=cartopy.crs.PlateCarree())
# Right panel: same pass for the second configuration.
ax2 = pyplot.subplot(122, projection=projection)
#ax.coastlines()
ax2.add_feature(cartopy.feature.OCEAN, zorder=1)
ax2.add_feature(cartopy.feature.LAND, zorder=1, edgecolor='black')
#ax.set_extent([modelbox[0], modelbox[1], modelbox[2], modelbox[3]],
#              projection)
#config="WW3_GS_8b105az"
config="WW3_AT_metop_2018_6a"
indatadir = '/tmp/key/data/skim_at_output/{}'.format(config)
filesgrid = os.path.join(indatadir, '{}_grid'.format(config))
indatapath = '{}_p{:03d}.nc'.format(filesgrid, ipass)
print(indatapath)
ax2.set_extent([-74., -68., 34, 37], projection)
gl = ax2.gridlines(crs=transform, draw_labels=True, color='gray',
                   linestyle='--', alpha=0.5)
gl.xlabels_top = False
gl.ylabels_left = False
ax2.set_title('(b) 8 beams configuration')
data = netCDF4.Dataset(indatapath, 'r')
lon = data['lon'][:]
lon[lon > 180] = lon[lon > 180] - 360
lat = data['lat'][:]
lon_nadir = data['lon_nadir'][:]
lon_nadir[lon_nadir > 180] = lon_nadir[lon_nadir > 180] - 360
lat_nadir = data['lat_nadir'][:]
pyplot.plot(lon_nadir, lat_nadir, 'k+', transform=transform)
for i in range(numpy.shape(lon)[1]):
    style_color = '{}+'.format(listcolor[i])
    pyplot.plot(lon[:, i], lat[:, i], style_color,
                transform=cartopy.crs.PlateCarree())
pyplot.savefig(os.path.join(outdatadir, 'Fig2.png'))
| 3,290 | 35.164835 | 139 | py |
skimulator | skimulator-master/doc/images/code_image/Fig3.py | '''
FIG. 3: Model interpolated currents and the corresponding radial currents.

Left panel: model velocity magnitude and vectors from the WW3 current
file.  Right panel: the along-track interpolated velocity (green) and
the radial ("true") velocity projected back to east/north (red).
'''
import netCDF4
import numpy
import matplotlib
# Non-interactive backend so the figure can be produced headless.
matplotlib.use('Agg')
from matplotlib import pyplot
import skimulator.rw_data as rw
import glob
import cartopy
import os
# Initialize color
# One marker colour per beam.
listcolor = ['c', 'y', 'b', 'g', 'k', 'r', 'c', 'y']
# List files
config="WW3_AT_metop_2018_8b"
# NOTE(review): hard-coded local paths; the second assignment overrides
# the first -- confirm the intended data directory.
indatadir = '/mnt/data/project/'
indatadir = '/tmp/key/data/skim_at_output/{}'.format(config)
modeldatapath = '/tmp/key/data/model/ww3_gs/ww3.201109_cur.nc'
filesgrid = os.path.join(indatadir, '{}_'.format(config))
ipass = 58
indatapath = '{}c01_p{:03d}.nc'.format(filesgrid, ipass)
listfile = glob.glob(indatapath)
listfile = sorted(listfile)
outdatadir = '../'
# Plot domain (lon_min, lon_max, lat_min, lat_max) and quiver scale.
modelbox = [-71, -69.0, 34.0, 36.0]
scale = 11
is_cartopy = True
# Prepare figure
pyplot.figure(figsize=(10, 15))
projection = cartopy.crs.PlateCarree()
transform = cartopy.crs.PlateCarree()
if is_cartopy is True:
    #projection = transform
    ax1 = pyplot.subplot(121, projection=projection)
else:
    ax1 = pyplot.subplot(121)
if is_cartopy is True:
    ax1.add_feature(cartopy.feature.LAND, zorder=1, edgecolor='black')
    ax1.set_extent(modelbox, projection)
    gl = ax1.gridlines(crs=transform, draw_labels=True, color='gray',
                       linestyle='--', alpha=0.5)
    gl.xlabels_top = False
    gl.ylabels_left = False
ax1.set_title('(a) Model, "true" velocity')
# Read the simulated pass and the model current file; 'index' gives the
# model time step matching the pass.
data = netCDF4.Dataset(indatapath, 'r')
datam = netCDF4.Dataset(modeldatapath, 'r')
iifile = data['index'][:, 0]
lon = datam['longitude'][:]
# Wrap longitudes from [0, 360) to [-180, 180).
lon[lon > 180] = lon[lon > 180] - 360
lat = datam['latitude'][:]
mlon, mlat = numpy.meshgrid(lon, lat)
mu = datam['ucur'][int(iifile[0]),:, :]
mv = datam['vcur'][int(iifile[0]),:, :]
# Mask invalid/fill values, then zero them so quiver can draw.
mu=numpy.ma.masked_invalid(mu)
numpy.ma.masked_where(mu == datam['ucur']._FillValue, mu, copy=False)
mu[mu.mask] = 0
numpy.ma.masked_where(mv == datam['vcur']._FillValue, mv, copy=False)
mv[mv.mask] = 0
# Subsample every s0-th model grid point for the vector field.
s0 = 10
if is_cartopy is True:
    pyplot.pcolor(mlon, mlat, numpy.sqrt(mu**2 + mv**2), vmax=2.0, cmap='jet',
                  transform=transform)
    pyplot.quiver(mlon[::s0, ::s0], mlat[::s0, ::s0], mu[::s0, ::s0],
                  mv[::s0, ::s0], scale=scale, transform=transform)
else:
    pyplot.pcolor(mlon, mlat, numpy.sqrt(mu**2 + mv**2), vmax=2.0, cmap='jet')
    pyplot.quiver(mlon[::s0, ::s0], mlat[::s0, ::s0], mu[::s0, ::s0],
                  mv[::s0, ::s0])
indatapath = '{}grid_p{:03d}.nc'.format(filesgrid, ipass)
datag = netCDF4.Dataset(indatapath, 'r')
lon = data['lon'][:]
lon[lon > 180] = lon[lon > 180] - 360
lat = data['lat'][:]
ax2 = pyplot.subplot(122, projection=projection)
ax2.add_feature(cartopy.feature.LAND, zorder=1, edgecolor='black')
print(indatapath)
ax2.set_extent(modelbox, projection)
gl = ax2.gridlines(crs=transform, draw_labels=True, color='gray',
                   linestyle='--', alpha=0.5)
gl.xlabels_top = False
gl.ylabels_left = False
ax2.set_title('(b) Interpolated (green) and projected (red) velocity')
lon = data['lon'][:]
lon[lon > 180] = lon[lon > 180] - 360
lat = data['lat'][:]
ur = data['ur_true'][:]
corrangle = datag['radial_angle'][:]
datau = data['u_model'][:]
datav = data['v_model'][:]
# Project the radial velocity back to east/north; zero masked samples.
uur = ur * numpy.cos(corrangle)
uur[uur.mask] = 0
vur = ur * numpy.sin(corrangle)
vur[vur.mask] = 0
for i in range(numpy.shape(lon)[1]):
    style_color = '{}+'.format(listcolor[i])
    # pyplot.plot(lon[:, i], lat[:, i], style_color,
    #            transform=transform)
    pyplot.quiver(lon[:, i], lat[:, i], datau[:, i], datav[:, i],
                  color='green', scale=scale, transform=transform)
    pyplot.quiver(lon[:, i], lat[:, i], uur[:, i], vur[:, i], color='red',
                  scale=scale, transform=transform)
pyplot.savefig(os.path.join(outdatadir, 'Fig3.png'))
| 3,784 | 32.794643 | 78 | py |
skimulator | skimulator-master/doc/images/code_image/Fig1.py | '''
FIG. 1: 5-day worth of SKIM simulated data in a global configuration with the science orbit.

Plots the beam (cyan) and nadir (grey) ground tracks of the first 90
grid files on an orthographic globe.
'''
import netCDF4
import numpy
import matplotlib
# Non-interactive backend so the figure can be produced headless.
matplotlib.use('Agg')
from matplotlib import pyplot
import skimulator.rw_data as rw
import glob
import cartopy
import os
# List files
# NOTE(review): hard-coded local paths; each later assignment overrides
# the earlier one -- confirm the intended directories.
outdatadir = '/tmp/key'
outdatadir = '/mnt/data/project/'
outdatadir = os.path.join(outdatadir, 'skim', 'skim_output')
config="WW3_GLOB_6b108az"
filesgrid = os.path.join(outdatadir, '{}_grid'.format(config))
indatapath = '{}_*'.format(filesgrid)
listfile = glob.glob(indatapath)
listfile = sorted(listfile) #.sort()
outdatadir = '../'
modelbox = [0., 360., -90., 90.]
# Prepare figure
pyplot.figure(figsize=(10, 15))
ax = pyplot.axes(projection=cartopy.crs.Orthographic(0, 0))
#ax.set_extent([modelbox[0], modelbox[1], modelbox[2], modelbox[3]],
#              crs=cartopy.crs.Mercator())
#ax.coastlines()
ax.add_feature(cartopy.feature.OCEAN, zorder=1)
ax.add_feature(cartopy.feature.LAND, zorder=1, edgecolor='black')
ax.set_global()
#ax.gridlines()
# Overlay the first 90 passes: first beam in cyan, nadir in grey.
for ifile in listfile[:90]:
    data = netCDF4.Dataset(ifile, 'r')
    lon = data['lon'][:, 0]
    # Wrap longitudes from [0, 360) to [-180, 180).
    lon[lon > 180] = lon[lon > 180] - 360
    lat = data['lat'][:, 0]
    lon_nadir = data['lon_nadir'][:]
    lon_nadir[lon_nadir > 180] = lon_nadir[lon_nadir > 180] - 360
    lat_nadir = data['lat_nadir'][:]
    pyplot.plot(lon[:], lat[:], '.', color='#4EE2EC', markersize=0.3,
                transform=cartopy.crs.PlateCarree())
    pyplot.plot(lon_nadir, lat_nadir, '.', color='#565051', markersize=0.3,
                transform=cartopy.crs.PlateCarree())
pyplot.savefig(os.path.join(outdatadir, 'Fig1.png'))
| 1,647 | 31.96 | 92 | py |
skimulator | skimulator-master/doc/images/code_image/Fig5.py | '''
FIG. 5: Radial currents with uss_err noise only.

Same layout as Fig. 4, but the green quivers carry only the uss_err
contribution (no instrumental noise).
'''
import netCDF4
import numpy
import matplotlib
# Non-interactive backend so the figure can be produced headless.
matplotlib.use('Agg')
from matplotlib import pyplot
import skimulator.rw_data as rw
import glob
import cartopy
import os
# Initialize color
# One marker colour per beam (up to 8 beams).
listcolor = ['c', 'y', 'b', 'g', 'k', 'r', 'c', 'y']
# List files
# NOTE(review): hard-coded local paths; each later assignment overrides
# the earlier one -- confirm the intended directory and configuration.
indatadir = '/mnt/data/project/'
indatadir = '/tmp/key/project/'
indatadir = os.path.join(indatadir, 'skim', 'skim_output')
config="WW3_GS_6b108az"
config="WW3_GS_8b105az"
filesgrid = os.path.join(indatadir, '{}_'.format(config))
ipass = 59
indatapath = '{}c01_p{:03d}.nc'.format(filesgrid, ipass)
listfile = glob.glob(indatapath)
listfile = sorted(listfile)
outdatadir = '../'
# Plot domain (lon_min, lon_max, lat_min, lat_max) and quiver scale.
modelbox = [-73, -71.0, 34.0, 36.0]
scale = 11
# Prepare figure
pyplot.figure(figsize=(10, 15))
projection = cartopy.crs.PlateCarree()
transform = cartopy.crs.PlateCarree()
projection = transform
ax1 = pyplot.subplot(111, projection=projection)
ax1.add_feature(cartopy.feature.LAND, zorder=1, edgecolor='black')
ax1.set_extent(modelbox, projection)
gl = ax1.gridlines(crs=transform, draw_labels=True, color='gray',
                   linestyle='--', alpha=0.5)
gl.xlabels_top = False
gl.ylabels_left = False
# Read the simulated pass and the matching grid file.
data = netCDF4.Dataset(indatapath, 'r')
indatapath = '{}grid_p{:03d}.nc'.format(filesgrid, ipass)
datag = netCDF4.Dataset(indatapath, 'r')
lon = data['lon'][:]
# Wrap longitudes from [0, 360) to [-180, 180).
lon[lon > 180] = lon[lon > 180] - 360
lat = data['lat'][:]
ur = data['ur_model'][:]
corrangle = datag['radial_angle'][:]
ur_stroke = data['uss_err'][:]
urnoise = ur + ur_stroke
# Project the scalar radial velocities to east/north components.
uur = ur * numpy.cos(corrangle)
vur = ur * numpy.sin(corrangle)
uurnoise = urnoise * numpy.cos(corrangle)
vurnoise = urnoise * numpy.sin(corrangle)
# One quiver layer per beam: green = noisy, red = noise-free model.
for i in range(numpy.shape(lon)[1]):
    style_color = '{}+'.format(listcolor[i])
    #pyplot.plot(lon[:, i], lat[:, i], style_color,
    #            transform=cartopy.crs.PlateCarree())
    pyplot.quiver(lon[:, i], lat[:, i], uurnoise[:, i], vurnoise[:, i],
                  color='green', scale=scale, transform=transform)
    pyplot.quiver(lon[:, i], lat[:, i], uur[:, i], vur[:, i], color='red',
                  scale=scale, transform=transform, alpha=0.5)
pyplot.savefig(os.path.join(outdatadir, 'Fig5.png'))
| 2,199 | 30.884058 | 74 | py |
CompositionSpaceNFDI | CompositionSpaceNFDI-main/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Packaging script for compositionspace (APT analysis tools).
"""
from setuptools import setup, find_packages, Extension

with open('README.md') as readme_file:
    readme = readme_file.read()

setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]

setup(
    author="Alaukik Saxena, Sarath Menon, Mariano Forti",
    author_email='s.menon@mpie.de',
    python_requires='>=3.8',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        # Bug fix: the previous classifiers advertised Python 3.5-3.7,
        # contradicting python_requires='>=3.8'.
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ],
    description="APT analysis tools",
    install_requires = ['numpy', 'matplotlib', 'pandas', 'h5py', 'scikit-learn',
                        'tqdm', 'pyevtk', 'pyyaml', 'pyvista'],
    #license="GNU General Public License v3",
    long_description=readme,
    long_description_content_type='text/markdown',
    include_package_data=True,
    keywords='compositionspace',
    name='compositionspace',
    packages=find_packages(include=['compositionspace', 'compositionspace.*']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='',
    version='0.0.9',
    zip_safe=False,
    #entry_points={
    #    'console_scripts': [
    #        'calphy = calphy.kernel:main',
    #        'calphy_kernel = calphy.queuekernel:main',
    #    ],
    #}
)
| 1,643 | 29.444444 | 80 | py |
CompositionSpaceNFDI | CompositionSpaceNFDI-main/compositionspace/datautils.py |
import pandas as pd
import re
import os
import yaml
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pickle
import time
import h5py
import warnings
import compositionspace.paraprobe_transcoder as paraprobe_transcoder
#really check this!
pd.options.mode.chained_assignment = None
class DataPreparation:
    def __init__(self, inputfile):
        """Load parameters and make sure the output directory exists.

        Parameters
        ----------
        inputfile : dict or str
            Either an already-parsed parameter dictionary, or the path to
            a YAML parameter file.
        """
        if isinstance(inputfile, dict):
            self.params = inputfile
        else:
            with open(inputfile, "r") as fin:
                params = yaml.safe_load(fin)
                self.params = params
        # Version of the data-preparation pipeline, not of the package.
        self.version = "1.0.0"
        # Create the output directory up front so every writer can assume it.
        if not os.path.exists(self.params['output_path']):
            os.mkdir(self.params['output_path'])
def get_label_ions(self, pos, rrngs):
pos['comp'] = ''
pos['colour'] = '#FFFFFF'
pos['nature'] = ''
count=0;
for n,r in rrngs.iterrows():
count= count+1;
pos.loc[(pos.Da >= r.lower) & (pos.Da <= r.upper),['comp','colour', 'nature']] = [r['comp'],'#' + r['colour'],count]
return pos
def atom_filter(self, x, atom_range):
"""
Get a list of atom species and their counts
Parameters
----------
Returns
-------
Notes
-----
Assuming all the data
"""
atom_total = []
x_arr = x.values
mass = x_arr[:,-1]
x_columns = x.columns
for i in range(len(atom_range)):
low_bound = atom_range['lower'][i]
upper_bound = atom_range['upper'][i]
id_ = np.where(np.logical_and(mass>=low_bound, mass<=upper_bound))
atom = x_arr[id_]
atom_total.append(atom)
atom_total = tuple(atom_total)
atom_total = np.vstack(atom_total)
atom_total = pd.DataFrame(atom_total, columns = x_columns)
count_atom= len(atom_total)
return atom_total, count_atom
def get_pos(self, file_name):
"""
Read the pos file
Parameters
----------
file_name: string
Name of the input file
Returns
-------
pos: np structured array
The atom positions and ---- ratio
Notes
-----
Assumptions
Examples
--------
Raises
------
FileNotFoundError: describe
"""
if not os.path.exists(file_name):
raise FileNotFoundError(f"filename {file_name} does not exist")
with open(file_name, 'rb') as f:
dt_type = np.dtype({'names':['x', 'y', 'z', 'm'],
'formats':['>f4', '>f4', '>f4', '>f4']})
pos = np.fromfile(f, dt_type, -1)
pos = pos.byteswap().newbyteorder()
return pos
def get_rrng(self, file_name):
"""
Read the data
Parameters
----------
Returns
-------
Notes
-----
"""
if not os.path.exists(file_name):
raise FileNotFoundError(f"filename {file_name} does not exist")
patterns = re.compile(r'Ion([0-9]+)=([A-Za-z0-9]+).*|Range([0-9]+)=(\d+.\d+) +(\d+.\d+) +Vol:(\d+.\d+) +([A-Za-z:0-9 ]+) +Color:([A-Z0-9]{6})')
ions = []
rrngs = []
with open(file_name, "r") as rf:
for line in rf:
m = patterns.search(line)
if m:
if m.groups()[0] is not None:
ions.append(m.groups()[:2])
else:
rrngs.append(m.groups()[2:])
ions = pd.DataFrame(ions, columns=['number','name'])
ions.set_index('number',inplace=True)
rrngs = pd.DataFrame(rrngs, columns=['number','lower','upper','vol','comp','colour'])
rrngs.set_index('number',inplace=True)
rrngs[['lower','upper','vol']] = rrngs[['lower','upper','vol']].astype(float)
rrngs[['comp','colour']] = rrngs[['comp','colour']].astype(str)
return ions, rrngs
    def get_apt(self, file_name):
        """
        Read a Cameca .apt file.

        Parameters
        ----------
        file_name : str
            Path to the .apt file.

        Returns
        -------
        numpy.ndarray
            Positions with the mass-to-charge column appended, i.e. one
            row per hit with columns (x, y, z, Da).

        Raises
        ------
        FileNotFoundError
            If *file_name* does not exist.
        """
        if not os.path.exists(file_name):
            raise FileNotFoundError(f"filename {file_name} does not exist")
        # Decoding of the proprietary .apt layout is delegated to the
        # bundled paraprobe transcoder.
        apt = paraprobe_transcoder.paraprobe_transcoder(file_name)
        apt.read_cameca_apt()
        POS = apt.Position
        MASS = apt.Mass
        # Append the mass column to the Nx3 position block.
        POS_MASS = np.concatenate((POS,MASS),axis = 1)
        return POS_MASS
def get_apt_dataframe(self):
"""
Read the data
Parameters
----------
Returns
-------
Notes
-----
"""
df_Mass_POS_lst = []
file_name_lst=[]
ions = None
rrngs = None
pbar = tqdm(os.listdir(self.params["input_path"]), desc="Reading files")
for filename in pbar:
if filename.endswith(".POS"):
#print(filename)
path = os.path.join(self.params["input_path"], filename)
pos = self.get_pos(path)
df_POS_MASS = pd.DataFrame({'x':pos['x'],'y': pos['y'],'z': pos['z'],'Da': pos['m']})
df_Mass_POS_lst.append(df_POS_MASS)
file_name_lst.append(filename)
if filename.lower().endswith(".pos"):
path = os.path.join(self.params["input_path"], filename)
pos = self.get_pos(path)
df_POS_MASS = pd.DataFrame({'x':pos['x'],'y': pos['y'],'z': pos['z'],'Da': pos['m']})
df_Mass_POS_lst.append(df_POS_MASS)
file_name_lst.append(filename)
if filename.endswith(".apt"):
path = os.path.join(self.params["input_path"], filename)
POS_MASS = self.get_apt(path)
df_POS_MASS = pd.DataFrame(POS_MASS, columns = ["x","y","z","Da"])
df_Mass_POS_lst.append(df_POS_MASS)
file_name_lst.append(filename)
if filename.lower().endswith(".rrng"):
path = os.path.join(self.params["input_path"], filename)
ions,rrngs = self.get_rrng(path)
if filename.endswith(".RRNG"):
path = os.path.join(self.params["input_path"], filename)
ions,rrngs = self.get_rrng(rrange_file)
return (df_Mass_POS_lst, file_name_lst, ions, rrngs)
    def get_big_slices(self):
        """
        Cut every input dataset into params["n_big_slices"] slabs along z
        and write them to one "large chunks" HDF5 file per input file.

        Each atom is first labelled with an integer species id ("spec":
        the index of its composition in the sorted unique range list).
        The output paths are recorded in self.chunk_files.
        """
        #df_lst, files, ions, rrngs= read_apt_to_df(folder)
        df_lst, files, ions, rrngs= self.get_apt_dataframe()
        filestrings = []
        prefix = self.params['output_path']
        for idx, file in enumerate(files):
            org_file = df_lst[idx]
            atoms_spec = []
            # One pass per unique composition: filter its atoms and tag
            # them with the composition's index.
            c = np.unique(rrngs.comp.values)
            for i in range(len(c)):
                range_element = rrngs[rrngs['comp']=='{}'.format(c[i])]
                total, count = self.atom_filter(org_file, range_element)
                name = i
                total["spec"] = [name for j in range(len(total))]
                atoms_spec.append(total)
            df_atom_spec = pd.concat(atoms_spec)
            x_wu=df_atom_spec
            sorted_df = x_wu.sort_values(by=['z'])
            filestring = "file_{}_large_chunks_arr.h5".format(file.replace(".","_"))
            filestring = os.path.join(prefix, filestring)
            filestrings.append(filestring)
            hdf = h5py.File(filestring, "w")
            group1 = hdf.create_group("group_xyz_Da_spec")
            group1.attrs["columns"] = ["x","y","z","Da","spec"]
            # Maps the integer "spec" labels back to composition names.
            group1.attrs["spec_name_order"] = list(c)
            # Equal-thickness z slabs spanning the full z extent.
            sublength_x= abs((max(sorted_df['z'])-min(sorted_df['z']))/self.params["n_big_slices"])
            start = min(sorted_df['z'])
            end = min(sorted_df['z']) + sublength_x
            pbar = tqdm(range(self.params["n_big_slices"]), desc="Creating chunks")
            for i in pbar:
                # NOTE(review): inclusive="both" with adjacent windows
                # means atoms exactly on a slab boundary land in two
                # chunks -- confirm this duplication is acceptable.
                temp = sorted_df[sorted_df['z'].between(start, end, inclusive="both")]
                group1.create_dataset("chunk_{}".format(i), data = temp.values)
                start += sublength_x
                end += sublength_x
            hdf.close()
        self.chunk_files = filestrings
def get_big_slices_molecules(self):
df_lst, files,ions,rrngs = self.get_apt_dataframe()
for file_idx in range(len(files)):
Org_file =df_lst[file_idx]
atoms_spec = []
c = np.unique(rrngs.comp.values)
for i in range(len(c)):
print(c[i])
range_element = rrngs[rrngs['comp']=='{}'.format(c[i])]
total, Count = atom_filter(Org_file,range_element)
name = c[i]
#name = i
total["spec"] = [name for j in range(len(total))]
atoms_spec.append(total)
Df_atom_spec = pd.concat(atoms_spec)
#############################
#check molecules:
print("MOLECULE CHECK")
molecule_check = np.array([len(c[i].split(" ")) for i in range(len(c))])
molecules = c[np.argwhere(molecule_check == 2)]
Df_lst = []
for mol in molecules:
spec_mol = mol[0].split(" ")
Df = Df_atom_spec.loc[Df_atom_spec['spec'] == mol[0]].copy()
Df["spec"] = [spec_mol[1]]*len(Df)
Df_atom_spec.loc[Df_atom_spec['spec'] == mol[0], ['spec'] ] = spec_mol[0]
Df_lst.append(Df)
Df_lst.append(Df_atom_spec)
Df_atom_spec = pd.concat(Df_lst)
SpecSemi = np.unique(Df_atom_spec.spec.values)
#check doubles
print(SpecSemi)
mol_doub_check = np.array([int(SpecSemi[j].split(":")[1]) for j in range(len(SpecSemi))])
mol_doub = SpecSemi[np.argwhere(mol_doub_check == 2)]
Df_lst = []
for mol in mol_doub:
spec_mol = mol[0].split(":")[0]+":1"
Df = Df_atom_spec.loc[Df_atom_spec['spec'] == mol[0]].copy()
Df["spec"] = [spec_mol]*len(Df)
Df_atom_spec.loc[Df_atom_spec['spec'] == mol[0], ['spec'] ] = spec_mol
Df_lst.append(Df)
Df_lst.append(Df_atom_spec)
Df_atom_spec = pd.concat(Df_lst)
SpecFinal = np.unique(Df_atom_spec.spec.values)
Df_spec_lst = []
for spec_ID in range(len(SpecFinal)):
print( SpecFinal[spec_ID])
Df = Df_atom_spec.loc[Df_atom_spec['spec'] == SpecFinal[spec_ID]].copy()
name = spec_ID
Df["spec"] = [name for j in range(len(Df))]
Df_spec_lst.append(Df)
Df_atom_spec = pd.concat(Df_spec_lst)
############################
x_wu=Df_atom_spec
sort_x = x_wu.sort_values(by=['z'])
## open hdf5 file
#hdf = pd.HDFStore("./file_{}_large_chunks.h5".format(files[file_idx].replace(".","_")))
output_path = self.params['output_path'] + "/Output_big_slices.h5"
hdf = h5py.File(output_path, "w")
G1 = hdf.create_group("Group_xyz_Da_spec")
G1.attrs["columns"] = ["x","y","z","Da","spec"]
G1.attrs["spec_name_order"] = list(SpecFinal)
##end
sublength_x= abs((max(sort_x['z'])-min(sort_x['z']))/self.params["n_big_slices"])
print(sublength_x)
start = min(sort_x['z'])
end = min(sort_x['z']) +sublength_x
for i in tqdm(range(self.params["n_big_slices"])):
#temp = sort_x.iloc[start:end]
print(start)
print(end)
temp = sort_x[sort_x['z'].between(start, end, inclusive=True)]
#temp.to_csv('B3_Hi_ent_cubes/{}.csv'.format(i), index=False)
#hdf.put("chunk_{}".format(i), temp, format = "table", data_columns= True)
##Put data into hdf5 file
G1.create_dataset("chunk_{}".format(i), data = temp.values)
##end
start += sublength_x
end += sublength_x
#print(end)
hdf.close()
    def get_voxels(self):
        """
        Split every big z-slice into cubic voxels of edge length
        params["voxel_size"] and write each voxel's atoms to a new
        "small chunks" HDF5 file (one dataset per voxel).

        Reads the files produced by get_big_slices (self.chunk_files) and
        records the output paths in self.voxel_files.  Voxel datasets are
        numbered globally and bucketed into HDF5 groups of 100000
        datasets each (group names "0", "100000", ...).
        """
        filestrings = []
        prefix = self.params['output_path']
        size = self.params["voxel_size"]
        for filename in self.chunk_files:
            hdfr = h5py.File(filename, "r")
            # Output file name mirrors the input ("large" -> "small").
            filestring = filename.replace("large", "small")
            #filestring = os.path.join(prefix, filestring)
            filestrings.append(filestring)
            with h5py.File(filestring, "w") as hdfw:
                group_r = hdfr.get("group_xyz_Da_spec")
                group_keys = list(group_r.keys())
                columns_r = list(list(group_r.attrs.values())[0])
                group1 = hdfw.create_group("0")
                # Carry the source columns over and record the extra
                # per-row voxel id column.
                prev_attri =list(list(group_r.attrs.values())[0])
                prev_attri.append("vox_file")
                group1.attrs["columns"] = prev_attri
                group1.attrs["spec_name_order"] = list(list(group_r.attrs.values())[1])
                name_sub_file = 0
                step = 0
                m=0
                pbar = tqdm(group_keys, desc="Getting Voxels")
                for key in pbar:
                    read_array = np.array(group_r.get(key))
                    s= pd.DataFrame(data = read_array, columns = columns_r)
                    x_min = round(min(s['x']))
                    x_max = round(max(s['x']))
                    y_min = round(min(s['y']))
                    y_max = round(max(s['y']))
                    z_min = round(min(s['z']))
                    z_max = round(max(s['z']))
                    p=[]
                    x=[]
                    # Sweep the bounding box in `size`-sized cubes
                    # (z outermost, then y, then x).
                    for i in range(z_min, z_max, size):
                        cubic = s[s['z'].between(i, i+size, inclusive="both")]
                        for j in range(y_min, y_max, size):
                            p = cubic[cubic['y'].between(j, j+size, inclusive="both")]
                            for k in range(x_min, x_max, size):
                                x = p[p['x'].between(k, k+size, inclusive="both")]
                                # NOTE(review): hard-coded minimum of >20
                                # atoms per voxel -- confirm, and consider
                                # making it a parameter.
                                if len(x['x'])>20:
                                    #warnings.warn("I am running some code with hardcoded numbers. Really recheck what's up here!")
                                    name ='cubes_z{}_x{}_y{}'.format(i,j,k).replace("-","m")
                                    # Start a new HDF5 group every 100000
                                    # voxels to bound group size.
                                    if step>99999:
                                        step=0
                                        m=m+1
                                        group1 = hdfw.create_group("{}".format(100000*m))
                                    x["vox_file"] = [name_sub_file for n_file in range(len(x))]
                                    group1.create_dataset("{}".format(name_sub_file), data = x.values)
                                    name_sub_file = name_sub_file+1
                                    step=step+1
                group1 = hdfw.get("0")
                # Total voxel count is stored as a string attribute on
                # group "0".  NOTE(review): hdfr is never closed here.
                group1.attrs["total_voxels"]="{}".format(name_sub_file)
        self.voxel_files = filestrings
    def calculate_voxel_composition(self, fileindex=0, outfilename="output_vox_ratio_composition.h5"):
        """
        Compute the per-voxel species composition ratios for the voxel
        file at self.voxel_files[fileindex] and write them to
        <output_path>/<outfilename> (dataset "vox_ratios").

        The output path is also stored in self.voxel_ratio_file.

        NOTE(review): the outer loop variable `voxel_file` is never used
        (every iteration re-processes self.voxel_files[fileindex] and
        overwrites the same output), and `vox_ratio_files` is never
        populated -- confirm whether a per-file loop was intended.
        """
        vox_ratio_files = []
        for voxel_file in self.voxel_files:
            small_chunk_file_name = self.voxel_files[fileindex]
            hdf_sm_r = h5py.File(small_chunk_file_name, "r")
            group = hdf_sm_r.get("0")
            # attrs order dependence: index [2] is the "total_voxels"
            # string, index [1] the species name list.
            total_voxels =list(list(group.attrs.values())[2])
            spec_lst_len = len(list(list(group.attrs.values())[1]))
            items = list(hdf_sm_r.items())
            item_lst = []
            # Voxel datasets are bucketed into groups of 100000; build the
            # [start, end) id window of each group.
            ### CHECK THESE NUMBERS!
            for item in range(len(items)):
                item_lst.append([100000*(item), 100000*(item+1)])
            item_lst = np.array(item_lst)
            # total_voxels is a list of digit characters; rebuild the int.
            total_voxels_int =""
            for number in total_voxels:
                total_voxels_int = total_voxels_int + number
            total_voxels_int = int(total_voxels_int)
            files = [file_num for file_num in range(total_voxels_int)]
            spec_names = np.arange(spec_lst_len)
            # One ratio column per species id, plus bookkeeping columns.
            dic_ratios = {}
            for spec_name in spec_names:
                dic_ratios["{}".format(spec_name)] = []
            dic_ratios["Total_no"]=[]
            dic_ratios["file_name"]=[]
            dic_ratios["vox"] = []
            ratios = []
            f_count = 0
            pbar = tqdm(files, desc="Calculating voxel composition")
            for filename in pbar:
                # Locate the group whose id window holds this voxel.
                group = np.min(item_lst[[filename in range(j[0],j[1]) for j in item_lst]])
                # Column 4 of a voxel dataset is the species id.
                arr = np.array(hdf_sm_r.get("{}/{}".format(group,filename))[:,4])
                N_x = len(arr)
                for spec in (spec_names):
                    ratio = (len(np.argwhere(arr==spec)))/N_x
                    dic_ratios["{}".format(spec)].append(ratio)
                dic_ratios["file_name"].append(filename)
                dic_ratios["vox"].append(f_count)
                dic_ratios["Total_no"].append(N_x)
                f_count = f_count+1
            df = pd.DataFrame.from_dict(dic_ratios)
            output_path = os.path.join(self.params["output_path"], outfilename)
            with h5py.File(output_path, "w") as hdfw:
                hdfw.create_dataset("vox_ratios", data =df.drop("file_name", axis = 1).values )
                hdfw.attrs["what"] = ["All the Vox ratios for a given APT smaple"]
                hdfw.attrs["howto_Group_name"] = ["Group_sm_vox_xyz_Da_spec/"]
                df_actual = df.drop("file_name", axis = 1)
                df_columns = list(df_actual.columns)
                hdfw.attrs["columns"]= df_columns
            hdf_sm_r.close()
        self.voxel_ratio_file = output_path
| 19,133 | 34.108257 | 151 | py |
CompositionSpaceNFDI | CompositionSpaceNFDI-main/compositionspace/segmentation.py | from compositionspace.datautils import DataPreparation
from compositionspace.models import get_model
from sklearn.decomposition import PCA
from sklearn.mixture import GaussianMixture
import json
import h5py
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from tqdm.notebook import tqdm
import os
from pyevtk.hl import pointsToVTK
from pyevtk.hl import gridToVTK#, pointsToVTKAsTIN
import yaml
import pyvista as pv
class CompositionClustering():
def __init__(self, inputfile):
if isinstance(inputfile, dict):
self.params = inputfile
else:
with open(inputfile, "r") as fin:
params = yaml.safe_load(fin)
self.params = params
self.version = "1.0.0"
    def get_PCA_cumsum(self, vox_ratio_file, vox_file):
        """
        Run a PCA on the voxel composition ratios and plot the cumulative
        explained variance (saved to <output_path>/PCA_cumsum.png).

        Parameters
        ----------
        vox_ratio_file : str
            HDF5 file with the "vox_ratios" dataset.
        vox_file : str
            Voxel ("small chunks") HDF5 file holding the species list.

        Returns
        -------
        tuple
            (cumulative explained-variance array, ratios DataFrame)
        """
        with h5py.File(vox_file,"r") as hdf:
            group = hdf.get("Group_sm_vox_xyz_Da_spec")
            group0 = hdf.get("0")
            # NOTE(review): attrs index [1] here vs [2] in
            # get_bics_minimization -- confirm which position holds
            # "spec_name_order"; attrs value order is fragile.
            spec_lst = list(list(group0.attrs.values())[1])
        with h5py.File(vox_ratio_file , "r") as hdfr:
            ratios = np.array(hdfr.get("vox_ratios"))
            ratios_columns = list(list(hdfr.attrs.values())[0])
            group_name = list(list(hdfr.attrs.values())[1])
        print(len(ratios))
        print((ratios_columns))
        ratios = pd.DataFrame(data=ratios, columns=ratios_columns)
        # Drop bookkeeping columns; keep only the per-species ratios.
        X_train=ratios.drop(['Total_no','vox'], axis=1)
        PCAObj = PCA(n_components = len(spec_lst))
        PCATrans = PCAObj.fit_transform(X_train)
        PCACumsumArr = np.cumsum(PCAObj.explained_variance_ratio_)
        plt.figure(figsize=(5,5))
        plt.plot( range(1,len(PCACumsumArr)+1,1),PCACumsumArr,"-o")
        plt.ylabel("Explained Variance")
        plt.xlabel('Dimensions')
        plt.grid()
        output_path = os.path.join(self.params["output_path"], "PCA_cumsum.png")
        plt.savefig(output_path)
        plt.show()
        return PCACumsumArr, ratios
    def get_bics_minimization(self, vox_ratio_file, vox_file):
        """
        Fit Gaussian mixtures with 1..params["bics_clusters"]-1 components
        to the voxel composition ratios, and plot AIC/BIC vs cluster
        count (saved to <output_path>/bics_aics.png).

        Returns
        -------
        tuple
            (params["bics_clusters"], list of AIC values, list of BIC
            values)
        """
        with h5py.File(vox_file,"r") as hdf:
            group = hdf.get("Group_sm_vox_xyz_Da_spec")
            group0 = hdf.get("0")
            spec_lst = list(list(group0.attrs.values())[2])
        with h5py.File(vox_ratio_file , "r") as hdfr:
            ratios = np.array(hdfr.get("vox_ratios"))
            ratios_columns = list(list(hdfr.attrs.values())[0])
            group_name = list(list(hdfr.attrs.values())[1])
        ratios = pd.DataFrame(data=ratios, columns=ratios_columns)
        gm_scores=[]
        aics=[]
        bics=[]
        # Drop bookkeeping columns; keep only the per-species ratios.
        X_train=ratios.drop(['Total_no','vox'], axis=1)
        n_clusters=list(range(1,self.params["bics_clusters"]))
        pbar = tqdm(n_clusters, desc="Clustering")
        for n_cluster in pbar:
            gm = GaussianMixture(n_components=n_cluster,verbose=0)
            gm.fit(X_train)
            y_pred=gm.predict(X_train)
            #gm_scores.append(homogeneity_score(y,y_pred))
            aics.append(gm.aic(X_train))
            bics.append(gm.bic(X_train))
        output_path = os.path.join(self.params["output_path"], "bics_aics.png")
        plt.plot(n_clusters, aics, "-o",label="AIC")
        plt.plot(n_clusters, bics, "-o",label="BIC")
        plt.legend()
        plt.savefig(output_path)
        plt.show()
        return self.params["bics_clusters"], aics, bics
def calculate_centroid(self, data):
"""
Calculate centroid
Parameters
----------
data: pandas DataFrame or numpy array
Returns
-------
centroid
"""
if isinstance(data, pd.DataFrame):
length = len(data['x'])
sum_x = np.sum(data['x'])
sum_y = np.sum(data['y'])
sum_z = np.sum(data['z'])
return sum_x/length, sum_y/length, sum_z/length
else:
length = len(data[:,0])
sum_x = np.sum(data[:,0])
sum_y = np.sum(data[:,1])
sum_z = np.sum(data[:,2])
return sum_x/length, sum_y/length, sum_z/length
    def get_voxel_centroid(self, vox_file, files_arr):
        """
        Compute the centroid of each requested voxel.

        Parameters
        ----------
        vox_file : str
            Voxel ("small chunks") HDF5 file.
        files_arr : iterable of int
            Voxel ids to process.

        Returns
        -------
        dict
            Lists 'x', 'y', 'z' (centroid coordinates) and 'file_name'
            (the corresponding voxel id).
        """
        with h5py.File(vox_file, "r") as hdf:
            items = list(hdf.items())
            # Voxel datasets are bucketed into groups of 100000; build the
            # [start, end) id window of each group.
            item_lst = []
            for item in range(len(items)):
                item_lst.append([100000*(item),100000*(item+1)])
            item_lst = np.array(item_lst)
            dic_centroids = {}
            dic_centroids["x"]=[]
            dic_centroids["y"]=[]
            dic_centroids["z"] = []
            dic_centroids["file_name"] = []
            # NOTE(review): df_centroids is created but never used.
            df_centroids = pd.DataFrame(columns=['x', 'y', 'z','filename'])
            for filename in files_arr:
                # Locate the group whose id window holds this voxel.
                group = np.min(item_lst[[filename in range(j[0],j[1]) for j in item_lst]])
                xyz_Da_spec_atoms = np.array(hdf.get("{}/{}".format(group, filename)))
                x, y, z = self.calculate_centroid(xyz_Da_spec_atoms)
                dic_centroids["x"].append(x)
                dic_centroids["y"].append(y)
                dic_centroids["z"].append(z)
                dic_centroids["file_name"].append(filename)
        return dic_centroids
    def get_composition_cluster_files(self, vox_ratio_file, vox_file, n_components):
        """
        Cluster per-voxel composition ratios into ``n_components`` phases.

        Parameters
        ----------
        vox_ratio_file : str, HDF5 file with the "vox_ratios" dataset and its
            column names / group name stored as attributes.
        vox_file : str, HDF5 file with the raw voxel data (only attributes read).
        n_components : int, number of phases to extract.

        Returns
        -------
        (cluster_lst, ratios): list of index arrays (one per phase, sorted by
        ascending cluster size) and the full ratios DataFrame.
        """
        ml_params = self.params["ml_models"]
        with h5py.File(vox_file,"r") as hdf:
            group = hdf.get("Group_sm_vox_xyz_Da_spec")
            group0 = hdf.get("0")
            # NOTE(review): spec_lst (and group) are read but never used below
            spec_lst = list(list(group0.attrs.values())[2])
        with h5py.File(vox_ratio_file , "r") as hdfr:
            ratios = np.array(hdfr.get("vox_ratios"))
            ratios_columns = list(list(hdfr.attrs.values())[0])
            group_name = list(list(hdfr.attrs.values())[1])
        ratios = pd.DataFrame(data=ratios, columns=ratios_columns)
        # drop bookkeeping columns so only composition features are clustered
        X_train=ratios.drop(['Total_no','vox'], axis=1)
        gm = get_model(ml_params=ml_params)
        gm.fit(X_train)
        y_pred=gm.predict(X_train)
        # collect the row indices belonging to each predicted phase
        cluster_lst = []
        for phase in range(n_components):
            cluster_lst.append(np.argwhere(y_pred == phase).flatten())
        # df_lst is built but not used afterwards
        df_lst = []
        for cluster in cluster_lst:
            df_lst.append(ratios.iloc[cluster])
        #sorting
        # reorder the clusters by ascending size
        cluster_lst_sort = []
        len_arr = np.array([len(x) for x in cluster_lst])
        sorted_len_arr = np.sort(len_arr)
        for length in sorted_len_arr:
            cluster_lst_sort.append(cluster_lst[np.argwhere(len_arr == length)[0,0]])
        #print([len(x) for x in cluster_lst_sort])
        cluster_lst = cluster_lst_sort
        return cluster_lst, ratios
def get_composition_clusters(self, vox_ratio_file, vox_file, outfile="vox_centroid_file.h5"):
voxel_centroid_output_file = []
n_components = self.params["n_phases"]
ml_params = self.params["ml_models"]
cluster_lst, ratios = self.get_composition_cluster_files(vox_ratio_file, vox_file, n_components)
plot_files = []
for phase in range(len(cluster_lst)):
cluster_files = []
cluster = cluster_lst[phase]
for voxel_id in cluster:
cluster_files.append(ratios['vox'][voxel_id])
plot_files.append(cluster_files)
plot_files_group = []
for cluster_files in plot_files:
plot_files_group.append([int(file_num) for file_num in cluster_files ])
with h5py.File(vox_file,"r") as hdf_sm_r:
hdf_sm_r = h5py.File(vox_file,"r")
group = hdf_sm_r.get("0")
#total_voxels =list(list(group.attrs.values())[0])
total_voxels =list(list(group.attrs.values())[2])
total_voxels_int =""
for number in total_voxels:
total_voxels_int = total_voxels_int + number
total_voxels_int = int(total_voxels_int)
hdf_sm_r.close()
plot_files_cl_All_group = [file_num for file_num in range(total_voxels_int)]
plot_files_group.append(plot_files_cl_All_group)
output_path = os.path.join(self.params["output_path"], outfile)
with h5py.File(output_path,"w") as hdfw:
for cluster_file_id in range(len(plot_files_group)):
G = hdfw.create_group(f"{cluster_file_id}")
G.attrs["what"] = ["Centroid of voxels"]
G.attrs["howto_Group_name"] = ["Group_sm_vox_xyz_Da_spec/"]
G.attrs["colomns"] = ["x","y","z","file_name"]
CentroidsDic = self.get_voxel_centroid(vox_file, plot_files_group[cluster_file_id])
G.create_dataset(f"{cluster_file_id}", data = pd.DataFrame.from_dict(CentroidsDic).values)
self.voxel_centroid_output_file = output_path
    def generate_plots(self):
        """
        Export one .vtu point cloud per phase from the voxel-centroid HDF5
        file written by ``get_composition_clusters``.

        The last group (which holds all voxels combined) is skipped via
        ``range(len(groups)-1)``. The resulting file paths are stored in
        ``self.vtk_files``.
        """
        vtk_files = []
        with h5py.File(self.voxel_centroid_output_file, "r") as hdfr:
            groups =list(hdfr.keys())
            for group in range(len(groups)-1):
                phase_arr = np.array(hdfr.get(f"{group}/{group}"))
                phase_columns = list(list(hdfr.get(f"{group}").attrs.values())[0])
                phase_cent_df =pd.DataFrame(data=phase_arr, columns=phase_columns)
                image = phase_cent_df.values
                file_path = self.voxel_centroid_output_file + f"_{group}"
                vtk_files.append(file_path + ".vtu")
                # pointsToVTK requires C-contiguous 1-D coordinate arrays
                x = np.ascontiguousarray(image[:,0])
                y= np.ascontiguousarray(image[:,1])
                z = np.ascontiguousarray(image[:,2])
                label = np.ascontiguousarray( image[:,3])
                pointsToVTK(file_path, x, y, z, data = {"label" : label} )
        self.vtk_files = vtk_files
    def plot3d(self, **kwargs):
        """
        Regenerate the per-phase .vtu files and render each one with pyvista.

        Keyword arguments are forwarded to ``pyvista.DataSet.plot``; rendering
        uses the 'panel' Jupyter backend.
        """
        self.generate_plots()
        for file in self.vtk_files:
            grid = pv.read(file)
            grid.plot(**kwargs, jupyter_backend="panel")
| 10,121 | 35.541516 | 106 | py |
CompositionSpaceNFDI | CompositionSpaceNFDI-main/compositionspace/paraprobe_transcoder.py | # -*- coding: utf-8 -*-
"""
Reader for the APSuite6/IVAS4 *.APT file format
MK::GPLV3, 03/09/2020, Markus K\"uhbach, m.kuehbach@mpie.de
"""
import numpy as np
#https://www.python-kurs.eu/numpy_dtype.php
class APTFileBranches():
    """Registry of the 36 known *.APT section branches and the metadata
    (header sizes, versions, record layout) each one is expected to carry.

    The per-section constant tables were previously spelled out as eight
    36-entry literal dictionaries; they are now derived from ``dict_kwnsect``
    plus the few documented exceptions, which removes the duplication and
    keeps the tables consistent by construction.
    """
    def __init__(self):
        # section id -> branch name exactly as spelled inside an *.APT file
        self.dict_kwnsect = { 1: 'tof', 2: 'pulse', 3: 'freq', 4: 'tElapsed', 5: 'erate', 6: 'tstage', 7: 'TargetErate',
               8: 'TargetFlux', 9: 'pulseDelta', 10: 'Pres', 11: 'VAnodeMon', 12: 'Temp', 13: 'AmbTemp', 14: 'FractureGuard',
               15: 'Vref', 16: 'Noise', 17: 'Uniformity', 18: 'xstage', 19: 'ystage', 20: 'zstage', 21: 'z', 22: 'tofc', 23: 'Mass', 24: 'tofb',
               25: 'xs', 26: 'ys', 27: 'zs', 28: 'rTip', 29: 'zApex', 30: 'zSphereCorr', 31: 'XDet_mm', 32: 'YDet_mm', 33: 'Multiplicity',
               34: 'Vap', 35: 'Detector Coordinates', 36: 'Position'}
        ids = list(self.dict_kwnsect.keys())
        # inverse mapping; id 0 is reserved for the sentinel 'Failure'
        self.dict_sectionid = {'Failure': 0}
        self.dict_sectionid.update({name: sid for sid, name in self.dict_kwnsect.items()})
        # every section header is 148 B except 'Position', whose payload is
        # preceded by six extra float32 bounding-box values
        self.dict_iHeaderSize = {i: 148 for i in ids}
        self.dict_iHeaderSize[36] = 148 + 6*4
        self.dict_iHeaderVersion = {i: 2 for i in ids}
        self.dict_iSectionVersion = {i: 1 for i in ids}
        self.dict_eRelationshipType = {i: 1 for i in ids}
        self.dict_eRecordType = {i: 1 for i in ids}
        # record data type: 1 = signed int, 2 = unsigned int, 3 = float
        self.dict_eRecordDataType = {i: 3 for i in ids}
        self.dict_eRecordDataType.update({6: 2, 14: 2, 9: 1, 18: 1, 19: 1, 20: 1, 21: 1, 33: 1})
        # bits per scalar value
        self.dict_iDataTypeSize = {i: 32 for i in ids}
        self.dict_iDataTypeSize.update({6: 16, 9: 16, 14: 16, 21: 64})
        # bytes per record (a record may pack several scalars, e.g. Position = 3 floats)
        self.dict_iRecordSize = {i: 4 for i in ids}
        self.dict_iRecordSize.update({6: 2, 9: 2, 14: 2, 21: 8, 35: 8, 36: 12})
        # scalars per record, derived from record size and scalar width
        self.dict_iElements = dict()
        for i in ids:
            self.dict_iElements[i] = int(self.dict_iRecordSize[i] / (self.dict_iDataTypeSize[i]/8))
class APTFileHeader():
    """File-level header of an *.APT file: signature, header size/version,
    original filename, creation time and total ion count."""
    def __init__(self, fn, *args, **kwargs):
        self.known_sections = APTFileBranches()
        # flipped to False as soon as any validation step fails
        self.healthy = True
        self.aptfn = fn
        self.cSignature = ""
        self.iHeaderSize = 0
        self.iHeaderVersion = 0
        self.wcFilename = ""
        self.ftCreationTime = 0
        self.llIonCount = 0
    def read_cameca_apt_file_header(self, fid):
        """Parse the 540-byte file header from the open binary stream ``fid``.

        Sets ``self.healthy = False`` and returns early if the signature is
        not 'APT', if the header size is not 540, the version is not 2, or
        the ion count is below 1.
        """
        ht = np.dtype([('cSignature', np.int8, (4,)),
                      ('iHeaderSize', np.int32),
                      ('iHeaderVersion', np.int32),
                      ('wcFilename', np.uint16, 256),
                      ('ftCreationTime', np.uint64),
                      ('llIonCount', np.uint64)])
        tmp = np.fromfile( fid , ht, count = 1 ) #unicode_, 256),
        # decode the 4-byte signature, dropping NUL padding
        str_parse = ""
        for i in tmp['cSignature'].flatten():
            if chr(i) != '\x00':
                str_parse = str_parse + chr(i)
        self.cSignature = str_parse
        if self.cSignature != 'APT':
            # print('The file header indicates the file is not a valid *.APT file!')
            self.healthy = False
            return None
        self.iHeaderSize = tmp['iHeaderSize'].flatten()[0]
        if self.iHeaderSize != 540:
            # print('The iHeaderSize is unexpectedly different!')
            self.healthy = False
            return None
        self.iHeaderVersion = tmp['iHeaderVersion'].flatten()[0]
        if self.iHeaderVersion != 2:
            # print('The iHeaderVersion is not 2 but only 2 is supported by this implementation!')
            self.healthy = False
            return None
        ###MK::parsing UTF-16 works currently only for the lower bit, for the
        #APTV2 draft specification this is not a problem because currently the internal
        ##section identifiers use only UTF-8 characters
        str_parse = ""
        for i in tmp['wcFilename'].flatten():
            if chr(i) != '\x00':
                str_parse = str_parse + chr(i)
        self.wcFilename = str_parse
        self.ftCreationTime = tmp['ftCreationTime'].flatten()[0]
        self.llIonCount = tmp['llIonCount'].flatten()[0]
        if self.llIonCount < 1:
            # print('llIonCount < 1 means there are no ions in the dataset which is weird!')
            self.healthy = False
            return None
        # print('*.APT file ' + self.aptfn + ' successfully read ' + str(self.llIonCount) + ' ions')
        # release the parse buffers
        tmp = []
        str_parse = ""
    def print(self):
        """Dump all parsed header fields to stdout (debug aid)."""
        print('APTFileHeader')
        print('healthy ' + str(self.healthy))
        print('aptfn ' + self.aptfn)
        print('cSignature ' + self.cSignature)
        print('iHeaderSize ' + str(self.iHeaderSize))
        print('iHeaderVersion ' + str(self.iHeaderVersion))
        print('wcFilename ' + self.wcFilename)
        print('ftCreationTime ' + str(self.ftCreationTime))
        print('llIonCount ' + str(self.llIonCount))
    #def get_offset(self):
    #    return 540
class APTSectionHeader():
    """Header of one section branch inside an *.APT file.

    ``read_cameca_apt_section_header_auto`` parses a section header from an
    open binary stream and validates every fixed field against the expected
    values registered in :class:`APTFileBranches`. On the first mismatch the
    instance is flagged ``healthy = False`` and parsing stops, which the
    caller uses to detect both corruption and a clean end of file.
    """
    def __init__(self, *args, **kwargs):
        self.healthy = True
        self.known_sections = APTFileBranches()
        self.cSignature = ""
        self.iHeaderSize = 0
        self.iHeaderVersion = 0
        self.wcSectionType = ""
        self.iSectionVersion = 0
        self.eRelationshipType = 0
        self.eRecordType = 0
        self.eRecordDataType = 0
        self.iDataTypeSize = 0
        self.iRecordSize = 0
        self.wcDataUnit = ""
        self.llRecordCount = 0
        self.llByteCount = 0
    @staticmethod
    def _parse_wchar(values):
        # Only the low byte of each UTF-16 code unit is interpreted; the APT
        # v2 draft restricts the internal identifiers to UTF-8 characters, so
        # this is sufficient in practice.
        return "".join(chr(v) for v in values if chr(v) != '\x00')
    def read_cameca_apt_section_header_auto(self, fid):
        """Parse and validate one section header from the stream ``fid``.

        Sets ``self.healthy = False`` and returns early on a missing 'SEC'
        signature (also the end-of-file case, where the read comes back
        empty), an unknown section name, or any fixed-field mismatch.
        """
        ht = np.dtype([('cSignature', np.int8, (4,)),
                       ('iHeaderSize', np.int32),
                       ('iHeaderVersion', np.int32),
                       ('wcSectionType', np.uint16, 32),
                       ('iSectionVersion', np.int32),
                       ('eRelationshipType', np.uint32),
                       ('eRecordType', np.uint32),
                       ('eRecordDataType', np.uint32),
                       ('iDataTypeSize', np.int32),
                       ('iRecordSize', np.int32),
                       ('wcDataUnit', np.uint16, 16),
                       ('llRecordCount', np.uint64),
                       ('llByteCount', np.uint64)])
        tmp = np.fromfile( fid , ht, count = 1 )
        self.cSignature = self._parse_wchar(tmp['cSignature'].flatten())
        if self.cSignature != 'SEC':
            # corrupt header or end of file (empty read decodes to "")
            self.healthy = False
            return None
        self.wcSectionType = self._parse_wchar(tmp['wcSectionType'].flatten())
        sectionid = self.known_sections.dict_sectionid.get(self.wcSectionType)
        if sectionid is None:
            # unknown section branch name
            self.healthy = False
            return None
        # Every remaining fixed field must match the registered expectation.
        # Each value is stored on self before being checked, and the fields
        # are validated in the same order as the original implementation, so
        # the state after an early failure is unchanged.
        expectations = (
            ('iHeaderSize', self.known_sections.dict_iHeaderSize),
            ('iHeaderVersion', self.known_sections.dict_iHeaderVersion),
            ('iSectionVersion', self.known_sections.dict_iSectionVersion),
            ('eRelationshipType', self.known_sections.dict_eRelationshipType),
            ('eRecordType', self.known_sections.dict_eRecordType),
            ('eRecordDataType', self.known_sections.dict_eRecordDataType),
            ('iDataTypeSize', self.known_sections.dict_iDataTypeSize),
            ('iRecordSize', self.known_sections.dict_iRecordSize),
        )
        for field, expected in expectations:
            value = tmp[field].flatten()[0]
            setattr(self, field, value)
            if value != expected[sectionid]:
                self.healthy = False
                return None
        self.wcDataUnit = self._parse_wchar(tmp['wcDataUnit'].flatten())
        self.llRecordCount = tmp['llRecordCount'].flatten()[0]
        self.llByteCount = tmp['llByteCount'].flatten()[0]
    def print(self):
        """Dump all parsed header fields to stdout (debug aid)."""
        print('APTSectionHeader')
        print('cSignature ' + self.cSignature)
        print('iHeaderSize ' + str(self.iHeaderSize))
        print('iHeaderVersion ' + str(self.iHeaderVersion))
        print('wcSectionType ' + self.wcSectionType)
        print('iSectionVersion ' + str(self.iSectionVersion))
        print('eRelationshipType ' + str(self.eRelationshipType))
        print('eRecordType ' + str(self.eRecordType))
        print('eRecordDataType ' + str(self.eRecordDataType))
        print('iDataTypeSize ' + str(self.iDataTypeSize))
        print('iRecordSize ' + str(self.iRecordSize))
        print('wcDataUnit ' + self.wcDataUnit)
        print('llRecordCount ' + str(self.llRecordCount))
        print('llByteCount ' + str(self.llByteCount))
class paraprobe_transcoder():
    """Reader for APSuite6/IVAS4 *.APT files.

    ``read_cameca_apt`` walks the file header and then every section in
    order, attaching each section's payload to ``self`` under the branch
    name registered in :class:`APTFileBranches` (e.g. ``self.Mass``,
    ``self.Position``). The 'Detector Coordinates' branch becomes
    ``self.DetectorCoordinates``.
    """
    def __init__(self, aptfn, *args, **kwargs):
        self.fn = aptfn
        self.filestream = None
        self.header = APTFileHeader( self.fn )
        # section headers successfully identified while reading
        self.idtfyd_sections = []
    def read_cameca_apt_section_data_fixed_onetoone(self, fid, sect):
        """Read one section's payload as an (llIonCount, values-per-record)
        numpy array, choosing the dtype from the section header.

        Returns None for an unsupported data type / size combination.
        """
        ni = int(self.header.llIonCount)
        # number of scalars per record
        nj = int(int(sect.iRecordSize) / (int(sect.iDataTypeSize)/int(8)))
        if sect.eRecordDataType == 1:
            # signed integers, width selected by iDataTypeSize (bits)
            dtyp = {2: np.int16, 4: np.int32, 8: np.int64}.get(int(sect.iDataTypeSize) // 8)
            if dtyp is None:
                return None
        elif sect.eRecordDataType == 2:
            # currently only uint16 is used for unsigned sections
            dtyp = np.uint16
        elif sect.eRecordDataType == 3:
            dtyp = np.float32
        else:
            return None
        return np.reshape(np.fromfile(fid, dtyp, count = int(ni*nj)), (int(ni), int(nj)), order='C')
    def read_cameca_apt(self):
        """Read the whole *.APT file: file header, then every section.

        Section payloads are attached to ``self`` under their branch names.
        Reading stops at the first unhealthy/unknown section header (which is
        also how a clean end of file is detected).
        """
        self.filestream = open(self.fn, "rb")
        self.header.read_cameca_apt_file_header( self.filestream )
        known = self.header.known_sections
        # at most one pass per known section ('Failure' adds one extra key)
        for _ in range(1, len(known.dict_sectionid)):
            sect = APTSectionHeader()
            sect.read_cameca_apt_section_header_auto( self.filestream )
            if not sect.healthy:
                # end of file or corrupt section header
                break
            sectionid = known.dict_sectionid.get(sect.wcSectionType, 0)
            self.idtfyd_sections.append( sect )
            if sectionid == 0:
                break
            # The file pointer now sits on the first payload byte: *.APT
            # files place each section's data immediately after its header.
            # Branch name 'Detector Coordinates' maps to DetectorCoordinates.
            attr = known.dict_kwnsect[sectionid].replace(' ', '')
            if sectionid == 36:
                # the Position payload is preceded by six float32 bounding-box values
                self.tipbox = np.reshape(np.fromfile( self.filestream, np.float32, count = int(2*3) ), (int(2), int(3) ), order='C')
            setattr(self, attr, self.read_cameca_apt_section_data_fixed_onetoone( self.filestream, sect ))
        # done with all I/O on the *.APT file so close it
        self.filestream.close()
        print('Done, reading *.APT file')
#example how to read a APT file from APSuite6/IVAS4
#minimal example with default output currently implemented after reconstruction wizard from APSuite6/IVAS4
#Tk().withdraw()
#fn = askopenfilename()
#maximal example with all branches of an *.APT file exported, be careful --- in APSuite6/IVAS4 this is super slow (40min for 8mio atoms! below)
#fn = 'Z:/GITHUB/MPIE_APTFIM_TOOLBOX/paraprobe/code/481f2262-b0d5-4dfb-91b7-fb31b57cd1b0.apt'
#apt = paraprobe_transcoder( fn )
#apt.read_cameca_apt()
#np.shape(apt.Mass)
#verbose
#apt.header.print()
#apt.mass_sect.print()
#apt.xyz_sect.print()
| 26,191 | 50.66075 | 161 | py |
CompositionSpaceNFDI | CompositionSpaceNFDI-main/compositionspace/models.py | from ast import Raise
from sklearn.ensemble import RandomForestClassifier
from sklearn.mixture import GaussianMixture
from sklearn.cluster import DBSCAN
def get_model(ml_params):
    """
    Build the machine-learning model used for clustering.

    Parameters
    ----------
    ml_params : dict with a "name" key selecting the model and a sub-dict
        (keyed by that name) holding the model's hyper-parameters.

    Returns
    -------
    An unfitted scikit-learn estimator.

    Raises
    ------
    ValueError if the requested model name is not implemented.
    """
    model_name = ml_params["name"]
    model_params = ml_params[model_name]
    _available_models = ["randomforest", "GaussianMixture", "DBScan"]
    if model_name == "randomforest":
        return RandomForestClassifier(max_depth=model_params["max_depth"],
                                      n_estimators=model_params["n_estimators"])
    if model_name == "GaussianMixture":
        return GaussianMixture(n_components=model_params["n_components"],
                               max_iter=model_params["max_iter"],
                               verbose=model_params["verbose"])
    if model_name == "DBScan":
        # BUG FIX: the original read ``self.params["eps"]`` but this is a
        # module-level function with no ``self``; eps comes from model_params.
        return DBSCAN(eps=model_params["eps"], min_samples=model_params["min_samples"])
    raise ValueError(f"No implementation is found for the model {model_name}, choose from: {_available_models}")
| 1,067 | 37.142857 | 144 | py |
CompositionSpaceNFDI | CompositionSpaceNFDI-main/compositionspace/__init__.py | 0 | 0 | 0 | py | |
CompositionSpaceNFDI | CompositionSpaceNFDI-main/compositionspace/postprocessing.py | import pandas as pd
import os
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import numpy as np
import h5py
from sklearn.cluster import DBSCAN
from pyevtk.hl import pointsToVTK
from pyevtk.hl import gridToVTK
import yaml
class DataPostprocess:
    """Post-processing of CompositionSpace phase segmentation: isolates
    individual precipitates within each phase by running DBSCAN on the
    voxel centroids."""
    def __init__(self, inputfile):
        # accept either an already-parsed parameter dict or a YAML file path
        if isinstance(inputfile, dict):
            self.params = inputfile
        else:
            with open(inputfile, "r") as fin:
                params = yaml.safe_load(fin)
            self.params = params
        self.version = "1.0.0"
    def get_post_centroids(self, voxel_centroid_phases_files, cluster_id):
        """
        Read the voxel centroids for a phase.

        Parameters
        ----------
        voxel_centroid_phases_files : str, HDF5 file with voxel centroids
            grouped by phase
        cluster_id: int, phase id in voxel_centroid_phases_files

        Returns
        -------
        pandas dataframe of voxel centroids (x,y,z) without file names,
        pandas dataframe of voxel centroids (x,y,z,file_name),
        list of column names (x,y,z,file_name)

        Notes
        -----
        input is taken from composition space based segmentation of phases.
        """
        with h5py.File(voxel_centroid_phases_files , "r") as hdfr:
            group = cluster_id
            Phase_arr = np.array(hdfr.get(f"{group}/{group}"))
            # column names are stored as the group's first attribute
            Phase_columns = list(list(hdfr.get(f"{group}").attrs.values())[0])
        Phase_cent_df =pd.DataFrame(data=Phase_arr, columns=Phase_columns)
        Df_centroids = Phase_cent_df.copy()
        # drop the voxel id column so DBSCAN sees pure x,y,z coordinates
        Df_centroids_no_files = Df_centroids.drop(['file_name'] , axis=1)
        files = Df_centroids['file_name']
        return Df_centroids_no_files, Df_centroids, Phase_columns
    def DBSCAN_clustering(self, voxel_centroid_phases_files, cluster_id,
                          plot= False, plot3d = False, save =False):
        """
        Get individual clusters or precipitates corresponding to each phase / chemical domain.
        DBSCAN is applied on the centroids of the voxels helping to remove noisy voxels around clusters.

        Parameters
        ----------
        voxel_centroid_phases_files : str, HDF5 file with voxel centroids grouped by phase
        cluster_id: int, id of the phase/chemical domain in Output_voxel_centroids_phases.h5
        plot: boolean, if true plots the histogram of the cluster labels found by the DBSCAN algorithm.
        plot3d: boolean, if true plots voxel centroids corresponding to each precipitate and outputs a .vtu file.
        save: boolean, saves a .h5 file corresponding to cluster_id containing centroids for each precipitate.

        Returns
        -------
        None; writes plot/.vtu/.h5 outputs depending on the flags.

        Notes
        -----
        DBSCAN hyper-parameters ``eps`` (neighbourhood radius) and
        ``min_samples`` (core-point threshold) are read from
        ``params["ml_models"]["DBScan"]``.
        input is taken from composition space based segmentation of phases.
        """
        eps = self.params["ml_models"]["DBScan"]["eps"]
        min_samples = self.params["ml_models"]["DBScan"]["min_samples"]
        Df_centroids_no_files, Df_centroids, Phase_columns = self.get_post_centroids( voxel_centroid_phases_files ,cluster_id)
        db = DBSCAN(eps=eps, min_samples= min_samples).fit(Df_centroids_no_files.values) #eps=5., min_samples= 35
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = db.labels_
        # Number of clusters in labels, ignoring noise if present.
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        n_noise_ = list(labels).count(-1)
        if plot == True:
            plt.hist(labels,bins = 100);
        # collect per-precipitate centroid frames; label -1 is DBSCAN noise
        cluster_combine_lst = []
        for i in np.unique(labels):
            if i !=-1:
                cl_idx =np.argwhere(labels==i).flatten()
                cl_cent=Df_centroids_no_files.iloc[cl_idx]
                cl_cent["ID"] = [i]*len(cl_cent)
                cluster_combine_lst.append(cl_cent)
        if plot3d == True:
            OutFile = os.path.join(self.params["output_path"], f"Output_DBSCAN_segmentation_phase{cluster_id}")
            Df_comb = pd.concat(cluster_combine_lst)
            image = Df_comb.values
            # pointsToVTK requires C-contiguous 1-D coordinate arrays
            x = np.ascontiguousarray(image[:,0])
            y= np.ascontiguousarray(image[:,1])
            z = np.ascontiguousarray(image[:,2])
            label = np.ascontiguousarray( image[:,3])
            pointsToVTK(OutFile,x,y,z, data = {"label" : label} )
        if save == True:
            OutFile = os.path.join(self.params["output_path"], f"Output_DBSCAN_segmentation_phase{cluster_id}")
            with h5py.File(OutFile, "w") as hdfw:
                G = hdfw.create_group(f"{cluster_id}")
                G.attrs["columns"] = Phase_columns
                # one dataset per precipitate label (noise label -1 skipped)
                for i in tqdm(np.unique(labels)):
                    if i !=-1:
                        cl_idx =np.argwhere(labels==i).flatten()
                        cl_cent=Df_centroids.iloc[cl_idx]
                        G.create_dataset("{}".format(i), data = cl_cent.values)
| 5,481 | 38.157143 | 126 | py |
CompositionSpaceNFDI | CompositionSpaceNFDI-main/tests/test_read_files.py | import pytest
import numpy as np
import os
import sys
from compositionspace.datautils import DataPreparation
def test_file_rrng():
    # the first entry of the parsed .rrng range table should be carbon
    prep = DataPreparation("tests/experiment_params.yaml")
    ranges = prep.get_rrng("tests/data/R31_06365-v02.rrng")
    assert ranges[0]["name"].values[0] == "C"
def test_file_pos():
    # the first x coordinate in the .pos file is known to be -5.3784895
    prep = DataPreparation("tests/experiment_params.yaml")
    positions = prep.get_pos("tests/data/R31_06365-v02.pos")
    assert np.isclose(positions[0][0] + 5.3784895, 0)
def test_file_df():
    # bundle layout: (dataframes, filenames, ion ranges, range bounds)
    prep = DataPreparation("tests/experiment_params.yaml")
    bundle = prep.get_apt_dataframe()
    assert np.isclose(bundle[0][0]["x"].values[0] + 5.3784895, 0)
    assert bundle[1][0] == 'R31_06365-v02.pos'
    assert bundle[2]["name"].values[0] == "C"
    assert np.isclose(bundle[3]["lower"].values[0] - 5.974, 0)
def test_chunkify():
    """Slicing the dataset should write the first chunk file to disk."""
    data = DataPreparation("tests/experiment_params.yaml")
    data.get_big_slices()
    # idiom fix: assert the boolean directly instead of comparing `== True`
    assert os.path.exists(data.chunk_files[0])
def test_voxelise():
    """The full slice -> voxelise -> composition pipeline should produce the ratio file."""
    data = DataPreparation("tests/experiment_params.yaml")
    data.get_big_slices()
    data.get_voxels()
    data.calculate_voxel_composition()
    # idiom fix: assert the boolean directly instead of comparing `== True`
    assert os.path.exists(data.voxel_ratio_file)
| 1,213 | 31.810811 | 61 | py |
CompositionSpaceNFDI | CompositionSpaceNFDI-main/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sys
import shutil
import glob
sys.path.insert(0, os.path.abspath('../../compositionspace/'))
def skip(app, what, name, obj, would_skip, options):
    """autodoc-skip-member hook: always document __init__, otherwise
    defer to Sphinx's default decision."""
    return False if name == '__init__' else would_skip
def setup(app):
    """Sphinx extension hook: register the autodoc member-skip filter."""
    app.connect('autodoc-skip-member', skip)
# copy the example notebooks next to the docs source (fresh each build)
if os.path.exists("example"):
    shutil.rmtree("example")
shutil.copytree("../../example", "example")
project = 'compositionspace'
copyright = '2022, Alaukik Saxena, Sarath Menon'
author = 'Alaukik Saxena, Sarath Menon'
# autodoc/napoleon for docstrings, m2r2 + markdown tables for .md sources,
# nbsphinx for the example notebooks
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.napoleon',
    'm2r2',
    'sphinx_markdown_tables',
    'nbsphinx',
]
html_theme = 'furo'
html_theme_options = {
    #'logo_only' : True,
    #'canonical_url' : 'https://calphy.readthedocs.io/',
}
html_extra_path = ['../_static' ]
# accept both reStructuredText and Markdown sources
source_suffix = ['.rst', '.md']
exclude_patterns = []
fuzzyJoiner | fuzzyJoiner-master/build_model.py | from random import shuffle
import pickle
import numpy as np
# import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
from scipy.stats.mstats import gmean
import argparse
# Module-level configuration. TODO(review): original note here said only
# "must fix" with no detail.
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
# Loss hyper-parameters: MARGIN feeds triplet_loss, ALPHA feeds angular_loss.
MARGIN=10
ALPHA=45
# Debug switches: DEBUG truncates the data set to DEBUG_DATA_LENGTH entities;
# DEBUG_ANN exits after one ANN triplet-generation pass.
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_PRECOMPUTED_SPLIT = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
# How many approximate nearest neighbours to fetch per query.
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
# Defaults; several of these are overwritten from command-line args below.
filepath="weights.best.hdf5"
output_file_name_for_hpo = "val_dict_list.json"
def f1score(positive, negative):
    """F1 score over paired anchor-positive / anchor-negative distances.

    Index i is a true positive when positive[i] <= negative[i]; otherwise it
    is counted as BOTH a false negative and a false positive (as the original
    did), so the resulting F1 reduces to plain pairwise accuracy.

    positive -- sequence of anchor-positive distances
    negative -- sequence of anchor-negative distances (same length)
    Returns the F1 score as a float; 0.0 for empty input (the original
    raised ZeroDivisionError in that case).
    """
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    # zip() replaces the index-based loop; the unused, misspelled
    # 'fsocre = 0.0' initializer from the original has been dropped.
    for pos_dist, neg_dist in zip(positive, negative):
        if pos_dist <= neg_dist:
            true_positive += 1
        else:
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    denominator = (2 * true_positive) + false_negitive + false_positive
    if denominator == 0:
        # no pairs at all -- define the score as 0 rather than dividing by 0
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma char embeddings.

    Looks up every word in the tokenizer's vocabulary (up to MAX_NB_WORDS)
    in the KazumaCharEmbedding table and packs the vectors into the layer's
    weight matrix. Row 0 stays all-zero (Keras reserves index 0 for padding).
    """
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            # an all-zero vector means the lookup found nothing useful
            if sum(embedding_vector) == 0:
                print(str("failed to find embedding for:" + word).encode('utf-8'))
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    # trainable=False: the char embeddings are fixed; only the GRUs learn
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Tokenize and pad every leg of a triplet-text dict.

    texts     -- dict with 'anchor', 'negative' and 'positive' string lists
    tokenizer -- fitted Keras Tokenizer
    Returns a dict with the same keys, each holding sequences padded to
    MAX_SEQUENCE_LENGTH.
    """
    padded = {}
    for leg in ('anchor', 'negative', 'positive'):
        encoded = tokenizer.texts_to_sequences(texts[leg])
        padded[leg] = pad_sequences(encoded, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Return every line of *filepath* (newline included) as one entity string."""
    with open(filepath, 'r', encoding='utf8') as fl:
        return [line for line in fl]
def read_file(file_path):
    """Read '|'-delimited triplets (anchor|positive|negative) from a file.

    Returns a dict with 'anchor', 'negative' and 'positive' lists, one entry
    per line. When DEBUG is set, stops after DEBUG_DATA_LENGTH + 1 lines,
    matching the original loop's post-increment check.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # BUG FIX: the original opened the file without ever closing it; the
    # context manager guarantees the handle is released.
    with open(file_path, 'r', encoding='utf8') as fl:
        for i, line in enumerate(fl, start=1):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2, val_split = 0.2, base_file=None):
    """Split *entities* into (train, test, validation) lists.

    With USE_PRECOMPUTED_SPLIT set, the three lists are loaded from pickles
    at ``base_file + '.train'/'.test'/'.validation'``; otherwise a fresh
    random split is computed (shuffling *entities* in place, unless DEBUG
    truncates instead) and persisted to those same files.
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_test_samples = int(test_split * len(ents))
    num_validation_samples = int(val_split * len(ents))
    if USE_PRECOMPUTED_SPLIT:
        # BUG FIX: the original used pickle.load(open(...)) and leaked the
        # file handles; context managers close them deterministically.
        with open(base_file + '.train', 'rb') as fh:
            train = pickle.load(fh)
        with open(base_file + '.test', 'rb') as fh:
            test = pickle.load(fh)
        with open(base_file + '.validation', 'rb') as fh:
            validation = pickle.load(fh)
    else:
        train = ents[:-(num_validation_samples + num_test_samples)]
        test = ents[-num_test_samples:]
        validation = ents[-(num_validation_samples + num_test_samples):-num_test_samples]
        with open(base_file + '.train', 'wb') as fh:
            pickle.dump(train, fh)
        with open(base_file + '.test', 'wb') as fh:
            pickle.dump(test, fh)
        with open(base_file + '.validation', 'wb') as fh:
            pickle.dump(validation, fh)
    return train, test, validation
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular triplet loss: hinge on a_p^2 - 4*tan^2(alpha)*n_c^2.

    y_pred stacks [anchor-positive distance, negative-to-centroid distance]
    along axis 1. NOTE(review): ALPHA (45) is passed straight to tf.tan,
    which expects radians -- confirm the intended unit.
    """
    alpha = K.constant(ALPHA)
    a_p = y_pred[:,0,0]
    n_c = y_pred[:,1,0]
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet-style triplet loss: hinge(pos^2 - neg^2 + 0.2)."""
    margin = K.constant(0.2)
    return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    """Contrastive-style loss: pull positives in, push negatives past MARGIN."""
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(K.maximum(margin - y_pred[:,1,0], K.constant(0))))
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed triplet loss: tanh(pos) + (1 - tanh(neg))."""
    return K.mean(K.tanh(y_pred[:,0,0]) + (K.constant(1) - K.tanh(y_pred[:,1,0])))
def triplet_tanh_pn_loss(y_true, y_pred):
    """Tanh triplet loss that also pushes the positive-negative pair apart
    (slot 2 of y_pred), averaging the two negative terms."""
    return K.mean(K.tanh(y_pred[:,0,0]) +
                  ((K.constant(1) - K.tanh(y_pred[:,1,0])) +
                   (K.constant(1) - K.tanh(y_pred[:,2,0]))) / K.constant(2));
# The following triplet loss function is from "Deep Metric Learning with
# Improved Triplet Loss for Face Clustering in Videos".
def improved_loss(y_true, y_pred):
    """Improved triplet loss: hinge(phi) + lambda * hinge(psi), where phi
    compares a-p^2 against the mean of a-n^2 and p-n^2 plus a margin, and
    psi caps the absolute a-p^2 distance at a threshold."""
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    a_p_distance = K.square(y_pred[:,0,0])
    a_n_distance = K.square(y_pred[:,1,0])
    p_n_distance = K.square(y_pred[:,2,0])
    phi = a_p_distance - ((a_n_distance + p_n_distance) / K.constant(2)) + margin
    psi = a_p_distance - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    """Fraction of triplets whose anchor-positive distance (slot 0) is
    smaller than the anchor-negative distance (slot 1)."""
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    """L2-normalize x along its last axis (unit-length embeddings)."""
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    """Batched Euclidean distance between two tensors; the inner sum is
    clamped at K.epsilon() so the sqrt gradient stays finite at 0."""
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint (the
    'centroid' term of the angular loss)."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive Euclidean distance (the negative input is unused)."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an entity->aliases mapping into an ordered name list plus a
    name->position lookup.

    Each key is followed by its aliases in order. Duplicate names are
    appended again and their index entry is overwritten, exactly as the
    original did.
    Returns (ordered_names, name_to_index).
    """
    ordered_names = []
    name_to_index = {}
    for canonical, aliases in entity2same.items():
        for name in [canonical] + list(aliases):
            name_to_index[name] = len(ordered_names)
            ordered_names.append(name)
    return ordered_names, name_to_index
def generate_semi_hard_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine semi-hard triplets using an Annoy index over model embeddings.

    For every (anchor, positive) pair, a negative is 'semi-hard' when it
    appears among the positive's nearest neighbours but lies farther from
    the anchor than the positive does.
    Returns a dict with parallel 'anchor'/'positive'/'negative' text lists.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    triplets = {}
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        expected_text = set(entity2same[key])
        expected_ids = [entity2unique[i] for i in expected_text]
        for positive in expected_text:
            k = entity2unique[positive]
            nearest = t.get_nns_by_vector(predictions[k], NNlen)
            dist_k = t.get_distance(index, k)
            semi_hards = []
            for n in nearest:
                # skip the anchor itself, any true alias, and the positive
                if n == index or n in expected_ids or n == k:
                    continue
                n_dist = t.get_distance(index, n)
                # semi-hard: farther from the anchor than the positive is
                if n_dist > dist_k:
                    semi_hards.append(unique_text[n])
            # shuffle(semi_hards)
            # semi_hards = semi_hards[0:20]
            for i in semi_hards:
                triplets['anchor'].append(key)
                triplets['positive'].append(unique_text[k])
                triplets['negative'].append(i)
    return triplets
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine triplets and recall statistics from an Annoy index of embeddings.

    Builds a euclidean Annoy index over model.predict(sequences), queries the
    NNlen nearest neighbours of each anchor, and derives (anchor, positive,
    negative) triplets plus a battery of distance statistics. Also dumps an
    accuracy summary to output_file_name_for_hpo for hyper-parameter search.

    Returns recall (match ratio) alone when *test* is truthy, otherwise
    (triplets dict, recall) -- note the two different return shapes.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    precise = 0
    triplets = {}
    closest_positive_counts = []
    pos_distances = []
    neg_distances = []
    all_pos_distances = []
    all_neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                # count pairs already satisfying the desired ordering
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        min_neg_distance = 1000000
        for i in negatives:
            dist_neg = t.get_distance(index, entity2unique[i])
            all_neg_distances.append(dist_neg)
            if dist_neg < min_neg_distance:
                min_neg_distance = dist_neg
        for j in expected_text:
            dist_pos = t.get_distance(index, entity2unique[j])
            all_pos_distances.append(dist_pos)
        closest_pos_count = 0
        # how many retrieved positives beat the closest negative?
        for p in overlap:
            dist_pos = t.get_distance(index, entity2unique[p])
            if dist_pos < min_neg_distance:
                closest_pos_count+=1
        if closest_pos_count > 0:
            precise+=1
        closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
    print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
    print("max all positive distance:" + str(max(all_pos_distances)))
    print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
    print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
    print("max all neg distance:" + str(max(all_neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    print("Precision at 1: " + str(precise / len(entity2same)))
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w', encoding='utf8') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity strings into a canonical-name -> aliases dict.

    Uses NameDataCleanser for people (requires at least 4 name variants)
    and CompanyDataCleanser otherwise (at least 2). Entities that yield too
    few variants are dropped.
    """
    if people:
        required = 4
        cleanser = NameDataCleanser(0, required, limit_pairs=limit_pairs)
    else:
        required = 2
        cleanser = CompanyDataCleanser(limit_pairs)
    entity2same = {}
    for raw in entities:
        cleaned = cleanser.cleanse_data(raw)
        if not cleaned or len(cleaned) < required:
            continue
        entity2same[cleaned[0]] = cleaned[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap the frozen embedding layer in a Sequential model whose output
    is the flattened (concatenated) word vectors of each sequence."""
    seq = Sequential()
    seq.add(embedding_layer)
    seq.add(Flatten())
    return seq
def build_model(embedder):
    """Build the triplet-siamese network around a stack of GRU layers.

    Returns (model, test_positive_model, test_negative_model, inter_model):
    the compiled triplet model, two probe models exposing the positive and
    negative distances, and the shared single-input embedding tower.
    NOTE(review): the final GRU name uses loop variable ``i`` after the
    loop, so this assumes NUM_LAYERS >= 1 -- confirm.
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    # the same tower is applied to all three legs (shared weights)
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        # angular loss consumes [anchor-positive, negative-centroid] distances
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                    )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # third slot feeds losses that also separate positive from negative
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
                    # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                    )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# ---- Command-line interface and global configuration -----------------------
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
parser.add_argument('--use_precomputed_split', action='store_true', help='load precomputed split for test and validation data')
parser.add_argument('--data_path', type=str, help='location to store/load data from')
args = parser.parse_args()
filepath = args.model
# map the --loss_function flag to the matching loss implementation above
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
USE_PRECOMPUTED_SPLIT = args.use_precomputed_split
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
# build_model adds one extra GRU after the loop, hence the -1 here
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
# ---- Data preparation -------------------------------------------------------
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test, validation = split(entities, test_split = .20, val_split = 0.20 , base_file=args.data_path)
print("TRAIN")
print(str(train).encode('utf-8'))
print(len(train))
print("TEST")
print(str(test).encode('utf-8'))
print(len(test))
print("validation")
print(str(validation).encode('utf-8'))
print(len(validation))
#pickle.dump(test, open(filepath + '.test_data.pickle', 'wb'))
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
entity2same_validation = generate_names(validation, people)
print(str(entity2same_train).encode('utf-8'))
print(str(entity2same_test).encode('utf-8'))
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
unique_text_validation, entity2unique_validation = build_unique_entities(entity2same_validation)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
if not USE_PRECOMPUTED_SPLIT:
    # change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
    # clues for entity names
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
    tokenizer.fit_on_texts(unique_text + unique_text_test + unique_text_validation)
    pickle.dump( tokenizer, open(args.data_path + '.tokenizer' , "wb" ))
else:
    tokenizer = pickle.load(open(args.data_path + '.tokenizer', 'rb'))
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
sequences_validation = tokenizer.texts_to_sequences(unique_text_validation)
sequences_validation = pad_sequences(sequences_validation, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    # NOTE(review): with test=True generate_triplets_from_ANN returns a float,
    # so the subscription below would fail -- confirm this debug path.
    triplets = generate_triplets_from_ANN(embedder_model, sequences, entity2unique, entity2same_train, unique_text, True)
    print(len(triplets['anchor']))
    sys.exit()
# mine triplets for test/validation once, from the raw embedding space
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
validation_data, val_match_stats = generate_triplets_from_ANN(embedder_model, sequences_validation, entity2unique_validation, entity2same_validation, unique_text_validation, False)
validation_seq = get_sequences(validation_data, tokenizer)
print("Test stats:" + str(test_match_stats))
# ---- Training ---------------------------------------------------------------
counter = 0
current_model = embedder_model
prev_match_stats = 0
train_data, train_match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
# labels are ignored by the custom losses; random placeholders of right shape
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
Y_val = np.random.randint(2, size=(1,2,len(validation_data['anchor']))).T
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=2, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_data=([validation_seq['anchor'], validation_seq['positive'], validation_seq['negative']],Y_val))
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 25,195 | 37.118003 | 249 | py |
fuzzyJoiner | fuzzyJoiner-master/preloaded_runner.py | import pickle
import numpy as np
import tensorflow as tf
import random as random
import json
from keras import backend as K
#from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Input, Lambda, GRU
from keras.layers import Embedding
from keras.models import Model
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import statistics
import argparse
# Module-level configuration (mirrors build_model.py). TODO(review): the
# original note here said only "must fix" with no detail.
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
# Loss hyper-parameters: MARGIN feeds triplet_loss, ALPHA feeds angular_loss.
MARGIN=10
ALPHA=45
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
# How many approximate nearest neighbours to fetch per query.
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
# NOTE(review): unlike build_model.py, output_file_name_for_hpo is NOT
# defined here although generate_triplets_from_ANN writes to it -- confirm.
filepath="weights.best.hdf5"
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma char embeddings.

    Identical to build_model.py: vocabulary rows (up to MAX_NB_WORDS) are
    filled from KazumaCharEmbedding; row 0 stays all-zero for padding.
    """
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            if sum(embedding_vector) == 0:
                print(str("failed to find embedding for:" + word).encode('utf-8'))
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Tokenize and pad the 'anchor'/'negative'/'positive' lists of *texts*.

    Returns a dict with the same three keys, each holding sequences padded
    to MAX_SEQUENCE_LENGTH.
    """
    padded = {}
    for leg in ('anchor', 'negative', 'positive'):
        padded[leg] = pad_sequences(
            tokenizer.texts_to_sequences(texts[leg]),
            maxlen=MAX_SEQUENCE_LENGTH,
        )
    return padded
def read_entities(filepath):
    """Return every line of *filepath* (newline included) as one entity string."""
    with open(filepath, 'r', encoding='utf8') as fl:
        return list(fl)
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular triplet loss: hinge on a_p^2 - 4*tan^2(alpha)*n_c^2.
    NOTE(review): ALPHA (45) goes straight to tf.tan, which expects
    radians -- confirm the intended unit."""
    alpha = K.constant(ALPHA)
    a_p = y_pred[:,0,0]
    n_c = y_pred[:,1,0]
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet-style triplet loss: hinge(pos^2 - neg^2 + 0.2)."""
    margin = K.constant(0.2)
    return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    """Contrastive-style loss: pull positives in, push negatives past MARGIN."""
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(K.maximum(margin - y_pred[:,1,0], K.constant(0))))
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed triplet loss: tanh(pos) + (1 - tanh(neg))."""
    return K.mean(K.tanh(y_pred[:,0,0]) + (K.constant(1) - K.tanh(y_pred[:,1,0])))
def triplet_tanh_pn_loss(y_true, y_pred):
    """Tanh triplet loss that also pushes the positive-negative pair apart
    (slot 2 of y_pred), averaging the two negative terms."""
    return K.mean(K.tanh(y_pred[:,0,0]) +
                  ((K.constant(1) - K.tanh(y_pred[:,1,0])) +
                   (K.constant(1) - K.tanh(y_pred[:,2,0]))) / K.constant(2));
# The following triplet loss function is from "Deep Metric Learning with
# Improved Triplet Loss for Face Clustering in Videos".
def improved_loss(y_true, y_pred):
    """Improved triplet loss: hinge(phi) + lambda * hinge(psi).
    NOTE(review): unlike the build_model.py variant, the distances here are
    used raw (no K.square) -- confirm which form is intended."""
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    a_p_distance = y_pred[:,0,0]
    a_n_distance = y_pred[:,1,0]
    p_n_distance = y_pred[:,2,0]
    phi = a_p_distance - ((a_n_distance + p_n_distance) / K.constant(2)) + margin
    psi = a_p_distance - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    """Fraction of triplets whose anchor-positive distance (slot 0) is
    smaller than the anchor-negative distance (slot 1)."""
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    """L2-normalize x along its last axis (unit-length embeddings)."""
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    """Batched Euclidean distance between two tensors; the inner sum is
    clamped at K.epsilon() so the sqrt gradient stays finite at 0."""
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint (the
    'centroid' term of the angular loss)."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive Euclidean distance (the negative input is unused)."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an entity->aliases mapping into an ordered name list plus a
    name->position lookup.

    Each key is followed by its aliases in order; duplicates are appended
    again and their index entry overwritten, exactly like the original.
    Returns (ordered_names, name_to_index).
    """
    ordered_names = []
    name_to_index = {}
    for canonical, aliases in entity2same.items():
        for name in [canonical] + list(aliases):
            name_to_index[name] = len(ordered_names)
            ordered_names.append(name)
    return ordered_names, name_to_index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine triplets and recall statistics from an Annoy index of embeddings.

    Builds a euclidean Annoy index over model.predict(sequences), queries
    the NNlen nearest neighbours of each anchor, and derives (anchor,
    positive, negative) triplets plus a battery of distance statistics.
    Also dumps an accuracy summary for hyper-parameter search.

    Returns recall (match ratio) alone when *test* is truthy, otherwise
    (triplets dict, recall).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    precise = 0
    triplets = {}
    closest_positive_counts = []
    pos_distances = []
    neg_distances = []
    all_pos_distances = []
    all_neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        positives = overlap
        negatives = nearest_text - expected_text
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                # count pairs already satisfying the desired ordering
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
        min_neg_distance = 1000000
        for i in negatives:
            dist_neg = t.get_distance(index, entity2unique[i])
            all_neg_distances.append(dist_neg)
            if dist_neg < min_neg_distance:
                min_neg_distance = dist_neg
        for j in expected_text:
            dist_pos = t.get_distance(index, entity2unique[j])
            all_pos_distances.append(dist_pos)
        closest_pos_count = 0
        # how many retrieved positives beat the closest negative?
        for p in overlap:
            dist_pos = t.get_distance(index, entity2unique[p])
            if dist_pos < min_neg_distance:
                closest_pos_count+=1
        if closest_pos_count > 0:
            precise+=1
        closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
    print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
    print("max all positive distance:" + str(max(all_pos_distances)))
    print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
    print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
    print("max all neg distance:" + str(max(all_neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    print("Precision at 1: " + str(precise / len(entity2same)))
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    # BUG FIX: output_file_name_for_hpo is never defined in this module (it
    # exists only in build_model.py), so reaching this line raised NameError.
    # Honor the global when present; otherwise fall back to the same default.
    hpo_output_path = globals().get('output_file_name_for_hpo', 'val_dict_list.json')
    with open(hpo_output_path, 'w', encoding='utf8') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity strings into a canonical-name -> aliases dict.

    People use NameDataCleanser (requires at least 4 name variants);
    companies use CompanyDataCleanser (at least 2). Entities yielding too
    few variants are dropped.
    """
    if people:
        required = 4
        cleanser = NameDataCleanser(0, required, limit_pairs=limit_pairs)
    else:
        required = 2
        cleanser = CompanyDataCleanser(limit_pairs)
    entity2same = {}
    for raw in entities:
        cleaned = cleanser.cleanse_data(raw)
        if not cleaned or len(cleaned) < required:
            continue
        entity2same[cleaned[0]] = cleaned[1:]
    return entity2same
def build_model_from_weights(weights_file, embbeding_dimensions):
    """Rebuild the triplet siamese network and load pretrained weights.

    The base encoder is an (untrained-embedding -> stacked GRU) tower that is
    shared across the anchor/positive/negative inputs.  Depending on
    USE_ANGULAR_LOSS, the model's output stacks either the angular distances
    or the (pos, neg, exemplar-neg) Euclidean distances.

    If loading fails because the stored weights were built with a different
    vocabulary size, the ValueError message is mined for digits and the build
    is retried recursively with the recovered size.

    Returns (model, test_positive_model, test_negative_model, inter_model),
    where inter_model maps a single padded sequence to its embedding.
    """
    embedder = Embedding(embbeding_dimensions, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH, trainable=False)
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # Two sequence-returning GRU layers followed by a final pooling GRU.
    for i in range(0, 2):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # Shared-weight towers: one encoder applied to all three inputs.
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        try:
            model.load_weights(weights_file)
        except ValueError as e:
            # Weight shapes disagree with the assumed vocabulary size: recover
            # the stored embedding input dim from the error text and retry.
            # NOTE(review): new_emb[2] relies on Keras's error-message format;
            # confirm the third number really is the stored vocabulary size.
            full = str(e)
            #https://stackoverflow.com/questions/4289331/python-extract-numbers-from-a-string
            new_emb = [int(num) for num in full.replace('.', ' ').split() if num.isdigit()]
            return build_model_from_weights(weights_file, new_emb[2])
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        try:
            model.load_weights(weights_file)
        except ValueError as e:
            # Same vocabulary-size recovery as the angular branch above.
            full = str(e)
            #https://stackoverflow.com/questions/4289331/python-extract-numbers-from-a-string
            new_emb = [int(num) for num in full.replace('.', ' ').split() if num.isdigit()]
            return build_model_from_weights(weights_file, new_emb[2])
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    # Auxiliary models for inspecting distances and raw embeddings.
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# Script entry: evaluate a previously trained triplet model on test data.
# First get the arguments
output_file_name_for_hpo = "val_dict_list.json"
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: triplet-loss, improved-loss, angular-loss, adapted-loss')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
parser.add_argument('--tokenizer', type=str, help='location of tokenizer file')
parser.add_argument('--previous_test', type=str, help='use previous test data')
args = parser.parse_args()
filepath = args.model
# Map the CLI choice onto the corresponding Keras loss function.
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'adapted-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
else:
    # Fail fast: previously an unrecognized value left LOSS_FUNCTION as None
    # and only crashed much later, inside model compilation.
    parser.error('unknown --loss_function: ' + str(args.loss_function))
print('Loss function: ' + args.loss_function)
people = 'people' in args.entity_type
# Load test data if specified (a pickled test split), otherwise read raw entities.
if args.previous_test and args.previous_test.lower() in ("yes", "true", "t", "1"):
    test = pickle.load(open(args.input, 'rb'))
else:
    test = read_entities(args.input)
print("TEST")
print(str(test).encode('utf-8'))
#encode test data for annoy
entity2same_test = generate_names(test, people, limit_pairs=True)
print(str(entity2same_test).encode('utf-8'))
#load tokenizer (must be the one the model was trained with)
tokenizer = pickle.load(open(args.tokenizer, 'rb'))
#Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("test text len:" + str(len(unique_text_test)))
#use tokenizer to convert to sequences
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build model so we can load weights into it
word_index = tokenizer.word_index
num_words = len(word_index) + 1
model, test_positive_model, test_negative_model, inter_model = build_model_from_weights(args.model, num_words)
current_model = inter_model
# print some statistics
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 17,465 | 35.848101 | 139 | py |
fuzzyJoiner | fuzzyJoiner-master/parse_additive.py | 0 | 0 | 0 | py | |
fuzzyJoiner | fuzzyJoiner-master/Levenstien_Rule_Based.py | from sys import argv
import string
import Levenshtein
import statistics
from names_cleanser import NameDataCleanser, CompanyDataCleanser
def read_entities(filepath):
    """Return every line of *filepath* (UTF-8), trailing newlines preserved."""
    with open(filepath, 'r', encoding='utf8') as handle:
        return [line for line in handle]
def generate_names(entities, people, limit_pairs=False):
    """Turn raw entity lines into a {canonical name: [aliases]} mapping.

    People require at least 4 cleansed variants (NameDataCleanser); companies
    require at least 2 (CompanyDataCleanser).  Lines that produce fewer are
    discarded.
    """
    if people:
        required = 4
        cleanser = NameDataCleanser(0, required, limit_pairs=limit_pairs)
    else:
        required = 2
        cleanser = CompanyDataCleanser(limit_pairs)
    mapping = {}
    for raw_line in entities:
        cleansed = cleanser.cleanse_data(raw_line)
        if cleansed and len(cleansed) >= required:
            # Element 0 is the canonical name; the remainder are aliases.
            mapping[cleansed[0]] = cleansed[1:]
    return mapping
def load_buckets(entity2same):
    """Index every entity name under each of its lowercased words.

    Punctuation is treated as whitespace, so "J. Smith" lands in the 'j' and
    'smith' buckets.  Returns {word: [entity names containing that word]}.
    """
    # Replacing punctuation with spaces before splitting:
    # https://stackoverflow.com/questions/34860982/replace-the-punctuation-with-whitespace
    strip_punct = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
    buckets = {}
    for anchor, aliases in entity2same.items():
        for name in aliases + [anchor]:
            depunct = name.translate(strip_punct).lower()
            for word in (token for token in depunct.split(' ') if token):
                buckets.setdefault(word, []).append(name)
    return buckets
def get_stats(entity2same, bucket_dict):
    """Evaluate word-bucket blocking + Levenshtein ranking over the dataset.

    For each anchor name in entity2same, candidate matches are gathered from
    bucket_dict (every name sharing a word with the anchor), trimmed to the
    NNlen Levenshtein-closest, and compared against the known aliases.
    Prints distance/precision statistics and returns the recall-style ratio
    match / (match + no_match).
    """
    def get_closest(items, key, number_to_get):
        # Rank candidates by case-insensitive Levenshtein distance to the key
        # (duplicates removed first) and keep the closest number_to_get.
        low_key = key.lower()
        closest = [(x, Levenshtein.distance(low_key, x.lower())) for x in items]
        closest = list(set(closest))
        # print('key')
        # # print(key.encode('utf-8'))
        # print('unsorted')
        # # print([(thing[0].encode('utf-8'), thing[1]) for thing in closest])
        # print('sorted')
        closest = sorted(closest, key=lambda a: a[1])
        # print([(thing[0].encode('utf-8'), thing[1]) for thing in closest])
        # print('dist removed')
        closest = [item[0] for item in closest]
        # print([thing.encode('utf-8') for thing in closest])
        # print([thing.encode('utf-8') for thing in closest[:number_to_get]])
        return closest[:number_to_get]
    # predictions = model.predict(sequences)
    # t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    # t.set_seed(123)
    # for i in range(len(predictions)):
    #     # print(predictions[i])
    #     v = predictions[i]
    #     t.add_item(i, v)
    # t.build(100) # 100 trees
    match = 0
    no_match = 0
    lev_accuracy = 0
    total = 0
    precise = 0
    triplets = {}
    closest_positive_counts = []
    pos_distances = []
    neg_distances = []
    all_pos_distances = []
    all_neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    # How many nearest neighbors to keep per anchor.
    NNlen = 20
    translator = str.maketrans(string.punctuation, ' '*len(string.punctuation))
    print_num = 0
    skipped = 0
    for key in entity2same:
        # if not key.translate(translator).strip():
        #     skipped += 1
        #     continue
        # Candidate set: union of buckets for every word of the anchor.
        nearest = []
        for word in key.translate(translator).lower().split():
            if not word:
                continue
            nearest = nearest + bucket_dict[word]
        #if key == "Abu al - Qasim Muhammad ibn 'Abd Allah ibn 'Abd al - Muttalib ibn Hashim":
            #print(nearest)
        if len(nearest) > NNlen:
            nearest = get_closest(nearest, key, NNlen)
        #if key == "Abu al - Qasim Muhammad ibn 'Abd Allah ibn 'Abd al - Muttalib ibn Hashim":
            #print(nearest)
        nearest_text = set(nearest)
        expected_text = set(entity2same[key])
        # Progress logging every 1000 anchors.
        if print_num % 1000 == 0:
            print(print_num)
            print(key.encode('utf-8'))
            print([item.encode('utf-8') for item in nearest])
        print_num += 1
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        if len(nearest_text) == 0:
            skipped += 1
            continue
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), len(nearest_text)) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # Count how often the positive is strictly closer than the negative.
        for i in negatives:
            for j in positives:
                dist_pos = Levenshtein.distance(key.lower(), j.lower())
                pos_distances.append(dist_pos)
                dist_neg = Levenshtein.distance(key.lower(), i.lower())
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    lev_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # Distance from the anchor to its single closest negative.
        min_neg_distance = 1000000
        for i in negatives:
            dist_neg = Levenshtein.distance(key.lower(), i.lower())
            all_neg_distances.append(dist_neg)
            if dist_neg < min_neg_distance:
                min_neg_distance = dist_neg
        for j in expected_text:
            dist_pos = Levenshtein.distance(key.lower(), j.lower())
            all_pos_distances.append(dist_pos)
        # Precision@1 proxy: does at least one true match beat every negative?
        closest_pos_count = 0
        for p in overlap:
            dist_pos = Levenshtein.distance(key.lower(), p.lower())
            if dist_pos < min_neg_distance:
                closest_pos_count+=1
        if closest_pos_count > 0:
            precise+=1
        #print(len(expected_text))
        #print(len(nearest_text))
        if min(len(expected_text),len(nearest_text)) == 0:
            print(nearest)
            print(expected_text)
            print(nearest_text)
            print('key' + str(key))
            for word in key.translate(translator).lower().split():
                if not word:
                    continue
                print(bucket_dict[word])
                print(word)
        # NOTE(review): when the min(...) above is 0 this division raises
        # ZeroDivisionError -- the debug prints just before suggest that case
        # does occur; confirm whether such anchors should be skipped instead.
        closest_positive_counts.append(closest_pos_count / min(len(expected_text),len(nearest_text)))
        # for i in negatives:
        #     for j in expected_text:
        #         triplets['anchor'].append(key)
        #         triplets['positive'].append(j)
        #         triplets['negative'].append(i)
    print('bad entities' + str(skipped))
    print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
    print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
    print("max all positive distance:" + str(max(all_pos_distances)))
    print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
    print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
    print("max all neg distance:" + str(max(all_neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(lev_accuracy / total))
    print("Precision at 1: " + str(precise / (len(entity2same) - skipped)))
    # obj = {}
    # obj['accuracy'] = lev_accuracy / total
    # obj['steps'] = 1
    # with open(output_file_name_for_hpo, 'w', encoding='utf8') as out:
    #     json.dump(obj, out)
    # if test:
    return match/(match + no_match)
    # else:
    #     return triplets, match/(match + no_match)
# Script entry point: <input file> <entity type: people... | companies...>
input_file = argv[1]
# BUG FIX: this was `argv[2][0] is 'p'` -- identity comparison against a
# string literal is implementation-dependent (and a SyntaxWarning on
# CPython 3.8+); compare by value instead.
people = argv[2].startswith('p')
print(argv)
print(people)
entities = read_entities(input_file)
entity2same = generate_names(entities, people)
bucket_dict = load_buckets(entity2same)
# for key in bucket_dict:
#     print('key {} value {}'.format(key, bucket_dict[key]).encode('utf-8'))
print(get_stats(entity2same, bucket_dict))
| 8,910 | 37.409483 | 120 | py |
fuzzyJoiner | fuzzyJoiner-master/names_cleanser.py | import re
import argparse
import Levenshtein
from nltk import bigrams
from os import listdir
from os.path import isfile, join
from difflib import SequenceMatcher
# from sklearn.metrics import jaccard_similarity_score
class GenericDataCleanser(object):
    """Base cleanser that turns a '|'-separated line of entity aliases into a
    list of exactly ``pairs`` cleansed, de-duplicated names.

    Subclasses (NameDataCleanser / CompanyDataCleanser) override
    ``cleanse_data`` with entity-specific rules; this base class also drives
    the directory-level processing via ``clean_file`` / ``parse_file``.
    """

    # Substrings that disqualify a candidate person name / company name.
    name_reject_set = frozenset(['father of', '(', 'author of', 'pope', 'emperor'])
    company_reject_set = frozenset(["("])

    def __init__(self, entity_type, function = None, number=None, pairs=2):
        """Configure the per-part filter.

        entity_type selects the filter ('name'/'names'/'company');
        function == 'test' selects the counting test_x_* variants instead;
        number caps how many parts are examined in test mode; pairs is how
        many cleansed names cleanse_data must produce.
        """
        if number:
            self.get = True
            self.number_of_names = number
        else:
            self.get = False
        function_dictionary = {"nametest": self.test_x_names, "companytest": self.test_x_companies, "name": self.good_name_data, "company": self.good_company_data, "names": self.good_name_data}
        if entity_type == "name" or entity_type == "names" or entity_type == "company":
            self.parsing_function = function_dictionary[entity_type]
        elif function and function == "test":
            self.parsing_function = function_dictionary[entity_type + "test"]
        # BUG FIX: this was hard-coded to 2, silently ignoring the `pairs`
        # argument (default unchanged, so existing callers behave the same).
        self.pairs = pairs

    def test_x_names(self, data):
        """Like good_name_data, but accepts only the first number_of_names parts."""
        self.number_of_names -= 1
        if self.number_of_names < 0:
            return False
        else:
            return self.good_name_data(data)

    def test_x_companies(self, data):
        """Like good_company_data, but accepts only the first number_of_names parts."""
        self.number_of_names -= 1
        if self.number_of_names < 0:
            return False
        else:
            return self.good_company_data(data)

    def is_english(self, data):
        """Return True when data is pure ASCII (used as a proxy for English)."""
        try:
            data.encode('ascii')
        except UnicodeEncodeError:
            return False
        return True

    def fix_bad_chars(self, data):
        """Replace newlines with spaces via str.translate."""
        #used https://www.tutorialspoint.com/python/string_translate.htm
        #change if we want to preserve any of these things
        intab = "\n"
        outtab = " "
        trantab = str.maketrans(intab, outtab)
        return data.translate(trantab)

    def remove_bad(self, data):
        """Normalize one accepted part: map '\"' to '(', drop newlines and
        double spaces, and strip leading whitespace."""
        data = data.replace('"', "(")
        data = data.replace("\n", "")
        data = data.replace("  ", "")
        return data.lstrip()

    def good_company_data(self, data):
        """Test whether a company-name part should be kept."""
        if data.startswith("<http://dbpedia.org/resource"):
            return False
        if data.startswith("The Master Trust Bank of Japan"):
            return False
        if not self.is_english(data):
            return False
        data = self.fix_bad_chars(data)
        data = data.lower()
        for item in self.company_reject_set:
            if item in data:
                return False
        return True

    def good_name_data(self, data):
        """Test whether a person-name part should be kept."""
        if not self.is_english(data):
            return False
        data = self.fix_bad_chars(data)
        data = data.lower()
        for item in self.name_reject_set:
            if item in data:
                return False
        return True

    def cleanse_data(self, dataToCleanse):
        """Split a '|'-separated line and return the first ``pairs`` accepted,
        normalized names; return None when fewer than ``pairs`` survive."""
        ret = []
        name_array = dataToCleanse.split("|")
        for part in name_array:
            if not part:
                continue
            if len(ret) >= self.pairs:
                break
            if self.parsing_function(part):
                if part not in ret:
                    part = self.remove_bad(part)
                    if part:
                        # Pad punctuation with spaces so downstream
                        # tokenization splits on it, then collapse doubles.
                        part = part.replace(',', ' , ').replace('-', ' - ').replace('.', ' . ').replace('  ', ' ')
                        ret.append(part)
        if len(ret) != self.pairs:
            return None
        return ret

    def parse_file(self, input_file, output_file, output_rejects_file):
        """Cleanse each line of input_file: accepted groups are written
        '|'-joined to output_file, everything else to output_rejects_file."""
        lines = input_file.readlines()
        entity_id = 0
        print('in parse file')
        count = 0
        for line in lines:
            ret = self.cleanse_data(line)
            if ret and len(ret) >= self.pairs:
                count += 1
                # all these entity names are in the exact same set
                # we can create the number of pairs that we were asked to create
                # for now, create it with the 'anchor' (first element of the list)
                # and all other names
                entity_id += 1
                newline = '|'.join(ret) + '\n'
                output_file.write(newline)
            else:
                output_rejects_file.write(line)
        if self.get:
            print("ran out of names, proceeding with as many as were available")
        print('total number of names:' + str(count))

    def clean_file(self, filename, output):
        """Cleanse every regular file in directory ``filename`` into
        ``output``; rejected lines go to ./rejects.txt."""
        onlyfiles = [f for f in listdir(filename) if isfile(join(filename, f))]
        # BUG FIX: the accept file was previously opened twice (leaking one
        # handle) and the input/reject handles were not reliably closed; use
        # context managers so every handle is closed exactly once.
        with open(output, "w", encoding='utf-8') as output_file, \
                open('./rejects.txt', 'w', encoding='utf-8') as output_rejects_file:
            for file_path in onlyfiles:
                with open(filename + "/" + file_path, encoding='utf-8') as input_file:
                    self.parse_file(input_file, output_file, output_rejects_file)
class CompanyDataCleanser(GenericDataCleanser):
    """Cleanser for company alias lines of the form
    ``<dbpedia-url>|alias|alias|...``: derives the canonical name from the
    URL tail and keeps only aliases that are acronyms or share a long early
    substring with the canonical name.
    """

    def __init__(self, limit_pairs=False):
        """Configure the cleanser.

        limit_pairs is stored for API symmetry with NameDataCleanser.
        """
        # BUG FIX: this was hard-coded to False, silently ignoring the
        # `limit_pairs` argument (default unchanged, so existing callers
        # behave the same).
        self.limit_pairs = limit_pairs
        self.pairs = 2
        self.get = False

    def is_acronym(self, name):
        """Heuristic: short (< 6 chars) string starting with uppercase/digits."""
        p = re.compile('[A-Z0-9]+')
        if len(name) < 6 and p.match(name):
            return True
        return False

    def has_cyrillic(self, text):
        """Return True when the text contains any Cyrillic letter."""
        return bool(re.search('[а-яА-Я]', text))

    def cleanse_data(self, line):
        """Return [canonical_name, kept aliases...] for one line, or None
        when the line is Cyrillic or the canonical name looks bogus."""
        if self.has_cyrillic(line):
            return None
        # Wikipedia has URLs for the company name first remove trailing '>' and remove disambiguation of type by (company)
        line = line.replace('(company)', '')
        line = line.replace('\n', '')
        arr = line.split('|')
        url = arr[0].split('/')
        company_name = url[-1].replace('>', '')
        # SEC filings have some odd company names, filter these out
        if re.match('T[0-9]+', company_name):
            return None
        if re.match('[0-9 ]+', company_name):
            return None
        company_name = company_name.replace('_', ' ')
        company_name_no_spaces = re.sub('[ .,]', '', company_name).lower().strip()
        ret_val = []
        ret_val.append(company_name)
        for i in range(1, len(arr)):
            arr[i] = arr[i].replace('\n', '')
            name = arr[i].replace(' ', '')
            name = name.lower().strip()
            name = re.sub('[,.]', '', name)
            # NOTE(review): this compares the normalized alias against the
            # un-normalized company_name, so it only drops exact raw
            # duplicates -- confirm whether company_name_no_spaces was meant.
            if name == company_name:
                continue
            # check to see if this is an acronym
            if self.is_acronym(arr[i]):
                ret_val.append(arr[i])
            else:
                # find longest substring
                seqMatch = SequenceMatcher(None, company_name_no_spaces, name)
                # find match of longest sub-string
                match = seqMatch.find_longest_match(0, len(company_name_no_spaces), 0, len(name))
                # ensure that we match the company name somewhere early in the string
                if match.size > 1 and match.a < 1:
                    ret_val.append(arr[i])
                else:
                    print('removing name:' + arr[i] + ' for ' + company_name)
        return ret_val
class NameDataCleanser(GenericDataCleanser):
    """Cleanser for person-name alias lines ('name|alias|alias|...').

    Filters out royal/papal titles, non-Latin text and implausible aliases,
    then synthesizes extra name orderings (last-first, initials, ...) when
    fewer than ``pairs`` variants survive.
    """
    # Cap on how many name variants cleanse_data returns per entity.
    MAX_PAIRS = 6
    def __init__(self, number=0, pairs = 2, limit_pairs = True):
        # pairs: minimum number of name variants cleanse_data must yield.
        # number > 0 enables "take only the first N" test mode.
        if number > 0:
            self.get=True
            self.number_of_names = number
        else:
            self.get = False
        self.pairs = pairs
        self.limit_pairs = limit_pairs
    def create_new_name(self, name, current_name_set):
        """Synthesize alternate orderings/initialisms of *name* and return
        those not already present in current_name_set."""
        names = name.split()
        ret = set([])
        if len(names) <= 2:
            ret.add(names[1] + ' , ' + names[0]) # last_name, first_name
            ret.add(names[0][0] + ' . ' + names[1]) # first_initial. lastname
            ret.add(names[1] + ' , ' + names[0][0]) # last_name, first_initial.
        elif len(names) > 2:
            ret.add(names[0] + ' ' + names[1][0] + ' . ' + names[-1]) # first_name middle initial last_name
            ret.add(names[-1] + ' , ' + names[0] + ' ' + names[1]) # last_name, first name, middle name
            ret.add(names[-1] + ' , ' + names[0] + ' ' + names[1][0] + ' . ') # last_name, first name, middle initial
            ret.add(names[-1] + " " + names[0]) # drop all middle names
            ret.add(names[0][0] + ' . ' + names[-1]) # first initial. lastname
            ret.add(names[-1] + ' , ' + names[0][0]) # last_name, first_initial.
        diff = ret.difference(set(current_name_set))
        return diff
    def cleanse_data(self, line):
        """Cleanse one '|'-separated alias line.

        Returns a list of name variants with the canonical name first, or
        None when the line is rejected (titles, duplicates, no plausible
        aliases, fewer than ``pairs`` variants).
        """
        # skip lines with pope/emperor/queen etc they tend to be useless
        titles = ['king','queen','emperor']
        qualifiers = ['of', 'I','II','III','IV','V','VI','VII','VIII','IX','X','XI','XII','XIII','XIV','XV','XVI','XVII','XVIII']
        if 'pope' in line.lower():
            return
        for p in titles:
            if p in line.lower():
                for q in qualifiers:
                    if q in line:
                        return
        # remove any quoted "..." segments and any parenthesized (...) segments
        line = re.sub('["][^"]*["]', '', line)
        line = re.sub('[(][^)]*[)]', '', line)
        # remove any non-Latin characters from the line
        line = re.sub('[^\x00-\x7F\x80-\xFF\u0100-\u017F\u0180-\u024F\u1E00-\u1EFF]','',line)
        arr = line.split("|")
        # Reject lines where every part normalizes to the same string.
        dups = []
        for name in arr:
            dups.append(name.replace(',', ' , ').replace('-', ' - ').replace('.', ' . ').replace('  ', ' ').strip())
        if (len(set(dups))) == 1:
            return
        # remove silly names
        cleansed_arr = []
        for name in arr:
            if name.strip() == '' or name.strip() == '.':
                continue
            # remove all abbreviated names that end with .
            if name.endswith('.'):
                continue
            # remove all single names
            if len(name.split(' ')) == 1:
                continue
            name = name.replace(',', ' , ').replace('-', ' - ').replace('.', ' . ').replace('  ', ' ').strip()
            cleansed_arr.append(name.replace('\n', '').replace('"',''))
        if len(cleansed_arr) == 0 or len(cleansed_arr[0].strip().split(' ')) == 1:
            return
        # check if the name is simply some name with all other words is just a title
        base_parts_set = set(cleansed_arr[0].split())
        if len(base_parts_set.intersection(set(qualifiers))) == 1:
            return
        # compare each name with every other name in the array to make sure
        # we have at least one name part in common with the first name, which we assume is
        # the 'canonical' name.
        ret = set(cleansed_arr)
        base = cleansed_arr[0].lower()
        # print(cleansed_arr[0])
        for j in range(1, len(cleansed_arr)):
            cmp = cleansed_arr[j].lower()
            # same name just changed case
            if base == cmp and cleansed_arr[j] in ret:
                ret.remove(cleansed_arr[j])
                continue
            ratio = Levenshtein.ratio(base, cmp)
            if (ratio == 0 and cleansed_arr[j] in ret):
                ret.remove(cleansed_arr[j])
                # print('removing due to ratio' + cleansed_arr[j] + "|" + cleansed_arr[0])
                continue
            # string similarity metrics will tell you Richard Phillips is similar to Jack Dowling because they look at it character by character
            # need something at a word level. Look at all the name parts in the thing we are comparing to base, and make sure at least
            # one name overlaps
            match = False
            for part in cleansed_arr[j].lower().split(' '):
                part = part.replace('\n','')
                if part in base:
                    match = True
            if not match and cleansed_arr[j] in ret:
                ret.remove(cleansed_arr[j])
                # print('removing due to no overlap in names' + cleansed_arr[j] + "|" + cleansed_arr[0])
                continue
            # often we have situations where the first name matches but everything else does not. This happens
            # when a woman changes names for instance. A complication here is that Japanese names reverse names
            # so we need to not remove that case
            match = False
            s = set(cleansed_arr[0].lower().split()).intersection(set(cleansed_arr[j].lower().split()))
            if len(s) > 1:
                match = True
            rest_of_base = ''.join(cleansed_arr[0].lower().split()[1:])
            compare_to = ''.join(cleansed_arr[j].lower().split()[1:])
            base_bigrams = set(bigrams(rest_of_base))
            cmp_bigrams = set(bigrams(compare_to))
            if len(base_bigrams.intersection(cmp_bigrams)) == 0 and not match:
                # print(base_bigrams)
                # print(cmp_bigrams)
                if cleansed_arr[j] in ret:
                    # print('removing due to looks like a female name changed after marriage' + cleansed_arr[j] + base)
                    ret.remove(cleansed_arr[j])
                continue
        anchor = cleansed_arr[0]
        ret_val = [cleansed_arr[0]]
        # put anchor in first, then add all other elements
        for n in ret:
            if n != cleansed_arr[0]:
                ret_val.append(n)
        # Not enough survivors: synthesize extra orderings of the anchor.
        if len(ret_val) < self.pairs:
            additional_elements = self.create_new_name(anchor, ret_val)
            ret_val.extend(additional_elements)
        if len(ret_val) < self.pairs:
            return
        elif len(ret_val) > self.MAX_PAIRS:
            anchor_parts = set(anchor.split())
            def jaccard_sim_function(x):
                x_parts = set(x.split())
                return len(anchor_parts.intersection(x_parts)) / len(anchor_parts.union(x_parts))
            # NOTE(review): sorted() returns a new list, so this result is
            # discarded and ret_val keeps its original order.  Assigning it
            # back would also move the anchor away from index 0, which
            # callers rely on -- confirm intended behavior before "fixing".
            sorted(ret_val, key=jaccard_sim_function)
            # print(ret_val)
            ret_val = ret_val[0:self.MAX_PAIRS]
        assert len(set(ret_val)) >= self.pairs
        return ret_val
if __name__ == '__main__':
    # CLI driver: cleanse a directory of raw alias files into one output file.
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-f', dest="input_file", help="file to cleanse file")
    parser.add_argument('-o', dest="output_file", help="cleansed data file name")
    parser.add_argument('-t', dest="entity_type", help="names or companies")
    parser.add_argument('-p', dest="num_pairs", help="how many same pairs should be created (default=2)", nargs='?', default=2, type=int)
    parser.add_argument('-u', dest="function", help="use test parsing function", nargs='?', default=None)
    parser.add_argument('-n', dest="number", help="process only this many from the file (useful for debugging)", nargs='?', type=int, default = 0)
    parser.add_argument('-l', dest="no_limit", nargs='?', type=bool, default=False)
    args = parser.parse_args()
    # BUG FIX: input validation via `assert` is stripped under `python -O`;
    # report bad arguments through the parser instead.
    if args.num_pairs > 4:
        parser.error("num_pairs must be <= 4")
    print(args.num_pairs)
    if args.entity_type == 'names':
        cleaner = NameDataCleanser(args.number, args.num_pairs)
    elif args.entity_type == 'companies':
        cleaner = CompanyDataCleanser()
    else:
        # Previously an unknown entity_type crashed later with a NameError
        # on the undefined `cleaner`.
        parser.error("entity_type must be 'names' or 'companies'")
    # Uncomment next line to test old code
    # cleaner = GenericDataCleanser(args.entity_type, args.function, args.number, args.num_pairs)
    cleaner.clean_file(args.input_file, args.output_file)
| 15,807 | 37.462287 | 188 | py |
fuzzyJoiner | fuzzyJoiner-master/random_test_selecter.py | from sys import argv
from random import shuffle
# Usage: random_test_selecter.py <input> <output> <sample size>
# Writes a uniform random sample of <sample size> lines from <input> to <output>.
# Context managers ensure the handles are closed even if an error occurs
# (the previous version leaked both handles on any exception).
with open(argv[1], 'r') as input_file:
    lines = input_file.readlines()
shuffle(lines)
# After shuffling, the last N lines are a uniform random sample.
lines = lines[-int(argv[3]):]
with open(argv[2], 'w') as output_file:
    output_file.writelines(lines)
| 272 | 21.75 | 32 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM.py | import numpy as np
import random as random
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
# random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
# session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
import tensorflow as tf
# from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph())
# """
from keras import backend as K
K.set_session(sess)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Lambda, GRU, Activation
from keras.layers import Embedding
from keras.models import Model, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
#must fix
# Hyper-parameters / feature flags for the triplet-embedding model.
MAX_NB_WORDS = 140000  # vocabulary cap when building the embedding matrix
EMBEDDING_DIM = 100  # embedding width; must match the pretrained vectors
MAX_SEQUENCE_LENGTH = 10  # names are padded/truncated to this many tokens
MARGIN=10  # margin used by the triplet loss variants
# NOTE(review): ALPHA is fed straight to tf.tan, which takes radians --
# confirm whether 30 was intended as degrees.
ALPHA=30
USE_GRU=True
DEBUG = True  # when True, only DEBUG_DATA_LENGTH entities are processed
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=False  # toggled by CLI/loss selection elsewhere
LOSS_FUNCTION=None  # selected at runtime from the loss functions below
TRAIN_NEIGHBOR_LEN=20  # ANN neighbors fetched per entity during training
TEST_NEIGHBOR_LEN=20  # ANN neighbors fetched per entity during testing
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False  # L2-normalize embeddings before distance computation
def f1score(positive, negative):
    """Compute an F1-style score over paired triplet distances.

    positive[i] / negative[i] are the anchor-positive and anchor-negative
    distances for triplet i.  A triplet counts as a true positive when the
    positive pair is no farther than the negative pair; otherwise it counts
    as both a false negative and a false positive (preserving the original
    counting scheme).  Returns 0.0 for empty input (previously this raised
    ZeroDivisionError).
    """
    #labels[predictions.ravel() < 0.5].sum()
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for pos_dist, neg_dist in zip(positive, negative):
        if pos_dist <= neg_dist:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    # Guard against empty input; also removed the unused, misspelled
    # `fsocre` local from the original.
    if denominator == 0:
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma char embeddings.

    Rows of the embedding matrix are looked up by the tokenizer's word
    indices; row 0 is reserved (hence num_words = len(word_index) + 1).
    Words the embedder cannot resolve are logged and left/stored as
    (near-)zero vectors.
    """
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            # An all-zero vector signals a lookup miss -- log it.
            if sum(embedding_vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    # trainable=False keeps the pretrained vectors frozen during training.
    embedding_layer = Embedding(num_words,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Tokenize and pad the anchor/negative/positive text lists.

    Returns a dict with the same keys, each mapped to an integer sequence
    array padded to MAX_SEQUENCE_LENGTH.
    """
    padded = {}
    for role in ('anchor', 'negative', 'positive'):
        encoded = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(encoded, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Return every line of *filepath*, trailing newlines preserved."""
    with open(filepath) as source:
        return list(source)
def read_file(file_path):
    """Read '|'-separated triplets, one 'anchor|positive|negative' per line.

    Stops after DEBUG_DATA_LENGTH + 1 lines when DEBUG is set.  Returns a
    dict with 'anchor', 'positive' and 'negative' lists.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # Use a context manager so the handle is always closed (the previous
    # version leaked the open file).
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl, start=1):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split entities into (train, validation) lists.

    In DEBUG mode only the first DEBUG_DATA_LENGTH entities are used
    without shuffling; otherwise the input list is shuffled IN PLACE.
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    # BUG FIX: slicing with -0 would previously return an empty training set
    # and the full list as validation whenever the split rounded to zero.
    if num_validation_samples == 0:
        return ents, []
    return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(MARGIN)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    """Adapted contrastive-style loss: mean(d(a,p)^2 + (MARGIN - d(a,n))^2).

    Pulls the positive distance toward 0 and the negative distance toward
    MARGIN, rather than enforcing a relative margin.
    """
    # margin = K.constant(MARGIN)
    # return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
    # return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]) + K.square(margin - y_pred[:,2,0]))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Loss from 'Deep Metric Learning with Improved Triplet Loss for Face Clustering in Videos'."""
    margin = K.constant(MARGIN)
    lambda_p = 0.02
    threshold = 0.1
    d_ap = y_pred[:,0,0]
    d_an = y_pred[:,1,0]
    d_pn = y_pred[:,2,0]
    # inter-class constraint: anchor-positive closer than the negative pair average
    inter_constraint = d_ap - ((d_an + d_pn) / 2) + margin
    # intra-class constraint: anchor-positive distance under a fixed threshold
    intra_constraint = d_ap - threshold
    return K.maximum(K.constant(0), inter_constraint) + lambda_p * K.maximum(K.constant(0), intra_constraint)
def accuracy(y_true, y_pred):
    """Fraction of triplets whose anchor-positive distance beats anchor-negative."""
    correctly_ordered = y_pred[:,0,0] < y_pred[:,1,0]
    return K.mean(correctly_ordered)
def l2Norm(x):
    """L2-normalize *x* along its last axis."""
    normalized = K.l2_normalize(x, axis=-1)
    return normalized
def tanhNorm(x):
    """Rescale *x* so its L2 norm is squashed through tanh (direction preserved)."""
    norm = K.sqrt(K.maximum(K.sum(K.square(x), axis=-1, keepdims=True), K.epsilon()))
    return x * (K.tanh(norm) / norm)
def euclidean_distance(vects):
    """Row-wise Euclidean distance between a pair of embedding batches."""
    left, right = vects
    squared_sum = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared_sum, K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint (angular loss)."""
    anchor, positive, negative = vects
    center = (anchor + positive) / K.constant(2)
    squared_sum = K.sum(K.square(negative - center), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared_sum, K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive Euclidean distance (the negative input is ignored)."""
    anchor, positive, _negative = vects
    squared_sum = K.sum(K.square(anchor - positive), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared_sum, K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an entity->aliases map into (ordered name list, name->index map)."""
    unique_text = []
    entity2index = {}
    for canonical, aliases in entity2same.items():
        for name in [canonical] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine (anchor, positive, negative) triplets with approximate NN search.

    Embeds every name with *model*, indexes embeddings in Annoy, and for each
    canonical entity samples negatives from near neighbors that are not true
    aliases.  Returns recall when *test* is True, else (triplets, recall).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean')  # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    # renamed from `accuracy` so the module-level accuracy() metric function
    # is not shadowed inside this function
    ann_accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor.  Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        positives = expected_text
        negatives = nearest_text - expected_text
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, limit_pairs=False):
    """Cleanse raw entity strings into {canonical: [aliases]}; keeps entities with >= 4 names."""
    num_names = 4
    cleanser = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    entity2same = {}
    for raw in entities:
        cleansed = cleanser.cleanse_data(raw)
        if cleansed and len(cleansed) >= num_names:
            entity2same[cleansed[0]] = cleansed[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap *embedding_layer* in a model emitting flattened, untrained embeddings."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def get_hidden_layer(name, net, is_last):
    """Append one 128-unit hidden layer (GRU when USE_GRU, else Dense) to *net*."""
    if not USE_GRU:
        return Dense(128, activation='relu', name=name)(net)
    # intermediate GRU layers must return full sequences; the last one collapses
    return_sequences = not is_last
    return GRU(128, return_sequences=return_sequences, activation='relu', name=name)(net)
def build_model(embedder):
    """Assemble the triplet-siamese training network.

    Returns (model, test_positive_model, test_negative_model, inter_model):
    the compiled triplet network, models exposing the anchor-positive and
    anchor-negative distances, and the shared embedding tower.
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
    #net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
    #net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
    net = GRU(128, activation='relu', name='embed3')(net)
    """
    for i in range(0, NUM_LAYERS):
        net = get_hidden_layer('embed' + str(i), net, False)
    net = get_hidden_layer('embed_last', net, True)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    """
    # the same tower is shared by the anchor, positive and negative branches
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    # pairwise distances between the embedded branches
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        # angular loss consumes the negative-to-center and anchor-positive distances
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # also expose the positive-negative distance for improved_loss variants
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=triplet_loss, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# Command-line interface: hyper-parameters and input file for the fuzzy-join run.
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
                    help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
                    help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
                    help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
                    help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
# NOTE(review): only --input is consumed below; the remaining flags feed the
# flag-handling section that has been disabled (see the string literal below).
args = parser.parse_args()
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved_triplet_loss':
LOSS_FUNCTION=improved_triplet_loss
elif args.loss_function == 'modified_loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = true
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text.  These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
# fit on train and test together so both share one vocabulary
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
# alternate between mining triplets with the current embedding model and
# re-fitting the triplet network on those triplets, until recall is good enough
while test_match_stats < .9 and counter < num_iter:
    counter += 1
    train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
    print("Match stats:" + str(match_stats))
    number_of_names = len(train_data['anchor'])
    # print(train_data['anchor'])
    print("number of names" + str(number_of_names))
    # labels are unused by the loss functions; random placeholder of the right shape
    Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
    filepath="weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
    callbacks_list = [checkpoint, early_stop]
    train_seq = get_sequences(train_data, tokenizer)
    # check just for 5 epochs because this gets called many times
    model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
    current_model = inter_model
    # print some statistics on this epoch
    print("training data predictions")
    positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    print("f1score for train is: {}".format(f1score(positives, negatives)))
    print("test data predictions")
    positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    print("f1score for test is: {}".format(f1score(positives, negatives)))
    test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    print("Test stats:" + str(test_match_stats))
K.clear_session()
| 21,712 | 37.227113 | 167 | py |
fuzzyJoiner | fuzzyJoiner-master/old/matcher_functions.py | import sqlalchemy
from sqlalchemy.sql import select
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
def connect(user, password, db, host='localhost', port=5432):
    '''Returns a connection and a metadata object'''
    # Build a PostgreSQL URL of the form:
    # postgresql://federer:grandestslam@localhost:5432/tennis
    url = 'postgresql://{}:{}@{}:{}/{}'.format(user, password, host, port, db)
    # create_engine() gives us the engine object we treat as the connection
    engine = sqlalchemy.create_engine(url, client_encoding='utf8')
    # Reflect the existing schema into a MetaData bound to the engine
    metadata = sqlalchemy.MetaData(bind=engine, reflect=True)
    return engine, metadata
# create a set of tuples for the machine learning model where each entity name is matched
# with at least 2 other names of the same entity, and each entity is paired with 2 other
# names of a different entity. The different pairs are chosen based on the fact that the have the
# same name part, or are completely different. If the name is unique then the 2 different pairs
# are just 2 random pairs.
def create_tuples_for_siamese_network():
    """Stub: look up the alias and word-to-entity tables for pair generation.

    NOTE(review): relies on a module-global `meta` that is never defined in
    this module -- calling this raises NameError; presumably unfinished.
    """
    aliases_m = meta.tables['aliases']
    word2entities_m= meta.tables['words2entities']
# This should combine several functions and return aliases and bucket lists.
def condensed_start():
    """Stub: planned entry point that will build aliases and bucket lists."""
    pass
def load_good_buckets(table_string1, table_string2, dictionary, con, meta):
    """Walk two word tables in parallel and pair up buckets sharing a word.

    Each table holds (name_id, word) rows ordered by word.  For every word
    present in both tables, builds a pair of buckets of [dictionary[id], id]
    entries.  Returns (buckets, bucket_words) where bucket_words maps each
    shared word to its bucket pair.
    """
    table = meta.tables[table_string1]
    words1 = [list(row) for row in con.execute(select([table]))]
    table = meta.tables[table_string2]
    words2 = [list(row) for row in con.execute(select([table]))]
    buckets = []
    bucket_words = {}
    temp_list1 = []
    temp_list2 = []
    x = 0
    y = 0
    # x / y become None when a scan walks off the end of its table;
    # idiom fix: compare to None with `is not`, not `!=`
    while x is not None and y is not None:
        if words1[x][1] < words2[y][1]:
            x = find_next_bucket(words1, x)
        elif words1[x][1] > words2[y][1]:
            y = find_next_bucket(words2, y)
        else:
            word = words1[x][1]
            temp_list1, x = load_bucket(words1, x, dictionary)
            temp_list2, y = load_bucket(words2, y, dictionary)
            pair = [temp_list1, temp_list2]
            buckets.append(pair)
            bucket_words[word] = buckets[-1]
    return buckets, bucket_words
def find_next_bucket(table, position):
    """Advance past the current run of identical words; return None when exhausted."""
    current_word = table[position][1]
    for idx in range(position, len(table)):
        if table[idx][1] != current_word:
            return idx
    return None
def load_bucket(table, position, dictionary):
    """Collect [dictionary[id], id] entries for one word run; return (bucket, next position or None)."""
    bucket = []
    word = table[position][1]
    idx = position
    while idx < len(table) and table[idx][1] == word:
        bucket.append([dictionary[table[idx][0]], table[idx][0]])
        idx += 1
    if idx < len(table):
        return (bucket, idx)
    return bucket, None
def create_double_num_dicts(aliases):
    """Assign serial numbers to every name in the alias pairs; return both mappings."""
    num_to_word = {}
    word_to_num = {}
    serial = 0
    for pair in aliases:
        for name in pair:
            num_to_word[serial] = name
            word_to_num[name] = serial
            serial += 1
    return num_to_word, word_to_num
def fscore(true_items, test_dict, beta):
    """Compute the F-beta score of predicted matches against the gold pairs.

    true_items: collection of (name1, name2) gold pairs.
    test_dict: mapping name1 -> list of candidate matches.
    """
    true_positives = float(sum(1 for key in test_dict if test_key(true_items, test_dict, key)))
    false_positive = float(len(test_dict)) - true_positives
    not_indexed = sum(1 for pair in true_items if pair[0] not in test_dict)
    wrongly_indexed = sum(
        1 for pair in true_items
        if pair[0] in test_dict and pair[1] not in test_dict[pair[0]]
    )
    false_negative = not_indexed + wrongly_indexed
    print( "total names: " + str(len(true_items)))
    print( "not indexed: " + str(not_indexed))
    print ("wrongly indexed: " + str(false_negative - not_indexed))
    print ("all false negitives: " + str(false_negative))
    print ("true positives: " + str(true_positives))
    print ("false positives: " + str(false_positive))
    weighted_tp = ((1 + (beta * beta)) * true_positives)
    return weighted_tp / (weighted_tp + false_positive + ((beta * beta) * false_negative))
def test_key(true_items, test_dict, key):
for answer in test_dict[key]:
if (key, answer) in true_items:
return True
return False
def make_test_dict(items, k):
    """Group pairs into {name1: [name2, ...]}, keeping at most *k* entries per key.

    Prints how many candidates overflowed the per-key cap.
    """
    dictionary = {}
    overflow = 0
    for pair in items:
        key, value = pair[0], pair[1]
        if key not in dictionary:
            dictionary[key] = [value]
        elif len(dictionary[key]) < k:
            dictionary[key].append(value)
        else:
            overflow += 1
    print( "overflow: " + str(overflow))
    return dictionary
def get_aliases(con, meta):
    """Fetch all (name1, name2) alias pairs from the aliases table as a set."""
    aliases_table = meta.tables['aliases']
    rows = con.execute(select([aliases_table]))
    return set((row[0], row[1]) for row in rows)
def run_test(pre_procces, test, num_to_word, bucket_list):
    """Apply *pre_procces* to each bucket entry, then record cross-bucket pairs passing *test*."""
    prepared = pre_proccess_words(num_to_word, bucket_list, pre_procces)
    matches = set([])
    for left_bucket, right_bucket in prepared:
        for left in left_bucket:
            for right in right_bucket:
                if test(left[1], right[1]):
                    matches.add((num_to_word[left[0]], num_to_word[right[0]]))
    return matches
def create_alias_dict(con, meta):
    """Fetch the aliases table into a {name1: name2} dict (last pair per key wins)."""
    aliases_table = meta.tables['aliases']
    mapping = {}
    for row in con.execute(select([aliases_table])):
        mapping[row[0]] = row[1]
    return mapping
def get_missed(aliases, test_dict):
    """Return gold pairs absent from *test_dict* (key missing or match not listed)."""
    return set(
        pair for pair in aliases
        if pair[0] not in test_dict or pair[1] not in test_dict[pair[0]]
    )
def export_unbucketed(impossible, con, meta):
    """(Re)create the 'unbucketed' table and insert every impossible pair."""
    if 'unbucketed' in meta.tables:
        meta.tables['unbucketed'].drop(con)
    unbucketed = Table('unbucketed', meta, Column('name1', String), Column('name2', String), extend_existing=True)
    rows = [
        {'name1': key, 'name2': name}
        for key in impossible
        for name in impossible[key]
    ]
    meta.create_all(con)
    con.execute(unbucketed.insert(), rows)
def export_missed(aliases, test_dict, con, meta):
    """(Re)create the 'missed' table and insert every gold pair we failed to match."""
    missed_pairs = get_missed(aliases, test_dict)
    if 'missed' in meta.tables:
        meta.tables['missed'].drop(con)
    missed = Table('missed', meta, Column('name1', String), Column('name2', String), extend_existing=True)
    rows = [{'name1': pair[0], 'name2': pair[1]} for pair in missed_pairs]
    meta.create_all(con)
    con.execute(missed.insert(), rows)
def get_impossible(aliases, bucket_list, num_to_word):
    """Find gold pairs that bucketing can never match.

    Starts from every gold pair, then removes any expected match that shows
    up in the opposite bucket of some bucket pair; whatever remains is
    unreachable.  Returns {name: [unreachable matches]} with empty entries
    dropped.
    """
    testing_dict = {}
    for pair in aliases:
        if pair[0] in testing_dict:
            testing_dict[pair[0]].append(pair[1])
        else:
            testing_dict[pair[0]] = [pair[1]]
    bucket_list = pre_proccess_words(num_to_word, bucket_list, lambda x : x)
    for bucket_pair in bucket_list:
        other_bucket = set([bucket_pair[1][x][1] for x in range(len(bucket_pair[1]))])
        for name in bucket_pair[0]:
            expected = testing_dict.get(name[1])
            if expected is None:
                # name has no recorded gold pairs; nothing to prune
                # (the original raised KeyError here)
                continue
            other_name = 0
            while other_name < len(expected):
                if expected[other_name] in other_bucket:
                    # bug fix: do NOT advance after a pop -- the next element
                    # shifts into this slot and used to be skipped.  The old
                    # `except ValueError: pass` was dead code (list.pop by
                    # index raises IndexError, never ValueError).
                    expected.pop(other_name)
                else:
                    other_name += 1
    for key in testing_dict.copy():
        if not testing_dict[key]:
            del testing_dict[key]
    return testing_dict
def pre_proccess_words(num_to_word, bucket_list, function):
    """Overwrite each entry's second slot with function(original name), in place."""
    for bucket_pair in bucket_list:
        for bucket in bucket_pair:
            for entry in bucket:
                entry[1] = function(num_to_word[entry[0]])
    return bucket_list
def run_special_test(bucket_list, num_to_word):
    """Pair up singleton buckets: when both sides hold exactly one name, match them."""
    matches = set([])
    for left_bucket, right_bucket in bucket_list:
        if len(left_bucket) == 1 and len(right_bucket) == 1:
            matches.add((num_to_word[left_bucket[0][0]], num_to_word[right_bucket[0][0]]))
    return matches
| 8,149 | 37.809524 | 114 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-8.20.18.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
from scipy.stats.mstats import gmean
import argparse
#must fix
MAX_NB_WORDS = 140000  # cap on tokenizer vocabulary size
EMBEDDING_DIM = 100  # Kazuma character-embedding dimensionality
MAX_SEQUENCE_LENGTH = 10  # names are padded/truncated to this many tokens
MARGIN=10  # margin used by triplet_loss
ALPHA=45  # angular-loss alpha; presumably degrees (paper uses 36-55) -- TODO confirm
DEBUG = False  # when True, run on a small sample
DEBUG_DATA_LENGTH = 100  # sample size used in DEBUG mode
DEBUG_ANN = False  # when True, only evaluate the ANN index and exit
USE_ANGULAR_LOSS=False  # switch between angular and triplet losses in build_model
LOSS_FUNCTION=None  # selected loss (see the disabled flag-handling block below)
TRAIN_NEIGHBOR_LEN=20  # ANN neighborhood size when mining training triplets
TEST_NEIGHBOR_LEN=20  # ANN neighborhood size when evaluating
EMBEDDING_TYPE = 'Kazuma'  # input encoding type
NUM_LAYERS = 3  # hidden-layer count for the tower
USE_L2_NORM = False  # optionally L2-normalize tower output
filepath="weights.best.hdf5"  # checkpoint path used during model.fit
output_file_name_for_hpo = "val_dict_list.json"  # metrics dump for hyper-parameter search
def f1score(positive, negative):
    """F1 of triplet ordering: a triplet is correct when pos distance <= neg distance.

    Each misordered triplet is counted as both a false positive and a false
    negative, so precision and recall penalties are symmetric.
    (Removed the unused, misspelled `fsocre` local.)
    """
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negitive + false_positive)
    return fscore
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding initialized from Kazuma character embeddings."""
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, idx in word_index.items():
        if idx >= MAX_NB_WORDS:
            continue
        vector = kz.emb(word)
        if vector is not None:
            if sum(vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            embedding_matrix[idx] = vector
    print("Number of words:" + str(num_words))
    return Embedding(num_words,
                     EMBEDDING_DIM,
                     weights=[embedding_matrix],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_sequences(texts, tokenizer):
    """Tokenize and pad the anchor/negative/positive text lists to MAX_SEQUENCE_LENGTH."""
    sequences = {}
    for role in ('anchor', 'negative', 'positive'):
        tokenized = tokenizer.texts_to_sequences(texts[role])
        sequences[role] = pad_sequences(tokenized, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Return the raw lines of *filepath* as a list (trailing newlines kept)."""
    with open(filepath) as fl:
        return list(fl)
def read_file(file_path):
    """Parse a triplet file of 'anchor|positive|negative' lines.

    Returns a dict with parallel 'anchor', 'positive' and 'negative' lists.
    In DEBUG mode, stops reading after DEBUG_DATA_LENGTH lines.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # use a context manager so the handle is closed even on error
    # (the original left the file open)
    with open(file_path, 'r') as fl:
        i = 0
        for line in fl:
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            i += 1
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split *entities* into (train, test); shuffles the list in place unless DEBUG."""
    if DEBUG:
        candidates = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        candidates = entities
    n_holdout = int(test_split * len(candidates))
    return candidates[:-n_holdout], candidates[-n_holdout:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular loss (Wang et al., 'Deep Metric Learning with Angular Loss').

    y_pred stacks per-sample [anchor-positive distance, negative-to-center
    distance] along axis 1.
    """
    import math
    # ALPHA is expressed in degrees (the paper recommends 36-55 degrees);
    # tf.tan expects radians, so convert before taking the tangent.
    alpha = K.constant(math.radians(ALPHA))
    a_p = y_pred[:,0,0]
    n_c = y_pred[:,1,0]
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet loss with fixed margin 0.2 (Schroff et al., 2015)."""
    d_ap_sq = K.square(y_pred[:,0,0])
    d_an_sq = K.square(y_pred[:,1,0])
    return K.mean(K.maximum(K.constant(0), d_ap_sq - d_an_sq + K.constant(0.2)))
def triplet_loss(y_true, y_pred):
    """Contrastive-style loss: squared positive distance plus hinged margin on negatives."""
    margin = K.constant(MARGIN)
    hinge = K.maximum(margin - y_pred[:,1,0], K.constant(0))
    return K.mean(K.square(y_pred[:,0,0]) + K.square(hinge))
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed loss: shrink tanh(pos distance), grow tanh(neg distance)."""
    pos_term = K.tanh(y_pred[:,0,0])
    neg_term = K.constant(1) - K.tanh(y_pred[:,1,0])
    return K.mean(pos_term + neg_term)
def triplet_tanh_pn_loss(y_true, y_pred):
    """Tanh loss averaging the anchor-negative and positive-negative push terms."""
    pos_term = K.tanh(y_pred[:,0,0])
    an_term = K.constant(1) - K.tanh(y_pred[:,1,0])
    pn_term = K.constant(1) - K.tanh(y_pred[:,2,0])
    return K.mean(pos_term + (an_term + pn_term) / K.constant(2))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Loss from 'Deep Metric Learning with Improved Triplet Loss' (fixed margin 1)."""
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    d_ap = y_pred[:,0,0]
    d_an = y_pred[:,1,0]
    d_pn = y_pred[:,2,0]
    # inter-class: anchor-positive closer than the average of the negative pairs
    inter_constraint = d_ap - ((d_an + d_pn) / K.constant(2)) + margin
    # intra-class: anchor-positive distance kept under a fixed threshold
    intra_constraint = d_ap - threshold
    return K.maximum(K.constant(0), inter_constraint) + lambda_p * K.maximum(K.constant(0), intra_constraint)
def accuracy(y_true, y_pred):
    """Fraction of triplets ordered correctly (anchor-positive < anchor-negative)."""
    ordered = y_pred[:,0,0] < y_pred[:,1,0]
    return K.mean(ordered)
def l2Norm(x):
    """L2-normalize *x* along its last axis."""
    result = K.l2_normalize(x, axis=-1)
    return result
def euclidean_distance(vects):
    """Row-wise Euclidean distance between a pair of embedding batches."""
    left, right = vects
    sq_sum = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint (angular loss)."""
    anchor, positive, negative = vects
    midpoint = (anchor + positive) / K.constant(2)
    sq_sum = K.sum(K.square(negative - midpoint), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive Euclidean distance (the negative input is ignored)."""
    anchor, positive, _negative = vects
    sq_sum = K.sum(K.square(anchor - positive), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an entity->aliases map into (ordered name list, name->index map)."""
    unique_text = []
    entity2index = {}
    for canonical in entity2same:
        names = [canonical] + list(entity2same[canonical])
        for name in names:
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine triplets with approximate nearest-neighbour search and report stats.

    Embeds every name with *model*, indexes the embeddings in Annoy, and for
    each canonical entity pairs every near-neighbour negative with every
    expected positive.  Also reports distance statistics, precision@1, and
    dumps the ANN ordering accuracy to output_file_name_for_hpo for HPO.

    Returns recall when *test* is True, else (triplets dict, recall).
    NOTE(review): statistics.stdev raises if fewer than two distances were
    collected, and the final division assumes total > 0.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean')  # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    precise = 0
    triplets = {}
    closest_positive_counts = []
    pos_distances = []
    neg_distances = []
    all_pos_distances = []
    all_neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor.  Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # ANN ordering accuracy: count pairs where the positive really is closer
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # find the closest negative to measure precision@1 against
        min_neg_distance = 1000000
        for i in negatives:
            dist_neg = t.get_distance(index, entity2unique[i])
            all_neg_distances.append(dist_neg)
            if dist_neg < min_neg_distance:
                min_neg_distance = dist_neg
        for j in expected_text:
            dist_pos = t.get_distance(index, entity2unique[j])
            all_pos_distances.append(dist_pos)
        closest_pos_count = 0
        for p in overlap:
            dist_pos = t.get_distance(index, entity2unique[p])
            if dist_pos < min_neg_distance:
                closest_pos_count+=1
        if closest_pos_count > 0:
            precise+=1
        closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
        # emit every (negative, expected positive) combination as a triplet
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
    print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
    print("max all positive distance:" + str(max(all_pos_distances)))
    print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
    print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
    print("max all neg distance:" + str(max(all_neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    print("Precision at 1: " + str(precise / len(entity2same)))
    # record accuracy for the hyper-parameter-optimization harness
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entities into {canonical: [aliases]}; people need 4 names, companies 2."""
    if people:
        num_names = 4
        generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    else:
        num_names = 2
        generator = CompanyDataCleanser(limit_pairs)
    entity2same = {}
    for raw in entities:
        cleansed = generator.cleanse_data(raw)
        if cleansed and len(cleansed) >= num_names:
            entity2same[cleansed[0]] = cleansed[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap *embedding_layer* in a model emitting flattened, untrained embeddings."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def build_model(embedder):
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
for i in range(0, NUM_LAYERS):
net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
if USE_L2_NORM:
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
base_model.summary()
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
if USE_ANGULAR_LOSS:
n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
stacked_dists = Lambda(
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([a_p, n_c])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
else:
exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
stacked_dists = Lambda(
# lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([positive_dist, negative_dist, exemplar_negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
inter_model = Model(input_anchor, net_anchor)
print("output_shapes")
model.summary()
# print(positive_dist.output_shape)
# print(negative_dist.output_shape)
# print(exemplar_negative_dist)
# print(neg_dist.output_shape)
return model, test_positive_model, test_negative_model, inter_model
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
help='sample size for debug run')
parser.add_argument('--margin', type=int,
help='margin')
parser.add_argument('--loss_function', type=str,
help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
args = parser.parse_args()
filepath = args.model
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = True
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
counter = 0
current_model = embedder_model
prev_match_stats = 0
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 21,235 | 36.061082 | 163 | py |
fuzzyJoiner | fuzzyJoiner-master/old/ANNBasedSampleSelection.py | import Named_Entity_Recognition_Modified
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from matcher_functions import connect
import argparse
import numpy as np
from keras.layers import Embedding, Concatenate
from keras.models import Model
import names_cleanser
from random import randint
import keras.backend as K
import sys
MARGIN = 2
DEBUG = False
def process_aliases(con, meta):
    """Load alias rows from the DB and group the names by entity id.

    Returns:
        entities: flat list of all kept alias strings.
        entity2names: entity id -> list of indices into ``entities`` for
        that entity's names.
    """
    # assumes each row is (name_a, name_b, entity_id) — TODO confirm against
    # get_aliases_with_ids; note row[2] is also passed through has_difft_id
    # below, which treats it as a *name* — verify that is intentional.
    aliases = Named_Entity_Recognition_Modified.get_aliases_with_ids(con, meta)
    entity2sames = {}
    namesToIds = {}
    # Returns True when `name` was previously seen under a different entity
    # id; otherwise records the (name -> id) association and returns False.
    def has_difft_id(name, entityid):
        if name in namesToIds:
            ids = namesToIds[name]
            if ids != entityid:
                return True
        namesToIds[name] = entityid
        return False
    i = 0
    for row in aliases:
        i += 1
        if DEBUG and i > 100:
            break
        entityid = row[2]
        # filter out names that are associated with multiple ids, this will confuse the model trying to learn the distance function
        if has_difft_id(row[0], entityid) or has_difft_id(row[1], entityid) or has_difft_id(row[2], entityid):
            continue
        # first occurrence of an entity contributes both names; subsequent
        # rows contribute only their second name (row[0] repeats the anchor)
        if entityid not in entity2sames:
            entity2sames[entityid] = [row[0]]
            entity2sames[entityid].append(row[1])
        else:
            entity2sames[entityid].append(row[1])
    # print(entity2sames)
    entities = []
    entity2names = {}
    # Assign each entity the contiguous index range its names occupy in the
    # flat `entities` list.
    for e,v in entity2sames.items():
        entity2names[e] = [len(entities) + i for i in range(0, len(v))]
        entities.extend(v)
    print(entities)
    print(entity2names)
    return entities, entity2names
if __name__ == '__main__':
print('Processing text dataset')
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-u', dest="user", help="username")
parser.add_argument('-p', dest="password", help="password")
parser.add_argument('-d', dest="db", help="dbname")
parser.add_argument('-o', dest="output_file", help="output file name")
parser.add_argument('-a', dest="num_pairs", help="number of same pairs in db", nargs='?', default=2, type=int)
args = parser.parse_args()
#change to get from sql and not read from file
con, meta = connect(args.user, args.password, args.db)
# get all names first
entities, entity2names = process_aliases(con, meta)
tokenizer = Tokenizer(num_words=Named_Entity_Recognition_Modified.MAX_NB_WORDS)
tokenizer.fit_on_texts(entities)
sequences = tokenizer.texts_to_sequences(entities)
print(sequences)
sequences = pad_sequences(sequences, maxlen=Named_Entity_Recognition_Modified.MAX_SEQUENCE_LENGTH)
word_index = tokenizer.word_index
num_words = len(word_index) + 1
embedding_matrix = np.zeros((num_words, Named_Entity_Recognition_Modified.EMBEDDING_DIM))
kz = KazumaCharEmbedding()
for word, i in word_index.items():
if i >= Named_Entity_Recognition_Modified.MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
if embedding_vector is not None:
if sum(embedding_vector) == 0:
print("failed to find embedding for:" + word)
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# note that we set trainable = False so as to keep the embeddings fixed
Named_Entity_Recognition_Modified.check_for_zeroes(embedding_matrix, "here is the first pass")
embedding_layer = Embedding(num_words,
Named_Entity_Recognition_Modified.EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=Named_Entity_Recognition_Modified.MAX_SEQUENCE_LENGTH,
trainable=False)
model = Named_Entity_Recognition_Modified.embedded_representation(embedding_layer)
embedded_output = model.predict(sequences)
print(np.shape(embedded_output))
sys.exit(0)
t = AnnoyIndex(len(embedded_output[0]), metric='euclidean')
for i in range(len(embedded_output)):
v = embedded_output[i]
t.add_item(i, v)
t.build(100) # 100 trees
with open(args.output_file, 'w') as f:
for e, v in entity2names.items():
index_for_same = entity2names[e]
anchor_index = index_for_same[0]
nearest = t.get_nns_by_vector(embedded_output[anchor_index], 10)
maximum_diff = -1
minimum_same = 100000
maximum_same = -1
same_pair_in_NN_set = False
for i in range(1, len(index_for_same)):
dist = t.get_distance(anchor_index, index_for_same[i])
print("same pair:" + entities[anchor_index] + "-" + entities[index_for_same[i]] + " distance:" + str(dist))
minimum_same = min(dist, minimum_same)
maximum_same = max(dist, maximum_same)
for i in nearest:
if i == anchor_index:
continue
dist = t.get_distance(anchor_index, i)
print(entities[anchor_index] + "-" + entities[i] + " distance:" + str(dist))
if i in index_for_same:
same_pair_in_NN_set = True
else:
maximum_diff = max(dist, maximum_diff)
if dist > minimum_same:
f.write(entities[anchor_index] + "|" + entities[index_for_same[randint(1, len(index_for_same) - 1)]] + "|" + entities[i] + "\n")
if (maximum_diff < minimum_same):
print("hard entity because maximum different is less than minimum same")
continue
elif same_pair_in_NN_set:
print("easy entity - same pair is in NN set")
else:
print("~hard entity" + entities[anchor_index])
# write a set of different completely items now
print(maximum_same)
j = 0
while j <= 30:
k = randint(0, len(entities) - 1)
if t.get_distance(anchor_index, k) > maximum_diff + MARGIN:
f.write((entities[anchor_index] + "|" + entities[index_for_same[randint(1, len(index_for_same) - 1)]] + "|" + entities[k] + "\n"))
k += 1
j += 1
print(len(entity2names))
| 6,038 | 32.181319 | 138 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-angular.py | import numpy as np
import tensorflow as tf
import random as random
# import cntk as C
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# import theano as T
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# """
# from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
#must fix
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
MARGIN=1
ALPHA=30
USE_GRU=True
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=True
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = True
def f1score(positive, negative):
    """Compute an F1-style score over paired triplet distances.

    A triplet counts as a true positive when the anchor-positive distance is
    no larger than the anchor-negative distance; otherwise it counts as BOTH
    one false positive and one false negative (the ranking is wrong in both
    directions for that triplet).

    Args:
        positive: sequence of anchor-positive distances.
        negative: parallel sequence of anchor-negative distances.

    Returns:
        float F1 score in [0, 1].

    Raises:
        ZeroDivisionError: if both sequences are empty.
    """
    # fix: dropped the misspelled, never-used local `fsocre` from the original
    true_positive = 0.0
    wrong = 0  # each mis-ranked triplet is simultaneously one FP and one FN
    for pos, neg in zip(positive, negative):
        if pos <= neg:
            true_positive += 1
        else:
            wrong += 1
    print('tp' + str(true_positive))
    print('fp' + str(wrong))
    print('fn' + str(wrong))
    return (2 * true_positive) / ((2 * true_positive) + 2 * wrong)
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Every word in the tokenizer's vocabulary (up to MAX_NB_WORDS) is looked
    up in the KazumaCharEmbedding; words it cannot embed stay all-zero rows.
    The layer is non-trainable so the pretrained vectors are kept fixed.
    """
    word_index = tokenizer.word_index
    # +1 because Keras tokenizer indices start at 1; row 0 stays the pad row
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        # presumably kz.emb returns a length-EMBEDDING_DIM list of floats,
        # all-zero when the word is unknown — TODO confirm
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            if sum(embedding_vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Vectorize and pad the anchor/negative/positive text lists.

    Returns a dict with the same three keys, each mapped to an array of
    MAX_SEQUENCE_LENGTH-padded token-id sequences.
    """
    sequences = {}
    for part in ('anchor', 'negative', 'positive'):
        tokenized = tokenizer.texts_to_sequences(texts[part])
        sequences[part] = pad_sequences(tokenized, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Read the input file and return its lines, trailing newlines preserved."""
    with open(filepath) as handle:
        return [record for record in handle]
def read_file(file_path):
    """Parse a '|'-separated triplet file into anchor/positive/negative lists.

    Each line must contain at least three fields: anchor|positive|negative
    (the third field keeps its trailing newline).  In DEBUG mode reading
    stops after DEBUG_DATA_LENGTH + 1 lines.

    Returns:
        dict with keys 'anchor', 'positive', 'negative' mapping to parallel
        lists of strings.

    Raises:
        IndexError: if a line has fewer than three '|'-separated fields.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # fix: the original leaked the file handle (open() with no close());
    # a context manager guarantees it is closed even on a parse error
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl, start=1):
            parts = line.split("|")
            texts['anchor'].append(parts[0])
            texts['positive'].append(parts[1])
            texts['negative'].append(parts[2])
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split the entity list into (train, test).

    In DEBUG mode a fixed prefix is used for reproducibility; otherwise the
    list is shuffled IN PLACE before the tail is carved off as the test set.
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    # fix: when the split rounds down to 0, ents[:-0] == [] made the TRAIN
    # set empty and the test set everything; return all-train instead
    if num_validation_samples == 0:
        return ents, []
    return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular triplet loss (Wang et al., "Deep Metric Learning with Angular Loss").

    y_pred is stacked so that [:, 0, 0] is the anchor-positive distance and
    [:, 1, 0] is the distance from the negative to the anchor/positive
    midpoint (see n_c_angular_distance).
    """
    import math  # local import: only needed for the constant tan(ALPHA)
    # BUG FIX: the original called T.tensor.tan(), but the theano import at
    # the top of this file is commented out, so building the graph raised a
    # NameError.  tan(ALPHA) is a compile-time constant (ALPHA is a plain
    # Python number), so compute it once with math.tan instead.
    four_tan_sq = K.constant(4.0 * math.tan(ALPHA) ** 2)
    a_p = y_pred[:,0,0]
    n_c = y_pred[:,1,0]
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - four_tan_sq * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    # FaceNet triplet loss (Schroff et al. 2015): hinge on squared
    # anchor-positive distance minus squared anchor-negative distance.
    # y_pred[:, 0, 0] is the a-p distance, y_pred[:, 1, 0] the a-n distance.
    margin = K.constant(MARGIN)
    return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    # "Modified" contrastive-style loss: pull the anchor-positive distance
    # toward 0 and push the anchor-negative distance toward MARGIN (no hinge).
    # margin = K.constant(MARGIN)
    # return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
    # return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]) + K.square(margin - y_pred[:,2,0]))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    # phi: triplet hinge where the "negative" side is the mean of the
    # anchor-negative and positive-negative distances.
    # psi: extra penalty (weight lambda_p) whenever the anchor-positive
    # distance exceeds an absolute threshold.
    margin = K.constant(MARGIN)
    lambda_p = 0.02
    threshold = 0.1
    a_p_distance = y_pred[:,0,0]
    a_n_distance = y_pred[:,1,0]
    p_n_distance = y_pred[:,2,0]
    phi = a_p_distance - ((a_n_distance + p_n_distance) / 2) + margin
    psi = a_p_distance - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    # fraction of triplets where the anchor-positive distance beats the
    # anchor-negative distance (y_true is ignored; Keras requires the arg)
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # project embeddings onto the unit hypersphere along the last axis
    return K.l2_normalize(x, axis=-1)
def tanhNorm(x):
    """Rescale x so its L2 norm becomes tanh(norm), squashing magnitudes into (0, 1)."""
    norm_sq = K.sum(K.square(x), axis=-1, keepdims=True)
    # epsilon floor avoids division by zero for all-zero vectors
    norm = K.sqrt(K.maximum(norm_sq, K.epsilon()))
    return x * (K.tanh(norm) / norm)
def euclidean_distance(vects):
    # batched L2 distance between two embedding tensors; the epsilon floor
    # keeps the sqrt gradient finite when the vectors coincide
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    # distance from the negative to the midpoint (centroid) of the
    # anchor/positive pair -- the "n-c" term of the angular loss
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    # anchor-positive L2 distance (the negative input is accepted but unused
    # so all three distance lambdas share one calling convention)
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten the anchor->aliases mapping into a text list plus index lookup.

    Returns:
        (texts, text2index): ``texts`` lists every anchor followed by its
        aliases in mapping order; ``text2index`` maps each string to its
        position in ``texts`` (a repeated string keeps its LAST position).
    """
    texts = []
    text2index = {}
    for anchor, aliases in entity2same.items():
        for name in [anchor] + list(aliases):
            text2index[name] = len(texts)
            texts.append(name)
    return texts, text2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine triplets by querying an Annoy index built over model embeddings.

    Embeds every unique name with ``model``, indexes the embeddings, then for
    each anchor collects its nearest neighbors: expected aliases that appear
    are matches, unexpected neighbors become hard negatives.  Also prints
    distance statistics over the mined pairs.

    Returns:
        recall fraction alone when ``test`` is True; otherwise
        (triplets dict with 'anchor'/'positive'/'negative' lists, recall).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    # NOTE: this local deliberately shadows the module-level accuracy() metric
    accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = expected_text
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # pair every mined hard negative with every known positive
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, limit_pairs=False):
    """Cleanse raw entity strings into a mapping of anchor name -> alias list.

    Entities that do not yield at least four name variants are dropped.
    """
    num_names = 4
    cleanser = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    entity2same = {}
    for raw in entities:
        variants = cleanser.cleanse_data(raw)
        if variants and len(variants) >= num_names:
            entity2same[variants[0]] = variants[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Return a model mapping a padded token-id sequence to a flat embedding vector."""
    return Sequential([embedding_layer, Flatten()])
def get_hidden_layer(name, net, is_last):
    """Append one 128-unit hidden layer (GRU or Dense, per USE_GRU) to net.

    Non-final GRU layers return the full sequence so another recurrent layer
    can be stacked on top; the last one collapses to a single vector.
    """
    if not USE_GRU:
        return Dense(128, activation='relu', name=name)(net)
    layer = GRU(128, return_sequences=not is_last, activation='relu', name=name)
    return layer(net)
def build_model(embedder):
    """Build the triplet-siamese network around a shared base encoder.

    Returns:
        (model, test_positive_model, test_negative_model, inter_model):
        the trainable triplet model, two models exposing the raw
        anchor-positive / anchor-negative distances for evaluation, and a
        model exposing just the anchor embedding for ANN indexing.
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # Stack of GRU encoders over the character embeddings; the final one
    # collapses the sequence into a single 128-d vector.
    net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
    net = GRU(128, activation='relu', name='embed3')(net)
    """
    for i in range(0, NUM_LAYERS):
        net = get_hidden_layer('embed' + str(i), net, False)
    net = get_hidden_layer('embed_last', net, True)
    """
    # if USE_L2_NORM:
    net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # one shared encoder for all three legs of the triplet
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        # BUG FIX: the original compiled with `loss=modified_loss`, a name not
        # defined anywhere in this file (NameError whenever USE_ANGULAR_LOSS
        # is False).  The commented-out arg parsing maps 'modified_loss' to
        # triplet_loss, so use that.
        model.compile(optimizer="rmsprop", loss=triplet_loss, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
help='sample size for debug run')
parser.add_argument('--margin', type=int,
help='margin')
parser.add_argument('--loss_function', type=str,
help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
args = parser.parse_args()
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved_triplet_loss':
LOSS_FUNCTION=improved_triplet_loss
elif args.loss_function == 'modified_loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = true
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
while test_match_stats < .9 and counter < num_iter:
counter += 1
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
filepath="weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 21,847 | 37.329825 | 167 | py |
fuzzyJoiner | fuzzyJoiner-master/old/Triplet_Iteration.py | from sys import argv
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
import numpy as np
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
import Named_Entity_Recognition_Modified
#must fix
MAX_NB_WORDS = 140000        # tokenizer vocabulary cap
EMBEDDING_DIM = 100          # dimensionality of the Kazuma char embeddings
MAX_SEQUENCE_LENGTH = 10     # names are padded/truncated to this many tokens
DEBUG = False                # when True, only DEBUG_DATA_LENGTH triplets are read
DEBUG_DATA_LENGTH = 100
def f1score(positive, negative):
    """Compute an F1-style score over paired triplet distances.

    Pair i is counted as a true positive when the anchor-positive distance
    positive[i] is no larger than the anchor-negative distance negative[i].
    Each miss is deliberately counted as both one false positive and one
    false negative (there is no separate negative class in this evaluation),
    so the score reduces to 2*tp / (2*tp + 2*misses).

    Args:
        positive: sequence of anchor-positive distances.
        negative: sequence of anchor-negative distances, same length.

    Returns:
        float F1 score in [0, 1]; 0.0 for empty input (the original raised
        ZeroDivisionError).
    """
    # Fixes vs. original: removed the unused, misspelled local `fsocre`,
    # replaced the index loop with zip, guarded the empty-input division.
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for pos, neg in zip(positive, negative):
        if pos <= neg:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    if denominator == 0:
        return 0.0
    return (2 * true_positive) / denominator
def get_random_image(img_groups, group_names, gid):
    """Return a random photo filename ("<group><photo>.jpg") for the
    group found at position *gid* of *group_names*."""
    group = group_names[gid]
    members = img_groups[group]
    pick = np.random.choice(np.arange(len(members)), size=1)[0]
    return group + members[pick] + ".jpg"
def get_embedding_layer(tokenizer):
    # Build a frozen Keras Embedding layer whose rows are Kazuma character
    # n-gram embeddings for every word the tokenizer has seen.
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1  # +1: Keras reserves index 0 for padding
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        # i = 0
        # while sum(embedding_vector) == 0 and i <= 1000:
        #     embedding_vector = k.emb(word)
        #     i++;
        # if i == 1000:
        #     print("fail")
        if embedding_vector is not None:
            # An all-zero vector means the embedding lookup failed for this word.
            if sum(embedding_vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    # trainable=False: the character embeddings stay fixed during training.
    embedding_layer = Embedding(num_words,
                        EMBEDDING_DIM,
                        weights=[embedding_matrix],
                        input_length=MAX_SEQUENCE_LENGTH,
                        trainable=False)
    return embedding_layer
def get_tokenizer(texts):
    """Fit a Keras Tokenizer on every name in the triplet dict *texts*."""
    corpus = texts['anchor'] + texts['negative'] + texts['positive']
    tok = Tokenizer(num_words=MAX_NB_WORDS)
    tok.fit_on_texts(corpus)
    return tok
def get_sequences(texts, tokenizer):
    """Vectorize each leg of the triplets and pad every sequence to
    MAX_SEQUENCE_LENGTH."""
    sequences = {}
    for leg in ('anchor', 'negative', 'positive'):
        vectorized = tokenizer.texts_to_sequences(texts[leg])
        sequences[leg] = pad_sequences(vectorized, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_file(file_path):
    """Read '|'-separated triplet lines ("anchor|positive|negative") into a
    dict of three parallel lists.

    The trailing newline is stripped from the negative field.  In DEBUG mode
    only the first DEBUG_DATA_LENGTH lines are consumed.

    Args:
        file_path: path to the triplet file.

    Returns:
        dict with 'anchor', 'positive' and 'negative' lists.

    Raises:
        OSError: if the file cannot be opened.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # Fix: the original opened the file without ever closing it; the context
    # manager guarantees the handle is released even on error.
    with open(file_path, 'r') as fl:
        i = 0
        for line in fl:
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            #removes the new line charecter at the end
            texts['negative'].append(line_array[2][:-1])
            i += 1
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def get_test(texts, sequences, percent):
    # Shuffle the triplets with one shared permutation and split the last
    # `percent` fraction off as a test set.  The padded sequences and the raw
    # texts are reordered with the same indices so they stay aligned.
    indices = np.arange(sequences['anchor'].shape[0])
    np.random.shuffle(indices)
    ret_sequence = {}
    ret_sequence['anchor'] = sequences['anchor'][indices]
    ret_sequence['positive'] = sequences['positive'][indices]
    ret_sequence['negative'] = sequences['negative'][indices]
    num_validation_samples = int(percent * sequences['anchor'].shape[0])
    # Training split: everything except the last num_validation_samples rows.
    ret_train = {}
    ret_train['anchor'] = ret_sequence['anchor'][:-num_validation_samples]
    ret_train['positive'] = ret_sequence['positive'][:-num_validation_samples]
    ret_train['negative'] = ret_sequence['negative'][:-num_validation_samples]
    # Test split: the last num_validation_samples rows.
    ret_test = {}
    ret_test['anchor']= ret_sequence['anchor'][-num_validation_samples:]
    ret_test['positive']= ret_sequence['positive'][-num_validation_samples:]
    ret_test['negative'] = ret_sequence['negative'][-num_validation_samples:]
    # Reorder the raw texts with the same permutation (returned unsplit).
    ret_texts = {}
    texts['anchor'] = np.array(texts['anchor'])
    texts['positive'] = np.array(texts['positive'])
    texts['negative'] = np.array(texts['negative'])
    ret_texts['anchor'] = texts['anchor'][indices]
    ret_texts['positive'] = texts['positive'][indices]
    ret_texts['negative'] = texts['negative'][indices]
    return ret_train, ret_test, ret_texts
def triplet_loss(y_true, y_pred):
    # Triplet objective on stacked distances: y_pred[:,0,0] is the
    # anchor-positive distance, y_pred[:,1,0] the anchor-negative distance.
    # y_true is ignored (Keras merely requires a target tensor).
    margin = K.constant(1)
    return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def accuracy(y_true, y_pred):
    # Fraction of triplets whose anchor-positive distance is smaller than
    # the anchor-negative distance (y_true is unused).
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # L2-normalize along the last axis so embeddings lie on the unit sphere.
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    # Row-wise Euclidean distance between two batches of vectors; the
    # K.epsilon() floor keeps the sqrt gradient finite at zero distance.
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def assign_triplets(data, model):
    # Re-mine the negatives of the triplets in `data` using approximate
    # nearest neighbors in the current model's embedding space: each anchor
    # gets up to 5 new negatives drawn from its nearest neighbors.
    sequences = np.concatenate((data['positive'], data['negative']))
    unique_sequence = []
    anchor_place = {}    # tuple(anchor row) -> its index in unique_sequence
    place_to_item = {}   # index in unique_sequence -> original row
    unique_set = set([])
    # Deduplicate anchors first so every anchor has an annoy item id ...
    for item in data['anchor']:
        print(item)
        item2 = tuple(item)
        if item2 not in unique_set:
            anchor_place[item2] = len(unique_sequence)
            place_to_item[len(unique_sequence)] = item
            unique_sequence.append(item)
            unique_set.add(item2)
    # ... then add the remaining positive/negative rows.
    for item in sequences:
        item2 = tuple(item)
        if item2 not in unique_set:
            place_to_item[len(unique_sequence)] = item
            unique_sequence.append(item)
            unique_set.add(item2)
    #make annoy index
    unique_sequence = np.array(unique_sequence)
    predictions = model.predict(unique_sequence)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    for i in range(len(predictions)):
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    #create nearest neighbors list
    anchor_to_nearest = {}
    new_data = {}
    new_data['anchor'] = []
    new_data['positive'] = []
    new_data['negative'] = []
    index = 0
    # Two-pass per anchor: the first visit fetches its neighbor list (without
    # advancing `index`), subsequent visits pop one neighbor per triplet.
    while index < len(data['anchor']):
        name = data['anchor'][index]
        hash_name = tuple(name)
        if hash_name in anchor_to_nearest:
            if anchor_to_nearest[hash_name]:
                new_data['anchor'].append(name)
                new_data['negative'].append(place_to_item[anchor_to_nearest[hash_name].pop()])
                new_data['positive'].append(data['positive'][index])
            index += 1
        else:
            anchor_to_nearest[hash_name] = t.get_nns_by_item(anchor_place[hash_name], 5)
    new_data['anchor'] = np.array(new_data['anchor'])
    new_data['positive'] = np.array(new_data['positive'])
    new_data['negative'] = np.array(new_data['negative'])
    return new_data
def do_annoy(model, texts, tokenizer, verbose):
    # Evaluate retrieval quality: embed every unique name with `model`, build
    # an annoy index, and for each anchor count how many of its known aliases
    # appear among its 5 nearest neighbors.  Prints match/no-match totals.
    unique_text = []
    entity_idx = []          # positions of the anchors within unique_text
    entity2same = {}         # anchor text -> list of alias texts
    for i in range(len(texts['anchor'])):
        if not texts['anchor'][i] in entity2same:
            entity2same[texts['anchor'][i]] = []
            entity_idx.append(len(unique_text))
            unique_text.append(texts['anchor'][i])
        l = entity2same[texts['anchor'][i]]
        if texts['positive'][i] not in l:
            entity2same[texts['anchor'][i]].append(texts['positive'][i])
            unique_text.append(texts['positive'][i])
    print(entity2same)
    print(unique_text)
    sequences = tokenizer.texts_to_sequences(unique_text)
    sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    for i in range(len(predictions)):
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    for index in entity_idx:
        nearest = t.get_nns_by_vector(predictions[index], 5)
        print(nearest)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[unique_text[index]])
        # annoy returns the query item itself as a neighbor; drop it.
        nearest_text.remove(unique_text[index])
        print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        if verbose:
            print([t.get_distance(index, i) for i in nearest])
        overlap = expected_text.intersection(nearest_text)
        print(overlap)
        m = len(overlap)
        match += m
        no_match += len(expected_text) - m
    print("match: {} no_match: {}".format(match, no_match))
def print_deb_data(debbuging_data):
    """Print the first `number` debug triplets, both as raw text and as
    their padded token sequences."""
    texts = debbuging_data['texts']
    seqs = debbuging_data['sequences']
    for i in range(debbuging_data['number']):
        print('anch: --{}-- pos:--{}-- neg:--{}--'.format(texts['anchor'][i], texts['positive'][i], texts['negative'][i]))
        print('sequences: anch: --{}-- pos:--{}-- neg:--{}--'.format(seqs['anchor'][i], seqs['positive'][i], seqs['negative'][i]))
def debugging_text_and_sequences(reordered_text, training_data, number):
    """Collect the first *number* triplets — raw text plus padded
    sequences — into one dict for debug printing."""
    legs = ('anchor', 'positive', 'negative')
    debbuging_data = {'number': number}
    debbuging_data['sequences'] = {
        leg: [training_data[leg][i] for i in range(number)] for leg in legs
    }
    debbuging_data['texts'] = {
        leg: [reordered_text[leg][i] for i in range(number)] for leg in legs
    }
    return debbuging_data
# ---- Main script: load triplets, build the siamese model, train, evaluate ----
# triples_data = create_triples(IMAGE_DIR)
texts = read_file(argv[1])
print("anchor: {} positive: {} negative: {}".format(texts['anchor'][0], texts['positive'][0], texts['negative'][0]))
tokenizer = get_tokenizer(texts)
print('got tokenizer')
sequences = get_sequences(texts, tokenizer)
train_data, test_data, reordered_text = get_test(texts, sequences, 0.2)
debbuging_data = debugging_text_and_sequences(reordered_text, train_data, 20)
number_of_names = len(train_data['anchor'])
print('sequenced words')
# Dummy targets: the triplet loss ignores y_true, Keras just needs a tensor.
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
embedder = get_embedding_layer(tokenizer)
print('got embeddings')
# Shared embedding tower: frozen char embeddings -> 3 dense layers -> L2 norm.
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
net = Flatten(name='flatten')(net)
net = Dense(128, activation='relu', name='embed')(net)
net = Dense(128, activation='relu', name='embed2')(net)
net = Dense(128, activation='relu', name='embed3')(net)
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
# Siamese wiring: the same base_model embeds anchor, positive and negative.
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
positive_dist = Lambda(euclidean_distance, name='pos_dist')([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist')([net_anchor, net_negative])
stacked_dists = Lambda(
    lambda vects: K.stack(vects, axis=1),
    name='stacked_dists'
)([positive_dist, negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=triplet_loss, metrics=[accuracy])
# Three rounds of ANN-based negative re-mining followed by training.
for x in range(3):
    train_data = assign_triplets(train_data, base_model)
    print(train_data)
    number_of_names = len(train_data['anchor'])
    Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
    print('fitting round' + str(x))
    model.fit([train_data['anchor'], train_data['positive'], train_data['negative']], Y_train, epochs=5, batch_size=15, validation_split=0.25)
# Probe models exposing the two raw distances for evaluation.
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
print("training data predictions")
positives = test_positive_model.predict([train_data['anchor'], train_data['positive'], train_data['negative']])
negatives = test_negative_model.predict([train_data['anchor'], train_data['positive'], train_data['negative']])
print("f1score is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_data['anchor'], test_data['positive'], test_data['negative']])
negatives = test_negative_model.predict([test_data['anchor'], test_data['positive'], test_data['negative']])
print("f1score is: {}".format(f1score(positives, negatives)))
# model.save('triplet_loss_resnet50.h5')
# Retrieval evaluation with annoy on the trained anchor tower.
inter_model = Model(input_anchor, net_anchor)
do_annoy(inter_model, texts, tokenizer, False)
print('annoy on embeddings for debbuging_data')
do_annoy(Named_Entity_Recognition_Modified.embedded_representation(embedder), debbuging_data['texts'], tokenizer, True)
print('annoy on full model for debbuging_data')
do_annoy(inter_model, debbuging_data['texts'], tokenizer, True)
print_deb_data(debbuging_data) | 14,236 | 37.374663 | 199 | py |
fuzzyJoiner | fuzzyJoiner-master/old/ContrastiveLossLSTM-8.20.18.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
import argparse
#must fix
MAX_NB_WORDS = 140000            # tokenizer vocabulary cap
EMBEDDING_DIM = 100              # dimensionality of the Kazuma char embeddings
MAX_SEQUENCE_LENGTH = 10         # names are padded/truncated to this many tokens
MARGIN=10                        # margin used by triplet_loss
ALPHA=45                         # angle bound used by angular_loss
DEBUG = False                    # when True, only DEBUG_DATA_LENGTH entities are used
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False                # when True, only run the ANN evaluation and exit
USE_ANGULAR_LOSS=False           # overridden from --loss_function below
LOSS_FUNCTION=None               # selected from --loss_function below
TRAIN_NEIGHBOR_LEN=20            # neighbors fetched per anchor when mining train triplets
TEST_NEIGHBOR_LEN=20             # neighbors fetched per anchor at test time
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3                   # overridden from --num_layers below
USE_L2_NORM = False              # overridden from --use_l2_norm below
output_file_name_for_hpo = "val_dict_list.json"  # ANN accuracy dumped here for HPO
def f1score(positive, negative):
    """Compute an F1-style score over paired triplet distances.

    Pair i is counted as a true positive when the anchor-positive distance
    positive[i] is no larger than the anchor-negative distance negative[i].
    Each miss is deliberately counted as both one false positive and one
    false negative (there is no separate negative class in this evaluation),
    so the score reduces to 2*tp / (2*tp + 2*misses).

    Args:
        positive: sequence of anchor-positive distances.
        negative: sequence of anchor-negative distances, same length.

    Returns:
        float F1 score in [0, 1]; 0.0 for empty input (the original raised
        ZeroDivisionError).
    """
    # Fixes vs. original: removed the unused, misspelled local `fsocre`,
    # replaced the index loop with zip, guarded the empty-input division.
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for pos, neg in zip(positive, negative):
        if pos <= neg:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    if denominator == 0:
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    # Build a frozen Keras Embedding layer whose rows are Kazuma character
    # n-gram embeddings for every word the tokenizer has seen.
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1  # +1: Keras reserves index 0 for padding
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            # An all-zero vector means the embedding lookup failed for this word.
            if sum(embedding_vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    # trainable=False: the character embeddings stay fixed during training.
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Vectorize each leg of the triplets and pad every sequence to
    MAX_SEQUENCE_LENGTH."""
    sequences = {}
    for leg in ('anchor', 'negative', 'positive'):
        vectorized = tokenizer.texts_to_sequences(texts[leg])
        sequences[leg] = pad_sequences(vectorized, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Return the raw lines of *filepath* as a list (newlines preserved)."""
    with open(filepath) as fl:
        return list(fl)
def read_file(file_path):
    """Read '|'-separated triplet lines ("anchor|positive|negative") into a
    dict of three parallel lists.

    Unlike the Triplet_Iteration variant, the negative field keeps its
    trailing newline.  In DEBUG mode only the first DEBUG_DATA_LENGTH lines
    are consumed.

    Args:
        file_path: path to the triplet file.

    Returns:
        dict with 'anchor', 'positive' and 'negative' lists.

    Raises:
        OSError: if the file cannot be opened.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # Fix: the original opened the file without ever closing it; the context
    # manager guarantees the handle is released even on error.
    with open(file_path, 'r') as fl:
        i = 0
        for line in fl:
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            i += 1
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split *entities* into (train, test) lists.

    In DEBUG mode the first DEBUG_DATA_LENGTH entities are taken unshuffled;
    otherwise *entities* is shuffled in place first.
    NOTE(review): when int(test_split * len) == 0, ents[:-0] yields an empty
    train split — same behavior as the original.
    """
    if DEBUG:
        ents = entities[:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    holdout = int(test_split * len(ents))
    return ents[:-holdout], ents[-holdout:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    # Angular loss: penalize a_p^2 - 4*tan^2(alpha)*n_c^2 when positive, where
    # a_p = anchor-positive distance (y_pred[:,0,0]) and n_c = distance from
    # the negative to the anchor/positive midpoint (y_pred[:,1,0]).
    # NOTE(review): ALPHA (=45) is passed to tf.tan without a degree->radian
    # conversion — confirm the intended units.
    alpha = K.constant(ALPHA)
    a_p = y_pred[:,0,0]
    n_c = y_pred[:,1,0]
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    # FaceNet-style triplet loss: hinge on squared distances with a fixed
    # 0.2 margin; y_pred stacks [anchor-pos, anchor-neg] distances.
    margin = K.constant(0.2)
    return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    # Contrastive-style objective: pull the anchor-positive distance to zero
    # and push the anchor-negative distance beyond MARGIN.
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(K.maximum(margin - y_pred[:,1,0], K.constant(0))))
def triplet_tanh_loss(y_true, y_pred):
    # Margin-free variant: squash both distances through tanh and minimize
    # tanh(pos) while maximizing tanh(neg).
    return K.mean(K.tanh(y_pred[:,0,0]) + (K.constant(1) - K.tanh(y_pred[:,1,0])))
def triplet_tanh_pn_loss(y_true, y_pred):
    # Like triplet_tanh_loss, but also averages in the positive-negative
    # distance term (y_pred[:,2,0]).
    return K.mean(K.tanh(y_pred[:,0,0]) +
                  ((K.constant(1) - K.tanh(y_pred[:,1,0])) +
                   (K.constant(1) - K.tanh(y_pred[:,2,0]))) / K.constant(2));
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    # y_pred stacks [anchor-pos, anchor-neg, pos-neg] distances.
    # phi enforces the margin against the mean of both negative distances;
    # psi additionally pulls anchor-positive pairs under `threshold`,
    # weighted by lambda_p.
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    a_p_distance = y_pred[:,0,0]
    a_n_distance = y_pred[:,1,0]
    p_n_distance = y_pred[:,2,0]
    phi = a_p_distance - ((a_n_distance + p_n_distance) / K.constant(2)) + margin
    psi = a_p_distance - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    # Fraction of triplets whose anchor-positive distance is smaller than
    # the anchor-negative distance (y_true is unused).
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # L2-normalize along the last axis so embeddings lie on the unit sphere.
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    # Row-wise Euclidean distance between two batches of vectors; the
    # K.epsilon() floor keeps the sqrt gradient finite at zero distance.
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    # Distance from the negative embedding to the anchor/positive midpoint
    # (the n_c term of the angular loss).
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    # Anchor-positive Euclidean distance; the negative input is ignored but
    # kept so both distance lambdas share one input signature.
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten the entity -> aliases map into a single list of names plus a
    name -> position index (each key followed by its aliases, in map order)."""
    unique_text = []
    entity2index = {}
    for key, aliases in entity2same.items():
        for name in (key, *aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    # Embed every unique name with `model`, build an annoy index, and use the
    # nearest neighbors of each anchor to (a) measure retrieval quality and
    # (b) mine hard triplets (anchor, alias, near-but-wrong name).
    # Returns match ratio only when `test` is True, else (triplets, ratio).
    # Side effect: dumps the in-index accuracy to output_file_name_for_hpo.
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean')  # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor.  Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # Record distances and check whether the index already orders each
        # (positive, negative) pair correctly.
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # Emit one triplet per (expected alias, hard negative) pair.
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    # Persist ANN accuracy for the hyper-parameter-optimization harness.
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity strings into a canonical-name -> aliases map.

    People entities must yield at least 4 name variants, companies at least
    2; entities producing fewer variants are dropped.  The first variant
    becomes the key, the rest its aliases.
    """
    if people:
        num_names = 4
        generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    else:
        num_names = 2
        generator = CompanyDataCleanser(limit_pairs)
    entity2same = {}
    for entity in entities:
        variants = generator.cleanse_data(entity)
        if variants and len(variants) >= num_names:
            entity2same[variants[0]] = variants[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap the embedding layer in a Sequential model that flattens its
    output — a raw character-embedding representation of a name."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def build_model(embedder):
    # Build the triplet network: a shared GRU tower over the frozen
    # embeddings, duplicated for anchor/positive/negative, with the loss
    # applied to stacked pairwise distances.  Returns (training model,
    # pos-distance probe, neg-distance probe, single-input embedding model).
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # NUM_LAYERS stacked GRUs returning sequences, then one final GRU that
    # collapses to a 128-d vector.
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    # Siamese wiring: the same base_model embeds all three inputs.
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        # Angular loss consumes (anchor-positive, negative-to-midpoint) distances.
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # The other losses may also use the positive-negative distance.
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
                    # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    # Probe models exposing the raw distances, and a single-input embedder.
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# ---- Command-line configuration: loss choice, margins, model shape ----
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
args = parser.parse_args()
# Map the --loss_function string onto the loss callables defined above.
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
# --use_l2_norm arrives as a string; treat yes/true/t/1 as True.
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
# build_model adds one final GRU on top of NUM_LAYERS stacked GRUs.
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text.  These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
# Mine a fixed test triplet set once from the raw-embedding model.
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
# Single mining + training pass (the iterative loop of other variants is
# absent here): mine triplets, fit with checkpoint/early-stop, report stats.
counter = 0
current_model = embedder_model
prev_match_stats = 0
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
# Dummy targets: the triplet losses ignore y_true, Keras just needs a tensor.
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
filepath="weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 19,575 | 35.86629 | 163 | py |
fuzzyJoiner | fuzzyJoiner-master/old/seq2seq.py | '''Sequence to sequence example in Keras (character-level).
This script demonstrates how to implement a basic character-level
sequence-to-sequence model. We apply it to translating
short English sentences into short French sentences,
character-by-character. Note that it is fairly unusual to
do character-level machine translation, as word-level
models are more common in this domain.
# Summary of the algorithm
- We start with input sequences from a domain (e.g. English sentences)
    and corresponding target sequences from another domain
(e.g. French sentences).
- An encoder LSTM turns input sequences to 2 state vectors
(we keep the last LSTM state and discard the outputs).
- A decoder LSTM is trained to turn the target sequences into
the same sequence but offset by one timestep in the future,
a training process called "teacher forcing" in this context.
    It uses as initial state the state vectors from the encoder.
Effectively, the decoder learns to generate `targets[t+1...]`
given `targets[...t]`, conditioned on the input sequence.
- In inference mode, when we want to decode unknown input sequences, we:
- Encode the input sequence into state vectors
- Start with a target sequence of size 1
(just the start-of-sequence character)
- Feed the state vectors and 1-char target sequence
to the decoder to produce predictions for the next character
- Sample the next character using these predictions
(we simply use argmax).
- Append the sampled character to the target sequence
- Repeat until we generate the end-of-sequence character or we
hit the character limit.
# Data download
English to French sentence pairs.
http://www.manythings.org/anki/fra-eng.zip
Lots of neat sentence pairs datasets can be found at:
http://www.manythings.org/anki/
# References
- Sequence to Sequence Learning with Neural Networks
https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
RNN Encoder-Decoder for Statistical Machine Translation
https://arxiv.org/abs/1406.1078
'''
from __future__ import print_function
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
batch_size = 64 # Batch size for training.
epochs = 100 # Number of epochs to train for.
latent_dim = 256 # Latent dimensionality of the encoding space.
num_samples = 10000 # Number of samples to train on.
# Path to the data txt file on disk.
data_path = 'fra-eng/fra.txt'
# Vectorize the data.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
lines = f.read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
input_text, target_text = line.split('\t')
# We use "tab" as the "start sequence" character
# for the targets, and "\n" as "end sequence" character.
target_text = '\t' + target_text + '\n'
input_texts.append(input_text)
target_texts.append(target_text)
for char in input_text:
if char not in input_characters:
input_characters.add(char)
for char in target_text:
if char not in target_characters:
target_characters.add(char)
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
input_token_index = dict(
[(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
[(char, i) for i, char in enumerate(target_characters)])
encoder_input_data = np.zeros(
(len(input_texts), max_encoder_seq_length, num_encoder_tokens),
dtype='float32')
decoder_input_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype='float32')
decoder_target_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype='float32')
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
for t, char in enumerate(input_text):
encoder_input_data[i, t, input_token_index[char]] = 1.
for t, char in enumerate(target_text):
# decoder_target_data is ahead of decoder_input_data by one timestep
decoder_input_data[i, t, target_token_index[char]] = 1.
if t > 0:
# decoder_target_data will be ahead by one timestep
# and will not include the start character.
decoder_target_data[i, t - 1, target_token_index[char]] = 1.
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2)
# Save model
model.save('s2s.h5')
# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state
# and a "start of sequence" token as target.
# Output will be the next target token
# 3) Repeat with the current target token and current states
# Define sampling models
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
# Reverse-lookup token index to decode sequences back to
# something readable.
reverse_input_char_index = dict(
(i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
(i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
    """Greedily decode one encoded input sequence into a target string.

    Runs the encoder once to get the initial LSTM states, then feeds the
    decoder one character at a time (starting from the '\t' start token),
    always picking the argmax character, until '\n' is produced or the
    maximum target length is exceeded.  Relies on the module-level
    `encoder_model`, `decoder_model`, token indexes and length constants.
    """
    # Encode the input once; these states seed the decoder.
    states = encoder_model.predict(input_seq)

    # One-hot start-of-sequence token as the first decoder input.
    next_input = np.zeros((1, 1, num_decoder_tokens))
    next_input[0, 0, target_token_index['\t']] = 1.

    decoded = ''
    while True:
        probs, state_h, state_c = decoder_model.predict(
            [next_input] + states)

        # Greedy sampling: take the most likely next character.
        best_id = np.argmax(probs[0, -1, :])
        best_char = reverse_target_char_index[best_id]
        decoded += best_char

        # Stop on end-of-sequence marker or when the output gets too long.
        if best_char == '\n' or len(decoded) > max_decoder_seq_length:
            break

        # Feed the sampled character and updated states back in.
        next_input = np.zeros((1, 1, num_decoder_tokens))
        next_input[0, 0, best_id] = 1.
        states = [state_h, state_c]
    return decoded
for seq_index in range(100):
# Take one sequence (part of the training set)
# for trying out decoding.
input_seq = encoder_input_data[seq_index: seq_index + 1]
decoded_sentence = decode_sequence(input_seq)
print('-')
print('Input sentence:', input_texts[seq_index])
print('Decoded sentence:', decoded_sentence) | 9,104 | 40.013514 | 79 | py |
fuzzyJoiner | fuzzyJoiner-master/old/matcher.py | #using tutorial https://suhas.org/sqlalchemy-tutorial/
from sys import argv
from matcher_functions import *
#establish connection to database
con, meta = connect(argv[1], argv[2], argv[3])
#load pairs from database
aliases = get_aliases(con, meta)
#create dictionaries assingning serial numbers to names and names from serial numbers
num_to_word, word_to_num = create_double_num_dicts(aliases)
#load the buckets from the database bucket_list is aranges as follows:
#bucket_list[pair_of_buckets][bucket(this must be 0 or 1)][name (this represents a single name)][0 for number and 1 for pre-procced name]
bucket_list, bucket_words = load_good_buckets('wordtable1', 'wordtable2', word_to_num, con, meta)
#print out the number of names that are possible to get just based on bucketing:
impossible = get_impossible(aliases, bucket_list, num_to_word)
print("possible matches: " + str(len(aliases) - len(impossible)))
#next make a list to store the outcomes of all our tests:
matches_list = []
#then run our tests
matches_list.append(run_test(lambda x : x.replace(" ", ""), lambda name1, name2 : name1 in name2 or name2 in name1, num_to_word, bucket_list))
matches_list.append(run_test(lambda x : set(x.split()), lambda name1, name2 : name1.issubset(name2) or name2.issubset(name1), num_to_word, bucket_list))
matches_list.append(run_special_test(bucket_list, num_to_word))
#next create a test dictionary relating each item in the first set to k items in other set
test_dict = make_test_dict(set([]).union(*matches_list), 1000)
#use this dictionary to calculate and print the f-score
print("fscore: " + str(fscore(aliases, test_dict, 1)))
#next export the items we missed
export_missed(aliases, test_dict, con, meta)
#lastly export the items we could not have gotten since they were not in the same bucket:
export_unbucketed(impossible, con, meta) | 1,840 | 62.482759 | 152 | py |
fuzzyJoiner | fuzzyJoiner-master/old/cleanser.py | from sys import argv
from os import listdir
from os.path import isfile, join
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-f', dest="input_file or directory", help="file to cleanse file")
parser.add_argument('-o', dest="output_file", help="cleansed data file name")
parser.add_argument('-t', dest="entity_type", help="names or people")
parser.add_argument('-u', dest="function", help="use test parsing function")
parser.add_argument('-n', dest="number", help="process only this many from the file (useful for debugging)")
args = parser.parse_args()
if args.entity_type == 'names':
cleaner = NameDataCleanser(number)
else:
cleaner = GenericDataCleanser(entity_type, function, number)
cleaner.parse_file(args.filename, args.output, args.rejects)
if len(args) >= 3:
parsing_function = self.good_name_data
if len(args) >= 4:
parsing_function = self.function_dictionary[args[3]]
if len(args) >= 6:
self.number_of_names = int(args[5])
if args[4] == "test":
parsing_function = self.function_dictionary[(args[3] + args[4])]
elif args[4] == "get":
self.get = True
onlyfiles = [f for f in listdir(args[1]) if isfile(join(args[1], f))]
output_file = open(args[2], "w", encoding='utf-8')
output_rejects_file = open('./rejects.txt', 'w', encoding='utf-8')
output_file = open(args[2], "w", encoding='utf-8')
for file_path in onlyfiles:
input_file = open(args[1] + "/" + file_path, encoding='utf-8')
self.parse_file(input_file, output_file, parsing_function, output_rejects_file)
output_rejects_file.close()
input_file.close()
#test_file.close();
output_file.close()
else:
print ("too few arguments please enter arguments in the following format: input_file output_file [function [get x OR test x]]")
cleaner = GenericDataCleanser()
cleaner.clean_file(argv) | 1,994 | 38.9 | 130 | py |
fuzzyJoiner | fuzzyJoiner-master/old/image_join3.py | 0 | 0 | 0 | py | |
fuzzyJoiner | fuzzyJoiner-master/old/matcher_class.py | from matcher_functions import load_good_buckets, create_double_num_dicts, connect, get_aliases
class matcher(object):
    """Rule-based name matcher.

    On construction it loads word buckets from the database and builds
    `rarity_match`, a mapping between names that share very small (rare)
    buckets.  `match` then applies a list of (transform, compare) rule
    pairs and falls back to the rarity table.
    """
    def __init__(self, user, password, database, test_pairs, bucket_number):
        con, meta = connect(user, password, database)
        num_to_word, word_to_num = create_double_num_dicts(get_aliases(con, meta))
        bucket_list, bucket_words = load_good_buckets('wordtable1', 'wordtable2', word_to_num, con, meta)
        # Link every name in a small-enough bucket on side 1 to every name
        # in the paired bucket on side 2 (last assignment wins per key).
        self.rarity_match = {}
        for bucket_pair in bucket_list:
            names1, names2 = bucket_pair[0], bucket_pair[1]
            if len(names1) <= bucket_number and len(names2) <= bucket_number:
                for entry1 in names1:
                    for entry2 in names2:
                        self.rarity_match[entry1[1]] = entry2[1]
        self.test_pairs = test_pairs
    def special_test(self, name1, name2):
        # True when the rarity table links the names in either direction.
        forward = name1 in self.rarity_match and self.rarity_match[name1] == name2
        backward = name2 in self.rarity_match and self.rarity_match[name2] == name1
        return forward or backward
    def match(self, name1, name2):
        # Try each (transform, compare) rule; any hit is a match.
        for transform, compare in self.test_pairs:
            if compare(transform(name1), transform(name2)):
                return True
        # No rule fired: fall back to the rarity-bucket heuristic.
        return self.special_test(name1, name2)
| 1,366 | 46.137931 | 105 | py |
fuzzyJoiner | fuzzyJoiner-master/old/Named_Entity_Recognition_Modified.py | """
This code is modified from
https://github.com/fchollet/keras/blob/master/examples/pretrained_word_embeddings.py
and ttps://github.com/fchollet/keras/blob/master/examples/
for our own purposes
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from matcher_functions import connect, get_aliases, load_good_buckets, create_double_num_dicts
from matcher_class import matcher
import os
from sqlalchemy import Table, Column, Integer, String, ForeignKey, Float
import sys
from sys import argv
import numpy as np
from embeddings import KazumaCharEmbedding
import random
from annoy import AnnoyIndex
import random
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from keras.utils import to_categorical
from keras.optimizers import RMSprop
from keras import backend as K
from keras import regularizers
from keras.regularizers import L1L2
from sqlalchemy.sql import select
import random
import argparse
BASE_DIR = './Machine_Learning/'
GLOVE_DIR = BASE_DIR + 'glove/'
TEXT_DATA_DIR = BASE_DIR + 'nerData'
DO_ANN_ON_EMBEDDINGS = False
# number of words an entity is allowed to have
# distribution of number of words in peoples names can be found in peopleNamesDisbn
# distribution of number of words in company names can be found in companyNamesDisbn
# Note most of the names above that are fairly esoteric or just plain noise. Included is
# python code to remove them
MAX_SEQUENCE_LENGTH = 10
# Total number of unique tokens in peoples names is 90K, including a lot of non-English names. To remove those
# we use an egregious hack to check if its UTF-8
# Total number of unique tokens in company names is 37K
# Assuming no overlap between the two we get about 127K. We may need to tweak this parameter as we go
# but according to the Keras documentation, this can even be left unset
MAX_NB_WORDS = 150000
# Size of embeddings from Glove (we will try the 100 dimension encoding to start with)
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.05
def check_for_zeroes(to_check, intro_string):
    """Print every all-zero vector in `to_check`, prefixed by `intro_string`,
    then print either a "no problems" message or the count of zero vectors.

    Used as a sanity check that tokenization/embedding produced non-zero
    vectors.  Returns None; output goes to stdout only.
    """
    zero_count = 0
    for vec in to_check:
        if not sum(vec):
            print(intro_string + str(vec))
            zero_count += 1
    if zero_count:
        print(intro_string + ' found this many: ' + str(zero_count))
    else:
        print(intro_string + " no problems found")
def get_diff_names_with_overlap(con, aliases):
    """For each anchor name in `aliases`, pick a name of a different entity
    that shares at least one word with it (a "hard" negative example).

    aliases: rows of (alias1, alias2, entityid); alias1 is the anchor.
    con: DB connection exposing execute(); reads the word2entities table.
    Returns a list parallel to `aliases`: the overlapping other-entity name
    when one is found, otherwise the next anchor in the list (wrapping
    around at the end).
    """
    no_match_texts = []
    texts = []
    # Map entity id -> all names (both aliases) of that entity.
    entityid2names = {}
    for row in aliases:
        if row[2] in entityid2names:
            names = entityid2names[row[2]]
        else:
            names = []
            entityid2names[row[2]] = names
        names.append(row[0])
        names.append(row[1])
        texts.append(row[0])
    print(len(aliases))
    print("getting word2entities")
    # Map word -> entities whose names contain that word (precomputed table).
    word2entities = {}
    rows = con.execute("select word, entities from word2entities;")
    for row in rows:
        word2entities[row[0]] = row[1]
    for index in range(len(texts)):
        name_arr = texts[index].split()
        new_text = ''
        for n in name_arr:
            # a name part may have been filtered out of word2entities
            if n not in word2entities:
                continue
            if new_text:
                break
            for e in word2entities[n]:
                # skip the anchor itself and entities we have no names for
                if e == texts[index] or e not in entityid2names:
                    continue
                for x in entityid2names[e]:
                    if n in x:
                        new_text = x
                        break
        if new_text:
            no_match_texts.append(new_text)
        else:
            # BUG FIX: the original appended texts[index + 1], which raises
            # IndexError when the *last* anchor has no overlapping name.
            # Wrap around instead (same fallback as get_no_match_texts).
            no_match_texts.append(texts[(index + 1) % len(texts)])
    print("done processing matches with overlap")
    return no_match_texts
def get_diff_names_with_no_overlap(aliases):
    """Return the anchor names of `aliases` randomly deranged: a shuffled
    copy in which no name stays at its own index, so each anchor gets a
    random *different* entity's name as an easy negative example.

    aliases: rows whose first element is the anchor name.
    For a single-element input a self-pairing is unavoidable and the lone
    name is returned as-is (the original code crashed in that case).
    """
    entitylist = [row[0] for row in aliases]
    n = len(entitylist)
    s = list(range(n))
    random.shuffle(s)
    # Repair fixed points left by the shuffle by swapping forward; since
    # s[i] == i the swap writes i into s[i+1], which cannot be a new fixed
    # point.
    for i in range(n - 1):
        if s[i] == i:
            s[i], s[i + 1] = s[i + 1], s[i]
    # BUG FIX: the original ran the swap above at the last index too and
    # read s[i + 1], raising IndexError whenever the shuffle left the final
    # element in place.  Swap with slot 0 instead: s[0] != n - 1 here (the
    # permutation already has n - 1 at the end), and n - 1 != 0 for n > 1,
    # so no fixed point is reintroduced.
    if n > 1 and s[-1] == n - 1:
        s[-1], s[0] = s[0], s[-1]
    return [entitylist[j] for j in s]
#this returns a new set of texts to use as similar non-matches for texts1
def get_no_match_texts(user, password, db, texts1):
    """Return, for each name in texts1, a bucketed non-matching name.

    Connects to the database, rebuilds the word buckets, and for every
    anchor name tries to pick another name that shares its bucket words
    but is not a known alias pair with it.  Falls back to the next anchor
    (wrapping around) when no bucketed candidate exists.
    """
    def get_non_match(name1, bucket_words, matching_set):
        # Every word of name1 must have a bucket; note only the bucket of
        # the *last* word survives the loop and is searched below.
        for word in name1.split(" "):
            if word in bucket_words:
                bucket = bucket_words[word]
            else:
                return None
        if len(bucket[1]) > 1:
            for name2 in bucket[1]:
                # NOTE(review): this membership test assumes matching_set
                # contains (name1, name2) pairs -- verify against the shape
                # returned by get_aliases.
                if (name1, name2[1]) not in matching_set:
                    return name2[1]
        return None
    no_match_texts = []
    #NOTE: the connection and bucket loading below should be factored into a
    #single helper in matcher_functions instead of being repeated here.
    #establish connection to database
    con, meta = connect(user, password, db)
    #load pairs from database
    aliases = get_aliases(con, meta)
    #create dictionaries assigning serial numbers to names and names from serial numbers
    num_to_word, word_to_num = create_double_num_dicts(aliases)
    #load the buckets from the database; bucket_list is arranged as follows:
    #bucket_list[pair_of_buckets][bucket (must be 0 or 1)][name][0 for number, 1 for pre-processed name]
    bucket_list, bucket_words = load_good_buckets('wordtable1', 'wordtable2', word_to_num, con, meta)
    for index in range(len(texts1)):
        new_text = get_non_match(texts1[index], bucket_words, aliases)
        if new_text == None:
            # no bucketed candidate found: use the next anchor, wrapping
            new_text = texts1[(index + 1) % len(texts1)]
        no_match_texts.append(new_text)
    return no_match_texts
def euclidean_distance(vects):
    """Batched Euclidean distance between two tensors (Keras backend ops).

    vects: pair of tensors of shape (batch, features).  Returns a
    (batch, 1) tensor; the sum is clamped to K.epsilon() so the sqrt
    gradient stays finite at zero distance.
    """
    a, b = vects
    squared_sum = K.sum(K.square(a - b), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared_sum, K.epsilon()))
def eucl_dist_output_shape(shapes):
    """Keras output-shape helper for the distance Lambda layer.

    Given the two input shapes, the distance output is (batch_size, 1).
    """
    first, second = shapes
    batch_size = first[0]
    return (batch_size, 1)
def contrastive_loss(y_true, y_pred):
    """Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    Equation 4.  Label 0 marks a matching pair (pull together), label 1 a
    non-matching pair (push apart up to `margin`).
    """
    margin = 1
    similar_term = (1 - y_true) * K.square(y_pred)
    dissimilar_term = y_true * K.square(K.maximum(margin - y_pred, 0))
    return K.mean(similar_term + dissimilar_term)
#for now any negative pairing is accepted; smarter negative selection can come later
def create_pairs(x, y, z):
    """Interleave positive and negative training pairs.

    For each index i this emits (x[i], y[i]) with label 0 (same entity)
    immediately followed by (x[i], z[i]) with label 1 (different entity).
    Returns (pairs, labels) as numpy arrays.
    """
    pairs = []
    labels = []
    for idx in range(len(x)):
        pairs.append([x[idx], y[idx]])
        pairs.append([x[idx], z[idx]])
        labels.extend([0, 1])
    return np.array(pairs), np.array(labels)
def create_base_network(input_dim, embedding_layer, reg):
    '''Base network to be shared (eq. to feature extraction).

    Builds the Sequential tower used by both siamese branches:
    embedding -> flatten -> three Dense(128, relu) layers with dropout.
    Returns (network, final_layer) so callers can inspect the last layer.
    NOTE(review): `reg` is currently unused -- both kernel_regularizer
    lines below are commented out.
    '''
    seq = Sequential()
    seq.add(embedding_layer)
    seq.add(Flatten())
    seq.add(Dense(128, input_shape=(input_dim,), activation='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(128, activation='relu'))
    # kernel_regularizer=reg))
    seq.add(Dropout(0.1))
    final_layer = Dense(128, activation='relu')
    seq.add(final_layer)
    # kernel_regularizer=reg))
    return seq, final_layer
def embedded_representation(embedding_layer):
    """Model mapping padded token-id sequences straight to their flattened
    embeddings (used when DO_ANN_ON_EMBEDDINGS indexes raw embeddings)."""
    seq = Sequential()
    seq.add(embedding_layer)
    seq.add(Flatten())
    return seq
def compute_accuracy(predictions, labels):
    '''Fraction of pairs classified correctly at a fixed 0.5 threshold.

    predictions are distances (smaller = more similar); label 0 means a
    matching pair, label 1 a non-matching pair.  Prints a breakdown and
    returns correct / total.
    '''
    same_correct = 0
    diff_correct = 0
    for i in range(len(labels)):
        distance = predictions[i]
        if distance < 0.5 and labels[i] == 0:
            same_correct += 1
        elif distance >= 0.5 and labels[i] == 1:
            diff_correct += 1
    print("Precision computation: same - " + str(same_correct) + " different: " + str(diff_correct) + " from total: " + str(len(labels)))
    return (same_correct + diff_correct) / len(labels)
def get_aliases_with_ids(con, meta):
    """Load all alias pairs with their entity ids, ordered by entity id.

    Returns a list of (alias1, alias2, entityid) tuples.  `meta` is
    accepted for signature compatibility with the other DB helpers but is
    not used here.
    """
    result = con.execute("select alias1, alias2, entityid from aliases order by entityid;")
    return [(row[0], row[1], row[2]) for row in result]
def f1score(predictions, labels):
    """Compute the F1 score of the pair classifier.

    predictions: array of distances (ravel()-able); distance < 0.5 predicts
    "same pair".  labels: 0 for a true matching pair, 1 otherwise, so a
    true positive is a small distance with label 0.  Prints the tp/fp/fn
    counts and the score, then returns the score.
    """
    predictions = predictions.ravel()
    # Fixes vs. original: removed the unused (misspelled) `fsocre` local,
    # renamed `false_negitive`, and guarded the zero-denominator case.
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(labels)):
        if predictions[i] < 0.5:
            if labels[i] == 0:
                true_positive += 1
            else:
                false_positive += 1
        elif labels[i] == 0:
            false_negative += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    # No positives predicted or expected: define F1 as 0.0 instead of
    # raising ZeroDivisionError.
    fscore = (2 * true_positive) / denominator if denominator else 0.0
    print(fscore)
    return fscore
#compute accuracy using a rule based matcher
def sequence_to_word(sequence, reverse_word_index):
    """Decode a sequence of token ids into a space-joined string.

    Ids missing from `reverse_word_index` (e.g. the 0 padding id) are
    silently skipped.
    """
    words = []
    for token_id in sequence:
        if token_id in reverse_word_index:
            words.append(reverse_word_index[token_id])
    return " ".join(words)
def sequence_pair_to_word_pair(sequence_pair, reverse_word_index):
    """Decode both members of a (sequence, sequence) pair into strings."""
    left = sequence_to_word(sequence_pair[0], reverse_word_index)
    right = sequence_to_word(sequence_pair[1], reverse_word_index)
    return [left, right]
if __name__ == '__main__':
print('Processing text dataset')
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-u', dest="user", help="username")
parser.add_argument('-p', dest="password", help="password")
parser.add_argument('-d', dest="db", help="dbname")
parser.add_argument('-a', dest="num_pairs", help="number of same pairs in db", nargs='?', default=2, type=int)
args = parser.parse_args()
texts1 = [] # list of text samples in part 1
texts2 = [] # list of text samples in part 2
#change to get from sql and not read from file
con, meta = connect(args.user, args.password, args.db)
aliases = get_aliases_with_ids(con, meta)
unique_aliases = []
# collect up all the anchors that are unique (anchors will get repeated if num_pairs > 2)
prev = int(aliases[0][2])
unique_aliases.append(aliases[0])
for tuple in aliases:
texts1.append(tuple[0])
texts2.append(tuple[1])
if int(tuple[2]) != prev:
unique_aliases.append(tuple)
prev = int(tuple[2])
print('Found %s texts.' % len(texts1))
texts3 = []
print(len(unique_aliases))
print(len(texts1))
print(len(texts2))
# get the different pairs
if args.num_pairs == 2:
print("args num pairs is 2")
texts3.extend(get_diff_names_with_overlap(connect(args.user, args.password, args.db)[0], unique_aliases))
elif args.num_pairs == 3:
print("args num pairs is 3")
texts3.extend(get_diff_names_with_overlap(connect(args.user, args.password, args.db)[0], unique_aliases))
texts3.extend(get_diff_names_with_no_overlap(unique_aliases))
elif args.num_pairs == 4:
print("args num pairs is 4")
texts3.extend(get_diff_names_with_overlap(connect(args.user, args.password, args.db)[0], unique_aliases))
texts3.extend(get_diff_names_with_no_overlap(unique_aliases))
texts3.extend(get_diff_names_with_no_overlap(unique_aliases))
print(len(texts3))
assert len(texts1) == len(texts2)
assert len(texts2) == len(texts3), str(len(texts3))
# vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts1 + texts2 + texts3)
# this step should get similar but non-matching items to keep for later matching
# this step creates a sequence of words ids for each word in each label
sequences1 = tokenizer.texts_to_sequences(texts1)
for sc in range(len(texts1)):
if sum(sequences1[sc]) == 0:
print('here is a problem word :' + texts1[sc] + '::')
sequences2 = tokenizer.texts_to_sequences(texts2)
no_match_sequences = tokenizer.texts_to_sequences(texts3)
word_index = tokenizer.word_index
check_for_zeroes(sequences1, " sequences")
print('Found %s unique tokens.' % len(word_index))
annoy_data1 = pad_sequences(sequences1, maxlen=MAX_SEQUENCE_LENGTH)
annoy_data2 = pad_sequences(sequences2, maxlen=MAX_SEQUENCE_LENGTH)
no_match_data = pad_sequences(no_match_sequences, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data1 tensor:', annoy_data1.shape)
print('Shape of data2 tensor:', annoy_data2.shape)
# split the data into a training set and a validation set, shuffling items
indices = np.arange(annoy_data1.shape[0])
np.random.shuffle(indices)
texts1 = np.array(texts1)
texts2 = np.array(texts2)
texts3 = np.array(texts3)
texts1 = texts1[indices]
texts2 = texts2[indices]
texts3 = texts3[indices]
# for i in range(len(texts1)):
# print(texts1[i] + " paired with: " + texts2[i])
# print(texts1[i] + " paired with: " + texts3[i])
data1 = annoy_data1[indices]
data2 = annoy_data2[indices]
no_match_data = no_match_data[indices]
num_validation_samples = int(VALIDATION_SPLIT * data1.shape[0])
x_train = data1[:-num_validation_samples]
y_train = data2[:-num_validation_samples]
z_train = no_match_data[:-num_validation_samples]
x_test = data1[-num_validation_samples:]
y_test = data2[-num_validation_samples:]
z_test = no_match_data[-num_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
num_words = len(word_index) + 1 # word_index is indexed from 1-N
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
kz = KazumaCharEmbedding()
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
if embedding_vector is not None:
if sum(embedding_vector) == 0:
print("failed to find embedding for:" + word)
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# note that we set trainable = False so as to keep the embeddings fixed
check_for_zeroes(embedding_matrix, "here is the first pass")
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
print('Training model.')
# the data, shuffled and split between train and test sets
#need to change this not sure how
input_dim = MAX_SEQUENCE_LENGTH
epochs = 10
# create training+test positive and negative pairs
# these next lines also need to change
#digit_indices = [np.where(y_train == i)[0] for i in range(10)]
print("x_train {} , y_train {} , z_train {} ".format(x_train, y_train, z_train))
tr_pairs, tr_y = create_pairs(x_train, y_train, z_train)
#digit_indices = [np.where(y_test == i)[0] for i in range(10)]
te_pairs, te_y = create_pairs(x_test, y_test, z_test)
print (len(tr_y))
# network definition
base_network, final_layer = create_base_network(input_dim, embedding_layer, L1L2(0.0,0.0))
input_a = Input(shape=(input_dim,))
input_b = Input(shape=(input_dim,))
# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = base_network(input_a)
processed_b = base_network(input_b)
distance = Lambda(euclidean_distance,
output_shape=eucl_dist_output_shape)([processed_a, processed_b])
model = Model([input_a, input_b], distance)
print(base_network.summary())
# train
rms = RMSprop()
#change the optimizer (adam)
model.compile(loss=contrastive_loss, optimizer=rms)
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
batch_size=128,
epochs=epochs)
# compute final accuracy on training and test sets
#add an LSTM layer (later)
# testpairs = [[lambda x : x.replace(" ", ""), lambda name1, name2 : name1 in name2 or name2 in name1],
# [lambda x : set(x.split()), lambda name1, name2 : name1.issubset(name2) or name2.issubset(name1)]]
# matcher = matcher(argv[1], argv[2], argv[3], test_pairs, 1)
pred_learning = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
# out = model.layers[2].get_output_at(0)
# inp = model.input
# func = K.function([inp], [out]) # evaluation functions
# print("here should be a vector")
# print(func([tr_pairs[:, 0][0], tr_pairs[:, 1][0]]))
# Testing
# print (layer_outs)
tr_acc = compute_accuracy(pred_learning, tr_y)
tr_f1 = f1score(pred_learning, tr_y)
pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
pred_learning = np.append(pred_learning, pred, axis=0)
te_acc = compute_accuracy(pred, te_y)
te_f1 = f1score(pred, te_y)
x_test_text = texts1[-num_validation_samples:]
y_test_text = texts2[-num_validation_samples:]
z_test_text = texts3[-num_validation_samples:]
text_pairs, text_y = create_pairs(x_test_text, y_test_text, z_test_text)
# for i in range(len(text_pairs)):
# print(str(text_pairs[i]))
# print(pred[i])
# print(model.predict([np.array([te_pairs[i, 0]]), np.array([te_pairs[i, 1]])]))
# from https://github.com/spotify/annoy
f = 128
if DO_ANN_ON_EMBEDDINGS:
inter_model = embedded_representation(embedding_layer)
else:
inter_model = Model(input_a, processed_a)
intermediate_output1 = inter_model.predict(x_test)
intermediate_output2 = inter_model.predict(y_test)
intermediate_output3 = inter_model.predict(z_test)
mid_predictions = np.concatenate((intermediate_output1, intermediate_output2, intermediate_output3))
# print(mid_predictions[0])
# print (len(mid_predictions[0]))
if DO_ANN_ON_EMBEDDINGS:
t = AnnoyIndex(len(mid_predictions[0]), metric='euclidean') # Length of item vector that will be indexed
else:
t = AnnoyIndex(f, metric='euclidean') # Length of item vector that will be indexed
for i in range(len(mid_predictions)):
v = mid_predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
t.save('test.ann')
# ...
all_texts = np.concatenate((x_test_text, y_test_text, z_test_text))
match = 0
no_match = 0
for index in range(len(x_test_text)):
nearest = t.get_nns_by_vector(mid_predictions[index], 5)
# print(nearest)
nearest_text = [all_texts[i] for i in nearest]
# print("query={} names = {} true_match = {} reject= {}".format(x_test_text[index], nearest_text, y_test_text[index], z_test_text[index]))
for i in nearest:
# print(all_texts[i])
if i >= len(x_test_text) and (i < len(x_test_text) + len(y_test_text)):
arr = np.array([y_test[i - len(x_test_text)]])
elif i >= len(x_test_text) + len(y_test_text):
arr = np.array([z_test[i - len(x_test_text) - len(y_test_text)]])
else:
arr = np.array([x_test[i]])
# print(model.predict([np.array([x_test[index]]), arr]))
# print(t.get_distance(index, i))
# print("true match prediction:")
# print(model.predict([np.array([x_test[index]]), np.array([y_test[index]])]))
# print("true match distance:")
# print(t.get_distance(index, index + len(x_test_text)))
# print("true reject prediction:")
# print(model.predict([np.array([x_test[index]]), np.array([z_test[index]])]))
# print("true reject distance:")
# print(t.get_distance(index, index + len(x_test_text) + len(y_test_text)))
if y_test_text[index] in nearest_text:
match += 1
# print("MATCH FOUND")
else:
no_match += 1
print("match: {} no_match: {}".format(match, no_match))
print("Machine Learning Accuracy")
print(tr_acc)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
print('* f1score on the training set: %0.4f' % (tr_f1))
print('* f1socre on test set: %0.4f' % (te_f1))
reverse_word_index = {v: k for k, v in tokenizer.word_index.items()}
print(tr_pairs)
print(sequence_to_word(tr_pairs[0][0], reverse_word_index))
print(sequence_to_word(tr_pairs[1][1], reverse_word_index))
print(tr_y[0])
test_pairs = [[lambda x : x.replace(" ", ""), lambda name1, name2 : name1 in name2 or name2 in name1],
[lambda x : set(x.split()), lambda name1, name2 : name1.issubset(name2) or name2.issubset(name1)]]
matcher = matcher(args.user, args.password, args.db, test_pairs, 1)
pred_rules = np.asarray([int(not matcher.match(*sequence_pair_to_word_pair(name_pair, reverse_word_index))) for name_pair in tr_pairs])
tr_acc = compute_accuracy(pred_rules, tr_y)
tr_f1 = f1score(pred_rules, tr_y)
pred = np.asarray([int(not matcher.match(*sequence_pair_to_word_pair(name_pair, reverse_word_index))) for name_pair in te_pairs])
pred_rules = np.append(pred_rules, pred, axis=0)
te_acc = compute_accuracy(pred, te_y)
te_f1 = f1score(pred, te_y)
print("Rule-Based Accuracy")
print('* Accuracy on training set (rules): %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set (rules): %0.2f%%' % (100 * te_acc))
print('* f1score on the training set: %0.4f' % (tr_f1))
print('* f1score on test set: %0.4f' % (te_f1))
con, meta = connect(args.user, args.password, args.db)
execute_pairs = []
if 'predictions' in meta.tables:
meta.tables['predictions'].drop(con)
predictions = Table('predictions', meta, Column('name1', String), Column('name2', String), Column('rule_predict', Integer), Column('learning_predict', Float), Column('true_pair', Integer), Column('te_or_tr', String), extend_existing=True)
zipping_string = ('name1', 'name2', 'true_pair', 'rule_predict', 'learning_predict', 'te_or_tr')
print(len(tr_y))
print(len(tr_pairs))
print(len(pred_rules))
print(len(pred_learning))
print(len(te_y))
print(len(te_pairs))
for i in range(len(tr_y)):
execute_pairs.append(dict(zip(zipping_string, (sequence_to_word(tr_pairs[i][0], reverse_word_index), sequence_to_word(tr_pairs[i][1], reverse_word_index), int(tr_y[i]), int(pred_rules[i]), float(pred_learning[i][0].item()), 'tr'))))
offset = len(tr_y)
for i in range(len(te_y)):
execute_pairs.append(dict(zip(zipping_string, (sequence_to_word(tr_pairs[i][0], reverse_word_index), sequence_to_word(te_pairs[i][1], reverse_word_index), int(te_y[i]), int(pred_rules[offset + i]), float(pred_learning[offset + i][0].item()), 'te'))))
meta.create_all(con)
con.execute(predictions.insert(), execute_pairs)
| 23,976 | 36.289269 | 258 | py |
fuzzyJoiner | fuzzyJoiner-master/old/Named_Entity_Recognition.py | """
This code is modified from
https://github.com/fchollet/keras/blob/master/examples/pretrained_word_embeddings.py
and ttps://github.com/fchollet/keras/blob/master/examples/
for our own purposes
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from matcher_functions import connect, get_aliases, load_good_buckets, create_double_num_dicts
from matcher_class import matcher
import os
from sqlalchemy import Table, Column, Integer, String, ForeignKey, Float
import sys
from sys import argv
import numpy as np
from embeddings import KazumaCharEmbedding
import random
from annoy import AnnoyIndex
import random
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from keras.utils import to_categorical
from keras.optimizers import RMSprop
from keras import backend as K
from keras import regularizers
from keras.regularizers import L1L2
BASE_DIR = './Machine_Learning/'
# directory containing glove encodings from Wikipedia (we can swap this out for another encoding later)
# Download glove.6B.zip from https://nlp.stanford.edu/projects/glove/
GLOVE_DIR = BASE_DIR + 'glove/'
TEXT_DATA_DIR = BASE_DIR + 'nerData'
# number of words an entity is allowed to have
# distribution of number of words in peoples names can be found in peopleNamesDisbn
# distribution of number of words in company names can be found in companyNamesDisbn
# Note most of the names above that are fairly esoteric or just plain noise. Included is
# python code to remove them
MAX_SEQUENCE_LENGTH = 10
# Total number of unique tokens in peoples names is 90K, including a lot of non-English names. To remove those
# we use an egregious hack to check if its UTF-8
# Total number of unique tokens in company names is 37K
# Assuming no overlap between the two we get about 127K. We may need to tweak this parameter as we go
# but according to the Keras documentation, this can even be left unset
MAX_NB_WORDS = 140000
# Size of embeddings from Glove (we will try the 100 dimension encoding to start with)
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.05
def check_for_zeroes(to_check, intro_string):
    """Print every all-zero vector found in *to_check*, then a summary line.

    Used as a sanity check that no token/embedding row ended up empty.
    """
    zero_count = 0
    for row in to_check:
        if sum(row) != 0:
            continue
        print(intro_string + str(row))
        zero_count += 1
    if zero_count:
        print(intro_string + ' found this many: ' + str(zero_count))
    else:
        print(intro_string + " no problems found")
# first, build index mapping words in the glove embeddings set
# to their embedding vector. This is a straightforward lookup of
# words in Glove and then their embeddings which should be a 100 sized array of floats
# print('Reading word embeddings: Indexing word vectors.')
# embeddings_index = {}
# f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
# for line in f:
# values = line.split()
# word = values[0]
# coefs = np.asarray(values[1:], dtype='float32')
# embeddings_index[word] = coefs
# f.close()
# print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
texts1 = [] # list of text samples in part 1
texts2 = [] # list of text samples in part 2
labels_index = {} # dictionary mapping label name to numeric id - here the label name is just the name of the file in the data dir
name_list = sorted(os.listdir(TEXT_DATA_DIR))
name = name_list[0]
label_id = len(labels_index)
labels_index[name] = label_id
fpath = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(fpath):
raise ValueError('bad data directory')
if sys.version_info < (3,):
f = open(fpath)
else:
f = open(fpath, encoding='latin-1')
#change to get from sql and not read from file
con, meta = connect(argv[1], argv[2], argv[3])
#load pairs from database
aliases = get_aliases(con, meta)
for pair in aliases:
num_tokens = len(pair[0].strip().split(' ')) + len(pair[1].strip().split(' '))
if 0 < num_tokens < MAX_SEQUENCE_LENGTH:
texts1.append(pair[0])
texts2.append(pair[1])
f.close()
#this returns a new set of texts to use as similar non-matches for texts1
def get_no_match_texts(argv, texts1):
    """Build, for every name in texts1, a similar-looking NON-matching name.

    Each name is looked up in the word-bucket index loaded from the database;
    the first bucketed candidate that is not a known alias is used. When no
    such candidate exists, the next name in texts1 (cyclically) is used as a
    fallback, so the result always has len(texts1) entries.
    """
    def get_non_match(name1, bucket_words, matching_set):
        # Every word of name1 must have a bucket, otherwise there is no
        # candidate pool to draw a near-miss from.
        for word in name1.split(" "):
            if word in bucket_words:
                bucket = bucket_words[word]
            else:
                return None
        # NOTE(review): 'bucket' below is the bucket of the LAST word only —
        # assumed intentional; confirm against matcher_functions.
        if len(bucket[1]) > 1:
            for name2 in bucket[1]:
                # Skip candidates that are actually known aliases of name1.
                if (name1, name2[1]) not in matching_set:
                    return name2[1]
        return None
    no_match_texts = []
    # TODO: this DB setup duplicates work done at module level; it should be
    # a single helper in matcher_functions instead of being repeated here.
    # Establish a connection to the database.
    con, meta = connect(argv[1], argv[2], argv[3])
    # Load the known alias pairs.
    aliases = get_aliases(con, meta)
    # Create dictionaries assigning serial numbers to names and names to serial numbers.
    num_to_word, word_to_num = create_double_num_dicts(aliases)
    # Load the buckets from the database. bucket_list is arranged as follows:
    # bucket_list[pair_of_buckets][bucket (must be 0 or 1)][name][0 for number, 1 for pre-processed name]
    bucket_list, bucket_words = load_good_buckets('wordtable1', 'wordtable2', word_to_num, con, meta)
    for index in range(len(texts1)):
        new_text = get_non_match(texts1[index], bucket_words, aliases)
        if new_text == None:
            # No bucketed candidate: reuse the next name as the non-match.
            new_text = texts1[(index + 1) % len(texts1)]
        no_match_texts.append(new_text)
    return no_match_texts
print('Found %s texts.' % len(texts1))
# finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
#gets the special no match charectars
texts3 = get_no_match_texts(argv, texts1)
#this removes all non-ascii charectars from the 3 sets of strings
texts1 = [str(item) for item in texts1]
texts2 = [str(item) for item in texts2]
texts3 = [str(item) for item in texts3]
# for i in range(len(texts1)):
print ("1 is {}".format(texts1[1]))
print ("2 is {}".format(texts2[1]))
print ("3 is {}".format(texts3[1]))
tokenizer.fit_on_texts(texts1 + texts2 + texts3)
#this step should get similar but non-matching items to keep for later matching
# this step creates a sequence of words ids for each word in each label
sequences1 = tokenizer.texts_to_sequences(texts1)
for sc in range(len(texts1)):
if sum(sequences1[sc]) == 0:
print('here is a problem word :' + texts1[sc] + '::')
sequences2 = tokenizer.texts_to_sequences(texts2)
no_match_sequences = tokenizer.texts_to_sequences(texts3)
word_index = tokenizer.word_index
check_for_zeroes(sequences1, " sequences")
print('Found %s unique tokens.' % len(word_index))
annoy_data1 = pad_sequences(sequences1, maxlen=MAX_SEQUENCE_LENGTH)
annoy_data2 = pad_sequences(sequences2, maxlen=MAX_SEQUENCE_LENGTH)
no_match_data = pad_sequences(no_match_sequences, maxlen=MAX_SEQUENCE_LENGTH)
# print (data1[0])
# print (data2[0])
# print (texts1[0])
# print (texts2[0])
#labels = to_categorical(np.asarray(labels))
print('Shape of data1 tensor:', annoy_data1.shape)
#print('Shape of label tensor:', labels.shape)
print('Shape of data2 tensor:', annoy_data2.shape)
# split the data into a training set and a validation set
indices = np.arange(annoy_data1.shape[0])
np.random.shuffle(indices)
data1 = annoy_data1[indices]
data2 = annoy_data2[indices]
no_match_data = no_match_data[indices]
num_validation_samples = int(VALIDATION_SPLIT * data1.shape[0])
x_train = data1[:-num_validation_samples]
y_train = data2[:-num_validation_samples]
z_train = no_match_data[:-num_validation_samples]
x_test = data1[-num_validation_samples:]
y_test = data2[-num_validation_samples:]
z_test = no_match_data[-num_validation_samples:]
texts1 = np.array(texts1)
texts2 = np.array(texts2)
texts3 = np.array(texts3)
texts1 = texts1[indices]
texts2 = texts2[indices]
texts3 = texts3[indices]
print('Preparing embedding matrix.')
# prepare embedding matrix
# num_words = min(MAX_NB_WORDS, len(word_index))
num_words = len(word_index) + 1 # word_index is indexed from 1-N
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
kz = KazumaCharEmbedding()
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
# i = 0
# while sum(embedding_vector) == 0 and i <= 1000:
# embedding_vector = k.emb(word)
# i++;
# if i == 1000:
# print("fail")
if embedding_vector is not None:
if sum(embedding_vector) == 0:
print("failed to find embedding for:" + word)
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
#else:
# print(word + )
# note that we set trainable = False so as to keep the embeddings fixed
check_for_zeroes(embedding_matrix, "here is the first pass")
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
print('Training model.')
# train a 1D convnet with global maxpooling
#sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
#need two of these
#embedded_sequences = embedding_layer(sequence_input)
def euclidean_distance(vects):
    """Batched Euclidean distance between two tensors, clamped away from zero
    (K.epsilon()) so the sqrt gradient stays finite."""
    left, right = vects
    squared_sum = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared_sum, K.epsilon()))
def eucl_dist_output_shape(shapes):
    """Keras output_shape helper: the distance layer collapses the feature
    axis, leaving one scalar per batch row."""
    first_shape = shapes[0]
    return (first_shape[0], 1)
def contrastive_loss(y_true, y_pred):
    '''Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

    y_true is 1 for matching pairs, 0 for non-matches; y_pred is the
    predicted distance. Matches are pulled together (squared distance),
    non-matches are pushed at least `margin` apart (hinged).
    '''
    margin = 1
    return K.mean(y_true * K.square(y_pred) +
                  (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
#for now will take any bad pairs, will take only relivent ones later
def create_pairs(x, y, z):
    '''Build alternating positive/negative training pairs.

    For each index i two pairs are emitted: (x[i], y[i]) labeled 1 (match)
    and (x[i], z[i]) labeled 0 (non-match). Returns numpy arrays
    (pairs, labels).
    '''
    pairs = []
    labels = []
    for i, anchor in enumerate(x):
        pairs.append([anchor, y[i]])
        pairs.append([anchor, z[i]])
        labels += [1, 0]
    return np.array(pairs), np.array(labels)
def create_base_network(input_dim, embedding_layer, reg):
    '''Base network to be shared (eq. to feature extraction).

    Embedding -> Flatten -> three Dense(128, relu) layers with dropout.
    Returns (model, final_layer) so the caller can inspect the last layer.
    NOTE(review): the `reg` regularizer parameter is currently unused — the
    kernel_regularizer lines are commented out below.
    '''
    seq = Sequential()
    seq.add(embedding_layer)
    seq.add(Flatten())
    seq.add(Dense(128, input_shape=(input_dim,), activation='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(128, activation='relu'))
    # kernel_regularizer=reg))
    seq.add(Dropout(0.1))
    final_layer = Dense(128, activation='relu')
    seq.add(final_layer)
    # kernel_regularizer=reg))
    return seq, final_layer
def compute_accuracy(predictions, labels):
    '''Fraction of pairs whose thresholded distance (< 0.5 means "match")
    agrees with the binary labels.'''
    is_match = predictions.ravel() < 0.5
    return np.mean(np.equal(is_match, labels))
def f1score(predictions, labels):
    """Compute the F1 score of distance predictions against binary labels.

    A predicted distance below 0.5 counts as a positive ("match") call.
    Prints the tp/fp/fn tallies and the score, then returns the score.

    Fixes vs. the original: the unused misspelled `fsocre` local is removed,
    and an all-negative input no longer raises ZeroDivisionError (F1 is
    defined as 0.0 when there are no positives at all).
    """
    predictions = predictions.ravel()
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for pred, label in zip(predictions, labels):
        if pred < 0.5:
            if label == 1:
                true_positive += 1
            else:
                false_positive += 1
        elif label == 1:
            false_negitive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    denominator = (2 * true_positive) + false_negitive + false_positive
    if denominator == 0:
        # No positives predicted or labeled: define F1 as 0 instead of crashing.
        fscore = 0.0
    else:
        fscore = (2 * true_positive) / denominator
    print(fscore)
    return fscore
# the data, shuffled and split between train and test sets
#need to change this not sure how
input_dim = MAX_SEQUENCE_LENGTH
epochs = 1
# create training+test positive and negative pairs
# these next lines also need to change
#digit_indices = [np.where(y_train == i)[0] for i in range(10)]
print("x_train {} , y_train {} , z_train {} ".format(x_train, y_train, z_train))
tr_pairs, tr_y = create_pairs(x_train, y_train, z_train)
#digit_indices = [np.where(y_test == i)[0] for i in range(10)]
te_pairs, te_y = create_pairs(x_test, y_test, z_test)
print (len(tr_y))
# network definition
base_network, final_layer = create_base_network(input_dim, embedding_layer, L1L2(0.0,0.0))
input_a = Input(shape=(input_dim,))
input_b = Input(shape=(input_dim,))
# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = base_network(input_a)
processed_b = base_network(input_b)
distance = Lambda(euclidean_distance,
output_shape=eucl_dist_output_shape)([processed_a, processed_b])
model = Model([input_a, input_b], distance)
print(base_network.summary())
# train
rms = RMSprop()
#change the optimizer (adam)
model.compile(loss=contrastive_loss, optimizer=rms)
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
batch_size=128,
epochs=epochs,
validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))
# compute final accuracy on training and test sets
#add an LSTM layer (later)
# testpairs = [[lambda x : x.replace(" ", ""), lambda name1, name2 : name1 in name2 or name2 in name1],
# [lambda x : set(x.split()), lambda name1, name2 : name1.issubset(name2) or name2.issubset(name1)]]
# matcher = matcher(argv[1], argv[2], argv[3], test_pairs, 1)
pred_learning = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
# out = model.layers[2].get_output_at(0)
# inp = model.input
# func = K.function([inp], [out]) # evaluation functions
# print("here should be a vector")
# print(func([tr_pairs[:, 0][0], tr_pairs[:, 1][0]]))
# Testing
# print (layer_outs)
tr_acc = compute_accuracy(pred_learning, tr_y)
tr_f1 = f1score(pred_learning, tr_y)
pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
pred_learning = np.append(pred_learning, pred, axis=0)
te_acc = compute_accuracy(pred, te_y)
te_f1 = f1score(pred, te_y)
x_test_text = texts1[-num_validation_samples:]
y_test_text = texts2[-num_validation_samples:]
z_test_text = texts3[-num_validation_samples:]
for i in range(len(x_test_text)):
print(x_test_text[i] + "|" + y_test_text[i])
print(pred[i])
print(x_test_text[i] + "|" + z_test_text[i])
print(pred[i + 1])
inter_model = Model(input_a, processed_a)
intermediate_output1 = inter_model.predict(annoy_data1)
intermediate_output2 = inter_model.predict(annoy_data2)
mid_predictions = np.concatenate((intermediate_output1, intermediate_output2))
# from https://github.com/spotify/annoy
f = 128
# print(mid_predictions[0])
# print (len(mid_predictions[0]))
t = AnnoyIndex(f, metric='euclidean') # Length of item vector that will be indexed
for i in range(len(mid_predictions)):
v = mid_predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
t.save('test.ann')
# ...
all_texts = np.concatenate((texts1, texts2))
match = 0
no_match = 0
print("shape of annoy data1")
print(annoy_data1[0].shape)
print(tr_pairs[:, 0].shape)
for index in range(len(texts1)):
nearest = t.get_nns_by_vector(mid_predictions[index], 2)
print("query={} names = {} true_match = {}".format(texts1[index], [all_texts[i] for i in nearest], texts2[index]))
for i in nearest:
print(t.get_distance(index, i))
print(model.predict([np.array([annoy_data1[index]]), np.array([annoy_data2[i - len(annoy_data1)]])]))
print(t.get_distance(index, index + len(texts1)))
print(model.predict([np.array([annoy_data1[index]]), np.array([annoy_data2[index]])]))
if (index + len(texts1)) in nearest:
match += 1
else:
no_match += 1
print("match: {} no_match: {}".format(match, no_match))
print("Machine Learning Accuracy")
print(tr_acc)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
print('* f1score on the training set: %0.4f' % (tr_f1))
print('* f1socre on test set: %0.4f' % (te_f1))
#compute accuracy using a rule based matcher
def sequence_to_word(sequence, reverse_word_index):
    """Map a sequence of token ids back to words (unknown ids are dropped,
    which also discards the 0 padding) and join them with spaces."""
    words = [reverse_word_index[token] for token in sequence
             if token in reverse_word_index]
    return " ".join(words)
def sequence_pair_to_word_pair(sequence_pair, reverse_word_index):
    """Decode both halves of a sequence pair back into space-joined strings."""
    first = sequence_to_word(sequence_pair[0], reverse_word_index)
    second = sequence_to_word(sequence_pair[1], reverse_word_index)
    return [first, second]
reverse_word_index = {v: k for k, v in tokenizer.word_index.items()}
print(tr_pairs)
print(sequence_to_word(tr_pairs[0][0], reverse_word_index))
print(sequence_to_word(tr_pairs[1][1], reverse_word_index))
print(tr_y[0])
test_pairs = [[lambda x : x.replace(" ", ""), lambda name1, name2 : name1 in name2 or name2 in name1],
[lambda x : set(x.split()), lambda name1, name2 : name1.issubset(name2) or name2.issubset(name1)]]
matcher = matcher(argv[1], argv[2], argv[3], test_pairs, 1)
pred_rules = np.asarray([int(not matcher.match(*sequence_pair_to_word_pair(name_pair, reverse_word_index))) for name_pair in tr_pairs])
tr_acc = compute_accuracy(pred_rules, tr_y)
tr_f1 = f1score(pred_rules, tr_y)
pred = np.asarray([int(not matcher.match(*sequence_pair_to_word_pair(name_pair, reverse_word_index))) for name_pair in te_pairs])
pred_rules = np.append(pred_rules, pred, axis=0)
te_acc = compute_accuracy(pred, te_y)
te_f1 = f1score(pred, te_y)
print("Rule-Based Accuracy")
print('* Accuracy on training set (rules): %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set (rules): %0.2f%%' % (100 * te_acc))
print('* f1score on the training set: %0.4f' % (tr_f1))
print('* f1socre on test set: %0.4f' % (te_f1))
con, meta = connect(argv[1], argv[2], argv[3])
execute_pairs = []
if 'predictions' in meta.tables:
meta.tables['predictions'].drop(con)
predictions = Table('predictions', meta, Column('name1', String), Column('name2', String), Column('rule_predict', Integer), Column('learning_predict', Float), Column('true_pair', Integer), Column('te_or_tr', String), extend_existing=True)
zipping_string = ('name1', 'name2', 'true_pair', 'rule_predict', 'learning_predict', 'te_or_tr')
print(len(tr_y))
print(len(tr_pairs))
print(len(pred_rules))
print(len(pred_learning))
print(len(te_y))
print(len(te_pairs))
for i in range(len(tr_y)):
execute_pairs.append(dict(zip(zipping_string, (sequence_to_word(tr_pairs[i][0], reverse_word_index), sequence_to_word(tr_pairs[i][1], reverse_word_index), int(tr_y[i]), int(pred_rules[i]), float(pred_learning[i][0].item()), 'tr'))))
offset = len(tr_y)
for i in range(len(te_y)):
execute_pairs.append(dict(zip(zipping_string, (sequence_to_word(tr_pairs[i][0], reverse_word_index), sequence_to_word(te_pairs[i][1], reverse_word_index), int(te_y[i]), int(pred_rules[offset + i]), float(pred_learning[offset + i][0].item()), 'te'))))
meta.create_all(con)
con.execute(predictions.insert(), execute_pairs)
| 19,852 | 31.176661 | 254 | py |
fuzzyJoiner | fuzzyJoiner-master/old/face_vgg.py | 0 | 0 | 0 | py | |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-modified.py | import numpy as np
import tensorflow as tf
import random as random
# import cntk as C
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# import theano as T
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
#must fix
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
MARGIN=1
ALPHA=30
USE_GRU=True
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = True
def f1score(positive, negative):
    """F1 score for triplet evaluation.

    `positive[i]` / `negative[i]` are the anchor-positive and anchor-negative
    distances of triplet i; the triplet is a true positive when the positive
    distance does not exceed the negative one.
    """
    #labels[predictions.ravel() < 0.5].sum()
    fsocre = 0.0  # NOTE(review): unused (and misspelled) accumulator.
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            # A mis-ordered triplet is counted as both a false negative and a
            # false positive — assumed intentional; confirm with the author.
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negitive + false_positive)
    return fscore
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Row i of the matrix holds the embedding of the word with tokenizer index
    i (index 0 stays all-zero for padding). Words the embedder cannot embed
    are reported but still stored (as zero vectors).
    """
    word_index = tokenizer.word_index
    # word_index is 1-based, so the matrix needs one extra row.
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            if sum(embedding_vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    # trainable=False keeps the pretrained embeddings fixed during training.
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Tokenize and pad each of the anchor/negative/positive text lists to
    MAX_SEQUENCE_LENGTH, returning a dict with the same three keys."""
    padded = {}
    for role in ('anchor', 'negative', 'positive'):
        token_ids = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(token_ids, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Return every raw line of *filepath* (newlines included) as a list."""
    with open(filepath) as fl:
        return list(fl)
def read_file(file_path):
    """Read '|'-separated triplet lines into anchor/positive/negative lists.

    Each line has the form 'anchor|positive|negative'. In DEBUG mode reading
    stops once DEBUG_DATA_LENGTH lines have been consumed.

    Fix vs. the original: the file handle was opened and never closed;
    a `with` block now guarantees it is released.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    with open(file_path, 'r') as fl:
        i = 0
        for line in fl:
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            i += 1
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split entities into (train, test) lists.

    In DEBUG mode a fixed prefix of the data is used; otherwise the list is
    shuffled in place first.

    Fix vs. the original: when `int(test_split * len(ents))` is 0 (very small
    inputs), the slice `ents[:-0]` evaluated to an EMPTY training set and the
    whole list became the test set. Now everything stays in training instead.
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    if num_validation_samples == 0:
        # Too few entities for a test slice; keep everything for training.
        return ents, []
    return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular loss (Wang et al., "Deep Metric Learning with Angular Loss").

    y_pred is a stacked tensor: [:,0,0] is the anchor-positive distance a_p
    and [:,1,0] is the distance n_c from the negative to the anchor/positive
    midpoint. The hinge pushes a_p^2 below 4*tan^2(alpha)*n_c^2.

    Fix vs. the original: it referenced `T.tensor.tan`, but the
    `import theano as T` line is commented out at module level, so this
    function raised NameError when used. TensorFlow's `tf.tan` (tf is
    imported at module level) is used instead.
    """
    alpha = K.constant(ALPHA)
    a_p = y_pred[:, 0, 0]
    n_c = y_pred[:, 1, 0]
    tan_alpha = tf.tan(alpha)
    return K.mean(K.maximum(K.constant(0),
                            K.square(a_p) - K.constant(4) * K.square(tan_alpha) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet loss (Schroff et al., https://arxiv.org/pdf/1503.03832.pdf):
    hinge on squared anchor-positive minus squared anchor-negative distance."""
    pos_dist = y_pred[:, 0, 0]
    neg_dist = y_pred[:, 1, 0]
    hinge = K.square(pos_dist) - K.square(neg_dist) + K.constant(MARGIN)
    return K.mean(K.maximum(K.constant(0), hinge))
def triplet_loss(y_true, y_pred):
    """Contrastive-style triplet loss: minimize the anchor-positive distance
    while pushing the anchor-negative distance toward MARGIN.

    y_pred stacks distances per sample: [:,0,0] anchor-positive,
    [:,1,0] anchor-negative.
    """
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
    # Unused variant that also pushed the positive-negative distance [:,2,0]
    # toward the margin:
    # return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]) + K.square(margin - y_pred[:,2,0]))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Triplet loss from "Deep Metric Learning with Improved Triplet Loss for
    Face Clustering in Videos".

    y_pred stacks three distances per sample: [:,0,0] anchor-positive,
    [:,1,0] anchor-negative, [:,2,0] positive-negative.
    """
    margin = K.constant(MARGIN)
    lambda_p = 0.02   # weight of the absolute anchor-positive penalty
    threshold = 0.1   # largest "free" anchor-positive distance
    a_p_distance = y_pred[:,0,0]
    a_n_distance = y_pred[:,1,0]
    p_n_distance = y_pred[:,2,0]
    # Relative term: pull a-p below the mean of the two negative distances.
    phi = a_p_distance - ((a_n_distance + p_n_distance) / 2) + margin
    # Absolute term: additionally penalize a-p distances above the threshold.
    psi = a_p_distance - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    # Metric: fraction of triplets whose anchor-positive distance ([:,0,0]) is
    # strictly smaller than the anchor-negative distance ([:,1,0]).
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # Project x onto the unit hypersphere: L2-normalize along the last axis.
    return K.l2_normalize(x, axis=-1)
def tanhNorm(x):
    """Squash the L2 norm of x through tanh: the direction is kept while the
    magnitude becomes tanh(||x||), bounding embeddings inside the unit ball."""
    norm_sq = K.sum(K.square(x), axis=-1, keepdims=True)
    # Clamp before sqrt so the gradient stays finite at zero.
    norm = K.sqrt(K.maximum(norm_sq, K.epsilon()))
    return x * (K.tanh(norm) / norm)
def euclidean_distance(vects):
    """Batched L2 distance between two tensors, clamped to K.epsilon() so the
    sqrt gradient is finite for identical inputs."""
    first, second = vects
    diff_sq = K.sum(K.square(first - second), axis=1, keepdims=True)
    return K.sqrt(K.maximum(diff_sq, K.epsilon()))
def n_c_angular_distance(vects):
    # Distance from the negative embedding to the midpoint (centroid) of the
    # anchor and positive embeddings — the n_c term of the angular loss.
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    # Anchor-to-positive Euclidean distance (the negative input is unused) —
    # the a_p term of the angular loss.
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an entity -> aliases mapping into one flat text list plus an
    index map.

    Returns (unique_text, entity2index): each key is followed by its aliases
    in unique_text, and entity2index maps every string to its position.
    """
    unique_text = []
    entity2index = {}
    for canonical, aliases in entity2same.items():
        entity2index[canonical] = len(unique_text)
        unique_text.append(canonical)
        for alias in aliases:
            entity2index[alias] = len(unique_text)
            unique_text.append(alias)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine hard triplets with an Annoy approximate-nearest-neighbor index.

    Embeds all sequences with `model`, indexes them, and for every canonical
    entity looks up its nearest neighbors. Neighbors that are NOT known
    aliases become negatives; known aliases are positives. Also computes a
    recall-style match statistic over the neighbor lists.

    Returns match/(match+no_match) when `test` is True, otherwise
    (triplets dict with 'anchor'/'positive'/'negative' lists, match ratio).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean')  # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        v = predictions[i]
        t.add_item(i, v)
    t.build(100)  # 100 trees
    match = 0
    no_match = 0
    accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    # Neighbor-list length differs between mining (train) and evaluation (test).
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives' is not defined here
        positives = expected_text
        negatives = nearest_text - expected_text
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                # Count how often the index already orders this triplet correctly.
                if dist_pos < dist_neg:
                    accuracy += 1
                total += 1
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, limit_pairs=False):
    """Cleanse raw entity strings into a canonical-name -> aliases mapping.

    Entities whose cleansed variant list is shorter than num_names are
    dropped; the first cleansed variant becomes the dictionary key and the
    rest its aliases.
    """
    num_names = 4  # minimum number of cleansed variants required per entity
    names_generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    entity2same = {}
    for entity in entities:
        ret = names_generator.cleanse_data(entity)
        if ret and len(ret) >= num_names:
            entity2same[ret[0]] = ret[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap the (frozen) embedding layer in a tiny model that flattens its
    per-token output into one vector per input sequence."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def get_hidden_layer(name, net, is_last):
    """Attach one 128-unit hidden layer to `net`: a GRU when USE_GRU (keeping
    the sequence dimension unless it is the last layer), else a Dense layer."""
    if not USE_GRU:
        return Dense(128, activation='relu', name=name)(net)
    if is_last:
        return GRU(128, activation='relu', name=name)(net)
    return GRU(128, return_sequences=True, activation='relu', name=name)(net)
def build_model(embedder):
    """Assemble the triplet-siamese network around the shared `embedder`.

    Returns a 4-tuple: (training model over stacked distances,
    anchor-positive distance model, anchor-negative distance model,
    single-input encoder model used for ANN indexing).
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # Fixed 4-layer GRU stack; all but the last return sequences so the next
    # GRU gets a 3-D input. NOTE(review): the NUM_LAYERS-driven variant in the
    # string below is disabled, so the layer count here is hard-coded.
    net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
    net = GRU(128, activation='relu', name='embed3')(net)
    """
    for i in range(0, NUM_LAYERS):
        net = get_hidden_layer('embed' + str(i), net, False)
    net = get_hidden_layer('embed_last', net, True)
    """
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    # One input per triplet role; all three share the same base encoder weights.
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    # Euclidean distances consumed by the loss and by the evaluation models.
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        # Angular loss consumes (anchor-positive, negative-to-centroid) distances.
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # Third distance (positive-negative) feeds the improved triplet loss.
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        # NOTE(review): loss is hard-wired to improved_loss here regardless of
        # the (disabled) --loss_function command-line option.
        model.compile(optimizer="rmsprop", loss=improved_loss, metrics=[accuracy])
    # Auxiliary models used for evaluation / embedding extraction only.
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
help='sample size for debug run')
parser.add_argument('--margin', type=int,
help='margin')
parser.add_argument('--loss_function', type=str,
help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
args = parser.parse_args()
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved_triplet_loss':
LOSS_FUNCTION=improved_triplet_loss
elif args.loss_function == 'modified_loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = true
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
# Fit the vocabulary over train AND test so both splits share one index space.
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    # Dry run of the ANN machinery only; skips training entirely.
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
# Iterative hard-negative mining: re-mine triplets from the current encoder's
# ANN index, retrain, and repeat until recall reaches 0.9 or num_iter passes.
while test_match_stats < .9 and counter < num_iter:
    counter += 1
    train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
    print("Match stats:" + str(match_stats))
    number_of_names = len(train_data['anchor'])
    # print(train_data['anchor'])
    print("number of names" + str(number_of_names))
    # Dummy labels: the triplet losses read only y_pred, never y_true.
    Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
    filepath="weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
    callbacks_list = [checkpoint, early_stop]
    train_seq = get_sequences(train_data, tokenizer)
    # check just for 5 epochs because this gets called many times
    model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
    current_model = inter_model
    # print some statistics on this epoch
    print("training data predictions")
    positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    print("f1score for train is: {}".format(f1score(positives, negatives)))
    print("test data predictions")
    positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    print("f1score for test is: {}".format(f1score(positives, negatives)))
    test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    print("Test stats:" + str(test_match_stats))
| 21,841 | 37.319298 | 167 | py |
fuzzyJoiner | fuzzyJoiner-master/old/ANNCharacteristics.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
import argparse
#must fix
MAX_NB_WORDS = 140000            # cap on tokenizer vocabulary size
EMBEDDING_DIM = 100              # width of the character-embedding vectors
MAX_SEQUENCE_LENGTH = 10         # tokens kept/padded per name
MARGIN=10                        # margin for triplet_loss
ALPHA=45                         # angle (used via tan) for angular_loss
DEBUG = False                    # when True, operate on a small fixed sample
DEBUG_DATA_LENGTH = 100          # sample size used when DEBUG is on
DEBUG_ANN = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None               # selected later from --loss_function
TRAIN_NEIGHBOR_LEN=20            # ANN neighborhood size when mining train triplets
TEST_NEIGHBOR_LEN=20             # ANN neighborhood size during evaluation
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
output_file_name_for_hpo = "val_dict_list.json"   # metrics dump consumed by HPO
def f1score(positive, negative):
    """Compute an F1-style score over paired triplet distances.

    A pair counts as a true positive when the anchor-positive distance is
    no larger than the anchor-negative distance; otherwise it counts as
    both a false negative and a false positive (the model both missed the
    match and preferred the wrong neighbor), matching the original scheme.

    Args:
        positive: anchor-positive distances, one per triplet.
        negative: anchor-negative distances, aligned with `positive`.

    Returns:
        2*TP / (2*TP + FN + FP), or 0.0 for empty input (the previous
        version raised ZeroDivisionError and carried an unused, misspelled
        `fsocre` accumulator).
    """
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for pos_dist, neg_dist in zip(positive, negative):
        if pos_dist <= neg_dist:
            true_positive += 1
        else:
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    denominator = (2 * true_positive) + false_negitive + false_positive
    if denominator == 0:
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding initialized from Kazuma char embeddings.

    Rows beyond MAX_NB_WORDS are left as zeros; a zero-sum embedding from the
    lookup is reported but still stored.
    """
    vocab = tokenizer.word_index
    vocab_size = len(vocab) + 1  # row 0 is the padding index
    weight_matrix = np.zeros((vocab_size, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for token, row in vocab.items():
        if row >= MAX_NB_WORDS:
            continue
        vector = kz.emb(token)
        if vector is None:
            continue
        if sum(vector) == 0:
            print("failed to find embedding for:" + token)
        weight_matrix[row] = vector
    print("Number of words:" + str(vocab_size))
    return Embedding(
        vocab_size,
        EMBEDDING_DIM,
        weights=[weight_matrix],
        input_length=MAX_SEQUENCE_LENGTH,
        trainable=False,
    )
def get_sequences(texts, tokenizer):
    """Tokenize and pad the anchor/negative/positive text lists of a triplet dict."""
    padded = {}
    for role in ('anchor', 'negative', 'positive'):
        encoded = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(encoded, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Return every line of `filepath` (newlines included) as a list."""
    with open(filepath) as fl:
        return [line for line in fl]
def read_file(file_path):
    """Read pipe-delimited triplets ("anchor|positive|negative") from a file.

    In DEBUG mode, stops after DEBUG_DATA_LENGTH + 1 lines.

    Returns:
        dict with 'anchor', 'positive' and 'negative' lists, aligned by index.
        The negative field keeps its trailing newline, as before.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # `with` guarantees the handle is closed (the previous version leaked it).
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl, start=1):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split=0.2):
    """Split entities into (train, test) lists.

    In DEBUG mode takes a deterministic prefix and skips shuffling; otherwise
    shuffles `entities` in place first (side effect preserved from the
    original).
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    # Bug fix: with a tiny input, int(test_split*len) can be 0 and the old
    # `ents[:-0]` / `ents[-0:]` slices returned an EMPTY train split and put
    # EVERYTHING into test. Return all-train / empty-test instead.
    if num_validation_samples == 0:
        return ents, []
    return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular loss over stacked [a-p, n-c] distances, parameterized by ALPHA."""
    alpha = K.constant(ALPHA)
    squared_ap = K.square(y_pred[:, 0, 0])
    squared_nc = K.square(y_pred[:, 1, 0])
    bound = K.constant(4) * K.square(tf.tan(alpha)) * squared_nc
    return K.mean(K.maximum(K.constant(0), squared_ap - bound))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet loss with a fixed 0.2 margin (arXiv:1503.03832)."""
    pos_sq = K.square(y_pred[:, 0, 0])
    neg_sq = K.square(y_pred[:, 1, 0])
    return K.mean(K.maximum(K.constant(0), pos_sq - neg_sq + K.constant(0.2)))
def triplet_loss(y_true, y_pred):
    """Contrastive-style loss: squared positive distance plus hinged negative."""
    hinge = K.maximum(K.constant(MARGIN) - y_pred[:, 1, 0], K.constant(0))
    return K.mean(K.square(y_pred[:, 0, 0]) + K.square(hinge))
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed loss: reward small positive and large negative distances."""
    pos_term = K.tanh(y_pred[:, 0, 0])
    neg_term = K.constant(1) - K.tanh(y_pred[:, 1, 0])
    return K.mean(pos_term + neg_term)
def triplet_tanh_pn_loss(y_true, y_pred):
    """Tanh loss averaging the anchor-negative and positive-negative terms."""
    anchor_pos = K.tanh(y_pred[:, 0, 0])
    anchor_neg = K.constant(1) - K.tanh(y_pred[:, 1, 0])
    pos_neg = K.constant(1) - K.tanh(y_pred[:, 2, 0])
    return K.mean(anchor_pos + (anchor_neg + pos_neg) / K.constant(2))
# Triplet loss variant from "Deep Metric Learning with Improved Triplet Loss
# for Face Clustering in Videos".
def improved_loss(y_true, y_pred):
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    a_p = y_pred[:, 0, 0]
    a_n = y_pred[:, 1, 0]
    p_n = y_pred[:, 2, 0]
    # phi: positive pair should be closer than the mean of both negative
    # distances by `margin`; psi: positive distance capped near `threshold`.
    phi = a_p - ((a_n + p_n) / K.constant(2)) + margin
    psi = a_p - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    """Fraction of triplets whose positive is closer than their negative."""
    positive_closer = y_pred[:, 0, 0] < y_pred[:, 1, 0]
    return K.mean(positive_closer)
def l2Norm(x):
    """L2-normalize `x` along its last axis (used inside a Lambda layer)."""
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    """Row-wise Euclidean distance between two equally-shaped tensors."""
    left, right = vects
    squared = K.sum(K.square(left - right), axis=1, keepdims=True)
    # Clamp at epsilon so the sqrt gradient stays finite at zero distance.
    return K.sqrt(K.maximum(squared, K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint (angular loss)."""
    x_a, x_p, x_n = vects
    centroid = (x_a + x_p) / K.constant(2)
    squared = K.sum(K.square(x_n - centroid), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive Euclidean distance (angular-loss formulation)."""
    x_a, x_p, _unused_negative = vects
    squared = K.sum(K.square(x_a - x_p), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten canonical names and their aliases into one ordered list plus a
    name -> position map (insertion order of the input dict is preserved)."""
    ordered_names = []
    name_to_index = {}
    for canonical, aliases in entity2same.items():
        for name in [canonical] + list(aliases):
            name_to_index[name] = len(ordered_names)
            ordered_names.append(name)
    return ordered_names, name_to_index
def characterize_dataset(model, sequences, entity2unique, entity2same, unique_text, nnlens):
    """Report distance statistics and recall of an ANN index over the embeddings.

    Builds an Annoy index from `model`'s predictions, then for each
    neighborhood size in `nnlens` prints positive/negative distance stats
    and the recall of true aliases found among the nearest neighbors.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean')  # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    for nnlen in nnlens:
        print("Characteristics at neighborhood length:" + str(nnlen))
        pos_distances = []
        neg_distances = []
        match = 0
        no_match = 0
        for key in entity2same:
            index = entity2unique[key]
            nearest = t.get_nns_by_vector(predictions[index], nnlen)
            nearest_text = set([unique_text[i] for i in nearest])
            expected_text = set(entity2same[key])
            overlap = expected_text.intersection(nearest_text)
            m = len(overlap)
            match += m
            # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
            # make sure we adjust our estimate of no match appropriately
            no_match += min(len(expected_text), nnlen - 1) - m
            # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
            if key in nearest_text:
                nearest_text.remove(key)
            # sample only the negatives that are true negatives
            # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
            pos = expected_text
            neg = nearest_text - expected_text
            for i in pos:
                dist_pos = t.get_distance(index, entity2unique[i])
                pos_distances.append(dist_pos)
            for i in neg:
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
        # NOTE(review): raises ZeroDivisionError / statistics errors if no
        # entity produced any matches or distances — confirm inputs are
        # non-trivial before calling.
        recall = match / (match + no_match)
        print("mean positive distance:" + str(statistics.mean(pos_distances)))
        print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
        print("max positive distance:" + str(max(pos_distances)))
        print("mean neg distance:" + str(statistics.mean(neg_distances)))
        print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
        print("max neg distance:" + str(max(neg_distances)))
        print("recall:" + str(recall))
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine (anchor, positive, negative) triplets from an Annoy index.

    Embeds all names with `model`, indexes them, and for each canonical name
    pairs its true aliases against nearest neighbors that are NOT aliases
    (hard negatives). Also writes an accuracy/steps record to
    `output_file_name_for_hpo` for hyper-parameter optimization.

    Returns:
        recall alone when `test` is True, otherwise (triplets dict, recall).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean')  # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # Distance bookkeeping over (found positive, mined negative) pairs;
        # counts how often the index already orders them correctly.
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # Emit triplets over ALL expected aliases (not just the found ones).
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    # NOTE(review): the statistics/accuracy lines below divide by counts that
    # are zero when no pairs were mined — confirm inputs are non-trivial.
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity strings and map each canonical name to its aliases.

    People require 4 name variants via NameDataCleanser; companies require 2
    via CompanyDataCleanser. Entities producing fewer variants are dropped.
    """
    if people:
        required = 4
        cleanser = NameDataCleanser(0, required, limit_pairs=limit_pairs)
    else:
        required = 2
        cleanser = CompanyDataCleanser(limit_pairs)
    canonical_to_aliases = {}
    for raw_entity in entities:
        cleansed = cleanser.cleanse_data(raw_entity)
        if not cleansed or len(cleansed) < required:
            continue
        canonical_to_aliases[cleansed[0]] = cleansed[1:]
    return canonical_to_aliases
def embedded_representation_model(embedding_layer):
    """Return a model that applies the frozen embedding and flattens it."""
    pipeline = Sequential()
    pipeline.add(embedding_layer)
    pipeline.add(Flatten())
    return pipeline
def build_model(embedder):
    """Assemble the triplet-siamese network around the shared `embedder`.

    Returns a 4-tuple: (training model over stacked distances,
    anchor-positive distance model, anchor-negative distance model,
    single-input encoder model used for ANN indexing).
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # NUM_LAYERS sequence-returning GRUs followed by one collapsing GRU.
    # NOTE(review): relies on the loop variable `i` leaking out of the for
    # loop — NUM_LAYERS == 0 would raise NameError on the next line.
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    # One input per triplet role; all three share the same base encoder weights.
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    # Euclidean distances consumed by the loss and by the evaluation models.
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # Third distance (positive-negative) feeds the selected LOSS_FUNCTION.
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    # Auxiliary models used for evaluation / embedding extraction only.
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# --- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='Characterize the dataset')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
args = parser.parse_args()
# Map the loss-function name onto the module-level configuration globals.
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
# --use_l2_norm is parsed as a string and interpreted as a truthy flag.
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
entity2same = generate_names(entities, people)
print("number of entities")
print(len(entity2same))
number_of_names = []
for i in entity2same:
    number_of_names.append(len(entity2same[i]))
print("mean number of names:" + str(statistics.mean(number_of_names)))
print("max number of names:" + str(max(number_of_names)))
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same)
tokenizer.fit_on_texts(unique_text)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
embedder_model = embedded_representation_model(embedder)
# Characterize the raw (untrained) embedding space at several neighborhood sizes.
characterize_dataset(embedder_model, sequences, entity2unique, entity2same, unique_text, [20, 100, 500, 1500])
| 19,558 | 35.355019 | 134 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenet.py | from sys import argv
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, LSTM
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
import numpy as np
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
import Named_Entity_Recognition_Modified
#must fix
MAX_NB_WORDS = 140000        # cap on tokenizer vocabulary size
EMBEDDING_DIM = 100          # width of the character-embedding vectors
MAX_SEQUENCE_LENGTH = 10     # tokens kept/padded per name
DEBUG = False                # when True, truncate the input to DEBUG_DATA_LENGTH
DEBUG_DATA_LENGTH = 1000000  # line cap used when DEBUG is on
def f1score(positive, negative):
    """Compute an F1-style score over paired triplet distances.

    A pair counts as a true positive when the anchor-positive distance is
    no larger than the anchor-negative distance; otherwise it counts as
    both a false negative and a false positive, matching the original scheme.

    Args:
        positive: anchor-positive distances, one per triplet.
        negative: anchor-negative distances, aligned with `positive`.

    Returns:
        2*TP / (2*TP + FN + FP), or 0.0 for empty input (the previous
        version raised ZeroDivisionError and carried an unused, misspelled
        `fsocre` accumulator).
    """
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for pos_dist, neg_dist in zip(positive, negative):
        if pos_dist <= neg_dist:
            true_positive += 1
        else:
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    denominator = (2 * true_positive) + false_negitive + false_positive
    if denominator == 0:
        return 0.0
    return (2 * true_positive) / denominator
def get_random_image(img_groups, group_names, gid):
    """Pick a random photo filename from the group at index `gid`."""
    group = group_names[gid]
    photos = img_groups[group]
    chosen = photos[np.random.choice(np.arange(len(photos)), size=1)[0]]
    return group + chosen + ".jpg"
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding initialized from Kazuma char embeddings.

    Rows beyond MAX_NB_WORDS are left as zeros; a zero-sum embedding from the
    lookup is reported but still stored.
    """
    vocab = tokenizer.word_index
    vocab_size = len(vocab) + 1  # row 0 is the padding index
    weight_matrix = np.zeros((vocab_size, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for token, row in vocab.items():
        if row >= MAX_NB_WORDS:
            continue
        vector = kz.emb(token)
        if vector is None:
            continue
        if sum(vector) == 0:
            print("failed to find embedding for:" + token)
        weight_matrix[row] = vector
    return Embedding(
        vocab_size,
        EMBEDDING_DIM,
        weights=[weight_matrix],
        input_length=MAX_SEQUENCE_LENGTH,
        trainable=False,
    )
def get_tokenizer(texts):
    """Fit a Keras tokenizer over all anchor/negative/positive strings."""
    corpus = texts['anchor'] + texts['negative'] + texts['positive']
    fitted = Tokenizer(num_words=MAX_NB_WORDS)
    fitted.fit_on_texts(corpus)
    return fitted
def get_sequences(texts, tokenizer):
    """Tokenize and pad the anchor/negative/positive text lists of a triplet dict."""
    padded = {}
    for role in ('anchor', 'negative', 'positive'):
        encoded = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(encoded, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_file(file_path):
    """Read "anchor|positive|negative" triplets from a pipe-delimited file.

    The trailing newline is stripped from the negative (last) field.
    In DEBUG mode, stops after DEBUG_DATA_LENGTH + 1 lines.

    Returns:
        dict with 'anchor', 'positive' and 'negative' lists, aligned by index.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # `with` guarantees the handle is closed (the previous version leaked it).
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl, start=1):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            # removes the new line charecter at the end
            texts['negative'].append(line_array[2][:-1])
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def get_test(texts, sequences, percent):
    """Shuffle the triplet data and split the sequences into train and test.

    Args:
        texts: dict of 'anchor'/'positive'/'negative' string lists. Mutated:
            the lists are replaced by numpy arrays (preserved side effect).
        sequences: dict of aligned sequence arrays (same first-axis length).
        percent: fraction of rows reserved for the test split.

    Returns:
        (train sequences, test sequences, shuffled texts) — the texts are
        reordered with the same permutation but NOT split.
    """
    roles = ('anchor', 'positive', 'negative')
    row_count = sequences['anchor'].shape[0]
    indices = np.arange(row_count)
    np.random.shuffle(indices)
    ret_sequence = {role: sequences[role][indices] for role in roles}
    num_validation_samples = int(percent * row_count)
    # Bug fix: the old `x[:-n]` / `x[-n:]` slicing produced an EMPTY training
    # set (and an all-data test set) whenever num_validation_samples was 0.
    # An explicit cut point splits identically for n > 0 and sanely for n == 0.
    cut = row_count - num_validation_samples
    ret_train = {role: ret_sequence[role][:cut] for role in roles}
    ret_test = {role: ret_sequence[role][cut:] for role in roles}
    ret_texts = {}
    for role in roles:
        texts[role] = np.array(texts[role])
        ret_texts[role] = texts[role][indices]
    return ret_train, ret_test, ret_texts
def triplet_loss(y_true, y_pred):
    """Margin-1 triplet loss over stacked [positive, negative] distances."""
    pos_sq = K.square(y_pred[:, 0, 0])
    neg_sq = K.square(y_pred[:, 1, 0])
    return K.mean(K.maximum(K.constant(0), pos_sq - neg_sq + K.constant(1)))
def accuracy(y_true, y_pred):
    """Fraction of triplets whose positive is closer than their negative."""
    positive_closer = y_pred[:, 0, 0] < y_pred[:, 1, 0]
    return K.mean(positive_closer)
def l2Norm(x):
    """L2-normalize `x` along its last axis (used inside a Lambda layer)."""
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    """Row-wise Euclidean distance between two equally-shaped tensors."""
    left, right = vects
    squared = K.sum(K.square(left - right), axis=1, keepdims=True)
    # Clamp at epsilon so the sqrt gradient stays finite at zero distance.
    return K.sqrt(K.maximum(squared, K.epsilon()))
def do_annoy(model, texts, tokenizer, verbose):
    """Index embedded names with Annoy and report nearest-neighbor matches.

    Builds a de-duplicated anchor -> positives map from the triplet texts,
    embeds every unique name with `model`, then for each anchor checks how
    many of its true positives appear among its 5 nearest neighbors.
    """
    unique_text = []
    entity_idx = []
    entity2same = {}
    # Group positives under their anchor, recording each name once.
    for i in range(len(texts['anchor'])):
        if not texts['anchor'][i] in entity2same:
            entity2same[texts['anchor'][i]] = []
            entity_idx.append(len(unique_text))
            unique_text.append(texts['anchor'][i])
        l = entity2same[texts['anchor'][i]]
        if texts['positive'][i] not in l:
            entity2same[texts['anchor'][i]].append(texts['positive'][i])
            unique_text.append(texts['positive'][i])
    print(entity2same)
    print(unique_text)
    sequences = tokenizer.texts_to_sequences(unique_text)
    sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    for i in range(len(predictions)):
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    for index in entity_idx:
        nearest = t.get_nns_by_vector(predictions[index], 5)
        print(nearest)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[unique_text[index]])
        # Annoy returns the queried item among its own neighbors; drop it.
        if unique_text[index] in nearest_text:
            nearest_text.remove(unique_text[index])
        print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        if verbose:
            print([t.get_distance(index, i) for i in nearest])
        overlap = expected_text.intersection(nearest_text)
        print(overlap)
        m = len(overlap)
        match += m
        no_match += len(expected_text) - m
    print("match: {} no_match: {}".format(match, no_match))
def print_deb_data(debbuging_data):
for i in range(debbuging_data['number']):
print('anch: --{}-- pos:--{}-- neg:--{}--'.format(debbuging_data['texts']['anchor'][i], debbuging_data['texts']['positive'][i], debbuging_data['texts']['negative'][i]))
print('sequences: anch: --{}-- pos:--{}-- neg:--{}--'.format(debbuging_data['sequences']['anchor'][i], debbuging_data['sequences']['positive'][i], debbuging_data['sequences']['negative'][i]))
def debugging_text_and_sequences(reordered_text, training_data, number):
debbuging_data = {}
debbuging_data['number'] = number
debbuging_data['sequences'] = {}
debbuging_data['texts'] = {}
debbuging_data['sequences']['anchor'] = []
debbuging_data['sequences']['positive'] = []
debbuging_data['sequences']['negative'] = []
debbuging_data['texts']['anchor'] = []
debbuging_data['texts']['positive'] = []
debbuging_data['texts']['negative'] = []
for i in range(number):
debbuging_data['texts']['anchor'].append(reordered_text['anchor'][i])
debbuging_data['texts']['positive'].append(reordered_text['positive'][i])
debbuging_data['texts']['negative'].append(reordered_text['negative'][i])
debbuging_data['sequences']['anchor'].append(training_data['anchor'][i])
debbuging_data['sequences']['positive'].append(training_data['positive'][i])
debbuging_data['sequences']['negative'].append(training_data['negative'][i])
return debbuging_data
# triples_data = create_triples(IMAGE_DIR)
texts = read_file(argv[1])
print("anchor: {} positive: {} negative: {}".format(texts['anchor'][0], texts['positive'][0], texts['negative'][0]))
tokenizer = get_tokenizer(texts)
print('got tokenizer')
sequences = get_sequences(texts, tokenizer)
train_data, test_data, reordered_text = get_test(texts, sequences, 0.05)
debbuging_data = debugging_text_and_sequences(reordered_text, train_data, 20)
number_of_names = len(train_data['anchor'])
print('sequenced words')
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
embedder = get_embedding_layer(tokenizer)
print('got embeddings')
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
net = Flatten(name='flatten')(net)
net = Dense(128, activation='relu', name='embed')(net)
net = Dense(128, activation='relu', name='embed2')(net)
net = Dense(128, activation='relu', name='embed3')(net)
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
print(base_model.summary())
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
positive_dist = Lambda(euclidean_distance, name='pos_dist')([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist')([net_anchor, net_negative])
stacked_dists = Lambda(
lambda vects: K.stack(vects, axis=1),
name='stacked_dists'
)([positive_dist, negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=triplet_loss, metrics=[accuracy])
model.fit([train_data['anchor'], train_data['positive'], train_data['negative']], Y_train, epochs=10, batch_size=15, validation_split=0.2)
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
print("training data predictions")
positives = test_positive_model.predict([train_data['anchor'], train_data['positive'], train_data['negative']])
negatives = test_negative_model.predict([train_data['anchor'], train_data['positive'], train_data['negative']])
print("f1score is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_data['anchor'], test_data['positive'], test_data['negative']])
negatives = test_negative_model.predict([test_data['anchor'], test_data['positive'], test_data['negative']])
print("f1score is: {}".format(f1score(positives, negatives)))
# model.save('triplet_loss_resnet50.h5')
inter_model = Model(input_anchor, net_anchor)
do_annoy(inter_model, texts, tokenizer, False)
print('annoy on embeddings for debbuging_data')
do_annoy(Named_Entity_Recognition_Modified.embedded_representation(embedder), debbuging_data['texts'], tokenizer, True)
print('annoy on full model for debbuging_data')
do_annoy(inter_model, debbuging_data['texts'], tokenizer, True)
print_deb_data(debbuging_data) | 12,095 | 37.893891 | 199 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-8.31.18.py | from random import shuffle
import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
from scipy.stats.mstats import gmean
import argparse
#must fix
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
MARGIN=10
ALPHA=45
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
filepath="weights.best.hdf5"
output_file_name_for_hpo = "val_dict_list.json"
def f1score(positive, negative):
#labels[predictions.ravel() < 0.5].sum()
fsocre = 0.0
true_positive = 0.0
false_positive = 0
false_negitive = 0
for i in range(len(positive)):
if positive[i] <= negative[i]:
true_positive += 1
else:
false_negitive += 1
false_positive += 1
print('tp' + str(true_positive))
print('fp' + str(false_positive))
print('fn' + str(false_negitive))
fscore = (2 * true_positive) / ((2 * true_positive) + false_negitive + false_positive)
return fscore
def get_embedding_layer(tokenizer):
word_index = tokenizer.word_index
num_words = len(word_index) + 1
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
print('about to get kz')
kz = KazumaCharEmbedding()
print('got kz')
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
if embedding_vector is not None:
if sum(embedding_vector) == 0:
print(str("failed to find embedding for:" + word).encode('utf-8'))
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
print("Number of words:" + str(num_words))
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
return embedding_layer
def get_sequences(texts, tokenizer):
sequences = {}
sequences['anchor'] = tokenizer.texts_to_sequences(texts['anchor'])
sequences['anchor'] = pad_sequences(sequences['anchor'], maxlen=MAX_SEQUENCE_LENGTH)
sequences['negative'] = tokenizer.texts_to_sequences(texts['negative'])
sequences['negative'] = pad_sequences(sequences['negative'], maxlen=MAX_SEQUENCE_LENGTH)
sequences['positive'] = tokenizer.texts_to_sequences(texts['positive'])
sequences['positive'] = pad_sequences(sequences['positive'], maxlen=MAX_SEQUENCE_LENGTH)
return sequences
def read_entities(filepath):
entities = []
with open(filepath, 'r', encoding='utf8') as fl:
for line in fl:
entities.append(line)
return entities
def read_file(file_path):
texts = {'anchor':[], 'negative':[], 'positive':[]}
fl = open(file_path, 'r', encoding='utf8')
i = 0
for line in fl:
line_array = line.split("|")
texts['anchor'].append(line_array[0])
texts['positive'].append(line_array[1])
texts['negative'].append(line_array[2])
i += 1
if i > DEBUG_DATA_LENGTH and DEBUG:
break
return texts
def split(entities, test_split = 0.2):
if DEBUG:
ents = entities[0:DEBUG_DATA_LENGTH]
else:
random.shuffle(entities)
ents = entities
num_validation_samples = int(test_split * len(ents))
return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(0.2)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
margin = K.constant(MARGIN)
return K.mean(K.square(y_pred[:,0,0]) + K.square(K.maximum(margin - y_pred[:,1,0], K.constant(0))))
def triplet_tanh_loss(y_true, y_pred):
return K.mean(K.tanh(y_pred[:,0,0]) + (K.constant(1) - K.tanh(y_pred[:,1,0])))
def triplet_tanh_pn_loss(y_true, y_pred):
return K.mean(K.tanh(y_pred[:,0,0]) +
((K.constant(1) - K.tanh(y_pred[:,1,0])) +
(K.constant(1) - K.tanh(y_pred[:,2,0]))) / K.constant(2));
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
margin = K.constant(1)
lambda_p = K.constant(0.02)
threshold = K.constant(0.1)
a_p_distance = y_pred[:,0,0]
a_n_distance = y_pred[:,1,0]
p_n_distance = y_pred[:,2,0]
phi = a_p_distance - ((a_n_distance + p_n_distance) / K.constant(2)) + margin
psi = a_p_distance - threshold
return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
unique_text = []
entity2index = {}
for key in entity2same:
entity2index[key] = len(unique_text)
unique_text.append(key)
vals = entity2same[key]
for v in vals:
entity2index[v] = len(unique_text)
unique_text.append(v)
return unique_text, entity2index
def generate_semi_hard_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
predictions = model.predict(sequences)
t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
t.set_seed(123)
for i in range(len(predictions)):
# print(predictions[i])
v = predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
triplets = {}
triplets['anchor'] = []
triplets['positive'] = []
triplets['negative'] = []
if test:
NNlen = TEST_NEIGHBOR_LEN
else:
NNlen = TRAIN_NEIGHBOR_LEN
for key in entity2same:
index = entity2unique[key]
expected_text = set(entity2same[key])
expected_ids = [entity2unique[i] for i in expected_text]
for positive in expected_text:
k = entity2unique[positive]
nearest = t.get_nns_by_vector(predictions[k], NNlen)
dist_k = t.get_distance(index, k)
semi_hards = []
for n in nearest:
if n == index or n in expected_ids or n == k:
continue
n_dist = t.get_distance(index, n)
if n_dist > dist_k:
semi_hards.append(unique_text[n])
# shuffle(semi_hards)
# semi_hards = semi_hards[0:20]
for i in semi_hards:
triplets['anchor'].append(key)
triplets['positive'].append(unique_text[k])
triplets['negative'].append(i)
return triplets
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
predictions = model.predict(sequences)
t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
t.set_seed(123)
for i in range(len(predictions)):
# print(predictions[i])
v = predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
match = 0
no_match = 0
ann_accuracy = 0
total = 0
precise = 0
triplets = {}
closest_positive_counts = []
pos_distances = []
neg_distances = []
all_pos_distances = []
all_neg_distances = []
triplets['anchor'] = []
triplets['positive'] = []
triplets['negative'] = []
if test:
NNlen = TEST_NEIGHBOR_LEN
else:
NNlen = TRAIN_NEIGHBOR_LEN
for key in entity2same:
index = entity2unique[key]
nearest = t.get_nns_by_vector(predictions[index], NNlen)
nearest_text = set([unique_text[i] for i in nearest])
expected_text = set(entity2same[key])
# annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
if key in nearest_text:
nearest_text.remove(key)
# print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
overlap = expected_text.intersection(nearest_text)
# collect up some statistics on how well we did on the match
m = len(overlap)
match += m
# since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
# make sure we adjust our estimate of no match appropriately
no_match += min(len(expected_text), NNlen - 1) - m
# sample only the negatives that are true negatives
# that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
# positives = expected_text - nearest_text
positives = overlap
negatives = nearest_text - expected_text
# print(key + str(expected_text) + str(nearest_text))
for i in negatives:
for j in positives:
dist_pos = t.get_distance(index, entity2unique[j])
pos_distances.append(dist_pos)
dist_neg = t.get_distance(index, entity2unique[i])
neg_distances.append(dist_neg)
if dist_pos < dist_neg:
ann_accuracy += 1
total += 1
# print(key + "|" + j + "|" + i)
# print(dist_pos)
# print(dist_neg)
min_neg_distance = 1000000
for i in negatives:
dist_neg = t.get_distance(index, entity2unique[i])
all_neg_distances.append(dist_neg)
if dist_neg < min_neg_distance:
min_neg_distance = dist_neg
for j in expected_text:
dist_pos = t.get_distance(index, entity2unique[j])
all_pos_distances.append(dist_pos)
closest_pos_count = 0
for p in overlap:
dist_pos = t.get_distance(index, entity2unique[p])
if dist_pos < min_neg_distance:
closest_pos_count+=1
if closest_pos_count > 0:
precise+=1
closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
for i in negatives:
for j in expected_text:
triplets['anchor'].append(key)
triplets['positive'].append(j)
triplets['negative'].append(i)
print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
print("mean positive distance:" + str(statistics.mean(pos_distances)))
print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
print("max positive distance:" + str(max(pos_distances)))
print("mean neg distance:" + str(statistics.mean(neg_distances)))
print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
print("max neg distance:" + str(max(neg_distances)))
print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
print("max all positive distance:" + str(max(all_pos_distances)))
print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
print("max all neg distance:" + str(max(all_neg_distances)))
print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
print("Precision at 1: " + str(precise / len(entity2same)))
obj = {}
obj['accuracy'] = ann_accuracy / total
obj['steps'] = 1
with open(output_file_name_for_hpo, 'w', encoding='utf8') as out:
json.dump(obj, out)
if test:
return match/(match + no_match)
else:
return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
if people:
num_names = 4
generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
else:
generator = CompanyDataCleanser(limit_pairs)
num_names = 2
entity2same = {}
for entity in entities:
ret = generator.cleanse_data(entity)
if ret and len(ret) >= num_names:
entity2same[ret[0]] = ret[1:]
return entity2same
def embedded_representation_model(embedding_layer):
seq = Sequential()
seq.add(embedding_layer)
seq.add(Flatten())
return seq
def build_model(embedder):
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
for i in range(0, NUM_LAYERS):
net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
if USE_L2_NORM:
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
base_model.summary()
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
if USE_ANGULAR_LOSS:
n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
stacked_dists = Lambda(
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([a_p, n_c])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
else:
exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
stacked_dists = Lambda(
# lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([positive_dist, negative_dist, exemplar_negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
inter_model = Model(input_anchor, net_anchor)
print("output_shapes")
model.summary()
# print(positive_dist.output_shape)
# print(negative_dist.output_shape)
# print(exemplar_negative_dist)
# print(neg_dist.output_shape)
return model, test_positive_model, test_negative_model, inter_model
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
help='sample size for debug run')
parser.add_argument('--margin', type=int,
help='margin')
parser.add_argument('--loss_function', type=str,
help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
args = parser.parse_args()
filepath = args.model
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = True
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(str(train).encode('utf-8'))
print("TEST")
print(str(test).encode('utf-8'))
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(str(entity2same_train).encode('utf-8'))
print(str(entity2same_test).encode('utf-8'))
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
triplets = generate_triplets_from_ANN(embedder_model, sequences, entity2unique, entity2same_train, unique_text, True)
print(len(triplets['anchor']))
sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
counter = 0
current_model = embedder_model
prev_match_stats = 0
train_data, train_match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 22,999 | 35.624204 | 163 | py |
fuzzyJoiner | fuzzyJoiner-master/old/image_join2.py | 0 | 0 | 0 | py | |
fuzzyJoiner | fuzzyJoiner-master/old/seq2seqTriplet.py | '''Sequence to sequence example in Keras (character-level).
This script demonstrates how to implement a basic character-level
sequence-to-sequence model. We apply it to translating
short English sentences into short French sentences,
character-by-character. Note that it is fairly unusual to
do character-level machine translation, as word-level
models are more common in this domain.
# Summary of the algorithm
- We start with input sequences from a domain (e.g. English sentences)
and correspding target sequences from another domain
(e.g. French sentences).
- An encoder LSTM turns input sequences to 2 state vectors
(we keep the last LSTM state and discard the outputs).
- A decoder LSTM is trained to turn the target sequences into
the same sequence but offset by one timestep in the future,
a training process called "teacher forcing" in this context.
Is uses as initial state the state vectors from the encoder.
Effectively, the decoder learns to generate `targets[t+1...]`
given `targets[...t]`, conditioned on the input sequence.
- In inference mode, when we want to decode unknown input sequences, we:
- Encode the input sequence into state vectors
- Start with a target sequence of size 1
(just the start-of-sequence character)
- Feed the state vectors and 1-char target sequence
to the decoder to produce predictions for the next character
- Sample the next character using these predictions
(we simply use argmax).
- Append the sampled character to the target sequence
- Repeat until we generate the end-of-sequence character or we
hit the character limit.
# Data download
English to French sentence pairs.
http://www.manythings.org/anki/fra-eng.zip
Lots of neat sentence pairs datasets can be found at:
http://www.manythings.org/anki/
# References
- Sequence to Sequence Learning with Neural Networks
https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
RNN Encoder-Decoder for Statistical Machine Translation
https://arxiv.org/abs/1406.1078
'''
from __future__ import print_function
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
from keras.callbacks import ModelCheckpoint, EarlyStopping
batch_size = 64 # Batch size for training.
epochs = 100 # Number of epochs to train for.
latent_dim = 256 # Latent dimensionality of the encoding space.
num_samples = 100000 # Number of samples to train on.
# Path to the data txt file on disk.
data_path = 'tripletTranslate'
# Vectorize the data.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
lines = f.read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
arr = line.split('|')
input_text = arr[0]
target_text = arr[1]
arr = input_text.split()
# data quality issues. In the canonical name, the first name had better not be just a letter or empty
if arr[0].strip()=='' or len(arr[0].strip()) <= 1:
continue
# We use "tab" as the "start sequence" character
# for the targets, and "\n" as "end sequence" character.
target_text = '\t' + target_text + '\n'
input_texts.append(input_text)
target_texts.append(target_text)
for char in input_text:
if char not in input_characters:
input_characters.add(char)
for char in target_text:
if char not in target_characters:
target_characters.add(char)
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
input_token_index = dict(
[(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
[(char, i) for i, char in enumerate(target_characters)])
encoder_input_data = np.zeros(
(len(input_texts), max_encoder_seq_length, num_encoder_tokens),
dtype='float32')
decoder_input_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype='float32')
decoder_target_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype='float32')
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
for t, char in enumerate(input_text):
encoder_input_data[i, t, input_token_index[char]] = 1.
for t, char in enumerate(target_text):
# decoder_target_data is ahead of decoder_input_data by one timestep
decoder_input_data[i, t, target_token_index[char]] = 1.
if t > 0:
# decoder_target_data will be ahead by one timestep
# and will not include the start character.
decoder_target_data[i, t - 1, target_token_index[char]] = 1.
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
filepath = 's2s.h5'
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
# check 5 epochs
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks_list,
validation_split=0.2)
# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state
# and a "start of sequence" token as target.
# Output will be the next target token
# 3) Repeat with the current target token and current states
# Define sampling models
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
# Reverse-lookup token index to decode sequences back to
# something readable.
reverse_input_char_index = dict(
(i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
(i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
    """Greedily decode one encoded input sequence into a target string."""
    # Encode the source sequence into the initial decoder (h, c) state.
    states_value = encoder_model.predict(input_seq)
    # Seed the decoder with the start-of-sequence token '\t'.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, target_token_index['\t']] = 1.
    decoded_sentence = ''
    while True:
        output_tokens, state_h, state_c = decoder_model.predict(
            [target_seq] + states_value)
        # Pick the most probable next character.
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char
        # Stop on the end-of-sequence marker or when the output grows too long.
        if sampled_char == '\n' or len(decoded_sentence) > max_decoder_seq_length:
            break
        # Feed the sampled character back in as the next decoder input.
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        states_value = [state_h, state_c]
    return decoded_sentence
# Decode a sample of training sequences to eyeball translation quality.
for seq_index in range(100):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = encoder_input_data[seq_index: seq_index + 1]
    decoded_sentence = decode_sequence(input_seq)
    print('-')
    print('Input sentence:', input_texts[seq_index])
print('Decoded sentence:', decoded_sentence) | 9,670 | 39.634454 | 106 | py |
fuzzyJoiner | fuzzyJoiner-master/old/image_join.py | 0 | 0 | 0 | py | |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM_hpo.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
import argparse
#must fix
# Tokenizer vocabulary cap and embedding geometry.
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
# Triplet-loss margin and angular-loss angle.
MARGIN=10
ALPHA=45
# Debug switches: truncate the dataset / exercise only the ANN path.
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
# How many approximate nearest neighbours to mine per anchor.
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
# File the HPO harness reads the validation metric from.
output_file_name_for_hpo = "val_dict_list.json"
def f1score(positive, negative):
    """Compute the F1 score for triplet distance predictions.

    A pair counts as a true positive when the anchor-positive distance is
    no greater than the anchor-negative distance; otherwise it counts as
    both a false positive and a false negative (the match was ranked wrong
    in both directions).

    Args:
        positive: sequence of anchor-positive distances.
        negative: sequence of anchor-negative distances (same length).

    Returns:
        The F1 score as a float; 0.0 for empty input.
    """
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for pos, neg in zip(positive, negative):
        if pos <= neg:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    if denominator == 0:
        # no pairs at all -- avoid ZeroDivisionError
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Every word known to the tokenizer (up to MAX_NB_WORDS) is looked up in
    the KazumaCharEmbedding table; words with no real embedding keep an
    all-zero row.
    """
    vocab = tokenizer.word_index
    vocab_size = len(vocab) + 1
    weights = np.zeros((vocab_size, EMBEDDING_DIM))
    print('about to get kz')
    char_embedder = KazumaCharEmbedding()
    print('got kz')
    for word, idx in vocab.items():
        if idx >= MAX_NB_WORDS:
            continue
        vector = char_embedder.emb(word)
        if vector is not None:
            if sum(vector) == 0:
                print("failed to find embedding for:" + word)
            # rows for words absent from the embedding table stay all-zero
            weights[idx] = vector
    print("Number of words:" + str(vocab_size))
    return Embedding(vocab_size,
                     EMBEDDING_DIM,
                     weights=[weights],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_sequences(texts, tokenizer):
    """Vectorize and pad the anchor/positive/negative name lists."""
    padded = {}
    for role in ('anchor', 'negative', 'positive'):
        seqs = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(seqs, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Return every line of *filepath* as a list (newline terminators kept)."""
    with open(filepath) as handle:
        return list(handle)
def read_file(file_path):
    """Parse a triplet file of "anchor|positive|negative" lines.

    Returns a dict with 'anchor', 'positive' and 'negative' lists. In DEBUG
    mode reading stops once DEBUG_DATA_LENGTH lines have been consumed.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # use a context manager so the file handle is always closed
    # (the original opened the file and never closed it)
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl, start=1):
            fields = line.split("|")
            texts['anchor'].append(fields[0])
            texts['positive'].append(fields[1])
            texts['negative'].append(fields[2])
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split entities into (train, test).

    In DEBUG mode a fixed prefix slice is used; otherwise the input list is
    shuffled in place first (side effect preserved from the original).
    """
    if DEBUG:
        pool = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        pool = entities
    holdout = int(test_split * len(pool))
    return pool[:-holdout], pool[-holdout:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular loss: hinge on d(a,p)^2 - 4*tan^2(alpha)*d(n,center)^2."""
    alpha = K.constant(ALPHA)
    anchor_positive = y_pred[:, 0, 0]
    negative_center = y_pred[:, 1, 0]
    scale = K.constant(4) * K.square(tf.tan(alpha))
    penalty = K.square(anchor_positive) - scale * K.square(negative_center)
    return K.mean(K.maximum(K.constant(0), penalty))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet loss: hinge on d(a,p)^2 - d(a,n)^2 + 0.2."""
    pos_sq = K.square(y_pred[:, 0, 0])
    neg_sq = K.square(y_pred[:, 1, 0])
    hinge = pos_sq - neg_sq + K.constant(0.2)
    return K.mean(K.maximum(K.constant(0), hinge))
def triplet_loss(y_true, y_pred):
    """Contrastive-style loss: pull d(a,p) to 0, push d(a,n) toward MARGIN."""
    margin = K.constant(MARGIN)
    pull = K.square(y_pred[:, 0, 0])
    push = K.square(margin - y_pred[:, 1, 0])
    return K.mean(pull + push)
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed triplet loss: small tanh(pos) and large tanh(neg) win."""
    pos = K.tanh(y_pred[:, 0, 0])
    neg = K.tanh(y_pred[:, 1, 0])
    return K.mean(pos + (K.constant(1) - neg))
def triplet_tanh_pn_loss(y_true, y_pred):
    """Tanh triplet loss that also pushes positives away from negatives."""
    a_p = K.tanh(y_pred[:, 0, 0])
    a_n = K.tanh(y_pred[:, 1, 0])
    p_n = K.tanh(y_pred[:, 2, 0])
    push = ((K.constant(1) - a_n) + (K.constant(1) - p_n)) / K.constant(2)
    return K.mean(a_p + push)
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Improved triplet loss: margin hinge plus a capped anchor-positive term."""
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    a_p = y_pred[:, 0, 0]
    a_n = y_pred[:, 1, 0]
    p_n = y_pred[:, 2, 0]
    # keep the positive closer than the mean of the two negative distances
    phi = a_p - ((a_n + p_n) / K.constant(2)) + margin
    # absolute cap on the anchor-positive distance
    psi = a_p - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    """Fraction of triplets where the positive is closer than the negative."""
    positive_closer = y_pred[:, 0, 0] < y_pred[:, 1, 0]
    return K.mean(positive_closer)
def l2Norm(x):
    """L2-normalize the embedding along its last axis (unit-length output)."""
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    """Batched euclidean distance between two embedding tensors."""
    left, right = vects
    squared = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint."""
    anchor, positive, negative = vects
    center = (anchor + positive) / K.constant(2)
    squared = K.sum(K.square(negative - center), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-to-positive euclidean distance (the negative is ignored)."""
    anchor, positive, _negative = vects
    squared = K.sum(K.square(anchor - positive), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten the entity->aliases map into a list of names plus a reverse index.

    Returns (unique_text, entity2index) where entity2index maps each name,
    canonical or alias, to its position in unique_text.
    """
    unique_text = []
    entity2index = {}
    for canonical, aliases in entity2same.items():
        for name in [canonical] + aliases:
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine (anchor, positive, negative) triplets via approximate NN search.

    Embeds every unique name with `model`, builds an Annoy index over the
    embeddings, and for each canonical entity compares its nearest
    neighbours against the known aliases.  Neighbours that are not aliases
    (hard negatives) are paired with every alias to form triplets.  Distance
    statistics are printed and the distance-ordering accuracy is written to
    `output_file_name_for_hpo` for the HPO harness.

    Returns:
        match-rate (float) when `test` is True, else (triplets dict, match-rate).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean')  # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor.  Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = expected_text
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
    # record the validation metric for the hyper-parameter optimizer
    obj = {}
    obj['accuracy'] = accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity strings into a canonical-name -> aliases map.

    People need at least 4 name variants, companies at least 2; entries
    that cleanse to fewer variants are dropped.
    """
    if people:
        min_names = 4
        cleanser = NameDataCleanser(0, min_names, limit_pairs=limit_pairs)
    else:
        cleanser = CompanyDataCleanser(limit_pairs)
        min_names = 2
    entity2same = {}
    for raw in entities:
        cleansed = cleanser.cleanse_data(raw)
        if cleansed and len(cleansed) >= min_names:
            entity2same[cleansed[0]] = cleansed[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap the raw embedding layer in a model that flattens its output."""
    wrapper = Sequential()
    wrapper.add(embedding_layer)
    wrapper.add(Flatten())
    return wrapper
def build_model(embedder):
    """Assemble the triplet-siamese network.

    A shared GRU tower (NUM_LAYERS stacked GRUs plus a final one, with an
    optional L2 output norm) embeds anchor, positive and negative inputs;
    the pairwise distances are stacked so the loss functions can slice
    them, and the model is compiled with either the angular loss or the
    configured LOSS_FUNCTION.

    Returns:
        (trainable triplet model, anchor-positive distance model,
         anchor-negative distance model, single-input embedding model)
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # the same base tower embeds all three inputs (shared weights)
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda( 
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                    )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda( 
                   # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                    )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# ---- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
args = parser.parse_args()
# Map the loss-function name onto the actual loss callable.
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
# only truthy strings like "yes"/"true" enable the L2 norm
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
# build_model adds one final GRU on top of NUM_LAYERS stacked ones
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text.  These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
# fit on train + test so both splits share one vocabulary
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
# mine an initial set of test triplets from the raw (untrained) embeddings
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
counter = 0
current_model = embedder_model
prev_match_stats = 0
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
# labels are unused by the triplet losses; random placeholders suffice
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
filepath="weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 19,474 | 35.88447 | 163 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-schroffloss.py | import numpy as np
import tensorflow as tf
import random as random
# import cntk as C
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
# Fix the hash seed so hash-based operations are reproducible.
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# import theano as T
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
# Bind Keras to the single-threaded, seeded TF session built above.
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
#must fix
# Tokenizer vocabulary cap and embedding geometry.
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
# Triplet-loss margin and angular-loss angle.
MARGIN=1
ALPHA=30
# Choose GRU (True) or Dense (False) hidden layers in get_hidden_layer.
USE_GRU=True
# Debug switches: truncate the dataset / exercise only the ANN path.
DEBUG = False
DEBUG_DATA_LENGTH = 20000
DEBUG_ANN = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
# How many approximate nearest neighbours to mine per anchor.
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = True
def f1score(positive, negative):
    """Compute the F1 score for triplet distance predictions.

    A pair counts as a true positive when the anchor-positive distance is
    no greater than the anchor-negative distance; otherwise it counts as
    both a false positive and a false negative (the match was ranked wrong
    in both directions).

    Args:
        positive: sequence of anchor-positive distances.
        negative: sequence of anchor-negative distances (same length).

    Returns:
        The F1 score as a float; 0.0 for empty input.
    """
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for pos, neg in zip(positive, negative):
        if pos <= neg:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    if denominator == 0:
        # no pairs at all -- avoid ZeroDivisionError
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Every word known to the tokenizer (up to MAX_NB_WORDS) is looked up in
    the KazumaCharEmbedding table; words with no real embedding keep an
    all-zero row.
    """
    vocab = tokenizer.word_index
    vocab_size = len(vocab) + 1
    weights = np.zeros((vocab_size, EMBEDDING_DIM))
    print('about to get kz')
    char_embedder = KazumaCharEmbedding()
    print('got kz')
    for word, idx in vocab.items():
        if idx >= MAX_NB_WORDS:
            continue
        vector = char_embedder.emb(word)
        if vector is not None:
            if sum(vector) == 0:
                print("failed to find embedding for:" + word)
            # rows for words absent from the embedding table stay all-zero
            weights[idx] = vector
    print("Number of words:" + str(vocab_size))
    return Embedding(vocab_size,
                     EMBEDDING_DIM,
                     weights=[weights],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_sequences(texts, tokenizer):
    """Vectorize and pad the anchor/positive/negative name lists."""
    padded = {}
    for role in ('anchor', 'negative', 'positive'):
        seqs = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(seqs, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Return every line of *filepath* as a list (newline terminators kept)."""
    with open(filepath) as handle:
        return list(handle)
def read_file(file_path):
    """Parse a triplet file of "anchor|positive|negative" lines.

    Returns a dict with 'anchor', 'positive' and 'negative' lists. In DEBUG
    mode reading stops once DEBUG_DATA_LENGTH lines have been consumed.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # use a context manager so the file handle is always closed
    # (the original opened the file and never closed it)
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl, start=1):
            fields = line.split("|")
            texts['anchor'].append(fields[0])
            texts['positive'].append(fields[1])
            texts['negative'].append(fields[2])
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split entities into (train, test).

    In DEBUG mode a fixed prefix slice is used; otherwise the input list is
    shuffled in place first (side effect preserved from the original).
    """
    if DEBUG:
        pool = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        pool = entities
    holdout = int(test_split * len(pool))
    return pool[:-holdout], pool[-holdout:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular loss on the stacked (anchor-positive, negative-center) distances.

    Bug fix: the original called `T.tensor.tan`, but the theano import is
    commented out in this file, so invoking this loss raised NameError.
    Use TensorFlow's `tf.tan` (as the companion HPO script does) instead.
    """
    alpha = K.constant(ALPHA)
    a_p = y_pred[:, 0, 0]
    n_c = y_pred[:, 1, 0]
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet loss: hinge on d(a,p)^2 - d(a,n)^2 + MARGIN."""
    pos_sq = K.square(y_pred[:, 0, 0])
    neg_sq = K.square(y_pred[:, 1, 0])
    hinge = pos_sq - neg_sq + K.constant(MARGIN)
    return K.mean(K.maximum(K.constant(0), hinge))
def triplet_loss(y_true, y_pred):
    """Contrastive-style loss: pull d(a,p) to 0, push d(a,n) toward MARGIN."""
    margin = K.constant(MARGIN)
    pull = K.square(y_pred[:, 0, 0])
    push = K.square(margin - y_pred[:, 1, 0])
    return K.mean(pull + push)
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Improved triplet loss: margin hinge plus a capped anchor-positive term."""
    margin = K.constant(MARGIN)
    lambda_p = 0.02
    threshold = 0.1
    a_p = y_pred[:, 0, 0]
    a_n = y_pred[:, 1, 0]
    p_n = y_pred[:, 2, 0]
    # keep the positive closer than the mean of the two negative distances
    phi = a_p - ((a_n + p_n) / 2) + margin
    # absolute cap on the anchor-positive distance
    psi = a_p - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    """Fraction of triplets where the positive is closer than the negative."""
    positive_closer = y_pred[:, 0, 0] < y_pred[:, 1, 0]
    return K.mean(positive_closer)
def l2Norm(x):
    """L2-normalize the embedding along its last axis (unit-length output)."""
    return K.l2_normalize(x, axis=-1)
def tanhNorm(x):
    """Rescale each vector so its L2 norm is squashed through tanh."""
    squared_sum = K.sum(K.square(x), axis=-1, keepdims=True)
    norm = K.sqrt(K.maximum(squared_sum, K.epsilon()))
    # multiply by tanh(norm)/norm: direction kept, magnitude bounded by 1
    return x * (K.tanh(norm) / norm)
def euclidean_distance(vects):
    """Batched euclidean distance between two embedding tensors."""
    left, right = vects
    squared = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint."""
    anchor, positive, negative = vects
    center = (anchor + positive) / K.constant(2)
    squared = K.sum(K.square(negative - center), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-to-positive euclidean distance (the negative is ignored)."""
    anchor, positive, _negative = vects
    squared = K.sum(K.square(anchor - positive), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten the entity->aliases map into a list of names plus a reverse index.

    Returns (unique_text, entity2index) where entity2index maps each name,
    canonical or alias, to its position in unique_text.
    """
    unique_text = []
    entity2index = {}
    for canonical, aliases in entity2same.items():
        for name in [canonical] + aliases:
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine (anchor, positive, negative) triplets via approximate NN search.

    Embeds every unique name with `model`, builds an Annoy index over the
    embeddings, and for each canonical entity compares its nearest
    neighbours against the known aliases.  Neighbours that are not aliases
    (hard negatives) are paired with every alias to form triplets, and
    distance statistics are printed.

    Returns:
        match-rate (float) when `test` is True, else (triplets dict, match-rate).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean')  # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor.  Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = expected_text
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, limit_pairs=False):
    """Cleanse raw people entries into a canonical-name -> aliases map.

    Entries that cleanse to fewer than four name variants are dropped.
    """
    num_names = 4
    cleanser = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    entity2same = {}
    for raw in entities:
        cleansed = cleanser.cleanse_data(raw)
        if cleansed and len(cleansed) >= num_names:
            entity2same[cleansed[0]] = cleansed[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap the raw embedding layer in a model that flattens its output."""
    wrapper = Sequential()
    wrapper.add(embedding_layer)
    wrapper.add(Flatten())
    return wrapper
def get_hidden_layer(name, net, is_last):
    """Append one hidden layer: a GRU when USE_GRU is set, else a Dense layer."""
    if not USE_GRU:
        return Dense(128, activation='relu', name=name)(net)
    if is_last:
        # the final recurrent layer collapses the sequence to one vector
        return GRU(128, activation='relu', name=name)(net)
    return GRU(128, return_sequences=True, activation='relu', name=name)(net)
def build_model(embedder):
    """Assemble the triplet-siamese network around a shared GRU encoder.

    Returns (training model, anchor-positive distance model,
    anchor-negative distance model, anchor-embedding model).
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # Four stacked GRUs; only the last one collapses the sequence to a vector.
    net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
    net = GRU(128, activation='relu', name='embed3')(net)
    """
    for i in range(0, NUM_LAYERS):
        net = get_hidden_layer('embed' + str(i), net, False)
    net = get_hidden_layer('embed_last', net, True)
    """
    # if USE_L2_NORM:
    # NOTE(review): the L2 normalization is applied unconditionally here even
    # though the commented-out guard suggests it was meant to be optional.
    net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # One shared encoder applied to all three triplet members.
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # Third stacked slot: positive-negative distance (used by some losses).
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=schroff_triplet_loss, metrics=[accuracy])
    # Auxiliary models used only for evaluation / embedding extraction.
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# --- command line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
                    help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
                    help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
                    help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
                    help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
args = parser.parse_args()
# The block below is a disabled (string-literal) version of the argument
# handling; most parsed arguments are currently ignored.
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved_triplet_loss':
LOSS_FUNCTION=improved_triplet_loss
elif args.loss_function == 'modified_loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = true
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    # Evaluation-only mode: score the raw embeddings and quit.
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
# Iteratively: mine triplets from the current embedding, fit the siamese
# model on them, then re-evaluate until the match rate is good enough.
while test_match_stats < .9 and counter < num_iter:
    counter += 1
    train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
    print("Match stats:" + str(match_stats))
    number_of_names = len(train_data['anchor'])
    # print(train_data['anchor'])
    print("number of names" + str(number_of_names))
    # Dummy targets: the losses only read the stacked distances in y_pred.
    Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
    filepath="weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
    callbacks_list = [checkpoint, early_stop]
    train_seq = get_sequences(train_data, tokenizer)
    # check just for 5 epochs because this gets called many times
    model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
    current_model = inter_model
    # print some statistics on this epoch
    print("training data predictions")
    positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    print("f1score for train is: {}".format(f1score(positives, negatives)))
    print("test data predictions")
    positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    print("f1score for test is: {}".format(f1score(positives, negatives)))
    test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    print("Test stats:" + str(test_match_stats))
| 21,846 | 37.463028 | 167 | py |
fuzzyJoiner | fuzzyJoiner-master/old/file_parser.py | from sys import argv
# Copy every file from argv[1] to argv[2], keeping only columns 2 and 20 of
# each space-separated line and skipping each file's first (header) line.
# fix: the original read from an undefined name `args`; only `argv` is imported.
# NOTE(review): listdir/isfile/join are assumed imported earlier in this file
# (os / os.path) - confirm.
onlyfiles = [f for f in listdir(argv[1]) if isfile(join(argv[1], f))]
for file_path in onlyfiles:
    input_file = open(argv[1] + "/" + file_path, encoding='utf-8')
    output_file = open(argv[2] + "/" + file_path, "w", encoding='utf-8')
    write_line = False  # becomes True after the header line is seen
    for line in input_file:
        if write_line:
            items = line.split(" ")
            output_file.write(items[2] + " | " + items[20] + "\n")
        else:
            write_line = True
    # fix: close the handle that was actually opened (`f` was undefined)
    input_file.close()
output_file.close() | 460 | 31.928571 | 69 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-8.29.18.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
from scipy.stats.mstats import gmean
import argparse
#must fix
MAX_NB_WORDS = 140000  # cap on tokenizer vocabulary size
EMBEDDING_DIM = 100  # width of the Kazuma character embeddings
MAX_SEQUENCE_LENGTH = 10  # names are padded/truncated to this many tokens
MARGIN=10  # margin used by triplet_loss
ALPHA=45  # angular-loss angle; passed straight to tf.tan (NOTE(review): radians? confirm)
DEBUG = False  # when True, operate on a small fixed slice of the data
DEBUG_DATA_LENGTH = 100  # size of that debug slice
DEBUG_ANN = False  # when True, only run the ANN evaluation and exit
USE_ANGULAR_LOSS=False  # overridden from --loss_function below
LOSS_FUNCTION=None  # selected from --loss_function below
TRAIN_NEIGHBOR_LEN=20  # ANN neighborhood size when mining training triplets
TEST_NEIGHBOR_LEN=20  # ANN neighborhood size for evaluation
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3  # overridden from --num_layers below
USE_L2_NORM = False  # overridden from --use_l2_norm below
filepath="weights.best.hdf5"  # checkpoint path; overridden from --model below
output_file_name_for_hpo = "val_dict_list.json"  # metrics dump for HPO tooling
def f1score(positive, negative):
    """Return an F1-style score for triplet distance predictions.

    ``positive`` and ``negative`` are parallel sequences of anchor-positive
    and anchor-negative distances. A pair counts as a true positive when the
    positive distance is <= the negative one; otherwise it is counted as
    both a false positive and a false negative (preserving the original
    accounting, under which the score reduces to plain accuracy).
    """
    #labels[predictions.ravel() < 0.5].sum()
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    if denominator == 0:
        # fix: empty input used to raise ZeroDivisionError
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer whose rows are Kazuma character
    embeddings for each word in *tokenizer*'s vocabulary."""
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1  # +1 because Keras reserves index 0 for padding
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            if sum(embedding_vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)  # frozen: weights never update
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Vectorize and pad each triplet role of *texts*.

    *texts* maps 'anchor'/'negative'/'positive' to lists of strings; the
    result maps the same keys to padded integer sequences of length
    MAX_SEQUENCE_LENGTH.
    """
    sequences = {}
    for role in ('anchor', 'negative', 'positive'):
        encoded = tokenizer.texts_to_sequences(texts[role])
        sequences[role] = pad_sequences(encoded, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Return the raw lines of *filepath* as a list (newlines preserved)."""
    with open(filepath) as handle:
        return list(handle)
def read_file(file_path):
    """Read 'anchor|positive|negative' triplets from *file_path*.

    Returns a dict of parallel lists keyed 'anchor'/'negative'/'positive'.
    Note: the negative field keeps its trailing newline, matching the
    original behavior. In DEBUG mode at most DEBUG_DATA_LENGTH + 1 lines
    are read.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # fix: use a context manager so the file handle is always closed
    # (the original left it open).
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            # same cutoff as the original post-increment counter check
            if i + 1 > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split *entities* into (train, test) lists.

    In DEBUG mode a fixed prefix of DEBUG_DATA_LENGTH entities is used;
    otherwise *entities* is shuffled IN PLACE (original behavior) before
    splitting off the last ``test_split`` fraction as the test set.
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    # fix: the original returned ents[:-num_validation_samples], and since
    # ents[:-0] == [], a small dataset put EVERYTHING into the test set.
    split_at = len(ents) - num_validation_samples
    return ents[:split_at], ents[split_at:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(0.2)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    # Contrastive-style variant: pull d(a,p) to 0 and push d(a,n) past MARGIN.
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(K.maximum(margin - y_pred[:,1,0], K.constant(0))))
def triplet_tanh_loss(y_true, y_pred):
    # Bounded variant: tanh squashes each distance term into [0, 1).
    return K.mean(K.tanh(y_pred[:,0,0]) + (K.constant(1) - K.tanh(y_pred[:,1,0])))
def triplet_tanh_pn_loss(y_true, y_pred):
    # Like triplet_tanh_loss but also pushes the positive-negative distance
    # (third stacked slot) apart, averaging the two repulsion terms.
    return K.mean(K.tanh(y_pred[:,0,0]) +
                  ((K.constant(1) - K.tanh(y_pred[:,1,0])) +
                   (K.constant(1) - K.tanh(y_pred[:,2,0]))) / K.constant(2));
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    # stacked slots: 0 = d(a,p), 1 = d(a,n), 2 = d(p,n)
    a_p_distance = y_pred[:,0,0]
    a_n_distance = y_pred[:,1,0]
    p_n_distance = y_pred[:,2,0]
    phi = a_p_distance - ((a_n_distance + p_n_distance) / K.constant(2)) + margin
    psi = a_p_distance - threshold
    # NOTE(review): returns per-sample values (no K.mean), unlike the other
    # losses here; Keras reduces them, but confirm that is intended.
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    # Fraction of triplets whose anchor is closer to the positive than to the
    # negative (stacked slots 0 and 1 of y_pred); y_true is unused.
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # Project embeddings onto the unit hypersphere (normalize the last axis).
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    # Row-wise L2 distance between the two stacked tensors; clamped at
    # K.epsilon() so the sqrt stays differentiable at zero distance.
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    # Distance from the negative to the anchor-positive midpoint (the "n-c"
    # term of the angular loss).
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    # Anchor-positive distance (the "a-p" term of the angular loss); the
    # negative element of the triple is accepted but unused.
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten *entity2same* into (unique_text, entity2index).

    unique_text lists every key followed by its values, in dict order;
    entity2index maps each name to its position in unique_text (later
    duplicates overwrite earlier index entries, as before).
    """
    unique_text = []
    entity2index = {}
    for key, vals in entity2same.items():
        for name in [key] + list(vals):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Index the current embeddings with Annoy, mine (anchor, positive,
    negative) triplets from each entity's nearest-neighbour list, and print
    retrieval statistics.

    Returns the fraction of expected matches found when ``test`` is True,
    else (triplets dict, that fraction). Side effect: dumps
    {'accuracy', 'steps'} to output_file_name_for_hpo as JSON.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)  # deterministic tree construction
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    precise = 0
    triplets = {}
    closest_positive_counts = []
    pos_distances = []
    neg_distances = []
    all_pos_distances = []
    all_neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # track the closest negative so precision@1-style stats can be computed
        min_neg_distance = 1000000
        for i in negatives:
            dist_neg = t.get_distance(index, entity2unique[i])
            all_neg_distances.append(dist_neg)
            if dist_neg < min_neg_distance:
                min_neg_distance = dist_neg
        for j in expected_text:
            dist_pos = t.get_distance(index, entity2unique[j])
            all_pos_distances.append(dist_pos)
        closest_pos_count = 0
        for p in overlap:
            dist_pos = t.get_distance(index, entity2unique[p])
            if dist_pos < min_neg_distance:
                closest_pos_count+=1
        if closest_pos_count > 0:
            precise+=1
        closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
        # emit every (negative x expected-positive) combination as a triplet
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    # NOTE(review): statistics.stdev needs >= 2 samples and ann_accuracy/total
    # divides by the pair count - these raise if too few pairs were mined;
    # confirm callers always supply enough data.
    print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
    print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
    print("max all positive distance:" + str(max(all_pos_distances)))
    print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
    print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
    print("max all neg distance:" + str(max(all_neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    print("Precision at 1: " + str(precise / len(entity2same)))
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse each raw entity line into name variants.

    Uses the person-name cleanser (4 variants required) when *people* is
    True, else the company cleanser (2 variants). Entities with enough
    variants map their first variant to the remaining ones.
    """
    if people:
        num_names = 4
        generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    else:
        num_names = 2
        generator = CompanyDataCleanser(limit_pairs)
    entity2same = {}
    for raw in entities:
        names = generator.cleanse_data(raw)
        if not names or len(names) < num_names:
            continue
        entity2same[names[0]] = names[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Baseline encoder: the frozen embedding lookup followed by a flatten."""
    return Sequential([embedding_layer, Flatten()])
def build_model(embedder):
    """Assemble the triplet-siamese network around a shared GRU encoder.

    Returns (training model, anchor-positive distance model,
    anchor-negative distance model, anchor-embedding model).
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # NUM_LAYERS sequence-to-sequence GRUs, then one final GRU that collapses
    # the sequence; the loop variable i deliberately leaks into the name of
    # the final layer ('embed' + str(i+1)).
    # NOTE(review): if NUM_LAYERS == 0 the post-loop line raises NameError on i.
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # One shared encoder applied to all three triplet members.
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # Third stacked slot: positive-negative distance (used by some losses).
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    # Auxiliary models used only for evaluation / embedding extraction.
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# --- command line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
args = parser.parse_args()
filepath = args.model
# Map the loss-function name onto the corresponding callable defined above.
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    # Evaluation-only mode: score the raw embeddings and quit.
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
counter = 0
current_model = embedder_model
prev_match_stats = 0
# Mine training triplets from the raw-embedding ANN, fit once, re-evaluate.
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
# Dummy targets: the losses only read the stacked distances in y_pred.
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 21,235 | 36.061082 | 163 | py |
smt | smt-master/setup.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>
Remi Lafage <remi.lafage@onera.fr>
Lucas Alber <lucasd.alber@gmail.com>
This package is distributed under New BSD license.
"""
from setuptools import setup, Extension
import sys
import numpy as np
from Cython.Build import cythonize
from smt import __version__
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Programming Language :: C++
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.7
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: Unix
Operating System :: MacOS
"""
LONG_DESCRIPTION = """
The surrogate modeling toolbox (SMT) is a Python package that contains
a collection of surrogate modeling methods, sampling techniques, and
benchmarking functions. This package provides a library of surrogate
models that is simple to use and facilitates the implementation of additional methods.
SMT is different from existing surrogate modeling libraries because of
its emphasis on derivatives, including training derivatives used for
gradient-enhanced modeling, prediction derivatives, and derivatives
with respect to the training data. It also includes new surrogate models
that are not available elsewhere: kriging by partial-least squares reduction
and energy-minimizing spline interpolation.
"""
# MSVC does not accept -std=c++11, so only add it on non-Windows toolchains.
extra_compile_args = []
if not sys.platform.startswith("win"):
    extra_compile_args.append("-std=c++11")
# Cython extension modules wrapping the C++ surrogate kernels (RBF, IDW, RMTS).
ext = (
    cythonize(
        Extension(
            "smt.surrogate_models.rbfclib",
            sources=["smt/src/rbf/rbf.cpp", "smt/src/rbf/rbfclib.pyx"],
            language="c++",
            extra_compile_args=extra_compile_args,
            include_dirs=[np.get_include()],
        )
    )
    + cythonize(
        Extension(
            "smt.surrogate_models.idwclib",
            sources=["smt/src/idw/idw.cpp", "smt/src/idw/idwclib.pyx"],
            language="c++",
            extra_compile_args=extra_compile_args,
            include_dirs=[np.get_include()],
        )
    )
    + cythonize(
        Extension(
            "smt.surrogate_models.rmtsclib",
            sources=[
                "smt/src/rmts/rmtsclib.pyx",
                "smt/src/rmts/utils.cpp",
                "smt/src/rmts/rmts.cpp",
                "smt/src/rmts/rmtb.cpp",
                "smt/src/rmts/rmtc.cpp",
            ],
            language="c++",
            extra_compile_args=extra_compile_args,
            include_dirs=[np.get_include()],
        )
    )
)
metadata = dict(
    name="smt",
    version=__version__,
    description="The Surrogate Modeling Toolbox (SMT)",
    long_description=LONG_DESCRIPTION,
    author="Mohamed Amine Bouhlel et al.",
    author_email="mbouhlel@umich.edu",
    license="BSD-3",
    classifiers=[_f for _f in CLASSIFIERS.split("\n") if _f],
    packages=[
        "smt",
        "smt.surrogate_models",
        "smt.problems",
        "smt.sampling_methods",
        "smt.utils",
        "smt.utils.neural_net",
        "smt.applications",
    ],
    install_requires=[
        "scikit-learn",
        "pyDOE2",
        "scipy",
    ],
    extras_require={
        "numba": [  # pip install smt[numba]
            "numba~=0.56.4",
        ],
    },
    python_requires=">=3.7",
    zip_safe=False,
    ext_modules=ext,
    url="https://github.com/SMTorg/smt",  # use the URL to the github repo
    download_url="https://github.com/SMTorg/smt/releases",
)
setup(**metadata)
| 3,762 | 29.346774 | 87 | py |
smt | smt-master/smt/__init__.py | __version__ = "2.0"
| 20 | 9.5 | 19 | py |
smt | smt-master/smt/examples/run_examples.py | """
Author: Dr. Mohamed A. Bouhlel <mbouhlel@umich>
Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
from scipy import linalg
from smt.utils import compute_rms_error
from smt.problems import Sphere, NdimRobotArm
from smt.sampling_methods import LHS
from smt.surrogate_models import LS, QP, KPLS, KRG, KPLSK, GEKPLS, DesignSpace
# Optional compiled surrogate models (Cython/C++ extensions). On a pure-Python
# install this import fails; the demo sections guarded by `compiled_available`
# below are then skipped.
try:
    from smt.surrogate_models import IDW, RBF, RMTC, RMTB

    compiled_available = True
except ImportError:
    # Catch only ImportError: the original bare `except:` would also hide
    # unrelated failures (KeyboardInterrupt, genuine bugs in the package).
    compiled_available = False
# Plotting is optional so the example also runs headless (e.g. on CI) without
# matplotlib installed; `plot_status` gates every plotting section below.
try:
    import matplotlib.pyplot as plt

    plot_status = True
except ImportError:
    # Only a missing matplotlib should disable plotting; other errors should
    # surface instead of being silently swallowed by a bare `except:`.
    plot_status = False
########### Initialization of the problem, construction of the training and validation points
# 10-D Sphere benchmark; the DOE holds 10 samples per dimension.
ndim = 10
ndoe = int(10 * ndim)
# Define the function
fun = Sphere(ndim=ndim)
# Construction of the DOE
# criterion="m": LHS point-selection criterion (presumably maximin -- confirm
# against smt.sampling_methods.LHS documentation).
sampling = LHS(xlimits=fun.xlimits, criterion="m")
xt = sampling(ndoe)
# Compute the output
yt = fun(xt)
# Compute the gradient. After this loop yt has 1 + ndim columns:
# column 0 = f(xt), columns 1..ndim = the partial derivatives df/dx_i.
for i in range(ndim):
    yd = fun(xt, kx=i)
    yt = np.concatenate((yt, yd), axis=1)
# Construction of the validation points
ntest = 500
sampling = LHS(xlimits=fun.xlimits)
xtest = sampling(ntest)
ytest = fun(xtest)
# Analytic reference derivatives at the validation points, one column per
# input direction; used as ground truth by every model section below.
ydtest = np.zeros((ntest, ndim))
for i in range(ndim):
    ydtest[:, i] = fun(xtest, kx=i).T
########### The LS model
# Fit a least-squares (LS) surrogate on the scalar outputs yt[:, 0], print the
# RMS validation error, then check each predicted partial derivative against
# the analytic reference ydtest.
# Initialization of the model
t = LS(print_prediction=False)
# Add the DOE
t.set_training_values(xt, yt[:, 0])
# Train the model
t.train()
# Prediction of the validation points
y = t.predict_values(xtest)
print("LS, err: " + str(compute_rms_error(t, xtest, ytest)))
if plot_status:
    # (k, l) walk the 4x3 subplot grid left-to-right, top-to-bottom; cell
    # (0, 0) is the value parity plot, the next ndim cells are derivatives.
    k, l = 0, 0
    f, axarr = plt.subplots(4, 3)
    axarr[k, l].plot(ytest, ytest, "-.")
    axarr[k, l].plot(ytest, y, ".")
    l += 1
    # Bottom-right cell is repurposed as a hand-drawn axis legend.
    axarr[3, 2].arrow(0.3, 0.3, 0.2, 0)
    axarr[3, 2].arrow(0.3, 0.3, 0.0, 0.4)
    axarr[3, 2].text(0.25, 0.4, r"$\hat{y}$")
    axarr[3, 2].text(0.35, 0.15, r"$y_{true}$")
    axarr[3, 2].axis("off")
    # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
    plt.setp(axarr[3, 2].get_xticklabels(), visible=False)
    plt.setp(axarr[3, 2].get_yticklabels(), visible=False)
    plt.suptitle(
        "Validation of the LS model (from left to right then from top to bottom): validation of the prediction model and the i-th prediction of the derivative---i=1:10"
    )
# Prediction of the derivatives with regards to each direction space
yd_prediction = np.zeros((ntest, ndim))
for i in range(ndim):
    # kx selects the differentiation direction.
    yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
    print(
        "LS, err of the "
        + str(i)
        + "-th derivative: "
        + str(compute_rms_error(t, xtest, ydtest[:, i], kx=i))
    )
    if plot_status:
        axarr[k, l].plot(ydtest[:, i], ydtest[:, i], "-.")
        axarr[k, l].plot(ydtest[:, i], yd_prediction[:, i], ".")
        # Advance to the next grid cell, wrapping after column 2.
        if l == 2:
            l = 0
            k += 1
        else:
            l += 1
if plot_status:
    plt.show()
########### The QP model
# Same validation protocol as the LS section, with a quadratic-polynomial
# (QP) surrogate: fit on yt[:, 0], report the value RMS error, then compare
# each predicted partial derivative against the analytic reference ydtest.
t = QP(print_prediction=False)
t.set_training_values(xt, yt[:, 0])
t.train()
# Prediction of the validation points
y = t.predict_values(xtest)
print("QP, err: " + str(compute_rms_error(t, xtest, ytest)))
if plot_status:
    # (k, l) walk the 4x3 subplot grid; bottom-right cell is the axis legend.
    k, l = 0, 0
    f, axarr = plt.subplots(4, 3)
    axarr[k, l].plot(ytest, ytest, "-.")
    axarr[k, l].plot(ytest, y, ".")
    l += 1
    axarr[3, 2].arrow(0.3, 0.3, 0.2, 0)
    axarr[3, 2].arrow(0.3, 0.3, 0.0, 0.4)
    axarr[3, 2].text(0.25, 0.4, r"$\hat{y}$")
    axarr[3, 2].text(0.35, 0.15, r"$y_{true}$")
    axarr[3, 2].axis("off")
    # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
    plt.setp(axarr[3, 2].get_xticklabels(), visible=False)
    plt.setp(axarr[3, 2].get_yticklabels(), visible=False)
    plt.suptitle(
        "Validation of the QP model (from left to right then from top to bottom): validation of the prediction model and the i-th prediction of the derivative---i=1:10"
    )
# Prediction of the derivatives with regards to each direction space
yd_prediction = np.zeros((ntest, ndim))
for i in range(ndim):
    yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
    print(
        "QP, err of the "
        + str(i)
        + "-th derivative: "
        + str(compute_rms_error(t, xtest, ydtest[:, i], kx=i))
    )
    if plot_status:
        axarr[k, l].plot(ydtest[:, i], ydtest[:, i], "-.")
        axarr[k, l].plot(ydtest[:, i], yd_prediction[:, i], ".")
        if l == 2:
            l = 0
            k += 1
        else:
            l += 1
if plot_status:
    plt.show()
########### The Kriging model
# Ordinary Kriging (KRG) with one initial hyperparameter per input dimension.
# The variable 'theta0' is a list of length ndim.
t = KRG(theta0=[1e-2] * ndim, print_prediction=False)
t.set_training_values(xt, yt[:, 0])
t.train()
# Prediction of the validation points
y = t.predict_values(xtest)
print("Kriging, err: " + str(compute_rms_error(t, xtest, ytest)))
if plot_status:
    # (k, l) walk the 4x3 subplot grid; bottom-right cell is the axis legend.
    k, l = 0, 0
    f, axarr = plt.subplots(4, 3)
    axarr[k, l].plot(ytest, ytest, "-.")
    axarr[k, l].plot(ytest, y, ".")
    l += 1
    axarr[3, 2].arrow(0.3, 0.3, 0.2, 0)
    axarr[3, 2].arrow(0.3, 0.3, 0.0, 0.4)
    axarr[3, 2].text(0.25, 0.4, r"$\hat{y}$")
    axarr[3, 2].text(0.35, 0.15, r"$y_{true}$")
    axarr[3, 2].axis("off")
    # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
    plt.setp(axarr[3, 2].get_xticklabels(), visible=False)
    plt.setp(axarr[3, 2].get_yticklabels(), visible=False)
    plt.suptitle(
        "Validation of the Kriging model (from left to right then from top to bottom): validation of the prediction model and the i-th prediction of the derivative---i=1:10"
    )
# Prediction of the derivatives with regards to each direction space
yd_prediction = np.zeros((ntest, ndim))
for i in range(ndim):
    yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
    print(
        "Kriging, err of the "
        + str(i)
        + "-th derivative: "
        + str(compute_rms_error(t, xtest, ydtest[:, i], kx=i))
    )
    if plot_status:
        axarr[k, l].plot(ydtest[:, i], ydtest[:, i], "-.")
        axarr[k, l].plot(ydtest[:, i], yd_prediction[:, i], ".")
        if l == 2:
            l = 0
            k += 1
        else:
            l += 1
if plot_status:
    plt.show()
########### The KPLS model
# Kriging with partial-least-squares dimension reduction (KPLS).
# 'n_comp' must be an integer in [1, ndim[ and 'theta0' a list of length
# n_comp. This example uses 2 principal components.
t = KPLS(n_comp=2, theta0=[1e-2, 1e-2], print_prediction=False)
t.set_training_values(xt, yt[:, 0])
t.train()
# Prediction of the validation points
y = t.predict_values(xtest)
print("KPLS, err: " + str(compute_rms_error(t, xtest, ytest)))
if plot_status:
    # (k, l) walk the 4x3 subplot grid; bottom-right cell is the axis legend.
    k, l = 0, 0
    f, axarr = plt.subplots(4, 3)
    axarr[k, l].plot(ytest, ytest, "-.")
    axarr[k, l].plot(ytest, y, ".")
    l += 1
    axarr[3, 2].arrow(0.3, 0.3, 0.2, 0)
    axarr[3, 2].arrow(0.3, 0.3, 0.0, 0.4)
    axarr[3, 2].text(0.25, 0.4, r"$\hat{y}$")
    axarr[3, 2].text(0.35, 0.15, r"$y_{true}$")
    axarr[3, 2].axis("off")
    # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
    plt.setp(axarr[3, 2].get_xticklabels(), visible=False)
    plt.setp(axarr[3, 2].get_yticklabels(), visible=False)
    plt.suptitle(
        "Validation of the KPLS model (from left to right then from top to bottom): validation of the prediction model and the i-th prediction of the derivative---i=1:10"
    )
# Prediction of the derivatives with regards to each direction space
yd_prediction = np.zeros((ntest, ndim))
for i in range(ndim):
    yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
    print(
        "KPLS, err of the "
        + str(i)
        + "-th derivative: "
        + str(compute_rms_error(t, xtest, ydtest[:, i], kx=i))
    )
    if plot_status:
        axarr[k, l].plot(ydtest[:, i], ydtest[:, i], "-.")
        axarr[k, l].plot(ydtest[:, i], yd_prediction[:, i], ".")
        if l == 2:
            l = 0
            k += 1
        else:
            l += 1
if plot_status:
    plt.show()
# KPLS + absolute exponential correlation kernel
# Same KPLS setup, but with corr="abs_exp" instead of the default kernel.
# Only the value prediction error is reported for this variant (no derivative
# check or plots).
t = KPLS(n_comp=2, theta0=[1e-2, 1e-2], print_prediction=False, corr="abs_exp")
t.set_training_values(xt, yt[:, 0])
t.train()
# Prediction of the validation points
y = t.predict_values(xtest)
print("KPLS + abs exp, err: " + str(compute_rms_error(t, xtest, ytest)))
########### The KPLSK model
# KPLSK: KPLS followed by a standard Kriging step in the full space.
# 'n_comp' must be an integer in [1, ndim[ and 'theta0' a list of length n_comp.
t = KPLSK(n_comp=2, theta0=[1e-2, 1e-2], print_prediction=False)
t.set_training_values(xt, yt[:, 0])
t.train()
# Prediction of the validation points
y = t.predict_values(xtest)
print("KPLSK, err: " + str(compute_rms_error(t, xtest, ytest)))
if plot_status:
    # (k, l) walk the 4x3 subplot grid; bottom-right cell is the axis legend.
    k, l = 0, 0
    f, axarr = plt.subplots(4, 3)
    axarr[k, l].plot(ytest, ytest, "-.")
    axarr[k, l].plot(ytest, y, ".")
    l += 1
    axarr[3, 2].arrow(0.3, 0.3, 0.2, 0)
    axarr[3, 2].arrow(0.3, 0.3, 0.0, 0.4)
    axarr[3, 2].text(0.25, 0.4, r"$\hat{y}$")
    axarr[3, 2].text(0.35, 0.15, r"$y_{true}$")
    axarr[3, 2].axis("off")
    # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
    plt.setp(axarr[3, 2].get_xticklabels(), visible=False)
    plt.setp(axarr[3, 2].get_yticklabels(), visible=False)
    plt.suptitle(
        "Validation of the KPLSK model (from left to right then from top to bottom): validation of the prediction model and the i-th prediction of the derivative---i=1:10"
    )
# Prediction of the derivatives with regards to each direction space
yd_prediction = np.zeros((ntest, ndim))
for i in range(ndim):
    yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
    print(
        "KPLSK, err of the "
        + str(i)
        + "-th derivative: "
        + str(compute_rms_error(t, xtest, ydtest[:, i], kx=i))
    )
    if plot_status:
        axarr[k, l].plot(ydtest[:, i], ydtest[:, i], "-.")
        axarr[k, l].plot(ydtest[:, i], yd_prediction[:, i], ".")
        if l == 2:
            l = 0
            k += 1
        else:
            l += 1
if plot_status:
    plt.show()
########### The GEKPLS model using 1 approximating point
# Gradient-enhanced KPLS: the training gradients set below are used to enrich
# the DOE. 'n_comp' must be an integer in [1, ndim[ and 'theta0' a list of
# length n_comp.
design_space = DesignSpace(fun.xlimits)
t = GEKPLS(
    n_comp=1,
    theta0=[1e-2],
    design_space=design_space,
    delta_x=1e-2,  # step used when building the extra approximating points
    extra_points=1,
    print_prediction=False,
)
t.set_training_values(xt, yt[:, 0])
# Add the gradient information
# (columns 1..ndim of yt hold the training partial derivatives)
for i in range(ndim):
    t.set_training_derivatives(xt, yt[:, 1 + i].reshape((yt.shape[0], 1)), i)
t.train()
# Prediction of the validation points
y = t.predict_values(xtest)
print("GEKPLS1, err: " + str(compute_rms_error(t, xtest, ytest)))
if plot_status:
    # (k, l) walk the 4x3 subplot grid; bottom-right cell is the axis legend.
    k, l = 0, 0
    f, axarr = plt.subplots(4, 3)
    axarr[k, l].plot(ytest, ytest, "-.")
    axarr[k, l].plot(ytest, y, ".")
    l += 1
    axarr[3, 2].arrow(0.3, 0.3, 0.2, 0)
    axarr[3, 2].arrow(0.3, 0.3, 0.0, 0.4)
    axarr[3, 2].text(0.25, 0.4, r"$\hat{y}$")
    axarr[3, 2].text(0.35, 0.15, r"$y_{true}$")
    axarr[3, 2].axis("off")
    # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
    plt.setp(axarr[3, 2].get_xticklabels(), visible=False)
    plt.setp(axarr[3, 2].get_yticklabels(), visible=False)
    plt.suptitle(
        "Validation of the GEKPLS1 model (from left to right then from top to bottom): validation of the prediction model and the i-th prediction of the derivative---i=1:10"
    )
# Prediction of the derivatives with regards to each direction space
yd_prediction = np.zeros((ntest, ndim))
for i in range(ndim):
    yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
    print(
        "GEKPLS1, err of the "
        + str(i)
        + "-th derivative: "
        + str(compute_rms_error(t, xtest, ydtest[:, i], kx=i))
    )
    if plot_status:
        axarr[k, l].plot(ydtest[:, i], ydtest[:, i], "-.")
        axarr[k, l].plot(ydtest[:, i], yd_prediction[:, i], ".")
        if l == 2:
            l = 0
            k += 1
        else:
            l += 1
if plot_status:
    plt.show()
########### The GEKPLS model using 2 approximating points
# Same gradient-enhanced KPLS as the previous section, but with a smaller
# finite-difference step (delta_x) and 2 extra approximating points per sample.
# 'n_comp' must be an integer in [1, ndim[ and 'theta0' a list of length n_comp.
t = GEKPLS(
    n_comp=1,
    theta0=[1e-2],
    # BUG FIX: GEKPLS is constructed from a DesignSpace (see the GEKPLS1
    # section, which builds design_space = DesignSpace(fun.xlimits)), not
    # from a raw `xlimits=` keyword.
    design_space=design_space,
    delta_x=1e-4,
    extra_points=2,
    print_prediction=False,
)
t.set_training_values(xt, yt[:, 0])
# Add the gradient information
# (columns 1..ndim of yt hold the training partial derivatives)
for i in range(ndim):
    t.set_training_derivatives(xt, yt[:, 1 + i].reshape((yt.shape[0], 1)), i)
t.train()
# Prediction of the validation points
y = t.predict_values(xtest)
print("GEKPLS2, err: " + str(compute_rms_error(t, xtest, ytest)))
if plot_status:
    # (k, l) walk the 4x3 subplot grid; bottom-right cell is the axis legend.
    k, l = 0, 0
    f, axarr = plt.subplots(4, 3)
    axarr[k, l].plot(ytest, ytest, "-.")
    axarr[k, l].plot(ytest, y, ".")
    l += 1
    axarr[3, 2].arrow(0.3, 0.3, 0.2, 0)
    axarr[3, 2].arrow(0.3, 0.3, 0.0, 0.4)
    axarr[3, 2].text(0.25, 0.4, r"$\hat{y}$")
    axarr[3, 2].text(0.35, 0.15, r"$y_{true}$")
    axarr[3, 2].axis("off")
    # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
    plt.setp(axarr[3, 2].get_xticklabels(), visible=False)
    plt.setp(axarr[3, 2].get_yticklabels(), visible=False)
    plt.suptitle(
        "Validation of the GEKPLS2 model (from left to right then from top to bottom): validation of the prediction model and the i-th prediction of the derivative---i=1:10"
    )
# Prediction of the derivatives with regards to each direction space
yd_prediction = np.zeros((ntest, ndim))
for i in range(ndim):
    yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
    print(
        "GEKPLS2, err of the "
        + str(i)
        + "-th derivative: "
        + str(compute_rms_error(t, xtest, ydtest[:, i], kx=i))
    )
    if plot_status:
        axarr[k, l].plot(ydtest[:, i], ydtest[:, i], "-.")
        axarr[k, l].plot(ydtest[:, i], yd_prediction[:, i], ".")
        if l == 2:
            l = 0
            k += 1
        else:
            l += 1
if plot_status:
    plt.show()
if compiled_available:
    ########### The IDW model
    # Inverse-distance-weighting surrogate. Only the value predictions are
    # validated here (no derivative loop for IDW).
    t = IDW(print_prediction=False)
    t.set_training_values(xt, yt[:, 0])
    t.train()
    # Prediction of the validation points
    y = t.predict_values(xtest)
    print("IDW, err: " + str(compute_rms_error(t, xtest, ytest)))
    if plot_status:
        # Single parity plot: predictions vs. true values.
        plt.figure()
        plt.plot(ytest, ytest, "-.")
        plt.plot(ytest, y, ".")
        plt.xlabel(r"$y_{true}$")
        plt.ylabel(r"$\hat{y}$")
        plt.title("Validation of the IDW model")
        plt.show()
    ########### The RBF model
    # Radial-basis-function surrogate with a constant (degree-0) polynomial
    # trend; validated on values and on all ndim partial derivatives.
    t = RBF(print_prediction=False, poly_degree=0)
    t.set_training_values(xt, yt[:, 0])
    t.train()
    # Prediction of the validation points
    y = t.predict_values(xtest)
    print("RBF, err: " + str(compute_rms_error(t, xtest, ytest)))
    if plot_status:
        # (k, l) walk the 4x3 subplot grid; bottom-right cell is the axis legend.
        k, l = 0, 0
        f, axarr = plt.subplots(4, 3)
        axarr[k, l].plot(ytest, ytest, "-.")
        axarr[k, l].plot(ytest, y, ".")
        l += 1
        axarr[3, 2].arrow(0.3, 0.3, 0.2, 0)
        axarr[3, 2].arrow(0.3, 0.3, 0.0, 0.4)
        axarr[3, 2].text(0.25, 0.4, r"$\hat{y}$")
        axarr[3, 2].text(0.35, 0.15, r"$y_{true}$")
        axarr[3, 2].axis("off")
        # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
        plt.setp(axarr[3, 2].get_xticklabels(), visible=False)
        plt.setp(axarr[3, 2].get_yticklabels(), visible=False)
        plt.suptitle(
            "Validation of the RBF model (from left to right then from top to bottom): validation of the prediction model and the i-th prediction of the derivative---i=1:10"
        )
    # Prediction of the derivatives with regards to each direction space
    yd_prediction = np.zeros((ntest, ndim))
    for i in range(ndim):
        yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
        print(
            "RBF, err of the "
            + str(i)
            + "-th derivative: "
            + str(compute_rms_error(t, xtest, ydtest[:, i], kx=i))
        )
        if plot_status:
            axarr[k, l].plot(ydtest[:, i], ydtest[:, i], "-.")
            axarr[k, l].plot(ydtest[:, i], yd_prediction[:, i], ".")
            if l == 2:
                l = 0
                k += 1
            else:
                l += 1
    if plot_status:
        plt.show()
########### The RMTB and RMTC models are suitable for low-dimensional problems
# Initialization of the problem
ndim = 3
ndoe = int(250 * ndim)
# Define the function
fun = NdimRobotArm(ndim=ndim)
# Construction of the DOE
sampling = LHS(xlimits=fun.xlimits)
xt = sampling(ndoe)
# Compute the output
yt = fun(xt)
# Compute the gradient
for i in range(ndim):
yd = fun(xt, kx=i)
yt = np.concatenate((yt, yd), axis=1)
# Construction of the validation points
ntest = 500
sampling = LHS(xlimits=fun.xlimits)
xtest = sampling(ntest)
ytest = fun(xtest)
    ########### The RMTB model
    # Regularized minimal-energy tensor B-spline surrogate, trained on both
    # values and gradients of the 3-D robot-arm problem.
    # NOTE(review): the derivative checks below compare against ydtest --
    # verify it was recomputed for the current 3-D problem after the
    # re-initialization above, otherwise these errors are meaningless.
    t = RMTB(
        xlimits=fun.xlimits,
        min_energy=True,
        nonlinear_maxiter=20,
        print_prediction=False,
    )
    t.set_training_values(xt, yt[:, 0])
    # Add the gradient information
    for i in range(ndim):
        t.set_training_derivatives(xt, yt[:, 1 + i].reshape((yt.shape[0], 1)), i)
    t.train()
    # Prediction of the validation points
    y = t.predict_values(xtest)
    print("RMTB, err: " + str(compute_rms_error(t, xtest, ytest)))
    if plot_status:
        # (k, l) walk a 3x2 grid (ndim=3 here); cell (2, 0) is the axis
        # legend and cell (2, 1) is unused.
        k, l = 0, 0
        f, axarr = plt.subplots(3, 2)
        axarr[k, l].plot(ytest, ytest, "-.")
        axarr[k, l].plot(ytest, y, ".")
        l += 1
        axarr[2, 0].arrow(0.3, 0.3, 0.2, 0)
        axarr[2, 0].arrow(0.3, 0.3, 0.0, 0.4)
        axarr[2, 0].text(0.25, 0.4, r"$\hat{y}$")
        axarr[2, 0].text(0.35, 0.15, r"$y_{true}$")
        axarr[2, 0].axis("off")
        axarr[2, 1].set_visible(False)
        axarr[2, 1].axis("off")
        # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
        plt.setp(axarr[2, 0].get_xticklabels(), visible=False)
        plt.setp(axarr[2, 0].get_yticklabels(), visible=False)
        plt.suptitle(
            "Validation of the RMTB model (from left to right then from top to bottom): validation of the prediction model and the i-th prediction of the derivative---i=1:3"
        )
    # Prediction of the derivatives with regards to each direction space
    yd_prediction = np.zeros((ntest, ndim))
    for i in range(ndim):
        yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
        print(
            "RMTB, err of the "
            + str(i)
            + "-th derivative: "
            + str(compute_rms_error(t, xtest, ydtest[:, i], kx=i))
        )
        if plot_status:
            axarr[k, l].plot(ydtest[:, i], ydtest[:, i], "-.")
            axarr[k, l].plot(ydtest[:, i], yd_prediction[:, i], ".")
            # Wrap after column 1 in the 3x2 grid.
            if l == 1:
                l = 0
                k += 1
            else:
                l += 1
    if plot_status:
        plt.show()
    ########### The RMTC model
    # Regularized minimal-energy tensor cubic-Hermite surrogate; identical
    # validation protocol to the RMTB section above.
    # NOTE(review): as in the RMTB section, the derivative checks rely on
    # ydtest matching the current 3-D problem -- confirm it is up to date.
    t = RMTC(
        xlimits=fun.xlimits,
        min_energy=True,
        nonlinear_maxiter=20,
        print_prediction=False,
    )
    t.set_training_values(xt, yt[:, 0])
    # Add the gradient information
    for i in range(ndim):
        t.set_training_derivatives(xt, yt[:, 1 + i].reshape((yt.shape[0], 1)), i)
    t.train()
    # Prediction of the validation points
    y = t.predict_values(xtest)
    print("RMTC, err: " + str(compute_rms_error(t, xtest, ytest)))
    if plot_status:
        # (k, l) walk a 3x2 grid; cell (2, 0) is the axis legend, (2, 1) unused.
        k, l = 0, 0
        f, axarr = plt.subplots(3, 2)
        axarr[k, l].plot(ytest, ytest, "-.")
        axarr[k, l].plot(ytest, y, ".")
        l += 1
        axarr[2, 0].arrow(0.3, 0.3, 0.2, 0)
        axarr[2, 0].arrow(0.3, 0.3, 0.0, 0.4)
        axarr[2, 0].text(0.25, 0.4, r"$\hat{y}$")
        axarr[2, 0].text(0.35, 0.15, r"$y_{true}$")
        axarr[2, 0].axis("off")
        axarr[2, 1].set_visible(False)
        axarr[2, 1].axis("off")
        # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
        plt.setp(axarr[2, 0].get_xticklabels(), visible=False)
        plt.setp(axarr[2, 0].get_yticklabels(), visible=False)
        plt.suptitle(
            "Validation of the RMTC model (from left to right then from top to bottom): validation of the prediction model and the i-th prediction of the derivative---i=1:3"
        )
    # Prediction of the derivatives with regards to each direction space
    yd_prediction = np.zeros((ntest, ndim))
    for i in range(ndim):
        yd_prediction[:, i] = t.predict_derivatives(xtest, kx=i).T
        print(
            "RMTC, err of the "
            + str(i)
            + "-th derivative: "
            + str(compute_rms_error(t, xtest, ydtest[:, i], kx=i))
        )
        if plot_status:
            axarr[k, l].plot(ydtest[:, i], ydtest[:, i], "-.")
            axarr[k, l].plot(ydtest[:, i], yd_prediction[:, i], ".")
            if l == 1:
                l = 0
                k += 1
            else:
                l += 1
    if plot_status:
        plt.show()
| 21,474 | 30.259098 | 173 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.