repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
StyleFusion | StyleFusion-master/src/shared.py | import os, random, sys, datetime, time, socket, io, h5py, argparse, shutil, io
import queue
import numpy as np
import scipy
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
import matplotlib.pyplot as plt
"""
AUTHOR:
Sean Xiang Gao (xiag@microsoft.com) at Microsoft Research
"""
# Special vocabulary tokens shared by all models.
SOS_token = '_SOS_'
EOS_token = '_EOS_'
UNK_token = '_UNK_'
# just for arxiv
EQN_token = '_eqn_'
CITE_token = '_cite_'
IX_token = '_ix_'
# Host-dependent switches: visualization is only enabled on the two dev boxes.
hostname = socket.gethostname()
SKIP_VIS = hostname not in ['MININT-3LHNLKS', 'xiag-0228']
FIT_VERBOSE = 1
DATA_PATH = 'data/'
OUT_PATH = 'out/'
# Banner printed at import time so logs record where data is read/written.
print('@'*20)
print('hostname: %s'%hostname)
print('data_path: %s'%DATA_PATH)
print('out_path: %s'%OUT_PATH)
print('@'*20)
PHILLY = False
BATCH_SIZE = 128#256
# Command-line interface shared by the training/eval scripts that import this module.
parser = argparse.ArgumentParser()
parser.add_argument('--data_name', type=str, default='toy')
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--max_n_trained', type=int, default=int(10e6))
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--token_embed_dim', type=int, default=1000)
parser.add_argument('--rnn_units', type=int, default=1000)
parser.add_argument('--encoder_depth', type=int, default=2)
parser.add_argument('--decoder_depth', type=int, default=2)
parser.add_argument('--stddev', type=float, default=0.1)
parser.add_argument('--wt_dist', type=float, default=1.)
parser.add_argument('--debug','-d', action='store_true')
parser.add_argument('--max_ctxt_len', type=int, default=90)
parser.add_argument('--max_resp_len', type=int, default=30)
parser.add_argument('--fld_suffix', type=str, default='')
parser.add_argument('--conv_mix_ratio', type=float, default=0.0)
parser.add_argument('--nonc_mix_ratio', type=float, default=1.0)
parser.add_argument('--clf_name', type=str, default='holmes')
parser.add_argument('--model_class', type=str, default='fuse')
parser.add_argument('--restore', type=str, default='')
parser.add_argument('--noisy_vocab', type=int, default=-1)
parser.add_argument('--reld', action='store_true')
parser.add_argument('--ablation', '-abl', action='store_true')
parser.add_argument('--path_test', type=str)
# CPU-only flag exists only on the dev box, where a GPU may not be available.
if hostname == 'MININT-3LHNLKS':
    parser.add_argument('--cpu_only', '-c', action='store_true')
def reset_rand(RAND_SEED=9):
    """Seed both the stdlib and NumPy RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed):
        seeder(RAND_SEED)
reset_rand()
def str2bool(s):
    """Map 'true'/'false' (case-insensitive) to bool; raise KeyError otherwise."""
    lowered = s.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise KeyError(lowered)
def strmap(x):
    """Render a value as a compact string.

    Strings pass through unchanged; NaN values become 'nan'; whole
    numbers drop the fractional part; other numbers get 4 decimals.
    """
    # Check strings FIRST: the old order tested "'nan' in str(x)" before the
    # isinstance check, so any string containing 'nan' (e.g. 'banana') was
    # wrongly rendered as 'nan'.
    if isinstance(x, str):
        return x
    # NaN must be detected before int(x), which would raise on NaN.
    if 'nan' in str(x):
        return 'nan'
    if int(x) == x:
        return '%i' % x
    return '%.4f' % x
def int2str(i):
    """Format a count, abbreviating thousands, e.g. 2000 -> '2k', 1500 -> '1.5k'."""
    if i >= 1000:
        thousands = i / 1000
        fmt = '%ik' if int(thousands) == thousands else '%.1fk'
        return fmt % thousands
    return str(i)
def makedirs(fld):
    """Create directory `fld` (with parents) if it does not already exist.

    Uses exist_ok=True instead of an os.path.exists() pre-check, which
    avoids the race where another process creates the directory between
    the check and the call.
    """
    os.makedirs(fld, exist_ok=True)
def rand_latent(center, r, limit=True):
    """Sample a point at Euclidean distance `r` from `center`.

    A Gaussian direction is drawn and rescaled to radius r; when `limit`
    is set, each coordinate of the result is clipped to [-1, 1].
    """
    if r == 0:
        return center
    direction = np.random.normal(size=center.shape)
    norm = np.sqrt(np.sum(np.power(direction, 2)))
    sampled = center + direction / norm * r
    if not limit:
        return sampled
    return np.minimum(1, np.maximum(-1, sampled))
def calc_nltk_bleu(ref, hyp, max_ngram=4, smoothing_function=None):
    """Sentence-level BLEU of `hyp` against the single reference `ref`,
    weighting 1..max_ngram grams uniformly."""
    uniform = 1. / max_ngram
    return sentence_bleu(
        [ref.split()],
        hyp.split(),
        weights=[uniform] * max_ngram,
        smoothing_function=smoothing_function,
    )
def calc_nltk_bleu_smoothed(ref, hyp, max_ngram=4):
    """BLEU with NIST method7 smoothing, so short hypotheses score non-zero."""
    smoother = SmoothingFunction().method7
    return calc_nltk_bleu(ref, hyp, max_ngram, smoothing_function=smoother)
def euc_dist(a, b):
    """Euclidean distance between `a` and `b`.

    Returns a scalar when `a` is 1-D, otherwise one distance per row.
    """
    diff = a - b
    if len(a.shape) == 1:
        return np.sqrt(np.sum(np.square(diff)))
    return np.sqrt(np.sum(np.square(diff), axis=1))
def now():
    """Current local wall-clock time as a datetime object."""
    return datetime.datetime.now()
| 3,632 | 25.326087 | 78 | py |
StyleFusion | StyleFusion-master/data/arXiv/arxiv.py | """
AUTHOR:
Xiang Gao (xiag@microsoft.com) at Microsoft Research
"""
import re, os, subprocess
from nltk.tokenize import TweetTokenizer
# Placeholder tokens substituted for LaTeX math, citations and numbered refs.
EQN_token = '_eqn_'
CITE_token = '_cite_'
IX_token = '_ix_'
MAX_UTT_LEN = 30 # maximum length of utterance allowed. if longer, ignore
def norm_sentence(txt):
    """Lower-case, clean and tokenize a raw sentence.

    Drops hashtag tokens, rewrites URLs to __url__, strips characters
    outside a small whitelist, then tokenizes and splits common English
    contractions so each clitic ('s, n't, 'll, ...) is its own token.
    """
    txt = txt.lower()
    # url and tag
    words = []
    for word in txt.lower().split():
        if word[0] == '#': # don't allow tag
            continue
        i = word.find('http')
        if i >= 0:
            word = word[:i] + ' ' + '__url__'
        words.append(word.strip())
    txt = ' '.join(words)
    # remove illegal char
    txt = txt.replace(chr(92),'') # chr(92) = '\'. as twitter has 'b\/c' rather than 'b/c'
    txt = txt.replace("b/c","because").replace('j/k','just kidding').replace('w/o','without').replace('w/','with')
    # Temporarily rename placeholders, run the character whitelist, then
    # restore them (shields the placeholders while punctuation is stripped).
    txt = re.sub('__mention__','MENTION',txt)
    txt = re.sub('__url__','URL',txt)
    txt = re.sub(r"[^A-Za-z0-9():,.!?_'“” ]", " ", txt)
    txt = re.sub('MENTION','__mention__',txt)
    txt = re.sub('URL','__url__',txt)
    # contraction
    add_space = ["'s", "'m", "'re", "n't", "'ll","'ve","'d","'em"]
    tokenizer = TweetTokenizer(preserve_case=False)
    # Surround with spaces so the replace patterns below match at the ends too.
    txt = ' ' + ' '.join(tokenizer.tokenize(txt)) + ' '
    txt = txt.replace(" won't ", " will n't ")
    txt = txt.replace(" can't ", " can n't ")
    for a in add_space:
        txt = txt.replace(a+' ', ' '+a+' ')
    # remove un-necessary space
    return ' '.join(txt.split())
def arxiv_del_bib(path):
    """Truncate a LaTeX source at its bibliography, saving to `path`.delbib.

    Everything from the first bibliography-like environment onward is
    replaced by a closing \\end{document}.
    """
    markers = ('begin{references}', 'begin{enumerate}', 'begin{thebibliography}')
    kept = []
    for line in open(path, encoding='utf-8', errors='ignore'):
        if any(marker in line for marker in markers):
            kept.append(chr(92) + 'end{document}\n\n')
            break
        kept.append(line.strip('\n'))
    with open(path + '.delbib', 'w', encoding='utf-8') as f:
        f.write('\n'.join(kept))
def arxiv_pandoc(fld):
    """Batch-convert the extension-less LaTeX sources in `fld` to plain text.

    Each source first has its bibliography stripped (arxiv_del_bib), then
    pandoc writes <name>_pandoc.txt next to it.
    """
    # preprocess arxiv latex file with pandoc
    # http://www.cs.cornell.edu/projects/kddcup/datasets.html
    # http://pandoc.org/index.html
    n = 0
    for fname in os.listdir(fld):
        # Files with an extension are outputs/auxiliaries; raw sources have none.
        if '.' in fname:
            continue
        path = fld + '/' + fname
        arxiv_del_bib(path)
        cmd = [
            'pandoc',
            '-f', 'latex',
            '-o', path + '_pandoc.txt',
            path + '.delbib'
        ]
        # communicate() waits for pandoc to finish before moving on.
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        process.communicate()
        print('='*10)
        print(path)
        n += 1
        #if n == 100:
        #	break
def arxiv_paragraph(path):
    """Group the lines of a pandoc text file into cleaned paragraphs.

    Blank lines delimit paragraphs; a paragraph is kept only if it begins
    with a letter. Results go to `path`.paragraph, one paragraph per line.
    """
    def lines2paragraph(lines):
        # Join accumulated lines; return None for empty or non-prose chunks.
        p = ' '.join(lines)
        if len(p) == 0:
            return None
        if not p[0].isalpha():
            return None
        return arxiv_clean(p)
    paragraphs = []
    lines = []
    for line in open(path, encoding='utf-8', errors='ignore'):
        line = line.strip('\n').strip()
        if len(line) == 0:
            # Blank line: flush the current paragraph.
            paragraph = lines2paragraph(lines)
            if paragraph is not None:
                paragraphs.append(paragraph)
            lines = []
        else:
            # Keep only lines with at least one alphanumeric character.
            if len(re.sub(r"[^A-Za-z0-9]", "", line)) > 0:
                lines.append(line)
    # Flush the trailing paragraph (the file may not end with a blank line).
    paragraph = lines2paragraph(lines)
    if paragraph is not None:
        paragraphs.append(paragraph)
    with open(path+'.paragraph','w', encoding='utf-8') as f:
        f.write('\n'.join(paragraphs))
def arxiv_clean(p):
    """Replace inline math, citations and numbered refs with placeholder tokens.

    $...$ / $$...$$ spans collapse to EQN_token, citation markup to
    CITE_token, and parenthesized numbers like '(1)' to IX_token; runs of
    adjacent placeholder tokens are deduplicated.
    """
    # deal with equations and citations
    is_math = False
    i = 0
    s = ''
    while i < len(p):
        flag = False
        if p[i] == '$':
            flag = True
            # Treat '$$' as a single delimiter.
            if i + 1 < len(p) and p[i+1] == '$':
                i = i+1
        if flag:
            # Emit the placeholder only when entering math mode.
            if not is_math:
                s += ' %s '%EQN_token
            is_math = not is_math
        elif not is_math:
            s += p[i]
        i += 1
    ww = []
    for w in s.split():
        if w == EQN_token:
            # Collapse consecutive equation tokens (optionally separated by '.').
            if len(ww) > 0 and ww[-1] == EQN_token:
                continue
            if len(ww) > 1 and ww[-1] == '.' and ww[-2] == EQN_token:
                continue
        if chr(92)+'[' in w or chr(92) + ']' in w or '[@' in w or w.startswith('@'):
            # citation
            w_ = CITE_token
            if w[-1]!=']':
                # keep trailing punctuation such as '.' or ','
                w_ += w[-1]
            w = w_
            # drop repeated adjacent citations
            if len(ww) > 0 and ww[-1].startswith(CITE_token):
                continue
        else:
            if w[0] == '(' and len(w) > 1 and w[1].isnumeric():
                # numbered reference like '(1)'
                w_ = IX_token
                if w[-1]!=')':
                    w_ += w[-1]
                w = w_
        # strip leftover bold/italic markup from pandoc output
        w = w.replace('[**','').replace('[*','').replace('**]','').replace('*]','')
        ww.append(w)
    return ' '.join(ww)
def arxiv_paragraph_all(fld):
    """Run arxiv_paragraph on every *_pandoc.txt file in `fld`."""
    for fname in os.listdir(fld):
        if not fname.endswith('_pandoc.txt'):
            continue
        path = fld + '/' + fname
        print(path)
        arxiv_paragraph(path)
def arxiv_utts(path):
    """Split the paragraphs in `path` into normalized short utterances.

    Writes the surviving utterances to `path`.utt and returns them.
    """
    utts = []
    for paragraph in open(path, encoding='utf-8'):
        paragraph = paragraph.strip('\n').replace(chr(92), '. ')
        for utt in paragraph.split('. '):
            utt += '.'
            # Require at least a few letters so stray punctuation is dropped.
            if len(re.sub(r"[^a-z]", "", utt.lower())) < 5:
                continue
            utt = norm_sentence(utt)
            # Drop over-long utterances entirely rather than truncating.
            if len(utt.split()) > MAX_UTT_LEN:
                continue
            utts.append(utt)
    with open(path + '.utt', 'w', encoding='utf-8') as f:
        f.write('\n'.join(utts))
    return utts
def arxiv_utts_all(fld):
    """Collect utterances from every .paragraph file in `fld` into ../all.utt.

    Only utterances longer than 10 characters are kept; the result is sorted.
    """
    collected = []
    for fname in os.listdir(fld):
        if not fname.endswith('.paragraph'):
            continue
        path = fld + '/' + fname
        print(path)
        for utt in arxiv_utts(path):
            if len(utt) > 10:
                collected.append(utt)
    with open(fld + '/../all.utt', 'w', encoding='utf-8') as f:
        f.write('\n'.join(sorted(collected)))
def arxiv_filter(path):
    """Keep lines that start at a real word and contain enough letters.

    Each surviving line is trimmed so it begins at its first token with
    more than one alphabetic character; results go to `path`.filtered.
    """
    # make sure: 1) starts with some word, 2) longer than some min
    kept = []
    for line in open(path, encoding='utf-8'):
        tokens = line.strip('\n').replace('“', '"').replace('”', '"').split()
        start = None
        for idx, token in enumerate(tokens):
            if len(re.sub(r"[^a-z]", "", token.lower())) > 1:
                start = idx
                break
        if start is None:
            continue
        tokens = tokens[start:]
        if len(re.sub(r"[^a-z]", "", ''.join(tokens))) > 5:
            kept.append(' '.join(tokens))
    with open(path + '.filtered', 'w', encoding='utf-8') as f:
        f.write('\n'.join(kept))
if __name__ == '__main__':
    # Full pipeline over the hep-th dumps, one year at a time:
    # LaTeX -> pandoc text -> paragraphs -> utterances -> filtered lines.
    years = range(1998, 2002+1)
    for year in years:
        fld = 'hep-th-%i/%i/'%(year, year)
        arxiv_pandoc(fld)
        arxiv_paragraph_all(fld)
        arxiv_utts_all(fld)
    for year in years:
        print(year)
        arxiv_filter('hep-th-%i/all.utt'%(year))
| 5,797 | 22.860082 | 111 | py |
NBFNet | NBFNet-master/script/run.py | import os
import sys
import math
import pprint
import torch
from torchdrug import core
from torchdrug.utils import comm
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from nbfnet import dataset, layer, model, task, util
def train_and_validate(cfg, solver):
    """Train in chunks of ~1/10 of the epoch budget, validating after each.

    A checkpoint is saved after every chunk; the one with the best
    validation metric (cfg.metric) is restored before returning.
    """
    if cfg.train.num_epoch == 0:
        return
    chunk = math.ceil(cfg.train.num_epoch / 10)
    best_result = float("-inf")
    best_epoch = -1
    trained = 0
    while trained < cfg.train.num_epoch:
        kwargs = cfg.train.copy()
        # Last chunk may be shorter than `chunk`.
        kwargs["num_epoch"] = min(chunk, cfg.train.num_epoch - trained)
        solver.model.split = "train"
        solver.train(**kwargs)
        solver.save("model_epoch_%d.pth" % solver.epoch)
        solver.model.split = "valid"
        metric = solver.evaluate("valid")
        result = metric[cfg.metric]
        if result > best_result:
            best_result = result
            best_epoch = solver.epoch
        trained += chunk
    solver.load("model_epoch_%d.pth" % best_epoch)
    return solver
def test(cfg, solver):
    """Evaluate the trained solver on the validation and test splits."""
    for split in ("valid", "test"):
        solver.model.split = split
        solver.evaluate(split)
if __name__ == "__main__":
args, vars = util.parse_args()
cfg = util.load_config(args.config, context=vars)
working_dir = util.create_working_directory(cfg)
torch.manual_seed(args.seed + comm.get_rank())
logger = util.get_root_logger()
if comm.get_rank() == 0:
logger.warning("Config file: %s" % args.config)
logger.warning(pprint.pformat(cfg))
dataset = core.Configurable.load_config_dict(cfg.dataset)
solver = util.build_solver(cfg, dataset)
train_and_validate(cfg, solver)
test(cfg, solver)
| 1,690 | 25.421875 | 64 | py |
NBFNet | NBFNet-master/script/visualize.py | import os
import sys
import pprint
import torch
from torchdrug import core
from torchdrug.utils import comm
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from nbfnet import dataset, layer, model, task, util
# Human-readable entity names for FB15k-237, resolved relative to this script.
vocab_file = os.path.join(os.path.dirname(__file__), "../data/fb15k237_entity.txt")
vocab_file = os.path.abspath(vocab_file)
def load_vocab(dataset):
    """Map the dataset's entity ids to readable names.

    Returns (entity_vocab, relation_vocab); each relation is shortened to
    its last path segment plus its index, e.g. "profession (3)".
    """
    mapping = {}
    with open(vocab_file, "r") as fin:
        for line in fin:
            key, value = line.strip().split("\t")
            mapping[key] = value
    entity_vocab = [mapping[token] for token in dataset.entity_vocab]
    relation_vocab = []
    for i, token in enumerate(dataset.relation_vocab):
        short = token[token.rfind("/") + 1:].replace("_", " ")
        relation_vocab.append("%s (%d)" % (short, i))
    return entity_vocab, relation_vocab
def visualize_path(solver, triplet, entity_vocab, relation_vocab):
    """Log the model's ranking and top interpretation paths for one triplet.

    Both query directions are shown: (h, r, t) and the inverse (t, r^-1, h),
    where the inverse relation id is r + num_relation. Relies on a
    module-level `logger` defined by the calling script.
    """
    num_relation = len(relation_vocab)
    h, t, r = triplet.tolist()
    triplet = torch.as_tensor([[h, t, r]], device=solver.device)
    inverse = torch.as_tensor([[t, h, r + num_relation]], device=solver.device)
    solver.model.eval()
    pred, (mask, target) = solver.model.predict_and_target(triplet)
    pos_pred = pred.gather(-1, target.unsqueeze(-1))
    # filtered rank: count candidates scoring >= the positive, among valid ones
    rankings = torch.sum((pos_pred <= pred) & mask, dim=-1) + 1
    rankings = rankings.squeeze(0)
    logger.warning("")
    samples = (triplet, inverse)
    for sample, ranking in zip(samples, rankings):
        h, t, r = sample.squeeze(0).tolist()
        h_name = entity_vocab[h]
        t_name = entity_vocab[t]
        r_name = relation_vocab[r % num_relation]
        if r >= num_relation:
            r_name += "^(-1)"
        logger.warning(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        logger.warning("rank(%s | %s, %s) = %g" % (t_name, h_name, r_name, ranking))
        # Top paths and their importance weights from the model's visualizer.
        paths, weights = solver.model.visualize(sample)
        for path, weight in zip(paths, weights):
            triplets = []
            for h, t, r in path:
                h_name = entity_vocab[h]
                t_name = entity_vocab[t]
                r_name = relation_vocab[r % num_relation]
                if r >= num_relation:
                    r_name += "^(-1)"
                triplets.append("<%s, %s, %s>" % (h_name, r_name, t_name))
            logger.warning("weight: %g\n\t%s" % (weight, " ->\n\t".join(triplets)))
if __name__ == "__main__":
args, vars = util.parse_args()
cfg = util.load_config(args.config, context=vars)
working_dir = util.create_working_directory(cfg)
torch.manual_seed(args.seed + comm.get_rank())
logger = util.get_root_logger()
logger.warning("Config file: %s" % args.config)
logger.warning(pprint.pformat(cfg))
if cfg.dataset["class"] != "FB15k237":
raise ValueError("Visualization is only implemented for FB15k237")
dataset = core.Configurable.load_config_dict(cfg.dataset)
solver = util.build_solver(cfg, dataset)
entity_vocab, relation_vocab = load_vocab(dataset)
for i in range(500):
visualize_path(solver, solver.test_set[i], entity_vocab, relation_vocab)
| 3,106 | 34.306818 | 84 | py |
NBFNet | NBFNet-master/nbfnet/layer.py | import torch
from torch import nn
from torch.nn import functional as F
from torch_scatter import scatter_add, scatter_mean, scatter_max, scatter_min
from torchdrug import layers
from torchdrug.layers import functional
class GeneralizedRelationalConv(layers.MessagePassingBase):
    """Relational message-passing convolution (NBFNet-style).

    Messages combine node states with (optionally query-conditioned)
    relation embeddings through a KGE operator (TransE / DistMult /
    RotatE) and are aggregated with sum / mean / max / PNA.
    """
    # numerical floor for the std computation in the PNA aggregator
    eps = 1e-6
    # message function -> elementwise op supported by the fused rspmm kernel
    message2mul = {
        "transe": "add",
        "distmult": "mul",
    }
    def __init__(self, input_dim, output_dim, num_relation, query_input_dim, message_func="distmult",
                 aggregate_func="pna", layer_norm=False, activation="relu", dependent=True):
        super(GeneralizedRelationalConv, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_relation = num_relation
        self.query_input_dim = query_input_dim
        self.message_func = message_func
        self.aggregate_func = aggregate_func
        self.dependent = dependent
        if layer_norm:
            self.layer_norm = nn.LayerNorm(output_dim)
        else:
            self.layer_norm = None
        if isinstance(activation, str):
            # look up the activation by name in torch.nn.functional
            self.activation = getattr(F, activation)
        else:
            self.activation = activation
        if self.aggregate_func == "pna":
            # PNA yields 4 statistics x 3 degree scalers (12x) + old state (1x)
            self.linear = nn.Linear(input_dim * 13, output_dim)
        else:
            self.linear = nn.Linear(input_dim * 2, output_dim)
        if dependent:
            # relation embeddings generated from the query embedding
            self.relation_linear = nn.Linear(query_input_dim, num_relation * input_dim)
        else:
            self.relation = nn.Embedding(num_relation, input_dim)
    def message(self, graph, input):
        """Compute one message per edge, plus a boundary message per node."""
        assert graph.num_relation == self.num_relation
        batch_size = len(graph.query)
        node_in, node_out, relation = graph.edge_list.t()
        if self.dependent:
            relation_input = self.relation_linear(graph.query).view(batch_size, self.num_relation, self.input_dim)
        else:
            # share the same relation table across the batch
            relation_input = self.relation.weight.expand(batch_size, -1, -1)
        relation_input = relation_input.transpose(0, 1)
        node_input = input[node_in]
        edge_input = relation_input[relation]
        if self.message_func == "transe":
            message = edge_input + node_input
        elif self.message_func == "distmult":
            message = edge_input * node_input
        elif self.message_func == "rotate":
            # complex multiplication over (re, im) halves of the feature dim
            node_re, node_im = node_input.chunk(2, dim=-1)
            edge_re, edge_im = edge_input.chunk(2, dim=-1)
            message_re = node_re * edge_re - node_im * edge_im
            message_im = node_re * edge_im + node_im * edge_re
            message = torch.cat([message_re, message_im], dim=-1)
        else:
            raise ValueError("Unknown message function `%s`" % self.message_func)
        # append the boundary condition as one extra message per node
        message = torch.cat([message, graph.boundary])
        return message
    def aggregate(self, graph, message):
        """Aggregate per-edge messages into per-node updates."""
        # extend targets/weights with one self-entry per node for the
        # boundary messages appended in message()
        node_out = graph.edge_list[:, 1]
        node_out = torch.cat([node_out, torch.arange(graph.num_node, device=graph.device)])
        edge_weight = torch.cat([graph.edge_weight, torch.ones(graph.num_node, device=graph.device)])
        edge_weight = edge_weight.unsqueeze(-1).unsqueeze(-1)
        # +1 accounts for the boundary message
        degree_out = graph.degree_out.unsqueeze(-1).unsqueeze(-1) + 1
        if self.aggregate_func == "sum":
            update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
        elif self.aggregate_func == "mean":
            update = scatter_mean(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
        elif self.aggregate_func == "max":
            update = scatter_max(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)[0]
        elif self.aggregate_func == "pna":
            # principal neighborhood aggregation: mean/max/min/std x 3 scalers
            mean = scatter_mean(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
            sq_mean = scatter_mean(message ** 2 * edge_weight, node_out, dim=0, dim_size=graph.num_node)
            max = scatter_max(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)[0]
            min = scatter_min(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)[0]
            std = (sq_mean - mean ** 2).clamp(min=self.eps).sqrt()
            features = torch.cat([mean.unsqueeze(-1), max.unsqueeze(-1), min.unsqueeze(-1), std.unsqueeze(-1)], dim=-1)
            features = features.flatten(-2)
            scale = degree_out.log()
            scale = scale / scale.mean()
            scales = torch.cat([torch.ones_like(scale), scale, 1 / scale.clamp(min=1e-2)], dim=-1)
            update = (features.unsqueeze(-1) * scales.unsqueeze(-2)).flatten(-2)
        else:
            raise ValueError("Unknown aggregation function `%s`" % self.aggregate_func)
        return update
    def message_and_aggregate(self, graph, input):
        """Fused message+aggregate path using generalized rspmm kernels.

        Falls back to the separate message/aggregate pair when gradients
        w.r.t. the graph are required or for RotatE, which the fused
        kernel does not cover.
        """
        if graph.requires_grad or self.message_func == "rotate":
            return super(GeneralizedRelationalConv, self).message_and_aggregate(graph, input)
        assert graph.num_relation == self.num_relation
        batch_size = len(graph.query)
        input = input.flatten(1)
        boundary = graph.boundary.flatten(1)
        degree_out = graph.degree_out.unsqueeze(-1) + 1
        if self.dependent:
            relation_input = self.relation_linear(graph.query).view(batch_size, self.num_relation, self.input_dim)
            relation_input = relation_input.transpose(0, 1).flatten(1)
        else:
            relation_input = self.relation.weight.repeat(1, batch_size)
        adjacency = graph.adjacency.transpose(0, 1)
        if self.message_func in self.message2mul:
            mul = self.message2mul[self.message_func]
        else:
            raise ValueError("Unknown message function `%s`" % self.message_func)
        if self.aggregate_func == "sum":
            update = functional.generalized_rspmm(adjacency, relation_input, input, sum="add", mul=mul)
            update = update + boundary
        elif self.aggregate_func == "mean":
            update = functional.generalized_rspmm(adjacency, relation_input, input, sum="add", mul=mul)
            update = (update + boundary) / degree_out
        elif self.aggregate_func == "max":
            update = functional.generalized_rspmm(adjacency, relation_input, input, sum="max", mul=mul)
            update = torch.max(update, boundary)
        elif self.aggregate_func == "pna":
            # same PNA statistics as aggregate(), built from fused kernels
            sum = functional.generalized_rspmm(adjacency, relation_input, input, sum="add", mul=mul)
            sq_sum = functional.generalized_rspmm(adjacency, relation_input ** 2, input ** 2, sum="add", mul=mul)
            max = functional.generalized_rspmm(adjacency, relation_input, input, sum="max", mul=mul)
            min = functional.generalized_rspmm(adjacency, relation_input, input, sum="min", mul=mul)
            mean = (sum + boundary) / degree_out
            sq_mean = (sq_sum + boundary ** 2) / degree_out
            max = torch.max(max, boundary)
            min = torch.min(min, boundary)
            std = (sq_mean - mean ** 2).clamp(min=self.eps).sqrt()
            features = torch.cat([mean.unsqueeze(-1), max.unsqueeze(-1), min.unsqueeze(-1), std.unsqueeze(-1)], dim=-1)
            features = features.flatten(-2)
            scale = degree_out.log()
            scale = scale / scale.mean()
            scales = torch.cat([torch.ones_like(scale), scale, 1 / scale.clamp(min=1e-2)], dim=-1)
            update = (features.unsqueeze(-1) * scales.unsqueeze(-2)).flatten(-2)
        else:
            raise ValueError("Unknown aggregation function `%s`" % self.aggregate_func)
        return update.view(len(update), batch_size, -1)
    def combine(self, input, update):
        """Combine old node states with aggregated updates, then normalize/activate."""
        output = self.linear(torch.cat([input, update], dim=-1))
        if self.layer_norm:
            output = self.layer_norm(output)
        if self.activation:
            output = self.activation(output)
        return output | 7,872 | 46.427711 | 119 | py |
NBFNet | NBFNet-master/nbfnet/task.py | import math
import torch
from torch.nn import functional as F
from torch.utils import data as torch_data
from ogb import linkproppred
from torchdrug import core, tasks, metrics
from torchdrug.layers import functional
from torchdrug.core import Registry as R
# Wrap the OGB evaluator so it can be built from torchdrug configs, register
# it under "ogb.linkproppred.Evaluator", and patch it back into the ogb module.
Evaluator = core.make_configurable(linkproppred.Evaluator)
Evaluator = R.register("ogb.linkproppred.Evaluator")(Evaluator)
setattr(linkproppred, "Evaluator", Evaluator)
@R.register("tasks.KnowledgeGraphCompletionExt")
class KnowledgeGraphCompletionExt(tasks.KnowledgeGraphCompletion, core.Configurable):
def __init__(self, model, criterion="bce",
metric=("mr", "mrr", "hits@1", "hits@3", "hits@10", "1-to-1", "1-to-n", "n-to-1", "n-to-n"),
num_negative=128, margin=6, adversarial_temperature=0, strict_negative=True, filtered_ranking=True,
fact_ratio=None, sample_weight=True):
super(KnowledgeGraphCompletionExt, self).__init__(
model, criterion, metric, num_negative, margin, adversarial_temperature, strict_negative, filtered_ranking,
fact_ratio, sample_weight)
def preprocess(self, train_set, valid_set, test_set):
super(KnowledgeGraphCompletionExt, self).preprocess(train_set, valid_set, test_set)
degree_hr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
degree_tr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
for h, t, r in train_set:
degree_hr[h, r] += 1
degree_tr[t, r] += 1
has_category = False
for _metric in self.metric:
if _metric in ["1-to-1", "1-to-n", "n-to-1", "n-to-n"]:
has_category = True
if has_category:
is_to_one = degree_hr.sum(dim=0).float() / (degree_hr > 0).sum(dim=0) < 1.5
is_one_to = degree_tr.sum(dim=0).float() / (degree_tr > 0).sum(dim=0) < 1.5
self.register_buffer("is_one_to_one", is_one_to & is_to_one)
self.register_buffer("is_one_to_many", is_one_to & ~is_to_one)
self.register_buffer("is_many_to_one", ~is_one_to & is_to_one)
self.register_buffer("is_many_to_many", ~is_one_to & ~is_to_one)
assert self.is_one_to_one.sum() + self.is_one_to_many.sum() + \
self.is_many_to_one.sum() + self.is_many_to_many.sum() == self.num_relation
assert (self.is_one_to_one | self.is_one_to_many | self.is_many_to_one | self.is_many_to_many).all()
def target(self, batch):
mask, target = super(KnowledgeGraphCompletionExt, self).target(batch)
relation = batch[:, 2]
# in case of GPU OOM
return mask, target, relation.cpu()
def evaluate(self, pred, target):
mask, target, relation = target
pos_pred = pred.gather(-1, target.unsqueeze(-1))
if self.filtered_ranking:
ranking = torch.sum((pos_pred <= pred) & mask, dim=-1) + 1
else:
ranking = torch.sum(pos_pred <= pred, dim=-1) + 1
metric = {}
for _metric in self.metric:
if _metric == "mr":
score = ranking.float().mean()
elif _metric == "mrr":
score = (1 / ranking.float()).mean()
elif _metric.startswith("hits@"):
threshold = int(_metric[5:])
score = (ranking <= threshold).float().mean()
elif _metric == "1-to-1":
score = (1 / ranking[self.is_one_to_one[relation]].float()).mean()
metric["1-to-1 tail"] = (1 / ranking[self.is_one_to_one[relation]].float()).mean(dim=0)[0]
metric["1-to-1 head"] = (1 / ranking[self.is_one_to_one[relation]].float()).mean(dim=0)[1]
elif _metric == "1-to-n":
score = (1 / ranking[self.is_one_to_many[relation]].float()).mean()
metric["1-to-n tail"] = (1 / ranking[self.is_one_to_many[relation]].float()).mean(dim=0)[0]
metric["1-to-n head"] = (1 / ranking[self.is_one_to_many[relation]].float()).mean(dim=0)[1]
elif _metric == "n-to-1":
score = (1 / ranking[self.is_many_to_one[relation]].float()).mean()
metric["n-to-1 tail"] = (1 / ranking[self.is_many_to_one[relation]].float()).mean(dim=0)[0]
metric["n-to-1 head"] = (1 / ranking[self.is_many_to_one[relation]].float()).mean(dim=0)[1]
elif _metric == "n-to-n":
score = (1 / ranking[self.is_many_to_many[relation]].float()).mean()
metric["n-to-n tail"] = (1 / ranking[self.is_many_to_many[relation]].float()).mean(dim=0)[0]
metric["n-to-n head"] = (1 / ranking[self.is_many_to_many[relation]].float()).mean(dim=0)[1]
else:
raise ValueError("Unknown metric `%s`" % _metric)
name = tasks._get_metric_name(_metric)
metric[name] = score
return metric
@R.register("tasks.LinkPrediction")
class LinkPrediction(tasks.Task, core.Configurable):
_option_members = ["criterion", "metric"]
def __init__(self, model, criterion="bce", metric=("auroc", "ap"), num_negative=128, strict_negative=True):
super(LinkPrediction, self).__init__()
self.model = model
self.criterion = criterion
self.metric = metric
self.num_negative = num_negative
self.strict_negative = strict_negative
def preprocess(self, train_set, valid_set, test_set):
if isinstance(train_set, torch_data.Subset):
dataset = train_set.dataset
else:
dataset = train_set
self.num_node = dataset.num_node
train_mask = train_set.indices
valid_mask = train_set.indices + valid_set.indices
train_graph = dataset.graph.edge_mask(train_mask)
valid_graph = dataset.graph.edge_mask(valid_mask)
self.register_buffer("train_graph", train_graph.undirected())
self.register_buffer("valid_graph", valid_graph.undirected())
self.register_buffer("test_graph", dataset.graph.undirected())
def forward(self, batch):
all_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
metric = {}
pred, target = self.predict_and_target(batch, all_loss, metric)
metric.update(self.evaluate(pred, target))
for criterion, weight in self.criterion.items():
if criterion == "bce":
loss = F.binary_cross_entropy_with_logits(pred, target, reduction="none")
neg_weight = torch.ones_like(pred)
neg_weight[:, 1:] = 1 / self.num_negative
loss = (loss * neg_weight).sum(dim=-1) / neg_weight.sum(dim=-1)
else:
raise ValueError("Unknown criterion `%s`" % criterion)
loss = loss.mean()
name = tasks._get_criterion_name(criterion)
metric[name] = loss
all_loss += loss * weight
return all_loss, metric
@torch.no_grad()
def _strict_negative(self, count, split="train"):
graph = getattr(self, "%s_graph" % split)
node_in = graph.edge_list[:, 0]
degree_in = torch.bincount(node_in, minlength=self.num_node)
prob = (graph.num_node - degree_in - 1).float()
neg_h_index = functional.multinomial(prob, count, replacement=True)
any = -torch.ones_like(neg_h_index)
pattern = torch.stack([neg_h_index, any], dim=-1)
edge_index, num_t_truth = graph.match(pattern)
t_truth_index = graph.edge_list[edge_index, 1]
pos_index = functional._size_to_index(num_t_truth)
t_mask = torch.ones(count, self.num_node, dtype=torch.bool, device=self.device)
t_mask[pos_index, t_truth_index] = 0
t_mask.scatter_(1, neg_h_index.unsqueeze(-1), 0)
neg_t_candidate = t_mask.nonzero()[:, 1]
num_t_candidate = t_mask.sum(dim=-1)
neg_t_index = functional.variadic_sample(neg_t_candidate, num_t_candidate, 1).squeeze(-1)
return neg_h_index, neg_t_index
def predict_and_target(self, batch, all_loss=None, metric=None):
batch_size = len(batch)
pos_h_index, pos_t_index = batch.t()
if self.split == "train":
num_negative = self.num_negative
else:
num_negative = 1
if self.strict_negative or self.split != "train":
neg_h_index, neg_t_index = self._strict_negative(batch_size * num_negative, self.split)
else:
neg_h_index, neg_t_index = torch.randint(self.num_node, (2, batch_size * num_negative), device=self.device)
neg_h_index = neg_h_index.view(batch_size, num_negative)
neg_t_index = neg_t_index.view(batch_size, num_negative)
h_index = pos_h_index.unsqueeze(-1).repeat(1, num_negative + 1)
t_index = pos_t_index.unsqueeze(-1).repeat(1, num_negative + 1)
h_index[:, 1:] = neg_h_index
t_index[:, 1:] = neg_t_index
pred = self.model(self.train_graph, h_index, t_index, all_loss=all_loss, metric=metric)
target = torch.zeros_like(pred)
target[:, 0] = 1
return pred, target
def evaluate(self, pred, target):
pred = pred.flatten()
target = target.flatten()
metric = {}
for _metric in self.metric:
if _metric == "auroc":
score = metrics.area_under_roc(pred, target)
elif _metric == "ap":
score = metrics.area_under_prc(pred, target)
else:
raise ValueError("Unknown metric `%s`" % _metric)
name = tasks._get_metric_name(_metric)
metric[name] = score
return metric
@R.register("tasks.InductiveKnowledgeGraphCompletion")
class InductiveKnowledgeGraphCompletion(tasks.KnowledgeGraphCompletion, core.Configurable):
def __init__(self, model, criterion="bce", metric=("mr", "mrr", "hits@1", "hits@3", "hits@10", "hits@10_50"),
num_negative=128, margin=6, adversarial_temperature=0, strict_negative=True, sample_weight=True):
super(InductiveKnowledgeGraphCompletion, self).__init__(
model, criterion, metric, num_negative, margin, adversarial_temperature, strict_negative,
sample_weight=sample_weight)
def preprocess(self, train_set, valid_set, test_set):
if isinstance(train_set, torch_data.Subset):
dataset = train_set.dataset
else:
dataset = train_set
self.num_entity = dataset.num_entity
self.num_relation = dataset.num_relation
self.register_buffer("fact_graph", dataset.graph)
if self.sample_weight:
degree_hr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
degree_tr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
for h, t, r in train_set:
degree_hr[h, r] += 1
degree_tr[t, r] += 1
self.register_buffer("degree_hr", degree_hr)
self.register_buffer("degree_tr", degree_tr)
self.register_buffer("train_graph", dataset.train_graph)
self.register_buffer("valid_graph", dataset.valid_graph)
self.register_buffer("test_graph", dataset.test_graph)
return train_set, valid_set, test_set
def predict(self, batch, all_loss=None, metric=None):
pos_h_index, pos_t_index, pos_r_index = batch.t()
batch_size = len(batch)
graph = getattr(self, "%s_graph" % self.split)
if all_loss is None:
# test
all_index = torch.arange(graph.num_node, device=self.device)
t_preds = []
h_preds = []
for neg_index in all_index.split(self.num_negative):
r_index = pos_r_index.unsqueeze(-1).expand(-1, len(neg_index))
h_index, t_index = torch.meshgrid(pos_h_index, neg_index)
t_pred = self.model(graph, h_index, t_index, r_index, all_loss=all_loss, metric=metric)
t_preds.append(t_pred)
t_pred = torch.cat(t_preds, dim=-1)
for neg_index in all_index.split(self.num_negative):
r_index = pos_r_index.unsqueeze(-1).expand(-1, len(neg_index))
t_index, h_index = torch.meshgrid(pos_t_index, neg_index)
h_pred = self.model(graph, h_index, t_index, r_index, all_loss=all_loss, metric=metric)
h_preds.append(h_pred)
h_pred = torch.cat(h_preds, dim=-1)
pred = torch.stack([t_pred, h_pred], dim=1)
# in case of GPU OOM
pred = pred.cpu()
else:
# train
if self.strict_negative:
neg_index = self._strict_negative(pos_h_index, pos_t_index, pos_r_index)
else:
neg_index = torch.randint(self.num_entity, (batch_size, self.num_negative), device=self.device)
h_index = pos_h_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
t_index = pos_t_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
r_index = pos_r_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
t_index[:batch_size // 2, 1:] = neg_index[:batch_size // 2]
h_index[batch_size // 2:, 1:] = neg_index[batch_size // 2:]
pred = self.model(graph, h_index, t_index, r_index, all_loss=all_loss, metric=metric)
return pred
def target(self, batch):
# test target
batch_size = len(batch)
graph = getattr(self, "%s_graph" % self.split)
pos_h_index, pos_t_index, pos_r_index = batch.t()
any = -torch.ones_like(pos_h_index)
pattern = torch.stack([pos_h_index, any, pos_r_index], dim=-1)
edge_index, num_t_truth = graph.match(pattern)
t_truth_index = graph.edge_list[edge_index, 1]
pos_index = functional._size_to_index(num_t_truth)
t_mask = torch.ones(batch_size, graph.num_node, dtype=torch.bool, device=self.device)
t_mask[pos_index, t_truth_index] = 0
pattern = torch.stack([any, pos_t_index, pos_r_index], dim=-1)
edge_index, num_h_truth = graph.match(pattern)
h_truth_index = graph.edge_list[edge_index, 0]
pos_index = functional._size_to_index(num_h_truth)
h_mask = torch.ones(batch_size, graph.num_node, dtype=torch.bool, device=self.device)
h_mask[pos_index, h_truth_index] = 0
mask = torch.stack([t_mask, h_mask], dim=1)
target = torch.stack([pos_t_index, pos_h_index], dim=1)
# in case of GPU OOM
return mask.cpu(), target.cpu()
def evaluate(self, pred, target):
mask, target = target
pos_pred = pred.gather(-1, target.unsqueeze(-1))
ranking = torch.sum((pos_pred <= pred) & mask, dim=-1) + 1
metric = {}
for _metric in self.metric:
if _metric == "mr":
score = ranking.float().mean()
elif _metric == "mrr":
score = (1 / ranking.float()).mean()
elif _metric.startswith("hits@"):
values = _metric[5:].split("_")
threshold = int(values[0])
if len(values) > 1:
num_sample = int(values[1])
# unbiased estimation
fp_rate = (ranking - 1).float() / mask.sum(dim=-1)
score = 0
for i in range(threshold):
# choose i false positive from num_sample negatives
num_comb = math.factorial(num_sample) / math.factorial(i) / math.factorial(num_sample - i)
score += num_comb * (fp_rate ** i) * ((1 - fp_rate) ** (num_sample - i))
score = score.mean()
else:
score = (ranking <= threshold).float().mean()
else:
raise ValueError("Unknown metric `%s`" % _metric)
name = tasks._get_metric_name(_metric)
metric[name] = score
return metric
@R.register("tasks.KnowledgeGraphCompletionOGB")
class KnowledgeGraphCompletionOGB(tasks.KnowledgeGraphCompletion, core.Configurable):
    """Knowledge graph completion scored with an OGB link-prediction evaluator.

    Test-time candidates are the pre-sampled negatives shipped with the OGB
    dataset (the positive sits at column 0 of each candidate row), so no full
    ranking over all entities is performed; metric computation is delegated
    to ``evaluator``.
    """

    def __init__(self, model, criterion="bce", evaluator=None, num_negative=128, margin=6, adversarial_temperature=0,
                 strict_negative=True, heterogeneous_negative=False, fact_ratio=None, sample_weight=True):
        # metric is fixed to None and a positional True follows strict_negative
        # in the parent call — NOTE(review): check the parent signature to
        # confirm which option that True enables.
        super(KnowledgeGraphCompletionOGB, self).__init__(
            model, criterion, None, num_negative, margin, adversarial_temperature, strict_negative, True,
            fact_ratio, sample_weight)
        self.evaluator = evaluator
        # when True, negatives are restricted to entities of the same node type
        self.heterogeneous_negative = heterogeneous_negative

    def preprocess(self, train_set, valid_set, test_set):
        """Register the fact graph and optional degree-based statistics."""
        if isinstance(train_set, torch_data.Subset):
            dataset = train_set.dataset
        else:
            dataset = train_set
        self.num_entity = dataset.num_entity
        self.num_relation = dataset.num_relation
        self.register_buffer("graph", dataset.graph)
        # facts = training edges; optionally hold a fraction out for supervision
        fact_mask = torch.zeros(len(dataset), dtype=torch.bool)
        fact_mask[train_set.indices] = 1
        if self.fact_ratio:
            length = int(len(train_set) * self.fact_ratio)
            index = torch.randperm(len(train_set))[length:]
            train_indices = torch.tensor(train_set.indices)
            fact_mask[train_indices[index]] = 0
            train_set = torch_data.Subset(train_set, index)
        self.register_buffer("fact_graph", dataset.graph.edge_mask(fact_mask))

        if self.sample_weight:
            # per-(entity, relation) degree counts over the training triplets
            degree_hr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
            degree_tr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
            for h, t, r in train_set:
                degree_hr[h, r] += 1
                degree_tr[t, r] += 1
            self.register_buffer("degree_hr", degree_hr)
            self.register_buffer("degree_tr", degree_tr)
        return train_set, valid_set, test_set

    @torch.no_grad()
    def _strict_negative(self, pos_h_index, pos_t_index, pos_r_index):
        """Sample negatives guaranteed not to be facts.

        The first half of the batch gets corrupted tails, the second half
        corrupted heads; returns the concatenated negative entity indices.
        """
        batch_size = len(pos_h_index)
        any = -torch.ones_like(pos_h_index)  # wildcard value for graph.match
        node_type = self.fact_graph.node_type

        # tail corruption for the first half of the batch
        pattern = torch.stack([pos_h_index, any, pos_r_index], dim=-1)
        pattern = pattern[:batch_size // 2]
        edge_index, num_t_truth = self.fact_graph.match(pattern)
        t_truth_index = self.fact_graph.edge_list[edge_index, 1]
        pos_index = functional._size_to_index(num_t_truth)
        if self.heterogeneous_negative:
            # candidates limited to entities sharing the true tail's type
            pos_t_type = node_type[pos_t_index[:batch_size // 2]]
            t_mask = pos_t_type.unsqueeze(-1) == node_type.unsqueeze(0)
        else:
            t_mask = torch.ones(len(pattern), self.num_entity, dtype=torch.bool, device=self.device)
        t_mask[pos_index, t_truth_index] = 0  # exclude known true tails
        neg_t_candidate = t_mask.nonzero()[:, 1]
        num_t_candidate = t_mask.sum(dim=-1)
        neg_t_index = functional.variadic_sample(neg_t_candidate, num_t_candidate, self.num_negative)

        # head corruption for the second half of the batch
        pattern = torch.stack([any, pos_t_index, pos_r_index], dim=-1)
        pattern = pattern[batch_size // 2:]
        edge_index, num_h_truth = self.fact_graph.match(pattern)
        h_truth_index = self.fact_graph.edge_list[edge_index, 0]
        pos_index = functional._size_to_index(num_h_truth)
        if self.heterogeneous_negative:
            pos_h_type = node_type[pos_h_index[batch_size // 2:]]
            h_mask = pos_h_type.unsqueeze(-1) == node_type.unsqueeze(0)
        else:
            h_mask = torch.ones(len(pattern), self.num_entity, dtype=torch.bool, device=self.device)
        h_mask[pos_index, h_truth_index] = 0  # exclude known true heads
        neg_h_candidate = h_mask.nonzero()[:, 1]
        num_h_candidate = h_mask.sum(dim=-1)
        neg_h_index = functional.variadic_sample(neg_h_candidate, num_h_candidate, self.num_negative)

        neg_index = torch.cat([neg_t_index, neg_h_index])
        return neg_index

    def predict(self, batch, all_loss=None, metric=None):
        """Score triplets; test batches already embed the OGB negatives."""
        batch_size = len(batch)

        if all_loss is None:
            # test: batch is (batch_size, num_candidate, 3), positive first
            h_index, t_index, r_index = batch.unbind(-1)
            pattern = batch[:, 0, :]
            num_match = self.fact_graph.match(pattern)[1]
            assert (num_match == 0).all()  # positives must not appear as facts
            pred = self.model(self.fact_graph, h_index, t_index, r_index, all_loss=all_loss, metric=metric)
            # in case of GPU OOM
            pred = pred.cpu()
        else:
            # train: corrupt tails (first half) and heads (second half)
            pos_h_index, pos_t_index, pos_r_index = batch.t()
            if self.strict_negative:
                neg_index = self._strict_negative(pos_h_index, pos_t_index, pos_r_index)
            else:
                neg_index = torch.randint(self.num_entity, (batch_size, self.num_negative), device=self.device)
            h_index = pos_h_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
            t_index = pos_t_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
            r_index = pos_r_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
            t_index[:batch_size // 2, 1:] = neg_index[:batch_size // 2]
            h_index[batch_size // 2:, 1:] = neg_index[batch_size // 2:]
            pred = self.model(self.fact_graph, h_index, t_index, r_index, all_loss=all_loss, metric=metric)

        return pred

    def target(self, batch):
        """The positive candidate is always at column 0 of each row."""
        # test target
        batch_size = len(batch)
        target = torch.zeros(batch_size, dtype=torch.long, device=self.device)
        # in case of GPU OOM
        return target.cpu()

    def evaluate(self, pred, target):
        """Split scores into positives / negatives and run the OGB evaluator."""
        is_positive = torch.zeros(pred.shape, dtype=torch.bool)
        is_positive.scatter_(-1, target.unsqueeze(-1), 1)
        pos_pred = pred[is_positive]
        neg_pred = pred[~is_positive].view(len(pos_pred), -1)
        metric = self.evaluator.eval({"y_pred_pos": pos_pred, "y_pred_neg": neg_pred})

        new_metric = {}
        for key in metric:
            # keep only the base metric name (text before the first "_")
            new_key = key.split("_")[0]
            new_metric[new_key] = metric[key].mean()
        return new_metric
from collections.abc import Sequence
import torch
from torch import nn
from torch import autograd
from torch_scatter import scatter_add
from torchdrug import core, layers, utils
from torchdrug.layers import functional
from torchdrug.core import Registry as R
from . import layer
@R.register("model.NBFNet")
class NeuralBellmanFordNetwork(nn.Module, core.Configurable):
    """Neural Bellman-Ford network for link prediction.

    Runs query-conditioned relational message passing from the head entity
    and scores candidate tails with an MLP.  Also supports extracting
    interpretable paths via beam search over per-edge score gradients.
    """

    def __init__(self, input_dim, hidden_dims, num_relation=None, symmetric=False,
                 message_func="distmult", aggregate_func="pna", short_cut=False, layer_norm=False, activation="relu",
                 concat_hidden=False, num_mlp_layer=2, dependent=True, remove_one_hop=False,
                 num_beam=10, path_topk=10):
        super(NeuralBellmanFordNetwork, self).__init__()

        if not isinstance(hidden_dims, Sequence):
            hidden_dims = [hidden_dims]
        if num_relation is None:
            # homogeneous graph: a single dummy relation
            double_relation = 1
        else:
            # each relation also gets an inverse counterpart
            num_relation = int(num_relation)
            double_relation = num_relation * 2
        self.dims = [input_dim] + list(hidden_dims)
        self.num_relation = num_relation
        self.symmetric = symmetric
        self.short_cut = short_cut  # residual connections between layers
        self.concat_hidden = concat_hidden
        self.remove_one_hop = remove_one_hop
        self.num_beam = num_beam
        self.path_topk = path_topk

        self.layers = nn.ModuleList()
        for i in range(len(self.dims) - 1):
            self.layers.append(layer.GeneralizedRelationalConv(self.dims[i], self.dims[i + 1], double_relation,
                                                               self.dims[0], message_func, aggregate_func, layer_norm,
                                                               activation, dependent))

        feature_dim = hidden_dims[-1] * (len(hidden_dims) if concat_hidden else 1) + input_dim
        self.query = nn.Embedding(double_relation, input_dim)
        self.mlp = layers.MLP(feature_dim, [feature_dim] * (num_mlp_layer - 1) + [1])

    def remove_easy_edges(self, graph, h_index, t_index, r_index=None):
        """Drop edges that trivially answer the training query.

        With ``remove_one_hop`` enabled, all edges between the query endpoints
        (in both directions, any relation) are removed; otherwise only the
        exact query triplets are removed.
        """
        if self.remove_one_hop:
            h_index_ext = torch.cat([h_index, t_index], dim=-1)
            t_index_ext = torch.cat([t_index, h_index], dim=-1)
            if r_index is not None:
                any = -torch.ones_like(h_index_ext)  # wildcard for graph.match
                pattern = torch.stack([h_index_ext, t_index_ext, any], dim=-1)
            else:
                pattern = torch.stack([h_index_ext, t_index_ext], dim=-1)
        else:
            if r_index is not None:
                pattern = torch.stack([h_index, t_index, r_index], dim=-1)
            else:
                pattern = torch.stack([h_index, t_index], dim=-1)
        pattern = pattern.flatten(0, -2)
        edge_index = graph.match(pattern)[0]
        edge_mask = ~functional.as_mask(edge_index, graph.num_edge)
        return graph.edge_mask(edge_mask)

    def negative_sample_to_tail(self, h_index, t_index, r_index):
        # convert p(h | t, r) to p(t' | h', r')
        # h' = t, r' = r^{-1}, t' = h
        is_t_neg = (h_index == h_index[:, [0]]).all(dim=-1, keepdim=True)
        new_h_index = torch.where(is_t_neg, h_index, t_index)
        new_t_index = torch.where(is_t_neg, t_index, h_index)
        # inverse relations occupy ids num_relation .. 2 * num_relation - 1
        new_r_index = torch.where(is_t_neg, r_index, r_index + self.num_relation)
        return new_h_index, new_t_index, new_r_index

    def as_relational_graph(self, graph, self_loop=True):
        # add self loop
        # convert homogeneous graphs to knowledge graphs with 1 relation
        edge_list = graph.edge_list
        edge_weight = graph.edge_weight
        if self_loop:
            node_in = node_out = torch.arange(graph.num_node, device=self.device)
            loop = torch.stack([node_in, node_out], dim=-1)
            edge_list = torch.cat([edge_list, loop])
            edge_weight = torch.cat([edge_weight, torch.ones(graph.num_node, device=self.device)])
        # append a constant relation column of zeros
        relation = torch.zeros(len(edge_list), 1, dtype=torch.long, device=self.device)
        edge_list = torch.cat([edge_list, relation], dim=-1)
        graph = type(graph)(edge_list, edge_weight=edge_weight, num_node=graph.num_node,
                            num_relation=1, meta_dict=graph.meta_dict, **graph.data_dict)
        return graph

    @utils.cached
    def bellmanford(self, graph, h_index, r_index, separate_grad=False):
        """Run the Bellman-Ford style message passing for a batch of queries.

        Parameters:
            h_index (Tensor): head entity per query
            r_index (Tensor): relation per query
            separate_grad (bool): clone the graph per layer so per-step edge
                gradients can be computed later (used by ``visualize``)

        Returns:
            dict: ``node_feature`` (num_node, batch, feature_dim) and the
            per-layer ``step_graphs``.
        """
        query = self.query(r_index)
        index = h_index.unsqueeze(-1).expand_as(query)
        # boundary condition: place the query embedding on the head node
        boundary = torch.zeros(graph.num_node, *query.shape, device=self.device)
        boundary.scatter_add_(0, index.unsqueeze(0), query.unsqueeze(0))
        with graph.graph():
            graph.query = query
        with graph.node():
            graph.boundary = boundary

        hiddens = []
        step_graphs = []
        layer_input = boundary
        for layer in self.layers:
            if separate_grad:
                step_graph = graph.clone().requires_grad_()
            else:
                step_graph = graph
            hidden = layer(step_graph, layer_input)
            if self.short_cut and hidden.shape == layer_input.shape:
                hidden = hidden + layer_input  # residual connection
            hiddens.append(hidden)
            step_graphs.append(step_graph)
            layer_input = hidden

        # concatenate the query embedding onto every node's representation
        node_query = query.expand(graph.num_node, -1, -1)
        if self.concat_hidden:
            output = torch.cat(hiddens + [node_query], dim=-1)
        else:
            output = torch.cat([hiddens[-1], node_query], dim=-1)
        return {
            "node_feature": output,
            "step_graphs": step_graphs,
        }

    def forward(self, graph, h_index, t_index, r_index=None, all_loss=None, metric=None):
        """Score (h, r, t) triplets; returns a tensor shaped like ``h_index``."""
        if all_loss is not None:
            # at training time, remove edges that would leak the answer
            graph = self.remove_easy_edges(graph, h_index, t_index, r_index)

        shape = h_index.shape
        if graph.num_relation:
            graph = graph.undirected(add_inverse=True)
            h_index, t_index, r_index = self.negative_sample_to_tail(h_index, t_index, r_index)
        else:
            graph = self.as_relational_graph(graph)
            h_index = h_index.view(-1, 1)
            t_index = t_index.view(-1, 1)
            r_index = torch.zeros_like(h_index)

        # all samples in a row must share the same head and relation, so a
        # single bellman-ford pass per row suffices
        assert (h_index[:, [0]] == h_index).all()
        assert (r_index[:, [0]] == r_index).all()
        output = self.bellmanford(graph, h_index[:, 0], r_index[:, 0])
        feature = output["node_feature"].transpose(0, 1)
        index = t_index.unsqueeze(-1).expand(-1, -1, feature.shape[-1])
        feature = feature.gather(1, index)

        if self.symmetric:
            # average with the features of the reversed query
            assert (t_index[:, [0]] == t_index).all()
            output = self.bellmanford(graph, t_index[:, 0], r_index[:, 0])
            inv_feature = output["node_feature"].transpose(0, 1)
            index = h_index.unsqueeze(-1).expand(-1, -1, inv_feature.shape[-1])
            inv_feature = inv_feature.gather(1, index)
            feature = (feature + inv_feature) / 2
        score = self.mlp(feature).squeeze(-1)
        return score.view(shape)

    def visualize(self, graph, h_index, t_index, r_index):
        """Return top-k interpretable paths and their weights for one triplet."""
        assert h_index.numel() == 1 and h_index.ndim == 1
        graph = graph.undirected(add_inverse=True)

        output = self.bellmanford(graph, h_index, r_index, separate_grad=True)
        feature = output["node_feature"]
        step_graphs = output["step_graphs"]
        index = t_index.unsqueeze(0).unsqueeze(-1).expand(-1, -1, feature.shape[-1])
        feature = feature.gather(0, index).squeeze(0)
        score = self.mlp(feature).squeeze(-1)

        # edge importance = gradient of the score w.r.t. each step's edge weight
        edge_weights = [graph.edge_weight for graph in step_graphs]
        edge_grads = autograd.grad(score, edge_weights)
        for graph, edge_grad in zip(step_graphs, edge_grads):
            with graph.edge():
                graph.edge_grad = edge_grad
        distances, back_edges = self.beam_search_distance(step_graphs, h_index, t_index, self.num_beam)
        paths, weights = self.topk_average_length(distances, back_edges, t_index, self.path_topk)
        return paths, weights

    @torch.no_grad()
    def beam_search_distance(self, graphs, h_index, t_index, num_beam=10):
        """Beam search over edge gradients for high-weight paths h -> t.

        Returns per-layer ``distances`` (num_node, num_beam) and
        ``back_edges`` (num_node, num_beam, 4) where the last column is the
        beam rank of the predecessor state.
        """
        num_node = graphs[0].num_node
        input = torch.full((num_node, num_beam), float("-inf"), device=self.device)
        input[h_index, 0] = 0  # the search starts at the head node
        distances = []
        back_edges = []
        for graph in graphs:
            # forbid leaving the target node once it has been reached
            graph = graph.edge_mask(graph.edge_list[:, 0] != t_index)

            node_in, node_out = graph.edge_list.t()[:2]
            # candidate messages: (num_edge, num_beam)
            message = input[node_in] + graph.edge_grad.unsqueeze(-1)
            msg_source = graph.edge_list.unsqueeze(1).expand(-1, num_beam, -1)

            is_duplicate = torch.isclose(message.unsqueeze(-1), message.unsqueeze(-2)) & \
                           (msg_source.unsqueeze(-2) == msg_source.unsqueeze(-3)).all(dim=-1)
            # bias towards lower beam indices so argmax is deterministic
            is_duplicate = is_duplicate.float() - \
                           torch.arange(num_beam, dtype=torch.float, device=self.device) / (num_beam + 1)
            # pick the first occurrence as the previous state
            prev_rank = is_duplicate.argmax(dim=-1, keepdim=True)
            msg_source = torch.cat([msg_source, prev_rank], dim=-1)  # (num_edge, num_beam, 4)

            node_out, order = node_out.sort()
            node_out_set = torch.unique(node_out)
            # sort message w.r.t. node_out
            message = message[order].flatten()
            msg_source = msg_source[order].flatten(0, -2)
            size = scatter_add(torch.ones_like(node_out), node_out, dim_size=num_node)
            msg2out = functional._size_to_index(size[node_out_set] * num_beam)
            # deduplicate identical (edge, prev_rank) sources
            is_duplicate = (msg_source[1:] == msg_source[:-1]).all(dim=-1)
            is_duplicate = torch.cat([torch.zeros(1, dtype=torch.bool, device=self.device), is_duplicate])
            message = message[~is_duplicate]
            msg_source = msg_source[~is_duplicate]
            msg2out = msg2out[~is_duplicate]
            size = scatter_add(torch.ones_like(msg2out), msg2out, dim_size=len(node_out_set))

            if not torch.isinf(message).all():
                # keep the top num_beam messages per output node
                distance, rel_index = functional.variadic_topk(message, size, k=num_beam)
                abs_index = rel_index + (size.cumsum(0) - size).unsqueeze(-1)
                back_edge = msg_source[abs_index]
                distance = distance.view(len(node_out_set), num_beam)
                back_edge = back_edge.view(len(node_out_set), num_beam, 4)
                # scatter back to the full node index space
                distance = scatter_add(distance, node_out_set, dim=0, dim_size=num_node)
                back_edge = scatter_add(back_edge, node_out_set, dim=0, dim_size=num_node)
            else:
                distance = torch.full((num_node, num_beam), float("-inf"), device=self.device)
                back_edge = torch.zeros(num_node, num_beam, 4, dtype=torch.long, device=self.device)

            distances.append(distance)
            back_edges.append(back_edge)
            input = distance
        return distances, back_edges

    def topk_average_length(self, distances, back_edges, t_index, k=10):
        """Decode the k best paths ending at ``t_index``, ranked by the path
        weight averaged over its length."""
        paths = []
        average_lengths = []

        for i in range(len(distances)):
            distance, order = distances[i][t_index].flatten(0, -1).sort(descending=True)
            back_edge = back_edges[i][t_index].flatten(0, -2)[order]
            for d, (h, t, r, prev_rank) in zip(distance[:k].tolist(), back_edge[:k].tolist()):
                if d == float("-inf"):
                    break
                # follow the back pointers to reconstruct the full path
                path = [(h, t, r)]
                for j in range(i - 1, -1, -1):
                    h, t, r, prev_rank = back_edges[j][h, prev_rank].tolist()
                    path.append((h, t, r))
                paths.append(path[::-1])
                average_lengths.append(d / len(path))

        if paths:
            average_lengths, paths = zip(*sorted(zip(average_lengths, paths), reverse=True)[:k])
        return paths, average_lengths
import os
import csv
import glob
from tqdm import tqdm
from ogb import linkproppred
import torch
from torch.utils import data as torch_data
from torchdrug import data, datasets, utils
from torchdrug.core import Registry as R
class InductiveKnowledgeGraphDataset(data.KnowledgeGraphDataset):
    """Knowledge graph dataset whose test entities are disjoint from training.

    Two graphs are kept: a training graph (also used for validation) and an
    inductive test graph with its own entity vocabulary.  The relation
    vocabulary is shared, so every test relation must occur during training.
    """

    def load_inductive_tsvs(self, train_files, test_files, verbose=0):
        """Load (train, valid) and (test-fact, test-query) TSV files.

        Each row is a tab-separated (head, relation, tail) token triple.
        Entities from the train and test files are indexed in separate
        vocabularies; relations share one vocabulary.
        """
        assert len(train_files) == len(test_files) == 2
        inv_train_entity_vocab = {}
        inv_test_entity_vocab = {}
        inv_relation_vocab = {}
        triplets = []
        num_samples = []

        for txt_file in train_files:
            with open(txt_file, "r") as fin:
                reader = csv.reader(fin, delimiter="\t")
                if verbose:
                    reader = tqdm(reader, "Loading %s" % txt_file, utils.get_line_count(txt_file))

                num_sample = 0
                for tokens in reader:
                    h_token, r_token, t_token = tokens
                    if h_token not in inv_train_entity_vocab:
                        inv_train_entity_vocab[h_token] = len(inv_train_entity_vocab)
                    h = inv_train_entity_vocab[h_token]
                    if r_token not in inv_relation_vocab:
                        inv_relation_vocab[r_token] = len(inv_relation_vocab)
                    r = inv_relation_vocab[r_token]
                    if t_token not in inv_train_entity_vocab:
                        inv_train_entity_vocab[t_token] = len(inv_train_entity_vocab)
                    t = inv_train_entity_vocab[t_token]
                    triplets.append((h, t, r))
                    num_sample += 1
            num_samples.append(num_sample)

        for txt_file in test_files:
            with open(txt_file, "r") as fin:
                reader = csv.reader(fin, delimiter="\t")
                if verbose:
                    reader = tqdm(reader, "Loading %s" % txt_file, utils.get_line_count(txt_file))

                num_sample = 0
                for tokens in reader:
                    h_token, r_token, t_token = tokens
                    if h_token not in inv_test_entity_vocab:
                        inv_test_entity_vocab[h_token] = len(inv_test_entity_vocab)
                    h = inv_test_entity_vocab[h_token]
                    # in the inductive setting relations must be seen in training
                    assert r_token in inv_relation_vocab
                    r = inv_relation_vocab[r_token]
                    if t_token not in inv_test_entity_vocab:
                        inv_test_entity_vocab[t_token] = len(inv_test_entity_vocab)
                    t = inv_test_entity_vocab[t_token]
                    triplets.append((h, t, r))
                    num_sample += 1
            num_samples.append(num_sample)

        train_entity_vocab, inv_train_entity_vocab = self._standarize_vocab(None, inv_train_entity_vocab)
        test_entity_vocab, inv_test_entity_vocab = self._standarize_vocab(None, inv_test_entity_vocab)
        relation_vocab, inv_relation_vocab = self._standarize_vocab(None, inv_relation_vocab)

        self.train_graph = data.Graph(triplets[:num_samples[0]],
                                      num_node=len(train_entity_vocab), num_relation=len(relation_vocab))
        self.valid_graph = self.train_graph
        self.test_graph = data.Graph(triplets[sum(num_samples[:2]): sum(num_samples[:3])],
                                     num_node=len(test_entity_vocab), num_relation=len(relation_vocab))
        self.graph = self.train_graph
        # query samples: train + valid triplets plus the test queries; edges
        # from test_files[0] only build test_graph and are not query samples
        self.triplets = torch.tensor(triplets[:sum(num_samples[:2])] + triplets[sum(num_samples[:3]):])
        self.num_samples = num_samples[:2] + num_samples[3:]
        self.train_entity_vocab = train_entity_vocab
        self.test_entity_vocab = test_entity_vocab
        self.relation_vocab = relation_vocab
        self.inv_train_entity_vocab = inv_train_entity_vocab
        self.inv_test_entity_vocab = inv_test_entity_vocab
        self.inv_relation_vocab = inv_relation_vocab

    def __getitem__(self, index):
        return self.triplets[index]

    def split(self):
        """Return train / valid / test subsets following ``num_samples``."""
        offset = 0
        splits = []
        for num_sample in self.num_samples:
            split = torch_data.Subset(self, range(offset, offset + num_sample))
            splits.append(split)
            offset += num_sample
        return splits
@R.register("datasets.CoraLinkPrediction")
class CoraLinkPrediction(datasets.Cora):
    """Cora citation graph repackaged as an edge-level link prediction dataset."""

    def __init__(self, **kwargs):
        super(CoraLinkPrediction, self).__init__(**kwargs)
        # samples are raw edges, so no per-item transform is applied
        self.transform = None

    def __getitem__(self, index):
        # one sample = one row of the edge list
        return self.graph.edge_list[index]

    def __len__(self):
        return self.graph.num_edge

    def split(self, ratios=(85, 5, 10)):
        """Randomly partition edges into train / valid / test subsets.

        The split is deterministic (generator seeded with 0); the last subset
        absorbs the rounding remainder so the sizes sum to the edge count.
        """
        total = self.graph.num_edge
        denom = sum(ratios)
        sizes = [int(r / denom * total) for r in ratios]
        sizes[-1] = total - sum(sizes[:-1])
        generator = torch.Generator()
        generator.manual_seed(0)
        return torch_data.random_split(self, sizes, generator=generator)
@R.register("datasets.CiteSeerLinkPrediction")
class CiteSeerLinkPrediction(datasets.CiteSeer):
    """CiteSeer citation graph repackaged as an edge-level link prediction dataset."""

    def __init__(self, **kwargs):
        super(CiteSeerLinkPrediction, self).__init__(**kwargs)
        # samples are raw edges, so no per-item transform is applied
        self.transform = None

    def __getitem__(self, index):
        # one sample = one row of the edge list
        return self.graph.edge_list[index]

    def __len__(self):
        return self.graph.num_edge

    def split(self, ratios=(85, 5, 10)):
        """Randomly partition edges into train / valid / test subsets.

        The split is deterministic (generator seeded with 0); the last subset
        absorbs the rounding remainder so the sizes sum to the edge count.
        """
        total = self.graph.num_edge
        denom = sum(ratios)
        sizes = [int(r / denom * total) for r in ratios]
        sizes[-1] = total - sum(sizes[:-1])
        generator = torch.Generator()
        generator.manual_seed(0)
        return torch_data.random_split(self, sizes, generator=generator)
@R.register("datasets.PubMedLinkPrediction")
class PubMedLinkPrediction(datasets.PubMed):
    """PubMed citation graph repackaged as an edge-level link prediction dataset."""

    def __init__(self, **kwargs):
        super(PubMedLinkPrediction, self).__init__(**kwargs)
        # samples are raw edges, so no per-item transform is applied
        self.transform = None

    def __getitem__(self, index):
        # one sample = one row of the edge list
        return self.graph.edge_list[index]

    def __len__(self):
        return self.graph.num_edge

    def split(self, ratios=(85, 5, 10)):
        """Randomly partition edges into train / valid / test subsets.

        The split is deterministic (generator seeded with 0); the last subset
        absorbs the rounding remainder so the sizes sum to the edge count.
        """
        total = self.graph.num_edge
        denom = sum(ratios)
        sizes = [int(r / denom * total) for r in ratios]
        sizes[-1] = total - sum(sizes[:-1])
        generator = torch.Generator()
        generator.manual_seed(0)
        return torch_data.random_split(self, sizes, generator=generator)
@R.register("datasets.FB15k237Inductive")
class FB15k237Inductive(InductiveKnowledgeGraphDataset):
    """Inductive FB15k-237 splits from the GraIL benchmark repository."""

    train_urls = [
        "https://raw.githubusercontent.com/kkteru/grail/master/data/fb237_%s/train.txt",
        "https://raw.githubusercontent.com/kkteru/grail/master/data/fb237_%s/valid.txt",
    ]
    test_urls = [
        "https://raw.githubusercontent.com/kkteru/grail/master/data/fb237_%s_ind/train.txt",
        "https://raw.githubusercontent.com/kkteru/grail/master/data/fb237_%s_ind/test.txt",
    ]

    def __init__(self, path, version="v1", verbose=1):
        path = os.path.expanduser(path)
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path
        # fetch (or reuse cached) transductive and inductive split files
        train_files = self._fetch(self.train_urls, version, "fb15k237_%s_%s")
        test_files = self._fetch(self.test_urls, version, "fb15k237_%s_ind_%s")
        self.load_inductive_tsvs(train_files, test_files, verbose=verbose)

    def _fetch(self, urls, version, name_template):
        """Download each URL for *version* into ``self.path``; return local paths."""
        files = []
        for url in urls:
            url = url % version
            save_file = name_template % (version, os.path.basename(url))
            txt_file = os.path.join(self.path, save_file)
            if not os.path.exists(txt_file):
                txt_file = utils.download(url, self.path, save_file=save_file)
            files.append(txt_file)
        return files
@R.register("datasets.WN18RRInductive")
class WN18RRInductive(InductiveKnowledgeGraphDataset):
    """Inductive WN18RR splits from the GraIL benchmark repository."""

    train_urls = [
        "https://raw.githubusercontent.com/kkteru/grail/master/data/WN18RR_%s/train.txt",
        "https://raw.githubusercontent.com/kkteru/grail/master/data/WN18RR_%s/valid.txt",
    ]
    test_urls = [
        "https://raw.githubusercontent.com/kkteru/grail/master/data/WN18RR_%s_ind/train.txt",
        "https://raw.githubusercontent.com/kkteru/grail/master/data/WN18RR_%s_ind/test.txt",
    ]

    def __init__(self, path, version="v1", verbose=1):
        path = os.path.expanduser(path)
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path
        # fetch (or reuse cached) transductive and inductive split files
        train_files = self._fetch(self.train_urls, version, "wn18rr_%s_%s")
        test_files = self._fetch(self.test_urls, version, "wn18rr_%s_ind_%s")
        self.load_inductive_tsvs(train_files, test_files, verbose=verbose)

    def _fetch(self, urls, version, name_template):
        """Download each URL for *version* into ``self.path``; return local paths."""
        files = []
        for url in urls:
            url = url % version
            save_file = name_template % (version, os.path.basename(url))
            txt_file = os.path.join(self.path, save_file)
            if not os.path.exists(txt_file):
                txt_file = utils.download(url, self.path, save_file=save_file)
            files.append(txt_file)
        return files
@R.register("datasets.OGBLBioKG")
class OGBLBioKG(data.KnowledgeGraphDataset):
    """ogbl-biokg knowledge graph with typed entities and official negatives."""

    def __init__(self, path, verbose=1):
        path = os.path.expanduser(path)
        self.path = path
        dataset = linkproppred.LinkPropPredDataset("ogbl-biokg", path)
        self.load_ogb(dataset, verbose=verbose)

    def load_ogb(self, dataset, verbose=1):
        """Build vocabularies, triplets and negatives from the OGB dataset."""
        entity_vocab = []
        relation_vocab = []
        entity_type_vocab = []
        inv_entity_type_offset = {}
        entity_type2num = []
        # each mapping csv lists per-type local entity ids; entities of one
        # type are appended consecutively, so a type maps to a global offset
        zip_files = glob.glob(os.path.join(dataset.root, "mapping/*.gz"))
        for zip_file in zip_files:
            csv_file = utils.extract(zip_file)
            type = os.path.basename(csv_file).split("_")[0]
            with open(csv_file, "r") as fin:
                reader = csv.reader(fin)
                if verbose:
                    reader = iter(tqdm(reader, "Loading %s" % csv_file, utils.get_line_count(csv_file)))

                fields = next(reader)  # skip the header row
                if "relidx" in csv_file:
                    for index, token in reader:
                        relation_vocab.append(token)
                else:
                    entity_type_vocab.append(type)
                    inv_entity_type_offset[type] = len(entity_vocab)
                    num_entity = 0
                    for index, token in reader:
                        entity_vocab.append("%s (%s)" % (type, token))
                        num_entity += 1
                    entity_type2num.append(num_entity)

        edge_split = dataset.get_edge_split()
        triplets = []
        num_samples = []
        num_samples_with_neg = []
        negative_heads = []
        negative_tails = []
        for key in ["train", "valid", "test"]:
            split_dict = edge_split[key]
            h = torch.as_tensor(split_dict["head"])
            t = torch.as_tensor(split_dict["tail"])
            r = torch.as_tensor(split_dict["relation"])
            # shift per-type local ids into the global entity id space
            h_type = torch.tensor([inv_entity_type_offset[h] for h in split_dict["head_type"]])
            t_type = torch.tensor([inv_entity_type_offset[t] for t in split_dict["tail_type"]])
            h = h + h_type
            t = t + t_type
            triplet = torch.stack([h, t, r], dim=-1)
            triplets.append(triplet)
            num_samples.append(len(h))
            if "head_neg" in split_dict:
                # splits that ship pre-sampled negatives keep them here too
                neg_h = torch.as_tensor(split_dict["head_neg"])
                neg_t = torch.as_tensor(split_dict["tail_neg"])
                neg_h = neg_h + h_type.unsqueeze(-1)
                neg_t = neg_t + t_type.unsqueeze(-1)
                negative_heads.append(neg_h)
                negative_tails.append(neg_t)
                num_samples_with_neg.append(len(h))
            else:
                num_samples_with_neg.append(0)
        triplets = torch.cat(triplets)
        self.load_triplet(triplets, entity_vocab=entity_vocab, relation_vocab=relation_vocab)
        entity_type_vocab, inv_entity_type_vocab = self._standarize_vocab(entity_type_vocab, None)
        self.entity_type_vocab = entity_type_vocab
        self.inv_entity_type_vocab = inv_entity_type_vocab
        self.num_samples = num_samples
        self.num_samples_with_neg = num_samples_with_neg
        self.negative_heads = torch.cat(negative_heads)
        self.negative_tails = torch.cat(negative_tails)
        # node_type[i] = index into entity_type_vocab for global entity i
        node_type = []
        for i, num_entity in enumerate(entity_type2num):
            node_type += [i] * num_entity
        with self.graph.node():
            self.graph.node_type = torch.tensor(node_type)

    def split(self, test_negative=True):
        """Return train / valid / test; splits with negatives are packed as
        candidate tensors.

        For those splits the rows are duplicated: the first copy corrupts
        tails, the second corrupts heads; the positive triplet always sits at
        column 0 of each row.
        """
        offset = 0
        neg_offset = 0
        splits = []
        for num_sample, num_sample_with_neg in zip(self.num_samples, self.num_samples_with_neg):
            if test_negative and num_sample_with_neg:
                pos_h, pos_t, pos_r = self[offset: offset + num_sample].t()
                neg_h = self.negative_heads[neg_offset: neg_offset + num_sample_with_neg]
                neg_t = self.negative_tails[neg_offset: neg_offset + num_sample_with_neg]
                num_negative = neg_h.shape[-1]
                h = pos_h.unsqueeze(-1).repeat(2, num_negative + 1)
                t = pos_t.unsqueeze(-1).repeat(2, num_negative + 1)
                r = pos_r.unsqueeze(-1).repeat(2, num_negative + 1)
                t[:num_sample_with_neg, 1:] = neg_t
                h[num_sample_with_neg:, 1:] = neg_h
                split = torch.stack([h, t, r], dim=-1)
            else:
                split = torch_data.Subset(self, range(offset, offset + num_sample))
            splits.append(split)
            offset += num_sample
            neg_offset += num_sample_with_neg
        return splits
| 14,201 | 39.005634 | 105 | py |
import os
import time
import logging
import argparse
import yaml
import jinja2
from jinja2 import meta
import easydict
import torch
from torch.utils import data as torch_data
from torch import distributed as dist
from torchdrug import core, utils
from torchdrug.utils import comm
logger = logging.getLogger(__file__)
def get_root_logger(file=True):
    """Configure and return the root logger at INFO level.

    Parameters:
        file (bool): if True, also log to ``log.txt`` in the current directory.

    Returns:
        logging.Logger: the root logger.
    """
    logger = logging.getLogger("")
    logger.setLevel(logging.INFO)
    # named `formatter` to avoid shadowing the builtin `format`
    formatter = logging.Formatter("%(asctime)-10s %(message)s", "%H:%M:%S")
    if file:
        # guard against attaching duplicate file handlers on repeated calls
        if not any(isinstance(handler, logging.FileHandler) for handler in logger.handlers):
            handler = logging.FileHandler("log.txt")
            handler.setFormatter(formatter)
            logger.addHandler(handler)
    return logger
def create_working_directory(cfg):
    """Create a timestamped output directory and chdir into it.

    In multi-process runs, rank 0 creates the directory and broadcasts its
    name to the other ranks through a temporary file; the two synchronize
    barriers keep the ranks in lockstep around that handoff.

    Returns:
        str: the working directory path (now the current directory).
    """
    file_name = "working_dir.tmp"
    world_size = comm.get_world_size()
    if world_size > 1 and not dist.is_initialized():
        comm.init_process_group("nccl", init_method="env://")

    working_dir = os.path.join(os.path.expanduser(cfg.output_dir),
                               cfg.task["class"], cfg.dataset["class"], cfg.task.model["class"],
                               time.strftime("%Y-%m-%d-%H-%M-%S"))

    # synchronize working directory
    if comm.get_rank() == 0:
        with open(file_name, "w") as fout:
            fout.write(working_dir)
        os.makedirs(working_dir)
    comm.synchronize()
    if comm.get_rank() != 0:
        # non-zero ranks read the directory chosen by rank 0
        with open(file_name, "r") as fin:
            working_dir = fin.read()
    comm.synchronize()
    if comm.get_rank() == 0:
        os.remove(file_name)
    os.chdir(working_dir)
    return working_dir
def detect_variables(cfg_file):
    """Return the undeclared Jinja2 variables referenced in a config template."""
    with open(cfg_file, "r") as fin:
        content = fin.read()
    ast = jinja2.Environment().parse(content)
    return meta.find_undeclared_variables(ast)
def load_config(cfg_file, context=None):
    """Render a Jinja2-templated YAML config and return it as an EasyDict."""
    with open(cfg_file, "r") as fin:
        rendered = jinja2.Template(fin.read()).render(context)
    return easydict.EasyDict(yaml.safe_load(rendered))
def parse_args():
    """Parse command-line arguments, including variables used by the config.

    Returns:
        (args, vars): the static arguments namespace and a dict mapping every
        config template variable to its literal-evaluated value.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", help="yaml configuration file", required=True)
    parser.add_argument("-s", "--seed", help="random seed for PyTorch", type=int, default=1024)
    args, unparsed = parser.parse_known_args()

    # every undeclared template variable in the config becomes a required flag
    template_vars = detect_variables(args.config)
    dynamic_parser = argparse.ArgumentParser()
    for var in template_vars:
        dynamic_parser.add_argument("--%s" % var, required=True)
    parsed = dynamic_parser.parse_known_args(unparsed)[0]
    dynamic_vars = {k: utils.literal_eval(v) for k, v in parsed._get_kwargs()}
    return args, dynamic_vars
def build_solver(cfg, dataset):
    """Build a training engine from a config and a splittable dataset.

    Splits the dataset, optionally shrinks valid/test for quick evaluation,
    instantiates the task and optimizer from the config, and wraps everything
    in an Engine; restores a checkpoint if one is configured.
    """
    train_set, valid_set, test_set = dataset.split()
    if comm.get_rank() == 0:
        logger.warning(dataset)
        logger.warning("#train: %d, #valid: %d, #test: %d" % (len(train_set), len(valid_set), len(test_set)))

    if "fast_test" in cfg:
        # evaluate on a fixed random subset to speed up validation / test
        if comm.get_rank() == 0:
            logger.warning("Quick test mode on. Only evaluate on %d samples for valid / test." % cfg.fast_test)
        g = torch.Generator()
        g.manual_seed(1024)
        valid_set = torch_data.random_split(valid_set, [cfg.fast_test, len(valid_set) - cfg.fast_test], generator=g)[0]
        test_set = torch_data.random_split(test_set, [cfg.fast_test, len(test_set) - cfg.fast_test], generator=g)[0]

    if hasattr(dataset, "num_relation"):
        # propagate the relation count from the dataset into the model config
        cfg.task.model.num_relation = dataset.num_relation
    task = core.Configurable.load_config_dict(cfg.task)
    cfg.optimizer.params = task.parameters()
    optimizer = core.Configurable.load_config_dict(cfg.optimizer)
    solver = core.Engine(task, train_set, valid_set, test_set, optimizer, **cfg.engine)
    if "checkpoint" in cfg:
        solver.load(cfg.checkpoint)
    return solver
NBFNet | NBFNet-master/nbfnet/__init__.py | 0 | 0 | 0 | py | |
gca-rom | gca-rom-main/main.py | import sys
sys.path.append('../')
import torch
from gca_rom import network, pde, loader, plotting, preprocessing, training, initialization, testing, error
import numpy as np
if __name__ == "__main__":
    # Select the benchmark (problem name, field variable, parameter ranges)
    # from the first CLI argument.
    problem_name, variable, mu1_range, mu2_range = pde.problem(int(sys.argv[1]))
    print("PROBLEM: ", problem_name, "for variable ", variable, "\n")
    AE_Params = network.AE_Params
    device = initialization.set_device()
    initialization.set_reproducibility(AE_Params)
    initialization.set_path(AE_Params)
    # Load the unstructured-mesh dataset and build the graph loaders/splits.
    dataset_dir = '../dataset/'+problem_name+'_unstructured.mat'
    dataset = loader.LoadDataset(dataset_dir, variable)
    dataset_graph, graph_loader, train_loader, test_loader, \
        val_loader, scaler_all, scaler_test, xx, yy, var, VAR_all, VAR_test, \
            train_trajectories, test_trajectories = preprocessing.graphs_dataset(dataset, AE_Params)
    # Cartesian product of the two parameter ranges -> one (mu1, mu2) row per snapshot.
    mu1, mu2 = np.meshgrid(mu1_range, mu2_range)
    params = torch.tensor(np.vstack((mu1.T, mu2.T)).reshape(2, -1).T)
    params = params.to(device)
    print('Shape of parameter space:', params.shape, '\n')
    model = network.Net()
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=AE_Params.learning_rate, weight_decay=AE_Params.weight_decay)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=AE_Params.miles, gamma=AE_Params.gamma)
    history = dict(train=[], l1=[], l2=[])
    history_test = dict(test=[], l1=[], l2=[])
    min_test_loss = np.Inf
    # Resume from a saved checkpoint when available, otherwise train from scratch.
    try:
        model.load_state_dict(torch.load(AE_Params.net_dir+AE_Params.net_name+AE_Params.net_run+'.pt', map_location=torch.device('cpu')))
        print('Loading saved network')
    except FileNotFoundError:
        print('Training net')
        # with torch.autograd.profiler.profile() as prof:
        for epoch in range(AE_Params.max_epochs):
            train_rmse = training.train(model, optimizer, device, scheduler, params, train_loader, train_trajectories, AE_Params, history)
            if AE_Params.cross_validation:
                test_rmse = training.val(model, device, params, test_loader, test_trajectories, AE_Params, history_test)
                print("Epoch[{}/{}, train_mse loss:{}, test_mse loss:{}".format(epoch + 1, AE_Params.max_epochs, history['train'][-1], history_test['test'][-1]))
            else:
                test_rmse = train_rmse
                print("Epoch[{}/{}, train_mse loss:{}".format(epoch + 1, AE_Params.max_epochs, history['train'][-1]))
            # Checkpoint only when the monitored loss improves.
            if test_rmse < min_test_loss:
                min_test_loss = test_rmse
                best_epoch = epoch
                torch.save(model.state_dict(), AE_Params.net_dir+AE_Params.net_name+AE_Params.net_run+'.pt')
            if AE_Params.tolerance >= train_rmse:
                print('Early stopping!')
                break
        np.save(AE_Params.net_dir+'history'+AE_Params.net_run+'.npy', history)
        np.save(AE_Params.net_dir+'history_test'+AE_Params.net_run+'.npy', history_test)
        # print(prof.key_averages().table(sort_by="self_cpu_time_total"))
        print("\nLoading best network for epoch: ", best_epoch)
    # Evaluate the best checkpoint on CPU and produce diagnostic plots.
    model.load_state_dict(torch.load(AE_Params.net_dir+AE_Params.net_name+AE_Params.net_run+'.pt', map_location=torch.device('cpu')))
    model.to("cpu")
    params = params.to("cpu")
    vars = "GCA-ROM"
    results, latents_map, latents_gca = testing.evaluate(VAR_all, model, graph_loader, params, AE_Params, range(params.shape[0]))
    plotting.plot_loss(AE_Params)
    plotting.plot_latent(AE_Params, latents_map, latents_gca)
    plotting.plot_error(results, VAR_all, scaler_all, AE_Params, mu1_range, mu2_range, params, train_trajectories, vars)
    # Plot N randomly chosen snapshots and their pointwise error fields.
    N = 5
    snapshots = np.arange(params.shape[0]).tolist()
    np.random.shuffle(snapshots)
    for SNAP in snapshots[0:N]:
        plotting.plot_fields(SNAP, results, scaler_all, AE_Params, dataset, xx, yy, params)
        plotting.plot_error_fields(SNAP, results, VAR_all, scaler_all, AE_Params, dataset, xx, yy, params)
    # Error statistics restricted to the held-out test trajectories.
    results_test, _, _ = testing.evaluate(VAR_test, model, val_loader, params, AE_Params, test_trajectories)
    error_abs, norm = error.compute_error(results_test, VAR_test, scaler_test, AE_Params)
    error.print_error(error_abs, norm, vars)
    error.save_error(error_abs, norm, AE_Params, vars)
| 4,087 | 45.454545 | 157 | py |
gca-rom | gca-rom-main/examples/advection.py | import os
# Hyper-parameters for the ADVECTION test case (the upstream comment said
# "POISSON" by copy-paste; these values configure advection).
problem_name = "advection"
scalers_type = "sampling-feature"
scalers_fun = "standard"
skip_connection = "skip1"
pn = 2           # problem id consumed by gca_rom.pde.problem
st = 4           # scaling type
sf = 3           # scaler function
sk = 1           # skip-connection flag
train_rate = 30
ffc_nodes = 200
latent_nodes = 100
btt_nodes = 15
lambda_map = 1e1
hidden_channels = 2
print(f"\n\nRUN MAIN FOR PROBLEM {problem_name} WITH SCALING: {scalers_type} WITH {scalers_fun} FUNCTION")
print(f"TRAIN RATE = {train_rate} and FFC NODES = {ffc_nodes} and HIDDEN CHANNELS = {hidden_channels} and SKIP = {skip_connection}")
print(f"LATENT NODES = {latent_nodes} and BTT NODES = {btt_nodes} and LAMBDA = {lambda_map}")
# Forward all hyper-parameters to main.py as positional CLI arguments.
command = "python3 ../main.py %s %s %s %s %s %s %s %s %s %s" % (
    pn, st, sf, sk, train_rate, ffc_nodes, latent_nodes, btt_nodes, lambda_map, hidden_channels)
os.system(command)
gca-rom | gca-rom-main/examples/navier-stokes.py | import os
# Hyper-parameters for the NAVIER-STOKES test case. Pick which field to run
# via ``comp``: 0 -> VX, 1 -> VY, 2 -> P.
problem_names = ["VX_navier_stokes", "VY_navier_stokes", "P_navier_stokes"]
comp = 2
problem_name = problem_names[comp]
scalers_type = "sampling-feature"
scalers_fun = "standard"
skip_connection = "skip1"
pn = 4 + comp    # problem id consumed by gca_rom.pde.problem
st = 4           # scaling type
sf = 3           # scaler function
sk = 1           # skip-connection flag
train_rate = 10
ffc_nodes = 200
latent_nodes = 100
btt_nodes = 25
lambda_map = 1e0
hidden_channels = 3
print(f"\n\nRUN MAIN FOR PROBLEM {problem_name} WITH SCALING: {scalers_type} WITH {scalers_fun} FUNCTION")
print(f"TRAIN RATE = {train_rate} and FFC NODES = {ffc_nodes} and HIDDEN CHANNELS = {hidden_channels} and SKIP = {skip_connection}")
print(f"LATENT NODES = {latent_nodes} and BTT NODES = {btt_nodes} and LAMBDA = {lambda_map}")
# Forward all hyper-parameters to main.py as positional CLI arguments.
command = "python3 ../main.py %s %s %s %s %s %s %s %s %s %s" % (
    pn, st, sf, sk, train_rate, ffc_nodes, latent_nodes, btt_nodes, lambda_map, hidden_channels)
os.system(command)
gca-rom | gca-rom-main/examples/graetz.py | import os
# Hyper-parameters for the GRAETZ test case (the upstream comment said
# "POISSON" by copy-paste; these values configure Graetz).
problem_name = "graetz"
scalers_type = "sampling-feature"
scalers_fun = "standard"
skip_connection = "skip1"
pn = 3           # problem id consumed by gca_rom.pde.problem
st = 4           # scaling type
sf = 3           # scaler function
sk = 1           # skip-connection flag
train_rate = 30
ffc_nodes = 200
latent_nodes = 50
btt_nodes = 25
lambda_map = 1e1
hidden_channels = 2
print(f"\n\nRUN MAIN FOR PROBLEM {problem_name} WITH SCALING: {scalers_type} WITH {scalers_fun} FUNCTION")
print(f"TRAIN RATE = {train_rate} and FFC NODES = {ffc_nodes} and HIDDEN CHANNELS = {hidden_channels} and SKIP = {skip_connection}")
print(f"LATENT NODES = {latent_nodes} and BTT NODES = {btt_nodes} and LAMBDA = {lambda_map}")
# Forward all hyper-parameters to main.py as positional CLI arguments.
command = "python3 ../main.py %s %s %s %s %s %s %s %s %s %s" % (
    pn, st, sf, sk, train_rate, ffc_nodes, latent_nodes, btt_nodes, lambda_map, hidden_channels)
os.system(command)
gca-rom | gca-rom-main/examples/poisson.py | import os
# Hyper-parameters for the POISSON test case.
problem_name = "poisson"
scalers_type = "sampling-feature"
scalers_fun = "standard"
skip_connection = "skip1"
pn = 1           # problem id consumed by gca_rom.pde.problem
st = 4           # scaling type
sf = 3           # scaler function
sk = 1           # skip-connection flag
train_rate = 30
ffc_nodes = 200
latent_nodes = 50
btt_nodes = 15
lambda_map = 1e1
hidden_channels = 3
print(f"\n\nRUN MAIN FOR PROBLEM {problem_name} WITH SCALING: {scalers_type} WITH {scalers_fun} FUNCTION")
print(f"TRAIN RATE = {train_rate} and FFC NODES = {ffc_nodes} and HIDDEN CHANNELS = {hidden_channels} and SKIP = {skip_connection}")
print(f"LATENT NODES = {latent_nodes} and BTT NODES = {btt_nodes} and LAMBDA = {lambda_map}")
# Forward all hyper-parameters to main.py as positional CLI arguments.
command = "python3 ../main.py %s %s %s %s %s %s %s %s %s %s" % (
    pn, st, sf, sk, train_rate, ffc_nodes, latent_nodes, btt_nodes, lambda_map, hidden_channels)
os.system(command)
gca-rom | gca-rom-main/gca_rom/error.py | import numpy as np
from gca_rom import scaling
def save_error(error, norm, AE_Params, vars):
    """Write max/mean/min of the per-snapshot relative error to a text file.

    The relative error is ``error[i] / norm[i]`` element-wise. The output
    file is ``AE_Params.net_dir + 'relative_errors' + AE_Params.net_run +
    vars + '.txt'`` and contains three values: maximum, mean and minimum.

    Parameters:
    error (List[float]): per-snapshot absolute errors.
    norm (List[float]): per-snapshot solution norms (same length as error).
    vars (str): label appended to the output file name.
    """
    rel_error = np.array(error) / np.array(norm)
    stats = [max(rel_error), sum(rel_error) / len(rel_error), min(rel_error)]
    out_file = AE_Params.net_dir + 'relative_errors' + AE_Params.net_run + vars + '.txt'
    np.savetxt(out_file, stats)
def print_error(error, norm, vars):
    """Print max/mean/min of the absolute and relative errors for a field.

    The relative error is ``error[i] / norm[i]`` element-wise.

    Parameters:
    error (List[float]): per-snapshot absolute errors.
    norm (List[float]): per-snapshot solution norms (same length as error).
    vars (str): label of the field being reported.
    """
    abs_err = np.array(error)
    rel_err = abs_err / np.array(norm)
    print("\nMaximum absolute error for field "+vars+" = ", max(abs_err))
    print("Mean absolute error for field "+vars+" = ", sum(abs_err)/len(abs_err))
    print("Minimum absolute error for field "+vars+" = ", min(abs_err))
    print("\nMaximum relative error for field "+vars+" = ", max(rel_err))
    print("Mean relative error for field "+vars+" = ", sum(rel_err)/len(rel_err))
    print("Minimum relative error for field "+vars+" = ", min(rel_err))
def compute_error(res, VAR, scaler, AE_Params):
    """Per-snapshot absolute error and solution norm vs. the reconstruction.

    :param res: data reconstructed by the autoencoder
    :param VAR: original (scaled) data
    :param scaler: scaler object used to undo the preprocessing
    :param AE_Params: hyper-parameter object (provides scaling_type)
    :return: (error_abs_list, norm_z_list), one entry per snapshot
    """
    Z = scaling.inverse_scaling(VAR, scaler, AE_Params.scaling_type)
    Z_net = scaling.inverse_scaling(res, scaler, AE_Params.scaling_type)
    # NOTE(review): snapshots are taken as the columns of Z while VAR.shape[0]
    # counts them -- confirm inverse_scaling's output layout matches.
    snapshots = range(VAR.shape[0])
    error_abs_list = [np.linalg.norm(abs(Z[:, s] - Z_net[:, s])) for s in snapshots]
    norm_z_list = [np.linalg.norm(Z[:, s], 2) for s in snapshots]
    return error_abs_list, norm_z_list
| 3,215 | 43.666667 | 209 | py |
gca-rom | gca-rom-main/gca_rom/preprocessing.py | import numpy as np
import torch
from torch_geometric.data import Data
from torch_geometric.loader import DataLoader
from gca_rom import scaling
def graphs_dataset(dataset, AE_Params):
    """
    graphs_dataset: function to process and scale the input dataset for graph autoencoder model.
    Inputs:
    dataset: an object containing the dataset to be processed.
    AE_Params: an object containing the hyperparameters of the graph autoencoder model.
    Outputs:
    dataset_graph: an object containing the processed and scaled dataset.
    loader: a DataLoader object of the processed and scaled dataset.
    train_loader: a DataLoader object of the training set.
    test_loader: a DataLoader object of the test set.
    val_loader: a DataLoader object of the validation set.
    scaler_all: a scaler object to scale the entire dataset.
    scaler_test: a scaler object to scale the test set.
    xx: an array of the x-coordinate of the nodes.
    yy: an array of the y-coordinate of the nodes.
    var: an array of the node features.
    VAR_all: an array of the scaled node features of the entire dataset.
    VAR_test: an array of the scaled node features of the test set.
    train_snapshots: a list of indices of the training set.
    test_snapshots: a list of indices of the test set.
    """
    xx = dataset.xx
    yy = dataset.yy
    var = dataset.U
    # PROCESSING DATASET
    # U is (num_nodes, num_snapshots): one column per parameter snapshot.
    num_nodes = var.shape[0]
    num_graphs = var.shape[1]
    print("Number of nodes processed: ", num_nodes)
    print("Number of graphs processed: ", num_graphs)
    total_sims = int(num_graphs)
    # AE_Params.rate is a percentage of snapshots used for training.
    rate = AE_Params.rate/100
    train_sims = int(rate * total_sims)
    test_sims = total_sims - train_sims
    # Random train/test split of snapshot indices (kept sorted afterwards).
    main_loop = np.arange(total_sims).tolist()
    np.random.shuffle(main_loop)
    train_snapshots = main_loop[0:train_sims]
    train_snapshots.sort()
    test_snapshots = main_loop[train_sims:total_sims]
    test_snapshots.sort()
    ## FEATURE SCALING
    var_test = dataset.U[:, test_snapshots]
    scaling_type = AE_Params.scaling_type
    scaler_all, VAR_all = scaling.tensor_scaling(var, scaling_type)
    scaler_test, VAR_test = scaling.tensor_scaling(var_test, scaling_type)
    print("Shape of scaled dataset: ", VAR_all.shape)
    print("Shape of scaled test set: ", VAR_test.shape)
    graphs = []
    # dataset.E stores 1-based node indices; shift to the 0-based convention
    # expected by torch_geometric.
    edge_index = torch.t(dataset.E) - 1
    for graph in range(num_graphs):
        # Node positions for this snapshot (the mesh may move per snapshot).
        pos = torch.cat((xx[:, graph].unsqueeze(1), yy[:, graph].unsqueeze(1)), 1)
        ei = torch.index_select(pos, 0, edge_index[0, :])
        ej = torch.index_select(pos, 0, edge_index[1, :])
        edge_attr = ej - ei
        # Edge feature = Euclidean length of each edge.
        edge_attr = torch.sqrt(torch.pow(edge_attr[:, 0], 2) + torch.pow(edge_attr[:, 1], 2))
        node_features = VAR_all[graph, :]
        dataset_graph = Data(x=node_features, edge_index=edge_index, edge_attr=edge_attr, pos=pos)
        graphs.append(dataset_graph)
    # Side effect: record the mesh size on the hyper-parameter object so the
    # encoder/decoder can size their layers (uses the last graph built).
    AE_Params.num_nodes = dataset_graph.num_nodes
    train_dataset = [graphs[i] for i in train_snapshots]
    test_dataset = [graphs[i] for i in test_snapshots]
    # Full-batch loaders: train/test hold the whole subset in one batch;
    # loader and val_loader yield one graph at a time.
    loader = DataLoader(graphs, batch_size=1)
    train_loader = DataLoader(train_dataset, batch_size=train_sims, shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=test_sims, shuffle=False)
    val_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
    return dataset_graph, loader, train_loader, test_loader, \
        val_loader, scaler_all, scaler_test, xx, yy, var, VAR_all, VAR_test, train_snapshots, test_snapshots
| 3,515 | 38.954545 | 112 | py |
gca-rom | gca-rom-main/gca_rom/plotting.py | import numpy as np
import matplotlib.pyplot as plt
from gca_rom import scaling
import matplotlib.gridspec as gridspec
import matplotlib.cm as cm
from matplotlib import ticker
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Global Matplotlib font sizes shared by every figure in this module.
params = {'legend.fontsize': 'x-large',
         'axes.labelsize': 'x-large',
         'axes.titlesize':'x-large',
         'xtick.labelsize':'x-large',
         'ytick.labelsize':'x-large'}
plt.rcParams.update(params)
def plot_loss(AE_Params):
    """
    Plots the history of losses during the training of the autoencoder.

    Loads the train/test histories saved by the training loop and draws the
    reconstruction (mse) and latent-map losses on a log scale; the test
    curves are dashed.

    Attributes:
    AE_Params (namedtuple): An object containing the parameters of the autoencoder.
    """
    history = np.load(AE_Params.net_dir+'history'+AE_Params.net_run+'.npy', allow_pickle=True).item()
    history_test = np.load(AE_Params.net_dir+'history_test'+AE_Params.net_run+'.npy', allow_pickle=True).item()
    ax = plt.figure().gca()
    ax.semilogy(history['l1'])
    ax.semilogy(history['l2'])
    ax.semilogy(history_test['l1'], '--')
    ax.semilogy(history_test['l2'], '--')
    plt.ylabel('Loss')
    # BUG FIX: the epoch count belongs on the x axis; the original called
    # plt.ylabel('Epochs'), overwriting the 'Loss' label and leaving the x
    # axis unlabeled.
    plt.xlabel('Epochs')
    plt.title('Loss over training epochs')
    plt.legend(['loss_mse', 'loss_map', 'loss_test_mse', 'loss_test_map'])
    plt.savefig(AE_Params.net_dir+'history_losses'+AE_Params.net_run+'.png', bbox_inches='tight', dpi=500)
def plot_latent(AE_Params, latents, latents_estimation):
    """
    Plot the original and estimated latent spaces
    Parameters:
    AE_Params (obj): object containing the Autoencoder parameters
    latents (tensor): tensor of original latent spaces
    latents_estimation (tensor): tensor of estimated latent spaces
    """
    plt.figure()
    # Dashed: encoder latents; solid: latents predicted by the parameter map.
    for i1 in range(AE_Params.bottleneck_dim):
        plt.plot(latents[:,i1].detach(), '--')
        plt.plot(latents_estimation[:,i1].detach(),'-')
    plt.savefig(AE_Params.net_dir+'latents'+AE_Params.net_run+'.png', bbox_inches='tight')
    # Box plot of the estimated latent coordinates (outliers as green diamonds).
    green_diamond = dict(markerfacecolor='g', marker='D')
    _, ax = plt.subplots()
    ax.boxplot(latents_estimation.detach().numpy(), flierprops=green_diamond)
    plt.savefig(AE_Params.net_dir+'box_plot_latents'+AE_Params.net_run+'.png', bbox_inches='tight', dpi=500)
def plot_error(res, VAR_all, scaler_all, AE_Params, mu1_range, mu2_range, params, train_trajectories, vars):
    """
    This function plots the relative error between the predicted and actual results.
    Parameters:
    res (ndarray): The predicted results
    VAR_all (ndarray): The actual results
    scaler_all (object): The scaler object used for scaling the results
    AE_Params (object): The AE_Params object holding the necessary hyperparameters
    mu1_range (ndarray): Range of the first input variable
    mu2_range (ndarray): Range of the second input variable
    params (ndarray): The input variables
    train_trajectories (ndarray): The indices of the training data
    vars (str): The name of the variable being plotted
    """
    # Undo the feature scaling before comparing solution fields.
    u_hf = scaling.inverse_scaling(VAR_all, scaler_all, AE_Params.scaling_type)
    u_app = scaling.inverse_scaling(res, scaler_all, AE_Params.scaling_type)
    # Per-snapshot relative L2 error, reshaped onto the (mu1, mu2) grid.
    error = np.linalg.norm(u_app - u_hf, axis=0) / np.linalg.norm(u_hf, axis=0)
    X1, X2 = np.meshgrid(mu1_range, mu2_range, indexing='ij')
    output = np.reshape(error, (len(mu1_range), len(mu2_range)))
    fig = plt.figure('Relative Error '+vars)
    ax = fig.add_subplot(projection='3d')
    ax.plot_surface(X1, X2, output, cmap=cm.coolwarm, color='blue')
    ax.contour(X1, X2, output, zdir='z', offset=output.min(), cmap=cm.coolwarm)
    ax.set(xlim=tuple([mu1_range[0], mu1_range[-1]]), ylim=tuple([mu2_range[0], mu2_range[-1]]), xlabel='$\mu_1$', ylabel='$\mu_2$', zlabel='$\\epsilon_{GCA}(\\mathbf{\mu})$')
    # Red stars mark the training parameter locations on the error surface.
    ax.plot(params[train_trajectories,0], params[train_trajectories,1], output.min()*np.ones(len(params[train_trajectories,1])), '*r')
    plt.ticklabel_format(axis='z', style='sci', scilimits=(0, 0))
    ax.set_title('Relative Error '+vars)
    # Hide Matplotlib's automatic offset text and draw the exponent manually.
    ax.zaxis.offsetText.set_visible(False)
    exponent_axis = np.floor(np.log10(max(ax.get_zticks()))).astype(int)
    ax.text2D(0.9, 0.82, "1e"+str(exponent_axis), transform=ax.transAxes, fontsize="x-large")
    plt.tight_layout()
    plt.savefig(AE_Params.net_dir+'relative_error_'+vars+AE_Params.net_run+'.png', transparent=True, dpi=500)
def plot_fields(SNAP, results, scaler_all, AE_Params, dataset, xx, yy, params):
    """
    Plots the field solution for a given snapshot.
    The function takes in the following inputs:
    SNAP: integer value indicating the snapshot to be plotted.
    results: array of shape (num_samples, num_features), representing the network's output.
    scaler_all: instance of the scaler used to scale the data.
    AE_Params: instance of the Autoencoder parameters class containing information about the network architecture and training.
    dataset: array of shape (num_samples, 3), representing the triangulation of the spatial domain.
    xx: array of shape (num_samples, num_features), containing the x-coordinates of the domain.
    yy: array of shape (num_samples, num_features), containing the y-coordinates of the domain.
    params: array of shape (num_features,), containing the parameters associated with each snapshot.
    The function generates a plot of the field solution and saves it to disk using the filepath specified in AE_Params.net_dir.
    """
    # dataset.T stores 1-based triangle connectivity; shift to 0-based.
    triang = np.asarray(dataset.T - 1)
    cmap = cm.get_cmap(name='jet', lut=None)
    # fig = plt.figure(figsize=(14, 6))
    gs1 = gridspec.GridSpec(1, 1)
    ax = plt.subplot(gs1[0, 0])
    # Undo the feature scaling and extract the requested snapshot's field.
    Z_net = scaling.inverse_scaling(results, scaler_all, AE_Params.scaling_type)
    z_net = Z_net[:, SNAP]
    # ax.triplot(xx[:, SNAP], yy[:, SNAP], triang, lw=0.5, color='black')
    cs = ax.tricontourf(xx[:, SNAP], yy[:, SNAP], triang, z_net, 100, cmap=cmap)
    # Colorbar in a dedicated axis, 5 ticks, scientific notation.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.1)
    cbar = plt.colorbar(cs, cax=cax)
    tick_locator = MaxNLocator(nbins=5)
    cbar.locator = tick_locator
    cbar.formatter.set_powerlimits((0, 0))
    cbar.update_ticks()
    plt.tight_layout()
    ax.set_aspect('equal', 'box')
    ax.set_title('Solution field for $\mu$ = '+str(np.around(params[SNAP].detach().numpy(), 2)))
    plt.savefig(AE_Params.net_dir+'field_solution_'+str(SNAP)+''+AE_Params.net_run+'.png', bbox_inches='tight', dpi=500)
def plot_error_fields(SNAP, results, VAR_all, scaler_all, AE_Params, dataset, xx, yy, params):
    """Contour plot of the pointwise error field of one snapshot.

    The error is the absolute difference between the true and predicted
    solutions, normalized by the 2-norm of the true solution.

    Inputs:
    SNAP: int, snapshot of the solution to be plotted
    results: np.array, predicted solution
    VAR_all: np.array, true solution
    scaler_all: np.array, scaling information used in the prediction
    AE_Params: class, model architecture and training parameters
    dataset: np.array, mesh information
    xx: np.array, x-coordinate of the mesh
    yy: np.array, y-coordinate of the mesh
    params: np.array, model parameters
    """
    # dataset.T stores 1-based triangle connectivity; shift to 0-based.
    connectivity = np.asarray(dataset.T - 1)
    colormap = cm.get_cmap(name='jet', lut=None)
    axis = plt.subplot(gridspec.GridSpec(1, 1)[0, 0])
    # Undo the feature scaling, then take the requested snapshot's column.
    truth = scaling.inverse_scaling(VAR_all, scaler_all, AE_Params.scaling_type)[:, SNAP]
    approx = scaling.inverse_scaling(results, scaler_all, AE_Params.scaling_type)[:, SNAP]
    pointwise = abs(truth - approx) / np.linalg.norm(truth, 2)
    contour = axis.tricontourf(xx[:, SNAP], yy[:, SNAP], connectivity, pointwise, 100, cmap=colormap)
    # Colorbar in a dedicated axis, 5 ticks, scientific notation.
    cax = make_axes_locatable(axis).append_axes("right", size="5%", pad=0.1)
    colorbar = plt.colorbar(contour, cax=cax)
    colorbar.locator = MaxNLocator(nbins=5)
    colorbar.formatter.set_powerlimits((0, 0))
    colorbar.update_ticks()
    plt.tight_layout()
    axis.set_aspect('equal', 'box')
    axis.set_title('Error field for $\mu$ = '+str(np.around(params[SNAP].detach().numpy(), 2)))
    plt.savefig(AE_Params.net_dir+'error_field_'+str(SNAP)+''+AE_Params.net_run+'.png', bbox_inches='tight', dpi=500)
gca-rom | gca-rom-main/gca_rom/network.py | import sys
import torch
from torch import nn
from gca_rom import gca, scaling, pde
# Resolve the benchmark (problem name, field variable, parameter ranges)
# from the first CLI argument at import time.
problem_name, variable, mu1_range, mu2_range = pde.problem(int(sys.argv[1]))
class AE_Params():
    """Class that holds the hyperparameters for the autoencoder model.
    Args:
    sparse_method (str): The method to use for sparsity constraint.
    rate (int): Amount of data used in training.
    seed (int): Seed for the random number generator.
    bottleneck_dim (int): The dimension of the bottleneck layer.
    tolerance (float): The tolerance value for stopping the training.
    lambda_map (float): The weight for the map loss.
    learning_rate (float): The learning rate for the optimizer.
    ffn (int): The number of feed-forward layers.
    in_channels (int): The number of input channels.
    hidden_channels (list): The number of hidden channels for each layer.
    act (function): The activation function to use.
    nodes (int): The number of nodes in each hidden layer.
    skip (int): The number of skipped connections.
    layer_vec (list): The structure of the network.
    net_name (str): The name of the network.
    scaler_name (str): The name of the scaler used for preprocessing.
    weight_decay (float): The weight decay for the optimizer.
    max_epochs (int): The maximum number of epochs to run training for.
    miles (list): The miles for learning rate update in scheduler.
    gamma (float): The gamma value for the optimizer.
    num_nodes (int): The number of nodes in the network.
    scaling_type (int): The type of scaling to use for preprocessing.
    net_dir (str): The directory to save the network in.
    cross_validation (bool): Whether to perform cross-validation.
    """
    def __init__(self):
        # Positional CLI arguments 2..10 configure the run; argv[1] already
        # selected the problem at module import time (see pde.problem above).
        self.scaling_type = int(sys.argv[2])
        _, self.scaler_name = scaling.scaler_functions(int(sys.argv[3]))
        self.skip = int(sys.argv[4])
        self.rate = int(sys.argv[5])
        self.sparse_method = 'L1_mean'
        self.ffn = int(sys.argv[6])
        self.nodes = int(sys.argv[7])
        self.bottleneck_dim = int(sys.argv[8])
        self.lambda_map = float(sys.argv[9])
        self.in_channels = int(sys.argv[10])
        self.seed = 10
        self.tolerance = 1e-6
        self.learning_rate = 0.001
        self.hidden_channels = [1]*self.in_channels
        self.act = torch.tanh
        # MLP shape for the (mu1, mu2) -> latent mapping: 2 inputs, four
        # hidden layers of `nodes` units, bottleneck_dim outputs.
        self.layer_vec=[2, self.nodes, self.nodes, self.nodes, self.nodes, self.bottleneck_dim]
        self.net_name = problem_name
        self.net_run = '_' + self.scaler_name
        self.weight_decay = 0.00001
        self.max_epochs = 5000
        self.miles = []
        self.gamma = 0.0001
        # Filled in later by preprocessing.graphs_dataset with the mesh size.
        self.num_nodes = 0
        # Output directory name encodes every hyper-parameter of the run.
        self.net_dir = './' + problem_name + '/' + self.net_run+ '/' + variable + '_' + self.net_name + '_lmap' + str(self.lambda_map) + '_btt' + str(self.bottleneck_dim) \
                    + '_seed' + str(self.seed) + '_lv' + str(len(self.layer_vec)-2) + '_hc' + str(len(self.hidden_channels)) + '_nd' + str(self.nodes) \
                    + '_ffn' + str(self.ffn) + '_skip' + str(self.skip) + '_lr' + str(self.learning_rate) + '_sc' + str(self.scaling_type) + '_rate' + str(self.rate) + '/'
        self.cross_validation = True
AE_Params = AE_Params()  # NOTE: the class name is rebound to a singleton instance here.
class Net(torch.nn.Module):
    """
    Class Net
    ---------
    A PyTorch neural network class which consists of encoder, decoder and mapping modules.
    Attributes
    ----------
    encoder : gca.Encoder
        An encoder module from the gca module.
    decoder : gca.Decoder
        A decoder module from the gca module.
    act_map : AE_Params.act
        The activation map specified in the AE_Params.
    layer_vec : AE_Params.layer_vec
        The layer vector specified in the AE_Params. Shape of the layers for the parameter space mapping.
    steps : int
        Number of layers for the parameter mapping MLP.
    maptovec : nn.ModuleList
        A list of linear layers for mapping.
    Methods
    -------
    solo_encoder(data)
        Encodes the input data using the encoder module.
        Returns the encoded representation.
    solo_decoder(x, data)
        Decodes the encoded representation and the input data using the decoder module.
        Returns the decoded output.
    mapping(x)
        Maps the input using the linear modules in maptovec.
        Returns the mapped output.
    forward(data, parameters)
        Runs a forward pass through the network using the input data and parameters.
        Returns the decoded output, encoded representation, and estimated encoded representation.
    """
    def __init__(self):
        super().__init__()
        self.encoder = gca.Encoder(AE_Params.hidden_channels, AE_Params.bottleneck_dim, AE_Params.num_nodes, ffn=AE_Params.ffn, skip=AE_Params.skip)
        self.decoder = gca.Decoder(AE_Params.hidden_channels, AE_Params.bottleneck_dim, AE_Params.num_nodes, ffn=AE_Params.ffn, skip=AE_Params.skip)
        self.act_map = AE_Params.act
        self.layer_vec = AE_Params.layer_vec
        self.steps = len(self.layer_vec) - 1
        # One linear layer per consecutive pair in layer_vec.
        self.maptovec = nn.ModuleList()
        for k in range(self.steps):
            self.maptovec.append(nn.Linear(self.layer_vec[k], self.layer_vec[k+1]))
    def solo_encoder(self,data):
        # Graph encoder: graph data -> latent vector.
        x = self.encoder(data)
        return x
    def solo_decoder(self,x, data):
        # Graph decoder: latent vector (+ graph structure) -> nodal field.
        x = self.decoder(x, data)
        return x
    def mapping(self, x):
        # MLP from the parameter vector to the latent space.
        # NOTE(review): `idx` takes values 0..self.steps-1 and is compared to
        # self.steps, so the no-activation branch is never taken -- every
        # layer, including the final one, goes through act_map (tanh).
        # Confirm whether the last layer was meant to be linear; fixing it
        # would change the behavior of already-trained checkpoints.
        idx = 0
        for layer in self.maptovec:
            if(idx==self.steps): x = layer(x)
            else: x =self.act_map(layer(x))
            idx += 1
        return x
    def forward(self, data, parameters):
        z = self.solo_encoder(data)
        z_estimation = self.mapping(parameters)
        # Decoding uses the encoder latent z; the commented line below would
        # decode from the parameter-map estimate instead.
        x = self.solo_decoder(z, data)
        # x = self.solo_decoder(z_estimation, data)
        return x, z, z_estimation
| 5,946 | 39.732877 | 179 | py |
gca-rom | gca-rom-main/gca_rom/training.py | import torch
import torch.nn.functional as F
def train(model, optimizer, device, scheduler, params, train_loader, train_trajectories, AE_Params, history):
    """Trains the autoencoder model.
    This function trains the autoencoder model using mean squared error (MSE) loss and a map loss, where the map loss
    is the MSE between the estimated z (latent space) and the actual z. The final loss is the sum of the MSE loss and the
    map loss multiplied by the weight `AE_Params.lambda_map`. The model is trained on the data from `train_loader` and
    the optimization process is performed using the `optimizer`. The learning rate is updated after every iteration
    using `scheduler`. Use of mini-batching for reducing the computational cost.
    Args:
    model (torch.nn.Module): The autoencoder model to be trained.
    optimizer (torch.optim.Optimizer): The optimizer to update the model parameters.
    device (str): The device to run the model on ('cuda' or 'cpu').
    scheduler (torch.optim.lr_scheduler._LRScheduler): The learning rate scheduler to update the learning rate.
    params (torch.Tensor): Tensor containing the parameters of the model.
    train_loader (torch.utils.data.DataLoader): The data loader to provide the training data.
    train_trajectories (int): The number of training trajectories.
    AE_Params (dict): The dictionary containing the hyperparameters for the autoencoder model.
    history (dict): The dictionary to store the loss history.
    Returns:
    float: The average loss over all training examples.
    """
    model.train()
    # `total_examples` actually counts batches, not samples; the returned
    # value is a per-batch mean.
    total_loss_train = total_examples = sum_loss = 0
    total_loss_train_1 = total_loss_train_2 = 0
    sum_loss_1 = sum_loss_2 = 0
    for data in train_loader:
        optimizer.zero_grad()
        data = data.to(device)
        out, z, z_estimation = model(data, params[train_trajectories, :])
        # Reconstruction loss + weighted parameter-map loss.
        loss_train_mse = F.mse_loss(out, data.x, reduction='mean')
        loss_train_map = F.mse_loss(z_estimation, z, reduction='mean')
        loss_train = loss_train_mse + AE_Params.lambda_map * loss_train_map
        loss_train.backward()
        optimizer.step()
        sum_loss += loss_train.item()
        sum_loss_1 += loss_train_mse.item()
        sum_loss_2 += loss_train_map.item()
        total_examples += 1
        # NOTE(review): scheduler.step() runs once per batch. With the
        # full-batch loader built in preprocessing this is once per epoch,
        # but it would step too often if mini-batching were enabled -- confirm.
        scheduler.step()
    total_loss_train = sum_loss / total_examples
    total_loss_train_1 = sum_loss_1 / total_examples
    total_loss_train_2 = sum_loss_2 / total_examples
    history['train'].append(total_loss_train)
    history['l1'].append(total_loss_train_1)
    history['l2'].append(total_loss_train_2)
    return total_loss_train
def val(model, device, params, test_loader, test_trajectories, AE_Params, history_test):
    """Evaluate the model on the test set (no gradients).

    For each batch, computes the reconstruction MSE and the latent-map MSE
    (between the estimated and actual latent codes); the total loss is
    ``mse + AE_Params.lambda_map * map``. Per-batch means of the three
    quantities are appended to ``history_test`` under 'test', 'l1' and 'l2'.

    Parameters:
    model (torch.nn.Module): The model to be evaluated.
    device (torch.device): The device to use for computations.
    params (torch.Tensor): Tensor containing the parameters of the model.
    test_loader: iterable of test batches.
    test_trajectories: indices of the test trajectories.
    AE_Params (object): Object containing hyperparameters for the model.
    history_test (dict): Dictionary to store the evaluation results.

    Returns:
    float: The mean total loss over all test batches.
    """
    with torch.no_grad():
        model.eval()
        n_batches = 0
        acc_total = acc_mse = acc_map = 0
        for batch in test_loader:
            batch = batch.to(device)
            out, z, z_hat = model(batch, params[test_trajectories, :])
            mse_term = F.mse_loss(out, batch.x, reduction='mean')
            map_term = F.mse_loss(z_hat, z, reduction='mean')
            total = mse_term + AE_Params.lambda_map * map_term
            acc_total += total.item()
            acc_mse += mse_term.item()
            acc_map += map_term.item()
            n_batches += 1
        mean_total = acc_total / n_batches
        history_test['test'].append(mean_total)
        history_test['l1'].append(acc_mse / n_batches)
        history_test['l2'].append(acc_map / n_batches)
        return mean_total
| 5,063 | 48.647059 | 478 | py |
gca-rom | gca-rom-main/gca_rom/testing.py | import torch
from tqdm import tqdm
import numpy as np
def evaluate(VAR, model, loader, params, AE_Params, test):
    """Evaluate a trained GCA-ROM model on the test set.

    For every test snapshot the latent code is obtained in two ways: with
    the graph encoder (``solo_encoder``) and with the parameter map
    (``mapping``).  The mapped code is decoded to a predicted solution,
    and the relative discrepancy between the two latent codes is recorded.

    Args:
        VAR: ground-truth solutions; only ``VAR.shape`` is used to size the
            output buffers (n_snapshots x n_nodes).
        model: trained GCA-ROM model exposing ``solo_encoder``, ``mapping``
            and ``solo_decoder``.
        loader: iterable of graph data objects, one per test snapshot.
        params: tensor of problem parameters, indexed through ``test``.
        AE_Params: hyperparameter object (``bottleneck_dim``, ``net_dir``,
            ``net_run``).
        test: indices of the test snapshots inside ``params``.

    Returns:
        tuple: ``(results, latents_map, latents_gca)`` — decoded
        predictions, mapped latent codes and encoded latent codes.
    """
    results = torch.zeros(VAR.shape[0], VAR.shape[1], 1)
    latents_map = torch.zeros(VAR.shape[0], AE_Params.bottleneck_dim)
    latents_gca = torch.zeros(VAR.shape[0], AE_Params.bottleneck_dim)
    latents_error = []
    # NOTE(review): model.eval() is not called here; if the network uses
    # dropout/batch-norm the caller must switch it to eval mode first.
    with torch.no_grad():
        # enumerate replaces the previously hand-maintained `index` counter
        for index, data in enumerate(tqdm(loader)):
            z_net = model.solo_encoder(data)
            z_map = model.mapping(params[test[index], :])
            latents_map[index, :] = z_map
            latents_gca[index, :] = z_net
            # relative error between encoded and mapped latent codes
            lat_err = np.linalg.norm(z_net - z_map)/np.linalg.norm(z_net)
            latents_error.append(lat_err)
            results[index, :, :] = model.solo_decoder(z_map, data)
    np.savetxt(AE_Params.net_dir+'latents'+AE_Params.net_run+'.csv', latents_map.detach(), delimiter =',')
    latents_error = np.array(latents_error)
    # print("\nMaximum relative error for latent = ", max(latents_error))
    # print("Mean relative error for latent = ", sum(latents_error)/len(latents_error))
    # print("Minimum relative error for latent = ", min(latents_error))
    return results, latents_map, latents_gca
gca-rom | gca-rom-main/gca_rom/initialization.py | import os
import torch
import numpy as np
import random
import warnings
def set_device():
    """Select the compute device and configure global torch defaults.

    Side effects: switches torch to double precision and silences all
    Python warnings.

    Returns:
        str: 'cuda' when a GPU is available, otherwise 'cpu'.
    """
    if torch.cuda.is_available():
        dev = 'cuda'
    else:
        dev = 'cpu'
    print("Device used = ", dev)
    # the whole pipeline runs in float64
    torch.set_default_dtype(torch.float64)
    warnings.filterwarnings("ignore")
    return dev
def set_reproducibility(AE_Params):
    """Seed every random number generator for reproducible runs.

    Args:
        AE_Params (class): hyperparameter object exposing ``seed``.
    """
    s = AE_Params.seed
    # stdlib and numpy generators
    random.seed(s)
    np.random.seed(s)
    # torch on CPU and on every GPU
    torch.manual_seed(s)
    torch.cuda.manual_seed(s)
    torch.cuda.manual_seed_all(s)
    # force deterministic cuDNN kernels
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def set_path(AE_Params):
    """Create the directory where the network results are stored.

    Args:
        AE_Params (class): hyperparameter object exposing ``net_dir``.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs(exist_ok=False) sequence
    os.makedirs(AE_Params.net_dir, exist_ok=True)
| 1,224 | 21.685185 | 89 | py |
gca-rom | gca-rom-main/gca_rom/__init__.py | 0 | 0 | 0 | py | |
gca-rom | gca-rom-main/gca_rom/pde.py | import numpy as np
def problem(argument):
"""
problem(argument: int) -> Tuple[str, str, np.ndarray, np.ndarray]
This function takes in an integer argument and returns a tuple containing the problem name (str), variable name (str), mu1 (np.ndarray) and mu2 (np.ndarray) for the specified case.
The possible values of argument are:
Poisson problem
Advection problem
Graetz problem
Navier-Stokes problem (variable = VX)
Navier-Stokes problem (variable = VY)
Navier-Stokes problem (variable = P)
Returns:
Tuple containing the problem name (str), variable name (str), mu1 (np.ndarray) and mu2 (np.ndarray) for the specified case.
"""
match argument:
case 1:
problem = "poisson"
variable = 'U'
mu1 = np.linspace(0.01, 10., 10)
mu2 = np.linspace(0.01, 10., 10)
case 2:
problem = "advection"
variable = 'U'
mu1 = np.linspace(0., 6., 10)
mu2 = np.linspace(-1.0, 1.0, 10)
case 3:
problem = "graetz"
variable = 'U'
mu1 = np.linspace(1., 3., 10)
mu2 = np.linspace(0.01, 0.1, 20)
case 4:
problem = "navier_stokes"
variable = 'VX'
mu1 = np.linspace(0.5, 2., 21)[::2]
mu2 = np.linspace(2., 0.5, 151)[::5]
case 5:
problem = "navier_stokes"
variable = 'VY'
mu1 = np.linspace(0.5, 2., 21)[::2]
mu2 = np.linspace(2., 0.5, 151)[::5]
case 6:
problem = "navier_stokes"
variable = 'P'
mu1 = np.linspace(0.5, 2., 21)[::2]
mu2 = np.linspace(2., 0.5, 151)[::5]
return problem, variable, mu1, mu2 | 1,771 | 32.433962 | 184 | py |
gca-rom | gca-rom-main/gca_rom/loader.py | import sys
from torch_geometric.data import Dataset
import torch
import scipy
class LoadDataset(Dataset):
    """Dataset backed by a MATLAB ``.mat`` file (loaded via scipy.io.loadmat).

    Attributes:
        data_mat (dict): raw contents of the .mat file.
        U (torch.Tensor): snapshot matrix of the requested field variable.
        xx (torch.Tensor): x-coordinates of the domain nodes.
        yy (torch.Tensor): y-coordinates of the domain nodes.
        T (torch.Tensor): triangulation / adjacency matrix, cast to int.
        E (torch.Tensor): connection (edge) matrix, cast to int.

    NOTE(review): ``super().__init__()`` of torch_geometric's Dataset is
    never called — presumably intentional since only raw tensors are
    exposed; confirm before extending this class with Dataset features.
    """
    def __init__(self, root_dir, variable):
        """Load the .mat file at ``root_dir`` and expose ``variable`` as a tensor.

        Args:
            root_dir (str): path of the .mat file.
            variable (str): key of the field variable inside the .mat file.
        """
        self.data_mat = scipy.io.loadmat(root_dir)
        self.U = torch.tensor(self.data_mat[variable])
        self.xx = torch.tensor(self.data_mat['xx'])
        self.yy = torch.tensor(self.data_mat['yy'])
        # connectivity arrays are stored as floats in the .mat file
        self.T = torch.tensor(self.data_mat['T'].astype(int))
        self.E = torch.tensor(self.data_mat['E'].astype(int))
gca-rom | gca-rom-main/gca_rom/gca.py | import torch
from torch import nn
import torch.nn.functional as F
from torch_geometric.nn import GMMConv
class Encoder(torch.nn.Module):
    """Graph convolutional encoder of the GCA-ROM autoencoder.

    A stack of Gaussian-mixture convolutions (GMMConv) extracts node
    features from the graph, optionally with residual (skip) connections
    to the raw input; the flattened node features are then compressed by
    two fully connected layers into a latent vector of size ``bottleneck``.

    Args:
        hidden_channels (list): channel width of each GMMConv layer.
        bottleneck (int): dimension of the latent vector.
        input_size (int): number of nodes per graph.
        ffn (int): width of the intermediate fully connected layer.
        skip (bool): if True, add the raw input features after each conv.
        act (callable): activation function (default: F.elu).
    """
    def __init__(self, hidden_channels, bottleneck, input_size, ffn, skip, act=F.elu):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.depth = len(self.hidden_channels)
        self.act = act
        self.ffn = ffn
        self.skip = skip
        self.bottleneck = bottleneck
        self.input_size = input_size
        # one GMMConv per consecutive pair of channel widths
        self.down_convs = torch.nn.ModuleList()
        for i in range(self.depth-1):
            self.down_convs.append(GMMConv(self.hidden_channels[i], self.hidden_channels[i+1], dim=1, kernel_size=5))
        # flattened node features -> ffn -> bottleneck
        self.fc_in1 = nn.Linear(self.input_size*self.hidden_channels[-1], self.ffn)
        self.fc_in2 = nn.Linear(self.ffn, self.bottleneck)
        self.reset_parameters()
    def encoder(self, data):
        """Encode a (batched) graph into latent vectors, one per graph."""
        edge_weight = data.edge_attr
        edge_index = data.edge_index
        x = data.x
        idx = 0  # NOTE(review): incremented but never read
        for layer in self.down_convs:
            x = self.act(layer(x, edge_index, edge_weight.unsqueeze(1)))
            if self.skip:
                # residual connection with the raw input features
                # (assumes conv output and data.x have compatible widths)
                x = x + data.x
            idx += 1
        # flatten all node features of each graph, then compress
        x = x.reshape(data.num_graphs, self.input_size * self.hidden_channels[-1])
        x = self.act(self.fc_in1(x))
        x = self.fc_in2(x)  # no activation on the latent code
        return x
    def reset_parameters(self):
        """Re-initialise conv weights (Kaiming uniform) and zero the biases."""
        for conv in self.down_convs:
            conv.reset_parameters()
            for name, param in conv.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0)
                else:
                    nn.init.kaiming_uniform_(param)
    def forward(self,data):
        x = self.encoder(data)
        return x
class Decoder(torch.nn.Module):
    """Graph convolutional decoder of the GCA-ROM autoencoder.

    Mirrors :class:`Encoder`: the latent vector is expanded by two fully
    connected layers into per-node features, which a stack of GMMConv
    layers (walking the channel widths in reverse) maps back onto the
    graph.  The final conv layer is applied without activation.

    Args:
        hidden_channels (list): channel widths, traversed in reverse order.
        bottleneck (int): dimension of the latent vector.
        input_size (int): number of nodes per graph.
        ffn (int): width of the intermediate fully connected layer.
        skip (bool): if True, add the expanded FC features after each conv.
        act (callable): activation function (default: F.elu).
    """
    def __init__(self, hidden_channels, bottleneck, input_size, ffn, skip, act=F.elu):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.depth = len(self.hidden_channels)
        self.act = act
        self.ffn = ffn
        self.skip = skip
        self.bottleneck = bottleneck
        self.input_size = input_size
        # bottleneck -> ffn -> flattened node features
        self.fc_out1 = nn.Linear(self.bottleneck, self.ffn)
        self.fc_out2 = nn.Linear(self.ffn, self.input_size * self.hidden_channels[-1])
        # GMMConv stack walking the channel widths backwards
        self.up_convs = torch.nn.ModuleList()
        for i in range(self.depth-1):
            self.up_convs.append(GMMConv(self.hidden_channels[self.depth-1-i], self.hidden_channels[self.depth-i-2], dim=1, kernel_size=5))
        self.reset_parameters()
    def decoder(self, x, data):
        """Decode latent vectors ``x`` back to per-node values on ``data``."""
        edge_weight = data.edge_attr
        edge_index = data.edge_index
        x = self.act(self.fc_out1(x))
        x = self.act(self.fc_out2(x))
        # h keeps the expanded features for the skip connections
        h = x.reshape(data.num_graphs*self.input_size, self.hidden_channels[-1])
        x = h
        idx = 0
        for layer in self.up_convs:
            if (idx == self.depth - 2):
                # last conv layer: no activation
                x = layer(x, edge_index, edge_weight.unsqueeze(1))
            else:
                x = self.act(layer(x, edge_index, edge_weight.unsqueeze(1)))
            if self.skip:
                x = x + h
            idx += 1
        return x
    def reset_parameters(self):
        """Re-initialise conv weights (Kaiming uniform) and zero the biases."""
        for conv in self.up_convs:
            conv.reset_parameters()
            for name, param in conv.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0)
                else:
                    nn.init.kaiming_uniform_(param)
    def forward(self, x, data):
        x = self.decoder(x, data)
        return x
| 5,918 | 36.226415 | 139 | py |
gca-rom | gca-rom-main/gca_rom/scaling.py | from sklearn import preprocessing
import torch
import sys
def scaler_functions(k):
match k:
case 1:
sc_name = "minmax"
sc_fun = preprocessing.MinMaxScaler()
case 2:
sc_name = "robust"
sc_fun = preprocessing.RobustScaler()
case 3:
sc_name = "standard"
sc_fun = preprocessing.StandardScaler()
return sc_fun, sc_name
def tensor_scaling(tensor, scaling_type):
scaling_fun_1, _ = scaler_functions(int(sys.argv[3]))
scaling_fun_2, _ = scaler_functions(int(sys.argv[3]))
match scaling_type:
case 1:
# print("SAMPLE SCALING")
scale = scaling_fun_1.fit(tensor)
scaled_data = torch.unsqueeze(torch.tensor(scale.transform(tensor)),0).permute(2, 1, 0)
case 2:
# print("FEATURE SCALING")
scale = scaling_fun_1.fit(torch.t(tensor))
scaled_data = torch.unsqueeze(torch.tensor(scale.transform(torch.t(tensor))), 0).permute(1, 2, 0)
case 3:
# print("FEATURE-SAMPLE SCALING")
scaler_f = scaling_fun_1.fit(torch.t(tensor))
temp = torch.tensor(scaler_f.transform(torch.t(tensor)))
scaler_s = scaling_fun_2.fit(temp)
scaled_data = torch.unsqueeze(torch.tensor(scaler_s.transform(temp)), 0).permute(1, 2, 0)
scale = [scaler_f, scaler_s]
case 4:
# print("SAMPLE-FEATURE SCALING")
scaler_s = scaling_fun_1.fit(tensor)
temp = torch.t(torch.tensor(scaler_s.transform(tensor)))
scaler_f = scaling_fun_2.fit(temp)
scaled_data = torch.unsqueeze(torch.t(torch.tensor(scaler_f.transform(temp))), 0).permute(2, 1, 0)
scale = [scaler_s, scaler_f]
return scale, scaled_data
def inverse_scaling(tensor, scale, scaling_type):
match scaling_type:
case 1:
# print("SAMPLE SCALING")
rescaled_data = torch.tensor(scale.inverse_transform(torch.t(torch.tensor(tensor[:, :, 0].detach().numpy().squeeze()))))
case 2:
# print("FEATURE SCALING")
rescaled_data = torch.tensor(torch.t(torch.tensor(scale.inverse_transform(tensor[:, :, 0].detach().numpy().squeeze()))))
case 3:
# print("FEATURE-SAMPLE SCALING")
scaler_f = scale[0]
scaler_s = scale[1]
rescaled_data = torch.t(torch.tensor(scaler_f.inverse_transform(torch.tensor(scaler_s.inverse_transform(tensor[:, :, 0].detach().numpy().squeeze())))))
case 4:
# print("SAMPLE-FEATURE SCALING")
scaler_s = scale[0]
scaler_f = scale[1]
rescaled_data = torch.tensor(scaler_s.inverse_transform(torch.t(torch.tensor(scaler_f.inverse_transform(tensor[:, :, 0].detach().numpy().squeeze())))))
return rescaled_data
| 2,869 | 41.205882 | 163 | py |
gca-rom | gca-rom-main/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'gca-rom'
# `copyright`/`author` intentionally shadow builtins: Sphinx reads these names.
copyright = '2023, Federico Pichi, Beatriz Moya, Jan S. Hesthaven'
author = 'Federico Pichi, Beatriz Moya, Jan S. Hesthaven'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx_rtd_theme",
    "sphinx.ext.autodoc"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def skip(app, what, name, obj, would_skip, options):
    """autodoc-skip-member hook: always document ``__init__`` methods,
    otherwise keep Sphinx's default decision."""
    return False if name == "__init__" else would_skip
def setup(app):
    """Sphinx extension entry point: register the autodoc-skip-member hook."""
    app.connect("autodoc-skip-member", skip)
gca-rom | gca-rom-main/utils/test_all_scalar.py | import os
# HYPER-PARAMETERS LIST
# Full grids of every hyperparameter the study supports (for reference).
problem_names_list = ["poisson", "advection", "graetz", "VX_navier_stokes", "VY_navier_stokes", "P_navier_stokes"]
scalers_type_list = ["sample", "feature", "feature-sampling", "sampling-feature"]
scalers_fun_list = ["minmax", "robust", "standard"]
skip_connection_list = ["skip0", "skip1"]
train_rates_list = [10, 20, 30, 40, 50]
ffc_nodes_list = [50, 100, 200, 300, 400]
latent_nodes_list = [25, 50, 75, 100, 125]
btt_nodes_list = [10, 15, 20, 25, 30]
lambda_map_list = [1e-2, 1e-1, 1e0, 1e1, 1e2]
hidden_channels_list = [1, 2, 3, 4, 5]
# HYPER-PARAMETERS TEST
# The sub-grids actually swept by this script.
problem_names = ["poisson", "advection", "graetz", "VX_navier_stokes"]
scalers_type = ["sampling-feature"]
scalers_fun = ["standard"]
skip_connection = ["skip1"]
train_rates = [10, 30, 50]
ffc_nodes = [100, 200, 300]
latent_nodes = [50, 100]
btt_nodes = [15, 25]
lambda_map = [1e-1, 1e0, 1e1]
hidden_channels = [1, 2, 3]
# Best configurations found so far (recorded for reference):
# BEST_U_poisson_lmap10.0_btt15_seed10_lv4_hc3_nd50_ffn200_skip1_lr0.001_sc4_rate30
# BEST_U_advection_lmap10.0_btt15_seed10_lv4_hc2_nd100_ffn200_skip1_lr0.001_sc4_rate30
# BEST_U_graetz_lmap10.0_btt25_seed10_lv4_hc2_nd50_ffn200_skip1_lr0.001_sc4_rate30
# BEST_VX_navier_stokes_rid_lmap10.0_btt25_seed10_lv4_hc2_nd100_ffn300_skip1_lr0.001_sc4_rate30
# BEST_VY_navier_stokes_rid_lmap1.0_btt25_seed10_lv4_hc2_nd100_ffn300_skip1_lr0.001_sc4_rate30
# BEST_P_navier_stokes_rid_lmap1.0_btt25_seed10_lv4_hc2_nd100_ffn200_skip1_lr0.001_sc4_rate30
# Translate each selected value into the numeric index main.py expects
# (1-based for problems/scalers, 0-based for the skip flag).
index_pb = [problem_names_list.index(pb)+1 for pb in problem_names]
index_st = [scalers_type_list.index(st)+1 for st in scalers_type]
index_sf = [scalers_fun_list.index(sf)+1 for sf in scalers_fun]
index_sk = [skip_connection_list.index(sk) for sk in skip_connection]
# Exhaustive grid search: launch one main.py run per combination.
for (i, pb) in zip(index_pb, problem_names):
    for (j, st) in zip(index_st, scalers_type):
        for (k, sf) in zip(index_sf, scalers_fun):
            for (m, sk) in zip(index_sk, skip_connection):
                for rt in train_rates:
                    for ffc in ffc_nodes:
                        for ln in latent_nodes:
                            for bn in btt_nodes:
                                for la in lambda_map:
                                    for hc in hidden_channels:
                                        print("\n\nRUN MAIN FOR PROBLEM "+pb+" WITH SCALING: "+st+" WITH "+sf+" FUNCTION")
                                        print("TRAIN RATE = "+str(rt)+" and FFC NODES = "+str(ffc)+" and HIDDEN CHANNELS = "+str(hc)+" and SKIP = "+str(sk))
                                        print("LATENT NODES = "+str(ln)+" and BTT NODES = "+str(bn)+" and LAMBDA = "+str(la))
                                        os.system("python3 ../main.py %s %s %s %s %s %s %s %s %s %s" %(i, j, k, m, rt, ffc, ln, bn, la, hc))
gca-rom | gca-rom-main/utils/h5_to_mat.py | """
.. _IMPORTING_dataset_form_FEniCS_solution:
IMPORTING dataset form FEniCS solution - .h5 to .mat
This script is used to import a FEniCS solution in .h5 format and convert it to a .mat file.
Dependencies
h5py
numpy
scipy
Functions
extract_edges(triangulation)
Given a triangulation, this function returns a list of edges in the triangulation.
"""
import h5py
import numpy as np
from scipy.io import savemat
def extract_edges(triangulation):
    """Collect the unique undirected edges of a triangular mesh.

    Each triangle contributes its three sides; every side is stored as a
    sorted vertex pair so that edges shared by two triangles appear once.
    """
    sides = {
        tuple(sorted((tri[i], tri[(i + 1) % 3])))
        for tri in triangulation
        for i in range(3)
    }
    return list(sides)
# These lines define and open the path of the .h5 file
h5_file = "../data/solution_hf_advection.h5"
f = h5py.File(h5_file)
# Number of degrees of freedom = number of mesh points of the first frame
dof = f["/Mesh/0/mesh/geometry"].shape[0]
# Iterate through all the mesh elements (time/parameter frames) in the .h5 file
for i in range(len(f["Mesh"])):
    # Extract the mesh geometry, x and y coordinates, and the solution vector.
    # The topology is shifted to 1-based indexing (MATLAB convention).
    mesh = f["Mesh/"+str(i)+"/mesh/geometry"]
    triang = np.array(f["/Mesh/"+str(i)+"/mesh/topology"]) + 1
    x = mesh[:, 0:1]
    y = mesh[:, 1:2]
    u = np.array(f["VisualisationVector/"+str(i)])
    try:
        xx = np.concatenate([xx, x], axis=1)
        yy = np.concatenate([yy, y], axis=1)
        solution = np.concatenate([solution, u], axis=1)
    except:
        # First iteration: the accumulators do not exist yet (NameError),
        # so initialise them with the current frame.
        xx = x
        yy = y
        solution = u
# Extract the edges from the triangulation and sort them
# lexicographically by (first, second) vertex.
edges = np.array(extract_edges(triang))
edges = edges[edges[:, 1].argsort()]
edges = edges[edges[:, 0].argsort(kind='mergesort')]
# Build the dictionary that stores the triangulation, edges, number of
# degrees of freedom, solution, x and y coordinates.
mat_file = "../data/file.mat"
dataset = dict()
dataset['T'] = triang.astype('float')
dataset['E'] = edges.astype('float')
dataset['dof'] = float(dof)
dataset['U'] = solution
dataset['xx'] = xx
dataset['yy'] = yy
# Save the dataset as a .mat file
savemat(mat_file, dataset)
gca-rom | gca-rom-main/utils/save_all_scalar.py | import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import warnings
import seaborn as sns
import os
warnings.filterwarnings("ignore")
# HYPER-PARAMETERS LIST
# Full grids (for reference; not all of them are used below).
model_names_list = ["poisson", "advection", "graetz", "navier_stokes"]
problem_names_list = ["poisson", "advection", "graetz", "VX_navier_stokes", "VY_navier_stokes", "P_navier_stokes"]
scalers_type_list = ["sample", "feature", "feature-sampling", "sampling-feature"]
scalers_fun_list = ["minmax", "robust", "standard"]
skip_connection_list = ["skip0", "skip1"]
train_rates_list = ["rate10", "rate20", "rate30", "rate40", "rate50"]
ffc_nodes_list = ["ffn50", "ffn100", "ffn200", "ffn300", "ffn400"]
latent_nodes = ["nd25", "nd50", "nd75", "nd100", "nd125"]
btt_nodes = ["btt10", "btt15", "btt20", "btt25", "btt30"]
lambda_map_list = ["lmap0.01", "lmap0.1", "lmap1.0", "lmap10", "lmap100"]
hidden_channels_list = ["hc1", "hc2", "hc3", "hc4", "hc5"]
# HYPER-PARAMETERS SAVE
# Sub-grids actually aggregated by this script; string tokens must match
# the run-folder names produced by training.
model_names = ["poisson", "advection", "graetz", "navier_stokes", "navier_stokes", "navier_stokes"]
problem_names = ["poisson", "advection", "graetz", "VX_navier_stokes", "VY_navier_stokes", "P_navier_stokes"]
variable_names = ["U_", "U_", "U_", "VX_", "VY_", "P_"]
scalers_fun = ["standard"]
scalers_type = ["sc4"]
skip_connection = ["skip1"]
train_rates = ["rate10", "rate30", "rate50"]
ffn_nodes = ["ffn100", "ffn200", "ffn300"]
latent_nodes = ["nd50", "nd100"]
btt_nodes = ["btt15", "btt25"]
lambda_map = ["lmap0.1", "lmap1.0", "lmap10.0"]
hidden_channels = ["hc1", "hc2", "hc3"]
# Numeric counterparts of the string tokens above (for the DataFrame).
train_rates_l = [10, 30, 50]
ffn_nodes_l = [100, 200, 300]
latent_nodes_l = [50, 100]
btt_nodes_l = [15, 25]
lambda_map_l = [0.1, 1.0, 10.0]
hidden_channels_l = [1, 2, 3]
# Build the space-separated list of result folders to search.
folder = ""
for name in [*set(model_names)]:
    folder += str(name)+"/_standard/ "
print("FOLDERS: ", folder)
## SCALAR CASES
# Collect the paths of every per-run result file into res_all.txt.
output = subprocess.check_output("find " + folder + "-name '*GCA-ROM.txt' -print > res_all.txt", shell=True)
file = 'res_all.txt'
sim_list = open(file).readlines()
# Pre-create an (empty) result list for every hyperparameter combination.
error = dict()
for name in problem_names:
    for sct in scalers_type:
        for scf in scalers_fun:
            for sk in skip_connection:
                for rt in train_rates:
                    for ffn in ffn_nodes:
                        for ln in latent_nodes:
                            for bn in btt_nodes:
                                for la in lambda_map:
                                    for hc in hidden_channels:
                                        error[name, sct, scf, sk, rt, ffn, ln, bn, la, hc] = list()
# Parse every result file, match its path against the hyperparameter
# tokens, and file the parsed error values; also build one flat
# DataFrame row per new combination (e[1] is the value stored as 'error').
data = pd.DataFrame()
k = 0
for (j, r) in enumerate(sim_list):
    r = r.replace('//', '/').replace('\n', '')
    e = pd.read_csv(r, sep=" ", header=None).to_numpy().squeeze()
    for it_1, name in enumerate(problem_names):
        for it_2, sct in enumerate(scalers_type):
            for it_3, scf in enumerate(scalers_fun):
                for it_4, sk in enumerate(skip_connection):
                    for it_5, rt in enumerate(train_rates):
                        for it_6, ffn in enumerate(ffn_nodes):
                            for it_7, ln in enumerate(latent_nodes):
                                for it_8, bn in enumerate(btt_nodes):
                                    for it_9, la in enumerate(lambda_map):
                                        for it_10, hc in enumerate(hidden_channels):
                                            if all(x in r for x in [name, sct, scf, sk, rt, ffn, ln, bn, la, hc]):
                                                print(name, sct, scf, sk, rt, ffn, ln, bn, la, hc, e)
                                                if not error[name, sct, scf, sk, rt, ffn, ln, bn, la, hc]:
                                                    error[name, sct, scf, sk, rt, ffn, ln, bn, la, hc].append(e)
                                                    temp = pd.DataFrame({'problem_names': it_1,
                                                                         'scalers_type': it_2,
                                                                         'scalers_fun': it_3,
                                                                         'skip_connection': it_4,
                                                                         'train_rates': train_rates_l[it_5],
                                                                         'ffn_nodes': ffn_nodes_l[it_6],
                                                                         'latent_nodes': latent_nodes_l[it_7],
                                                                         'btt_nodes': btt_nodes_l[it_8],
                                                                         'lambda_map': lambda_map_l[it_9],
                                                                         'hidden_channels': hidden_channels_l[it_10],
                                                                         'error': e[1]} , index=[k])
                                                    data = pd.concat([data, temp])
                                                    k+=1
                                                else:
                                                    # combination already seen: append the new values
                                                    error[name, sct, scf, sk, rt, ffn, ln, bn, la, hc][0] = np.concatenate([error[name, sct, scf, sk, rt, ffn, ln, bn, la, hc][0], np.array(e)])
# One x-axis label per hyperparameter combination (concatenated tokens).
my_xticks = []
for sct in scalers_type:
    for scf in scalers_fun:
        for sk in skip_connection:
            for rt in train_rates:
                for ffn in ffn_nodes:
                    for ln in latent_nodes:
                        for bn in btt_nodes:
                            for la in lambda_map:
                                for hc in hidden_channels:
                                    my_xticks.append(sct+scf+sk+rt+ffn+ln+bn+la+hc)
# Number of combinations per problem.
num_exp = len(scalers_fun)*len(scalers_type)*len(skip_connection)*len(train_rates)*len(ffn_nodes)*len(latent_nodes)*len(btt_nodes)*len(lambda_map)*len(hidden_channels)
x = range(num_exp)
er = np.array(list(error.items()))[:, 1]
# Combinations with no result file get a placeholder of ones so the
# stacked array below stays rectangular.
for y in er:
    if not y:
        y.append(np.ones(3))
error_array = np.vstack([y for y in er])
def plot_error(ax, name, x, var, i_min):
    """Scatter-plot min/mean/max error of every hyperparameter combination.

    Args:
        ax: matplotlib axes to draw on.
        name (str): problem name (used as title and as key into ``error``).
        x: range of combination indices (used only to set the x-limit).
        var (str): variable-name prefix used to print the best run folder.
        i_min (int): index of the best (minimum mean error) combination,
            highlighted in green and printed to stdout.

    Reads the module-level ``error`` dict and the hyperparameter sub-grid
    lists; combinations whose entry is the all-ones placeholder are drawn
    in white (i.e. invisible).
    """
    i = 0
    ax.set_title(name)
    for sct in scalers_type:
        for scf in scalers_fun:
            for sk in skip_connection:
                for rt in train_rates:
                    for ffn in ffn_nodes:
                        for ln in latent_nodes:
                            for bn in btt_nodes:
                                for la in lambda_map:
                                    for hc in hidden_channels:
                                        if np.array_equal(error[name, sct, scf, sk, rt, ffn, ln, bn, la, hc][0], np.ones(3)):
                                            # missing run: draw invisible markers
                                            color_min_max = "white"
                                            col_mean = "white"
                                        else:
                                            if i == i_min:
                                                # best combination: green mean marker
                                                print("HYPERPARAMETERS: ", name, sct, scf, sk, rt, ffn, ln, bn, la, hc)
                                                string = str(model)+"/_standard/"+str(var)+str(model)+"_"+str(la)+"_"+str(bn)+"_seed"+str(10)+"_lv"+str(4)+"_"+str(hc)+"_"+str(ln)+"_"+str(ffn)+"_"+str(sk)+"_lr"+str(0.001)+"_sc"+str(4)+"_"+str(rt)+"/"
                                                print("FOLDER = ", string)
                                                color_min_max = "#1C3144"
                                                col_mean = "#4cb944"
                                            else:
                                                color_min_max = "#1C3144"
                                                col_mean = "#70161E"
                                        # markers: ^ = entry 0, d = entry 1 (mean), v = entry 2
                                        ax.scatter(i, error[name, sct, scf, sk, rt, ffn, ln, bn, la, hc][0][0], marker="^", facecolors='white', edgecolors=color_min_max)
                                        ax.scatter(i, error[name, sct, scf, sk, rt, ffn, ln, bn, la, hc][0][1], marker="d", facecolors=col_mean, edgecolors=col_mean)
                                        ax.scatter(i, error[name, sct, scf, sk, rt, ffn, ln, bn, la, hc][0][2], marker="v", facecolors='white', edgecolors=color_min_max)
                                        ax.set_yscale('log')
                                        i+=1
    ax.tick_params(labelsize=8)
    # ax.set_xticks(x[:i], my_xticks[:i], rotation=65)
    ax.set_xlim([0, len(x[:i])])
# dim = np.reshape(range(len(problem_names)), (2, -1)).shape
# fig2, axs2 = plt.subplot_mosaic([['upper left', 'upper mid', 'upper right'],
#                                  ['lower left', 'lower mid', 'lower right']])
# One error-scatter figure per problem; the best combination (minimum of
# column 1 of error_array within this problem's slice) is highlighted.
for i, (name, model, var) in enumerate(zip(problem_names, model_names, variable_names)):
    model_path = '../plots/'+str(model)+'/tmp'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    fig1, axs1 = plt.subplots(1)
    i_min = np.argmin(error_array[i*num_exp:(i+1)*num_exp, 1], axis=0)
    print("\nBEST NETWORK FOR " + str(name) + " IS CORRESPONDING TO i = ", i_min)
    plot_error(axs1, name, x, var, i_min)
    # plot_error(axs2[k], name, x, var, i_min)
    fig1.tight_layout()
    fig1.savefig('../plots/'+str(model)+'/box_plot_error_all_'+str(name)+'.png', dpi=500)
    plt.close()
# fig2.tight_layout()
# fig2.savefig('../plots/box_plot_error_all.png', dpi=500)
# plt.show()
### PAIRPLOT
# print("Dataset shape: ", data.shape)
# print("Dataset: ", data)
# Column names / axis labels shared by the seaborn plots below.
columns_names = ["train_rates", "ffn_nodes", "latent_nodes", "btt_nodes", "lambda_map", "hidden_channels", "error"]
list_hyper = [train_rates, ffn_nodes, latent_nodes, btt_nodes, lambda_map, hidden_channels]
x_vars = ["train_rates", "ffn_nodes", "latent_nodes", "btt_nodes", "lambda_map", "hidden_channels"]
x_vars_title = ["$r_t$", "ffn", "$n_l$", "$n$", "$\lambda$", "hc"]
y_vars = ["error"]
n_vars = len(x_vars)
mark =["o", "s", "D"]
line =["-", "--", "-."]
def pairplot_box(data, xv, hue_var):
    """Seaborn box plot of 'error' against hyperparameter ``xv``, colored by ``hue_var``."""
    sns.set(style="ticks")
    return sns.catplot(data=data, x=xv, y=y_vars[0], hue=hue_var, kind="box")
def pairplot_point(data, xv, c, hue_var):
    """Seaborn point plot of 'error' against ``xv``; ``c`` selects the marker/line
    styles matching the number of levels of the hue variable."""
    sns.set(style="ticks")
    return sns.catplot(data=data, x=xv, y=y_vars[0], hue=hue_var, linestyles=line[:len(list_hyper[c])], markers=mark[:len(list_hyper[c])], kind="point")
    # return sns.pairplot(data[columns_names], x_vars=x_vars, y_vars=y_vars, hue=hue_var, markers=mark[:len(col)])
fontsize = 25
# For every problem: draw one box plot and one point plot for each ordered
# pair of distinct hyperparameters (x-axis vs hue), save them as temporary
# PNGs, then assemble the PNGs into two grid figures.
for n, (name, model) in enumerate(zip(problem_names, model_names)):
    data_name = data[data["problem_names"]==n]
    # print("Dataset name shape: ", data_name.shape)
    # print("Dataset name: ", data_name)
    j = 0
    for c, col in enumerate(x_vars):
        for tit, xv in enumerate(x_vars):
            counter = 0
            if xv == col:
                # skip the diagonal (same variable on both axes)
                counter += 1
            else:
                g1 = pairplot_box(data_name, xv, col)
                g2 = pairplot_point(data_name, xv, c, col)
                g1._legend.remove()
                g2._legend.remove()
                for ax in g1.axes.flat:
                    if ax.get_ylabel() in y_vars:
                        ax.set(yscale="log")
                    ax.set_xlabel(x_vars_title[tit+counter], fontsize=fontsize)
                    ax.set_ylabel(y_vars[0], fontsize=fontsize)
                    ax.tick_params(axis='both', which='major', labelsize=fontsize)
                    ax.tick_params(axis='both', which='minor', labelsize=fontsize)
                    ax.tick_params(labelsize=fontsize)
                    # only the last plot of each grid row gets the legend
                    if j in [nn - 1 for nn in range(0, n_vars*(n_vars - 1) + 1, n_vars - 1)[1:]]:
                        ax.legend(title=x_vars_title[c+counter], title_fontsize=fontsize)
                        sns.move_legend(ax, "center left", bbox_to_anchor=(1., 0.5), ncol=1, frameon=False, fontsize=fontsize)
                g1.savefig('../plots/'+str(model)+'/tmp/g1_'+str(j)+'_'+str(name)+'.png', dpi=500)
                plt.close(g1.fig)
                for ax in g2.axes.flat:
                    if ax.get_ylabel() in y_vars:
                        ax.set(yscale="log")
                    ax.set_xlabel(x_vars_title[tit+counter], fontsize=fontsize)
                    ax.set_ylabel(y_vars[0], fontsize=fontsize)
                    ax.tick_params(axis='both', which='major', labelsize=fontsize)
                    ax.tick_params(axis='both', which='minor', labelsize=fontsize)
                    ax.tick_params(labelsize=fontsize)
                    if j in [nn - 1 for nn in range(0, n_vars*(n_vars - 1) + 1, n_vars - 1)[1:]]:
                        ax.legend(title=x_vars_title[c+counter], title_fontsize=fontsize)
                        sns.move_legend(ax, "center left", bbox_to_anchor=(1., 0.5), ncol=1, frameon=False, fontsize=fontsize)
                g2.savefig('../plots/'+str(model)+'/tmp/g2_'+str(j)+'_'+str(name)+'.png', dpi=500)
                plt.close(g2.fig)
                j += 1
    # Re-read the temporary PNGs and tile them into two summary figures.
    f1, axarr1 = plt.subplots(len(x_vars), len(x_vars[:-1]), figsize=(20, 20))
    f2, axarr2 = plt.subplots(len(x_vars), len(x_vars[:-1]), figsize=(20, 20))
    i = 0
    for c, col in enumerate(x_vars):
        for cc, col in enumerate(x_vars[:-1]):
            axarr1[c,cc].imshow(mpimg.imread('../plots/'+str(model)+'/tmp/g1_'+str(i)+'_'+str(name)+'.png'))
            axarr2[c,cc].imshow(mpimg.imread('../plots/'+str(model)+'/tmp/g2_'+str(i)+'_'+str(name)+'.png'))
            i += 1
    [ax.set_axis_off() for ax in axarr1.ravel()]
    [ax.set_axis_off() for ax in axarr2.ravel()]
    # shutil.rmtree('../plots/'+str(model)+'/tmp/')
    f1.tight_layout()
    f2.tight_layout()
    f1.savefig('../plots/'+str(model)+'/sns_error_all_box_'+str(name)+'.png', dpi=500)
    f2.savefig('../plots/'+str(model)+'/sns_error_all_point_'+str(name)+'.png', dpi=500)
CRST | CRST-master/evaluate.py | import argparse
import scipy
from scipy import ndimage
import numpy as np
import sys
from packaging import version
import time
import util
import torch
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data, model_zoo
from deeplab.model import Res_Deeplab
from deeplab.datasets import GTA5TestDataSet
from collections import OrderedDict
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch.nn as nn
# Defaults for the command-line options defined in get_arguments() below.
# IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
IMG_MEAN = np.array((0.406, 0.456, 0.485), dtype=np.float32)  # BGR
IMG_STD = np.array((0.225, 0.224, 0.229), dtype=np.float32)  # BGR
DATA_DIRECTORY = './dataset/cityscapes'
DATA_LIST_PATH = './dataset/list/cityscapes/val.lst'
SAVE_PATH = './cityscapes/eval'
TEST_IMAGE_SIZE = '1024,2048'  # parsed as 'height,width'
TEST_SCALE = 1.0
IGNORE_LABEL = 255
NUM_CLASSES = 19
NUM_STEPS = 500 # Number of images in the validation set.
RESTORE_FROM = './src_model/gta5/src_model.pth'
DATA_SRC = 'cityscapes'
SET = 'val'
LOG_FILE = 'log'
MODEL = 'DeeplabRes'
def get_arguments():
    """Build and parse the evaluation command-line options.

    Returns:
        argparse.Namespace with every option (all have defaults, so the
        script also runs with no CLI arguments).
    """
    ap = argparse.ArgumentParser(description="DeepLab-ResNet Network")
    ap.add_argument("--model", type=str, default=MODEL,
                    help="Model Choice (DeeplabMulti/DeeplabVGG).")
    ap.add_argument("--data-src", type=str, default=DATA_SRC,
                    help="Data name.")
    ap.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
                    help="Path to the directory containing the Cityscapes dataset.")
    ap.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
                    help="Path to the file listing the images in the dataset.")
    ap.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                    help="The index of the label to ignore during the training.")
    ap.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                    help="Number of classes to predict (including background).")
    ap.add_argument("--restore-from", type=str, default=RESTORE_FROM,
                    help="Where restore model parameters from.")
    ap.add_argument("--gpu", type=int, default=0,
                    help="choose gpu device.")
    ap.add_argument('--test-flipping', dest='test_flipping', default=False,
                    action='store_true',
                    help='If average predictions of original and flipped images.')
    ap.add_argument("--set", type=str, default=SET,
                    help="choose evaluation set.")
    ap.add_argument("--save", type=str, default=SAVE_PATH,
                    help="Path to save result.")
    ap.add_argument("--log-file", type=str, default=LOG_FILE,
                    help="The name of log file.")
    ap.add_argument('--debug', default=False, action='store_true',
                    help='True means logging debug info.')
    ap.add_argument('--test-scale', type=str, default=TEST_SCALE,
                    help='The test scales. Multi-scale supported')
    ap.add_argument('--test-image-size', type=str, default=TEST_IMAGE_SIZE,
                    help='The test image size')
    return ap.parse_args()
# Parse CLI options once at import time; the module-level palette setup
# below and every function in this script read them.
args = get_arguments()

# Flat [R, G, B, R, G, B, ...] palette indexed by trainId, used by
# colorize_mask() to turn predicted label maps into color images.
if args.data_src == 'gta' or args.data_src == 'cityscapes':
    # gta:
    palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
               220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
               0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
elif args.data_src == 'synthia':
    # synthia:
    palette = [128,64,128,244,35,232,70,70,70,102,102,156,64,64,128,153,153,153,250,170,30,220,220,0,
               107,142,35,70,130,180,220,20,60,255,0,0,0,0,142,0,60,100,0,0,230,119,11,32]
else:
    # Fail fast with a clear message instead of a confusing NameError on
    # `palette` two lines below.
    raise ValueError('Unsupported --data-src for palette: %s' % args.data_src)
# Pad to the 256-entry (768-int) palette size PIL expects.
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
    palette.append(0)
def colorize_mask(mask):
    """Turn a trainId label map (numpy array) into a palettized PIL image."""
    colorized = Image.fromarray(mask.astype(np.uint8)).convert('P')
    colorized.putpalette(palette)
    return colorized
def main():
    """Create the model and start the evaluation process."""
    device = torch.device("cuda:" + str(args.gpu))
    if not os.path.exists(args.save):
        os.makedirs(args.save)
    logger = util.set_logger(args.save, args.log_file, args.debug)
    logger.info('start with arguments %s', args)
    # Count evaluation images by counting lines in the list file.
    x_num = 0
    with open(args.data_list) as f:
        for _ in f.readlines():
            x_num = x_num + 1
    # Pull in the label-definition helpers matching the source dataset.
    sys.path.insert(0, 'dataset/helpers')
    if args.data_src == 'gta' or args.data_src == 'cityscapes':
        from labels import id2label, trainId2label
    elif args.data_src == 'synthia':
        from labels_cityscapes_synthia import id2label, trainId2label
    #
    # label_2_id maps raw label ids -> trainIds; unmapped entries stay 255 (ignore).
    label_2_id = 255 * np.ones((256,))
    for l in id2label:
        if l in (-1, 255):
            continue
        label_2_id[l] = id2label[l].trainId
    # Inverse mapping: trainId -> raw label id.
    id_2_label = np.array([trainId2label[_].id for _ in trainId2label if _ not in (-1, 255)])
    valid_labels = sorted(set(id_2_label.ravel()))
    scorer = ScoreUpdater(valid_labels, args.num_classes, x_num, logger)
    scorer.reset()
    if args.model == 'DeeplabRes':
        model = Res_Deeplab(num_classes=args.num_classes)
    # elif args.model == 'DeeplabVGG':
    #     model = DeeplabVGG(num_classes=args.num_classes)
    #     if args.restore_from == RESTORE_FROM:
    #         args.restore_from = RESTORE_FROM_VGG
    # Restore weights either from a URL or a local checkpoint; for URL
    # checkpoints the final 'fc' layer is skipped.
    if args.restore_from[:4] == 'http' :
        saved_state_dict = model_zoo.load_url(args.restore_from)
        new_params = model.state_dict().copy()
        for i in saved_state_dict:
            # Scale.layer5.conv2d_list.3.weight
            i_parts = str(i).split('.')
            # print i_parts
            if not i_parts[0] == 'fc':
                new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    else:
        loc = "cuda:" + str(args.gpu)
        saved_state_dict = torch.load(args.restore_from,map_location=loc)
        new_params = saved_state_dict.copy()
    model.load_state_dict(new_params)
    #model.train()
    model.eval()
    model.to(device)
    testloader = data.DataLoader(GTA5TestDataSet(args.data_dir, args.data_list, test_scale = 1.0, test_size=(1024, 512), mean=IMG_MEAN, std=IMG_STD, scale=False, mirror=False),
                                    batch_size=1, shuffle=False, pin_memory=True)
    # Multi-scale testing: predictions at each scale are averaged.
    test_scales = [float(_) for _ in str(args.test_scale).split(',')]
    h, w = map(int, args.test_image_size.split(','))
    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(h, w), mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=(h, w), mode='bilinear')
    test_image_size = (h, w)
    mean_rgb = IMG_MEAN[::-1].copy()
    std_rgb = IMG_STD[::-1].copy()
    with torch.no_grad():
        for index, batch in enumerate(testloader):
            image, label, _, name = batch
            img = image.clone()
            num_scales = len(test_scales)
            # output_dict = {k: [] for k in range(num_scales)}
            for scale_idx in range(num_scales):
                # Resize input to the current test scale.
                if version.parse(torch.__version__) > version.parse('0.4.0'):
                    image = F.interpolate(image, scale_factor=test_scales[scale_idx], mode='bilinear', align_corners=True)
                else:
                    test_size = ( int(h*test_scales[scale_idx]), int(w*test_scales[scale_idx]) )
                    interp_tmp = nn.Upsample(size=test_size, mode='bilinear', align_corners=True)
                    image = interp_tmp(img)
                if args.model == 'DeeplabRes':
                    output2 = model(image.to(device))
                    coutput = interp(output2).cpu().data[0].numpy()
                # Optionally average with the horizontally-flipped prediction.
                if args.test_flipping:
                    output2 = model(torch.from_numpy(image.numpy()[:,:,:,::-1].copy()).to(device))
                    coutput = 0.5 * ( coutput + interp(output2).cpu().data[0].numpy()[:,:,::-1] )
                if scale_idx == 0:
                    output = coutput.copy()
                else:
                    output += coutput
            # Average logits over scales, then take the argmax class map.
            output = output/num_scales
            output = output.transpose(1,2,0)
            output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
            pred_label = output.copy()
            label = label_2_id[np.asarray(label.numpy(), dtype=np.uint8)]
            scorer.update(pred_label.flatten(), label.flatten(), index)
            # Save the raw trainId map and a colorized version.
            output_col = colorize_mask(output)
            output = Image.fromarray(output)
            name = name[0].split('/')[-1]
            output.save('%s/%s' % (args.save, name))
            output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
class ScoreUpdater(object):
    """Accumulates a confusion matrix over samples and reports per-class IoU.

    Only IoU is computed; accuracy-style metrics are intentionally ignored.
    """

    def __init__(self, valid_labels, c_num, x_num, logger=None, label=None, info=None):
        self._valid_labels = valid_labels
        self._confs = np.zeros((c_num, c_num))
        self._per_cls_iou = np.zeros(c_num)
        self._logger = logger
        self._label = label
        self._info = info
        self._num_class = c_num
        self._num_sample = x_num

    @property
    def info(self):
        """Opaque extra info supplied at construction time."""
        return self._info

    def reset(self):
        """Zero the accumulated statistics and restart the speed timer."""
        self._start = time.time()
        self._computed = np.zeros(self._num_sample)  # one-dimension
        self._confs[:] = 0

    def fast_hist(self, label, pred_label, n):
        """Return the n x n confusion matrix of ground truth vs. prediction."""
        valid = (label >= 0) & (label < n)
        flat = n * label[valid].astype(int) + pred_label[valid]
        return np.bincount(flat, minlength=n ** 2).reshape(n, n)

    def per_class_iu(self, hist):
        """Per-class intersection-over-union from a confusion matrix."""
        intersection = np.diag(hist)
        union = hist.sum(1) + hist.sum(0) - intersection
        return intersection / union

    def do_updates(self, conf, i, computed=True):
        """Mark sample *i* as processed and refresh the cached per-class IoU."""
        if computed:
            self._computed[i] = 1
        self._per_cls_iou = self.per_class_iu(conf)

    def update(self, pred_label, label, i, computed=True):
        """Fold one sample's flattened prediction/label pair into the stats."""
        self._confs += self.fast_hist(label, pred_label, self._num_class)
        self.do_updates(self._confs, i, computed)
        self.scores(i)

    def scores(self, i=None, logger=None):
        """Log progress and mean IoU (when a logger is set); return nan-free IoUs."""
        ious = np.nan_to_num(self._per_cls_iou)
        logger = self._logger if logger is None else logger
        if logger is not None:
            if i is not None:
                speed = 1. * self._computed.sum() / (time.time() - self._start)
                logger.info('Done {}/{} with speed: {:.2f}/s'.format(i + 1, self._num_sample, speed))
            name = '' if self._label is None else '{}, '.format(self._label)
            logger.info('{}mean iou: {:.2f}%'. \
                format(name, np.mean(ious) * 100))
            with util.np_print_options(formatter={'float': '{:5.2f}'.format}):
                logger.info('\n{}'.format(ious * 100))
        return ious
# Script entry point.
if __name__ == '__main__':
    main()
| 11,168 | 39.762774 | 176 | py |
CRST | CRST-master/util.py | """
utilities for convenience
"""
import contextlib
import logging
import os.path as osp
from io import BytesIO, StringIO

import h5py
import numpy as np
import yaml
from PIL import Image
cfg = {}
def as_list(obj):
    """Wrap *obj* in a list unless it already is one.

    Parameters
    ----------
    obj : object

    Returns
    -------
    `obj` itself when it is a list, otherwise the one-element list `[obj]`.
    """
    return obj if isinstance(obj, list) else [obj]
def get_interp_method(imh_src, imw_src, imh_dst, imw_dst, default=Image.CUBIC):
    """Pick a PIL resampling filter for a src -> dst resize.

    Honors the module-level ``cfg['choose_interpolation_method']`` switch;
    while it is unset/false (the default) *default* is returned unchanged.
    Otherwise: ANTIALIAS when shrinking both dims, CUBIC when growing both,
    LINEAR for mixed cases.
    """
    if not cfg.get('choose_interpolation_method', False):
        return default
    shrinking = imh_dst < imh_src and imw_dst < imw_src
    growing = imh_dst > imh_src and imw_dst > imw_src
    if shrinking:
        return Image.ANTIALIAS
    if growing:
        return Image.CUBIC
    return Image.LINEAR
def h5py_save(to_path, *data):
    """Write each positional datum to *to_path* as HDF5 dataset 'd0', 'd1', ..."""
    with h5py.File(to_path, 'w') as f:
        for idx, datum in enumerate(data):
            f.create_dataset('d{}'.format(idx), data=datum)
def h5py_load(from_path):
    """Load every dataset stored in an HDF5 file.

    Returns a tuple of the stored values, in the file's key order, or an
    empty tuple when *from_path* does not exist.
    """
    data = []
    if osp.isfile(from_path):
        # Open read-only explicitly: opening without a mode is deprecated and
        # rejected by recent h5py releases.
        with h5py.File(from_path, 'r') as f:
            for k in f.keys():
                data.append(f[k][()])
    return tuple(data)
def load_image_with_cache(path, cache=None):
    """Open an image with PIL, optionally memoizing the raw file bytes.

    When *cache* (a dict-like object) is given, the file is read once and its
    bytes are reused on subsequent calls for the same *path*.

    Fixes vs. the original: ``dict.has_key`` does not exist in Python 3, and
    PIL needs a binary stream (BytesIO, not StringIO) to parse raw bytes.
    """
    if cache is not None:
        if path not in cache:
            with open(path, 'rb') as f:
                cache[path] = f.read()
        return Image.open(BytesIO(cache[path]))
    return Image.open(path)
@contextlib.contextmanager
def np_print_options(*args, **kwargs):
    """Temporarily override numpy print options within a ``with`` block.

    The previous options are restored in a ``finally`` clause, so they come
    back even when the body raises (the original version leaked the override
    on exceptions).
    """
    original = np.get_printoptions()
    np.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        np.set_printoptions(**original)
def read_cfg(cfg_file, cfg_info=None):
    """Read a YAML config file; pass *cfg_info* through when no path is given.

    Returns the parsed YAML document, or the unchanged *cfg_info* default
    when *cfg_file* is None.
    """
    if cfg_file is not None:
        print('Read config file {}'.format(cfg_file))
        with open(cfg_file) as f:
            # safe_load: yaml.load without an explicit Loader is unsafe on
            # untrusted input and is a TypeError on PyYAML >= 6.
            cfg_info = yaml.safe_load(f)
    return cfg_info
def set_logger(output_dir=None, log_file=None, debug=False):
    """Configure and return the root logger.

    When both *output_dir* and *log_file* are provided (and non-empty), a
    file handler plus a console handler are attached to the root logger;
    otherwise ``logging.basicConfig`` is used. *debug* switches the level
    from INFO to DEBUG.
    """
    fmt = '%(asctime)-15s Host %(message)s'
    level = logging.DEBUG if debug else logging.INFO
    if all((output_dir, log_file)) and len(log_file) > 0:
        logger = logging.getLogger()
        formatter = logging.Formatter(fmt)
        file_handler = logging.FileHandler(osp.join(output_dir, log_file))
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        logger.setLevel(level)
    else:
        logging.basicConfig(level=level, format=fmt)
        logger = logging.getLogger()
    return logger
| 2,693 | 26.489796 | 86 | py |
CRST | CRST-master/crst_seg.py | import argparse
import sys
from packaging import version
import time
import util
import os
import os.path as osp
import timeit
from collections import OrderedDict
import scipy.io
import torch
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data, model_zoo
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from operator import itemgetter
import scipy
from scipy import ndimage
import math
from PIL import Image
import numpy as np
import shutil
import random
from deeplab.model import Res_Deeplab
from deeplab.datasets import GTA5TestDataSet
from deeplab.datasets import SrcSTDataSet, GTA5StMineDataSet, SoftSrcSTDataSet, SoftGTA5StMineDataSet
### shared ###
# Per-channel normalization stats; the inline comments say BGR channel order.
IMG_MEAN = np.array((0.406, 0.456, 0.485), dtype=np.float32) # BGR
IMG_STD = np.array((0.225, 0.224, 0.229), dtype=np.float32) # BGR
# data
### source
## gta
DATA_SRC_DIRECTORY = './dataset/gta5'
DATA_SRC_LIST_PATH = './dataset/list/gta5/train.lst'
DATA_SRC = 'gta'
RESTORE_FROM = './src_model/gta5/src_model.pth'
NUM_CLASSES = 19
# Initial fraction of the source set sampled each round.
INIT_SRC_PORT = 0.03 # GTA: 0.03
### target
DATA_TGT_DIRECTORY = './dataset/cityscapes'
DATA_TGT_TRAIN_LIST_PATH = './dataset/list/cityscapes/train_ClsConfSet.lst'
DATA_TGT_TEST_LIST_PATH = './dataset/list/cityscapes/val.lst'
IGNORE_LABEL = 255
# train scales for src and tgt
TRAIN_SCALE_SRC = '0.5,1.5'
TRAIN_SCALE_TGT = '0.5,1.5'
# model
MODEL = 'DeeplabRes'
# gpu
GPU = 0
PIN_MEMORY = False
# log files
LOG_FILE = 'self_training_log'
### train ###
BATCH_SIZE = 2
INPUT_SIZE = '512,1024'# 512,1024 for GTA;
RANDSEED = 3
# params for optimizor
LEARNING_RATE =5e-5
POWER = 0.0
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0005
# Self-training schedule: number of rounds and epochs per round.
NUM_ROUNDS = 4
EPR = 2
# 'c' = cumulative, 'r' = replace (see --src-sampling-policy help below).
SRC_SAMPLING_POLICY = 'r'
# Pseudo-label selection policy/value ('cb' = class-balanced threshold).
KC_POLICY = 'cb'
KC_VALUE = 'conf'
# Portion of target pixels used to determine thresholds, grown per round.
INIT_TGT_PORT = 0.2
MAX_TGT_PORT = 0.5
TGT_PORT_STEP = 0.05
# varies but dataset
MAX_SRC_PORT = 0.06 #0.06;
SRC_PORT_STEP = 0.0025 #0.0025:
# Regularization weights (kld/entropy/source) -- all off by default.
MRKLD = 0.0
LRENT = 0.0
MRSRC = 0.0
# Patch-mining parameters for rare classes.
MINE_PORT = 1e-3
RARE_CLS_NUM = 3
MINE_CHANCE = 0.8
### val ###
SAVE_PATH = 'debug'
TEST_IMAGE_SIZE = '1024,2048'
EVAL_SCALE = 0.9
TEST_SCALE = '0.9,1.0,1.2'
DS_RATE = 4
def seed_torch(seed=0):
    """Seed every RNG in use (python, hash seed, numpy, torch CPU and CUDA)
    and disable cuDNN for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    seeders = (random.seed, np.random.seed, torch.manual_seed,
               torch.cuda.manual_seed, torch.cuda.manual_seed_all)
    for seed_fn in seeders:
        seed_fn(seed)
    # cuDNN is switched off entirely for determinism (the benchmark /
    # deterministic toggles were left commented out in the original).
    torch.backends.cudnn.enabled = False
def get_arguments():
    """Parse all the arguments provided from the CLI.
    Returns:
      An argparse.Namespace of parsed arguments (all options have defaults).
    """
    parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
    ### shared by train & val
    # data
    parser.add_argument("--data-src", type=str, default=DATA_SRC,
                        help="Name of source dataset.")
    parser.add_argument("--data-src-dir", type=str, default=DATA_SRC_DIRECTORY,
                        help="Path to the directory containing the source dataset.")
    parser.add_argument("--data-src-list", type=str, default=DATA_SRC_LIST_PATH,
                        help="Path to the file listing the images&labels in the source dataset.")
    parser.add_argument("--data-tgt-dir", type=str, default=DATA_TGT_DIRECTORY,
                        help="Path to the directory containing the target dataset.")
    parser.add_argument("--data-tgt-train-list", type=str, default=DATA_TGT_TRAIN_LIST_PATH,
                        help="Path to the file listing the images*GT labels in the target train dataset.")
    parser.add_argument("--data-tgt-test-list", type=str, default=DATA_TGT_TEST_LIST_PATH,
                        help="Path to the file listing the images*GT labels in the target test dataset.")
    parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                        help="The index of the label to ignore during the training.")
    # model
    parser.add_argument("--model", type=str, default=MODEL,
                        help="Model Choice (DeeplabMulti/DeeplabVGG).")
    parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
                        help="Where restore model parameters from.")
    # gpu
    parser.add_argument("--gpu", type=int, default=GPU,
                        help="choose gpu device.")
    parser.add_argument("--pin-memory", type=bool, default=PIN_MEMORY,
                        help="Whether to pin memory in train & eval.")
    # log files
    parser.add_argument("--log-file", type=str, default=LOG_FILE,
                        help="The name of log file.")
    parser.add_argument('--debug',help='True means logging debug info.',
                        default=False, action='store_true')
    ### train ###
    parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
                        help="Comma-separated string with height and width of images.")
    parser.add_argument("--is-training", action="store_true",
                        help="Whether to updates the running means and variances during the training.")
    parser.add_argument("--eval-training", action="store_true",
                        help="Use the saved means and variances, or running means and variances during the evaluation.")
    parser.add_argument("--random-mirror", action="store_true",
                        help="Whether to randomly mirror the inputs during the training.")
    parser.add_argument("--random-scale", action="store_true",
                        help="Whether to randomly scale the inputs during the training.")
    parser.add_argument("--train-scale-src", type=str, default=TRAIN_SCALE_SRC,
                        help="The scale for multi-scale training in source domain.")
    parser.add_argument("--train-scale-tgt", type=str, default=TRAIN_SCALE_TGT,
                        help="The scale for multi-scale training in target domain.")
    # params for optimizor
    parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
                        help="Base learning rate for training with polynomial decay.")
    parser.add_argument("--power", type=float, default=POWER,
                        help="Decay parameter to compute the learning rate.")
    parser.add_argument("--momentum", type=float, default=MOMENTUM,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
                        help="Regularisation parameter for L2-loss.")
    ### val
    parser.add_argument('--test-flipping', dest='test_flipping',
                        help='If average predictions of original and flipped images.',
                        default=False, action='store_true')
    parser.add_argument("--test-image-size", type=str, default=TEST_IMAGE_SIZE,
                        help="The test image size.")
    parser.add_argument("--eval-scale", type=float, default=EVAL_SCALE,
                        help="The test image scale.")
    parser.add_argument("--test-scale", type=str, default=TEST_SCALE,
                        help="The test image scale.")
    ### self-training params
    parser.add_argument("--save", type=str, default=SAVE_PATH,
                        help="Path to save result for self-training.")
    parser.add_argument("--num-rounds", type=int, default=NUM_ROUNDS,
                        help="Number of rounds for self-training.")
    parser.add_argument("--epr", type=int, default=EPR,
                        help="Number of epochs per round for self-training.")
    parser.add_argument('--kc-policy', default=KC_POLICY, type=str, dest='kc_policy',
                        help='The policy to determine kc. "cb" for weighted class-balanced threshold')
    parser.add_argument('--kc-value', default=KC_VALUE, type=str,
                        help='The way to determine kc values, either "conf", or "prob".')
    parser.add_argument('--ds-rate', default=DS_RATE, type=int,
                        help='The downsampling rate in kc calculation.')
    parser.add_argument('--init-tgt-port', default=INIT_TGT_PORT, type=float, dest='init_tgt_port',
                        help='The initial portion of target to determine kc')
    parser.add_argument('--max-tgt-port', default=MAX_TGT_PORT, type=float, dest='max_tgt_port',
                        help='The max portion of target to determine kc')
    parser.add_argument('--tgt-port-step', default=TGT_PORT_STEP, type=float, dest='tgt_port_step',
                        help='The portion step in target domain in every round of self-paced self-trained neural network')
    parser.add_argument('--init-src-port', default=INIT_SRC_PORT, type=float, dest='init_src_port',
                        help='The initial portion of source portion for self-trained neural network')
    parser.add_argument('--max-src-port', default=MAX_SRC_PORT, type=float, dest='max_src_port',
                        help='The max portion of source portion for self-trained neural network')
    parser.add_argument('--src-port-step', default=SRC_PORT_STEP, type=float, dest='src_port_step',
                        help='The portion step in source domain in every round of self-paced self-trained neural network')
    parser.add_argument('--randseed', default=RANDSEED, type=int,
                        help='The random seed to sample the source dataset.')
    parser.add_argument("--src-sampling-policy", type=str, default=SRC_SAMPLING_POLICY,
                        help="The sampling policy on source dataset: 'c' for 'cumulative' and 'r' for replace ")
    parser.add_argument('--mine-port', default=MINE_PORT, type=float,
                        help='If a class has a predication portion lower than the mine_port, then mine the patches including the class in self-training.')
    parser.add_argument('--rare-cls-num', default=RARE_CLS_NUM, type=int,
                        help='The number of classes to be mined.')
    parser.add_argument('--mine-chance', default=MINE_CHANCE, type=float,
                        help='The chance of patch mining.')
    parser.add_argument('--rm-prob',
                        help='If remove the probability maps generated in every round.',
                        default=False, action='store_true')
    parser.add_argument('--mr-weight-kld', default=MRKLD, type=float, dest='mr_weight_kld',
                        help='weight of kld model regularization')
    parser.add_argument('--lr-weight-ent', default=LRENT, type=float, dest='lr_weight_ent',
                        help='weight of negative entropy label regularization')
    parser.add_argument('--mr-weight-src', default=MRSRC, type=float, dest='mr_weight_src',
                        help='weight of regularization in source domain')
    return parser.parse_args()
# Parse CLI options once at import time; the palette setup below and every
# function in this script read them.
args = get_arguments()

# Flat [R, G, B, R, G, B, ...] palette indexed by trainId, used by
# colorize_mask() to visualize label maps.
if args.data_src == 'gta':
    # gta:
    palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
               220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
               0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
else:
    # Fail fast with a clear message: previously any other --data-src fell
    # through and crashed below with a NameError on `palette`.
    raise ValueError('Unsupported --data-src for palette: %s' % args.data_src)
# Pad to the 256-entry (768-int) palette size PIL expects.
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
    palette.append(0)
def colorize_mask(mask):
    """Convert a trainId mask (numpy array) into a color-palettized PIL image."""
    img = Image.fromarray(mask.astype(np.uint8)).convert('P')
    img.putpalette(palette)
    return img
def main():
    """Drive the full CRST self-training loop.

    Per round: evaluate on the target train set to collect confidences,
    compute class-balanced thresholds, generate pseudo-labels, then retrain
    the model on a mix of sampled source data and pseudo-labeled target
    data, and finally test on the target test set. The last round only
    evaluates the final model.
    """
    randseed = args.randseed
    seed_torch(randseed)
    device = torch.device("cuda:" + str(args.gpu))
    # Output directory layout under args.save.
    save_path = args.save
    save_pseudo_label_path = osp.join(save_path, 'pseudo_label') # in 'save_path'. Save labelIDs, not trainIDs.
    save_stats_path = osp.join(save_path, 'stats') # in 'save_path'
    save_lst_path = osp.join(save_path, 'list')
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if not os.path.exists(save_pseudo_label_path):
        os.makedirs(save_pseudo_label_path)
    if not os.path.exists(save_stats_path):
        os.makedirs(save_stats_path)
    if not os.path.exists(save_lst_path):
        os.makedirs(save_lst_path)
    logger = util.set_logger(args.save, args.log_file, args.debug)
    logger.info('start with arguments %s', args)
    if args.model == 'DeeplabRes':
        model = Res_Deeplab(num_classes=args.num_classes)
    # Restore weights from a URL (skipping the 'fc' layer) or a local file.
    if args.restore_from[:4] == 'http' :
        saved_state_dict = model_zoo.load_url(args.restore_from)
        new_params = model.state_dict().copy()
        for i in saved_state_dict:
            # Scale.layer5.conv2d_list.3.weight
            i_parts = str(i).split('.')
            # print i_parts
            if not i_parts[0] == 'fc':
                new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    else:
        loc = "cuda:" + str(args.gpu)
        saved_state_dict = torch.load(args.restore_from,map_location=loc)
        new_params = saved_state_dict.copy()
    model.load_state_dict(new_params)
    # Parse the image/label list files for both domains.
    image_src_list, _, label_src_list, src_num = parse_split_list(args.data_src_list)
    image_tgt_list, image_name_tgt_list, _, tgt_num = parse_split_list(args.data_tgt_train_list)
    _, _, _, test_num = parse_split_list(args.data_tgt_test_list)
    ## label mapping
    sys.path.insert(0, 'dataset/helpers')
    if args.data_src == 'synthia':
        from labels_cityscapes_synthia import id2label, trainId2label
    elif args.data_src == 'gta':
        from labels import id2label, trainId2label
    # label_2_id: raw label id -> trainId (255 = ignore); id_2_label: inverse.
    label_2_id = 255 * np.ones((256,))
    for l in id2label:
        if l in (-1, 255):
            continue
        label_2_id[l] = id2label[l].trainId
    id_2_label = np.array([trainId2label[_].id for _ in trainId2label if _ not in (-1, 255)])
    valid_labels = sorted(set(id_2_label.ravel()))
    # portions
    tgt_portion = args.init_tgt_port
    src_portion = args.init_src_port
    # training crop size
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)
    lscale_src, hscale_src = map(float, args.train_scale_src.split(','))
    train_scale_src = (lscale_src, hscale_src)
    lscale_tgt, hscale_tgt = map(float, args.train_scale_tgt.split(','))
    train_scale_tgt = (lscale_tgt, hscale_tgt)
    for round_idx in range(args.num_rounds):
        save_round_eval_path = osp.join(args.save,str(round_idx))
        save_pseudo_label_color_path = osp.join(save_round_eval_path, 'pseudo_label_color') # in every 'save_round_eval_path'
        if not os.path.exists(save_round_eval_path):
            os.makedirs(save_round_eval_path)
        if not os.path.exists(save_pseudo_label_color_path):
            os.makedirs(save_pseudo_label_color_path)
        ########## pseudo-label generation
        if round_idx != args.num_rounds - 1:
            # evaluation & save confidence vectors
            conf_dict, pred_cls_num, save_prob_path, save_pred_path = val(model, device, save_round_eval_path, round_idx, tgt_num,
                                                                          label_2_id, valid_labels, args, logger)
            # class-balanced thresholds
            cls_thresh = kc_parameters(conf_dict, pred_cls_num, tgt_portion, round_idx, save_stats_path, args, logger)
            # Grow the selected target portion each round, capped at the max.
            tgt_portion = min(tgt_portion + args.tgt_port_step, args.max_tgt_port)
            # pseudo-label maps generation
            label_selection(cls_thresh, tgt_num, image_name_tgt_list, id_2_label, round_idx, save_prob_path, save_pred_path, save_pseudo_label_path, save_pseudo_label_color_path, save_round_eval_path, args, logger)
            # save training list
            # 'c' keeps the same seed (cumulative sampling); 'r' bumps it
            # so each round re-samples the source subset.
            if args.src_sampling_policy == 'c':
                randseed = args.randseed
            elif args.src_sampling_policy == 'r':
                randseed += 1
            src_train_lst, tgt_train_lst, src_num_sel = savelst_SrcTgt(src_portion, image_tgt_list, image_name_tgt_list, image_src_list, label_src_list, save_lst_path, save_pseudo_label_path, src_num, tgt_num, randseed, args)
            src_portion = min(src_portion + args.src_port_step, args.max_src_port)
            ########### model retraining
            # dataset
            epoch_per_round = args.epr
            # reg weights
            if args.mr_weight_kld == 0:
                reg_weight_tgt = 0.0
            else: # currently only one kind of model regularizer is supported
                reg_weight_tgt = args.mr_weight_kld
            reg_weight_src = args.mr_weight_src
            ### patch mining params
            # no patch mining in src
            # patch mining in target
            rare_id = np.load(save_stats_path + '/rare_id_round' + str(round_idx) + '.npy')
            mine_id = np.load(save_stats_path + '/mine_id_round' + str(round_idx) + '.npy')
            mine_chance = args.mine_chance
            # dataloader
            # Hard pseudo-labels (lr_weight_ent == 0) vs. soft label variants.
            if args.lr_weight_ent == 0.0:
                srctrainset = SrcSTDataSet(args.data_src_dir, src_train_lst, max_iters=src_num_sel,reg_weight=reg_weight_src,data_src=args.data_src,
                                           crop_size=input_size,scale=args.random_scale, mirror=args.random_mirror, train_scale=train_scale_src, mean=IMG_MEAN, std=IMG_STD)
                tgttrainset = GTA5StMineDataSet(args.data_tgt_dir, tgt_train_lst, pseudo_root=save_pseudo_label_path, max_iters=tgt_num,reg_weight=reg_weight_tgt,rare_id = rare_id,
                                                mine_id=mine_id, mine_chance = mine_chance, crop_size=input_size,scale=args.random_scale,data_src=args.data_src,
                                                mirror=args.random_mirror, train_scale=train_scale_tgt, mean=IMG_MEAN, std=IMG_STD)
            elif args.lr_weight_ent > 0.0:
                srctrainset = SoftSrcSTDataSet(args.data_src_dir, src_train_lst, max_iters=src_num_sel,reg_weight=reg_weight_src,data_src=args.data_src,num_classes=args.num_classes,
                                               crop_size=input_size,scale=args.random_scale, mirror=args.random_mirror, train_scale=train_scale_src, mean=IMG_MEAN, std=IMG_STD)
                tgttrainset = SoftGTA5StMineDataSet(args.data_tgt_dir, tgt_train_lst, pseudo_root=save_pseudo_label_path, max_iters=tgt_num,reg_weight=reg_weight_tgt,rare_id = rare_id,
                                                   mine_id=mine_id, mine_chance = mine_chance, crop_size=input_size,scale=args.random_scale,data_src=args.data_src,num_classes=args.num_classes,
                                                   mirror=args.random_mirror, train_scale=train_scale_tgt, mean=IMG_MEAN, std=IMG_STD)
            mixtrainset = torch.utils.data.ConcatDataset([srctrainset, tgttrainset])
            mix_trainloader = torch.utils.data.DataLoader(mixtrainset, batch_size=args.batch_size, shuffle=True,
                                                          num_workers=0, pin_memory=args.pin_memory)
            # optimizer
            tot_iter = np.ceil(float(src_num_sel + tgt_num) / args.batch_size)
            optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.learning_rate},
                                   {'params': get_10x_lr_params(model), 'lr': 10 * args.learning_rate}],
                                  lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
            interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)
            logger.info('###### Start model retraining dataset in round {}! ######'.format(round_idx))
            # model
            if args.is_training:
                model.train()
            else:
                model.eval()
            start = timeit.default_timer()
            # cudnn
            cudnn.enabled = True # enable cudnn
            cudnn.benchmark = True # enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware.
            # start training
            for epoch in range(epoch_per_round):
                train(mix_trainloader, model, device, interp, optimizer, tot_iter, round_idx, epoch, args, logger)
            end = timeit.default_timer()
            logger.info('###### Finish model retraining dataset in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx, end - start))
            # test self-trained model in target domain test set
            tgt_set = 'test'
            test(model, device, save_round_eval_path, round_idx, tgt_set, test_num, args.data_tgt_test_list, label_2_id,
                 valid_labels, args, logger)
        elif round_idx == args.num_rounds - 1:
            # Final round: no retraining; clean up pseudo-labels and
            # evaluate the final model on both train and test splits.
            shutil.rmtree(save_pseudo_label_path)
            tgt_set = 'train'
            test(model, device, save_round_eval_path, round_idx, tgt_set, tgt_num, args.data_tgt_train_list, label_2_id,
                 valid_labels, args, logger)
            tgt_set = 'test'
            test(model, device, save_round_eval_path, round_idx, tgt_set, test_num, args.data_tgt_test_list, label_2_id,
                 valid_labels, args, logger)
def val(model, device, save_round_eval_path, round_idx, tgt_num, label_2_id, valid_labels, args, logger):
    """Evaluate on the target train set and collect class-wise confidences.

    Saves per-image probability maps, predicted trainId maps, and colorized
    predictions under *save_round_eval_path*.

    Returns:
        (conf_dict, pred_cls_num, save_prob_path, save_pred_path) where
        conf_dict maps each class index to its (downsampled) confidence
        values and pred_cls_num counts predicted pixels per class.
    """
    ## scorer
    scorer = ScoreUpdater(valid_labels, args.num_classes, tgt_num, logger)
    scorer.reset()
    h, w = map(int, args.test_image_size.split(','))
    test_image_size = (h, w)
    test_size = ( int(h*args.eval_scale), int(w*args.eval_scale) )
    ## test data loader
    testloader = data.DataLoader(GTA5TestDataSet(args.data_tgt_dir, args.data_tgt_train_list, test_size=test_size, test_scale=args.eval_scale, mean=IMG_MEAN, std=IMG_STD, scale=False, mirror=False),
                                    batch_size=1, shuffle=False, pin_memory=args.pin_memory)
    ## model for evaluation
    if args.eval_training:
        model.train()
    else:
        model.eval()
    #
    model.to(device)
    ## upsampling layer
    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=test_image_size, mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=test_image_size, mode='bilinear')
    ## output of deeplab is logits, not probability
    softmax2d = nn.Softmax2d()
    ## output folder
    save_pred_vis_path = osp.join(save_round_eval_path, 'pred_vis')
    save_prob_path = osp.join(save_round_eval_path, 'prob')
    save_pred_path = osp.join(save_round_eval_path, 'pred')
    if not os.path.exists(save_pred_vis_path):
        os.makedirs(save_pred_vis_path)
    if not os.path.exists(save_prob_path):
        os.makedirs(save_prob_path)
    if not os.path.exists(save_pred_path):
        os.makedirs(save_pred_path)
    # saving output data
    conf_dict = {k: [] for k in range(args.num_classes)}
    pred_cls_num = np.zeros(args.num_classes)
    ## evaluation process
    logger.info('###### Start evaluating target domain train set in round {}! ######'.format(round_idx))
    start_eval = time.time()
    with torch.no_grad():
        for index, batch in enumerate(testloader):
            image, label, _, name = batch
            if args.model == 'DeeplabRes':
                output2 = model(image.to(device))
                output = softmax2d(interp(output2)).cpu().data[0].numpy()
            # Optionally average with the horizontally-flipped prediction.
            if args.test_flipping:
                output2 = model(torch.from_numpy(image.numpy()[:,:,:,::-1].copy()).to(device))
                output = 0.5 * ( output + softmax2d(interp(output2)).cpu().data[0].numpy()[:,:,::-1] )
            # HWC probability map -> argmax class map + per-pixel max confidence.
            output = output.transpose(1,2,0)
            amax_output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
            conf = np.amax(output,axis=2)
            # score
            pred_label = amax_output.copy()
            label = label_2_id[np.asarray(label.numpy(), dtype=np.uint8)]
            scorer.update(pred_label.flatten(), label.flatten(), index)
            # save visualized seg maps & predication prob map
            amax_output_col = colorize_mask(amax_output)
            name = name[0].split('/')[-1]
            image_name = name.split('.')[0]
            # prob
            np.save('%s/%s.npy' % (save_prob_path, image_name), output)
            # trainIDs/vis seg maps
            amax_output = Image.fromarray(amax_output)
            amax_output.save('%s/%s.png' % (save_pred_path, image_name))
            amax_output_col.save('%s/%s_color.png' % (save_pred_vis_path, image_name))
            # save class-wise confidence maps
            # 'conf' keeps only the winning-class confidence of each pixel;
            # 'prob' keeps each class's own probability slice.
            if args.kc_value == 'conf':
                for idx_cls in range(args.num_classes):
                    idx_temp = pred_label == idx_cls
                    pred_cls_num[idx_cls] = pred_cls_num[idx_cls] + np.sum(idx_temp)
                    if idx_temp.any():
                        conf_cls_temp = conf[idx_temp].astype(np.float32)
                        len_cls_temp = conf_cls_temp.size
                        # downsampling by ds_rate
                        conf_cls = conf_cls_temp[0:len_cls_temp:args.ds_rate]
                        conf_dict[idx_cls].extend(conf_cls)
            elif args.kc_value == 'prob':
                for idx_cls in range(args.num_classes):
                    idx_temp = pred_label == idx_cls
                    pred_cls_num[idx_cls] = pred_cls_num[idx_cls] + np.sum(idx_temp)
                    # prob slice
                    prob_cls_temp = output[:,:,idx_cls].astype(np.float32).ravel()
                    len_cls_temp = prob_cls_temp.size
                    # downsampling by ds_rate
                    prob_cls = prob_cls_temp[0:len_cls_temp:args.ds_rate]
                    conf_dict[idx_cls].extend(prob_cls) # it should be prob_dict; but for unification, use conf_dict
    logger.info('###### Finish evaluating target domain train set in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx, time.time()-start_eval))
    return conf_dict, pred_cls_num, save_prob_path, save_pred_path # return the dictionary containing all the class-wise confidence vectors
def train(mix_trainloader, model, device, interp, optimizer, tot_iter, round_idx, epoch_idx, args, logger):
    """Run one self-training epoch over the mixed source/target loader and
    save a model snapshot afterwards.

    Loss choice: plain regularized CE when args.lr_weight_ent == 0, the
    soft-label (expanded) variant when it is > 0.
    """
    for step, batch in enumerate(mix_trainloader):
        images, labels, _, _, reg_weights = batch
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        adjust_learning_rate(optimizer, step, tot_iter)
        pred = interp(model(images))
        if args.lr_weight_ent == 0.0:
            loss = reg_loss_calc(pred, labels, reg_weights.to(device), args)
        elif args.lr_weight_ent > 0.0:
            loss = reg_loss_calc_expand(pred, labels, reg_weights.to(device), args)
        loss.backward()
        optimizer.step()
        logger.info('iter = {} of {} completed, loss = {:.4f}'.format(step+1, tot_iter, loss.data.cpu().numpy()))
    # One checkpoint per epoch, tagged with the current self-training round.
    print('taking snapshot ...')
    snapshot_name = args.data_src + '2city_round' + str(round_idx) + '_epoch' + str(epoch_idx+1) + '.pth'
    torch.save(model.state_dict(), osp.join(args.save, snapshot_name))
def test(model, device, save_round_eval_path, round_idx, tgt_set, test_num, test_list, label_2_id, valid_labels, args, logger):
    """Evaluate `model` on the target-domain split named by `tgt_set`.

    Runs multi-scale (and optionally flipped) inference over the split,
    accumulates per-class IoU through ScoreUpdater, and saves colorized
    segmentation maps under `save_round_eval_path`.
    """
    ## scorer
    scorer = ScoreUpdater(valid_labels, args.num_classes, test_num, logger)
    scorer.reset()
    h, w = map(int, args.test_image_size.split(','))
    test_image_size = (h, w)
    test_size = ( h, w )
    # One forward pass per scale; outputs are averaged at the end.
    test_scales = [float(_) for _ in str(args.test_scale).split(',')]
    num_scales = len(test_scales)
    ## test data loader
    testloader = data.DataLoader(GTA5TestDataSet(args.data_tgt_dir, test_list, test_size=test_size, test_scale=1.0, mean=IMG_MEAN, std=IMG_STD, scale=False, mirror=False),
                                    batch_size=1, shuffle=False, pin_memory=args.pin_memory)
    ## model for evaluation
    # eval_training keeps batchnorm in training mode during evaluation.
    if args.eval_training:
        model.train()
    else:
        model.eval()
    #
    model.to(device)
    ## upsampling layer
    # align_corners only exists from torch 0.4.0 on.
    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=test_image_size, mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=test_image_size, mode='bilinear')
    ## output of deeplab is logits, not probability
    # NOTE(review): softmax2d is never used in this function — confirm intended.
    softmax2d = nn.Softmax2d()
    ## output folder
    # NOTE(review): any tgt_set value other than 'train'/'test' leaves
    # save_test_vis_path undefined and crashes below.
    if tgt_set == 'train':
        save_test_vis_path = osp.join(save_round_eval_path, 'trainSet_vis')
    elif tgt_set == 'test':
        save_test_vis_path = osp.join(save_round_eval_path, 'testSet_vis')
    if not os.path.exists(save_test_vis_path):
        os.makedirs(save_test_vis_path)
    ## evaluation process
    logger.info('###### Start evaluating in target domain {} set in round {}! ######'.format(tgt_set, round_idx))
    start_eval = time.time()
    with torch.no_grad():
        for index, batch in enumerate(testloader):
            image, label, _, name = batch
            # Keep the unresized image so each scale starts from the original.
            img = image.clone()
            for scale_idx in range(num_scales):
                if version.parse(torch.__version__) > version.parse('0.4.0'):
                    image = F.interpolate(img, scale_factor=test_scales[scale_idx], mode='bilinear', align_corners=True)
                else:
                    test_size = (int(h * test_scales[scale_idx]), int(w * test_scales[scale_idx]))
                    interp_tmp = nn.Upsample(size=test_size, mode='bilinear', align_corners=True)
                    image = interp_tmp(img)
                if args.model == 'DeeplabRes':
                    output2 = model(image.to(device))
                    # interp resizes logits back to the common evaluation size.
                    coutput = interp(output2).cpu().data[0].numpy()
                if args.test_flipping:
                    # Average with the horizontally-flipped prediction.
                    output2 = model(torch.from_numpy(image.numpy()[:,:,:,::-1].copy()).to(device))
                    coutput = 0.5 * ( coutput + interp(output2).cpu().data[0].numpy()[:,:,::-1] )
                if scale_idx == 0:
                    output = coutput.copy()
                else:
                    output = output+coutput
            output = output/num_scales
            # (C, H, W) -> (H, W, C) so argmax over the last axis is the class.
            output = output.transpose(1,2,0)
            amax_output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
            # score
            pred_label = amax_output.copy()
            label = label_2_id[np.asarray(label.numpy(), dtype=np.uint8)]
            scorer.update(pred_label.flatten(), label.flatten(), index)
            # save visualized seg maps & predication prob map
            amax_output_col = colorize_mask(amax_output)
            name = name[0].split('/')[-1]
            image_name = name.split('.')[0]
            # vis seg maps
            amax_output_col.save('%s/%s_color.png' % (save_test_vis_path, image_name))
    logger.info('###### Finish evaluating in target domain {} set in round {}! Time cost: {:.2f} seconds. ######'.format(tgt_set, round_idx, time.time()-start_eval))
    return
def kc_parameters(conf_dict, pred_cls_num, tgt_portion, round_idx, save_stats_path, args, logger):
    """Compute the class-wise confidence thresholds used for pseudo-label
    selection, plus the ids of rare / to-be-mined classes.

    Args:
        conf_dict: dict mapping class index -> list of sampled confidences.
            Entries are consumed and replaced with None to release memory.
        pred_cls_num: per-class predicted pixel counts.
        tgt_portion: fraction of each class's pixels to select this round.
        round_idx: current self-training round (used in output file names).
        save_stats_path: existing directory where .npy stats are written.
        args: needs num_classes, kc_policy, kc_value, mine_port, rare_cls_num.
        logger: progress logger.

    Returns:
        float32 array of per-class thresholds (1.0 for classes where nothing
        was selected, so no pixel of that class passes the threshold test).
    """
    logger.info('###### Start kc generation in round {} ! ######'.format(round_idx))
    start_kc = time.time()
    # threshold for each class
    cls_thresh = np.ones(args.num_classes,dtype = np.float32)
    cls_sel_size = np.zeros(args.num_classes, dtype=np.float32)
    cls_size = np.zeros(args.num_classes, dtype=np.float32)
    if args.kc_policy == 'cb' and args.kc_value == 'conf':
        for idx_cls in np.arange(0, args.num_classes):
            cls_size[idx_cls] = pred_cls_num[idx_cls]
            # BUGFIX: compare against None with 'is not', not '!=' (PEP 8;
            # '!=' invokes __ne__ and is the wrong identity test).
            if conf_dict[idx_cls] is not None:
                conf_dict[idx_cls].sort(reverse=True) # sort in descending order
                len_cls = len(conf_dict[idx_cls])
                cls_sel_size[idx_cls] = int(math.floor(len_cls * tgt_portion))
                len_cls_thresh = int(cls_sel_size[idx_cls])
                if len_cls_thresh != 0:
                    # Threshold = smallest confidence inside the selected top portion.
                    cls_thresh[idx_cls] = conf_dict[idx_cls][len_cls_thresh-1]
                conf_dict[idx_cls] = None  # release the confidence samples
    # threshold for mine_id with priority: classes whose predicted-pixel share
    # is below mine_port are candidates for mining.
    num_mine_id = len(np.nonzero(cls_size / np.sum(cls_size) < args.mine_port)[0])
    # chose the smallest mine_id
    id_all = np.argsort(cls_size / np.sum(cls_size))
    rare_id = id_all[:args.rare_cls_num]
    mine_id = id_all[:num_mine_id] # sort mine_id in ascending order w.r.t predication portions
    # save mine ids
    np.save(save_stats_path + '/rare_id_round' + str(round_idx) + '.npy', rare_id)
    np.save(save_stats_path + '/mine_id_round' + str(round_idx) + '.npy', mine_id)
    logger.info('Mining ids : {}! {} rarest ids: {}!'.format(mine_id,args.rare_cls_num,rare_id))
    # save thresholds
    np.save(save_stats_path + '/cls_thresh_round' + str(round_idx) + '.npy', cls_thresh)
    np.save(save_stats_path + '/cls_sel_size_round' + str(round_idx) + '.npy', cls_sel_size)
    logger.info('###### Finish kc generation in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx,time.time() - start_kc))
    return cls_thresh
def label_selection(cls_thresh, tgt_num, image_name_tgt_list, id_2_label, round_idx, save_prob_path, save_pred_path, save_pseudo_label_path, save_pseudo_label_color_path, save_round_eval_path, args, logger):
    """Generate and save pseudo-labels for every target-domain image.

    For each image, loads the saved probability map and hard prediction,
    re-weights probabilities by the class-wise thresholds `cls_thresh`, and
    writes (a) colorized pseudo-label maps and (b) labelID pseudo-label maps
    to disk. When args.lr_weight_ent > 0 it additionally builds and saves
    soft pseudo-labels (.npy). Pixels that do not pass selection are set to
    labelID 0 / trainID 255 ('unlabeled' in cityscapes).
    """
    logger.info('###### Start pseudo-label generation in round {} ! ######'.format(round_idx))
    start_pl = time.time()
    for idx in range(tgt_num):
        sample_name = image_name_tgt_list[idx].split('.')[0]
        probmap_path = osp.join(save_prob_path, '{}.npy'.format(sample_name))
        pred_path = osp.join(save_pred_path, '{}.png'.format(sample_name))
        pred_prob = np.load(probmap_path)
        pred_label_trainIDs = np.asarray(Image.open(pred_path))
        pred_label_labelIDs = id_2_label[pred_label_trainIDs]
        # copy() because the array backing Image.open is read-only.
        pred_label_trainIDs = pred_label_trainIDs.copy()
        if args.kc_policy == 'cb' and args.lr_weight_ent == 0.0:
            # Hard pseudo-labels: argmax of the threshold-normalized probability.
            save_wpred_vis_path = osp.join(save_round_eval_path, 'weighted_pred_vis')
            if not os.path.exists(save_wpred_vis_path):
                os.makedirs(save_wpred_vis_path)
            weighted_prob = pred_prob/cls_thresh
            weighted_pred_trainIDs = np.asarray(np.argmax(weighted_prob, axis=2), dtype=np.uint8)
            # save weighted predication
            wpred_label_col = weighted_pred_trainIDs.copy()
            wpred_label_col = colorize_mask(wpred_label_col)
            wpred_label_col.save('%s/%s_color.png' % (save_wpred_vis_path, sample_name))
            weighted_conf = np.amax(weighted_prob, axis=2)
            pred_label_trainIDs = weighted_pred_trainIDs.copy()
            pred_label_labelIDs = id_2_label[pred_label_trainIDs]
            # A pixel is selected iff its best class exceeds that class's threshold
            # (i.e. normalized confidence >= 1).
            pred_label_labelIDs[weighted_conf < 1] = 0 # '0' in cityscapes indicates 'unlabaled' for labelIDs
            pred_label_trainIDs[weighted_conf < 1] = 255 # '255' in cityscapes indicates 'unlabaled' for trainIDs
        elif args.kc_policy == 'cb' and args.lr_weight_ent > 0.0: # check if cb can be combined with kc_value == conf or prob; also check if \alpha can be larger than 1
            save_wpred_vis_path = osp.join(save_round_eval_path, 'weighted_pred_vis')
            if not os.path.exists(save_wpred_vis_path):
                os.makedirs(save_wpred_vis_path)
            # soft pseudo-label
            soft_pseudo_label = np.power(pred_prob/cls_thresh,1.0/args.lr_weight_ent) # weighted softmax with temperature
            # Normalize per pixel so the soft label is a distribution again.
            soft_pseudo_label_sum = soft_pseudo_label.sum(2)
            soft_pseudo_label = soft_pseudo_label.transpose(2,0,1)/soft_pseudo_label_sum
            soft_pseudo_label = soft_pseudo_label.transpose(1,2,0).astype(np.float32)
            np.save('%s/%s.npy' % (save_pseudo_label_path, sample_name), soft_pseudo_label)
            # hard pseudo-label
            weighted_pred_trainIDs = np.asarray(np.argmax(soft_pseudo_label, axis=2), dtype=np.uint8)
            # reg_score: CE + entropy regularizer; sel_score: selection bound.
            # 1e-32 guards log(0).
            reg_score = np.sum( -soft_pseudo_label*np.log(pred_prob+1e-32) + args.lr_weight_ent*soft_pseudo_label*np.log(soft_pseudo_label+1e-32), axis=2)
            sel_score = np.sum( -soft_pseudo_label*np.log(cls_thresh+1e-32), axis=2)
            # save weighted predication
            wpred_label_col = weighted_pred_trainIDs.copy()
            wpred_label_col = colorize_mask(wpred_label_col)
            wpred_label_col.save('%s/%s_color.png' % (save_wpred_vis_path, sample_name))
            pred_label_trainIDs = weighted_pred_trainIDs.copy()
            pred_label_labelIDs = id_2_label[pred_label_trainIDs]
            pred_label_labelIDs[reg_score >= sel_score] = 0 # '0' in cityscapes indicates 'unlabaled' for labelIDs
            pred_label_trainIDs[reg_score >= sel_score] = 255 # '255' in cityscapes indicates 'unlabaled' for trainIDs
        # pseudo-labels with labelID
        pseudo_label_labelIDs = pred_label_labelIDs.copy()
        pseudo_label_trainIDs = pred_label_trainIDs.copy()
        # save colored pseudo-label map
        pseudo_label_col = colorize_mask(pseudo_label_trainIDs)
        pseudo_label_col.save('%s/%s_color.png' % (save_pseudo_label_color_path, sample_name))
        # save pseudo-label map with label IDs
        pseudo_label_save = Image.fromarray(pseudo_label_labelIDs.astype(np.uint8))
        pseudo_label_save.save('%s/%s.png' % (save_pseudo_label_path, sample_name))
    # remove probability maps
    if args.rm_prob:
        shutil.rmtree(save_prob_path)
    logger.info('###### Finish pseudo-label generation in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx,time.time() - start_pl))
def parse_split_list(list_name):
    """Parse a tab-separated split file (one `image<TAB>label` pair per line).

    Returns:
        (image_list, image_name_list, label_list, file_num) where
        image_name_list holds the basename of each image path and file_num
        is the number of parsed lines.
    """
    image_list, image_name_list, label_list = [], [], []
    with open(list_name) as fh:
        for line in fh:
            fields = line.strip().split('\t')
            image_list.append(fields[0])
            image_name_list.append(fields[0].split('/')[-1])
            label_list.append(fields[1])
    return image_list, image_name_list, label_list, len(image_list)
def savelst_SrcTgt(src_portion, image_tgt_list, image_name_tgt_list, image_src_list, label_src_list, save_lst_path, save_pseudo_label_path, src_num, tgt_num, randseed, args):
    """Write the source/target training list files for the next round.

    A random `src_portion` of the source images is sampled (seeded with
    `randseed` for reproducibility); every target image is paired with its
    pseudo-label (and, when args.lr_weight_ent > 0, its soft-label .npy).

    Returns:
        (src_train_lst, tgt_train_lst, src_num_sel): the two list-file paths
        and the number of selected source images.
    """
    src_num_sel = int(np.floor(src_num*src_portion))
    np.random.seed(randseed)
    sel_idx = list( np.random.choice(src_num, src_num_sel, replace=False) )
    # BUGFIX: the previous itemgetter(*sel_idx) returned a bare string when
    # exactly one index was selected (list() then split it into characters)
    # and raised TypeError with zero indices; plain indexing handles all sizes.
    sel_src_img_list = [image_src_list[i] for i in sel_idx]
    sel_src_label_list = [label_src_list[i] for i in sel_idx]
    src_train_lst = osp.join(save_lst_path,'src_train.lst')
    tgt_train_lst = osp.join(save_lst_path, 'tgt_train.lst')
    # generate src train list
    with open(src_train_lst, 'w') as f:
        for idx in range(src_num_sel):
            f.write("%s\t%s\n" % (sel_src_img_list[idx], sel_src_label_list[idx]))
    # generate tgt train list; the soft-label variant writes a third column
    # pointing at the .npy soft pseudo-label.
    if args.lr_weight_ent > 0:
        with open(tgt_train_lst, 'w') as f:
            for idx in range(tgt_num):
                softlabel_name = image_name_tgt_list[idx].split('.')[0] + '.npy'
                soft_label_tgt_path = osp.join(save_pseudo_label_path, softlabel_name)
                image_tgt_path = osp.join(save_pseudo_label_path,image_name_tgt_list[idx])
                f.write("%s\t%s\t%s\n" % (image_tgt_list[idx], image_tgt_path, soft_label_tgt_path))
    elif args.lr_weight_ent == 0:
        with open(tgt_train_lst, 'w') as f:
            for idx in range(tgt_num):
                image_tgt_path = osp.join(save_pseudo_label_path,image_name_tgt_list[idx])
                f.write("%s\t%s\n" % (image_tgt_list[idx], image_tgt_path))
    return src_train_lst, tgt_train_lst, src_num_sel
class ScoreUpdater(object):
    """Accumulates a confusion matrix across evaluated samples and reports
    per-class and mean intersection-over-union (IoU).

    Only IoU is computed; accuracy-style metrics are intentionally ignored.
    """

    def __init__(self, valid_labels, c_num, x_num, logger=None, label=None, info=None):
        self._valid_labels = valid_labels
        self._num_class = c_num
        self._num_sample = x_num
        self._confs = np.zeros((c_num, c_num))
        self._per_cls_iou = np.zeros(c_num)
        self._logger = logger
        self._label = label
        self._info = info

    @property
    def info(self):
        return self._info

    def reset(self):
        """Clear accumulated statistics and restart the timer."""
        self._start = time.time()
        self._computed = np.zeros(self._num_sample)  # one done-flag per sample
        self._confs[:] = 0

    def fast_hist(self, label, pred_label, n):
        """n x n confusion matrix of one flattened label/prediction pair."""
        valid = (label >= 0) & (label < n)
        flat_idx = n * label[valid].astype(int) + pred_label[valid]
        return np.bincount(flat_idx, minlength=n ** 2).reshape(n, n)

    def per_class_iu(self, hist):
        """Per-class IoU: diag / (row sum + col sum - diag)."""
        tp = np.diag(hist)
        return tp / (hist.sum(1) + hist.sum(0) - tp)

    def do_updates(self, conf, i, computed=True):
        if computed:
            self._computed[i] = 1
        self._per_cls_iou = self.per_class_iu(conf)

    def update(self, pred_label, label, i, computed=True):
        """Fold sample i into the running confusion matrix and log scores."""
        self._confs += self.fast_hist(label, pred_label, self._num_class)
        self.do_updates(self._confs, i, computed)
        self.scores(i)

    def scores(self, i=None, logger=None):
        """Return per-class IoUs (NaN mapped to 0) and optionally log them."""
        x_num = self._num_sample
        ious = np.nan_to_num(self._per_cls_iou)
        logger = self._logger if logger is None else logger
        if logger is not None:
            if i is not None:
                speed = 1. * self._computed.sum() / (time.time() - self._start)
                logger.info('Done {}/{} with speed: {:.2f}/s'.format(i + 1, x_num, speed))
            name = '' if self._label is None else '{}, '.format(self._label)
            logger.info('{}mean iou: {:.2f}%'. \
                format(name, np.mean(ious) * 100))
            with util.np_print_options(formatter={'float': '{:5.2f}'.format}):
                logger.info('\n{}'.format(ious * 100))
        return ious
def loss_calc(pred, label):
    """
    Cross-entropy loss for semantic segmentation.

    pred: (batch, channels, h, w) logits; label: (batch, h, w) class indices.
    Pixels labelled IGNORE_LABEL do not contribute to the loss.
    """
    return torch.nn.CrossEntropyLoss(ignore_index=IGNORE_LABEL).cuda()(pred, label)
def reg_loss_calc(pred, label, reg_weights, args):
    """
    Regularized cross-entropy loss against hard pseudo-labels.

    pred:        (batch, channels, h, w) logits.
    label:       (batch, h, w) integer class ids; IGNORE_LABEL marks pixels
                 excluded from the loss.
    reg_weights: per-sample weights; non-zero entries mark samples that
                 receive the KLD regularizer.
    args:        provides mr_weight_kld and num_classes.
    """
    # out shape batch_size x channels x h x w -> batch_size x channels x h x w
    # label shape h x w x 1 x batch_size -> batch_size x 1 x h x w
    mr_weight_kld = args.mr_weight_kld
    num_class = float(args.num_classes)
    # Non-ignored pixel counts, overall and within regularized samples.
    valid_num = torch.sum(label != IGNORE_LABEL).float()
    label_reg = label[reg_weights != 0,:,:]
    valid_reg_num = torch.sum(label_reg != IGNORE_LABEL).float()
    softmax = F.softmax(pred, dim=1) # compute the softmax values
    logsoftmax = F.log_softmax(pred,dim=1) # compute the log of softmax values
    # One-hot encode `label` without scatter: replicate it across channels...
    label_expand = torch.unsqueeze(label, 1).repeat(1,int(num_class),1,1)
    labels = label_expand.clone()
    # ...turn it into a per-channel validity mask (1 on non-ignored pixels)...
    labels[labels != IGNORE_LABEL] = 1.0
    labels[labels == IGNORE_LABEL] = 0.0
    labels_valid = labels.clone()
    # labels = torch.unsqueeze(labels, 1).repeat(1,num_class,1,1)
    # ...then cumsum along channels yields (1, 2, ..., C) per valid pixel;
    # exactly the channel equal to label+1 survives, giving a one-hot vector.
    labels = torch.cumsum(labels, dim=1)
    labels[labels != label_expand + 1] = 0.0
    del label_expand
    labels[labels != 0 ] = 1.0
    ### check the vectorized labels
    # check_labels = torch.argmax(labels, dim=1)
    # label[label == 255] = 0
    # print(torch.sum(check_labels.float() - label))
    reg_weights = reg_weights.float().view(len(reg_weights),1,1,1)
    ce = torch.sum( -logsoftmax*labels ) # cross-entropy loss with vector-form softmax
    # NOTE(review): softmax_val is computed but never used below — presumably
    # left over from an earlier regularizer variant; confirm before removing.
    softmax_val = softmax*labels_valid
    logsoftmax_val = logsoftmax*labels_valid
    # Cross-entropy with the uniform distribution (KLD up to a constant),
    # restricted to valid pixels of regularized samples.
    kld = torch.sum( -logsoftmax_val/num_class*reg_weights )
    if valid_reg_num > 0:
        reg_ce = ce/valid_num + (mr_weight_kld*kld)/valid_reg_num
    else:
        # No regularized pixels in the batch: plain averaged CE.
        reg_ce = ce/valid_num
    return reg_ce
def reg_loss_calc_expand(pred, label, reg_weights, args):
    """
    Regularized cross-entropy loss against *soft* pseudo-labels.

    pred:        (batch, channels, h, w) logits.
    label:       (batch, channels, h, w) soft label distributions; ignored
                 pixels carry an all-zero distribution (set by the loader).
    reg_weights: per-sample weights; non-zero entries mark samples that
                 receive the KLD regularizer.
    args:        provides mr_weight_kld and num_classes.

    Returns a scalar tensor: mean CE plus, when any regularized valid pixels
    exist, the weighted KLD term.
    """
    mr_weight_kld = args.mr_weight_kld
    num_class = float(args.num_classes)
    # A pixel is valid iff its soft-label distribution is not all-zero.
    label_sum = torch.sum(label,1)
    valid_num = torch.sum(label_sum != 0.0).float()
    # Valid pixels restricted to the regularized samples.
    label_reg = label_sum[reg_weights != 0,:,:]
    valid_reg_num = torch.sum(label_reg != 0.0).float()
    logsoftmax = F.log_softmax(pred,dim=1) # log of softmax values
    # BUGFIX: Tensor.repeat() requires integer counts; num_class is a float
    # here, which raises TypeError on current PyTorch (the sibling
    # reg_loss_calc already casts with int()).
    label_expand = torch.unsqueeze(label_sum, 1).repeat(1,int(num_class),1,1)
    # Per-channel validity mask: 1.0 on valid pixels, 0.0 elsewhere.
    label_valid = label_expand.clone()
    label_valid[label_valid != 0] = 1.0
    reg_weights = reg_weights.float().view(len(reg_weights),1,1,1)
    ce = torch.sum( -logsoftmax*label ) # cross-entropy with soft labels
    logsoftmax_val = logsoftmax*label_valid
    # Cross-entropy with the uniform distribution (KLD up to a constant),
    # restricted to valid pixels of regularized samples.
    kld = torch.sum( -logsoftmax_val/num_class*reg_weights )
    if valid_reg_num > 0:
        reg_ce = ce/valid_num + (mr_weight_kld*kld)/valid_reg_num
    else:
        # No regularized pixels in the batch: plain averaged CE.
        reg_ce = ce/valid_num
    return reg_ce
def lr_poly(base_lr, iter, max_iter, power):
    """Polynomial learning-rate decay: base_lr * (1 - iter/max_iter) ** power."""
    decay = (1 - float(iter) / max_iter) ** power
    return base_lr * decay
def get_1x_lr_params_NOscale(model):
    """
    Yield the backbone parameters (everything except the final classifier)
    that are trained at the base learning rate. Batchnorm parameters have
    requires_grad set to False in deeplab_resnet.py and are skipped here.
    """
    backbone = (model.conv1, model.bn1, model.layer1, model.layer2,
                model.layer3, model.layer4)
    for block in backbone:
        for module in block.modules():
            for param in module.parameters():
                if param.requires_grad:
                    yield param
def get_10x_lr_params(model):
    """
    Yield the parameters of the final classification layer (layer5), which
    is trained at 10x the base learning rate.
    """
    for param in model.layer5.parameters():
        yield param
def adjust_learning_rate(optimizer, i_iter, tot_iter):
    """Apply polynomial LR decay; param group 1 (classifier) gets 10x group 0."""
    new_lr = lr_poly(args.learning_rate, i_iter, tot_iter, args.power)
    optimizer.param_groups[0]['lr'] = new_lr
    optimizer.param_groups[1]['lr'] = new_lr * 10
if __name__ == '__main__':
main()
| 47,013 | 48.229319 | 225 | py |
CRST | CRST-master/train.py | import argparse
import torch
import torch.nn as nn
from torch.utils import data
import numpy as np
import pickle
import cv2
import torch.optim as optim
import scipy.misc
import torch.backends.cudnn as cudnn
import sys
import os
import os.path as osp
from deeplab.model import Res_Deeplab
from deeplab.loss import CrossEntropy2d
from deeplab.datasets import GTA5DataSet
import matplotlib.pyplot as plt
import random
import timeit
import torchvision.transforms as transforms
import util
start = timeit.default_timer()
#IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
IMG_MEAN = np.array((0.406, 0.456, 0.485), dtype=np.float32) # BGR
IMG_STD = np.array((0.225, 0.224, 0.229), dtype=np.float32) # BGR
# IMG_MEAN = [0.485, 0.456, 0.406]
# IMG_STD = [0.229, 0.224, 0.225]
BATCH_SIZE = 4
DATA_DIRECTORY = './datasets/gta5'
DATA_LIST_PATH = './dataset/list/gta5/train.lst'
NUM_CLASSES = 19
IGNORE_LABEL = 255
INPUT_SIZE = '500,500'
TRAIN_SCALE = '0.5,1.5'
LEARNING_RATE = 2.5e-4
MOMENTUM = 0.9
NUM_STEPS = 100000
POWER = 0.9
RANDOM_SEED = 1234
RESTORE_FROM = ''
SAVE_NUM_IMAGES = 2
SAVE_PRED_EVERY = 5000
SNAPSHOT_DIR = './gta_src_train/'
WEIGHT_DECAY = 0.0005
MODEL = 'DeeplabRes101'
LOG_FILE = 'log'
PIN_MEMORY = True
GPU = '0'
def get_arguments():
    """Parse all the arguments provided from the CLI.
    Returns:
      A list of parsed arguments.
    """
    parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
    # --- data ---
    parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
                        help="Path to the directory containing the PASCAL VOC dataset.")
    parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
                        help="Path to the file listing the images in the dataset.")
    parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                        help="The index of the label to ignore during the training.")
    parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
                        help="Comma-separated string with height and width of images.")
    # --- optimization ---
    parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
                        help="Base learning rate for training with polynomial decay.")
    parser.add_argument("--momentum", type=float, default=MOMENTUM,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--model", type=str, default=MODEL,
                        help="The base network.")
    parser.add_argument("--not-restore-last", action="store_true",
                        help="Whether to not restore last (FC) layers.")
    parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--num-steps", type=int, default=NUM_STEPS,
                        help="Number of training steps.")
    parser.add_argument("--power", type=float, default=POWER,
                        help="Decay parameter to compute the learning rate.")
    # --- augmentation ---
    parser.add_argument("--random-mirror", action="store_true",
                        help="Whether to randomly mirror the inputs during the training.")
    parser.add_argument("--random-scale", action="store_true",
                        help="Whether to randomly scale the inputs during the training.")
    parser.add_argument("--train-scale", type=str, default=TRAIN_SCALE,
                        help="The scale for multi-scale training.")
    parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
                        help="Random seed to have reproducible results.")
    # --- checkpointing / logging ---
    parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
                        help="Where restore model parameters from.")
    parser.add_argument("--save-num-images", type=int, default=SAVE_NUM_IMAGES,
                        help="How many images to save.")
    parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY,
                        help="Save summaries and checkpoint every often.")
    parser.add_argument("--snapshot-dir", type=str, default=SNAPSHOT_DIR,
                        help="Where to save snapshots of the model.")
    parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
                        help="Regularisation parameter for L2-loss.")
    parser.add_argument("--gpu", type=str, default=GPU,
                        help="choose gpu device.")
    # NOTE(review): type=bool here means any non-empty string on the command
    # line parses as True (bool('False') is True) — confirm intended.
    parser.add_argument("--pin-memory", type=bool, default=PIN_MEMORY,
                        help="Whether to pin memory in train & eval.")
    parser.add_argument("--log-file", type=str, default=LOG_FILE,
                        help="The name of log file.")
    parser.add_argument('--debug',help='True means logging debug info.',
                        default=False, action='store_true')
    return parser.parse_args()
args = get_arguments()
def loss_calc(pred, label):
    """
    Cross-entropy loss for semantic segmentation.

    pred: (batch, channels, h, w) logits; label: (batch, h, w) class indices.
    Pixels equal to IGNORE_LABEL are excluded from the loss.
    """
    criterion = torch.nn.CrossEntropyLoss(ignore_index=IGNORE_LABEL)
    return criterion.cuda()(pred, label)
def lr_poly(base_lr, iter, max_iter, power):
    """Polynomial decay schedule: base_lr * (1 - iter/max_iter) ** power."""
    remaining = 1 - float(iter) / max_iter
    return base_lr * remaining ** power
def get_1x_lr_params_NOscale(model):
    """
    Generator over the backbone parameters (conv1/bn1/layer1-4) trained at
    the base learning rate. Batchnorm parameters are created with
    requires_grad=False in deeplab_resnet.py, so they are filtered out here.
    """
    for part in [model.conv1, model.bn1, model.layer1, model.layer2,
                 model.layer3, model.layer4]:
        for sub in part.modules():
            for p in sub.parameters():
                if p.requires_grad:
                    yield p
def get_10x_lr_params(model):
    """
    Generator over the classifier (layer5) parameters, which are trained at
    10x the base learning rate.
    """
    yield from model.layer5.parameters()
def adjust_learning_rate(optimizer, i_iter):
    """Polynomially decay the learning rate with iteration `i_iter`.

    Uses lr_poly over args.num_steps with exponent args.power. Param group 0
    (backbone) gets the decayed base LR; group 1 (classifier) gets 10x.

    NOTE: the previous docstring ("divided by 5 at 60th, 120th and 160th
    epochs") was copied from a step-decay schedule and did not describe this
    code, which applies polynomial decay.
    """
    lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = lr
    optimizer.param_groups[1]['lr'] = lr * 10
def main():
    """Create the model and start the training.

    Builds a DeepLab-ResNet101 initialized from ImageNet weights (downloaded
    via model_zoo), trains it on the GTA5 source set with polynomial LR
    decay, and periodically saves snapshots to args.snapshot_dir.
    """
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
    logger = util.set_logger(args.snapshot_dir, args.log_file, args.debug)
    logger.info('start with arguments %s', args)
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)
    lscale, hscale = map(float, args.train_scale.split(','))
    train_scale = (lscale, hscale)
    cudnn.enabled = True
    # Create network.
    model = Res_Deeplab(num_classes=args.num_classes)
    #saved_state_dict = torch.load(args.restore_from)
    #new_params = model.state_dict().copy()
    #for i in saved_state_dict:
    #    #Scale.layer5.conv2d_list.3.weight
    #    i_parts = i.split('.')
    #    # print i_parts
    #    if not args.num_classes == 21 or not i_parts[1]=='layer5':
    #        new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    # ImageNet-pretrained ResNet weights from the official torchvision URLs;
    # only 'resnet101' is actually used below.
    model_urls = {
        'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
        'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
        'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
        'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
        'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    }
    saved_state_dict = torch.utils.model_zoo.load_url(model_urls['resnet101'])
    # coco pretrained parameters:
    # saved_state_dict = torch.load(args.restore_from)
    # Copy every pretrained tensor except the ImageNet fc classifier.
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
        i_parts = str(i).split('.')
        # print i_parts
        if not i_parts[0]=='fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    model.load_state_dict(new_params)
    #model.float()
    # eval() freezes batchnorm running stats during training.
    model.eval() # use_global_stats = True
    #model.train()
    device = torch.device("cuda:" + str(args.gpu))
    model.to(device)
    cudnn.benchmark = True
    trainloader = data.DataLoader(GTA5DataSet(args.data_dir, args.data_list, max_iters=args.num_steps*args.batch_size, crop_size=input_size,train_scale=train_scale,
                    scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN, std = IMG_STD),
                    batch_size=args.batch_size, shuffle=True, num_workers=5, pin_memory=args.pin_memory)
    # Two param groups: backbone at base LR, classifier (layer5) at 10x.
    optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.learning_rate },
                {'params': get_10x_lr_params(model), 'lr': 10*args.learning_rate}],
                lr=args.learning_rate, momentum=args.momentum,weight_decay=args.weight_decay)
    optimizer.zero_grad()
    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)
    for i_iter, batch in enumerate(trainloader):
        images, labels, _, _ = batch
        images = images.to(device)
        labels = labels.long().to(device)
        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)
        pred = interp(model(images))
        loss = loss_calc(pred, labels)
        loss.backward()
        optimizer.step()
        # print('iter = ', i_iter, 'of', args.num_steps,'completed, loss = ', loss.data.cpu().numpy())
        logger.info('iter = {} of {} completed, loss = {:.4f}'.format(i_iter,args.num_steps,loss.data.cpu().numpy()))
        # Final snapshot, then stop.
        if i_iter >= args.num_steps-1:
            print('save model ...')
            torch.save(model.state_dict(),osp.join(args.snapshot_dir, 'VOC12_scenes_'+str(args.num_steps)+'.pth'))
            break
        # Periodic snapshot.
        if i_iter % args.save_pred_every == 0 and i_iter!=0:
            print('taking snapshot ...')
            torch.save(model.state_dict(),osp.join(args.snapshot_dir, 'VOC12_scenes_'+str(i_iter)+'.pth'))
    end = timeit.default_timer()
    # `start` is the module-level timer started at import time.
    print(end-start,'seconds')
if __name__ == '__main__':
main()
| 10,806 | 39.324627 | 164 | py |
CRST | CRST-master/dataset/helpers/labels.py | #!/usr/bin/python
#
# Cityscapes labels
#
from collections import namedtuple
#--------------------------------------------------------------------------------
# Definitions
#--------------------------------------------------------------------------------
# a label and all meta information
Label = namedtuple( 'Label' , [
'name' , # The identifier of this label, e.g. 'car', 'person', ... .
# We use them to uniquely name a class
'id' , # An integer ID that is associated with this label.
# The IDs are used to represent the label in ground truth images
# An ID of -1 means that this label does not have an ID and thus
# is ignored when creating ground truth images (e.g. license plate).
# Do not modify these IDs, since exactly these IDs are expected by the
# evaluation server.
'trainId' , # Feel free to modify these IDs as suitable for your method. Then create
# ground truth images with train IDs, using the tools provided in the
# 'preparation' folder. However, make sure to validate or submit results
# to our evaluation server using the regular IDs above!
# For trainIds, multiple labels might have the same ID. Then, these labels
# are mapped to the same class in the ground truth images. For the inverse
# mapping, we use the label that is defined first in the list below.
# For example, mapping all void-type classes to the same ID in training,
# might make sense for some approaches.
# Max value is 255!
'category' , # The name of the category that this label belongs to
'categoryId' , # The ID of this category. Used to create ground truth images
# on category level.
'hasInstances', # Whether this label distinguishes between single instances or not
'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
# during evaluations or not
'color' , # The color of this label
] )
#--------------------------------------------------------------------------------
# A list of all labels
#--------------------------------------------------------------------------------
# Please adapt the train IDs as appropriate for you approach.
# Note that you might want to ignore labels with ID 255 during training.
# Further note that the current train IDs are only a suggestion. You can use whatever you like.
# Make sure to provide your results using the original IDs and not the training IDs.
# Note that many IDs are ignored in evaluation and thus you never need to predict these!
labels = [
# name id trainId category catId hasInstances ignoreInEval color
Label( 'unlabeled' , 0 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'ego vehicle' , 1 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'rectification border' , 2 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'out of roi' , 3 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'static' , 4 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'dynamic' , 5 , 255 , 'void' , 0 , False , True , (111, 74, 0) ),
Label( 'ground' , 6 , 255 , 'void' , 0 , False , True , ( 81, 0, 81) ),
Label( 'road' , 7 , 0 , 'flat' , 1 , False , False , (128, 64,128) ),
Label( 'sidewalk' , 8 , 1 , 'flat' , 1 , False , False , (244, 35,232) ),
Label( 'parking' , 9 , 255 , 'flat' , 1 , False , True , (250,170,160) ),
Label( 'rail track' , 10 , 255 , 'flat' , 1 , False , True , (230,150,140) ),
Label( 'building' , 11 , 2 , 'construction' , 2 , False , False , ( 70, 70, 70) ),
Label( 'wall' , 12 , 3 , 'construction' , 2 , False , False , (102,102,156) ),
Label( 'fence' , 13 , 4 , 'construction' , 2 , False , False , (190,153,153) ),
Label( 'guard rail' , 14 , 255 , 'construction' , 2 , False , True , (180,165,180) ),
Label( 'bridge' , 15 , 255 , 'construction' , 2 , False , True , (150,100,100) ),
Label( 'tunnel' , 16 , 255 , 'construction' , 2 , False , True , (150,120, 90) ),
Label( 'pole' , 17 , 5 , 'object' , 3 , False , False , (153,153,153) ),
Label( 'polegroup' , 18 , 255 , 'object' , 3 , False , True , (153,153,153) ),
Label( 'traffic light' , 19 , 6 , 'object' , 3 , False , False , (250,170, 30) ),
Label( 'traffic sign' , 20 , 7 , 'object' , 3 , False , False , (220,220, 0) ),
Label( 'vegetation' , 21 , 8 , 'nature' , 4 , False , False , (107,142, 35) ),
Label( 'terrain' , 22 , 9 , 'nature' , 4 , False , False , (152,251,152) ),
Label( 'sky' , 23 , 10 , 'sky' , 5 , False , False , ( 70,130,180) ),
Label( 'person' , 24 , 11 , 'human' , 6 , True , False , (220, 20, 60) ),
Label( 'rider' , 25 , 12 , 'human' , 6 , True , False , (255, 0, 0) ),
Label( 'car' , 26 , 13 , 'vehicle' , 7 , True , False , ( 0, 0,142) ),
Label( 'truck' , 27 , 14 , 'vehicle' , 7 , True , False , ( 0, 0, 70) ),
Label( 'bus' , 28 , 15 , 'vehicle' , 7 , True , False , ( 0, 60,100) ),
Label( 'caravan' , 29 , 255 , 'vehicle' , 7 , True , True , ( 0, 0, 90) ),
Label( 'trailer' , 30 , 255 , 'vehicle' , 7 , True , True , ( 0, 0,110) ),
Label( 'train' , 31 , 16 , 'vehicle' , 7 , True , False , ( 0, 80,100) ),
Label( 'motorcycle' , 32 , 17 , 'vehicle' , 7 , True , False , ( 0, 0,230) ),
Label( 'bicycle' , 33 , 18 , 'vehicle' , 7 , True , False , (119, 11, 32) ),
Label( 'license plate' , -1 , -1 , 'vehicle' , 7 , False , True , ( 0, 0,142) ),
]
#--------------------------------------------------------------------------------
# Create dictionaries for a fast lookup
#--------------------------------------------------------------------------------
# Please refer to the main method below for example usages!
# name to label object
name2label = {lbl.name: lbl for lbl in labels}
# id to label object
id2label = {lbl.id: lbl for lbl in labels}
# trainId to label object
# (built over reversed(labels) so that when several labels share a trainId,
# the label defined first in the list wins)
trainId2label = {lbl.trainId: lbl for lbl in reversed(labels)}
# category to list of label objects
category2labels = {}
for lbl in labels:
    category2labels.setdefault(lbl.category, []).append(lbl)
#--------------------------------------------------------------------------------
# Assure single instance name
#--------------------------------------------------------------------------------
# returns the label name that describes a single instance (if possible)
# e.g. input | output
# ----------------------
# car | car
# cargroup | car
# foo | None
# foogroup | None
# skygroup | None
def assureSingleInstanceName( name ):
    """Resolve *name* (possibly a '<base>group' name) to the label name of a
    single instance, or return None when no such instance label exists."""
    # A known plain name already denotes a single-instance label.
    if name in name2label:
        return name
    # Only names of the form '<base>group' can still qualify.
    suffix = "group"
    if name.endswith(suffix):
        base = name[:-len(suffix)]
        # The base must be a known label that actually has instances.
        if base in name2label and name2label[base].hasInstances:
            return base
    return None
#--------------------------------------------------------------------------------
# Main for testing
#--------------------------------------------------------------------------------
# just a dummy main
if __name__ == "__main__":
    # Print every label as one row of a fixed-width table.
    print("List of cityscapes labels:")
    print("")
    print(" {:>21} | {:>3} | {:>7} | {:>14} | {:>10} | {:>12} | {:>12}".format( 'name', 'id', 'trainId', 'category', 'categoryId', 'hasInstances', 'ignoreInEval' ))
    print(" " + ('-' * 98))
    for label in labels:
        print(" {:>21} | {:>3} | {:>7} | {:>14} | {:>10} | {:>12} | {:>12}".format( label.name, label.id, label.trainId, label.category, label.categoryId, label.hasInstances, label.ignoreInEval ))
    print("")
    print("Example usages:")
    # Map from name to label
    name = 'car'
    id = name2label[name].id  # NOTE: shadows the builtin `id` (demo-script scope only)
    print("ID of label '{name}': {id}".format( name=name, id=id ))
    # Map from ID to label
    category = id2label[id].category
    print("Category of label with ID '{id}': {category}".format( id=id, category=category ))
    # Map from trainID to label
    trainId = 0
    name = trainId2label[trainId].name
    print("Name of label with trainID '{id}': {name}".format( id=trainId, name=name ))
| 10,597 | 57.230769 | 199 | py |
CRST | CRST-master/dataset/helpers/labels_synthia.py | #!/usr/bin/python
#
# Cityscapes labels
#
from collections import namedtuple
#--------------------------------------------------------------------------------
# Definitions
#--------------------------------------------------------------------------------
# a label and all meta information
# A label and all of its meta information.
#
#   name          str    unique identifier of the class, e.g. 'car'
#   id            int    ID used in ground-truth images; -1 means the label
#                        has no ID and is skipped when creating GT images
#                        (e.g. license plate). Fixed by the evaluation
#                        server -- do not modify.
#   trainId       int    freely remappable training ID (max 255); several
#                        labels may share one trainId, in which case the
#                        label listed first wins for the inverse mapping
#   category      str    name of the category this label belongs to
#   categoryId    int    ID of that category (category-level GT images)
#   hasInstances  bool   whether single instances are distinguished
#   ignoreInEval  bool   whether GT pixels of this class are ignored
#                        during evaluation
#   color         tuple  color used to visualize this label
Label = namedtuple('Label', [
    'name',
    'id',
    'trainId',
    'category',
    'categoryId',
    'hasInstances',
    'ignoreInEval',
    'color',
])
#--------------------------------------------------------------------------------
# A list of all labels
#--------------------------------------------------------------------------------
# Please adapt the train IDs as appropriate for you approach.
# Note that you might want to ignore labels with ID 255 during training.
# Further note that the current train IDs are only a suggestion. You can use whatever you like.
# Make sure to provide your results using the original IDs and not the training IDs.
# Note that many IDs are ignored in evaluation and thus you never need to predict these!
# SYNTHIA label table. The `trainId` column maps each SYNTHIA id onto the
# shared (Cityscapes-style) training id space; 255 marks classes that are
# dropped from training.
# NOTE(review): the `category`/`categoryId` columns look inconsistent with
# the class semantics (e.g. 'sky' filed under 'void', 'car' under 'flat') --
# they appear unused for training, but confirm before relying on them.
# NOTE: 'road' (id 3) and 'lanemarking' (id 22) share trainId 0; because
# trainId2label is built over reversed(labels), 'road' wins the inverse map.
labels = [
    #     name             id   trainId   category         catId  hasInstances  ignoreInEval  color
    Label( 'void' , 0 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
    Label( 'sky' , 1 , 9 , 'void' , 0 , False , True , ( 70,130,180) ),
    Label( 'building' , 2 , 2 , 'void' , 0 , False , True , ( 70,70,70) ),
    Label( 'road' , 3 , 0 , 'void' , 0 , False , True , ( 128,64,128) ),
    Label( 'sidewalk' , 4 , 1 , 'void' , 0 , False , True , ( 244,35,232) ),
    Label( 'fence' , 5 , 4 , 'void' , 0 , False , True , ( 64,64,128) ),
    Label( 'vegetation' , 6 , 8 , 'void' , 0 , False , True , ( 107,142,35) ),
    Label( 'pole' , 7 , 5 , 'flat' , 1 , False , False , ( 153,153,153) ),
    Label( 'car' , 8 , 12 , 'flat' , 1 , False , False , ( 0,0,142) ),
    Label( 'traffic sign' , 9 , 7 , 'flat' , 1 , False , True , ( 220,220,0) ),
    Label( 'pedestrian' , 10 , 10 , 'flat' , 1 , False , True , ( 220,20,60) ),
    Label( 'bicycle' , 11 , 15 , 'construction' , 2 , False , False , ( 119,11,32) ),
    Label( 'motorcycle' , 12 , 14 , 'construction' , 2 , False , False , ( 0,0,230) ),
    Label( 'parking-slot' , 13 , 255 , 'construction' , 2 , False , False , (250,170,160) ),
    Label( 'road-work' , 14 , 255 , 'construction' , 2 , False , True , (128,64,64) ),
    Label( 'traffic light' , 15 , 6 , 'construction' , 2 , False , True , (250,170,30) ),
    Label( 'terrain' , 16 , 255 , 'construction' , 2 , False , True , (152,251,152) ),
    Label( 'rider' , 17 , 11 , 'object' , 3 , False , False , (255,0,0) ),
    Label( 'truck' , 18 , 255 , 'object' , 3 , False , True , (0,0,70) ),
    Label( 'bus' , 19 , 13 , 'object' , 3 , False , False , (0,60,100) ),
    Label( 'train' , 20 , 255 , 'object' , 3 , False , False , (0,80,100) ),
    Label( 'wall' , 21 , 3 , 'nature' , 4 , False , False , (102,102,156) ),
    Label( 'lanemarking' , 22 , 0 , 'nature' , 4 , False , False , (102,102,156) ),
]
#--------------------------------------------------------------------------------
# Create dictionaries for a fast lookup
#--------------------------------------------------------------------------------
# Please refer to the main method below for example usages!
# name to label object
# Fast lookup tables derived from the master `labels` list above.
# name -> Label
name2label = {lbl.name: lbl for lbl in labels}
# id -> Label
id2label = {lbl.id: lbl for lbl in labels}
# trainId -> Label. Several labels may share a trainId; iterating in
# reverse means the label listed FIRST is assigned last and therefore
# wins, matching the documented inverse-mapping convention.
trainId2label = {lbl.trainId: lbl for lbl in reversed(labels)}
# category name -> list of Labels in that category, in list order
category2labels = {}
for label in labels:
    category2labels.setdefault(label.category, []).append(label)
#--------------------------------------------------------------------------------
# Assure single instance name
#--------------------------------------------------------------------------------
# returns the label name that describes a single instance (if possible)
# e.g. input | output
# ----------------------
# car | car
# cargroup | car
# foo | None
# foogroup | None
# skygroup | None
def assureSingleInstanceName( name ):
    """Resolve *name* (possibly a '<base>group' name) to the label name of a
    single instance, or return None when no such instance label exists."""
    # A known plain name already denotes a single-instance label.
    if name in name2label:
        return name
    # Only names of the form '<base>group' can still qualify.
    suffix = "group"
    if name.endswith(suffix):
        base = name[:-len(suffix)]
        # The base must be a known label that actually has instances.
        if base in name2label and name2label[base].hasInstances:
            return base
    return None
#--------------------------------------------------------------------------------
# Main for testing
#--------------------------------------------------------------------------------
# just a dummy main
if __name__ == "__main__":
    # Print every label as one row of a fixed-width table.
    print("List of cityscapes labels:")
    print("")
    print(" {:>21} | {:>3} | {:>7} | {:>14} | {:>10} | {:>12} | {:>12}".format( 'name', 'id', 'trainId', 'category', 'categoryId', 'hasInstances', 'ignoreInEval' ))
    print(" " + ('-' * 98))
    for label in labels:
        print(" {:>21} | {:>3} | {:>7} | {:>14} | {:>10} | {:>12} | {:>12}".format( label.name, label.id, label.trainId, label.category, label.categoryId, label.hasInstances, label.ignoreInEval ))
    print("")
    print("Example usages:")
    # Map from name to label
    name = 'car'
    id = name2label[name].id  # NOTE: shadows the builtin `id` (demo-script scope only)
    print("ID of label '{name}': {id}".format( name=name, id=id ))
    # Map from ID to label
    category = id2label[id].category
    print("Category of label with ID '{id}': {category}".format( id=id, category=category ))
    # Map from trainID to label
    trainId = 0
    name = trainId2label[trainId].name
    print("Name of label with trainID '{id}': {name}".format( id=trainId, name=name ))
| 9,012 | 51.707602 | 199 | py |
CRST | CRST-master/dataset/helpers/labels_cityscapes_synthia.py | #!/usr/bin/python
#
# Cityscapes labels
#
from collections import namedtuple
#--------------------------------------------------------------------------------
# Definitions
#--------------------------------------------------------------------------------
# a label and all meta information
# A label and all of its meta information.
#
#   name          str    unique identifier of the class, e.g. 'car'
#   id            int    ID used in ground-truth images; -1 means the label
#                        has no ID and is skipped when creating GT images
#                        (e.g. license plate). Fixed by the evaluation
#                        server -- do not modify.
#   trainId       int    freely remappable training ID (max 255); several
#                        labels may share one trainId, in which case the
#                        label listed first wins for the inverse mapping
#   category      str    name of the category this label belongs to
#   categoryId    int    ID of that category (category-level GT images)
#   hasInstances  bool   whether single instances are distinguished
#   ignoreInEval  bool   whether GT pixels of this class are ignored
#                        during evaluation
#   color         tuple  color used to visualize this label
Label = namedtuple('Label', [
    'name',
    'id',
    'trainId',
    'category',
    'categoryId',
    'hasInstances',
    'ignoreInEval',
    'color',
])
#--------------------------------------------------------------------------------
# A list of all labels
#--------------------------------------------------------------------------------
# Please adapt the train IDs as appropriate for you approach.
# Note that you might want to ignore labels with ID 255 during training.
# Further note that the current train IDs are only a suggestion. You can use whatever you like.
# Make sure to provide your results using the original IDs and not the training IDs.
# Note that many IDs are ignored in evaluation and thus you never need to predict these!
# Cityscapes label table restricted to the 16-class SYNTHIA-compatible
# training-id space: compared with the standard 19-class setup, 'terrain',
# 'truck' and 'train' are additionally mapped to trainId 255 (ignored).
# NOTE(review): 'terrain' keeps ignoreInEval=False while its trainId is 255 --
# presumably evaluation uses trainIds here, but confirm against the eval code.
labels = [
    #     name                    id   trainId   category       catId  hasInstances  ignoreInEval  color
    Label( 'unlabeled' , 0 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
    Label( 'ego vehicle' , 1 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
    Label( 'rectification border' , 2 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
    Label( 'out of roi' , 3 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
    Label( 'static' , 4 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
    Label( 'dynamic' , 5 , 255 , 'void' , 0 , False , True , (111, 74, 0) ),
    Label( 'ground' , 6 , 255 , 'void' , 0 , False , True , ( 81, 0, 81) ),
    Label( 'road' , 7 , 0 , 'flat' , 1 , False , False , (128, 64,128) ),
    Label( 'sidewalk' , 8 , 1 , 'flat' , 1 , False , False , (244, 35,232) ),
    Label( 'parking' , 9 , 255 , 'flat' , 1 , False , True , (250,170,160) ),
    Label( 'rail track' , 10 , 255 , 'flat' , 1 , False , True , (230,150,140) ),
    Label( 'building' , 11 , 2 , 'construction' , 2 , False , False , ( 70, 70, 70) ),
    Label( 'wall' , 12 , 3 , 'construction' , 2 , False , False , (102,102,156) ),
    Label( 'fence' , 13 , 4 , 'construction' , 2 , False , False , (190,153,153) ),
    Label( 'guard rail' , 14 , 255 , 'construction' , 2 , False , True , (180,165,180) ),
    Label( 'bridge' , 15 , 255 , 'construction' , 2 , False , True , (150,100,100) ),
    Label( 'tunnel' , 16 , 255 , 'construction' , 2 , False , True , (150,120, 90) ),
    Label( 'pole' , 17 , 5 , 'object' , 3 , False , False , (153,153,153) ),
    Label( 'polegroup' , 18 , 255 , 'object' , 3 , False , True , (153,153,153) ),
    Label( 'traffic light' , 19 , 6 , 'object' , 3 , False , False , (250,170, 30) ),
    Label( 'traffic sign' , 20 , 7 , 'object' , 3 , False , False , (220,220, 0) ),
    Label( 'vegetation' , 21 , 8 , 'nature' , 4 , False , False , (107,142, 35) ),
    Label( 'terrain' , 22 , 255 , 'nature' , 4 , False , False , (152,251,152) ),
    Label( 'sky' , 23 , 9 , 'sky' , 5 , False , False , ( 70,130,180) ),
    Label( 'person' , 24 , 10 , 'human' , 6 , True , False , (220, 20, 60) ),
    Label( 'rider' , 25 , 11 , 'human' , 6 , True , False , (255, 0, 0) ),
    Label( 'car' , 26 , 12 , 'vehicle' , 7 , True , False , ( 0, 0,142) ),
    Label( 'truck' , 27 , 255 , 'vehicle' , 7 , True , False , ( 0, 0, 70) ),
    Label( 'bus' , 28 , 13 , 'vehicle' , 7 , True , False , ( 0, 60,100) ),
    Label( 'caravan' , 29 , 255 , 'vehicle' , 7 , True , True , ( 0, 0, 90) ),
    Label( 'trailer' , 30 , 255 , 'vehicle' , 7 , True , True , ( 0, 0,110) ),
    Label( 'train' , 31 , 255 , 'vehicle' , 7 , True , False , ( 0, 80,100) ),
    Label( 'motorcycle' , 32 , 14 , 'vehicle' , 7 , True , False , ( 0, 0,230) ),
    Label( 'bicycle' , 33 , 15 , 'vehicle' , 7 , True , False , (119, 11, 32) ),
    Label( 'license plate' , -1 , -1 , 'vehicle' , 7 , False , True , ( 0, 0,142) ),
]
#--------------------------------------------------------------------------------
# Create dictionaries for a fast lookup
#--------------------------------------------------------------------------------
# Please refer to the main method below for example usages!
# name to label object
# Fast lookup tables derived from the master `labels` list above.
# name -> Label
name2label = {lbl.name: lbl for lbl in labels}
# id -> Label
id2label = {lbl.id: lbl for lbl in labels}
# trainId -> Label. Several labels may share a trainId; iterating in
# reverse means the label listed FIRST is assigned last and therefore
# wins, matching the documented inverse-mapping convention.
trainId2label = {lbl.trainId: lbl for lbl in reversed(labels)}
# category name -> list of Labels in that category, in list order
category2labels = {}
for label in labels:
    category2labels.setdefault(label.category, []).append(label)
#--------------------------------------------------------------------------------
# Assure single instance name
#--------------------------------------------------------------------------------
# returns the label name that describes a single instance (if possible)
# e.g. input | output
# ----------------------
# car | car
# cargroup | car
# foo | None
# foogroup | None
# skygroup | None
def assureSingleInstanceName( name ):
    """Resolve *name* (possibly a '<base>group' name) to the label name of a
    single instance, or return None when no such instance label exists."""
    # A known plain name already denotes a single-instance label.
    if name in name2label:
        return name
    # Only names of the form '<base>group' can still qualify.
    suffix = "group"
    if name.endswith(suffix):
        base = name[:-len(suffix)]
        # The base must be a known label that actually has instances.
        if base in name2label and name2label[base].hasInstances:
            return base
    return None
#--------------------------------------------------------------------------------
# Main for testing
#--------------------------------------------------------------------------------
# just a dummy main
if __name__ == "__main__":
    # Print every label as one row of a fixed-width table.
    print("List of cityscapes labels:")
    print("")
    print(" {:>21} | {:>3} | {:>7} | {:>14} | {:>10} | {:>12} | {:>12}".format( 'name', 'id', 'trainId', 'category', 'categoryId', 'hasInstances', 'ignoreInEval' ))
    print(" " + ('-' * 98))
    for label in labels:
        print(" {:>21} | {:>3} | {:>7} | {:>14} | {:>10} | {:>12} | {:>12}".format( label.name, label.id, label.trainId, label.category, label.categoryId, label.hasInstances, label.ignoreInEval ))
    print("")
    print("Example usages:")
    # Map from name to label
    name = 'car'
    id = name2label[name].id  # NOTE: shadows the builtin `id` (demo-script scope only)
    print("ID of label '{name}': {id}".format( name=name, id=id ))
    # Map from ID to label
    category = id2label[id].category
    print("Category of label with ID '{id}': {category}".format( id=id, category=category ))
    # Map from trainID to label
    trainId = 0
    name = trainId2label[trainId].name
    print("Name of label with trainID '{id}': {name}".format( id=trainId, name=name ))
| 10,597 | 57.230769 | 199 | py |
CRST | CRST-master/deeplab/loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
class CrossEntropy2d(nn.Module):
    """2-D cross-entropy loss for dense prediction.

    Flattens an (n, c, h, w) score map and an (n, h, w) label map, drops
    pixels whose label equals `ignore_label`, and applies standard
    cross-entropy over the remaining pixels.
    """

    def __init__(self, size_average=True, ignore_label=255):
        super(CrossEntropy2d, self).__init__()
        self.size_average = size_average  # True -> mean over valid pixels, False -> sum
        self.ignore_label = ignore_label  # label value excluded from the loss

    def forward(self, predict, target, weight=None):
        """
        Args:
            predict:(n, c, h, w)
            target:(n, h, w)
            weight (Tensor, optional): a manual rescaling weight given to each class.
                If given, has to be a Tensor of size "nclasses"
        """
        assert not target.requires_grad
        assert predict.dim() == 4
        assert target.dim() == 3
        assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
        assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
        # BUGFIX: the failure message previously read target.size(3), which does
        # not exist for a 3-D target and would raise IndexError instead of the
        # intended AssertionError.
        assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(2))
        n, c, h, w = predict.size()
        # Boolean mask of the pixels that participate in the loss.
        target_mask = (target >= 0) * (target != self.ignore_label)
        target = target[target_mask]
        # BUGFIX: masked indexing yields a 1-D tensor even when empty, so the
        # old `if not target.data.dim()` guard never fired on modern PyTorch
        # and an all-ignored batch produced NaN. Check numel() instead.
        if target.numel() == 0:
            return Variable(torch.zeros(1))
        # (n, c, h, w) -> (n, h, w, c) -> (num_valid_pixels, c)
        predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
        predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
        # `reduction` replaces the deprecated size_average= keyword; with the
        # default reduce behavior, size_average=False meant summation.
        loss = F.cross_entropy(predict, target, weight=weight,
                               reduction='mean' if self.size_average else 'sum')
        return loss
CRST | CRST-master/deeplab/model.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
affine_par = True
def outS(i):
    """Map an input spatial size to the DeepLab output-map size.

    Mirrors the three stride-2 stages of the backbone, each roughly halving
    the resolution; e.g. outS(321) == 41, outS(513) == 65.

    Returns:
        int: the output feature-map side length.
    """
    i = int(i)
    # Integer floor division keeps the result an int on Python 3 as well;
    # the original Python-2 style `/` returned a float here.
    i = (i + 1) // 2
    i = int(np.ceil((i + 1) / 2.0))
    i = (i + 1) // 2
    return i
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with 1-pixel padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-conv ResNet block with an identity (or projected) skip."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv may change resolution (stride) and channel count.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
        self.relu = nn.ReLU(inplace=True)
        # Second conv keeps the shape.
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # Skip branch, optionally projected to match the main branch.
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(y + shortcut)
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 (dilated) -> 1x1 expand.

    All BatchNorm parameters are frozen (requires_grad=False), the usual
    setup when fine-tuning a DeepLab backbone.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
        super(Bottleneck, self).__init__()
        # NOTE: the stride sits on the 1x1 conv (Caffe-style ResNet variant).
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
        # padding == dilation keeps the spatial size unchanged for a 3x3 kernel.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=dilation, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, affine=affine_par)
        # Freeze every BatchNorm parameter.
        for bn in (self.bn1, self.bn2, self.bn3):
            for p in bn.parameters():
                p.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(y + shortcut)
class Classifier_Module(nn.Module):
    """ASPP-style classification head: parallel dilated 3x3 classifiers
    over a 2048-channel feature map, with their logits summed."""

    def __init__(self, dilation_series, padding_series, num_classes):
        super(Classifier_Module, self).__init__()
        self.conv2d_list = nn.ModuleList()
        for dilation, padding in zip(dilation_series, padding_series):
            self.conv2d_list.append(
                nn.Conv2d(2048, num_classes, kernel_size=3, stride=1,
                          padding=padding, dilation=dilation, bias=True))
        # Zero-mean Gaussian init for every branch's weights.
        for branch in self.conv2d_list:
            branch.weight.data.normal_(0, 0.01)

    def forward(self, x):
        # Sum the logits of all dilated branches.
        branches = iter(self.conv2d_list)
        out = next(branches)(x)
        for branch in branches:
            out = out + branch(x)
        return out
class Residual_Covolution(nn.Module):
    """Residual refinement unit: predicts a segmentation map from a reduced
    feature stream and feeds it back, returning the residually-updated
    input features together with the segmentation logits."""

    def __init__(self, icol, ocol, num_classes):
        super(Residual_Covolution, self).__init__()
        self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding=12, dilation=12, bias=True)
        self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, stride=1, padding=12, dilation=12, bias=True)
        self.conv3 = nn.Conv2d(num_classes, ocol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True)
        self.conv4 = nn.Conv2d(ocol, icol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Reduce to the working width, then predict segmentation logits.
        reduced = self.relu(self.conv1(x))
        seg = self.conv2(reduced)
        # Fold the prediction back into the reduced stream...
        refined = reduced + self.relu(self.conv3(seg))
        # ...and add the result residually onto the original features.
        out = x + self.relu(self.conv4(refined))
        return out, seg
class Residual_Refinement_Module(nn.Module):
    """Two chained refinement units; returns [coarse seg, coarse + residual seg]."""

    def __init__(self, num_classes):
        super(Residual_Refinement_Module, self).__init__()
        self.RC1 = Residual_Covolution(2048, 512, num_classes)
        self.RC2 = Residual_Covolution(2048, 512, num_classes)

    def forward(self, x):
        refined, seg1 = self.RC1(x)
        _, seg2 = self.RC2(refined)
        return [seg1, seg1 + seg2]
class ResNet_Refine(nn.Module):
    """Dilated ResNet backbone topped with the Residual_Refinement_Module.

    Output stride is 8: only conv1/maxpool/layer2 downsample, while layer3
    and layer4 keep stride 1 and use dilation 2/4 instead (DeepLab style).
    forward() returns the head's output: [seg1, seg1 + seg2].
    """
    def __init__(self, block, layers, num_classes):
        # block: residual block class (e.g. Bottleneck); layers: blocks per stage.
        self.inplanes = 64
        super(ResNet_Refine, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        # Freeze the stem BatchNorm parameters (common DeepLab fine-tuning setup).
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)  # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # Dilation replaces striding in the last two stages.
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.layer5 = Residual_Refinement_Module(num_classes)
        # Gaussian init for convs; unit/zero init for (trainable) BatchNorms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels  # NOTE: unused (fan-out, kept for parity)
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        #        for i in m.parameters():
        #            i.requires_grad = False

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Build one residual stage of `blocks` blocks; a 1x1 projection skip
        is created whenever shape changes or the stage is dilated."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
            # Freeze the projection's BatchNorm parameters as well.
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        return x
class ResNet(nn.Module):
    """Dilated ResNet backbone with an ASPP-style Classifier_Module head.

    Output stride is 8: only conv1/maxpool/layer2 downsample, while layer3
    and layer4 keep stride 1 and use dilation 2/4 instead (DeepLab style).
    forward() returns per-class logits at 1/8 input resolution.
    """
    def __init__(self, block, layers, num_classes):
        # block: residual block class (e.g. Bottleneck); layers: blocks per stage.
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        # Freeze the stem BatchNorm parameters (common DeepLab fine-tuning setup).
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)  # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # Dilation replaces striding in the last two stages.
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        # ASPP head with dilation/padding rates 6/12/18/24.
        self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes)
        # Gaussian init for convs; unit/zero init for (trainable) BatchNorms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels  # NOTE: unused (fan-out, kept for parity)
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        #        for i in m.parameters():
        #            i.requires_grad = False

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Build one residual stage of `blocks` blocks; a 1x1 projection skip
        is created whenever shape changes or the stage is dilated."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
            # Freeze the projection's BatchNorm parameters as well.
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def _make_pred_layer(self,block, dilation_series, padding_series,num_classes):
        # Thin factory wrapper; kept for subclass override compatibility.
        return block(dilation_series,padding_series,num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        return x
class MS_Deeplab(nn.Module):
    """Multi-scale DeepLab: runs the shared backbone at 1.0x, 0.75x and 0.5x
    input scale and also returns the element-wise max fusion of the three."""
    def __init__(self,block,num_classes):
        super(MS_Deeplab,self).__init__()
        # Shared ResNet-101 backbone ([3, 4, 23, 3] bottleneck stages).
        self.Scale = ResNet(block,[3, 4, 23, 3],num_classes) #changed to fix #4
    def forward(self,x):
        output = self.Scale(x) # for original scale
        output_size = output.size()[2]
        # NOTE(review): only dim 2 is used for all resize targets, so this
        # assumes square inputs (H == W) -- confirm with callers.
        input_size = x.size()[2]
        # NOTE(review): the Upsample modules are (re)built on every forward
        # call; they are parameter-free, so only construction cost is paid.
        self.interp1 = nn.Upsample(size=(int(input_size*0.75)+1, int(input_size*0.75)+1), mode='bilinear')
        self.interp2 = nn.Upsample(size=(int(input_size*0.5)+1, int(input_size*0.5)+1), mode='bilinear')
        # interp3 brings the smaller-scale outputs back to the 1.0x logit size.
        self.interp3 = nn.Upsample(size=(output_size, output_size), mode='bilinear')
        x75 = self.interp1(x)
        output75 = self.interp3(self.Scale(x75)) # for 0.75x scale
        x5 = self.interp2(x)
        output5 = self.interp3(self.Scale(x5)) # for 0.5x scale
        # Pixel-wise maximum over the three scales.
        out_max = torch.max(torch.max(output, output75), output5)
        return [output, output75, output5, out_max]
def Res_Ms_Deeplab(num_classes=21):
    """Build the multi-scale DeepLab model (ResNet-101 backbone)."""
    return MS_Deeplab(Bottleneck, num_classes)
def Res_Deeplab(num_classes=21, is_refine=False):
    """Build DeepLab on a ResNet-101 backbone; with `is_refine` the ASPP head
    is replaced by the residual-refinement head."""
    backbone = ResNet_Refine if is_refine else ResNet
    return backbone(Bottleneck, [3, 4, 23, 3], num_classes)
| 11,127 | 36.217391 | 139 | py |
CRST | CRST-master/deeplab/datasets.py | import os
import os.path as osp
import numpy as np
import random
import matplotlib.pyplot as plt
import collections
import torch
import torchvision.transforms as transforms
import torchvision
import cv2
from torch.utils import data
import sys
from PIL import Image
# Cityscapes train-ID color palette (19 classes x RGB), padded with zeros to
# the 256-entry (768-value) size expected by PIL's Image.putpalette().
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
           220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
           0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
palette.extend([0] * zero_pad)
def colorize_mask(mask):
    """Convert a (H, W) train-ID mask (numpy array) into a palettized PIL image."""
    colorized = Image.fromarray(mask.astype(np.uint8)).convert('P')
    colorized.putpalette(palette)
    return colorized
class VOCDataSet(data.Dataset):
    """Pascal-VOC-style segmentation dataset with random scale / crop / flip
    augmentation. Images are loaded with OpenCV (BGR), mean-subtracted and
    returned as (C, H, W) float arrays alongside the (H, W) label map."""
    def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255):
        # root: dataset root containing img/ and gt/; list_path: file of image ids.
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.scale = scale              # enable random rescaling
        self.ignore_label = ignore_label  # label used for padded regions
        self.mean = mean                # per-channel mean (BGR order, since cv2 loads BGR)
        self.is_mirror = mirror         # enable random horizontal flip
        # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])
        self.img_ids = [i_id.strip() for i_id in open(list_path)]
        # Repeat the id list so one epoch covers max_iters samples.
        if not max_iters==None:
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        self.files = []
        # for split in ["train", "trainval", "val"]:
        for name in self.img_ids:
            img_file = osp.join(self.root, "img/%s.jpg" % name)
            label_file = osp.join(self.root, "gt/%s.png" % name)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": name
            })

    def __len__(self):
        return len(self.files)

    def generate_scale_label(self, image, label):
        # Random scale factor in [0.5, 1.5]; nearest-neighbor keeps labels discrete.
        f_scale = 0.5 + random.randint(0, 11) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        """Return (image CxHxW float32, label HxW float32, original size, name)."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        # Pad (bottom/right) so the image is at least crop-sized; padded label
        # pixels get ignore_label so they never contribute to the loss.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        # Random crop (same window for image and label).
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        #image = image[:, :, ::-1]  # change to BGR
        # HWC -> CHW for PyTorch.
        image = image.transpose((2, 0, 1))
        if self.is_mirror:
            # Random horizontal flip applied jointly to image and label.
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), name
class GTA5DataSet(data.Dataset):
    """GTA5 (source-domain) segmentation dataset.

    Reads a tab-separated list file of "<image>\t<label>" pairs, maps raw
    GTA5 label ids to train ids, and applies random scale / pad / crop /
    horizontal-mirror augmentation.

    __getitem__ returns (image CHW float32 RGB, label HW float32,
    original image shape, image name).
    """

    def __init__(self, root, list_path, pseudo_root=None, max_iters=None,
                 crop_size=(500, 500), train_scale=(0.5, 1.5),
                 mean=(128, 128, 128), std=(1, 1, 1), scale=True,
                 mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size  # original assigned this twice; once is enough
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        # Each line of the list file: "<image path>\t<label path>".
        with open(list_path) as f:
            for item in f:
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # Replicate the list so one pass provides at least max_iters samples.
            n_rep = int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.img_ids = self.img_ids * n_rep
            self.label_ids = self.label_ids * n_rep
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # Pseudo-label lists carry ready-to-use paths.
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of (possibly replicated) samples."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest) by a common
        factor drawn from [lscale, hscale] in steps of 0.1."""
        f_scale = self.lscale + random.randint(0, int((self.hscale - self.lscale) * 10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # Map raw GTA5 label ids to train ids; unmapped ids become 255 (ignore).
        sys.path.insert(0, 'dataset/helpers')
        from labels import id2label
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape  # original (pre-augmentation) H, W, C
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image / 255.0  # scale to [0, 1]
        image -= self.mean     # BGR mean
        image = image / self.std
        img_h, img_w = label.shape
        # Pad bottom/right so the sample is at least crop_h x crop_w.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                         cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                           cv2.BORDER_CONSTANT, value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        # Random crop.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image[:, :, ::-1]           # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            # flip == -1 mirrors horizontally; flip == 1 is a no-op.
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), img_name
class SYNTHIADataSet(data.Dataset):
    """SYNTHIA (source-domain) segmentation dataset.

    Same pipeline as GTA5DataSet but maps labels through the SYNTHIA id
    table (dataset/helpers/labels_synthia.py).

    __getitem__ returns (image CHW float32 RGB, label HW float32,
    original image shape, image name).
    """

    def __init__(self, root, list_path, pseudo_root=None, max_iters=None,
                 crop_size=(500, 500), train_scale=(0.5, 1.5),
                 mean=(128, 128, 128), std=(1, 1, 1), scale=True,
                 mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size  # original assigned this twice; once is enough
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        # Each line of the list file: "<image path>\t<label path>".
        with open(list_path) as f:
            for item in f:
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # Replicate the list so one pass provides at least max_iters samples.
            n_rep = int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.img_ids = self.img_ids * n_rep
            self.label_ids = self.label_ids * n_rep
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # Pseudo-label lists carry ready-to-use paths.
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of (possibly replicated) samples."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest) by a common
        factor drawn from [lscale, hscale] in steps of 0.1."""
        f_scale = self.lscale + random.randint(0, int((self.hscale - self.lscale) * 10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # Map raw SYNTHIA label ids to train ids; unmapped ids become 255 (ignore).
        sys.path.insert(0, 'dataset/helpers')
        from labels_synthia import id2label
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape  # original (pre-augmentation) H, W, C
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image / 255.0  # scale to [0, 1]
        image -= self.mean     # BGR mean
        image = image / self.std
        img_h, img_w = label.shape
        # Pad bottom/right so the sample is at least crop_h x crop_w.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                         cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                           cv2.BORDER_CONSTANT, value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        # Random crop.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image[:, :, ::-1]           # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            # flip == -1 mirrors horizontally; flip == 1 is a no-op.
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), img_name
class SYNTHIASTDataSet(data.Dataset):
    """SYNTHIA self-training dataset.

    Same pipeline as SYNTHIADataSet, but also carries a regularization
    weight that is returned with every sample (used by the self-training
    loss).

    __getitem__ returns (image CHW float32 RGB, label HW float32,
    original image shape, image name, reg_weight).
    """

    def __init__(self, root, list_path, reg_weight=0.0, pseudo_root=None,
                 max_iters=None, crop_size=(500, 500), train_scale=(0.5, 1.5),
                 mean=(128, 128, 128), std=(1, 1, 1), scale=True,
                 mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size  # original assigned this twice; once is enough
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        self.reg_weight = reg_weight
        # Each line of the list file: "<image path>\t<label path>".
        with open(list_path) as f:
            for item in f:
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # Replicate the list so one pass provides at least max_iters samples.
            n_rep = int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.img_ids = self.img_ids * n_rep
            self.label_ids = self.label_ids * n_rep
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # Pseudo-label lists carry ready-to-use paths.
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of (possibly replicated) samples."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest) by a common
        factor drawn from [lscale, hscale] in steps of 0.1."""
        f_scale = self.lscale + random.randint(0, int((self.hscale - self.lscale) * 10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # Map raw SYNTHIA label ids to train ids; unmapped ids become 255 (ignore).
        sys.path.insert(0, 'dataset/helpers')
        from labels_synthia import id2label
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape  # original (pre-augmentation) H, W, C
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image / 255.0  # scale to [0, 1]
        image -= self.mean     # BGR mean
        image = image / self.std
        img_h, img_w = label.shape
        # Pad bottom/right so the sample is at least crop_h x crop_w.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                         cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                           cv2.BORDER_CONSTANT, value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        # Random crop.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image[:, :, ::-1]           # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            # flip == -1 mirrors horizontally; flip == 1 is a no-op.
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), img_name, self.reg_weight
class GTA5STDataSet(data.Dataset):
    """GTA5 self-training dataset.

    Same pipeline as GTA5DataSet, but also carries a regularization weight
    that is returned with every sample (used by the self-training loss).

    __getitem__ returns (image CHW float32 RGB, label HW float32,
    original image shape, image name, reg_weight).
    """

    def __init__(self, root, list_path, reg_weight=0.0, pseudo_root=None,
                 max_iters=None, crop_size=(500, 500), train_scale=(0.5, 1.5),
                 mean=(128, 128, 128), std=(1, 1, 1), scale=True,
                 mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size  # original assigned this twice; once is enough
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        self.reg_weight = reg_weight
        # Each line of the list file: "<image path>\t<label path>".
        with open(list_path) as f:
            for item in f:
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # Replicate the list so one pass provides at least max_iters samples.
            n_rep = int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.img_ids = self.img_ids * n_rep
            self.label_ids = self.label_ids * n_rep
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # Pseudo-label lists carry ready-to-use paths.
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of (possibly replicated) samples."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest) by a common
        factor drawn from [lscale, hscale] in steps of 0.1."""
        f_scale = self.lscale + random.randint(0, int((self.hscale - self.lscale) * 10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # Map raw GTA5 label ids to train ids; unmapped ids become 255 (ignore).
        sys.path.insert(0, 'dataset/helpers')
        from labels import id2label
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape  # original (pre-augmentation) H, W, C
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image / 255.0  # scale to [0, 1]
        image -= self.mean     # BGR mean
        image = image / self.std
        img_h, img_w = label.shape
        # Pad bottom/right so the sample is at least crop_h x crop_w.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                         cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                           cv2.BORDER_CONSTANT, value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        # Random crop.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image[:, :, ::-1]           # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            # flip == -1 mirrors horizontally; flip == 1 is a no-op.
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), img_name, self.reg_weight
class SrcSTDataSet(data.Dataset):
    """Source-domain self-training dataset for either GTA5 or SYNTHIA.

    `data_src` selects which label-id table is used for the raw-id ->
    train-id mapping ('gta' or 'synthia').

    __getitem__ returns (image CHW float32 RGB, label HW float32,
    original image shape, image name, reg_weight).
    """

    def __init__(self, root, list_path, data_src=None, reg_weight=0.0,
                 pseudo_root=None, max_iters=None, crop_size=(500, 500),
                 train_scale=(0.5, 1.5), mean=(128, 128, 128), std=(1, 1, 1),
                 scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size  # original assigned this twice; once is enough
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        self.reg_weight = reg_weight
        self.data_src = data_src
        # Each line of the list file: "<image path>\t<label path>".
        with open(list_path) as f:
            for item in f:
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # Replicate the list so one pass provides at least max_iters samples.
            n_rep = int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.img_ids = self.img_ids * n_rep
            self.label_ids = self.label_ids * n_rep
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # Pseudo-label lists carry ready-to-use paths.
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of (possibly replicated) samples."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest) by a common
        factor drawn from [lscale, hscale] in steps of 0.1."""
        f_scale = self.lscale + random.randint(0, int((self.hscale - self.lscale) * 10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # Pick the id table matching the source domain; fail loudly otherwise
        # (the original silently fell through and crashed with NameError).
        sys.path.insert(0, 'dataset/helpers')
        if self.data_src == 'gta':
            from labels import id2label
        elif self.data_src == 'synthia':
            from labels_synthia import id2label
        else:
            raise ValueError("data_src must be 'gta' or 'synthia', got %r" % (self.data_src,))
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape  # original (pre-augmentation) H, W, C
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image / 255.0  # scale to [0, 1]
        image -= self.mean     # BGR mean
        image = image / self.std
        img_h, img_w = label.shape
        # Pad bottom/right so the sample is at least crop_h x crop_w.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                         cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                           cv2.BORDER_CONSTANT, value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        # Random crop.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image[:, :, ::-1]           # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            # flip == -1 mirrors horizontally; flip == 1 is a no-op.
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), img_name, self.reg_weight
class SoftSrcSTDataSet(data.Dataset):
    """Source-domain self-training dataset returning one-hot label maps.

    Like SrcSTDataSet, but instead of an HW train-id map it returns a
    (num_classes, H, W) one-hot encoding in which ignored pixels are
    all-zero vectors.

    __getitem__ returns (image CHW float32 RGB, labels C,H,W float32,
    original image shape, image name, reg_weight).
    """

    def __init__(self, root, list_path, data_src=None, num_classes=None,
                 reg_weight=0.0, pseudo_root=None, max_iters=None,
                 crop_size=(500, 500), train_scale=(0.5, 1.5),
                 mean=(128, 128, 128), std=(1, 1, 1), scale=True,
                 mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size  # original assigned this twice; once is enough
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        self.reg_weight = reg_weight
        self.data_src = data_src
        self.num_classes = num_classes
        # Each line of the list file: "<image path>\t<label path>".
        with open(list_path) as f:
            for item in f:
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # Replicate the list so one pass provides at least max_iters samples.
            n_rep = int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.img_ids = self.img_ids * n_rep
            self.label_ids = self.label_ids * n_rep
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # Pseudo-label lists carry ready-to-use paths.
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of (possibly replicated) samples."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest) by a common
        factor drawn from [lscale, hscale] in steps of 0.1."""
        f_scale = self.lscale + random.randint(0, int((self.hscale - self.lscale) * 10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # Pick the id table matching the source domain; fail loudly otherwise
        # (the original silently fell through and crashed with NameError).
        sys.path.insert(0, 'dataset/helpers')
        if self.data_src == 'gta':
            from labels import id2label
        elif self.data_src == 'synthia':
            from labels_synthia import id2label
        else:
            raise ValueError("data_src must be 'gta' or 'synthia', got %r" % (self.data_src,))
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape  # original (pre-augmentation) H, W, C
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image / 255.0  # scale to [0, 1]
        image -= self.mean     # BGR mean
        image = image / self.std
        img_h, img_w = label.shape
        # Pad bottom/right so the sample is at least crop_h x crop_w.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                         cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                           cv2.BORDER_CONSTANT, value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        # Random crop.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image[:, :, ::-1]           # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            # flip == -1 mirrors horizontally; flip == 1 is a no-op.
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        # One-hot encode: replicate the id map across channels, then use a
        # cumsum trick -- after cumsum each valid pixel's channels run 1..C,
        # and only the channel equal to (class id + 1) is kept.  Ignored
        # pixels were zeroed first, so they end up as all-zero vectors.
        label_expand = np.tile(np.expand_dims(label, axis=2), (1, 1, self.num_classes))
        labels = label_expand.copy()
        labels[labels != self.ignore_label] = 1.0
        labels[labels == self.ignore_label] = 0.0
        labels = np.cumsum(labels, axis=2)
        labels[labels != label_expand + 1] = 0.0
        del label_expand
        labels[labels != 0.0] = 1.0
        labels = labels.transpose((2, 0, 1))  # HWC -> CHW
        return image.copy(), labels.copy(), np.array(size), img_name, self.reg_weight
class SoftGTA5StMineDataSet(data.Dataset):
    """Self-training dataset with soft (probabilistic) labels and
    rare-class patch mining.

    Each list line is "<image>\t<hard label>\t<soft label .npy>".  With
    probability `mine_chance`, the random crop is centered on a pixel of a
    class from `mine_id` (when present) so rare classes are sampled more
    often.

    __getitem__ returns (image CHW float32 RGB, labels C,H,W float32
    soft targets with ignored pixels zeroed, original image shape,
    image name, reg_weight).
    """

    def __init__(self, root, list_path, data_src=None, num_classes=None,
                 reg_weight=0.0, rare_id=None, mine_id=None, mine_chance=None,
                 pseudo_root=None, max_iters=None, crop_size=(500, 500),
                 train_scale=(0.5, 1.5), mean=(128, 128, 128), std=(1, 1, 1),
                 scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size  # original assigned this twice; once is enough
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        self.softlabel_ids = []
        self.reg_weight = reg_weight
        self.rare_id = rare_id
        self.mine_id = mine_id
        self.mine_chance = mine_chance
        self.data_src = data_src
        self.num_classes = num_classes
        # Each line: "<image path>\t<label path>\t<soft label path>".
        with open(list_path) as f:
            for item in f:
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
                self.softlabel_ids.append(fields[2])
        if max_iters is not None:
            # Replicate the lists so one pass provides at least max_iters samples.
            n_rep = int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.img_ids = self.img_ids * n_rep
            self.label_ids = self.label_ids * n_rep
            self.softlabel_ids = self.softlabel_ids * n_rep
        self.files = []
        for img_name, label_name, softlabel_name in zip(self.img_ids, self.label_ids, self.softlabel_ids):
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
                softlabel_file = osp.join(self.root, softlabel_name)
            else:
                # Pseudo-label lists carry ready-to-use paths.
                label_file = label_name
                softlabel_file = softlabel_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "softlabel": softlabel_file,
                "img_name": img_name,
                "label_name": label_name,
                "softlabel_name": softlabel_name
            })

    def __len__(self):
        """Number of (possibly replicated) samples."""
        return len(self.files)

    def generate_scale_label(self, image, label, input_softlabel):
        """Randomly rescale image, hard label and soft label by a common factor."""
        f_scale = self.lscale + random.randint(0, int((self.hscale - self.lscale) * 10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        # Resize the soft label 3 channels at a time (cv2.resize handles at
        # most 4 channels per call).
        h, w = label.shape
        num_group = int(np.ceil(self.num_classes / 3.0))
        softlabel = np.zeros((h, w, self.num_classes), dtype=np.float32)
        start_idx = 0
        for _ in range(num_group):
            clabel = input_softlabel[:, :, start_idx:start_idx + 3]
            clabel = cv2.resize(clabel, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
            softlabel[:, :, start_idx:start_idx + 3] = clabel.reshape(h, w, -1)
            start_idx += 3
        # Renormalize so per-pixel class probabilities sum to 1 after interpolation.
        softlabel = softlabel.transpose(2, 0, 1) / np.sum(softlabel, 2)
        softlabel = softlabel.transpose(1, 2, 0)
        return image, label, softlabel

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        softlabel = np.load(datafiles["softlabel"])
        # Pick the id table matching the source domain; fail loudly otherwise
        # (the original silently fell through and crashed with NameError).
        # NOTE(review): for 'synthia' this intentionally uses
        # labels_cityscapes_synthia, unlike the other dataset classes --
        # confirm this asymmetry is deliberate.
        sys.path.insert(0, 'dataset/helpers')
        if self.data_src == 'gta':
            from labels import id2label
        elif self.data_src == 'synthia':
            from labels_cityscapes_synthia import id2label
        else:
            raise ValueError("data_src must be 'gta' or 'synthia', got %r" % (self.data_src,))
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape  # original (pre-augmentation) H, W, C
        img_name = datafiles["img_name"]
        if self.scale:
            image, label, softlabel = self.generate_scale_label(image, label, softlabel)
        image = np.asarray(image, np.float32)
        image = image / 255.0  # scale to [0, 1]
        image -= self.mean     # BGR mean
        image = image / self.std
        img_h, img_w = label.shape
        # Pad bottom/right so the sample is at least crop_h x crop_w.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                         cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                           cv2.BORDER_CONSTANT, value=(self.ignore_label,))
            # Pad the soft label 3 channels at a time (cv2 channel limit).
            h_pad, w_pad = label_pad.shape
            num_group = int(np.ceil(self.num_classes / 3.0))
            softlabel_pad = np.zeros((h_pad, w_pad, self.num_classes), dtype=np.float32)
            start_idx = 0
            for _ in range(num_group):
                clabel_pad = softlabel[:, :, start_idx:start_idx + 3]
                clabel_pad = cv2.copyMakeBorder(clabel_pad, 0, pad_h, 0, pad_w,
                                                cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
                softlabel_pad[:, :, start_idx:start_idx + 3] = clabel_pad.reshape(h_pad, w_pad, -1)
                start_idx += 3
        else:
            img_pad, label_pad, softlabel_pad = image, label, softlabel
        img_h, img_w = label_pad.shape
        # Rare-class mining: with probability mine_chance, center the crop on
        # a randomly chosen pixel of a mine-id class present in this label.
        mine_flag = random.uniform(0, 1) < self.mine_chance
        if mine_flag and len(self.mine_id) > 0:
            label_unique = np.unique(label_pad)
            mine_id_temp = np.array([a for a in self.mine_id if a in label_unique])
            if mine_id_temp.size != 0:
                # Pick one of the present mine ids, then one of its pixels.
                sel_mine_id = mine_id_temp[random.randint(0, mine_id_temp.size - 1)]
                mine_id_loc = np.where(label_pad == sel_mine_id)  # (rows, cols)
                seed_loc = random.randint(0, len(mine_id_loc[0]) - 1)
                hseed = mine_id_loc[0][seed_loc]
                wseed = mine_id_loc[1][seed_loc]
                # Integer halves: the original used '/', which yields floats
                # under Python 3 and breaks the array slicing below.
                half_crop_h = self.crop_h // 2
                half_crop_w = self.crop_w // 2
                # Crop centered at the seed pixel ...
                left_idx = wseed - half_crop_w
                right_idx = wseed + half_crop_w - 1
                up_idx = hseed - half_crop_h
                bottom_idx = hseed + half_crop_h - 1
                # ... shifted back inside the padded image where it overflows.
                if left_idx < 0:
                    left_idx = 0
                elif right_idx > img_w - 1:
                    left_idx = left_idx - ((half_crop_w - 1) - (img_w - 1 - wseed))
                if up_idx < 0:
                    up_idx = 0
                elif bottom_idx > img_h - 1:
                    up_idx = up_idx - ((half_crop_h - 1) - (img_h - 1 - hseed))
                h_off = up_idx
                w_off = left_idx
            else:
                h_off = random.randint(0, img_h - self.crop_h)
                w_off = random.randint(0, img_w - self.crop_w)
        else:
            h_off = random.randint(0, img_h - self.crop_h)
            w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        softlabel = np.asarray(softlabel_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image[:, :, ::-1]           # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        # Zero out soft targets at ignored pixels.
        label_expand = np.tile(np.expand_dims(label, axis=2), (1, 1, self.num_classes))
        labels_ = label_expand.copy()
        labels_[labels_ != self.ignore_label] = 1.0
        labels_[labels_ == self.ignore_label] = 0.0
        labels = labels_ * softlabel
        labels = labels.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            # flip == -1 mirrors horizontally; flip == 1 is a no-op.
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            labels = labels[:, :, ::flip]
        return image.copy(), labels.copy(), np.array(size), img_name, self.reg_weight
class GTA5StMineDataSet(data.Dataset):
    def __init__(self, root, list_path, data_src=None, reg_weight = 0.0, rare_id = None, mine_id = None, mine_chance = None, pseudo_root = None, max_iters=None, crop_size=(500, 500), train_scale = (0.5, 1.5), mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        """Self-training dataset with rare-class mining over hard labels.

        Args:
            root: directory prefixed to relative image/label paths.
            list_path: text file with "<image>\t<label>" per line.
            data_src: 'gta' or 'synthia' -- selects the label-id table.
            reg_weight: regularization weight returned with each sample.
            rare_id / mine_id / mine_chance: rare-class mining configuration;
                with probability mine_chance the crop is centered on a
                mine_id-class pixel.
            pseudo_root: when not None, label paths are used verbatim
                instead of being joined with `root`.
            max_iters: when not None, the file list is replicated so one
                pass yields at least this many samples.
            crop_size / train_scale / mean / std / scale / mirror /
            ignore_label: augmentation and normalization settings.
        """
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        # NOTE(review): duplicate of the previous line; harmless but redundant.
        self.crop_h, self.crop_w = crop_size
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])
        self.img_ids = []
        self.label_ids = []
        self.reg_weight = reg_weight
        self.rare_id = rare_id
        self.mine_id = mine_id
        self.mine_chance = mine_chance
        self.data_src = data_src
        # Each line of the list file: "<image path>\t<label path>".
        with open(list_path) as f:
            for item in f.readlines():
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        # Replicate the list so one pass provides at least max_iters samples.
        if not max_iters==None:
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        # for split in ["train", "trainval", "val"]:
        for idx in range(len(self.img_ids)):
            img_name = self.img_ids[idx]
            label_name = self.label_ids[idx]
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root == None:
                label_file = osp.join(self.root, label_name)
            else:
                # Pseudo-label lists carry ready-to-use paths.
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })
    def __len__(self):
        """Number of (possibly replicated) samples."""
        return len(self.files)
    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest) by a common
        factor drawn from [lscale, hscale], quantized to steps of 0.1."""
        f_scale = self.lscale + random.randint(0, int((self.hscale-self.lscale)*10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        return image, label
def __getitem__(self, index):
    """Load, normalize, randomly crop (optionally class-mined) and
    optionally mirror one training sample.

    Returns (image CHW float32 RGB, label float32, original HWC shape,
    image name, reg_weight).
    """
    datafiles = self.files[index]
    image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR) # OpenCV read image as BGR, not RGB
    label = np.array(Image.open(datafiles["label"]))
    # Map raw dataset label ids to train ids via the helper tables.
    sys.path.insert(0, 'dataset/helpers')
    if self.data_src == 'gta':
        from labels import id2label
    elif self.data_src == 'synthia':
        from labels_cityscapes_synthia import id2label
    # ids without a mapping stay at 255 (= ignore_label)
    label_2_id = 255 * np.ones((256,))
    for l in id2label:
        if l in (-1, 255):
            continue
        label_2_id[l] = id2label[l].trainId
    label = label_2_id[label]
    size = image.shape
    img_name = datafiles["img_name"]
    if self.scale:
        image, label = self.generate_scale_label(image, label)
    image = np.asarray(image, np.float32)
    image = image/255.0 # scale to [0,1]
    image -= self.mean # BGR
    image = image/self.std # np.reshape(self.std,(1,1,3))
    img_h, img_w = label.shape
    # Zero-pad image (and ignore-pad label) up to the crop size if needed.
    pad_h = max(self.crop_h - img_h, 0)
    pad_w = max(self.crop_w - img_w, 0)
    if pad_h > 0 or pad_w > 0:
        img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
            pad_w, cv2.BORDER_CONSTANT,
            value=(0.0, 0.0, 0.0))
        label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
            pad_w, cv2.BORDER_CONSTANT,
            value=(self.ignore_label,))
    else:
        img_pad, label_pad = image, label
    img_h, img_w = label_pad.shape
    # Class mining: with probability mine_chance, center the crop on a
    # randomly chosen pixel of a randomly chosen rare ("mine") class.
    mine_flag = random.uniform(0, 1) < self.mine_chance
    if mine_flag and len(self.mine_id) > 0:
        label_unique = np.unique(label_pad)
        mine_id_temp = np.array([a for a in self.mine_id if a in label_unique]) # if this image has the mine id
        if mine_id_temp.size != 0:
            # decide the single id to be mined
            mine_id_img = mine_id_temp
            sel_idx = random.randint(0, mine_id_temp.size-1)
            sel_mine_id = mine_id_img[sel_idx]
            # seed pixel for the mined id
            mine_id_loc = np.where(label_pad == sel_mine_id) # tuple
            mine_id_len = len(mine_id_loc[0])
            seed_loc = random.randint(0, mine_id_len-1)
            hseed = mine_id_loc[0][seed_loc]
            wseed = mine_id_loc[1][seed_loc]
            # NOTE(review): integer halves rely on Python 2 `/` floor
            # division for int indices — confirm before running on py3.
            half_crop_h = self.crop_h/2
            half_crop_w = self.crop_w/2
            # center crop at the seed
            left_idx = wseed - half_crop_w
            right_idx = wseed + half_crop_w -1
            up_idx = hseed - half_crop_h
            bottom_idx = hseed + half_crop_h - 1
            # shift the left_idx or right_idx if they go beyond the pad margins
            if left_idx < 0:
                left_idx = 0
            elif right_idx > img_w - 1:
                left_idx = left_idx - ( ( half_crop_w - 1 ) - (img_w - 1 - wseed) ) # left_idx shifts to the left by the right beyond length
            if up_idx < 0:
                up_idx = 0
            elif bottom_idx > img_h - 1:
                up_idx = up_idx - ( ( half_crop_h - 1 ) - (img_h - 1 - hseed) ) # up_idx shifts to the up by the bottom beyond length
            h_off = up_idx
            w_off = left_idx
        else:
            # image has none of the mined ids: plain random crop
            h_off = random.randint(0, img_h - self.crop_h)
            w_off = random.randint(0, img_w - self.crop_w)
    else:
        # no mining: plain random crop
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
    image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
    label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
    image = image[:, :, ::-1] # change to RGB
    image = image.transpose((2, 0, 1))
    if self.is_mirror:
        # random horizontal flip (flip = -1) or identity (flip = 1)
        flip = np.random.choice(2) * 2 - 1
        image = image[:, :, ::flip]
        label = label[:, ::flip]
    return image.copy(), label.copy(), np.array(size), img_name, self.reg_weight
class GTA5TestDataSet(data.Dataset):
    """Single-scale evaluation dataset.

    Images are rescaled by `test_scale`, normalized as
    ``(x/255 - mean) / std`` (in BGR order), converted to RGB CHW;
    labels are read as grayscale and kept at native resolution.
    `list_path` holds one tab-separated "<image>\t<label>" pair per
    line, both relative to `root`; `max_iters` tiles the id lists so at
    least that many samples exist.
    """
    def __init__(self, root, list_path, max_iters=None, test_size=(1024, 512), test_scale = 1.0, mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.test_h, self.test_w = test_size
        self.scale = scale
        self.test_scale = test_scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        with open(list_path) as f:
            for item in f:  # stream the file instead of readlines()
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:  # BUG FIX: was `if not max_iters==None`
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            self.files.append({
                "img": osp.join(self.root, img_name),
                "label": osp.join(self.root, label_name),
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of (possibly tiled) records."""
        return len(self.files)

    def __getitem__(self, index):
        """Return (image CHW float32 RGB, label float32, image shape, image name)."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR, not RGB
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        img_name = datafiles["img_name"]
        image = cv2.resize(image, None, fx=self.test_scale, fy=self.test_scale, interpolation = cv2.INTER_LINEAR)
        # the label resolution is intentionally left unchanged
        image = np.asarray(image, np.float32)
        label = np.asarray(label, np.float32)
        image = image/255.0  # scale to [0,1]
        image -= self.mean   # still BGR here
        image = image/self.std
        size = image.shape
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image.copy(), label.copy(), np.array(size), img_name
class GTA5MSTDataSet(data.Dataset):
    """Multi-scale-test variant of GTA5TestDataSet.

    Identical file indexing, but — unlike GTA5TestDataSet — the image is
    NOT normalized here: it is returned as raw float32 values (RGB, CHW)
    after rescaling by `test_scale`.
    """
    def __init__(self, root, list_path, max_iters=None, test_size=(1024, 512), test_scale = 1.0, mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.test_h, self.test_w = test_size
        self.scale = scale
        self.test_scale = test_scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        with open(list_path) as f:
            for item in f:  # stream the file instead of readlines()
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:  # BUG FIX: was `if not max_iters==None`
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            self.files.append({
                "img": osp.join(self.root, img_name),
                "label": osp.join(self.root, label_name),
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of (possibly tiled) records."""
        return len(self.files)

    def __getitem__(self, index):
        """Return (raw image CHW float32 RGB, label float32, image shape, image name)."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR, not RGB
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        img_name = datafiles["img_name"]
        image = cv2.resize(image, None, fx=self.test_scale, fy=self.test_scale, interpolation = cv2.INTER_LINEAR)
        # label resolution left unchanged; no mean/std normalization here
        image = np.asarray(image, np.float32)
        label = np.asarray(label, np.float32)
        size = image.shape
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image.copy(), label.copy(), np.array(size), img_name
class GTA5TestCRFDataSet(data.Dataset):
    """Evaluation dataset that additionally returns the un-normalized,
    un-resized RGB image (`image_crf`) for CRF post-processing.

    The network input follows GTA5TestDataSet: rescaled by `test_scale`,
    normalized ``(x/255 - mean) / std`` in BGR order, then RGB CHW.
    """
    def __init__(self, root, list_path, max_iters=None, test_size=(1024, 512), test_scale = 1.0, mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.test_h, self.test_w = test_size
        self.scale = scale
        self.test_scale = test_scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        with open(list_path) as f:
            for item in f:  # stream the file instead of readlines()
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:  # BUG FIX: was `if not max_iters==None`
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            self.files.append({
                "img": osp.join(self.root, img_name),
                "label": osp.join(self.root, label_name),
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of (possibly tiled) records."""
        return len(self.files)

    def __getitem__(self, index):
        """Return (image CHW float32 RGB, label float32, raw RGB image HWC,
        image shape, image name)."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR, not RGB
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        img_name = datafiles["img_name"]
        # keep an un-normalized full-resolution RGB copy for the CRF
        image_crf = np.asarray(image, np.float32)
        image_crf = image_crf[:, :, ::-1]  # BGR -> RGB
        image = cv2.resize(image, None, fx=self.test_scale, fy=self.test_scale, interpolation = cv2.INTER_LINEAR)
        # label resolution is intentionally left unchanged
        image = np.asarray(image, np.float32)
        label = np.asarray(label, np.float32)
        image = image/255.0  # scale to [0,1]
        image -= self.mean   # still BGR here
        image = image/self.std
        size = image.shape
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image.copy(), label.copy(), image_crf.copy(), np.array(size), img_name
class VOCDataTestSet(data.Dataset):
    """PASCAL-VOC style test set: images only, mean-subtracted and
    zero-padded up to `crop_size`, returned as CHW float32."""

    def __init__(self, root, list_path, crop_size=(505, 505), mean=(128, 128, 128)):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.mean = mean
        self.img_ids = [i_id.strip() for i_id in open(list_path)]
        # one record per id, pointing at <root>/img/<id>.jpg
        self.files = [{"img": osp.join(self.root, "img/%s.jpg" % name)}
                      for name in self.img_ids]

    def __len__(self):
        """Number of test images."""
        return len(self.files)

    def __getitem__(self, index):
        """Load image `index`, subtract the mean, pad to crop size, CHW."""
        record = self.files[index]
        image = cv2.imread(record["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(record["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0,
                                       pad_w, cv2.BORDER_CONSTANT,
                                       value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))
        return image, name, size
if __name__ == '__main__':
    # Smoke test: iterate one loader and display the first batch grid.
    # NOTE(review): VOCDataSet/torchvision/plt must be defined/imported
    # elsewhere in this module.
    dst = VOCDataSet("./data", is_transform=True)
    trainloader = data.DataLoader(dst, batch_size=4)
    # BUG FIX: the loop variable was named `data`, shadowing the
    # torch.utils.data module used on the line above.
    for i, batch in enumerate(trainloader):
        imgs, labels = batch
        if i == 0:
            img = torchvision.utils.make_grid(imgs).numpy()
            img = np.transpose(img, (1, 2, 0))
            img = img[:, :, ::-1]  # RGB -> BGR-style channel flip for display
            plt.imshow(img)
            plt.show()
| 62,075 | 43.276748 | 309 | py |
CRST | CRST-master/deeplab/__init__.py | 1 | 0 | 0 | py | |
CRST | CRST-master/deeplab/metric.py | import os, sys
import numpy as np
from multiprocessing import Pool
import copy_reg
import types
def _pickle_method(m):
    """Reduce a (Python 2) bound/unbound method to a getattr(owner, name)
    pair so multiprocessing can pickle it; registered via copy_reg below."""
    owner = m.im_class if m.im_self is None else m.im_self
    return getattr, (owner, m.im_func.func_name)
copy_reg.pickle(types.MethodType, _pickle_method)
class ConfusionMatrix(object):
    """Accumulates an nclass x nclass confusion matrix M, with M[gt, pred]
    counting pixels; ground-truth value 255 is treated as 'ignore'."""

    def __init__(self, nclass, classes=None):
        self.nclass = nclass
        self.classes = classes  # optional class names, kept for reference
        self.M = np.zeros((nclass, nclass))

    def add(self, gt, pred):
        """Accumulate one flattened (gt, pred) pair of label vectors."""
        # NOTE(review): `<=` admits pred == nclass, which would raise an
        # IndexError below for any non-ignored pixel; kept for
        # compatibility with existing callers.
        assert(np.max(pred) <= self.nclass)
        assert(len(gt) == len(pred))
        for i in range(len(gt)):
            if not gt[i] == 255:  # 255 = ignore label
                self.M[gt[i], pred[i]] += 1.0

    def addM(self, matrix):
        """Merge a partial confusion matrix (e.g. from a worker process)."""
        assert(matrix.shape == self.M.shape)
        self.M += matrix

    def __str__(self):
        # BUG FIX: the original body was `pass`, so str(cm) raised
        # TypeError ('__str__ returned non-string'); show the matrix.
        return str(self.M)

    def recall(self):
        """Mean over classes of M[i,i] / column-sum.  NOTE(review): a
        column-sum denominator is conventionally precision, not recall;
        name kept for interface compatibility."""
        recall = 0.0
        for i in range(self.nclass):  # range: consistent with generateM, py2/py3-safe
            recall += self.M[i, i] / np.sum(self.M[:, i])
        return recall/self.nclass

    def accuracy(self):
        """Mean over classes of M[i,i] / row-sum (per-class recall)."""
        accuracy = 0.0
        for i in range(self.nclass):
            accuracy += self.M[i, i] / np.sum(self.M[i, :])
        return accuracy/self.nclass

    def jaccard(self):
        """Return (mean IoU, per-class IoU list, raw matrix)."""
        jaccard_perclass = []
        for i in range(self.nclass):
            jaccard_perclass.append(self.M[i, i] / (np.sum(self.M[i, :]) + np.sum(self.M[:, i]) - self.M[i, i]))
        return np.sum(jaccard_perclass)/len(jaccard_perclass), jaccard_perclass, self.M

    def generateM(self, item):
        """Build a partial matrix for one (gt, pred) pair; used via Pool.map."""
        gt, pred = item
        m = np.zeros((self.nclass, self.nclass))
        assert(len(gt) == len(pred))
        for i in range(len(gt)):
            if gt[i] < self.nclass:  # skips ignore/out-of-range gt values
                m[gt[i], pred[i]] += 1.0
        return m
if __name__ == '__main__':
    # Standalone evaluation: load predicted and ground-truth label maps,
    # build per-image confusion matrices in parallel, merge them, and
    # write the mean IoU report.  NOTE(review): parse_args and cv2 are
    # expected to be defined/imported elsewhere in this module.
    args = parse_args()
    m_list = []
    data_list = []
    # non-empty ids, one per line
    test_ids = [i.strip() for i in open(args.test_ids) if not i.strip() == '']
    for index, img_id in enumerate(test_ids):
        if index % 100 == 0:
            print('%d processd'%(index))
        pred_img_path = os.path.join(args.pred_dir, img_id+'.png')
        gt_img_path = os.path.join(args.gt_dir, img_id+'.png')
        pred = cv2.imread(pred_img_path, cv2.IMREAD_GRAYSCALE)
        gt = cv2.imread(gt_img_path, cv2.IMREAD_GRAYSCALE)
        # show_all(gt, pred)
        data_list.append([gt.flatten(), pred.flatten()])
    ConfM = ConfusionMatrix(args.class_num)
    f = ConfM.generateM
    # fan out per-image matrix construction across worker processes
    # (relies on the copy_reg method-pickling registration above)
    pool = Pool()
    m_list = pool.map(f, data_list)
    pool.close()
    pool.join()
    # merge partial matrices into the global confusion matrix
    for m in m_list:
        ConfM.addM(m)
    aveJ, j_list, M = ConfM.jaccard()
    with open(args.save_path, 'w') as f:
        f.write('meanIOU: ' + str(aveJ) + '\n')
        f.write(str(j_list)+'\n')
        f.write(str(M)+'\n')
| 2,856 | 27.858586 | 112 | py |
quaterny_opvs | quaterny_opvs-master/emulators/emulator_ptb7_th.py | #!/usr/bin/env python
#==========================================================================
from abstract_emulator import AbstractEmulator
#==========================================================================
class Emulator_PTB7_TH(AbstractEmulator):
    """Emulator for the PTB7-TH system; model artifacts and config live
    in the `details_ptb7_th` directory below `home`."""

    PATH = 'details_ptb7_th'
    HYPERPARAMS = {
        'ACT_FUNC': 'leaky_relu',
        'ACT_FUNC_OUT': 'relu',
        'LEARNING_RATE': 0.001,
        'MLP_SIZE': 120,
        'REG': 10**-2.5,
        'DROP': 0.2}

    def __init__(self, home = './'):
        self.home = home
        self.path = f'{self.home}/{self.PATH}'
        AbstractEmulator.__init__(self)
        self._load_config(path = self.path)

    def load_dataset(self):
        """Read the index splits and the pickled dataset from self.path."""
        self._load_indices(path = self.path)
        self._load_dataset(path = self.path)

    def run_experiment(self, features, full_stats = False):
        """Predict for `features`; return the full stats dict when
        `full_stats`, otherwise just the configured objective."""
        prediction = self.predict(features)
        if full_stats:
            return prediction
        return prediction[self.config['objective']['name']]
#===============================================================================
def train():
    """Fit the PTB7-TH emulator on its stored dataset and checkpoint it."""
    emulator = Emulator_PTB7_TH()
    emulator.load_dataset()
    batch = len(emulator.train_features[0])
    emulator.initialize_models(batch_size=batch)
    emulator.set_hyperparameters(emulator.HYPERPARAMS)
    emulator.construct_models()
    emulator.train(path=emulator.path, plot=True)
def predict_test_set(plot=True):
    """Run predictions on the held-out test set; optionally scatter-plot
    predicted vs. true values with the R^2 score in the title."""
    emulator = Emulator_PTB7_TH()
    emulator.load_dataset()
    emulator.initialize_models(batch_size=len(emulator.train_features[0]))
    emulator.set_hyperparameters(emulator.HYPERPARAMS)
    predictions = emulator.predict(emulator.test_features, reshaped = True)
    if plot:
        from sklearn.metrics import r2_score
        true_values = emulator.test_targets
        pred_values = predictions['averages']
        r2 = r2_score(emulator.test_targets, pred_values)
        print('R2', r2)
        import matplotlib.pyplot as plt
        import seaborn as sns  # imported for its matplotlib styling side effects
        fig = plt.figure(figsize = (4, 4))
        plt.plot([0.0, 0.7], [0.0, 0.7], color = 'k')  # identity line
        plt.plot(true_values, pred_values, ls = '', marker = 'o', color = 'k', markersize = 4)
        plt.plot(true_values, pred_values, ls = '', marker = 'o', color = 'b', markersize = 2)
        plt.title('PTB7-TH (%.3f)' % r2)
        plt.tight_layout()
        plt.show()
#===============================================================================
# Script entry point: evaluate the stored emulator on its test split.
if __name__ == '__main__':
    predict_test_set()
| 2,370 | 27.914634 | 90 | py |
quaterny_opvs | quaterny_opvs-master/emulators/model_probabilistic.py | #!/usr/bin/env python
import os
import pickle
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Short aliases for the TensorFlow-Probability namespaces used below.
tf_bijs = tfp.bijectors
tf_dist = tfp.distributions
tf_mean_field = tfp.layers.default_mean_field_normal_fn
#==============================================================
class RegressionModel(object):
    """Variational (mean-field) MLP regressor built on TensorFlow
    Probability DenseLocalReparameterization layers.

    One instance owns one TF graph/name scope (one per cross-validation
    fold); feature/target scaling statistics are read from the pickled
    `dataset_details` file produced by the emulator.
    NOTE(review): TF1-style graph/session/placeholder API throughout.
    """

    # Class-level hyperparameter defaults; overridable via set_hyperparameters().
    NUM_SAMPLES = 1             # posterior samples drawn per predict() call
    ACT_FUNC = 'leaky_relu'     # hidden-layer activation key (see act_funcs)
    ACT_FUNC_OUT = 'linear'     # output-layer activation key
    LEARNING_RATE = 0.75 * 10**-3
    MLP_SIZE = 48               # hidden-layer width
    REG = 1e-3                  # weight of the KL term in the loss
    DROP = 0.1                  # dropout rate (active only while training)

    def __init__(self, graph, dataset_details, config, scope, batch_size, max_iter = 10**8):
        """Bind the model to `graph`/`scope` and load scaling statistics.

        graph:            tf.Graph the model builds into
        dataset_details:  path to the pickled stats dict
        config:           'general' section of the emulator config
        scope:            name scope for this fold's ops
        batch_size:       fixed placeholder batch size
        max_iter:         hard cap on training epochs
        """
        self.graph = graph
        self.scope = scope
        self.config = config
        self.batch_size = batch_size
        self.dataset_details = dataset_details
        self.max_iter = max_iter
        self.is_graph_constructed = False
        self._read_scaling_details()

    def _generator(self, features, targets, batch_size):
        """Infinite generator of shuffled (features, targets) mini-batches."""
        indices = np.arange(len(features))
        while True:
            np.random.shuffle(indices)
            batch_features = features[indices[:batch_size]]
            batch_targets = targets[indices[:batch_size]]
            yield (batch_features, batch_targets)

    def _read_scaling_details(self):
        """Unpickle dataset statistics and cache feature/target shapes."""
        with open(self.dataset_details, 'rb') as content:
            details = pickle.load(content)
        self.scaling = {key: details[key] for key in details}
        self.features_shape = self.scaling['features_shape']
        self.targets_shape = self.scaling['targets_shape']

    def get_scaled_features(self, features):
        """Scale raw features per the configured feature_rescaling mode."""
        if self.config['feature_rescaling'] == 'standardization':
            scaled = (features - self.scaling['mean_features']) / self.scaling['std_features']
        elif self.config['feature_rescaling'] == 'unit_cube':
            scaled = (features - self.scaling['min_features']) / (self.scaling['max_features'] - self.scaling['min_features'])
        return scaled

    def get_scaled_targets(self, targets):
        """Scale raw targets per the configured target_rescaling mode."""
        if self.config['target_rescaling'] == 'standardization':
            scaled = (targets - self.scaling['mean_targets']) / self.scaling['std_targets']
        elif self.config['target_rescaling'] == 'unit_cube':
            scaled = (targets - self.scaling['min_targets']) / (self.scaling['max_targets'] - self.scaling['min_targets'])
        elif self.config['target_rescaling'] == 'mean':
            scaled = targets / self.scaling['mean_targets']
        elif self.config['target_rescaling'] == 'same':
            scaled = targets
        return scaled

    def get_raw_targets(self, targets):
        """Invert get_scaled_targets: map scaled targets back to raw units."""
        if self.config['target_rescaling'] == 'standardization':
            raw = targets * self.scaling['std_targets'] + self.scaling['mean_targets']
        elif self.config['target_rescaling'] == 'unit_cube':
            raw = (self.scaling['max_targets'] - self.scaling['min_targets']) * targets + self.scaling['min_targets']
        elif self.config['target_rescaling'] == 'mean':
            raw = targets * self.scaling['mean_targets']
        elif self.config['target_rescaling'] == 'same':
            raw = targets
        return raw

    def set_hyperparameters(self, hyperparam_dict):
        """Override the class-level hyperparameter defaults on this instance."""
        for key, value in hyperparam_dict.items():
            setattr(self, key, value)

    def construct_graph(self):
        """Build the 3-hidden-layer variational MLP and output distribution."""
        act_funcs = {
            'linear': lambda y: y,
            'leaky_relu': lambda y: tf.nn.leaky_relu(y, 0.2),
            'relu': lambda y: tf.nn.relu(y),
            'softmax': lambda y: tf.nn.softmax(y),
            'softplus': lambda y: tf.nn.softplus(y),
            'softsign': lambda y: tf.nn.softsign(y),
            'sigmoid': lambda y: tf.nn.sigmoid(y),
        }
        mlp_activation = act_funcs[self.ACT_FUNC]
        out_activation = act_funcs[self.ACT_FUNC_OUT]
        with self.graph.as_default():
            with tf.name_scope(self.scope):
                # is_training toggles dropout at feed time
                self.is_training = tf.compat.v1.placeholder(tf.bool, shape = ())
                self.x_ph = tf.compat.v1.placeholder(tf.float32, [self.batch_size, self.features_shape[1]])
                self.y_ph = tf.compat.v1.placeholder(tf.float32, [self.batch_size, self.targets_shape[1]])
                # hidden layer 0 (+ dropout)
                self.layer_0 = tfp.layers.DenseLocalReparameterization(
                    self.MLP_SIZE,
                    activation = mlp_activation,
                )
                layer_0_act = self.layer_0(self.x_ph)
                layer_0_out = tf.layers.dropout(layer_0_act, rate = self.DROP, training = self.is_training)
                # hidden layer 1 (+ dropout)
                self.layer_1 = tfp.layers.DenseLocalReparameterization(
                    self.MLP_SIZE,
                    activation = mlp_activation,
                )
                layer_1_act = self.layer_1(layer_0_out)
                layer_1_out = tf.layers.dropout(layer_1_act, rate = self.DROP, training = self.is_training)
                # hidden layer 2 (no dropout)
                self.layer_2 = tfp.layers.DenseLocalReparameterization(
                    self.MLP_SIZE,
                    activation = mlp_activation,
                )
                layer_2_act = self.layer_2(layer_1_out)
                layer_2_out = layer_2_act
                # output layer: one unit per target dimension
                self.layer_3 = tfp.layers.DenseLocalReparameterization(
                    self.targets_shape[1],
                    activation = out_activation,
                )
                layer_3_out = self.layer_3(layer_2_out)
                self.net_out = layer_3_out
                # learned (positive, via softplus) observation noise scale
                self.scales = tf.nn.softplus(tf.Variable(tf.zeros(1)))
                # predictive distribution: Normal around the network output
                self.y_pred = tf_dist.Normal(self.net_out, scale = self.scales)

    def construct_inference(self):
        """Assemble the ELBO-style loss (neg. log-likelihood + REG * KL),
        the Adam train op, and a fresh initialized session."""
        self.is_graph_constructed = True
        with self.graph.as_default():
            # KL divergences collected by the variational layers, averaged per sample
            self.kl = sum(self.layer_0.losses) / float(self.batch_size)
            self.kl += sum(self.layer_1.losses) / float(self.batch_size)
            self.kl += sum(self.layer_2.losses) / float(self.batch_size)
            self.kl += sum(self.layer_3.losses) / float(self.batch_size)
            self.reg_loss = - tf.reduce_mean( self.y_pred.log_prob(self.y_ph) )
            self.loss = self.reg_loss + self.REG * self.kl
            self.optimizer = tf.compat.v1.train.AdamOptimizer(self.LEARNING_RATE)
            self.train_op = self.optimizer.minimize(self.loss)
            self.init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
        self.sess = tf.compat.v1.Session(graph = self.graph)
        with self.sess.as_default():
            self.sess.run(self.init_op)

    def train(self, train_features, train_targets, valid_features, valid_targets, model_path, plot = False, targets = 'same'):
        """Train with early stopping on validation R^2.

        Evaluates every 200 epochs; checkpoints to <model_path>/model.ckpt
        whenever validation R^2 improves and stops once the best score is
        more than 100 evaluations old.  `targets='probs'` maps targets
        through a sigmoid before scoring.  Appends to
        <model_path>/logfile.dat; `plot` opens a live diagnostics figure.
        """
        from sklearn.metrics import r2_score
        if not os.path.isdir(model_path): os.mkdir(model_path)
        # truncate the log file
        logfile = open('%s/logfile.dat' % model_path, 'w')
        logfile.close()
        if not self.is_graph_constructed: self.construct_inference()
        train_feat_scaled = self.get_scaled_features(train_features)
        train_targ_scaled = self.get_scaled_targets(train_targets)
        valid_feat_scaled = self.get_scaled_features(valid_features)
        valid_targ_scaled = self.get_scaled_targets(valid_targets)
        # per-dimension target bounds, used only for the diagnostic plots
        min_target, max_target = np.minimum(np.amin(train_targets, axis = 0), np.amin(valid_targets, axis = 0)), np.maximum(np.amax(train_targets, axis = 0), np.amax(valid_targets, axis = 0))
        if targets == 'probs':
            min_target = 1. / (1. + np.exp( - min_target))
            max_target = 1. / (1. + np.exp( - max_target))
        batch_train_gen = self._generator(train_feat_scaled, train_targ_scaled, self.batch_size)
        batch_valid_gen = self._generator(valid_feat_scaled, valid_targ_scaled, self.batch_size)
        train_errors, valid_errors = [], []
        with self.graph.as_default():
            with self.sess.as_default():
                self.saver = tf.compat.v1.train.Saver()
                if plot:
                    import matplotlib.pyplot as plt
                    import seaborn as sns
                    colors = sns.color_palette('RdYlGn', 4)
                    plt.ion()
                    plt.style.use('dark_background')
                    fig = plt.figure(figsize = (14, 5))
                    ax0 = plt.subplot2grid((1, 3), (0, 0))
                    ax1 = plt.subplot2grid((1, 3), (0, 1))
                    ax2 = plt.subplot2grid((1, 3), (0, 2))
                for epoch in range(self.max_iter):
                    train_x, train_y = next(batch_train_gen)
                    valid_x, valid_y = next(batch_valid_gen)
                    self.sess.run(self.train_op, feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: True})
                    # periodic evaluation / checkpoint / early-stop check
                    if epoch % 200 == 0:
                        valid_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: valid_x, self.is_training: False})
                        valid_y = self.get_raw_targets(valid_y)
                        valid_preds = self.get_raw_targets(valid_preds)
                        if targets == 'probs':
                            valid_y = 1. / (1. + np.exp( - valid_y))
                            valid_preds = 1. / (1. + np.exp( - valid_preds))
                        try:
                            valid_r2 = r2_score(valid_y, valid_preds)
                        except:
                            valid_r2 = np.nan
                        valid_errors.append(valid_r2)
                        # print current likelihood and KL terms
                        _1_, _2_ = self.sess.run([self.reg_loss, self.kl], feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: False})
                        print('...', _1_, _2_)
                        train_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: train_x, self.is_training: False})
                        train_y = self.get_raw_targets(train_y)
                        train_preds = self.get_raw_targets(train_preds)
                        try:
                            train_r2 = r2_score(train_y, train_preds)
                        except:
                            train_r2 = np.nan
                        train_errors.append(train_r2)
                        if targets == 'probs':
                            train_y = 1. / (1. + np.exp( - train_y))
                            train_preds = 1. / (1. + np.exp( - train_preds))
                        logfile = open('%s/logfile.dat' % model_path, 'a')
                        logfile.write('%d\t%.5f\t%.5f\n' % (epoch, train_r2, valid_r2))
                        logfile.close()
                        # define break condition --> last improvement happened more than 100 evaluations ago
                        max_r2_index = np.argmax(valid_errors)
                        if len(valid_errors) - max_r2_index > 100: break
                        # checkpoint only when this evaluation is the new best
                        if max_r2_index == len(valid_errors) - 1:
                            self.saver.save(self.sess, '%s/model.ckpt' % model_path)
                        new_line = 'EVALUATION: %d (%d)\t%.5f\t%.5f' % ( len(valid_errors) - max_r2_index, len(valid_errors), train_errors[-1], valid_errors[-1])
                        print(new_line)
                        if plot:
                            # refresh the live parity plots and R^2 traces
                            train_preds_scaled = train_preds
                            train_trues_scaled = train_y
                            valid_preds_scaled = valid_preds
                            valid_trues_scaled = valid_y
                            ax0.cla()
                            ax1.cla()
                            ax2.cla()
                            ax0.plot([min_target[0], max_target[0]], [min_target[0], max_target[0]], lw = 3, color = 'w', alpha = 0.5)
                            ax0.plot(train_trues_scaled[:, 0], train_preds_scaled[:, 0], marker = '.', ls = '', color = colors[-1], alpha = 0.5)
                            ax0.plot(valid_trues_scaled[:, 0], valid_preds_scaled[:, 0], marker = '.', ls = '', color = colors[0], alpha = 0.5)
                            if len(min_target) > 1:
                                # second target dimension, when present
                                ax1.plot([min_target[1], max_target[1]], [min_target[1], max_target[1]], lw = 3, color = 'w', alpha = 0.5)
                                ax1.plot(train_trues_scaled[:, 1], train_preds_scaled[:, 1], marker = '.', ls = '', color = colors[-1], alpha = 0.5)
                                ax1.plot(valid_trues_scaled[:, 1], valid_preds_scaled[:, 1], marker = '.', ls = '', color = colors[0], alpha = 0.5)
                            RANGE = 50
                            ax2.plot(np.arange(len(train_errors[-RANGE:])) + len(train_errors[-RANGE:]), train_errors[-RANGE:], lw = 3, color = colors[-1])
                            ax2.plot(np.arange(len(valid_errors[-RANGE:])) + len(valid_errors[-RANGE:]), valid_errors[-RANGE:], lw = 3, color = colors[0])
                            plt.pause(0.05)

    def restore(self, model_path):
        """Load a checkpoint into a fresh session; True on success."""
        if not self.is_graph_constructed: self.construct_inference()
        self.sess = tf.compat.v1.Session(graph = self.graph)
        self.saver = tf.compat.v1.train.Saver()
        try:
            self.saver.restore(self.sess, model_path)
            return True
        except AttributeError:
            return False

    def predict(self, input_raw):
        """Draw NUM_SAMPLES posterior predictions for raw-unit inputs.

        Returns a dict with 'samples' (all draws), 'averages' (mean over
        draws) and 'uncertainties' (std over draws), all in raw units.
        """
        input_scaled = self.get_scaled_features(input_raw)
        with self.sess.as_default():
            output_scaled = []
            for _ in range(self.NUM_SAMPLES):
                output_scaled.append(self.sess.run(self.net_out, feed_dict = {self.x_ph: input_scaled, self.is_training: False}))
            output_scaled = np.array(output_scaled)
        output_raw = self.get_raw_targets(output_scaled)
        output_raw_mean = np.mean(output_raw, axis = 0)
        output_raw_std = np.std(output_raw, axis = 0)
        return {'samples': output_raw, 'averages': output_raw_mean, 'uncertainties': output_raw_std}
#==============================================================
| 11,713 | 35.154321 | 185 | py |
quaterny_opvs | quaterny_opvs-master/emulators/abstract_emulator.py | #!/usr/bin/env python
import os, sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging
sys.path.append('Emulators')  # make sibling emulator/model modules importable
import json
import pickle
import numpy as np
import tensorflow as tf
#=====================================================================
class AbstractEmulator(object):
def __init__(self, num_folds = 5):
    """Set up an emulator shell with `num_folds` cross-validation folds;
    all lazily-loaded state starts out as None."""
    self.num_folds = num_folds
    # populated later by the load_* methods
    for attr in ('dataset', 'dataset_stats', 'indices', 'model_batch_size', 'file_path'):
        setattr(self, attr, None)
def set_file_path(self, path, file_name):
    """Record the directory and file name used for model artifacts."""
    self.path = path
    self.file_name = file_name
def _load_config(self, path = './'):
    """Read <path>/config.json, derive the parameter count, and cache
    the emulator hyperparameters and the path."""
    with open('%s/config.json' % path) as content:
        self.config = json.loads(content.read())
    self.config['num_params'] = len(self.config['parameters'])
    self.hyperparameters = self.config['emulator_parameters']
    self.path = path
def set_hyperparameters(self, hyperparameters):
    """Override the hyperparameter dictionary used to build the models."""
    self.hyperparameters = hyperparameters
def _load_indices(self, path = './'):
    """Load <path>/indices.pkl and unpack the work/test and per-fold
    train/valid index arrays.

    Falls back to latin1 decoding for pickles written under Python 2.
    """
    file_name = '%s/indices.pkl' % path
    try:
        with open(file_name, 'rb') as content:
            self.indices = pickle.load(content)
    except UnicodeDecodeError:
        # pickle written by Python 2 -- retry with a permissive encoding
        with open(file_name, 'rb') as content:
            self.indices = pickle.load(content, encoding = 'latin1')
    self.work_indices = self.indices['work_indices']
    self.test_indices = self.indices['test_indices']
    folds = self.indices['cross_validation_sets']
    self.train_indices = [folds[fold]['train_indices'] for fold in range(self.num_folds)]
    self.valid_indices = [folds[fold]['valid_indices'] for fold in range(self.num_folds)]
def _store_dataset_stats(self, path = './'):
    """Compute per-column statistics of features/targets and pickle them
    to <path>/dataset_stats.pkl (consumed by the regression models)."""
    stats_dict = {
        'min_features': np.amin(self.features, axis = 0),
        'max_features': np.amax(self.features, axis = 0),
        'min_targets': np.amin(self.targets, axis = 0),
        'max_targets': np.amax(self.targets, axis = 0),
        'mean_features': np.mean(self.features, axis = 0),
        'std_features': np.std(self.features, axis = 0),
        'mean_targets': np.mean(self.targets, axis = 0),
        'std_targets': np.std(self.targets, axis = 0),
        'features_shape': self.features.shape,
        'targets_shape': self.targets.shape,
    }
    with open('%s/dataset_stats.pkl' % path, 'wb') as content:
        pickle.dump(stats_dict, content)
    self.dataset_stats = '%s/dataset_stats.pkl' % path
def _load_dataset(self, path = './'):
    """Load <path>/dataset.pkl and split it into work/test and per-fold
    train/valid arrays according to the previously loaded indices.

    For a 'simplex' domain the raw parameter vectors are normalized to
    sum to one and the redundant last component is dropped.  Validation
    arrays are tiled to (approximately) the training-set length so the
    fixed-batch generators can consume both.
    """
    file_name = '%s/dataset.pkl' % path
    with open(file_name, 'rb') as content:
        self.dataset = pickle.load(content)
    values = self.dataset['values']
    raw_values = self.dataset['values']
    if self.config['general']['domain'] == 'simplex':
        # normalize onto the simplex and drop the dependent last entry
        params = []
        for element in self.dataset['params']:
            vector = element / np.sum(element)
            params.append(vector[:-1])
        params = np.array(params)
    else:
        params = self.dataset['params']
    raw_params = self.dataset['params']
    self.all_features = raw_params
    self.all_targets = raw_values
    self.features = params[self.work_indices]
    self.targets = values[self.work_indices]
    if len(self.targets.shape) == 1:
        # ensure 2-D (n, 1) targets
        self.targets = np.reshape(self.targets, (len(self.targets), 1))
    self._store_dataset_stats(path = path)
    self.test_features = params[self.test_indices]
    self.test_targets = values[self.test_indices]
    # NOTE(review): this guard rechecks self.targets (already reshaped
    # above) rather than self.test_targets -- looks like a copy-paste
    # slip; confirm whether test targets can arrive 1-D.
    if len(self.targets.shape) == 1:
        self.test_targets = np.reshape(self.test_targets, (len(self.test_targets), 1))
    self.train_features, self.train_targets = [], []
    self.valid_features, self.valid_targets = [], []
    for index in range(self.num_folds):
        train_features = params[self.train_indices[index]]
        valid_features = params[self.valid_indices[index]]
        train_targets = values[self.train_indices[index]]
        if len(train_targets.shape) == 1:
            train_targets = np.reshape(train_targets, (len(train_targets), 1))
        valid_targets = values[self.valid_indices[index]]
        if len(valid_targets.shape) == 1:
            valid_targets = np.reshape(valid_targets, (len(valid_targets), 1))
        self.train_features.append(train_features)
        self.train_targets.append(train_targets)
        # tile validation data to roughly match the training length
        self.valid_features.append(np.concatenate([valid_features for i in range(len(train_features) // len(valid_features))]))
        self.valid_targets.append(np.concatenate([valid_targets for i in range(len(train_targets) // len(valid_targets))]))
def initialize_models(self, batch_size = 1):
    """Create one fresh (untrained) model per cross-validation fold.

    The model class is chosen from the ``general/model`` config entry
    ('probabilistic' or 'deterministic'); anything else raises
    NotImplementedError.
    """
    model_kind = self.config['general']['model']
    if model_kind == 'probabilistic':
        from model_probabilistic import RegressionModel as Model
    elif model_kind == 'deterministic':
        from model_deterministic import RegressionModel as Model
    else:
        raise NotImplementedError
    # one dedicated TF graph per fold; each model is built in its own graph
    self.graphs = [tf.Graph() for _ in range(self.num_folds)]
    self.models = []
    for fold_index, graph in enumerate(self.graphs):
        with graph.as_default():
            self.models.append(Model(graph, self.dataset_stats,
                                     self.config['general'],
                                     scope = 'fold_%d' % fold_index,
                                     batch_size = batch_size))
def construct_models(self, hyperparameters = None):
    """Build the computation graph of every fold's model.

    Parameters
    ----------
    hyperparameters : dict or None
        Hyperparameter set handed to each model; falls back to
        ``self.hyperparameters`` when None.
    """
    hps = self.hyperparameters if hyperparameters is None else hyperparameters
    for idx, model in enumerate(self.models):
        # each model must be constructed inside its own fold graph
        with self.graphs[idx].as_default():
            model.set_hyperparameters(hps)
            model.construct_graph()
def load_models(self, path = None, batch_size = 1):
    # Rebuild one model per fold and restore its weights from disk.
    # Returns True when every fold could be restored (and records the
    # batch size the graphs were built for in self.model_batch_size);
    # returns False and sets self.model_batch_size to None otherwise.
    print('... loading models')
    if path is None:
        path = self.path
    if self.dataset_stats is None:
        # fall back to the stats file written by _store_dataset_stats
        self.dataset_stats = '%s/dataset_stats.pkl' % path
    if self.config['general']['model'] == 'probabilistic':
        from model_probabilistic import RegressionModel as Model
    elif self.config['general']['model'] == 'deterministic':
        from model_deterministic import RegressionModel as Model
    else:
        raise NotImplementedError
    self.models = []
    self.graphs = [tf.Graph() for i in range(self.num_folds)]
    for fold_index in range(self.num_folds):
        with self.graphs[fold_index].as_default():
            model = Model(self.graphs[fold_index], self.dataset_stats, self.config['general'], scope = 'fold_%d' % fold_index, batch_size = batch_size)
            model.set_hyperparameters(self.hyperparameters)
            model.construct_graph()
            if model.restore('%s/Fold_%d/model.ckpt' % (path, fold_index)):
                self.models.append(model)
            else:
                print('could not restore model: ', fold_index)
                break
    else:
        # for/else: this branch runs only if no fold failed (no break)
        self.model_batch_size = batch_size
        return True
    self.model_batch_size = None
    return False
def train(self, path = './', plot = False):
    """Train every fold's model on its own train/validation split.

    Checkpoints go to ``<path>/Fold_<i>`` for fold i.
    """
    target_kind = self.config['general']['targets']
    for idx, model in enumerate(self.models):
        fold_path = '%s/Fold_%d' % (path, idx)
        model.train(self.train_features[idx], self.train_targets[idx],
                    self.valid_features[idx], self.valid_targets[idx],
                    model_path = fold_path, plot = plot,
                    targets = target_kind)
def predict(self, params, reshaped = False):
    # Run the cross-validation ensemble on *params* and aggregate the
    # per-fold sample draws into mean predictions plus ensemble
    # uncertainties.  Set reshaped=True when *params* already live in the
    # reduced simplex representation (unit-sum, last component dropped).
    if len(params.shape) == 1:
        # promote a single parameter vector to a batch of one
        params = np.reshape(params, (1, len(params)))
    if self.config['general']['domain'] == 'simplex' and not reshaped:
        # same simplex projection as in _load_dataset
        features = []
        for element in params:
            vector = element / np.sum(element)
            features.append(vector[:-1])
        features = np.array(features)
    else:
        features = params
    # encode periodic parameters (config entries containing 'rep') as
    # (sin, cos) pairs; all others pass through unchanged
    polar_features = []
    for feature_index, feature in enumerate(features):
        polar_feature = []
        for elem_index, element in enumerate(feature):
            if 'rep' in self.config['parameters'][elem_index]:
                polar_feature.extend([np.sin(element), np.cos(element)])
            else:
                polar_feature.append(element)
        polar_features.append(polar_feature)
    features = np.array(polar_features)
    if not len(features) == self.model_batch_size:
        # the graphs are built for a fixed batch size; rebuild/reload the
        # models whenever the requested batch differs
        self.load_models(batch_size = len(features))
    samples = []
    for fold_index in range(self.num_folds):
        single_pred_dict = self.models[fold_index].predict(features) # samples, averages, uncertainties
        samples.append(single_pred_dict['samples'])
    samples = np.array(samples)
    # if self.config['general']['targets'] == 'simplex':
    # samples = 1. / (1. + np.exp( - samples) )
    # shape of samples: (# models, # draws, # features, # dim features)
    averages = np.mean(np.mean(samples, axis = 0), axis = 0)
    uncertainties = np.std(np.mean(samples, axis = 0), axis = 0)
    pred_dict = {'samples': samples, 'averages': averages, 'uncertainties': uncertainties}
    # also expose the averaged objective under its configured name
    pred_dict[self.config['objective']['name']] = np.squeeze(pred_dict['averages'])
    return pred_dict
#=====================================================================
if __name__ == '__main__':
    # library module: nothing to execute directly
    pass
| 8,804 | 34.22 | 143 | py |
quaterny_opvs | quaterny_opvs-master/emulators/emulator_pbq_qf.py | #!/usr/bin/env python
#===============================================================================
from abstract_emulator import AbstractEmulator
#===============================================================================
class Emulator_PBQ_QF(AbstractEmulator):
    """Trained emulator for the PBQ:QF photobleaching experiment.

    Thin wrapper around AbstractEmulator that points it at the
    ``details_pbq_qf`` model directory and its fixed hyperparameters.
    """

    PATH = 'details_pbq_qf'
    HYPERPARAMS = {
        'ACT_FUNC': 'leaky_relu',
        'ACT_FUNC_OUT': 'relu',
        'LEARNING_RATE': 0.001,
        'MLP_SIZE': 120,
        'REG': 10**-2.0,
        'DROP': 0.2}

    def __init__(self, home = './'):
        self.home = home
        self.path = f'{self.home}/{self.PATH}'
        AbstractEmulator.__init__(self)
        self._load_config(path = self.path)

    def load_dataset(self):
        """Load the index split and the dataset from the model directory."""
        self._load_indices(path = self.path)
        self._load_dataset(path = self.path)

    def run_experiment(self, features, full_stats = False):
        """Predict the objective for *features*.

        Returns the scalar objective prediction, or the full prediction
        dictionary when *full_stats* is True.
        """
        prediction = self.predict(features)
        return prediction if full_stats else prediction[self.config['objective']['name']]
#===============================================================================
def train():
    """Train the PBQ:QF emulator on its stored dataset.

    Builds one model per cross-validation fold (batch size equal to the
    size of the first fold's training set) and trains with plotting on.
    """
    emulator = Emulator_PBQ_QF()
    emulator.load_dataset()
    n_train = len(emulator.train_features[0])
    emulator.initialize_models(batch_size=n_train)
    emulator.set_hyperparameters(emulator.HYPERPARAMS)
    emulator.construct_models()
    emulator.train(path=emulator.path, plot=True)
def predict_test_set(plot=True):
    ''' runs predictions on the test set

    Loads the trained emulator, predicts on the held-out test features
    (already in the reduced representation, hence reshaped=True), and
    optionally shows a parity plot annotated with the R^2 score.
    '''
    emulator = Emulator_PBQ_QF()
    emulator.load_dataset()
    emulator.initialize_models(batch_size=len(emulator.train_features[0]))
    emulator.set_hyperparameters(emulator.HYPERPARAMS)
    pred_dict = emulator.predict(emulator.test_features, reshaped = True)
    if plot:
        from sklearn.metrics import r2_score
        test_targets = emulator.test_targets
        pred_targets = pred_dict['averages']
        # FIX: use the local test_targets alias consistently (the original
        # mixed it with emulator.test_targets)
        r2 = r2_score(test_targets, pred_targets)
        print('R2', r2)
        import matplotlib.pyplot as plt
        import seaborn as sns  # noqa: F401 -- kept for its plot-styling side effect
        # FIX: dropped the unused `fig =` binding
        plt.figure(figsize = (4, 4))
        # parity line plus black-outlined blue markers
        plt.plot([0.0, 0.7], [0.0, 0.7], color = 'k')
        plt.plot(test_targets, pred_targets, ls = '', marker = 'o', color = 'k', markersize = 4)
        plt.plot(test_targets, pred_targets, ls = '', marker = 'o', color = 'b', markersize = 2)
        plt.title('PBQ-QF (%.3f)' % r2)
        plt.tight_layout()
        plt.show()
#===============================================================================
if __name__ == '__main__':
    # evaluate the trained emulator on the held-out test set
    predict_test_set()
| 2,408 | 28.378049 | 90 | py |
quaterny_opvs | quaterny_opvs-master/emulators/details_ptb7_th/parse_grid.py | #!/usr/bin/env python
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Parse the PTB7:TH photobleaching grid CSV (four mixture fractions
# followed by a target value per row) into the {'params', 'values'}
# pickle layout expected by the emulator.
file_name = 'photobleaching_mixture01_grid.csv'
features = []
targets = []
with open(file_name, 'r') as content:
    for line in content:
        # strip any UTF-8/UTF-16 BOM residue before splitting the row
        linecontent = line.strip().strip('\xef').strip('\xbb').strip('\xbf').strip('\ufeff').split(',')
        feature = np.array([float(element) for element in linecontent[:4]])
        target = float(linecontent[-1])
        if target > 0.:
            # keep only rows with positive (physically meaningful) targets
            features.append(feature)
            targets.append(target)
features = np.array(features)
targets = np.array(targets)
# sanity plot: pairwise feature distance vs pairwise target difference
# (a quick visual smoothness check of the response surface)
d_features = []
for index in range(len(features)):
    for jndex in range(index + 1, len(features)):
        d_features.append(np.linalg.norm(features[index] - features[jndex]))
d_targets = []
for index in range(len(targets)):
    for jndex in range(index + 1, len(targets)):
        d_targets.append(np.abs(targets[index] - targets[jndex]))
plt.scatter(d_features, d_targets)
plt.show()
print(features.shape)
data = {'params': np.array(features), 'values': np.array(targets)}
with open('dataset.pkl', 'wb') as content:
    pickle.dump(data, content)
quaterny_opvs | quaterny_opvs-master/emulators/details_ptb7_th/generate_indices.py | #!/usr/bin/env python
import numpy as np
import pickle
#=========================================================
# total number of samples and size of each cross-validation fold
NUM_SPECTRA = 1040
FOLD_SIZE = 170
np.random.seed(120415)  # fixed seed: the split is reproducible
#=========================================================
indices = np.arange(NUM_SPECTRA)
np.random.shuffle(indices)
# last 190 shuffled indices are held out for testing; the first 850 form
# the cross-validation "work" set
test_indices = indices[850:]
work_indices = indices[:850]
#=========================================================
# rotate the work set by FOLD_SIZE per fold -> disjoint validation folds
rotation_indices = work_indices.copy()
cross_validation_sets = []
for index in range(len(work_indices) // FOLD_SIZE):
    valid_indices = rotation_indices[:FOLD_SIZE]
    train_indices = rotation_indices[FOLD_SIZE:]
    cross_validation_dict = {'train_indices': train_indices.copy(), 'valid_indices': valid_indices.copy()}
    cross_validation_sets.append(cross_validation_dict)
    rotation_indices = np.roll(rotation_indices, FOLD_SIZE)
print('generated %d folds' % (len(cross_validation_sets)))
data_set = {'test_indices': test_indices, 'work_indices': work_indices, 'cross_validation_sets': cross_validation_sets}
pickle.dump(data_set, open('indices.pkl', 'wb'))
| 1,082 | 25.414634 | 119 | py |
quaterny_opvs | quaterny_opvs-master/emulators/details_pbq_qf/parse_grid.py | #!/usr/bin/env python
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Parse the PBQ:QF photobleaching grid CSV (four mixture fractions
# followed by a target value per row) into the {'params', 'values'}
# pickle layout expected by the emulator.
file_name = 'photobleaching_mixture00_grid.csv'
features = []
targets = []
with open(file_name, 'r') as content:
    for line in content:
        # strip any UTF-8/UTF-16 BOM residue before splitting the row
        linecontent = line.strip().strip('\xef').strip('\xbb').strip('\xbf').strip('\ufeff').split(',')
        # FIX: removed stray per-line debug print(linecontent), which the
        # sibling script (details_ptb7_th/parse_grid.py) does not have
        feature = np.array([float(element) for element in linecontent[:4]])
        target = float(linecontent[-1])
        if target > 0.:
            # keep only rows with positive (physically meaningful) targets
            features.append(feature)
            targets.append(target)
features = np.array(features)
targets = np.array(targets)
# sanity plot: pairwise feature distance vs pairwise target difference
d_features = []
for index in range(len(features)):
    for jndex in range(index + 1, len(features)):
        d_features.append(np.linalg.norm(features[index] - features[jndex]))
d_targets = []
for index in range(len(targets)):
    for jndex in range(index + 1, len(targets)):
        d_targets.append(np.abs(targets[index] - targets[jndex]))
plt.scatter(d_features, d_targets)
plt.show()
print(features.shape)
data = {'params': np.array(features), 'values': np.array(targets)}
with open('dataset.pkl', 'wb') as content:
    pickle.dump(data, content)
quaterny_opvs | quaterny_opvs-master/emulators/details_pbq_qf/generate_indices.py | #!/usr/bin/env python
import numpy as np
import pickle
#=========================================================
# total number of samples and size of each cross-validation fold
NUM_SPECTRA = 1040
FOLD_SIZE = 170
np.random.seed(120415)  # fixed seed: the split is reproducible
#=========================================================
indices = np.arange(NUM_SPECTRA)
np.random.shuffle(indices)
# last 190 shuffled indices are held out for testing; the first 850 form
# the cross-validation "work" set
test_indices = indices[850:]
work_indices = indices[:850]
#=========================================================
# rotate the work set by FOLD_SIZE per fold -> disjoint validation folds
rotation_indices = work_indices.copy()
cross_validation_sets = []
for index in range(len(work_indices) // FOLD_SIZE):
    valid_indices = rotation_indices[:FOLD_SIZE]
    train_indices = rotation_indices[FOLD_SIZE:]
    cross_validation_dict = {'train_indices': train_indices.copy(), 'valid_indices': valid_indices.copy()}
    cross_validation_sets.append(cross_validation_dict)
    rotation_indices = np.roll(rotation_indices, FOLD_SIZE)
print('generated %d folds' % (len(cross_validation_sets)))
data_set = {'test_indices': test_indices, 'work_indices': work_indices, 'cross_validation_sets': cross_validation_sets}
pickle.dump(data_set, open('indices.pkl', 'wb'))
| 1,082 | 25.414634 | 119 | py |
imf | imf-master/setup.py | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# NOTE: The configuration for the package, including the name, version, and
# other information are set in the setup.cfg file.
import os
import sys
from setuptools import setup
# First provide helpful messages if contributors try and run legacy commands
# for tests or docs.
# Legacy 'setup.py test' / 'build_docs' commands are intercepted below and
# redirected to the tox-based workflow (standard astropy package template).
TEST_HELP = """
Note: running tests is no longer done using 'python setup.py test'. Instead
you will need to run:

    tox -e test

If you don't already have tox installed, you can install it with:

    pip install tox

If you only want to run part of the test suite, you can also use pytest
directly with::

    pip install -e .[test]
    pytest

For more information, see:

  http://docs.astropy.org/en/latest/development/testguide.html#running-tests
"""

if 'test' in sys.argv:
    print(TEST_HELP)
    sys.exit(1)

DOCS_HELP = """
Note: building the documentation is no longer done using
'python setup.py build_docs'. Instead you will need to run:

    tox -e build_docs

If you don't already have tox installed, you can install it with:

    pip install tox

You can also build the documentation with Sphinx directly using::

    pip install -e .[docs]
    cd docs
    make html

For more information, see:

  http://docs.astropy.org/en/latest/install.html#builddocs
"""

if 'build_docs' in sys.argv or 'build_sphinx' in sys.argv:
    print(DOCS_HELP)
    sys.exit(1)

# Template written into imf/version.py by setuptools_scm at build time.
VERSION_TEMPLATE = """
# Note that we need to fall back to the hard-coded version if either
# setuptools_scm can't be imported or setuptools_scm can't determine the
# version, so we catch the generic 'Exception'.
try:
    from setuptools_scm import get_version
    version = get_version(root='..', relative_to=__file__)
except Exception:
    version = '{version}'
""".lstrip()

setup(use_scm_version={'write_to': os.path.join('imf', 'version.py'),
                       'write_to_template': VERSION_TEMPLATE})
| 1,952 | 23.721519 | 76 | py |
imf | imf-master/examples/imf_schematic.py | """
I'm not entirely sure what this figure is supposed to show; it just plots a few variants of the lognormal IMF
"""
from imf import imf
import numpy as np
if __name__ == "__main__":
    import pylab as pl
    pl.matplotlib.rc_file("/Users/adam/.matplotlib/pubfiguresrc")
    pl.rc('font', family='cmr10')
    x = np.logspace(-2,2)
    pl.clf()
    # sum-normalized Chabrier plus three lognormal variants (shifted
    # center, wider width, narrower "seed" width)
    chabrier = imf.chabrier(x)
    chabrier_n = chabrier/chabrier.sum()
    chabrier3x = imf.ChabrierLogNormal(lognormal_center=0.66)(x)
    chabrier3x_n = chabrier3x/chabrier3x.sum()
    seed = imf.ChabrierLogNormal(lognormal_width=0.3)(x)
    seed_n = seed/seed.sum()
    wider = imf.ChabrierLogNormal(lognormal_width=1)(x)
    wider_n = wider/wider.sum()
    pl.loglog(x, chabrier_n, linewidth=3, alpha=0.8, label='Chabrier')
    #pl.loglog(x, imf.kroupa(x), linewidth=3, alpha=0.8, label='Kroupa')
    pl.loglog(x, chabrier3x_n, linestyle='dashed', linewidth=3, alpha=0.8, label='$3\\times$ Chabrier')
    pl.loglog(x, wider_n, linestyle='dotted', linewidth=3, alpha=0.8, label='Wider MF')
    pl.loglog(x, seed_n, linestyle='-.', linewidth=3, alpha=0.8, label='Seed MF')
    pl.xlim(1e-1, 1e2)
    pl.ylim(1e-6, 1.15)
    pl.xlabel("Stellar Mass $M_*$ ($M_{\odot}$)")
    pl.ylabel("$P(M_*)$")
    pl.legend(loc='best', fontsize=22)
    import matplotlib.ticker
    ax = pl.gca()
    # force plain (non-exponent) tick labels on both log axes
    ax.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
    ax.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
    pl.savefig("imf_schematic.png")
    pl.draw()
    pl.show()
| 1,545 | 32.608696 | 110 | py |
imf | imf-master/examples/clustermf_figure.py | from imf import color_of_cluster,make_cluster,lum_of_cluster
import numpy as np
if __name__ == "__main__":
    import pylab as pl

    # power-law cluster mass function parameters
    alpha = 2
    m0 = 5e2
    mmax = 5e5
    # FIX: np.logspace requires an integer sample count; the original
    # passed the float 1e4, which raises TypeError on modern NumPy
    cluster_mass_xax = np.logspace(np.log10(m0), np.log10(mmax), int(1e4))

    def pr(m):
        # (unnormalized) cluster mass function dN/dM ~ (m/m0)^-alpha
        return (m/m0)**-alpha

    probabilities = pr(cluster_mass_xax)
    cdf = probabilities.cumsum()
    cdf /= cdf.max() # normalize to sum (cdf)

    # inverse-transform sampling of cluster masses from the CDF
    nclusters = 5000
    cluster_masses = np.array([np.interp(p, cdf, cluster_mass_xax) for p in np.random.rand(nclusters)])
    clusters = [make_cluster(m, mmax=m) for m in cluster_masses]
    luminosities = np.array([lum_of_cluster(c) for c in clusters])

    # no contrast
    # colors = [color_of_cluster(c) for c in clusters]
    def ctable(mass, mmin=0.08, mmax=120):
        # linear mass -> RdBu colormap mapping.
        # FIX: the original had a logarithmic mapping AFTER this return
        # statement -- unreachable dead code, now removed.  The linear
        # mapping below is what actually ran, so the output is unchanged.
        return pl.cm.RdBu((mass-mmin)/(mmax-mmin))

    colors = [color_of_cluster(c, ctable) for c in clusters]
    # random vertical placement of each cluster under the log(dN/dM) curve
    yax = [np.random.rand()*(np.log10(pr(m))-np.log10(pr(mmax))) + np.log10(pr(mmax)) for m in cluster_masses]

    pl.rc('font', size=30)

    # Figure 1: marker size scales with cluster luminosity
    pl.figure(1)
    pl.clf()
    pl.gca().set_xscale('log')
    sizes = 10**luminosities/1e5
    sizes[sizes < 10] = 10
    S = pl.scatter(cluster_masses, yax, c=colors, s=sizes, alpha=0.8)
    sm = pl.cm.ScalarMappable(cmap=pl.cm.RdBu, norm=pl.Normalize(vmin=0.08, vmax=120))
    sm._A = []
    cb = pl.colorbar(sm)
    cb.set_label("Luminosity-weighted\nMean Stellar Mass")
    pl.gca().axis([min(cluster_masses)/1.1,max(cluster_masses)*1.1,min(yax)-0.2,max(yax)+0.5])
    pl.xlabel("Cluster Mass ($M_\odot$)")
    pl.ylabel("Log(dN(M)/dM)")
    pl.savefig("plots/clusterMF_lumcolor_lumsize.png",bbox_inches='tight')

    # Figure 2: marker size scales with cluster mass
    pl.figure(2)
    pl.clf()
    pl.gca().set_xscale('log')
    sizes = cluster_masses / 50
    sizes[sizes < 10] = 10
    S = pl.scatter(cluster_masses, yax, c=colors, s=sizes, alpha=0.8)
    sm = pl.cm.ScalarMappable(cmap=pl.cm.RdBu, norm=pl.Normalize(vmin=0.08, vmax=120))
    sm._A = []
    cb = pl.colorbar(sm)
    cb.set_label("Luminosity-weighted\nMean Stellar Mass")
    pl.gca().axis([min(cluster_masses)/1.1,max(cluster_masses)*1.1,min(yax)-0.2,max(yax)+0.5])
    pl.xlabel("Cluster Mass ($M_\odot$)")
    pl.ylabel("Log(dN(M)/dM)")
    pl.savefig("plots/clusterMF_lumcolor_massize.png",bbox_inches='tight')

    # Figure 3: marker size scales with (log) light-to-mass ratio
    pl.figure(3)
    pl.clf()
    pl.gca().set_xscale('log')
    sizes = 20*np.log(10**luminosities / cluster_masses)
    sizes[sizes < 10] = 10
    S = pl.scatter(cluster_masses, yax, c=colors, s=sizes, alpha=0.8)
    sm = pl.cm.ScalarMappable(cmap=pl.cm.RdBu, norm=pl.Normalize(vmin=0.08, vmax=120))
    sm._A = []
    cb = pl.colorbar(sm)
    cb.set_label("Luminosity-weighted\nMean Stellar Mass")
    pl.gca().axis([min(cluster_masses)/1.1,max(cluster_masses)*1.1,min(yax)-0.2,max(yax)+0.5])
    pl.xlabel("Cluster Mass ($M_\odot$)")
    pl.ylabel("Log(dN(M)/dM)")
    pl.savefig("plots/clusterMF_lumcolor_mtolsize.png",bbox_inches='tight')

    pl.show()
| 3,057 | 34.55814 | 110 | py |
imf | imf-master/examples/pmf_evolution.py | import imf.imf, imf.pmf, imp
from imf.pmf import ChabrierPMF_AcceleratingSF_IS, ChabrierPMF_AcceleratingSF_TC, ChabrierPMF_AcceleratingSF_CA#, ChabrierPMF_AcceleratingSF_2CTC
import pylab as pl
import numpy as np
# force a fresh copy of the imf modules (useful in interactive sessions)
imp.reload(imf.imf)
imp.reload(imf.pmf)
imp.reload(imf.imf)
imp.reload(imf.pmf)

mmin = 0.033

# compare the accelerating-SF PMF variants (IS / TC / CA) against the
# Chabrier IMF for a low and a high upper mass cutoff
for mmax in (3, 120):
    print("Normalizing.")
    ChabrierPMF_AcceleratingSF_IS.mmax = mmax
    ChabrierPMF_AcceleratingSF_TC.mmax = mmax
    ChabrierPMF_AcceleratingSF_CA.mmax = mmax
    #ChabrierPMF_AcceleratingSF_2CTC.mmax = mmax
    ChabrierPMF_AcceleratingSF_IS.normalize(log=True, mmin=mmin, mmax=mmax)
    ChabrierPMF_AcceleratingSF_TC.normalize(log=True, mmin=mmin, mmax=mmax)
    ChabrierPMF_AcceleratingSF_CA.normalize(log=True, mmin=mmin, mmax=mmax)
    #ChabrierPMF_AcceleratingSF_2CTC.normalize(log=True, mmin=mmin, mmax=mmax)
    chabrierpowerlaw = imf.ChabrierPowerLaw()
    chabrierpowerlaw.normalize(log=True, mmin=mmin, mmax=mmax)
    print("Now plotting.")
    masses = np.logspace(np.log10(mmin), np.log10(mmax), 100)
    # one pass plots m*P(m), the other the plain (normalized) P(m)
    for mass_weighted in (True, False):
        fname = 'mass_weighted' if mass_weighted else '__call__'
        fig1 = pl.figure(1)
        fig1.clf()
        ax = fig1.gca()
        ax.set_title("Accelerating SF McKee/Offner + Chabrier PMF")
        ax.loglog(masses, chabrierpowerlaw.__getattribute__(fname)(masses), label="IMF", color='k')
        # heavier lines correspond to larger star-formation timescales tau
        for tau, lw in zip((0.1, 1.0, 10.0), (1, 2, 3,)):
            ax.loglog(masses, ChabrierPMF_AcceleratingSF_IS.__getattribute__(fname)(masses, tau=tau), label="IS", color='r', linewidth=lw, linestyle=':')
            ax.loglog(masses, ChabrierPMF_AcceleratingSF_TC.__getattribute__(fname)(masses, tau=tau), label="TC", color='g', linewidth=lw, linestyle='-.')
            ax.loglog(masses, ChabrierPMF_AcceleratingSF_CA.__getattribute__(fname)(masses, tau=tau), label="CA", color='y', linewidth=lw, linestyle='-.')
        #ax.loglog(masses, ChabrierPMF_AcceleratingSF_2CTC.__getattribute__(fname)(masses), label="2CTC", color='b', linestyle='--')
        ax.set_xlabel("(Proto)Stellar Mass (M$_\odot$)")
        ax.set_ylabel("m P(m)" if mass_weighted else "Normalized P(M)")
        ax.axis([mmin, mmax, 1e-4, 1])
        pl.legend(loc='best')
        pl.savefig('acceleratingSF_pmf_chabrier{0}_mmax{1}.png'
                   .format("_integral" if mass_weighted else "", int(mmax)),
                   bbox_inches='tight')
| 2,430 | 45.75 | 154 | py |
imf | imf-master/examples/mass_to_light.py | import numpy as np
import os
import json
from imf import imf
import pylab as pl
import matplotlib
from astropy.utils.console import ProgressBar
pl.rc('font', size=16)

# load cached synthetic-cluster results from any previous run
if os.path.exists('synth_data_m_to_l.json'):
    with open('synth_data_m_to_l.json', 'r') as fh:
        synth_data = json.load(fh)
else:
    synth_data = {}

# uniform random sampling from 100 to 10^5 msun
for stop_crit in ('nearest', 'before', 'after', 'sorted'):
    print(stop_crit)
    if stop_crit not in synth_data:
        # synthesize clusters over a log-uniform grid of cluster masses
        clusters, luminosities, masses, mean_luminosities, mean_masses, max_masses, number = {},{},{},{},{},{},{}
        for clmass in ProgressBar(np.concatenate([10**(np.random.rand(int(1e3))*1 + 4), 10**(np.random.rand(int(1e4))*2.5+1.5)])):
            key = str(clmass) # for jsonification
            clusters[key] = imf.make_cluster(clmass, 'kroupa', mmax=150, silent=True, stop_criterion=stop_crit)
            # cluster luminosities
            luminosities[key] = imf.lum_of_cluster(clusters[key])
            masses[key] = clmass
            number[key] = len(clusters[key])
            #mean_luminosities[clmass] = np.mean(luminosities[clmass])
            mean_masses[key] = np.mean(clusters[key])
            max_masses[key] = np.max(clusters[key])
        synth_data[stop_crit] = {#'clusters': clusters,
                                 'number': number,
                                 'luminosities': luminosities,
                                 'masses': masses,
                                 'mean_luminosities': mean_luminosities,
                                 'mean_masses': mean_masses,
                                 'max_masses': max_masses}
    else:
        # reuse cached results (mean_luminosities is deliberately not
        # restored here; it is unused below)
        max_masses = synth_data[stop_crit]['max_masses']
        #mean_luminosities = synth_data[stop_crit]['mean_luminosities']
        mean_masses = synth_data[stop_crit]['mean_masses']
        number = synth_data[stop_crit]['number']
        masses = synth_data[stop_crit]['masses']
        luminosities = synth_data[stop_crit]['luminosities']
    clmasses = sorted(map(float, masses))
    mass_to_light = np.array([k/10**luminosities[str(k)] for k in clmasses])
    # shouldn't this converge to a single value? (yes, don't divide linear by log)
    pl.figure(2).clf()
    pl.loglog(clmasses, mass_to_light**-1, '.', alpha=0.1)
    pl.xlabel("Cluster Mass")
    pl.ylabel("Light to Mass $L_\odot / M_\odot$")
    pl.ylim(1,5e4)
    pl.savefig(f"light_to_mass_vs_mass_{stop_crit}.png", bbox_inches='tight', dpi=200)
    pl.savefig(f"light_to_mass_vs_mass_{stop_crit}.pdf", bbox_inches='tight')
    pl.figure(3).clf()
    pl.loglog(list(map(float, max_masses.keys())), max_masses.values(), '.', alpha=0.1)
    pl.xlabel("Cluster Mass")
    pl.ylabel("Maximum stellar mass")
    pl.savefig(f"maxmass_vs_clustermass_{stop_crit}.png", bbox_inches='tight', dpi=200)
    pl.savefig(f"maxmass_vs_clustermass_{stop_crit}.pdf", bbox_inches='tight')
    # mass-to-light of a 1e5 Msun cluster as a function of the IMF's
    # upper-end power-law slope
    m_to_ls = []
    slopes = np.linspace(1.7, 2.9, 20)
    for slope in ProgressBar(slopes):
        Kroupa = imf.Kroupa(p3=slope)
        cluster = imf.make_cluster(1e5, Kroupa, mmax=120, silent=True,
                                   stop_criterion=stop_crit)
        lum = imf.lum_of_cluster(cluster)
        m_to_l = 1e5/10**lum
        m_to_ls.append(m_to_l)
    pl.figure(4).clf()
    pl.plot(slopes, m_to_ls)
    pl.xlabel("Upper-end power-law slope $\\alpha$")
    pl.ylabel("Mass-to-light ratio [M$_\odot$/L$_\odot$]")
    ax = pl.gca()
    tw = ax.twinx()
    # right-hand axis: M/L relative to the canonical slope 2.3 (index 10)
    tw.set_ylim(m_to_ls[0]/m_to_ls[10], m_to_ls[-1]/m_to_ls[10])
    tw.set_ylabel("M/L / M/L($\\alpha=2.3$)")
    pl.savefig(f"masstolight_vs_slope_{stop_crit}.pdf", bbox_inches='tight')
    pl.savefig(f"masstolight_vs_slope_{stop_crit}.png", bbox_inches='tight', dpi=200)
    pl.figure(5).clf()
    pl.semilogy(slopes, m_to_ls)
    ax = pl.gca()
    ylim = ax.get_ylim()
    ax.vlines(2.3, 1e-5, 1, linestyle='--', color='k', alpha=0.2, zorder=-10)
    ax.set_ylim(ylim)
    pl.xlabel("Upper-end power-law slope $\\alpha$")
    pl.ylabel("Mass-to-light ratio [M$_\odot$/L$_\odot$]")
    tw = ax.twinx()
    tw.semilogy()
    tw.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    tw.set_yticks([0.5,1,2,5])
    tw.set_ylim(m_to_ls[0]/m_to_ls[10], m_to_ls[-1]/m_to_ls[10])
    tw.set_xlim(ax.get_xlim())
    tw.hlines(1, 1, 5, linestyle='--', color='k', alpha=0.2, zorder=-20)
    #tw.grid(which='major', linestyle='--')
    tw.set_ylabel("M/L / M/L($\\alpha=2.3$)")
    pl.savefig(f"masstolight_vs_slope_log_{stop_crit}.pdf", bbox_inches='tight')
    pl.savefig(f"masstolight_vs_slope_log_{stop_crit}.png", bbox_inches='tight', dpi=200)

#pl.loglog(clusters.keys(), np.array(list(mean_luminosities.values())) / np.array(list(mean_masses.values())), '.', alpha=0.1)
# compute mass-to-light ratio vs age

# persist the (possibly updated) synthetic-data cache
with open('synth_data_m_to_l.json', 'w') as fh:
    json.dump(synth_data, fh)
| 4,914 | 40.302521 | 130 | py |
imf | imf-master/examples/sampling_methods.py | from imf import make_cluster
import pylab as pl
import imf
# compare the distribution of maximum stellar masses produced by the four
# cluster-sampling stop criteria, over 10000 realizations of 500 Msun clusters
maxmass = [imf.make_cluster(500, verbose=False, silent=True).max() for ii in
           range(10000)]
maxmass_sorted = [imf.make_cluster(500, stop_criterion='sorted', verbose=False,
                                   silent=True).max() for ii in range(10000)]
maxmass_before = [imf.make_cluster(500, stop_criterion='before', verbose=False,
                                   silent=True).max() for ii in range(10000)]
maxmass_after = [imf.make_cluster(500, stop_criterion='after', verbose=False,
                                  silent=True).max() for ii in range(10000)]
pl.clf()
# overlay step histograms of the four max-mass distributions
pl.hist(maxmass, bins=50, alpha=0.5, label='nearest', histtype='step')
pl.hist(maxmass_sorted, bins=50, alpha=0.5, label='sorted', histtype='step')
pl.hist(maxmass_before, bins=50, alpha=0.5, label='before', histtype='step')
pl.hist(maxmass_after, bins=50, alpha=0.5, label='after', histtype='step')
pl.legend(loc='best')
| 967 | 45.095238 | 79 | py |
imf | imf-master/examples/pmf_stats.py | import imf.imf, imf.pmf, imp
imp.reload(imf.imf)
imp.reload(imf.pmf)
imp.reload(imf.imf)
imp.reload(imf.pmf)
from imf.pmf import ChabrierPMF_IS, ChabrierPMF_TC, ChabrierPMF_CA, ChabrierPMF_2CTC
from imf.pmf import KroupaPMF_IS, KroupaPMF_TC, KroupaPMF_CA, KroupaPMF_2CTC
from imf.pmf import McKeeOffner_AcceleratingSF_PMF, ChabrierPMF_AcceleratingSF_IS, ChabrierPMF_AcceleratingSF_TC, ChabrierPMF_AcceleratingSF_CA#, ChabrierPMF_AcceleratingSF_2CTC
import pylab as pl
import numpy as np
mmin = 0.033
mmax = 120
print("Normalizing.")
# normalize every steady-state PMF variant and the reference IMFs over
# the same [mmin, mmax] mass range
ChabrierPMF_IS.mmax = mmax
ChabrierPMF_TC.mmax = mmax
ChabrierPMF_CA.mmax = mmax
ChabrierPMF_2CTC.mmax = mmax
ChabrierPMF_IS.normalize(log=False, mmin=mmin, mmax=mmax)
ChabrierPMF_TC.normalize(log=False, mmin=mmin, mmax=mmax)
ChabrierPMF_CA.normalize(log=False, mmin=mmin, mmax=mmax)
ChabrierPMF_2CTC.normalize(log=False, mmin=mmin, mmax=mmax)
ChabrierPMF_AcceleratingSF_IS.mmax = mmax
ChabrierPMF_AcceleratingSF_TC.mmax = mmax
ChabrierPMF_AcceleratingSF_CA.mmax = mmax
#ChabrierPMF_AcceleratingSF_2CTC.mmax = mmax
ChabrierPMF_AcceleratingSF_IS.normalize(log=True, mmin=mmin, mmax=mmax)
ChabrierPMF_AcceleratingSF_TC.normalize(log=True, mmin=mmin, mmax=mmax)
ChabrierPMF_AcceleratingSF_CA.normalize(log=True, mmin=mmin, mmax=mmax)
#ChabrierPMF_AcceleratingSF_2CTC.normalize(log=True, mmin=mmin, mmax=mmax)
KroupaPMF_IS.mmax = mmax
KroupaPMF_TC.mmax = mmax
KroupaPMF_CA.mmax = mmax
KroupaPMF_2CTC.mmax = mmax
KroupaPMF_IS.normalize(log=False, mmin=mmin, mmax=mmax)
KroupaPMF_TC.normalize(log=False, mmin=mmin, mmax=mmax)
KroupaPMF_CA.normalize(log=False, mmin=mmin, mmax=mmax)
KroupaPMF_2CTC.normalize(log=False, mmin=mmin, mmax=mmax)
chabrierpowerlaw = imf.ChabrierPowerLaw()
chabrierpowerlaw.normalize(log=False, mmin=mmin, mmax=mmax)
kroupa = imf.Kroupa()
kroupa.normalize(log=False, mmin=mmin, mmax=mmax)
print("Done normalizing")
mfs = {'ChabrierPMF_IS': ChabrierPMF_IS,
       'ChabrierPMF_TC': ChabrierPMF_TC,
       'ChabrierPMF_CA': ChabrierPMF_CA,
       'ChabrierPMF_2CTC': ChabrierPMF_2CTC,
       'ChabrierIMF': chabrierpowerlaw,
       'KroupaPMF_IS': KroupaPMF_IS,
       'KroupaPMF_TC': KroupaPMF_TC,
       'KroupaPMF_CA': KroupaPMF_CA,
       'KroupaPMF_2CTC': KroupaPMF_2CTC,
       'KroupaIMF': kroupa,
       }
# add accelerating-SF variants for several star-formation timescales tau
for tau in (0.1, 0.5, 1.0, 2.0):
    mfs['ChabrierPMF_AcceleratingSF_IS_tau{0}'.format(tau)] = McKeeOffner_AcceleratingSF_PMF(j=0, jf=0, tau=tau, mmax=mmax)
    mfs['ChabrierPMF_AcceleratingSF_TC_tau{0}'.format(tau)] = McKeeOffner_AcceleratingSF_PMF(j=0.5, jf=0.75, tau=tau, mmax=mmax)
    mfs['ChabrierPMF_AcceleratingSF_CA_tau{0}'.format(tau)] = McKeeOffner_AcceleratingSF_PMF(j=2/3., jf=1.0, tau=tau, mmax=mmax)
# report the fraction of stellar mass in stars above 10 Msun for each MF
for mf in sorted(mfs):
    total = mfs[mf].m_integrate(mmin, mmax)[0]
    gt10 = mfs[mf].m_integrate(10, mmax)[0]
    print("Mass fraction for {1} M>10 = {0:0.3f}".format(gt10/total, mf))
| 2,880 | 36.907895 | 177 | py |
imf | imf-master/examples/chabrier_comparisons.py | """
Compare the Chabrier distribution pulled from eqn 18 of Chabrier 2003 to
that quoted in McKee & Offner 2010 as "Chabrier 2005"
"""
import imf
import numpy as np
import pylab as pl
chabrier = imf.chabrierpowerlaw
# "Chabrier 2005" parametrization as quoted by McKee & Offner 2010
chabrier2005 = imf.ChabrierPowerLaw(lognormal_width=0.55*np.log(10),
                                    lognormal_center=0.2,
                                    alpha=2.35)
masses = np.geomspace(0.01, 10, 1000)
# Figure 1: the two mass functions overlaid
pl.figure(1)
pl.loglog(masses, chabrier(masses), label='Chabrier 2003 eqn 18')
pl.loglog(masses, chabrier2005(masses), label='Chabrier 2005 via McKee & Offner 2010')
pl.xlabel("Mass")
pl.ylabel("$\\xi \\equiv dN/dM$")
pl.legend(loc='best')
pl.savefig("Chabrier2003v2005.png")
# Figure 2: absolute differences, plotted in both orders since loglog
# silently drops negative values
pl.figure(2)
pl.loglog(masses, chabrier2005(masses) - chabrier(masses), label='C03-C05')
pl.loglog(masses, chabrier(masses) - chabrier2005(masses), label='C05-C03')
pl.xlabel("Mass")
pl.ylabel("$\\xi_1 - \\xi_2$")
pl.legend(loc='best')
pl.savefig("Chabrier2003v2005_diff.png")
# Figure 3: relative differences normalized by the 2003 form
pl.figure(3)
pl.loglog(masses, (chabrier2005(masses) - chabrier(masses))/chabrier(masses), label='(C03-C05)/C03')
pl.loglog(masses, (chabrier(masses) - chabrier2005(masses))/chabrier(masses), label='(C05-C03)/C03')
pl.xlabel("Mass")
pl.ylabel("$(\\xi_1-\\xi_2)/\\xi_1$")
pl.legend(loc='best')
pl.savefig("Chabrier2003v2005_relativediff.png")
| 1,328 | 33.973684 | 100 | py |
imf | imf-master/examples/imf_figure.py | """
Script to make an IMF diagram that shows dN(M)/dM vs M, then populates the area
under the curve with an appropriate number of stars colored by their
"true"(ish) color and sized by their mass.
"""
import imf
from imf import coolplot,kroupa,make_cluster
from astropy.table import Table
import numpy as np
if __name__ == "__main__":
    import pylab as pl
    pl.matplotlib.style.use('classic')
    pl.rc('font',size=30)
    pl.close(1)
    # make three figures of dN/dM vs M, one for each mass function,
    # then do it again in log-scale
    for massfunc in (imf.kroupa, imf.chabrier2005, imf.salpeter):
        # this is not a recommended way to get object names, don't do it in general.
        # (not all classes are guaranteed to have names; I know they do in this
        # case because I made and initialized the classes)
        name = massfunc.__class__.__name__
        # figure 1: log(dN/dM) vs M
        pl.figure(1, figsize=(10,8))
        pl.clf()
        cluster,yax,colors = coolplot(1000, massfunc=massfunc)
        cluster = np.array(cluster)
        yax = np.array(yax)
        pl.scatter(cluster, yax, c=colors, s=np.log10(cluster+3)*85,
                   linewidths=0.5, edgecolors=(0,0,0,0.25), alpha=0.95)
        pl.gca().set_xscale('log')
        masses = np.logspace(np.log10(cluster.min()), np.log10(cluster.max()),10000)
        pl.plot(masses,np.log10(massfunc(masses)),'r--',linewidth=2,alpha=0.5)
        pl.xlabel("Stellar Mass")
        pl.ylabel("log(dN(M)/dM)")
        pl.gca().axis([min(cluster)/1.1,max(cluster)*1.1,min(yax)-0.2,max(yax)+0.5])
        pl.savefig("{0}_imf_figure_log.png".format(name),bbox_inches='tight', dpi=150)
        pl.savefig("{0}_imf_figure_log.pdf".format(name),bbox_inches='tight')
        # figure 2: linear dN/dM against log M
        pl.figure(2, figsize=(20,16))
        pl.clf()
        cluster,yax,colors = coolplot(1000, massfunc=massfunc, log=False)
        cluster = np.array(cluster)
        yax = np.array(yax)
        pl.scatter(cluster, yax, c=colors, s=np.log10(cluster+3)*85,
                   linewidths=0.5, edgecolors=(0,0,0,0.25), alpha=0.95)
        pl.gca().set_xscale('log')
        masses = np.logspace(np.log10(cluster.min()), np.log10(cluster.max()),10000)
        pl.plot(masses,(massfunc(masses)),'r--',linewidth=2,alpha=0.5)
        pl.xlabel("Stellar Mass")
        pl.ylabel("dN(M)/dM")
        pl.gca().axis([min(cluster)/1.1,max(cluster)*1.1,min(yax)-0.2,max(yax)+0.5])
        pl.savefig("{0}_imf_figure_loglinear.png".format(name),bbox_inches='tight')
        # figure 3: three linear-linear panels over low/mid/high mass ranges
        pl.rc('font',size=20)
        pl.figure(3, figsize=(20,16))
        ax1 = pl.subplot(1,3,1)
        ax1.plot(masses,(massfunc(masses)),'r--',linewidth=2,alpha=0.5)
        ax1.scatter(cluster, yax, c=colors, s=np.log10(cluster+3)*85,
                    linewidths=0.5, edgecolors=(0,0,0,0.25), alpha=0.95)
        ax2 = pl.subplot(1,3,2)
        ax2.plot(masses,(massfunc(masses)),'r--',linewidth=2,alpha=0.5)
        ax2.scatter(cluster, yax, c=colors, s=np.log10(cluster+3)*85,
                    linewidths=0.5, edgecolors=(0,0,0,0.25), alpha=0.95)
        ax3 = pl.subplot(1,3,3)
        ax3.plot(masses,(massfunc(masses)),'r--',linewidth=2,alpha=0.5)
        ax3.scatter(cluster, yax, c=colors, s=np.log10(cluster+3)*85,
                    linewidths=0.5, edgecolors=(0,0,0,0.25), alpha=0.95)
        ax2.set_xlabel("Stellar Mass", fontsize=30)
        ax1.set_ylabel("dN(M)/dM", fontsize=30)
        ax1.axis([min(cluster)/1.1,1,min(yax)-0.2,max(yax)+0.5])
        ax2.axis([1,5,min(yax)-0.2,max(yax[cluster>1])+0.5])
        ax3.axis([5,max(cluster)*1.1,min(yax)-0.2,max(yax[cluster>5])+0.5])
        pl.tight_layout()
        pl.savefig("{0}_imf_figure_linearlinear.png".format(name),
                   bbox_inches='tight')
        pl.rc('font',size=30)  # restore the default font size
    # make one more plot, now showing a top-heavy (shallow-tail) IMF
    massfunc = imf.Kroupa(p3=1.75)
    name='KroupaTopHeavy'
    pl.figure(1, figsize=(10,8))
    pl.clf()
    cluster,yax,colors = coolplot(1000, massfunc=massfunc)
    pl.scatter(cluster, yax, c=colors, s=np.log10(cluster+3)*85,
               linewidths=0.5, edgecolors=(0,0,0,0.25), alpha=0.95)
    pl.gca().set_xscale('log')
    masses = np.logspace(np.log10(cluster.min()), np.log10(cluster.max()),10000)
    pl.plot(masses,np.log10(massfunc(masses)),'r--',linewidth=2,alpha=0.5)
    pl.xlabel("Stellar Mass")
    pl.ylabel("log(dN(M)/dM)")
    pl.gca().axis([min(cluster)/1.1,max(cluster)*1.1,min(yax)-0.2,max(yax)+0.5])
    pl.savefig("{0}_imf_figure_log.png".format(name),bbox_inches='tight', dpi=150)
    pl.savefig("{0}_imf_figure_log.pdf".format(name),bbox_inches='tight')
    # make two more plots, now showing a bottom- and a top-heavy IMF
    for massfunc, name in [(imf.Salpeter(alpha=1.5), 'Alpha1p5'),
                           (imf.Salpeter(alpha=2), 'Alpha2p0'),
                           (imf.Salpeter(alpha=1), 'Alpha1p0'),
                           (imf.Salpeter(alpha=3), 'Alpha3p0')]:
        pl.figure(1, figsize=(10,8))
        pl.clf()
        cluster,yax,colors = coolplot(1000, massfunc=massfunc)
        pl.scatter(cluster, yax, c=colors, s=np.log10(cluster+3)*85,
                   linewidths=0.5, edgecolors=(0,0,0,0.25), alpha=0.95)
        pl.gca().set_xscale('log')
        masses = np.logspace(np.log10(cluster.min()), np.log10(cluster.max()),10000)
        pl.plot(masses,np.log10(massfunc(masses)),'r--',linewidth=2,alpha=0.5)
        pl.xlabel("Stellar Mass")
        pl.ylabel("log(dN(M)/dM)")
        pl.gca().axis([min(cluster)/1.1,max(cluster)*1.1,min(yax)-0.2,max(yax)+0.5])
        pl.savefig("{0}_imf_figure_log.png".format(name),bbox_inches='tight', dpi=150)
        pl.savefig("{0}_imf_figure_log.pdf".format(name),bbox_inches='tight')
| 5,721 | 44.412698 | 86 | py |
imf | imf-master/examples/pmf_comparison.py | import imf.imf, imf.pmf, imp
# Compare protostellar mass functions (PMFs) against their parent IMFs.
# FIX: the loop bodies of this script had lost their indentation (whitespace
# mangling), leaving it syntactically invalid; the structure is restored here
# with no token changes.
imp.reload(imf.imf)
imp.reload(imf.pmf)
imp.reload(imf.imf)
imp.reload(imf.pmf)
from imf.pmf import ChabrierPMF_IS, ChabrierPMF_TC, ChabrierPMF_CA, ChabrierPMF_2CTC
from imf.pmf import KroupaPMF_IS, KroupaPMF_TC, KroupaPMF_CA, KroupaPMF_2CTC

import pylab as pl
import numpy as np

pl.rc('font', size=16)
pl.rc('lines', linewidth=2)

mmin = 0.033
mmax = 3.0

for mmax in (3, 120):
    # this apparently takes a very, very long time... like 20-30 minutes?
    # that doesn't make a whole lot of sense, so it probably indicates a bug
    # or an overly-precise convergence threshold
    print("Normalizing.")
    ChabrierPMF_IS.mmax = mmax
    ChabrierPMF_TC.mmax = mmax
    ChabrierPMF_CA.mmax = mmax
    ChabrierPMF_2CTC.mmax = mmax
    ChabrierPMF_IS.normalize(log=True, mmin=mmin, mmax=mmax)
    ChabrierPMF_TC.normalize(log=True, mmin=mmin, mmax=mmax)
    ChabrierPMF_CA.normalize(log=True, mmin=mmin, mmax=mmax)
    ChabrierPMF_2CTC.normalize(log=True, mmin=mmin, mmax=mmax)

    KroupaPMF_IS.mmax = mmax
    KroupaPMF_TC.mmax = mmax
    KroupaPMF_CA.mmax = mmax
    KroupaPMF_2CTC.mmax = mmax
    KroupaPMF_IS.normalize(log=True, mmin=mmin, mmax=mmax)
    KroupaPMF_TC.normalize(log=True, mmin=mmin, mmax=mmax)
    KroupaPMF_CA.normalize(log=True, mmin=mmin, mmax=mmax)
    KroupaPMF_2CTC.normalize(log=True, mmin=mmin, mmax=mmax)

    # reference IMFs, normalized over the same mass range
    chabrier2005 = imf.Chabrier2005()
    chabrier2005.normalize(log=True, mmin=mmin, mmax=mmax)
    kroupa = imf.Kroupa()
    kroupa.normalize(log=True, mmin=mmin, mmax=mmax)

    print("Now plotting.")

    masses = np.logspace(np.log10(mmin), np.log10(mmax), 100)

    for mass_weighted in (True,False):
        # select which evaluation method to plot
        fname = 'mass_weighted' if mass_weighted else '__call__'

        fig1 = pl.figure(1)
        fig1.clf()
        ax = fig1.gca()
        ax.set_title("Steady State McKee/Offner + Chabrier PMF")
        ax.loglog(masses, chabrier2005.__getattribute__(fname)(masses), label="IMF", color='k')
        ax.loglog(masses, ChabrierPMF_IS.__getattribute__(fname)(masses), label="IS", color='r', linestyle=':')
        ax.loglog(masses, ChabrierPMF_TC.__getattribute__(fname)(masses), label="TC", color='g', linestyle='-.')
        ax.loglog(masses, ChabrierPMF_CA.__getattribute__(fname)(masses), label="CA", color='y', linestyle='-.')
        ax.loglog(masses, ChabrierPMF_2CTC.__getattribute__(fname)(masses), label="2CTC", color='b', linestyle='--')
        ax.set_xlabel("(Proto)Stellar Mass (M$_\odot$)")
        ax.set_ylabel("m P(m)" if mass_weighted else "Normalized P(M)")
        ax.axis([mmin, mmax, 1e-4, 1])
        pl.legend(loc='best')
        pl.savefig('steadystate_pmf_chabrier{0}_mmax{1}.png'.format("_integral" if mass_weighted else "",
                                                                    int(mmax)), bbox_inches='tight')

        fig2 = pl.figure(2)
        fig2.clf()
        ax = fig2.gca()
        ax.set_title("Tapered McKee/Offner + Chabrier PMF")
        ax.loglog(masses, chabrier2005.__getattribute__(fname)(masses), label="IMF", color='k')
        ax.loglog(masses, ChabrierPMF_IS.__getattribute__(fname)(masses, taper=True), label="IS", color='r', linestyle=':')
        ax.loglog(masses, ChabrierPMF_TC.__getattribute__(fname)(masses, taper=True), label="TC", color='g', linestyle='-.')
        ax.loglog(masses, ChabrierPMF_CA.__getattribute__(fname)(masses, taper=True), label="CA", color='y', linestyle='-.')
        ax.loglog(masses, ChabrierPMF_2CTC.__getattribute__(fname)(masses, taper=True), label="2CTC", color='b', linestyle='--')
        ax.set_xlabel("(Proto)Stellar Mass (M$_\odot$)")
        ax.set_ylabel("m P(m)" if mass_weighted else "Normalized P(M)")
        ax.axis([mmin, mmax, 1e-4, 1])
        pl.legend(loc='best')
        pl.savefig('taperedaccretion_pmf_chabrier{0}_mmax{1}.png'.format("_integral" if mass_weighted else "",
                                                                         int(mmax)), bbox_inches='tight')

        fig3 = pl.figure(3)
        fig3.clf()
        ax3 = fig3.gca()
        ax3.set_title("Steady State McKee/Offner + Kroupa PMF", pad=15)
        ax3.loglog(masses, kroupa.__getattribute__(fname)(masses), label="IMF", color='k')
        ax3.loglog(masses, KroupaPMF_IS.__getattribute__(fname)(masses), label="IS", color='r', linestyle=':')
        ax3.loglog(masses, KroupaPMF_TC.__getattribute__(fname)(masses), label="TC", color='g', linestyle='-.')
        ax3.loglog(masses, KroupaPMF_CA.__getattribute__(fname)(masses), label="CA", color='y', linestyle='-.')
        ax3.loglog(masses, KroupaPMF_2CTC.__getattribute__(fname)(masses), label="2CTC", color='b', linestyle='--')
        ax3.set_xlabel("(Proto)Stellar Mass $\\left(\\mathrm{M}_\odot\\right)$")
        ax3.set_ylabel("m P(m)" if mass_weighted else "Normalized P(M)")
        ax3.axis([mmin, mmax, 1e-4, 1])
        pl.legend(loc='best')
        pl.savefig('steadystate_pmf_kroupa{0}_mmax{1}.png'.format("_integral" if mass_weighted else "",
                                                                  int(mmax)), bbox_inches='tight')
        pl.savefig('steadystate_pmf_kroupa{0}_mmax{1}.pdf'.format("_integral" if mass_weighted else "",
                                                                  int(mmax)), bbox_inches='tight')

        fig4 = pl.figure(4)
        fig4.clf()
        ax4 = fig4.gca()
        ax4.set_title("Tapered McKee/Offner + Kroupa PMF")
        ax4.loglog(masses, kroupa.__getattribute__(fname)(masses), label="IMF", color='k')
        ax4.loglog(masses, KroupaPMF_IS.__getattribute__(fname)(masses, taper=True), label="IS", color='r', linestyle=':')
        ax4.loglog(masses, KroupaPMF_TC.__getattribute__(fname)(masses, taper=True), label="TC", color='g', linestyle='-.')
        ax4.loglog(masses, KroupaPMF_CA.__getattribute__(fname)(masses, taper=True), label="CA", color='y', linestyle='-.')
        ax4.loglog(masses, KroupaPMF_2CTC.__getattribute__(fname)(masses, taper=True), label="2CTC", color='b', linestyle='--')
        ax4.set_xlabel("(Proto)Stellar Mass (M$_\odot$)")
        ax4.set_ylabel("m P(m)" if mass_weighted else "Normalized P(M)")
        ax4.axis([mmin, mmax, 1e-4, 1])
        pl.legend(loc='best')
        pl.savefig('taperedaccretion_pmf_kroupa{0}_mmax{1}.png'.format("_integral" if mass_weighted else "",
                                                                       int(mmax)), bbox_inches='tight')
| 6,437 | 50.504 | 128 | py |
imf | imf-master/examples/hr_diagram.py | """
Create a Hertzprung-Russell (temperature-luminosity) diagram and
mass-luminosity and mass-temperature diagrams populating the main sequence only
with data from Ekström+ 2012 (Vizier catalog J/A+A/537/A146/iso).
Colors come from vendian.org.
"""
import numpy as np
import imf
import pylab as pl
from astroquery.vizier import Vizier
from labellines import labelLine, labelLines
# pip install matplotlib-label-lines
# FIX: the for-loop bodies of this script had lost their indentation
# (whitespace mangling), leaving it syntactically invalid; structure is
# restored here with no token changes.
pl.rcParams['font.size'] = 20

Vizier.ROW_LIMIT=1e7
tbl = Vizier.get_catalogs('J/A+A/537/A146/iso')[0]

# maximum surviving mass / luminosity for each isochrone age
agemass = {}
agelum = {}
for age in np.unique(tbl['logAge']):
    agemass[age] = tbl[tbl['logAge']==age]['Mass'].max()
    agelum[age] = tbl[tbl['logAge']==age]['logL'].max()

# mass-temperature diagram
pl.figure(2, figsize=(8,8)).clf()

# select a specific population age to plot - this is the youngest, with all
# stars alive and main-sequence
ok = tbl['logAge'] == 6.5
subtbl = tbl[ok]
subtbl.sort('Mass')
lowmass = subtbl[subtbl['Mass'] < 2]
# downsample the high-mass part of the sample to avoid point overlap
subtbl = subtbl[(subtbl['Mass'] < 25) & (subtbl['logTe']<4.6)]
highmass = subtbl[(subtbl['Mass'] > 20) & (subtbl['logTe']<4.6)]
subtbl=subtbl[::20]

# fill in (oversample) the low-mass portion
Lfit = np.polyfit(np.log10(lowmass['Mass']), lowmass['logL'], 1)
# the fit is not exactly right; there's a break at 0.43ish, but it's good enough
masses = np.logspace(np.log10(0.1), np.log10(0.8))
lums = np.poly1d(Lfit)(np.log10(masses))
# this was an old by-hand fit
#lums[masses<(0.43)] = np.log10(0.23*(masses[masses<(0.43)])**2.3)
Tfit = np.polyfit(np.log10(lowmass['Mass']), lowmass['logTe'], 1)
tems = np.poly1d(Tfit)(np.log10(masses))

# extrapolate the high-mass end with power-law fits
hmasses = np.logspace(np.log10(25), np.log10(60),5)
Lfit = np.polyfit(np.log10(highmass['Mass']), highmass['logL'], 1)
hlums = np.poly1d(Lfit)(np.log10(hmasses))
#lums[masses<(0.43)] = np.log10(0.23*(masses[masses<(0.43)])**2.3)
Tfit = np.polyfit(np.log10(highmass['Mass']), highmass['logTe'], 1)
htems = np.poly1d(Tfit)(np.log10(hmasses))

# obtain the colors from the vendian-based table
colors = [imf.color_from_mass(m) for m in subtbl['Mass']]

# start plotting
pl.gca().set_yscale('log')
pl.scatter(10**subtbl['logTe'],
           subtbl['Mass'],
           c=colors,
           s=(10**subtbl['logL'])**0.25*45)
colors = [imf.color_from_mass(m) for m in masses]
pl.scatter(10**tems,
           masses,
           c=colors,
           s=(10**lums)**0.25*45)
colors = [imf.color_from_mass(m) for m in hmasses]
pl.scatter(10**htems,
           hmasses,
           c=colors,
           s=(10**hlums)**0.25*45)

# overlay star age horizontal lines
lines = []
for age in (6.5, 7, 8, 10):
    L, = pl.plot([10**tems.min(), (10**htems.max())],
                 [agemass[age]]*2, linestyle='--', color='k',
                 label="$10^{{{0}}}$ yr".format(age))
    lines.append(L)
labelLines(lines)

pl.xlabel("Temperature")
pl.ylabel("Mass")
pl.tight_layout()
pl.savefig("tem_lum_diagram.svg")#, bbox_inches='tight')

# HR diagram (temperature-luminosity)
pl.figure(3, figsize=(8,8)).clf()
colors = [imf.color_from_mass(m) for m in subtbl['Mass']]
#pl.gca().set_xscale('log')
pl.gca().set_yscale('log')
pl.scatter(10**subtbl['logTe'],
           10**subtbl['logL'],
           c=colors,
           s=(subtbl['Mass'])*5)
colors = [imf.color_from_mass(m) for m in masses]
pl.scatter(10**tems,
           10**lums,
           c=colors,
           s=masses*5)
colors = [imf.color_from_mass(m) for m in hmasses]
pl.scatter(10**htems,
           10**hlums,
           c=colors,
           s=hmasses*5)
# I attempted to add age lines, but this approach is incorrect
#suns live 5 Gyr, not 10+ Gyr
# lines = []
# for age in (6.5, 7, 8, 10):
#     L, = pl.plot([10**tems.min(), (10**htems.max())],
#                  [10**agelum[age]]*2,
#                  linestyle='--', color='k',
#                  label="$10^{{{0}}}$ yr".format(age))
#     lines.append(L)
labelLines(lines)
pl.xlabel("Temperature")
pl.ylabel("Luminosity")
pl.tight_layout()
pl.savefig("HR_diagram.svg")#, bbox_inches='tight')

# mass-luminosity diagram
pl.figure(4, figsize=(8,8)).clf()
colors = [imf.color_from_mass(m) for m in subtbl['Mass']]
#pl.gca().set_xscale('log')
pl.gca().set_yscale('log')
pl.scatter(subtbl['Mass'],
           10**subtbl['logL'],
           c=colors,
           edgecolors='none',
           s=10**subtbl['logTe']/100)
colors = [imf.color_from_mass(m) for m in masses]
pl.scatter(masses,
           10**lums,
           c=colors,
           edgecolors='none',
           s=10**tems/100)
colors = [imf.color_from_mass(m) for m in hmasses]
pl.scatter(hmasses,
           10**hlums,
           c=colors,
           edgecolors='none',
           s=10**htems/100)
#lines = []
#for age in (6.5, 7, 8, 10):
#    L, = pl.plot([masses.min(), hmasses.max()],
#                 [10**agelum[age]]*2,
#                 linestyle='--', color='k',
#                 label="$10^{{{0}}}$ yr".format(age))
#    lines.append(L)
labelLines(lines)
pl.xlabel("Mass")
pl.ylabel("Luminosity")
pl.tight_layout()
pl.savefig("mass_luminosity.svg")#, bbox_inches='tight')
pl.loglog()
pl.savefig("mass_lum_diagram_loglog.svg")#, bbox_inches='tight')
pl.savefig("mass_lum_diagram_loglog.png", bbox_inches='tight')

# from Zinnecker & Yorke fig 1
pl.plot(np.logspace(0,1),
        np.logspace(0,1)**3.7,
        'k--')
pl.plot(np.logspace(1,2),
        np.logspace(1,2)**1.6 * 1e6/100**1.6,
        'k--')
pl.savefig("mass_lum_diagram_loglog_ZinnPlots.svg")#, bbox_inches='tight')
pl.savefig("mass_lum_diagram_loglog_ZinnPlots.png", bbox_inches='tight')
| 5,605 | 28.197917 | 80 | py |
imf | imf-master/imf/imf.py | """
Various codes to work with the initial mass function
"""
from __future__ import print_function
import numpy as np
import types
import scipy.integrate
import scipy.integrate as integrate
from scipy.integrate import quad
from astropy import units as u
from . import distributions
class MassFunction(object):
    """
    Generic Mass Function class

    (this is mostly meant to be subclassed by other functions, not used itself)

    Subclasses are expected to define ``default_mmin`` / ``default_mmax``
    class attributes and a ``__call__(m, integral_form=False)`` method.
    """

    def __init__(self, mmin=None, mmax=None):
        # Fall back to the subclass-supplied defaults when bounds aren't given.
        self._mmin = self.default_mmin if mmin is None else mmin
        self._mmax = self.default_mmax if mmax is None else mmax

    def dndm(self, m, **kwargs):
        """
        The differential form of the mass function, d N(M) / dM
        """
        return self(m, integral_form=False, **kwargs)

    def n_of_m(self, m, **kwargs):
        """
        The integral form of the mass function, N(M)
        """
        return self(m, integral_form=True, **kwargs)

    def mass_weighted(self, m, **kwargs):
        """The mass-weighted differential mass function, m * dN/dM."""
        return self(m, integral_form=False, **kwargs) * m

    def integrate(self, mlow, mhigh, **kwargs):
        """
        Integrate the mass function over some range.

        Returns the (value, error) pair from `scipy.integrate.quad`.

        BUG FIX: ``**kwargs`` were previously accepted but silently ignored;
        they are now forwarded to `quad`, consistent with `m_integrate` and
        `log_integrate`.
        """
        return scipy.integrate.quad(self, mlow, mhigh, **kwargs)

    def m_integrate(self, mlow, mhigh, **kwargs):
        """
        Integrate the mass-weighted mass function over some range (this tells
        you the fraction of mass in the specified range)
        """
        return scipy.integrate.quad(self.mass_weighted, mlow, mhigh, **kwargs)

    def log_integrate(self, mlow, mhigh, **kwargs):
        """Integrate dN/d(log M) = (dN/dM) / M over [mlow, mhigh]."""
        def logform(x):
            return self(x) / x

        return scipy.integrate.quad(logform, mlow, mhigh, **kwargs)

    def normalize(self, mmin=None, mmax=None, log=False, **kwargs):
        """
        Set self.normfactor such that the integral of the function over the
        range (mmin, mmax) = 1
        """
        if mmin is None:
            mmin = self.mmin
        if mmax is None:
            mmax = self.mmax

        # reset first so the integral below is of the unnormalized function
        self.normfactor = 1

        if log:
            integral = self.log_integrate(mmin, mmax, **kwargs)
        else:
            integral = self.integrate(mmin, mmax, **kwargs)
        self.normfactor = 1. / integral[0]

        assert self.normfactor > 0

    @property
    def mmin(self):
        return self._mmin

    @property
    def mmax(self):
        return self._mmax
class Salpeter(MassFunction):
    """Salpeter (1955) single power-law IMF: dn/dm ~ m^-alpha."""

    default_mmin = 0.3
    default_mmax = 120

    def __init__(self, alpha=2.35, mmin=default_mmin, mmax=default_mmax):
        """
        Create a default Salpeter mass function, i.e. a power-law mass function
        the Salpeter 1955 IMF: dn/dm ~ m^-2.35
        """
        super().__init__(mmin=mmin, mmax=mmax)
        self.alpha = alpha
        self.normfactor = 1
        # the underlying truncated power-law distribution over [mmin, mmax]
        self.distr = distributions.PowerLaw(-self.alpha, self.mmin, self.mmax)

    def __call__(self, m, integral_form=False):
        """Evaluate the pdf at ``m`` (or the cdf when ``integral_form``)."""
        evaluate = self.distr.cdf if integral_form else self.distr.pdf
        return evaluate(m) * self.normfactor
class Kroupa(MassFunction):
    """Kroupa (2001) IMF: a three-segment broken power law."""
    # kroupa = BrokenPowerLaw(breaks={0.08: -0.3, 0.5: 1.3, 'last': 2.3}, mmin=0.03, mmax=120)
    default_mmin = 0.03
    default_mmax = 120

    def __init__(self,
                 mmin=default_mmin,
                 mmax=default_mmax,
                 p1=0.3,
                 p2=1.3,
                 p3=2.3,
                 break1=0.08,
                 break2=0.5):
        """
        The Kroupa IMF with two power-law breaks, p1 and p2. See __call__ for
        details.
        """
        super().__init__(mmin=mmin, mmax=mmax)
        self.p1 = p1
        self.p2 = p2
        self.p3 = p3
        self.break1 = break1
        self.break2 = break2
        # Piecewise power law with slopes -p1 / -p2 / -p3 over the segments
        # [mmin, break1], [break1, break2], [break2, mmax].
        self.distr = distributions.BrokenPowerLaw([-p1, -p2, -p3],
                                                  [self.mmin, break1, break2, self.mmax])
        self.normfactor = 1

    def __call__(self, m, integral_form=False):
        """
        Kroupa 2001 IMF (http://arxiv.org/abs/astro-ph/0009005,
        http://adsabs.harvard.edu/abs/2001MNRAS.322..231K) eqn 2

        Parameters
        ----------
        m: float array
            The mass at which to evaluate the function (Msun)
        p1, p2, p3: floats
            The power-law slopes of the different segments of the IMF
        break1, break2: floats
            The mass breakpoints at which to use the different power laws
        """
        if integral_form:
            return self.normfactor * self.distr.cdf(m)
        else:
            return self.normfactor * self.distr.pdf(m)

    def integrate(self, mlow, mhigh, numerical=False):
        """
        Integrate the mass function over some range

        Returns a ``(value, error)`` pair; the analytic branch reports zero
        error to mimic `scipy.integrate.quad`'s return shape.
        """
        if mhigh <= mlow:
            raise ValueError("Must have mlow < mhigh in integral")
        if numerical:
            # fall back to the quad-based integration in the base class
            return super(Kroupa, self).integrate(mlow, mhigh)

        # analytic: difference of the broken-power-law CDF
        return (self.distr.cdf(mhigh) -
                self.distr.cdf(mlow)) * self.normfactor, 0

    def m_integrate(self, mlow, mhigh, numerical=False, **kwargs):
        """
        Integrate the mass function over some range
        (mass-weighted, i.e. the integral of m * dN/dM)
        """
        if mhigh <= mlow:
            raise ValueError("Must have mlow < mhigh in integral")

        if numerical:
            return super(Kroupa, self).m_integrate(mlow, mhigh, **kwargs)
        else:
            # m * m^-p is a power law with exponent -p + 1, so the
            # mass-weighted IMF is itself a broken power law
            distr1 = distributions.BrokenPowerLaw(
                [-self.p1 + 1, -self.p2 + 1, -self.p3 + 1],
                [self.mmin, self.break1, self.break2, self.mmax])
            # rescale: distr1 is internally normalized, so compare its pdf to
            # the mass-weighted pdf of self.distr at a common point (break1)
            ratio = distr1.pdf(self.break1) / self.distr.pdf(
                self.break1) / self.break1

            return ((distr1.cdf(mhigh) - distr1.cdf(mlow)) / ratio, 0)
class ChabrierLogNormal(MassFunction):
    """
    The pure lognormal Chabrier IMF.

    Eqn 18 of https://ui.adsabs.harvard.edu/abs/2003PASP..115..763C/abstract
    is eqn3 of https://ui.adsabs.harvard.edu/abs/2003ApJ...586L.133C/abstract

    \\xi = 0.086 exp (-(log m - log 0.22)^2 / (2 * 0.57**2))

    See ChabrierPowerLaw for the version with a power-law extension to high
    mass.

    Parameters
    ----------
    lognormal_center : float
    lognormal_width : float
    mmin : float
    mmax : float
    leading_constant : float
    """
    default_mmin = 0
    default_mmax = np.inf

    def __init__(self, mmin=default_mmin, mmax=default_mmax,
                 lognormal_center=0.22,
                 lognormal_width=0.57*np.log(10),
                 leading_constant=0.086):
        super().__init__(mmin=mmin, mmax=mmax)
        self.lognormal_width = lognormal_width
        self.multiplier = leading_constant
        # lognormal truncated to [mmin, mmax]
        self.distr = distributions.TruncatedLogNormal(mu=lognormal_center,
                                                      sig=lognormal_width,
                                                      m1=self.mmin,
                                                      m2=self.mmax)

    def __call__(self, mass, integral_form=False, **kw):
        """Scaled pdf at ``mass`` (or scaled cdf when ``integral_form``)."""
        evaluate = self.distr.cdf if integral_form else self.distr.pdf
        return evaluate(mass) * self.multiplier
class ChabrierPowerLaw(MassFunction):
    """Chabrier (2003) IMF: lognormal below ``mmid``, power law above it."""
    default_mmin = 0
    default_mmax = np.inf

    def __init__(self,
                 lognormal_center=0.22,
                 lognormal_width=0.57*np.log(10),
                 mmin=default_mmin,
                 mmax=default_mmax,
                 alpha=2.3,
                 mmid=1):
        """
        From Equation 18 of Chabrier 2003
        https://ui.adsabs.harvard.edu/abs/2003PASP..115..763C/abstract

        Parameters
        ----------
        lognormal_center : float
        lognormal_width : float
            The lognormal width.  Scipy.stats.lognorm uses log_n,
            so we need to scale this b/c Chabrier uses log_10
        mmin : float
        mmax : float
        alpha : float
            The high-mass power-law slope
        mmid : float
            The mass to transition from lognormal to power-law

        Notes
        -----
        A previous version of this function used sigma=0.55,
        center=0.2, and alpha=2.35, which come from McKee & Offner 2010
        (https://ui.adsabs.harvard.edu/abs/2010ApJ...716..167M/abstract)
        but those exact numbers don't appear in Chabrier 2005
        """
        # The numbers are from Eqn 3 of
        # https://ui.adsabs.harvard.edu/abs/2005ASSL..327...41C/abstract
        # There is no equation 3 in that paper, though?
        # importantly the lognormal center is the exp(M) where M is the mean of ln(mass)
        # normal distribution
        super().__init__(mmin=mmin, mmax=mmax)
        self._mmid = mmid
        # a power-law segment above mmid is required for this class to work
        if self.mmax <= self._mmid:
            raise ValueError("The Chabrier Mass Function does not support "
                             "mmax <= mmid")
        self._alpha = alpha
        self._lognormal_width = lognormal_width
        self._lognormal_center = lognormal_center
        # composite: truncated lognormal on [mmin, mmid], power law on
        # [mmid, mmax]
        self.distr = distributions.CompositeDistribution([
            distributions.TruncatedLogNormal(self._lognormal_center,
                                             self._lognormal_width,
                                             self.mmin,
                                             self._mmid),
            distributions.PowerLaw(-self._alpha, self._mmid, self.mmax)
        ])

    def __call__(self, x, integral_form=False, **kw):
        """Evaluate the pdf at mass ``x`` (or the cdf when ``integral_form``)."""
        if integral_form:
            return self.distr.cdf(x)
        else:
            return self.distr.pdf(x)
class Schechter(MassFunction):
    # NOTE(review): this class is intentionally disabled -- __init__ raises
    # before super().__init__ runs, so self._mmin/self._mmax are never set and
    # __call__ (which reads self.mmin/self.mmax) cannot work until the class
    # is refactored.
    default_mmin = 0
    default_mmax = np.inf

    def __init__(self, mmin=default_mmin, mmax=default_mmax):
        raise NotImplementedError("Schechter function needs to be refactored")
        # unreachable while the NotImplementedError above is in place
        super().__init__(mmin=mmin, mmax=mmax)

    def __call__(self, m, A=1, beta=2, m0=100, integral_form=False):
        """
        A Schechter function with arbitrary defaults
        (integral may not be correct - exponent hasn't been dealt with at all)
        (TODO: this should be replaced with a Truncated Power Law Distribution)

        $$ A m^{-\\beta} e^{-m/m_0} $$

        Parameters
        ----------
        m: np.ndarray
            List of masses for which to compute the Schechter function
        A: float
            Arbitrary amplitude of the Schechter function
        beta: float
            Power law exponent
        m0: float
            Characteristic mass (mass at which exponential decay takes over)

        Returns
        -------
        p(m) - the (unnormalized) probability of an object of a given mass
        as a function of that object's mass
        (though you could interpret mass as anything, it's just a number)
        """
        if integral_form:
            # shallower slope by one power of m for the integral form
            beta -= 1
        return A * m**-beta * np.exp(-m / m0) * (m > self.mmin) * (m < self.mmax)
class ModifiedSchecter(Schechter):
    """Schechter function with an additional low-mass exponential cutoff."""
    default_mmin = 0
    default_mmax = np.inf

    def __init__(self, mmin=default_mmin, mmax=default_mmax):
        # BUG FIX: this previously did
        #   self.schechter = super().__init__(...)
        # which stores None (``__init__`` has no return value), so __call__
        # crashed with "'NoneType' object is not callable".  We now just
        # initialize the parent and dispatch through super().__call__.
        # (Note: Schechter.__init__ currently raises NotImplementedError, so
        # this class is equally disabled until the parent is refactored.)
        super().__init__(mmin=mmin, mmax=mmax)

    def __call__(self, m, m1, **kwargs):
        """
        A Schechter function with a low-level exponential cutoff
        (TODO: this should be replaced with a Truncated Power Law Distribution)

        Parameters
        ----------
        m: np.ndarray
            List of masses for which to compute the Schechter function
        m1: float
            Characteristic minimum mass (exponential decay below this mass)
        ** See schecter for other parameters **

        Returns
        -------
        p(m) - the (unnormalized) probability of an object of a given mass
        as a function of that object's mass
        (though you could interpret mass as anything, it's just a number)
        """
        return super().__call__(m, **kwargs) * np.exp(-m1 / m) * (m > self.mmin) * (m < self.mmax)
try:
    import scipy.special

    def schechter_cdf(m, A=1, beta=2, m0=100, mmin=10, mmax=None, npts=1e4):
        """
        Return the CDF value of a given mass for a set mmin, mmax
        mmax will default to 10 m0 if not specified

        Analytic integral of the Schechter function:
        http://www.wolframalpha.com/input/?i=integral%28x^-a+exp%28-x%2Fm%29+dx%29
        """
        if mmax is None:
            mmax = 10 * m0

        def _antideriv(x):
            # analytic antiderivative of x^-beta exp(-x/m0), up to sign
            return -x**(1 - beta) * scipy.special.expn(beta, x / m0)

        upper = _antideriv(mmax)
        lower = _antideriv(mmin)
        # normalize so the CDF runs from 0 at mmin to 1 at mmax
        span = upper - lower
        return (_antideriv(m) - lower) / span

    def sh_cdf_func(**kwargs):
        """Return a one-argument CDF closure with the given parameters baked in."""
        def _cdf(x):
            return schechter_cdf(x, **kwargs)
        return _cdf
except ImportError:
    pass
# these are global objects
salpeter = Salpeter()
kroupa = Kroupa()
lognormal = chabrierlognormal = ChabrierLogNormal()
chabrier = chabrierpowerlaw = ChabrierPowerLaw()
# Chabrier 2005 variant: wider lognormal, different center and slope
chabrier2005 = ChabrierPowerLaw(lognormal_width=0.55*np.log(10),
                                lognormal_center=0.2, alpha=2.35)

# registry mapping user-facing names to MassFunction classes
massfunctions = {'kroupa': Kroupa, 'salpeter': Salpeter,
                 'chabrierlognormal': ChabrierLogNormal,
                 'chabrierpowerlaw': ChabrierPowerLaw,
                 'chabrier': ChabrierPowerLaw,
                 }
# 'schechter': Schechter, 'modified_schechter': ModifiedSchecter}
# reverse lookup (class -> name); since ChabrierPowerLaw appears twice above,
# the later key ('chabrier') wins here
reverse_mf_dict = {v: k for k, v in massfunctions.items()}
# salpeter and schechter selections are arbitrary

# cache for make_cluster's expected-mass integrals, keyed by
# (massfunc, mmin, mmax)
expectedmass_cache = {}
def get_massfunc(massfunc, mmin=None, mmax=None, **kwargs):
    """
    Resolve ``massfunc`` into a MassFunction instance.

    Accepts an existing instance (whose bounds must agree with any
    ``mmin``/``mmax`` given), a registered MassFunction subclass, or the
    registered name of one; raises ValueError otherwise.
    """
    if isinstance(massfunc, MassFunction):
        if mmax is not None and massfunc.mmax != mmax:
            raise ValueError("mmax was specified, but a massfunction instance"
                             " was specified with a different mmax")
        if mmin is not None and massfunc.mmin != mmin:
            raise ValueError("mmin was specified, but a massfunction instance"
                             " was specified with a different mmin")
        return massfunc

    # a registered MassFunction subclass
    if massfunc in massfunctions.values():
        return massfunc(mmin=mmin, mmax=mmax, **kwargs)

    # the registered _name_ of a massfunc class
    if massfunc in massfunctions:
        return massfunctions[massfunc](mmin=mmin, mmax=mmax, **kwargs)

    raise ValueError("massfunc must either be a string in the set %s or a MassFunction instance"
                     % (", ".join(massfunctions.keys())))
def get_massfunc_name(massfunc):
    """
    Map a mass function (registered class, name string, or any object with a
    ``__name__``) to a short name string.
    """
    try:
        return reverse_mf_dict[massfunc]
    except KeyError:
        pass
    if type(massfunc) is str:
        return massfunc
    if hasattr(massfunc, '__name__'):
        return massfunc.__name__
    raise ValueError("invalid mass function")
def m_integrate(fn=kroupa, bins=np.logspace(-2, 2, 500)):
    """
    Trapezoid-rule mass-weighted integral of ``fn`` over each bin.

    Returns the bin midpoints and the per-bin contributions
    m * dm * (fn(m_lo) + fn(m_hi)) / 2.
    """
    midpoints = (bins[:-1] + bins[1:]) / 2.
    widths = bins[1:] - bins[:-1]
    contributions = midpoints * widths * (fn(bins[:-1]) + fn(bins[1:])) / 2.
    return midpoints, contributions
def cumint(fn=kroupa, bins=np.logspace(-2, 2, 500)):
    """
    Normalized cumulative trapezoid-rule integral of ``fn`` over ``bins``.

    BUG FIX: this previously called ``integrate(fn, bins)``, but at module
    level ``integrate`` is the ``scipy.integrate`` *module* (imported at the
    top of the file), not a function, so the call raised TypeError.  The
    per-bin trapezoid integral is now computed directly (the unweighted
    analogue of `m_integrate`).
    """
    contributions = (bins[1:] - bins[:-1]) * (fn(bins[:-1]) + fn(bins[1:])) / 2.
    return contributions.cumsum() / contributions.sum()
def m_cumint(fn=kroupa, bins=np.logspace(-2, 2, 500)):
    """Normalized cumulative mass-weighted integral of ``fn`` over ``bins``."""
    _, contributions = m_integrate(fn, bins)
    return contributions.cumsum() / contributions.sum()
def inverse_imf(p,
                mmin=None,
                mmax=None,
                massfunc='kroupa',
                **kwargs):
    """
    Inverse mass function: map likelihood values in [0, 1) onto masses by
    delegating to the mass function's distribution ppf.

    Parameters
    ----------
    p: np.array
        An array of floats in the range [0, 1).  These should be uniformly
        random numbers.
    mmin: float
    mmax: float
        Minimum and maximum stellar mass in the distribution
    massfunc: string or function
        massfunc can be 'kroupa', 'chabrier', 'salpeter', 'schechter', or a
        function
    """
    # NOTE(review): **kwargs is accepted but not forwarded anywhere.
    mf = get_massfunc(massfunc, mmin=mmin, mmax=mmax)
    if not hasattr(mf, 'distr'):
        # only distribution-backed mass functions can be inverted
        raise NotImplementedError
    return mf.distr.ppf(p)
def make_cluster(mcluster,
                 massfunc='kroupa',
                 verbose=False,
                 silent=False,
                 tolerance=0.0,
                 stop_criterion='nearest',
                 sampling='random',
                 mmax=None,
                 mmin=None,
                 **kwargs):
    """
    Sample from an IMF to make a cluster.  Returns the masses of all stars in the cluster

    Parameters
    ==========
    mcluster : float
        The target cluster mass.
    massfunc : string or MassFunction
        A mass function to use.
    verbose : bool
        Print progress while sampling.
    silent : bool
        Suppress the final summary print.
    tolerance : float
        tolerance is how close the cluster mass must be to the requested mass.
        It can be zero, but this does not guarantee that the final cluster mass will be
        exactly `mcluster`
    stop_criterion : 'nearest', 'before', 'after', 'sorted'
        The criterion to stop sampling when the total cluster mass is reached.
        See, e.g., Krumholz et al 2015: https://ui.adsabs.harvard.edu/abs/2015MNRAS.452.1447K/abstract
    sampling: 'random' or 'optimal'
        Optimal sampling is based on https://ui.adsabs.harvard.edu/abs/2015A%26A...582A..93S/abstract
        (though as of April 23, 2021, it is not yet correct)
        Optimal sampling is only to be used in the context of a variable M_max
        that is a function of the cluster mass, e.g., eqn 24 of Schulz+ 2015.
    """
    # use most common mass to guess needed number of samples
    # nsamp = mcluster / mostcommonmass[get_massfunc_name(massfunc)]
    # masses = inverse_imf(np.random.random(int(nsamp)), massfunc=massfunc, **kwargs)

    # mtot = masses.sum()
    # if verbose:
    #     print(("%i samples yielded a cluster mass of %g (%g requested)" %
    #            (nsamp, mtot, mcluster)))

    # accept Quantity or plain float; work in solar masses internally
    mcluster = u.Quantity(mcluster, u.M_sun).value

    mfc = get_massfunc(massfunc, mmin=mmin, mmax=mmax, **kwargs)

    # expected (mean) stellar mass for this mass function; cached because the
    # integral is comparatively expensive
    if (massfunc, mfc.mmin, mfc.mmax) in expectedmass_cache:
        expected_mass = expectedmass_cache[(massfunc, mfc.mmin, mfc.mmax)]
        assert expected_mass > 0
    else:
        expected_mass = mfc.m_integrate(mfc.mmin, mfc.mmax)[0]
        assert expected_mass > 0
        expectedmass_cache[(massfunc, mfc.mmin, mfc.mmax)] = expected_mass

    if verbose:
        print("Expected mass is {0:0.3f}".format(expected_mass))

    if sampling == 'optimal':
        # this is probably not _quite_ right, but it's a first step...
        p = np.linspace(0, 1, int(mcluster/expected_mass))
        return mfc.distr.ppf(p)
    elif sampling != 'random':
        raise ValueError("Only random sampling and optimal sampling are supported")

    mtot = 0
    masses = []

    while mtot < mcluster + tolerance:
        # at least 1 sample, but potentially many more
        nsamp = int(np.ceil((mcluster + tolerance - mtot) / expected_mass))
        assert nsamp > 0
        newmasses = mfc.distr.rvs(nsamp)
        masses = np.concatenate([masses, newmasses])
        mtot = masses.sum()
        if verbose:
            print("Sampled %i new stars. Total is now %g" %
                  (int(nsamp), mtot))

        if mtot >= mcluster + tolerance:  # don't force exact equality; that would yield infinite loop
            mcum = masses.cumsum()
            if stop_criterion == 'sorted':
                masses = np.sort(masses)
                if np.abs(masses[:-1].sum() - mcluster) < np.abs(masses.sum() -
                                                                 mcluster):
                    # if the most massive star makes the cluster a worse fit, reject it
                    # (this follows Krumholz+ 2015 appendix A1)
                    last_ind = len(masses) - 1
                else:
                    last_ind = len(masses)
            else:
                if stop_criterion == 'nearest':
                    # find the closest one, and use +1 to include it
                    last_ind = np.argmin(np.abs(mcum - mcluster)) + 1
                elif stop_criterion == 'before':
                    last_ind = np.argmax(mcum > mcluster)
                elif stop_criterion == 'after':
                    last_ind = np.argmax(mcum > mcluster) + 1
            # truncate the sample at the chosen index
            masses = masses[:last_ind]
            mtot = masses.sum()
            if verbose:
                print(
                    "Selected the first %i out of %i masses to get %g total" %
                    (last_ind, len(mcum), mtot))
            # force the break, because some stopping criteria can push mtot < mcluster
            break

    if not silent:
        print("Total cluster mass is %g (limit was %g)" % (mtot, mcluster))

    return masses
# cache of (masses, logL, logQ) interpolation tables, keyed by grid name
mass_luminosity_interpolator_cache = {}


def mass_luminosity_interpolator(name):
    """
    Return (masses, log L, log Q) arrays for the named grid ('VGS' or
    'Ekstrom'), building and caching them on first use.
    """
    if name in mass_luminosity_interpolator_cache:
        return mass_luminosity_interpolator_cache[name]
    elif name == 'VGS':
        # Vacca, Garmany & Shull 1996 Table 5 values (O/early-B stars)
        # non-extrapolated
        vgsMass = [
            51.3, 44.2, 41.0, 38.1, 35.5, 33.1, 30.8, 28.8, 26.9, 25.1, 23.6,
            22.1, 20.8, 19.5, 18.4
        ]
        vgslogL = [
            6.154, 6.046, 5.991, 5.934, 5.876, 5.817, 5.756, 5.695, 5.631,
            5.566, 5.499, 5.431, 5.360, 5.287, 5.211
        ]
        vgslogQ = [
            49.18, 48.99, 48.90, 48.81, 48.72, 48.61, 48.49, 48.34, 48.16,
            47.92, 47.63, 47.25, 46.77, 46.23, 45.69
        ]
        # mass extrapolated
        vgsMe = np.concatenate([
            np.linspace(0.03, 0.43, 100),
            np.linspace(0.43, 2, 100),
            np.linspace(2, 20, 100), vgsMass[::-1],
            np.linspace(50, 150, 100)
        ])
        # log luminosity extrapolated (piecewise mass-luminosity power laws
        # below the table, linear log-log fits beyond it)
        vgslogLe = np.concatenate([
            np.log10(0.23 * np.linspace(0.03, 0.43, 100)**2.3),
            np.log10(np.linspace(0.43, 2, 100)**4),
            np.log10(1.5 * np.linspace(2, 20, 100)**3.5), vgslogL[::-1],
            np.polyval(np.polyfit(np.log10(vgsMass[:3]), vgslogL[:3], 1),
                       np.log10(np.linspace(50, 150, 100)))
        ])
        # log Q (lyman continuum) extrapolated
        vgslogQe = np.concatenate([
            np.zeros(100),  # 0.03-0.43 solar mass stars produce 0 LyC photons
            np.zeros(100),  # 0.43-2.0 solar mass stars produce 0 LyC photons
            np.polyval(np.polyfit(np.log10(vgsMass[-3:]), vgslogQ[-3:], 1),
                       np.log10(np.linspace(8, 18.4, 100))),
            vgslogQ[::-1],
            np.polyval(np.polyfit(np.log10(vgsMass[:3]), vgslogQ[:3], 1),
                       np.log10(np.linspace(50, 150, 100)))
        ])
        mass_luminosity_interpolator_cache[name] = vgsMe, vgslogLe, vgslogQe
        return mass_luminosity_interpolator_cache[name]
    elif name == 'Ekstrom':
        from astroquery.vizier import Vizier
        Vizier.ROW_LIMIT = 1e7  # effectively infinite

        # this query should cache
        tbl = Vizier.get_catalogs('J/A+A/537/A146/iso')[0]
        # youngest isochrone: all stars still on the main sequence
        match = tbl['logAge'] == 6.5
        masses = tbl['Mass'][match]
        lums = tbl['logL'][match]

        # extend to lower masses with an L ~ M^3.5 power law anchored at the
        # lowest grid point
        mass_0 = 0.033
        lum_0 = np.log10((mass_0 / masses[0])**3.5 * 10**lums[0])
        mass_f = 200  # extrapolate to 200 Msun...
        lum_f = np.log10(10**lums[-1] * (mass_f / masses[-1])**1.35)

        masses = np.array([mass_0] + masses.tolist() + [mass_f])
        lums = np.array([lum_0] + lums.tolist() + [lum_f])

        # TODO: come up with a half-decent approximation here?  based on logTe?
        logQ = lums - 0.5

        mass_luminosity_interpolator_cache[name] = masses, lums, logQ

        return mass_luminosity_interpolator_cache[name]
    else:
        raise ValueError("Bad grid name {0}".format(name))
def lum_of_star(mass, grid='Ekstrom'):
    """
    Determine total luminosity of a star given its mass.

    Two grids:
        (1) VGS:
            Uses the Vacca, Garmany, Shull 1996 Table 5 Log Q and Mspec parameters
            returns LogL in solar luminosities
            **WARNING** Extrapolates for M not in [18.4, 50] msun
            http://en.wikipedia.org/wiki/Mass%E2%80%93luminosity_relation
        (2) Ekstrom 2012:
            Covers 0.8 - 64 Msun, extrapolated out of that
    """
    grid_masses, grid_loglums, _ = mass_luminosity_interpolator(grid)
    return np.interp(mass, grid_masses, grid_loglums)
def lum_of_cluster(masses, grid='Ekstrom'):
    """
    Log10 of the integrated luminosity of a cluster.

    masses is a list or array of masses.  All masses are included (a
    "M >= 8 Msun only" cut exists in the source but is commented out).
    """
    #if max(masses) < 8: return 0
    star_logL = lum_of_star(masses, grid=grid)  #[masses >= 8])
    return np.log10(np.sum(10**star_logL))
def lyc_of_star(mass, grid='VGS'):
    """
    Determine the Lyman continuum luminosity (log Q) of a star given its mass,
    interpolated from the Vacca, Garmany, Shull 1996 Table 5 Log Q and Mspec
    parameters.
    """
    grid_masses, _, grid_logQ = mass_luminosity_interpolator(grid)
    return np.interp(mass, grid_masses, grid_logQ)
def lyc_of_cluster(masses, grid='VGS'):
    """
    Determine the log of the integrated lyman continuum luminosity of a cluster.
    Only M>=8msun count; returns 0 if there are none.

    Parameters
    ----------
    masses : list or array
        Stellar masses (Msun).
    grid : str
        Grid name passed to `mass_luminosity_interpolator`.
    """
    masses = np.asarray(masses)
    # ROBUSTNESS FIX: an empty input previously crashed in max(); treat it
    # like "no ionizing stars" and return the existing 0 sentinel.
    if masses.size == 0 or masses.max() < 8:
        return 0
    logq = lyc_of_star(masses[masses >= 8], grid=grid)
    logqtot = np.log10((10**logq).sum())
    return logqtot
def color_from_mass(mass, outtype=float):
    """
    Use vendian.org colors:
    100 O2(V)        150 175 255   #9db4ff
     50 O5(V)        157 180 255   #9db4ff
     20 B1(V)        162 185 255   #a2b9ff
     10 B3(V)        167 188 255   #a7bcff
      8 B5(V)        170 191 255   #aabfff
      6 B8(V)        175 195 255   #afc3ff
    2.2 A1(V)        186 204 255   #baccff
    2.0 A3(V)        192 209 255   #c0d1ff
    1.86 A5(V)       202 216 255   #cad8ff
    1.6 F0(V)        228 232 255   #e4e8ff
    1.5 F2(V)        237 238 255   #edeeff
    1.3 F5(V)        251 248 255   #fbf8ff
    1.2 F8(V)        255 249 249   #fff9f9
    1 G2(V)          255 245 236   #fff5ec
    0.95 G5(V)       255 244 232   #fff4e8
    0.90 G8(V)       255 241 223   #fff1df
    0.85 K0(V)       255 235 209   #ffebd1
    0.70 K4(V)       255 215 174   #ffd7ae
    0.60 K7(V)       255 198 144   #ffc690
    0.50 M2(V)       255 190 127   #ffbe7f
    0.40 M4(V)       255 187 123   #ffbb7b
    0.35 M6(V)       255 187 123   #ffbb7b
    0.30 M8(V)       255 167 123   #ffbb7b  # my addition

    Parameters
    ----------
    mass : float or array
        Stellar mass in Msun.  Values outside the table range are clamped to
        the nearest end color (np.interp behavior).
    outtype : type
        ``float`` (default) returns channels scaled to [0, 1];
        ``int`` returns channels rounded to integers in [0, 255].

    BUG FIXES: the table previously listed the key ``0.10`` twice (the
    duplicate silently overwrote the first entry); and ``outtype=int``
    previously returned un-rounded floats.
    """
    mcolor = {  # noqa: E131
        100: (150, 175, 255),
        50: (157, 180, 255),
        20: (162, 185, 255),
        10: (167, 188, 255),
        8: (170, 191, 255),
        6: (175, 195, 255),
        2.2: (186, 204, 255),
        2.0: (192, 209, 255),
        1.86: (202, 216, 255),
        1.6: (228, 232, 255),
        1.5: (237, 238, 255),
        1.3: (251, 248, 255),
        1.2: (255, 249, 249),
        1: (255, 245, 236),
        0.95: (255, 244, 232),
        0.90: (255, 241, 223),
        0.85: (255, 235, 209),
        0.70: (255, 215, 174),
        0.60: (255, 198, 144),
        0.50: (255, 190, 127),
        0.40: (255, 187, 123),
        0.35: (255, 187, 123),
        0.30: (255, 177, 113),
        0.20: (255, 107, 63),
        0.10: (155, 57, 33),
        0.003: (105, 27, 0),
    }

    keys = sorted(mcolor.keys())

    # interpolate each channel independently over the (sorted) mass keys
    reds, greens, blues = zip(*[mcolor[k] for k in keys])

    r = np.interp(mass, keys, reds)
    g = np.interp(mass, keys, greens)
    b = np.interp(mass, keys, blues)

    if outtype == int:
        # round to integer channel values (works for scalars and arrays)
        return (np.round(r).astype(int), np.round(g).astype(int),
                np.round(b).astype(int))
    elif outtype == float:
        return (r / 255., g / 255., b / 255.)
    else:
        raise NotImplementedError
def color_of_cluster(cluster, colorfunc=color_from_mass):
    """Return the luminosity-weighted mean (r, g, b) color of a cluster.

    Each star's color (from ``colorfunc``) is weighted by its luminosity,
    10**lum_of_star(m), so the brightest stars dominate the blend.
    """
    rgb = np.array([colorfunc(mass) for mass in cluster])
    weight = 10**np.array([lum_of_star(mass) for mass in cluster])
    weighted_sum = (rgb * weight[:, None]).sum(axis=0)
    return weighted_sum / weight.sum()
def coolplot(clustermass, massfunc=kroupa, log=True, **kwargs):
    """
    "cool plot" is just because the plot is kinda neat.

    This function creates a cluster using `make_cluster`, assigns each star a
    color based on the vendian.org colors using `color_from_mass`, and assigns
    each star a random Y-value distributed underneath the specified mass
    function's curve.

    Parameters
    ----------
    clustermass: float
        The mass of the cluster in solar masses
    massfunc: MassFunction
        A MassFunction instance (must be callable and have an ``mmax``
        attribute)
    log: bool
        Is the Y-axis log-scaled?

    Returns
    -------
    cluster: array
        The array of stellar masses that makes up the cluster
    yax: array
        The array of Y-values associated with the stellar masses
    colors: list
        A list of color tuples associated with each star
    """
    cluster = make_cluster(clustermass,
                           massfunc=massfunc,
                           mmax=massfunc.mmax,
                           **kwargs)
    colors = [color_from_mass(m) for m in cluster]
    # the density at the most massive star is the lowest value the mass
    # function attains over the sampled cluster; it anchors the bottom of
    # the random vertical scatter below
    maxmass = cluster.max()
    pmin = massfunc(maxmass)
    if log:
        # uniform scatter between log10(pmin) and log10(massfunc(m))
        yax = [
            np.random.rand() * (np.log10(massfunc(m)) - np.log10(pmin)) +
            np.log10(pmin) for m in cluster
        ]
    else:
        # NOTE(review): the linear branch scatters between pmin and
        # massfunc(m)/pmin + pmin, which is not symmetric with the log
        # branch — presumably intentional for display only; confirm
        yax = [
            np.random.rand() * ((massfunc(m)) / (pmin)) + (pmin)
            for m in cluster
        ]
    assert all(np.isfinite(yax))

    return cluster, yax, colors
    # import pylab as pl
    # pl.scatter(cluster, yax, c=colors, s=np.log10(cluster)*5)
class KoenConvolvedPowerLaw(MassFunction):
    """
    Implementation of the error-convolved power law described in Koen &
    Kondlo (2009), "Fitting power-law distributions to data with
    measurement errors", equations (3) and (5): a truncated power law
    convolved with Gaussian measurement errors.

    Parameters
    ----------
    m: float
        The mass at which to evaluate the function
    mmin, mmax: floats
        The lower and upper bounds for the power law distribution
    gamma: float
        The specified gamma for the distribution, slope = -gamma - 1
    sigma: float or None
        specified spread of error, assumes Normal distribution with mean 0 and variance sigma.
    """
    default_mmin = 0
    default_mmax = np.inf

    def __init__(self, mmin, mmax, gamma, sigma):
        super().__init__(mmin, mmax)
        self.sigma = sigma
        self.gamma = gamma

    def __call__(self, m, integral_form=False):
        """Evaluate the CDF (``integral_form=True``) or PDF at mass(es) m."""
        m = np.asarray(m)
        if self.mmax < self.mmin:
            raise ValueError("mmax must be greater than mmin")

        if integral_form:
            # Returns
            # -------
            # Probability that mass < m for the CDF with specified
            # mmin, mmax, sigma, and gamma (Koen & Kondlo eqn 5)
            def error(t):
                # unnormalized Gaussian kernel
                return np.exp(-(t**2) / 2)
            error_coeffecient = 1 / np.sqrt(2 * np.pi)

            def error_integral(y):
                # Gaussian tail above mmax, scaled by sigma
                error_integral = quad(error, -np.inf,
                                      (y - self.mmax) / self.sigma)[0]
                return error_integral
            vector_errorintegral = np.vectorize(error_integral)
            phi = vector_errorintegral(m) * error_coeffecient

            def integrand(x, y):
                return ((self.mmin**-self.gamma - x**-self.gamma) * np.exp(
                    (-1 / 2) * ((y - x) / self.sigma)**2))
            coef = (1 / (self.sigma * np.sqrt(2 * np.pi) *
                         (self.mmin**-self.gamma - self.mmax**-self.gamma)))

            def eval_integral(y):
                integral = quad(integrand, self.mmin, self.mmax, args=(y))[0]
                return integral
            vector_integral = np.vectorize(eval_integral)
            probability = phi + coef * vector_integral(m)

            return probability

        else:
            # Returns
            # ------
            # Probability density of observing m given the PDF with
            # specified mmin, mmax, sigma, and gamma (Koen & Kondlo eqn 3)
            def integrand(x, y):
                return (x**-(self.gamma + 1)) * np.exp(-.5 * (
                    (y - x) / self.sigma)**2)

            coef = (self.gamma / ((self.sigma * np.sqrt(2 * np.pi)) *
                                  ((self.mmin**-self.gamma) -
                                   (self.mmax**-self.gamma))))

            def Integral(y):
                I = quad(integrand, self.mmin, self.mmax, args=(y))[0]
                return I
            vector_I = np.vectorize(Integral)

            return coef * vector_I(m)
class KoenTruePowerLaw(MassFunction):
    """
    Implementation of the error-free power law described in Koen & Kondlo
    (2009), "Fitting power-law distributions to data with measurement
    errors", equations (2) and (4).

    This is a power law with truncations on the low and high end.

    Parameters
    ----------
    m: float
        The mass at which to evaluate the function
    mmin, mmax: floats
        The lower and upper bounds for the power law distribution
    gamma: float
        The specified gamma for the distribution, related to the slope;
        the PDF slope is alpha = -(gamma + 1)
    """
    default_mmin = 0
    default_mmax = np.inf

    def __init__(self, mmin, mmax, gamma):
        super().__init__(mmin, mmax)
        self.gamma = gamma

    def __call__(self, m, integral_form=False):
        """Evaluate the CDF (``integral_form=True``) or PDF at mass(es) m."""
        m = np.asarray(m)
        if self.mmax < self.mmin:
            raise ValueError('mmax must be greater than mmin')
        if integral_form:
            # CDF (Koen & Kondlo eqn 2): probability that mass < m,
            # valid for mmin <= m <= mmax; 0 below mmin, 1 above mmax.
            # BUGFIX: the denominator must be parenthesized.  The original
            # expression divided only by mmin**-gamma and then SUBTRACTED
            # mmax**-gamma, so the CDF was mis-normalized (e.g. it did not
            # approach 1 at m -> mmax).
            cdf = ((self.mmin**-self.gamma - np.power(m, -self.gamma)) /
                   (self.mmin**-self.gamma - self.mmax**-self.gamma))
            return_value = (cdf * ((m > self.mmin) & (m < self.mmax)) + 1.0 *
                            (m >= self.mmax) + 0 * (m < self.mmin))
            return return_value
        else:
            # PDF (Koen & Kondlo eqn 4): density at m, nonzero only for
            # mmin < m < mmax.  (The original code labeled this variable
            # "cdf" and the branch above "pdf" — swapped.)
            pdf = (self.gamma * np.power(m, -(self.gamma + 1)) /
                   (self.mmin**-self.gamma - self.mmax**-self.gamma))
            return_value = (pdf * ((m > self.mmin) & (m < self.mmax)) + 0 *
                            (m > self.mmax) + 0 * (m < self.mmin))
            return return_value
| 34,904 | 33.593657 | 102 | py |
imf | imf-master/imf/distributions.py | import numpy as np
import scipy.stats
class Distribution:
    """Abstract base class for the distribution objects in this module."""

    def __init__(self):
        # edges of the support of the pdf
        self.m1 = 0
        self.m2 = np.inf

    def pdf(self, x):
        """Probability density function (overridden by subclasses)."""

    def cdf(self, x):
        """Cumulative distribution function (overridden by subclasses)."""

    def rvs(self, N):
        """Generate a random sample of size N (overridden by subclasses)."""

    def ppf(self, x):
        """Inverse CDF — not provided by the base class."""
        raise RuntimeError('not implemented')
class LogNormal(Distribution):
    def __init__(self, mu, sig):
        """
        Log-normal distribution with density
        ~ 1/x exp(-(log(x) - log(mu))^2 / (2 sig^2)).

        The mean of log(sample) is log(mu) and the standard deviation of
        log(sample) is sig.  Delegates to a frozen scipy distribution.
        """
        self.m1, self.m2 = 0, np.inf
        self.d = scipy.stats.lognorm(s=sig, scale=mu)

    def pdf(self, x):
        """Probability density at x."""
        return self.d.pdf(x)

    def cdf(self, x):
        """Cumulative probability at x."""
        return self.d.cdf(x)

    def rvs(self, N):
        """Draw N random samples."""
        return self.d.rvs(N)

    def ppf(self, x):
        """Quantile function (inverse CDF)."""
        return self.d.ppf(x)
class TruncatedLogNormal:
    def __init__(self, mu, sig, m1, m2):
        """Log-normal distribution restricted to the interval [m1, m2]."""
        self.m1 = m1
        self.m2 = m2
        self.d = scipy.stats.lognorm(s=sig, scale=mu)
        # probability mass of the untruncated log-normal inside [m1, m2];
        # divides every density/probability below to renormalize
        self.norm = self.d.cdf(self.m2) - self.d.cdf(self.m1)

    def pdf(self, x):
        """Renormalized density; zero outside [m1, m2]."""
        inside = (x >= self.m1) * (x <= self.m2)
        return self.d.pdf(x) * inside / self.norm

    def cdf(self, x):
        """Cumulative probability, clipped so F(m1)=0 and F(m2)=1."""
        clipped = np.clip(x, self.m1, self.m2)
        return (self.d.cdf(clipped) - self.d.cdf(self.m1)) / self.norm

    def rvs(self, N):
        """Draw N samples by inverse-CDF sampling."""
        return self.ppf(np.random.uniform(0, 1, size=N))

    def ppf(self, x0):
        """Quantile function; NaN for arguments outside [0, 1]."""
        x = np.asarray(x0)
        lo = self.d.cdf(self.m1)
        hi = self.d.cdf(self.m2)
        out = np.asarray(self.d.ppf(lo + x * (hi - lo)))
        out[(x < 0) | (x > 1)] = np.nan
        return out
class PowerLaw(Distribution):
    def __init__(self, slope, m1, m2):
        """Power law p(x) ~ x**slope on the interval [m1, m2].

        slope == -1 is supported (logarithmic normalization).
        Requires 0 < m1 < m2.
        """
        self.slope = slope
        self.m1 = float(m1)
        self.m2 = float(m2)
        assert (m1 < m2)
        assert (m1 > 0)
        # (The original also asserted m1 != -1, which is dead code given
        # m1 > 0; slope == -1 is explicitly handled below.)

    def pdf(self, x):
        """Probability density at x; zero outside [m1, m2]."""
        if self.slope == -1:
            return (x**self.slope / (np.log(self.m2 / self.m1)) *
                    (x >= self.m1) * (x <= self.m2))
        else:
            return x**self.slope * (self.slope + 1) / (
                self.m2**(self.slope + 1) -
                self.m1**(self.slope + 1)) * (x >= self.m1) * (x <= self.m2)

    def cdf(self, x):
        """Cumulative probability at x (0 below m1, 1 above m2)."""
        if self.slope == -1:
            # BUGFIX/generalization: previously raised RuntimeError even
            # though pdf and ppf handle slope == -1; the CDF in that case
            # is logarithmic: F(x) = ln(x/m1)/ln(m2/m1).
            return (np.log(np.clip(x, self.m1, self.m2) / self.m1) /
                    np.log(self.m2 / self.m1))
        else:
            return (np.clip(x, self.m1, self.m2)**(self.slope + 1) -
                    (self.m1**(self.slope + 1))) / (self.m2**(self.slope + 1) -
                                                    self.m1**(self.slope + 1))

    def rvs(self, N):
        """Draw N samples by inverse-CDF sampling."""
        x = np.random.uniform(size=N)
        return self.ppf(x)

    def ppf(self, x0):
        """Quantile function; NaN for arguments outside [0, 1]."""
        x = np.asarray(x0)
        if self.slope == -1:
            ret = np.exp(x * np.log(self.m2 / self.m1)) * self.m1
        else:
            ret = (x *
                   (self.m2**(self.slope + 1) - self.m1**(self.slope + 1)) +
                   self.m1**(self.slope + 1))**(1. / (self.slope + 1))
        ret = np.asarray(ret)
        ret[(x < 0) | (x > 1)] = np.nan
        return ret
class BrokenPowerLaw:
    def __init__(self, slopes, breaks):
        """
        Broken power-law with different slopes.

        Arguments:
        slopes: array
            Array of power-law slopes
        breaks: array
            Array of points/edges of powerlaw segments; must be larger by one
            than the list of slopes
        """
        self.slopes = slopes
        self.breaks = breaks
        self._calcpows()
        self._calcweights()

    @property
    def m1(self):
        # leftmost edge of the support
        return self.breaks[0]

    @m1.setter
    def m1(self, value):
        # moving an edge invalidates the per-segment distributions and
        # weights, so both are recomputed
        self.breaks[0] = value
        self._calcpows()
        self._calcweights()

    @property
    def m2(self):
        # rightmost edge of the support
        return self.breaks[-1]

    @m2.setter
    def m2(self, value):
        self.breaks[-1] = value
        self._calcpows()
        self._calcweights()

    def _calcpows(self):
        # build one PowerLaw per segment between consecutive break points
        if not (len(self.slopes) == len(self.breaks) - 1):
            raise ValueError(
                'The length of array of slopes must be equal to length of ' +
                'array of break points minus 1')
        if not ((np.diff(self.breaks) > 0).all()):
            raise ValueError('Power law break-points must be monotonic')
        nsegm = len(self.slopes)
        pows = []
        for ii in range(nsegm):
            pows.append(
                PowerLaw(self.slopes[ii], self.breaks[ii],
                         self.breaks[ii + 1]))
        self.pows = pows
        self.nsegm = nsegm

    def _calcweights(self):
        # weights scale each segment so that consecutive pdfs join
        # continuously at the break points, then normalize to sum to 1
        nsegm = len(self.slopes)
        weights = [1]
        for ii in range(1, nsegm):
            rat = self.pows[ii].pdf(self.breaks[ii]) / self.pows[ii - 1].pdf(
                self.breaks[ii])
            weights.append(weights[-1] / rat)
        weights = np.array(weights)
        self.weights = weights / np.sum(weights)  # relative normalizations
        self.nsegm = nsegm

    def pdf(self, x):
        """Probability density at x; zero outside [m1, m2]."""
        x1 = np.asarray(x)
        ret = np.atleast_1d(x1) * 0.
        for ii in range(self.nsegm):
            xind = (x1 < self.breaks[ii + 1]) & (x1 >= self.breaks[ii])
            if xind.sum() > 0:
                ret[xind] = self.weights[ii] * self.pows[ii].pdf(x1[xind])
        return ret.reshape(x1.shape)

    def cdf(self, x):
        """Cumulative probability at x (1 for x >= m2)."""
        x1 = np.asarray(x)
        ret = np.atleast_1d(x1) * 0.
        # cumulative weight mass at the left edge of each segment
        cums = np.r_[[0], np.cumsum(self.weights)]
        for ii in range(self.nsegm):
            xind = (x1 < self.breaks[ii + 1]) & (x1 >= self.breaks[ii])
            if np.any(xind):
                ret[xind] = cums[ii] + self.weights[ii] * self.pows[ii].cdf(
                    x1[xind])
        xind = x1 >= self.breaks[-1]
        if np.any(xind):
            ret[xind] = 1
        return ret.reshape(x1.shape)

    def rvs(self, N):
        """Draw N samples: multinomial split across segments, then sample
        each segment's power law."""
        Ns = np.random.multinomial(N, self.weights)
        ret = []
        for ii in range(self.nsegm):
            if Ns[ii] > 0:
                ret.append(self.pows[ii].rvs(Ns[ii]))
        return np.concatenate(ret)

    def ppf(self, x0):
        """Quantile function; NaN for arguments outside [0, 1]."""
        x = np.asarray(x0)
        x1 = np.atleast_1d(x)
        edges = np.r_[[0], np.cumsum(self.weights)]
        # edges of powerlaw in CDF scale from 0 to 1
        pos = np.digitize(x1, edges)  # bin positions, 1 is the leftmost
        pos = np.clip(pos, 1, self.nsegm)
        # we can get zeros here if input is corrupt
        left = edges[pos - 1]
        w = self.weights[pos - 1]
        x2 = np.clip((x1 - left) / w, 0, 1)  # mapping to 0,1 on the segment
        # must force float b/c int dtypes can result in truncation
        ret = np.zeros_like(x1, dtype='float')
        for ii in range(x.size):
            ret[ii] = self.pows[pos[ii] - 1].ppf(x2[ii])
        isnan = (x1 < 0) | (x1 > 1)
        if any(isnan):
            ret[isnan] = np.nan
        return ret.reshape(x.shape)
class CompositeDistribution(Distribution):
    def __init__(self, distrs):
        """ A Composite distribution that consists of several distributions
        that continuously join together

        Arguments:
        ----------
        distrs: list of Distributions
            The list of distributions. Their supports must not overlap
            and not have any gaps.

        Example:
        --------
        dd=distributions.CompositeDistribution([
            distributions.TruncatedLogNormal(0.3,0.3,0.08,1),
            distributions.PowerLaw(-2.55,1,np.inf)])
        dd.pdf(3)
        """
        nsegm = len(distrs)
        self.distrs = distrs
        weights = [1]
        breaks = [_.m1 for _ in self.distrs] + [self.distrs[-1].m2]
        self.m1 = breaks[0]  # leftmost edge
        self.m2 = breaks[-1]  # rightmost edge

        # check that edges of intervals match
        for ii in range(1, nsegm):
            assert (distrs[ii].m1 == distrs[ii - 1].m2)

        for ii in range(1, nsegm):
            rat = distrs[ii].pdf(breaks[ii]) / distrs[ii - 1].pdf(breaks[ii])
            # relative normalization of next pdf to the previous one so they
            # join without a break
            weights.append(weights[-1] / rat)
        weights = np.array(weights)
        self.breaks = breaks
        self.weights = weights / np.sum(weights)
        # these are relative weights of each pdf
        self.nsegm = nsegm

    def pdf(self, x):
        """Probability density at x; zero outside [m1, m2]."""
        x1 = np.asarray(x)
        ret = np.atleast_1d(x1 * 0.)
        for ii in range(self.nsegm):
            xind = (x1 < self.breaks[ii + 1]) & (x1 >= self.breaks[ii])
            if xind.sum() > 0:
                ret[xind] = self.weights[ii] * self.distrs[ii].pdf(x1[xind])
        return ret.reshape(x1.shape)

    def cdf(self, x):
        """Cumulative probability at x (1 for x >= m2)."""
        x1 = np.asarray(x)
        ret = np.atleast_1d(x1 * 0.)
        # cumulative weight mass at the left edge of each segment
        cums = np.r_[[0], np.cumsum(self.weights)]
        for ii in range(self.nsegm):
            xind = (x1 < self.breaks[ii + 1]) & (x1 >= self.breaks[ii])
            if xind.sum() > 0:
                ret[xind] = cums[ii] + self.weights[ii] * self.distrs[ii].cdf(
                    x1[xind])
        xind = x1 >= self.breaks[-1]
        if xind.sum():
            ret[xind] = 1
        return ret.reshape(x1.shape)

    def rvs(self, N):
        """Draw N samples: multinomial split across the component
        distributions, then shuffle so segments are not grouped."""
        Ns = np.random.multinomial(N, self.weights)
        ret = []
        for ii in range(self.nsegm):
            if Ns[ii] > 0:
                ret.append(self.distrs[ii].rvs(Ns[ii]))
        ret = np.concatenate(ret)
        ret = np.random.permutation(ret)  # permutation
        return ret

    def ppf(self, x0):
        """Quantile function; NaN for arguments outside [0, 1]."""
        x = np.asarray(x0)
        x1 = np.atleast_1d(x)
        edges = np.r_[[0], np.cumsum(self.weights)]
        pos = np.digitize(x1, edges)
        pos = np.clip(pos, 1, self.nsegm)  # if input is <0 or >1
        left = edges[pos - 1]
        w = self.weights[pos - 1]
        x2 = np.clip((x1 - left) / w, 0, 1)  # mapping to 0,1 on the segment
        # BUGFIX: force float output, matching BrokenPowerLaw.ppf.  With an
        # integer input array, np.zeros_like(x1) is an int array: quantiles
        # would be truncated and the NaN assignment below would raise.
        ret = np.zeros_like(x1, dtype='float')
        for ii in range(x.size):
            ret[ii] = self.distrs[pos[ii] - 1].ppf(x2[ii])
        ret[(x1 < 0) | (x1 > 1)] = np.nan
        return ret.reshape(x.shape)
| 10,611 | 30.39645 | 79 | py |
imf | imf-master/imf/plf.py | """
Protostellar luminosity functions as described by Offner and McKee, 2011
Alternatively, perhaps try to construct a probabilistic P(L; m, m_f) given a
series of stellar evolution codes?
"""
import numpy as np
import scipy.integrate
import warnings
from .imf import MassFunction, ChabrierPowerLaw
chabrierpowerlaw = ChabrierPowerLaw()
class McKeeOffner_PLF(MassFunction):
    def __init__(self, j=1, n=1, jf=3/4., mmin=0.033, mmax=3.0, imf=chabrierpowerlaw, **kwargs):
        """
        Incomplete. The PLF requires a protostellar evolution code as part of its input.
        """
        # The class is an unfinished implementation of the Offner & McKee
        # (2011) protostellar luminosity function; everything after this
        # raise (including all of __call__) is currently unreachable via a
        # constructed instance.
        raise NotImplementedError
        self.j = j
        self.jf = jf
        self.n = n
        self.mmin = mmin
        self.mmax = mmax
        self.imf = imf

        def den_func(x):
            return self.imf(x)*x**(-self.jf)
        self.denominator = scipy.integrate.quad(den_func, self.mmin, self.mmax, **kwargs)[0]

        self.normfactor = 1

    def __call__(self, luminosity, taper=False, integral_form=False, **kwargs):
        """ Unclear if integral_form is right..."""
        if taper:
            # tapered accretion: each integrand carries a (1-(L/x)^(1-j))^0.5
            # cutoff factor
            def num_func(x, luminosity_):
                tf = (1-(luminosity_/x)**(1-self.j))**0.5
                return self.imf(x)*x**(self.j-self.jf-1) * tf

            def integrate(lolim, luminosity_):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax,
                                                args=(luminosity_,),
                                                **kwargs)[0]
                return integral

            numerator = np.vectorize(integrate)(np.where(self.mmin <
                                                         luminosity,
                                                         luminosity,
                                                         self.mmin),
                                                luminosity)
        else:
            def num_func(x):
                return self.imf(x)*x**(self.j-self.jf-1)

            def integrate(lolim):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax, **kwargs)[0]
                return integral

            # lower limit is max(luminosity, mmin), vectorized over input
            numerator = np.vectorize(integrate)(np.where(self.mmin <
                                                         luminosity,
                                                         luminosity,
                                                         self.mmin))

        result = (1-self.j) * luminosity**(1-self.j) * numerator / self.denominator
        if integral_form:
            warnings.warn("The 'integral form' of the Chabrier PMF is not correctly normalized; "
                          "it is just PMF(m) * m")
            return result * self.normfactor * luminosity
            # unreachable: kept from the original draft
            raise ValueError("Integral version not yet computed")
        else:
            return result * self.normfactor
| 2,862 | 36.181818 | 97 | py |
imf | imf-master/imf/pmf.py | """
Protostellar mass functions as described by McKee and Offner, 2010
"""
import numpy as np
import scipy.integrate
import warnings
from .imf import MassFunction, ChabrierPowerLaw, Kroupa
chabrierpowerlaw = ChabrierPowerLaw()
class McKeeOffner_PMF(MassFunction):
    default_mmin = 0.033
    default_mmax = 3.0

    def __init__(self, j=1, n=1, jf=3/4., mmin=default_mmin, mmax=default_mmax,
                 imf=chabrierpowerlaw, **kwargs):
        """
        McKee & Offner (2010) protostellar mass function.

        Parameters
        ----------
        j, jf : float
            Accretion-law exponents; different (j, jf) pairs select the
            accretion model (see the module-level ChabrierPMF_* instances).
        n : float
            Stored but not used in the computation below.
        imf : MassFunction
            The final (stellar) IMF to weight by.
        kwargs :
            Passed to scipy.integrate.quad.
        """
        super().__init__(mmin=mmin, mmax=mmax)
        self.j = j
        self.jf = jf
        self.n = n
        self.imf = imf

        def den_func(x):
            return self.imf(x)*x**(-self.jf)
        # normalization integral over the full final-mass range
        self.denominator = scipy.integrate.quad(den_func, self.mmin, self.mmax, **kwargs)[0]

        self.normfactor = 1

    def __call__(self, mass, taper=False, integral_form=False, **kwargs):
        """ Unclear if integral_form is right..."""
        if taper:
            # tapered accretion adds a (1-(m/x)^(1-j))^0.5 cutoff factor
            def num_func(x, mass_):
                tf = (1-(mass_/x)**(1-self.j))**0.5
                return self.imf(x)*x**(self.j-self.jf-1) * tf

            def integrate(lolim, mass_):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax, args=(mass_,), **kwargs)[0]
                return integral

            numerator = np.vectorize(integrate)(np.where(self.mmin < mass, mass, self.mmin), mass)
        else:
            def num_func(x):
                return self.imf(x)*x**(self.j-self.jf-1)

            def integrate(lolim):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax, **kwargs)[0]
                return integral

            # lower limit is max(mass, mmin), vectorized over the input
            numerator = np.vectorize(integrate)(np.where(self.mmin < mass, mass, self.mmin))

        result = (1-self.j) * mass**(1-self.j) * numerator / self.denominator
        if integral_form:
            warnings.warn("The 'integral form' of the Chabrier PMF is not correctly normalized; "
                          "it is just PMF(m) * m")
            return result * self.normfactor * mass
            # unreachable: kept from the original draft
            raise ValueError("Integral version not yet computed")
        else:
            return result * self.normfactor
class McKeeOffner_2CTC(MassFunction):
    """ 2-component Turbulent Core variant """
    default_mmin = 0.033
    default_mmax = 3.0

    def __init__(self, Rmdot=3.6, j=0.5, jf=3/4., mmin=default_mmin, mmax=default_mmax,
                 imf=chabrierpowerlaw, **kwargs):
        """
        Parameters
        ----------
        Rmdot : float
            Accretion-rate ratio parameter of the two-component model.
        j, jf : float
            Accretion-law exponents.
        imf : MassFunction
            The final (stellar) IMF to weight by.
        kwargs :
            Passed to scipy.integrate.quad.
        """
        super().__init__(mmin=mmin, mmax=mmax)
        self.j = j
        self.jf = jf
        self.Rmdot = Rmdot
        self.imf = imf

        def den_func(x):
            return self.imf(x) * (2/((1+Rmdot**2*x**1.5)**0.5+1))
        # normalization integral over the full final-mass range
        self.denominator = scipy.integrate.quad(den_func, self.mmin, self.mmax, **kwargs)[0]

        self.normfactor = 1

    def __call__(self, mass, taper=False, integral_form=False, **kwargs):
        """ Unclear if integral_form is right..."""
        if taper:
            # tapered accretion adds a (1-(m/x)^(1-j))^0.5 cutoff factor
            def num_func(x, mass_):
                tf = (1-(mass_/x)**(1-self.j))**0.5
                return self.imf(x)*(1./x)**(1-self.j) * (2/((1+self.Rmdot**2*x**1.5)**0.5+1)) * tf

            def integrate(lolim, mass_):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax, args=(mass_,), **kwargs)[0]
                return integral

            numerator = np.vectorize(integrate)(np.where(self.mmin < mass, mass, self.mmin), mass)
        else:
            def num_func(x):
                return self.imf(x)*(1./x)**(1-self.j) * (2/((1+self.Rmdot**2*x**1.5)**0.5+1))

            def integrate(lolim):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax, **kwargs)[0]
                return integral

            # lower limit is max(mass, mmin), vectorized over the input
            numerator = np.vectorize(integrate)(np.where(self.mmin < mass, mass, self.mmin))

        result = (1-self.j) * mass**(1-self.j) * numerator / self.denominator
        if integral_form:
            warnings.warn("The 'integral form' of the Chabrier PMF is not correctly normalized; "
                          "it is just PMF(m) * m")
            return result * self.normfactor * mass
            # unreachable: kept from the original draft
            raise ValueError("Integral version not yet computed")
        else:
            return result * self.normfactor
# Pre-built Chabrier-based PMFs for the standard accretion models:
# IS = isothermal sphere, TC = turbulent core, CA = competitive accretion,
# 2CTC = two-component turbulent core
ChabrierPMF_IS = McKeeOffner_PMF(j=0, jf=0, )
ChabrierPMF_TC = McKeeOffner_PMF(j=0.5, jf=0.75, )
ChabrierPMF_CA = McKeeOffner_PMF(j=2/3., jf=1.0, )
ChabrierPMF_2CTC = McKeeOffner_2CTC()
class McKeeOffner_SalpeterPMF(MassFunction):
    """Analytic special case of the PMF for a pure Salpeter IMF.

    The general McKeeOffner_PMF above supersedes this class; it is kept
    for its closed-form evaluation (no numerical integration).
    """
    default_mmin = 0.033
    default_mmax = 3.0

    def __init__(self, j=1, jf=3/4., alpha=2.35, mmin=default_mmin, mmax=default_mmax):
        super().__init__(mmin=mmin, mmax=mmax)
        self.alpha = alpha
        self.j = j
        self.jf = jf

    def __call__(self, mass, **kwargs):
        """Evaluate the Salpeter-based PMF at the given mass."""
        exponent = self.alpha - 1 + self.jf - self.j
        taper = 1 - (mass / self.mmax) ** (exponent)
        return taper * mass ** (-((self.alpha - 2) + self.jf))
# Salpeter- and Kroupa-based variants of the same accretion models
# (IS/TC/CA/2CTC as above)
SalpeterPMF_IS = McKeeOffner_SalpeterPMF(j=0, jf=0, )
SalpeterPMF_TC = McKeeOffner_SalpeterPMF(j=0.5, jf=0.75, )
SalpeterPMF_CA = McKeeOffner_SalpeterPMF(j=2/3., jf=1.0, )

kroupa = Kroupa()
KroupaPMF_IS = McKeeOffner_PMF(j=0, jf=0, imf=kroupa)
KroupaPMF_TC = McKeeOffner_PMF(j=0.5, jf=0.75, imf=kroupa)
KroupaPMF_CA = McKeeOffner_PMF(j=2/3., jf=1.0, imf=kroupa)
KroupaPMF_2CTC = McKeeOffner_2CTC(imf=kroupa)
class McKeeOffner_AcceleratingSF_PMF(MassFunction):
    default_mmin = 0.033
    default_mmax = 3.0

    def __init__(self, j=1, n=1, jf=3/4., mmin=default_mmin, mmax=default_mmax,
                 tau=1, # current time, Myr
                 tm=0.54, # SF timescale, Myr
                 tf1=0.50, # accretion timescale for a 1-msun star
                 imf=chabrierpowerlaw, **kwargs):
        """
        McKee & Offner 2010 Protostellar Mass Function with an accelerating star formation rate
        """
        super().__init__(mmin=mmin, mmax=mmax)
        self.j = j
        self.jf = jf
        self.n = n
        self.tau = tau
        self.tm = tm
        self.imf = imf
        self.tf1 = tf1

        def den_func(x):
            return self.imf(x)*(1-np.exp(-self.tf1*x**(1-self.jf)/self.tau))/x
        # normalization integral; must be strictly positive for the PMF
        # to be well defined
        self.denominator = self.tau * scipy.integrate.quad(den_func, self.mmin, self.mmax, **kwargs)[0]
        assert self.denominator > 0

        self.normfactor = 1

    def __call__(self, mass, tau=None, taper=False, integral_form=False, **kwargs):
        """
        Parameters
        ----------
        tau : float or None
            Evaluation time in Myr; defaults to the value given at
            construction.  (The original docstring documented a ``tm``
            parameter that does not exist in this signature.)
        """
        if tau is None:
            tau = self.tau
        if taper:
            # tapered accretion is not implemented for the accelerating-SF
            # variant; the draft integrand is preserved below
            raise NotImplementedError()
            # def num_func(x, mass_):
            #     tf = (1-(mass_/x)**(1-self.j))**0.5
            #     return self.massfunc(x)*x**(self.j-self.jf-1) * tf

            # def integrate(lolim, mass_):
            #     integral = scipy.integrate.quad(num_func, lolim, self.mmax, args=(mass_,), **kwargs)[0]
            #     return integral

            # numerator = np.vectorize(integrate)(np.where(self.mmin < mass, mass, self.mmin), mass)
        else:
            def num_func(x, mass_):
                # per-star formation time for current mass mass_ and final
                # mass x, weighted by exp(-t/tau) for the accelerating SFR
                tm = self.tf1 * mass_**(1-self.j) / (x**(self.jf-self.j))
                return self.imf(x)*x**(self.j-self.jf-1) * np.exp(-tm / tau)

            def integrate(lolim, mass_):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax, args=(mass_), **kwargs)[0]
                return integral

            numerator = np.vectorize(integrate)(np.where(self.mmin < mass, mass, self.mmin), mass)
            assert np.all(numerator >= 0)

        result = self.tf1 * (1-self.j) * mass**(1-self.j) * numerator / self.denominator
        # it is possible with the time-evolution case to get negative values for high masses;
        # these should (probably?) just be zero'd
        result = np.where(mass > self.mmax, np.nan, result)
        if integral_form:
            warnings.warn("The 'integral form' of the Chabrier PMF is not correctly normalized; "
                          "it is just PMF(m) * m")
            return result * self.normfactor * mass
            # unreachable: kept from the original draft
            raise ValueError("Integral version not yet computed")
        else:
            return result * self.normfactor
# Accelerating-star-formation variants (the 2CTC version is not implemented)
ChabrierPMF_AcceleratingSF_IS = McKeeOffner_AcceleratingSF_PMF(j=0, jf=0, )
ChabrierPMF_AcceleratingSF_TC = McKeeOffner_AcceleratingSF_PMF(j=0.5, jf=0.75, )
ChabrierPMF_AcceleratingSF_CA = McKeeOffner_AcceleratingSF_PMF(j=2/3., jf=1.0, )
#ChabrierPMF_AcceleratingSF_2CTC = McKeeOffner_AcceleratingSF_2CTC()
| 8,608 | 35.634043 | 104 | py |
imf | imf-master/imf/__init__.py | from .imf import *
| 19 | 9 | 18 | py |
imf | imf-master/imf/cmf.py | from __future__ import print_function
import numpy as np
from astropy import units as u
from astropy import constants
import scipy.stats
from . import imf
def pn11_mf(tnow=1, mmin=0.01*u.M_sun, mmax=120*u.M_sun, T0=10*u.K,
            T_mean=7*u.K, L0=10*u.pc, rho0=2e-21*u.g/u.cm**3, MS0=25,
            beta=0.4, alpham1=1.35, v0=4.9*u.km/u.s, eff=0.26,
            mean_mol_wt=2.33):
    """
    Padoan & Nordlund IMF - from http://adsabs.harvard.edu/abs/2011ApJ...741L..22P

    Monte-Carlo realization of the core/star mass function: samples core
    accretion masses from a Salpeter distribution, assigns each core a
    density and a random birthday, then evolves it to time ``tnow``.

    Parameters
    ----------
    tnow : float
        The time at which to evaluate the mass function in units of the
        crossing time

    Does not match their figures yet!
    """
    tcross = (L0/v0).to(u.Myr)

    # total molecular cloud mass
    m0 = (4/3.*np.pi*L0**3*rho0).to(u.M_sun)

    massfunc = imf.Salpeter(alpha=alpham1+1)
    massfunc.__name__ = 'salpeter'

    # final accretion masses of the sampled cores (cluster mass = eff * m0)
    maccr = imf.make_cluster(mcluster=(m0*eff).to(u.M_sun).value,
                             massfunc=massfunc,
                             mmin=mmin.to(u.M_sun).value,
                             mmax=mmax.to(u.M_sun).value,
                             silent=True)*u.M_sun

    # sigma_squared of log(rho/rho0)
    sigma_squared = np.log(1+(MS0/2.)**2 * (1+1./beta)**-1)

    # two positional parameters: x, s such that pdf=lognorm(x,s)
    # p(x) = const * exp(-(ln(x)/s)^2 / 2)
    # p(rho/rho0) = const * exp(-(ln(rho/rho0) + sigma_squared/2)^2 / (2*sigma_squared))
    # (ln(rho/rho0)+sigma_squared/2)^2/(2*sigma_squared) = ln(x)^2/s^2 /2
    # s^2 = sigma_squared
    # ln(x) = ln(rho/rho0)+sigma_squared/2
    # x = np.exp(ln(rho/rho0)+sigma_squared/2)
    # ln(rho/rho0) = ln(x) - sigma_squared/2
    # rho = rho0 * exp(ln(x) - sigma_squared/2)
    s = sigma_squared**0.5
    pdf_func = scipy.stats.lognorm(s)
    x = pdf_func.rvs(len(maccr))
    # per-core density drawn from the lognormal density PDF derived above
    rho = rho0 * np.exp(np.log(x) - sigma_squared/2)

    sigma = (1+beta**-1)**-0.5 * MS0/2.

    # core birthdays: "Finally, we associate a random age to each core,
    # assuming for simplicity that the SFR is uniform over time and independent
    # of core mass."
    # Confirmed: "what I did was to assume constant star formation rate during
    # the whole period t=t_0, where t_0 is the crossing time, and also the time
    # when the mass functions are computed. So cores are formed with a uniform
    # distribution (of birth times) between 0 and t_0."
    birthday = np.random.random(len(maccr)) * tcross
    born = birthday < tnow*tcross

    b = 4.-(3./alpham1)
    a = (b-1)/2.
    # per-core accretion timescale
    taccr = (tcross * sigma**((4.-4.*a)/(3.-2.*a)) *
             (maccr/m0)**((1-a)/(3-2*a))).to(u.Myr)
    print(("taccr=[{0} - {1}]".format(taccr.to(u.Myr).min(), taccr.to(u.Myr).max())))

    # sound speed at the mean temperature
    c_s = ((constants.k_B * T_mean / (mean_mol_wt*constants.m_p))**0.5).to(u.km/u.s)
    print(("c_s = {0}".format(c_s)))
    # Bonnor-Ebert mass at each core's density
    mbe = (1.182 * c_s**3 / (constants.G**1.5 * rho**0.5)).to(u.M_sun)
    tbe = (taccr * (maccr/mbe)**(-1/3.)).to(u.s)
    # free-fall time at each core's density
    tff = ((3*np.pi/(32*constants.G*rho))**0.5).to(u.s)
    # NOTE(review): this rebinding shadows the `mmax` *parameter*, which was
    # already consumed by make_cluster above
    mmax = (maccr * ((tbe+tff)/taccr)**3).to(u.M_sun)

    # tnow = number of crossing times
    age = (tnow*tcross-birthday)
    mnow = ((age/taccr)**3 * maccr).to(u.M_sun)
    prestellar = age < tbe+tff
    ltbe = maccr < mbe
    stellar = (~prestellar) & (~ltbe)
    forming = age < taccr

    # final mass is capped at the smaller of mmax and maccr
    m_f = np.vstack([mmax.value, maccr.value]).min(axis=0)*u.M_sun
    mnow[mnow > m_f] = m_f[mnow > m_f]

    will_collapse = maccr > mbe/2.
    # We assume that cores that do not reach their BE mass are seen only during
    # their formation time, taccr,
    notseen = ltbe & ~forming

    core_mass = mnow[born & (~notseen) & (~stellar)].sum()
    stellar_mass = mnow[stellar].sum()

    print(("{0} of {1} have mass greater than final at t={2}."
           " {3} are unborn. {4} are stellar. "
           "{7} are not seen ({8:0.02f}%) because they are older than "
           "one accretion time and have M<M_BE. "
           "The cloud mass is {9}. "
           "The CFE={5}"
           " and SFE={6}".format((mnow > m_f).sum(), len(mnow), tnow*tcross,
                                 np.sum(~born), np.sum(stellar),
                                 (core_mass/m0).decompose().value,
                                 (stellar_mass/m0).decompose().value,
                                 notseen.sum(),
                                 (notseen.sum()/float(notseen.size))*100,
                                 m0
                                 )))

    return (mnow[born], m_f[born], will_collapse[born], maccr[born], mbe[born],
            mmax[born], forming[born])
def test_pn11(nreal=5, nbins=50, **kwargs):
    """Visual sanity check of pn11_mf: scatter plots, histograms, and a
    power-law fit to the stacked mass function over ``nreal`` realizations.

    Returns the first realization's (mnow, mf, wc, maccr, mbe, mmax).
    """
    mnow, mf, wc, maccr, mbe, mmax, forming = pn11_mf(**kwargs)

    import pylab as pl
    pl.figure(1).clf()
    ltbe = maccr < mbe
    # We assume that cores that do not reach their BE mass are seen only during
    # their formation time, taccr,
    notseen = ltbe & ~forming
    toplot = (mnow > 0.05*u.M_sun) & (~notseen)
    gtmax = maccr > mmax
    btw = (~gtmax) & (~ltbe)
    # figure 1: accretion-to-current mass ratio vs current mass, split by
    # whether maccr exceeds mmax (diamonds), is below mbe (red dots),
    # or in between (plus signs)
    pl.loglog(mnow[toplot & gtmax], (maccr/mnow)[toplot & gtmax], 'kd',
              markerfacecolor='none')
    print(("{0} have maccr<mbe and m>0.05".format((toplot & ltbe).sum())))
    pl.loglog(mnow[toplot & ltbe], (maccr/mnow)[toplot & ltbe], 'r.',
              markersize=2, alpha=0.5,
              markerfacecolor='none')
    pl.loglog(mnow[toplot & btw], (maccr/mnow)[toplot & btw], 'b+',
              markerfacecolor='none')
    pl.gca().set_ylim(0.5, 500)
    pl.gca().set_xlim(0.05, 50)
    pl.ylabel("$m_{accr}/m$")
    pl.xlabel("$m$, i.e. $m_{now}$")

    # figure 2: histograms of the accretion-to-current mass ratio
    pl.figure(2).clf()
    pl.hist((maccr/mnow)[mnow > 0.1*u.M_sun], bins=np.logspace(0, 2, 12),
            histtype='step', color='k', log=True)
    pl.hist((maccr/mnow)[mnow > (mbe/2.)], bins=np.logspace(0, 2, 12),
            histtype='step', linestyle='dashed', color='k', log=True)
    pl.gca().set_xscale('log')
    pl.ylabel("$N(m_{accr}/m)$")
    pl.xlabel("$m_{accr}/m$")

    # figure 3: current mass relative to the Bonnor-Ebert mass
    pl.figure(3).clf()
    pl.loglog(mnow[toplot & gtmax], (mnow/mbe)[toplot & gtmax], 'kd',
              markerfacecolor='none')
    pl.loglog(mnow[toplot & ltbe], (mnow/mbe)[toplot & ltbe], 'r.',
              markersize=2,
              markerfacecolor='none', alpha=0.5)
    pl.loglog(mnow[toplot & btw], (mnow/mbe)[toplot & btw], 'b+',
              markerfacecolor='none')
    ct, bn = np.histogram(mnow[(mnow > mbe/2.) & toplot & (maccr < mbe)],
                          bins=np.logspace(np.log10(0.05), np.log10(20)))
    ctall, bn = np.histogram(mnow[(mnow > mbe/2.) & toplot],
                             bins=np.logspace(np.log10(0.05), np.log10(20)))
    bbn = (bn[1:]+bn[:-1])/2.
    pl.loglog(bbn, ct/ctall.astype('float'), 'k-')
    pl.ylabel("$m_{now}/m_{BE}$")
    pl.xlabel("$m$, i.e. $m_{now}$")

    # figure 4: mass-function histograms (slightly offset bins so the
    # stepped outlines remain distinguishable)
    pl.figure(4)
    pl.clf()
    pl.hist(mnow, bins=np.logspace(-2, 2, nbins), histtype='step', log=True,
            edgecolor='k', label='$m$')
    pl.hist(mnow[wc], bins=np.logspace(-2.02, 1.98, nbins), histtype='step', log=True,
            edgecolor='b', facecolor=(0, 0, 1, 0.25), label='$m>m_{BE}/2$')
    pl.hist(mbe, bins=np.logspace(-1.98, 2.02, nbins), histtype='step',
            log=True, linestyle='dashed', color='g', label='$m_{BE}$')
    pl.hist(mmax, bins=np.logspace(-1.96, 2.04, nbins), histtype='step',
            log=True, linestyle='dashed', color='m', label='$m_{max}$')
    pl.gca().set_xscale('log')
    pl.legend(loc='best')
    pl.ylabel("$N(M)$")
    pl.xlabel("$m$, i.e. $m_{now}$")

    # stack several realizations and fit a power law to the log-log
    # mass-function slope
    many_realizations = [pn11_mf(**kwargs) for ii in range(nreal)]
    mnow_many = np.hstack([x.value for x, y, z, w, v, s, t in many_realizations]).ravel()
    counts, bins = np.histogram(mnow_many.ravel(), bins=np.logspace(-2, 2, nbins*nreal))
    bbins = (bins[:-1]+bins[1:])/2.
    ok = np.log(counts) > 0
    ppars = np.polyfit(np.log(bbins)[ok], np.log(counts)[ok], 1)
    pl.plot(bbins, np.exp(ppars[1]) * bbins**ppars[0], 'r')

    pl.figure(5).clf()
    pl.plot(maccr[toplot], mbe[toplot], 'k,')

    return mnow, mf, wc, maccr, mbe, mmax
def hc13_mf(mass, sizescale, n17=3.8, alpha_ct=0.75, mean_mol_wt=2.33,
            V0=0.8*u.km/u.s, meandens=5000*u.cm**-3, temperature=10*u.K,
            eta=0.45, b_forcing=0.4, Mach=6):
    """ Equation 21 of Hennebelle & Chabrier 2013

    Parameters
    ----------
    mass : np.array
        Masses at which to evaluate the PDF
    sizescale : pc equivalent
        The size of the clump (I think - extremely difficult to find this)
    n17 : float
        The "n" value in Equation 17, quoted to be 3.8 shortly afterward in the
        text
    alpha_ct : float
        "a dimensionless coefficient of the order of a few"
        I derived 0.75 from equation 9
    V0 : float
        u0 * 0.8 km/s according to the bottom of page 6, under eqn 36,
        which references eqn 16
    eta : None or float
        derived from Equation 17, but can be specified directly
    b_forcing : float
        The Forcing Parameter `b` from equation 4
    Mach : float
        Mach number

    Returns
    -------
    N : dimensionless Quantity array
        The (dimensionless) number density from eqn 21 evaluated at ``mass``.
    """
    # mean mass density from the mean number density
    rho_bar = meandens * mean_mol_wt * constants.m_p
    # isothermal sound speed
    c_s = ((constants.k_B * temperature /
            (mean_mol_wt*constants.m_p))**0.5).to(u.km/u.s)
    # eqn 4: width of the lognormal density PDF
    sigma = (np.log(1+b_forcing**2 * Mach**2))**0.5
    if eta is None:
        # eqn 17
        eta = (n17 - 3.)/2.
    alpha_g = 3/5. # for a uniform density fluctuation
    # eqn 9
    phit = 2 * alpha_ct * (24/np.pi**2/alpha_g)
    # dimensionless geometrical factor of the order of unity
    # For a sphere, becomes:
    aJ = np.pi**2.5/6.
    # a geometrical factor, typically of the order of 4pi/3
    Cm = 4*np.pi/3
    # eqn 13
    MJ0 = (aJ / Cm * c_s**3 * constants.G**-1.5 * rho_bar**-0.5).to(u.M_sun)
    # eqn 14
    lambdaJ0 = (np.pi**0.5 * c_s / Cm * (constants.G*rho_bar)**-0.5).to(u.pc)

    # Eqn 7 of Paper I
    # delta = np.log(rho/rho_bar
    # R = (mass/rho_bar)**(1/3.) * np.exp(-delta/3.) / lambdaJ0
    # dimensionless size and mass (in units of the Jeans length/mass)
    Rtwiddle = (sizescale / lambdaJ0).to(u.dimensionless_unscaled)
    Mtwiddle = (mass / MJ0).to(u.dimensionless_unscaled)

    # eqn 20
    Mstar = (3**-0.5 * V0/c_s * (lambdaJ0/(u.pc))**eta).to(u.dimensionless_unscaled)

    # after eqn 21
    N0 = rho_bar / MJ0

    # PROBLEM: N0 is defined to be rho_bar / MJ0, but that is a dimensional
    # quantity with units cm^-3.  This is a contradiction that means some
    # definition here is wrong.

    # eqn 21
    N = (2./phit * N0 * Rtwiddle**-6 * (1 + (1-eta)*Mstar**2*Rtwiddle**(2*eta)) /
         (1+(2*eta+1)*Mstar**2*Rtwiddle**(2*eta)) *
         (Mtwiddle/Rtwiddle**3)**(-1-1/(2*sigma**2)*np.log(Mtwiddle/Rtwiddle**3)) *
         np.exp(sigma**2/8.) * ((2*np.pi)**0.5 * sigma)
         ).to(u.dimensionless_unscaled)

    return N
def test_hc13():
    """Smoke-test hc13_mf on a logarithmic mass grid for a 10 pc clump."""
    clump_size = 10*u.pc
    mass_grid = np.logspace(-2, 2, 100)*u.M_sun
    return hc13_mf(mass=mass_grid, sizescale=clump_size)
| 10,822 | 37.379433 | 89 | py |
imf | imf-master/imf/tests/test_imf.py | import pytest
import numpy as np
import itertools
from .. import imf
from ..imf import kroupa, chabrierpowerlaw
# Extra keyword arguments needed to instantiate particular mass functions in
# the parametrized tests below (these forms require a characteristic mass).
extra_massfunc_kwargs = {'schecter': {'m1': 1.0},
                         'modified_schecter': {'m1': 1.0},
                         'chabrierpowerlaw': {'mmid': 0.5},
                        }
@pytest.mark.parametrize(('inp', 'out', 'rtol', 'atol'),
                         [(0.05, 5.6159, 1e-3, 1e-3),
                          (1.5, 0.0359, 1e-4, 1e-4),
                          (1.0, 0.0914, 1e-4, 1e-4),
                          (3.0, 0.0073, 1e-4, 1e-4),
                          (1, 0.0914, 1e-4, 1e-4),
                          (3, 0.0073, 1e-4, 1e-4)])
def test_kroupa_val(inp, out, rtol, atol):
    """Spot-check Kroupa IMF values via a fresh instance and the module-level singleton."""
    for mass_function in (imf.Kroupa(), imf.kroupa):
        np.testing.assert_allclose(mass_function(inp), out, rtol=rtol, atol=atol)
@pytest.mark.parametrize('massfunc', imf.massfunctions.keys())
def test_mmax(massfunc):
    """
    Regression test for issue #4: sampled cluster masses must respect ``mmax``.
    """
    # Mass functions without an mmin attribute cannot be sampled over a range.
    if (not hasattr(imf.get_massfunc(massfunc), 'mmin')):
        pytest.skip("{0} doesn't have mmin defined".format(massfunc))
    extra_kwargs = extra_massfunc_kwargs.get(massfunc, {})
    # No drawn stellar mass may exceed the requested upper cutoff.
    c = imf.make_cluster(10000, mmax=1.001, mmin=0.01, massfunc=massfunc, **extra_kwargs)
    assert c.max() <= 1.001
@pytest.mark.parametrize(('mlow', 'mhigh'),
                         itertools.product((0.01, 0.08, 0.1, 0.5, 1.0, 0.03),
                                           (0.02, 0.08, 0.4, 0.5, 1.0, 120)))
def test_kroupa_integral(mlow, mhigh):
    """Numerical and analytic integrals of the Kroupa IMF must agree."""
    if mlow >= mhigh:
        pytest.skip("mmin >= mmax")
    numerical_result = kroupa.integrate(mlow, mhigh, numerical=True)[0]
    analytic_result = kroupa.integrate(mlow, mhigh, numerical=False)[0]
    np.testing.assert_almost_equal(numerical_result, analytic_result)
    # a nonzero numerical integral must not be matched by a spuriously zero analytic one
    if numerical_result != 0:
        assert analytic_result != 0
@pytest.mark.parametrize(('mlow', 'mhigh'),
                         itertools.product((0.01, 0.08, 0.1, 0.5, 1.0, 0.03),
                                           (0.02, 0.08, 0.4, 0.5, 1.0, 120)))
def test_kroupa_mintegral(mlow, mhigh):
    """Numerical and analytic mass-weighted integrals of the Kroupa IMF must agree."""
    if mlow >= mhigh:
        pytest.skip("mmin >= mmax")
    num = kroupa.m_integrate(mlow, mhigh, numerical=True)[0]
    anl = kroupa.m_integrate(mlow, mhigh, numerical=False)[0]
    # printed (visible with pytest -s) to aid debugging of mismatched ranges
    print("{0} {1} {2:0.3f} {3:0.3f}".format(mlow, mhigh, num, anl))
    np.testing.assert_almost_equal(num, anl)
    if num != 0:
        assert anl != 0
@pytest.mark.parametrize(('mlow', 'mhigh'),
                         itertools.product((0.033, 0.01, 0.08, 0.1, 0.5, 1.0, 0.03),
                                           (0.02, 0.05, 0.08, 0.4, 0.5, 1.0, 120)))
def test_chabrier_integral(mlow, mhigh):
    """Numerical and analytic integrals of the Chabrier power-law IMF must agree."""
    if mlow >= mhigh:
        pytest.skip("mmin >= mmax")
    num = chabrierpowerlaw.integrate(mlow, mhigh, numerical=True)[0]
    anl = chabrierpowerlaw.integrate(mlow, mhigh, numerical=False)[0]
    # printed (visible with pytest -s) to aid debugging of mismatched ranges
    print("{0} {1} {2:0.3f} {3:0.3f}".format(mlow, mhigh, num, anl))
    np.testing.assert_almost_equal(num, anl)
# for mlow in (0.01, 0.08, 0.1, 0.5, 1.0):
# for mhigh in (0.02, 0.08, 0.4, 0.5, 1.0):
# try:
# num = chabrierpowerlaw.m_integrate(mlow, mhigh, numerical=True)[0]
# anl = chabrierpowerlaw.m_integrate(mlow, mhigh, numerical=False)[0]
# except ValueError:
# continue
# print("{0} {1} {2:0.3f} {3:0.3f}".format(mlow, mhigh, num, anl))
# np.testing.assert_almost_equal(num, anl)
def test_make_cluster():
    """A sampled 1000-Msun cluster should land within 100 Msun of the target mass."""
    cluster = imf.make_cluster(1000)
    # Bug fix: the original asserted ``np.abs(sum(cluster) - 1000 < 100)``, i.e.
    # the abs() of a *boolean*, which passes for any total mass below 1100.
    assert np.abs(sum(cluster) - 1000) < 100
def test_kroupa_inverses():
    """inverse_imf at quantiles 0 and 1 must return mmin and mmax respectively,
    whether the massfunc is given as a class (with kwargs) or an instance."""
    assert np.abs(imf.inverse_imf(0, massfunc=imf.Kroupa, mmin=0.01) - 0.01) < 2e-3
    assert np.abs(imf.inverse_imf(0, massfunc=imf.Kroupa(mmin=0.01)) - 0.01) < 2e-3
    assert np.abs(imf.inverse_imf(1, massfunc=imf.Kroupa, mmax=200) - 200) < 1
    assert np.abs(imf.inverse_imf(1, massfunc=imf.Kroupa(mmax=200)) - 200) < 1
def test_cannot_override_mmin_mmax_instance():
    """Passing mmin/mmax alongside an already-instantiated massfunc must raise,
    while passing them with the class is allowed and applied."""
    with pytest.raises(ValueError) as ex:
        imf.inverse_imf(0, massfunc=imf.Kroupa(), mmin=0.01)
    assert "mmin was specified" in str(ex.value)
    with pytest.raises(ValueError) as ex:
        imf.inverse_imf(0, massfunc=imf.Kroupa(), mmax=200)
    assert "mmax was specified" in str(ex.value)
    with pytest.raises(ValueError) as ex:
        imf.get_massfunc(massfunc=imf.Kroupa(), mmin=0.01)
    assert "mmin was specified" in str(ex.value)
    with pytest.raises(ValueError) as ex:
        imf.get_massfunc(imf.Kroupa(), mmax=200)
    assert "mmax was specified" in str(ex.value)
    # with the *class*, the overrides are legal and must be honored
    mf = imf.get_massfunc(imf.Kroupa, mmax=200)
    assert mf.mmax == 200
    mf = imf.get_massfunc(imf.Kroupa, mmin=0.01)
    assert mf.mmin == 0.01
@pytest.mark.parametrize(('inp', 'out', 'rtol', 'atol'),
                         [(0.05, 5.6159, 1e-3, 1e-3),
                          (1.5, 0.0359, 1e-4, 1e-4),
                          (1.0, 0.0914, 1e-4, 1e-4),
                          (3.0, 0.0073, 1e-4, 1e-4),
                          (1, 0.0914, 1e-4, 1e-4),
                          (3, 0.0073, 1e-4, 1e-4)])
def test_kroupa_val_unchanged(inp, out, rtol, atol):
    """Regression: the module-level ``imf.kroupa`` singleton must agree with a
    fresh ``imf.Kroupa()`` instance at the reference values."""
    # regression: make sure that imf.kroupa = imf.Kroupa
    kroupa = imf.Kroupa()
    np.testing.assert_allclose(kroupa(inp), out, rtol=rtol, atol=atol)
    np.testing.assert_allclose(imf.kroupa(inp), out, rtol=rtol, atol=atol)
    np.testing.assert_allclose(kroupa(inp), imf.kroupa(inp))
| 5,462 | 37.202797 | 89 | py |
imf | imf-master/imf/tests/test_distributions.py | import numpy as np
import scipy.interpolate
from .. import distributions as D
np.random.seed(1)
def sampltest(distr, left=None, right=None, bounds=None):
    """Check that sample mean/stddev from ``distr.rvs`` match moments of its PDF.

    Parameters
    ----------
    distr : object
        Distribution exposing ``pdf(x)`` and ``rvs(n)``.
    left, right : float or None
        Integration limits for the moments; default 0 to infinity.
    bounds : sequence of (lo, hi) pairs or None
        If given, integrate piecewise over these intervals instead (useful when
        the PDF has breaks that trip up the quadrature).
    """
    # The module header only imports scipy.interpolate; make sure the
    # integrate subpackage is actually loaded before using it.
    import scipy.integrate

    def first_moment(x):
        return distr.pdf(x) * x

    def second_moment(x):
        return distr.pdf(x) * x**2

    if left is None:
        left = 0
    if right is None:
        right = np.inf
    if bounds is None:
        mom1, _ = scipy.integrate.quad(first_moment, left, right)
        mom2, _ = scipy.integrate.quad(second_moment, left, right)
    else:
        # piecewise integration: sum the contribution of each interval
        mom1, mom2 = 0, 0
        for lo, hi in bounds:
            cmom1, _ = scipy.integrate.quad(first_moment, lo, hi)
            cmom2, _ = scipy.integrate.quad(second_moment, lo, hi)
            mom1 += cmom1
            mom2 += cmom2
    # variance must be positive before we take the square root
    assert (mom2 > mom1**2)
    std = np.sqrt(mom2 - mom1**2)
    N = int(1e6)
    samps = distr.rvs(N)
    # Bug fix: the original compared *signed* deviations, so arbitrarily low
    # sample means/stddevs would pass.  Compare absolute deviations instead.
    assert (np.abs(samps.mean() - mom1) < 5 * std / np.sqrt(N))
    assert (np.abs(samps.std() - std) < 20 * std / np.sqrt(2 * (N - 1)))
def ppftest(distr):
    """Verify that ``distr.ppf`` inverts ``distr.cdf`` and rejects bad quantiles."""
    tol = 1e-5
    quantiles = np.random.uniform(0, 1, size=100)
    # vectorized round trip: cdf(ppf(q)) must reproduce q
    roundtrip = distr.cdf(distr.ppf(quantiles))
    assert (np.all(np.abs(roundtrip - quantiles) < tol))
    # the scalar code path must behave like the vector one
    scalar_q = quantiles[0]
    assert (np.abs(distr.cdf(distr.ppf(scalar_q)) - scalar_q) < tol)
    # quantiles outside [0, 1] are invalid and must map to NaN
    assert (np.isnan(distr.ppf(-0.1)))
    assert (np.isnan(distr.ppf(1.1)))
def test_lognorm():
    """Exercise the LogNormal API and verify sample log-moments match the parameters."""
    ln = D.LogNormal(1, 1)
    # smoke-test the basic interface
    ln.pdf(1.)
    ln.cdf(1)
    ln.rvs(1000)
    ppftest(ln)
    sampltest(ln)
    # random parameter draws: log of the samples should recover mean and sigma
    for i in range(10):
        N = 100000
        mean = np.random.uniform(0.1, 10)
        sig = np.random.uniform(0.1, 10)
        ln2 = D.LogNormal(mean, sig)
        samp = ln2.rvs(N)
        # check that the means and sigmas are correct
        assert (np.abs(np.log(samp).mean() - np.log(mean)) < 0.01 * sig)
        assert (np.abs(np.log(samp).std() - sig) < 0.01 * sig)
def test_broken_plaw():
    """Exercise BrokenPowerLaw and check ppf/cdf inversion in every segment."""
    ln = D.BrokenPowerLaw([-2, -1.1, -3], [0.1, 1, 2, 100])
    ln.pdf(1.)
    ln.cdf(1)
    ln.rvs(1000)
    ppftest(ln)
    # integrate piecewise across the breakpoints to avoid quadrature trouble
    sampltest(ln, 0.05, 120, bounds=[[0.05, 1], [1, 2], [2, 120]])
    # test values in each range
    assert (np.abs(ln.ppf(ln.cdf(0.5)) - 0.5) < 1e-5)
    assert (np.abs(ln.ppf(ln.cdf(1.5)) - 1.5) < 1e-5)
    assert (np.abs(ln.ppf(ln.cdf(2.5)) - 2.5) < 1e-5)
def test_distr():
    """Smoke-test TruncatedLogNormal and PowerLaw plus their ppf/sampling behavior."""
    ln = D.TruncatedLogNormal(1, 1, 2, 3)
    ln.pdf(1.)
    ln.cdf(1)
    ln.rvs(1000)
    ppftest(ln)
    sampltest(ln, 1, 4)
    ln = D.PowerLaw(-2, 2, 6)
    ln.pdf(1.)
    ln.cdf(1)
    ln.rvs(1000)
    ppftest(ln)
    sampltest(ln, 1, 7)
def test_composite():
    """Exercise CompositeDistribution: ppf/cdf inversion inside every sub-range
    and exact cdf values at the outer edges of the support."""
    ln = D.CompositeDistribution([
        D.TruncatedLogNormal(1, 1, 2, 3),
        D.PowerLaw(-2, 3, 4),
        D.TruncatedLogNormal(1, 1, 4, 5),
        D.PowerLaw(-3.5, 5, np.inf)
    ])
    ln.pdf(2.5)
    ln.cdf(2.5)
    ln.rvs(1000)
    ppftest(ln)
    # test values in each break
    assert (np.abs(ln.ppf(ln.cdf(2.5)) - 2.5) < 1e-5)
    assert (np.abs(ln.ppf(ln.cdf(3.5)) - 3.5) < 1e-5)
    assert (np.abs(ln.ppf(ln.cdf(4.5)) - 4.5) < 1e-5)
    assert (np.abs(ln.ppf(ln.cdf(5.5)) - 5.5) < 1e-5)
    sampltest(ln, 1, np.inf, bounds=[[1, 3], [3, 4], [4, 5], [5, np.inf]])
    ln1 = D.CompositeDistribution([
        D.TruncatedLogNormal(1, 1, 2, 3),
        D.PowerLaw(-2, 3, 4),
    ])
    # check the exact edges work
    assert (ln1.cdf(4) == 1)
    assert (ln1.cdf(2) == 0)
def test_bounds():
    """For every distribution kind: pdf must vanish and cdf must saturate (0/1)
    at test points outside the declared support [left, right]."""
    left, right = 1, 2
    # test points strictly outside the support on each side
    tleft, tright = 0.5, 3
    ln = D.TruncatedLogNormal(1, 1, left, right)
    assert (ln.pdf(tleft) == 0)
    assert (ln.pdf(tright) == 0)
    assert (ln.cdf(tleft) == 0)
    assert (ln.cdf(tright) == 1)
    ln = D.PowerLaw(-3, left, right)
    assert (ln.pdf(tleft) == 0)
    assert (ln.pdf(tright) == 0)
    assert (ln.cdf(tleft) == 0)
    assert (ln.cdf(tright) == 1)
    ln = D.BrokenPowerLaw(
        [-2, -1.1, -3],
        [left, .6 * left + .3 * right, .3 * left + .6 * right, right])
    assert (ln.pdf(tleft) == 0)
    assert (ln.pdf(tright) == 0)
    assert (ln.cdf(tleft) == 0)
    assert (ln.cdf(tright) == 1)
    ln = D.CompositeDistribution([
        D.TruncatedLogNormal(1, 1, left, .75 * left + .25 * right),
        D.PowerLaw(-2, .75 * left + .25 * right, .5 * left + .5 * right),
        D.TruncatedLogNormal(1, 1, .5 * left + .5 * right,
                             .25 * left + .75 * right),
        D.PowerLaw(-2, .25 * left + .75 * right, right)
    ])
    assert (ln.pdf(tleft) == 0)
    assert (ln.pdf(tright) == 0)
    assert (ln.cdf(tleft) == 0)
    assert (ln.cdf(tright) == 1)
def integralcheck(distr, left, x, val):
    """Assert that ``val`` equals the numerical integral of ``distr.pdf`` from ``left`` to ``x``."""
    # The module header only imports scipy.interpolate; load scipy.integrate explicitly.
    import scipy.integrate
    # second return value (the error estimate) is unused
    I, _ = scipy.integrate.quad(lambda y: distr.pdf(y), left, x)
    assert (np.abs(val - I) < 1e-6)
def integralcheck_many(distr, left, right):
    """Check ``distr.cdf`` against numerical PDF integrals at random points in [left, right]."""
    # the integral over the full support must be exactly 1
    integralcheck(distr, left, right, 1)
    n_points = 100
    # cdf(x) must equal the integral of the pdf from `left` up to x
    for x in np.random.uniform(left, right, size=n_points):
        integralcheck(distr, left, x, distr.cdf(x))
def test_integral():
    """CDF-vs-numerical-integral consistency for each distribution family."""
    # test that the numerically integrated pdf is within 3 sigma of 1
    # for different kind of pdfs
    left, right = 2, 3
    distrs = [
        D.TruncatedLogNormal(1, 1, left, right),
        D.PowerLaw(-2, left, right),
        D.BrokenPowerLaw(
            [-2, -1.1, -3],
            [left, .6 * left + .3 * right, .3 * left + .6 * right, right]),
        D.CompositeDistribution([
            D.TruncatedLogNormal(1, 1, left, .75 * left + .25 * right),
            D.PowerLaw(-2, .75 * left + .25 * right, .5 * left + .5 * right),
            D.TruncatedLogNormal(1, 1, .5 * left + .5 * right,
                                 .25 * left + .75 * right),
            D.PowerLaw(-2, .25 * left + .75 * right, right)
        ])
    ]
    for curd in distrs:
        integralcheck_many(curd, left, right)
| 5,757 | 28.228426 | 77 | py |
imf | imf-master/imf/tests/__init__.py | 0 | 0 | 0 | py | |
imaging_MLPs | imaging_MLPs-master/ImageNet/config.py | #Path to original dataset
# Path to the original (unprocessed) ImageNet-mini training set
original_path='/workspace/media/hdd1/image_net_mini/imagenet-mini/train/'
# Path to the processed dataset
data_path='/workspace/media/hdd1/image_net_mini/dataset2/'
# Path where trained model checkpoints are stored
models_path= "/workspace/media/hdd1/image_net_mini/trained_networks/"
# Path where TensorBoard log data is written
logs_path= './logs/' | 342 | 30.181818 | 73 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/linear_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbedding(nn.Module):
    """Split an image into patches and linearly embed each patch.

    Implemented as a strided Conv2d (kernel = stride = patch_size) followed by a
    channels-last rearrange, yielding a (batch, H/ps, W/ps, embed_dim) tensor.
    """
    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Conv2d(
                in_channels=channels,
                out_channels=embed_dim,
                kernel_size=patch_size,
                stride=patch_size
            ),
            # channels-first conv output -> channels-last token grid
            Rearrange("b c h w -> b h w c")
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchExpansion(nn.Module):
    """Upsample a patch-token grid back to pixel space.

    A linear layer expands each token's channels by dim_scale**2, the extra
    factor is unfolded into spatial positions, and a 1x1 conv projects the
    result to ``img_channels`` image channels.
    """
    def __init__(self, dim_scale, channel_dim, img_channels, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        self.expand = nn.Linear(channel_dim, dim_scale**2* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)
    def forward(self, x):
        """
        x: B, H, W, C (channels-last token grid) -> B, img_channels,
        H*dim_scale, W*dim_scale (channels-first image).
        """
        x = self.expand(x)
        B, H, W, C = x.shape
        # unfold the expanded channel factor into a (dim_scale x dim_scale) pixel block
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
        # flatten to (B, tokens, C) so LayerNorm normalizes the channel axis
        x = x.view(B,-1,self.output_dim)
        x= self.norm(x)
        x = x.view(B,H*self.dim_scale, W*self.dim_scale,-1)
        # back to channels-first for the 1x1 output convolution
        x = x.permute(0,3,1,2)
        x = self.output(x)
        return x
class MLPBlock(nn.Module):
    """Two-layer position-wise MLP with a GELU nonlinearity.

    Projects input_dim -> hidden_dim -> input_dim, so the block preserves the
    feature dimension and can sit inside residual connections.
    """

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the MLP along the last dimension of ``x``."""
        return self.model(x)
class Mixer(nn.Module):
    """Mixer layer with *linear* token mixing (no hidden layer on tokens).

    Token mixing applies one Linear over patch rows and one over patch columns
    (swapping h/w between them); channel mixing uses a full MLPBlock.  Both
    paths are residual.  Note ``f_hidden`` only affects the channel path here.
    """
    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        f_hidden: int,
    ):
        super().__init__()
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            # mix along the patch-row axis, then along the patch-column axis
            Rearrange("b h w c -> b c w h"),
            nn.Linear(num_patches, num_patches),
            Rearrange("b c w h -> b c h w"),
            nn.Linear(num_patches, num_patches),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels*f_hidden)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.token_mixing(x)
        x = x + self.channel_mixing(x)
        return x
class Linear_Mixer(nn.Module):
    """Image-to-image MLP-Mixer variant with linear token mixing.

    Pipeline: patch embedding -> ``num_layers`` Mixer blocks -> patch expansion
    back to an image of the input size.  Input and output are
    (batch, img_channels, img_size, img_size) tensors.
    """
    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 3,
        patch_size: int = 4,
        embed_dim: int = 140,
        num_layers: int = 19,
        f_hidden: int = 4,
    ):
        super().__init__()
        self.patch_embed = PatchEmbedding(patch_size, embed_dim, img_channels)
        # one Mixer per layer over the (img_size/patch_size)^2 token grid
        layers = [ Mixer(img_size//patch_size, embed_dim, f_hidden)
                  for _ in range(num_layers)]
        self.mixer_layers = nn.Sequential(*layers)
        self.patch_expand = PatchExpansion(patch_size, embed_dim, img_channels)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.patch_embed(x)
        x = self.mixer_layers(x)
        x = self.patch_expand(x)
        return x
| 3,644 | 26.613636 | 127 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/original_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbeddings(nn.Module):
    """Patch embedding that flattens the patch grid into a token *sequence*.

    Strided Conv2d followed by flattening to (batch, num_patches, hidden_dim).
    """
    def __init__(
        self,
        patch_size: int,
        hidden_dim: int,
        channels: int
    ):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Conv2d(
                in_channels=channels,
                out_channels=hidden_dim,
                kernel_size=patch_size,
                stride=patch_size
            ),
            # collapse the 2-D patch grid into one token axis
            Rearrange("b c h w -> b (h w) c")
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchEmbeddings_transpose(nn.Module):
    """Inverse of PatchEmbeddings: token sequence back to an image.

    The token axis is reshaped to an (h, w) grid of side ``d``, then a
    transposed convolution maps each token back to a pixel patch.
    """
    def __init__(
        self,
        patch_size: int,
        hidden_dim: int,
        channels: int,
        d: int
    ):
        super().__init__()
        self.proj_transpose = nn.Sequential(
            # restore the 2-D patch grid (d = patches per side)
            Rearrange("b (h w) c -> b c h w", h=d),
            nn.ConvTranspose2d(
                in_channels=hidden_dim,
                out_channels=channels,
                kernel_size=patch_size,
                stride=patch_size
            )
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj_transpose(x)
class MLPBlock(nn.Module):
    """Two-layer position-wise MLP (Linear -> GELU -> Linear) that preserves
    the input dimension; duplicated across the mixer network files."""
    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class MixerBlock(nn.Module):
    """Standard MLP-Mixer block over a flattened token sequence.

    Token mixing transposes (patches, channels) and applies an MLP across
    patches; channel mixing applies an MLP across channels.  Both residual.
    """
    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        tokens_hidden_dim: int,
        channels_hidden_dim: int
    ):
        super().__init__()
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b p c -> b c p"),
            MLPBlock(num_patches, tokens_hidden_dim),
            Rearrange("b c p -> b p c")
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, channels_hidden_dim)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.token_mixing(x)
        x = x + self.channel_mixing(x)
        return x
class Original_Mixer(nn.Module):
    """Image-to-image network using the original MLP-Mixer block layout.

    Embeds patches into a token sequence, applies ``num_layers`` MixerBlocks,
    normalizes, and maps tokens back to pixels with a transposed convolution.
    """
    def __init__(
        self,
        image_size: int = 256,
        channels: int = 3,
        patch_size: int = 4,
        num_layers: int = 8,
        hidden_dim: int = 128,
        tokens_hidden_dim: int = 96,
        channels_hidden_dim: int = 256
    ):
        super().__init__()
        num_patches = (image_size // patch_size) ** 2
        # patches per side of the conv output grid (equals image_size//patch_size
        # when image_size is divisible by patch_size)
        d=(image_size-patch_size)//patch_size + 1
        self.embed = PatchEmbeddings(patch_size, hidden_dim, channels)
        layers = [
            MixerBlock(
                num_patches=num_patches,
                num_channels=hidden_dim,
                tokens_hidden_dim=tokens_hidden_dim,
                channels_hidden_dim=channels_hidden_dim
            )
            for _ in range(num_layers)
        ]
        self.layers = nn.Sequential(*layers)
        self.norm = nn.LayerNorm(hidden_dim)
        self.embed_transpose = PatchEmbeddings_transpose(patch_size, hidden_dim, channels, d)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # NOTE(review): the unpacked shape is not used below
        b, c, h, w = x.shape
        x = self.embed(x)
        x = self.layers(x)
        x = self.norm(x)
        x = self.embed_transpose(x)
return x | 3,674 | 26.840909 | 93 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/img2img_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbedding(nn.Module):
    """Split an image into patches and embed each patch (channels-last grid).

    Strided Conv2d (kernel = stride = patch_size) followed by a rearrange to
    (batch, H/ps, W/ps, embed_dim).
    """
    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Conv2d(
                in_channels=channels,
                out_channels=embed_dim,
                kernel_size=patch_size,
                stride=patch_size
            ),
            # channels-first conv output -> channels-last token grid
            Rearrange("b c h w -> b h w c")
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchExpansion(nn.Module):
    """Upsample a channels-last patch-token grid back to pixel space.

    Linear channel expansion by dim_scale**2, spatial unfolding of the factor,
    LayerNorm, then a 1x1 conv down to ``img_channels``.
    """
    def __init__(self, dim_scale, channel_dim, img_channels, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        self.expand = nn.Linear(channel_dim, dim_scale**2* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)
    def forward(self, x):
        # x: (B, H, W, C) channels-last token grid
        x = self.expand(x)
        B, H, W, C = x.shape
        # unfold the expanded channel factor into a (dim_scale x dim_scale) pixel block
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
        # flatten so LayerNorm acts on the channel axis
        x = x.view(B,-1,self.output_dim)
        x= self.norm(x)
        x = x.view(B,H*self.dim_scale, W*self.dim_scale,-1)
        # back to channels-first for the 1x1 output convolution
        x = x.permute(0,3,1,2)
        x = self.output(x)
        return x
class MLPBlock(nn.Module):
    """Two-layer position-wise MLP (Linear -> GELU -> Linear) that preserves
    the input dimension; duplicated across the mixer network files."""
    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class Mixer(nn.Module):
    """Mixer layer over a 2-D token grid with MLP-based token mixing.

    Token mixing runs an MLPBlock over patch rows and another over patch
    columns (h/w swapped between them); channel mixing runs an MLPBlock over
    channels.  Both paths are residual.
    """
    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        f_hidden: int
    ):
        super().__init__()
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            # mix along the patch-row axis, then along the patch-column axis
            Rearrange("b h w c -> b c w h"),
            MLPBlock(num_patches, num_patches*f_hidden),
            Rearrange("b c w h -> b c h w"),
            MLPBlock(num_patches, num_patches*f_hidden),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels*f_hidden)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.token_mixing(x)
        x = x + self.channel_mixing(x)
        return x
class Img2Img_Mixer(nn.Module):
    """Image-to-image MLP-Mixer: patch embed -> Mixer stack -> patch expand.

    Input and output are (batch, img_channels, img_size, img_size) tensors.
    """
    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 3,
        patch_size: int = 4,
        embed_dim: int = 128,
        num_layers: int = 16,
        f_hidden: int = 4,
    ):
        super().__init__()
        self.patch_embed = PatchEmbedding(patch_size, embed_dim, img_channels)
        # one Mixer per layer over the (img_size/patch_size)^2 token grid
        layers = [ Mixer(img_size//patch_size, embed_dim, f_hidden)
                  for _ in range(num_layers)]
        self.mixer_layers = nn.Sequential(*layers)
        self.patch_expand = PatchExpansion(patch_size, embed_dim, img_channels)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.patch_embed(x)
        x = self.mixer_layers(x)
        x = self.patch_expand(x)
return x | 3,618 | 27.054264 | 127 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/vit.py | '''
This code is modified from https://github.com/facebookresearch/convit. To adapt the vit/convit to image reconstruction, variable input sizes, and patch sizes for both spatial dimensions.
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> Linear with
    dropout after each linear layer; weights get truncated-normal init."""
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # default hidden/output widths to the input width
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # trunc-normal for linear weights, zeros for biases, unit LayerNorm
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
class GPSA(nn.Module):
    """Gated Positional Self-Attention (from ConViT).

    Blends content-based attention (q @ k) with a learned positional attention
    (a linear map of relative-position features) via a per-head sigmoid gate.
    ``local_init`` biases heads toward convolution-like local attention.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 locality_strength=1., use_local_init=True, grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.k = nn.Linear(dim, dim, bias=qkv_bias)
        self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        # maps the 3 relative-position features (dx, dy, d^2) to per-head scores
        self.pos_proj = nn.Linear(3, num_heads)
        self.proj_drop = nn.Dropout(proj_drop)
        self.locality_strength = locality_strength
        # one gate per head balancing content vs. positional attention
        self.gating_param = nn.Parameter(1*torch.ones(self.num_heads))
        self.apply(self._init_weights)
        if use_local_init:
            self.local_init(locality_strength=locality_strength)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        # trunc-normal for linear weights, zeros for biases, unit LayerNorm
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention(self, x):
        """Return the gated (content + positional) attention map; requires
        ``self.rel_indices`` to be populated (see get_rel_indices)."""
        B, N, C = x.shape
        k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        pos_score = self.pos_proj(self.rel_indices).expand(B, -1, -1,-1).permute(0,3,1,2)
        patch_score = (q @ k.transpose(-2, -1)) * self.scale
        patch_score = patch_score.softmax(dim=-1)
        pos_score = pos_score.softmax(dim=-1)
        # sigmoid(gate) selects positional attention; (1 - sigmoid) content attention
        gating = self.gating_param.view(1,-1,1,1)
        attn = (1.-torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
        # renormalize so each row sums to one after the convex blend
        attn = attn / attn.sum(dim=-1).unsqueeze(-1)
        attn = self.attn_drop(attn)
        return attn
    def get_attention_map(self, x, return_map = False):
        """Per-head mean attention distance (and optionally the batch-averaged map)."""
        attn_map = self.get_attention(x).mean(0) # average over batch
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def local_init(self, locality_strength=1.):
        """Initialize heads to attend locally: identity values and positional
        weights centered on a sqrt(num_heads) x sqrt(num_heads) neighborhood."""
        self.v.weight.data.copy_(torch.eye(self.dim))
        locality_distance = 1 #max(1,1/locality_strength**.5)
        kernel_size = int(self.num_heads**.5)
        center = (kernel_size-1)/2 if kernel_size%2==0 else kernel_size//2
        for h1 in range(kernel_size):
            for h2 in range(kernel_size):
                position = h1+kernel_size*h2
                self.pos_proj.weight.data[position,2] = -1
                self.pos_proj.weight.data[position,1] = 2*(h1-center)*locality_distance
                self.pos_proj.weight.data[position,0] = 2*(h2-center)*locality_distance
        self.pos_proj.weight.data *= locality_strength
    def get_rel_indices(self, ):
        """Cache (1, N, N, 3) relative-position features (dx, dy, dx^2+dy^2)
        for the current grid size on ``self.rel_indices``."""
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.v.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        # (re)build positional indices when the token count changes
        if not hasattr(self, 'rel_indices') or self.rel_indices.size(1)!=N:
            self.get_rel_indices()
        attn = self.get_attention(x)
        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class MHSA(nn.Module):
    """Plain multi-head self-attention with a fused qkv projection."""
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.apply(self._init_weights)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        # trunc-normal for linear weights, zeros for biases, unit LayerNorm
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention_map(self, x, return_map = False):
        """Per-head mean attention distance (and optionally the batch-averaged map)."""
        self.get_rel_indices()
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn_map = (q @ k.transpose(-2, -1)) * self.scale
        attn_map = attn_map.softmax(dim=-1).mean(0) # average over batch
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def get_rel_indices(self, ):
        """Cache (1, N, N, 3) relative-position features (dx, dy, dx^2+dy^2)
        for the current grid size on ``self.rel_indices``."""
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.qkv.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Module):
    """Transformer encoder block: (GPSA or MHSA) + MLP, each with pre-norm,
    residual connection, and stochastic depth (DropPath)."""
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.use_gpsa = use_gpsa
        # choose gated positional attention or plain multi-head attention
        if self.use_gpsa:
            self.attn = GPSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
        else:
            self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
    def forward(self, x, grid_size):
        # tell the attention layer the current (H, W) token-grid shape
        self.attn.current_grid_size = grid_size
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding, from timm

    Strided Conv2d that maps (B, in_chans, H, W) to a channels-first patch
    grid (B, embed_dim, H/ps, W/ps).
    """
    def __init__(self, patch_size, in_chans, embed_dim):
        super().__init__()
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.apply(self._init_weights)
    def forward(self, x):
        x = self.proj(x)
        return x
    def _init_weights(self, m):
        # trunc-normal for linear weights, zeros for biases, unit LayerNorm
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
class VisionTransformer(nn.Module):
    """Vision Transformer for image-to-image reconstruction.

    Embeds an image into a grid of patch tokens, runs them through a stack of
    transformer blocks (optionally with gated positional self-attention, GPSA,
    for blocks in ``gpsa_interval``), and projects each token back to patch
    pixels; ``forward`` reassembles the tokens into an image via ``seq2img``.
    """
    def __init__(self, avrg_img_size=320, patch_size=16, in_chans=1, embed_dim=64, depth=8,
                 num_heads=9, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, global_pool=None,
                 gpsa_interval=[-1, -1], locality_strength=1., use_pos_embed=True):
        # NOTE(review): ``gpsa_interval`` is a mutable default; it is only read
        # here so this is safe, but a tuple would be more defensive.
        super().__init__()
        self.depth = depth
        # embed_dim is given per head; scale to the total embedding width
        embed_dim *= num_heads
        self.num_features = embed_dim  # num_features for consistency with other models
        self.locality_strength = locality_strength
        self.use_pos_embed = use_pos_embed
        if isinstance(avrg_img_size, int):
            img_size = to_2tuple(avrg_img_size)
        if isinstance(patch_size, int):
            self.patch_size = to_2tuple(patch_size)
        else:
            self.patch_size = patch_size
        self.in_chans = in_chans
        self.patch_embed = PatchEmbed(
            patch_size=self.patch_size, in_chans=in_chans, embed_dim=embed_dim)
        self.pos_drop = nn.Dropout(p=drop_rate)
        if self.use_pos_embed:
            # Learned positional embedding on the average-size patch grid; it is
            # bilinearly resized in forward_features for other input sizes.
            self.pos_embed = nn.Parameter(
                torch.zeros(1, embed_dim,
                            img_size[0] // self.patch_size[0],
                            img_size[1] // self.patch_size[1])
            )
            trunc_normal_(self.pos_embed, std=.02)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        # Blocks whose 1-based index falls inside gpsa_interval use GPSA; the rest use MHSA.
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=True,
                locality_strength=locality_strength)
            if i >= gpsa_interval[0]-1 and i < gpsa_interval[1] else
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=False,)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # head: linear projection from token features to patch pixels
        self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
        self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
        # Bug fix: this call was stranded as unreachable code after ``return``
        # inside ``seq2img``; it belongs here so the head gets trunc-normal init.
        self.head.apply(self._init_weights)

    def seq2img(self, x, img_size):
        """
        Transforms sequence back into image space, input dims: [batch_size, num_patches, channels]
        output dims: [batch_size, channels, H, W]
        """
        x = x.view(x.shape[0], x.shape[1], self.in_chans, self.patch_size[0], self.patch_size[1])
        # stitch patches back together: first along rows, then along columns
        x = x.chunk(x.shape[1], dim=1)
        x = torch.cat(x, dim=4).permute(0, 1, 2, 4, 3)
        x = x.chunk(img_size[0]//self.patch_size[0], dim=3)
        x = torch.cat(x, dim=4).permute(0, 1, 2, 4, 3).squeeze(1)
        return x

    def _init_weights(self, m):
        # trunc-normal for linear weights, zeros for biases, unit LayerNorm
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self,):
        """Parameter names to exclude from weight decay."""
        return {'pos_embed'}

    def get_head(self,):
        return self.head

    def reset_head(self,):
        # Bug fix: the original referenced the bare name ``in_chans`` here,
        # which is a NameError outside __init__; use the stored attribute.
        self.head = nn.Linear(self.num_features, self.in_chans*self.patch_size[0]*self.patch_size[1])

    def forward_features(self, x, k=None):
        """Patch-embed ``x``, add the (resized) positional embedding, and run
        all blocks; if ``k`` is given, stores block ``k``'s attention map on
        ``self.attention_map``."""
        x = self.patch_embed(x)
        _, _, H, W = x.shape
        if self.use_pos_embed:
            # resize the learned embedding to the actual patch-grid size
            pos_embed = F.interpolate(self.pos_embed, size=[H, W], mode='bilinear', align_corners=False)
            x = x + pos_embed
        x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)
        for u, blk in enumerate(self.blocks):
            x = blk(x, (H, W))
            if k is not None and u == k:
                self.attention_map = blk.attn.get_attention_map(x, return_map=True)
        x = self.norm(x)
        return x
def forward(self, x, k=None):
_, _, H, W = x.shape
x = self.forward_features(x, k)
x = self.head(x)
x = self.seq2img(x, (H, W))
return x | 15,082 | 39.007958 | 186 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/recon_net.py | import torch.nn as nn
import torch.nn.functional as F
from math import ceil, floor
class ReconNet(nn.Module):
    """Wrapper that zero-pads inputs to a multiple of the wrapped network's
    patch size, runs the network, and crops the output back."""
    def __init__(self, net):
        super().__init__()
        # net must expose ``patch_size`` as an (hp, wp) pair and accept (x, k)
        self.net = net
    def pad(self, x):
        """Symmetrically pad H and W up to multiples of the patch size.

        Returns the padded tensor plus the [before, after] pad amounts for
        width and height so ``unpad`` can undo it.
        """
        _, _, h, w = x.shape
        hp, wp = self.net.patch_size
        # half of the deficit on each side; floor/ceil split odd amounts
        f1 = ( (wp - w % wp) % wp ) / 2
        f2 = ( (hp - h % hp) % hp ) / 2
        wpad = [floor(f1), ceil(f1)]
        hpad = [floor(f2), ceil(f2)]
        x = F.pad(x, wpad+hpad)
        return x, wpad, hpad
    def unpad(self, x, wpad, hpad):
        # crop away the padding added by ``pad``
        return x[..., hpad[0] : x.shape[-2]-hpad[1], wpad[0] : x.shape[-1]-wpad[1]]
    def forward(self, x, k=None):
        x, wpad, hpad = self.pad(x)
        x = self.net(x, k)
        x = self.unpad(x, wpad, hpad)
return x | 810 | 26.033333 | 90 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234-241.
    Springer, 2015.
    """

    def __init__(
        self,
        in_chans: int = 3,
        out_chans: int = 3,
        chans: int = 21,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
            out_chans: Number of channels in the output to the U-Net model.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob

        # Encoder: first block maps the input channels, each later block doubles.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        width = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(width, width * 2, drop_prob))
            width *= 2

        # Bottleneck.
        self.conv = ConvBlock(width, width * 2, drop_prob)

        # Decoder: transpose convs upsample, conv blocks fuse the skip connection.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(width * 2, width))
            self.up_conv.append(ConvBlock(width * 2, width, drop_prob))
            width //= 2
        self.up_transpose_conv.append(TransposeConvBlock(width * 2, width))
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(width * 2, width, drop_prob),
                nn.Conv2d(width, self.out_chans, kernel_size=1, stride=1),
            )
        )

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        skips = []
        out = image

        # Contracting path: save each pre-pool activation for the skip link.
        for down in self.down_sample_layers:
            out = down(out)
            skips.append(out)
            out = F.avg_pool2d(out, kernel_size=2, stride=2, padding=0)

        out = self.conv(out)

        # Expanding path.
        for up_transpose, up_block in zip(self.up_transpose_conv, self.up_conv):
            skip = skips.pop()
            out = up_transpose(out)

            # Reflect-pad right/bottom if odd input sizes made shapes disagree.
            pad = [0, 0, 0, 0]
            if out.shape[-1] != skip.shape[-1]:
                pad[1] = 1  # padding right
            if out.shape[-2] != skip.shape[-2]:
                pad[3] = 1  # padding bottom
            if sum(pad) != 0:
                out = F.pad(out, pad, "reflect")

            out = up_block(torch.cat([out, skip], dim=1))

        return out
class ConvBlock(nn.Module):
    """Two (Conv3x3 -> InstanceNorm -> LeakyReLU -> Dropout) stages; spatial
    size is preserved, channel count changes from in_chans to out_chans."""

    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob

        def stage(c_in, c_out):
            # One conv stage; bias is omitted because InstanceNorm follows.
            return [
                nn.Conv2d(c_in, c_out, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(c_out),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]

        self.layers = nn.Sequential(*stage(in_chans, out_chans), *stage(out_chans, out_chans))

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """(N, in_chans, H, W) -> (N, out_chans, H, W)."""
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """ConvTranspose2d (stride 2) -> InstanceNorm -> LeakyReLU; doubles the
    spatial size."""

    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        # bias omitted because InstanceNorm follows the transpose conv
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """(N, in_chans, H, W) -> (N, out_chans, 2H, 2W)."""
        return self.layers(image)
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/__init__.py | from .img2img_mixer import *
from .linear_mixer import *
from .original_mixer import *
from .u_mixer import *
from .unet import *
from .recon_net import ReconNet
from .vit import VisionTransformer
| 197 | 23.75 | 34 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/u_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbeddings(nn.Module):
    """Split an image into non-overlapping patch_size x patch_size patches and
    linearly embed each patch; output layout is channels-last (B, H', W', C)."""

    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        # A strided conv is equivalent to a per-patch linear projection.
        self.proj = nn.Sequential(
            nn.Conv2d(
                in_channels=channels,
                out_channels=embed_dim,
                kernel_size=patch_size,
                stride=patch_size
            ),
            Rearrange("b c h w -> b h w c")
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchMerge(nn.Module):
    r"""Swin-style patch merging: concatenates each 2x2 neighbourhood of
    patches channel-wise (C -> 4C), normalises, and reduces to 2C, halving
    the spatial resolution in both dimensions.

    Args:
        channel_dim (int): number of input channels C.
        norm_layer (nn.Module, optional): normalisation applied before the
            reduction. Default: nn.LayerNorm
    """

    def __init__(self, channel_dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = channel_dim
        self.reduction = nn.Linear(4 * channel_dim, 2 * channel_dim, bias=False)
        self.norm = norm_layer(4 * channel_dim)

    def forward(self, x):
        """x: (B, H, W, C) with even H and W -> (B, H/2, W/2, 2C)."""
        batch, height, width, channels = x.shape
        # The four corners of every 2x2 block, concatenated channel-wise.
        corners = [
            x[:, 0::2, 0::2, :],
            x[:, 1::2, 0::2, :],
            x[:, 0::2, 1::2, :],
            x[:, 1::2, 1::2, :],
        ]
        merged = torch.cat(corners, -1).view(batch, height // 2, width // 2, 4 * channels)
        return self.reduction(self.norm(merged))
class PatchExpand(nn.Module):
    """Inverse of PatchMerge: doubles the spatial resolution and halves the
    channels (C -> C/2) by widening features to 2C and distributing them over
    a 2x2 block of patches."""

    def __init__(self, channel_dim, dim_scale=2, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = channel_dim
        # Widen to 2C before redistributing; identity for any other scale.
        self.expand = nn.Linear(channel_dim, 2* channel_dim, bias=False) if dim_scale==2 else nn.Identity()
        self.norm = norm_layer(channel_dim // dim_scale)

    def forward(self, x):
        """x: (B, H, W, C) -> (B, 2H, 2W, C/2)."""
        expanded = self.expand(x)
        batch, height, width, channels = expanded.shape
        out = rearrange(expanded, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=channels // 4)
        out = out.view(batch, height * 2, width * 2, channels // 4)
        return self.norm(out)
class FinalPatchExpand(nn.Module):
    """Final up-sampling stage: expands each token into a dim_scale x dim_scale
    pixel block and projects channels back to image channels with a 1x1 conv.

    NOTE(review): the Linear widens by a fixed factor of 16, which matches
    dim_scale ** 2 only for the default dim_scale=4 -- confirm before using
    other scales.
    """

    def __init__(self, channel_dim, img_channels, dim_scale=4, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        self.expand = nn.Linear(channel_dim, 16* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)

    def forward(self, x):
        """x: (B, H, W, C) -> image (B, img_channels, H*dim_scale, W*dim_scale)."""
        expanded = self.expand(x)
        batch, height, width, channels = expanded.shape
        scale = self.dim_scale
        out = rearrange(expanded, 'b h w (p1 p2 c)-> b (h p1) (w p2) c',
                        p1=scale, p2=scale, c=channels // (scale ** 2))
        out = self.norm(out.view(batch, -1, self.output_dim))
        out = out.view(batch, height * scale, width * scale, -1)
        # (B, H', W', C) -> (B, C, H', W') for the 1x1 convolution.
        return self.output(out.permute(0, 3, 1, 2))
class MLPBlock(nn.Module):
    """Two-layer feed-forward block (Linear -> GELU -> Linear) mapping back to
    its input dimension, as used inside Mixer layers."""

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        up = nn.Linear(input_dim, hidden_dim)
        down = nn.Linear(hidden_dim, input_dim)
        self.model = nn.Sequential(up, nn.GELU(), down)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class Mixer(nn.Module):
    """One mixer layer over a (B, H, W, C) patch grid: a token-mixing MLP
    applied along each spatial axis, then a channel-mixing MLP, both with
    residual connections and pre-LayerNorm."""

    def __init__(
        self,
        num_patches: int,
        num_channels: int
    ):
        super().__init__()
        # Transposes put H, then W, into the last axis so the MLP mixes tokens
        # along one spatial dimension at a time.
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),
            MLPBlock(num_patches, num_patches),
            Rearrange("b c w h -> b c h w"),
            MLPBlock(num_patches, num_patches),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        mixed = x + self.token_mixing(x)
        return mixed + self.channel_mixing(mixed)
class U_Mixer(nn.Module):
    """U-Net-shaped MLP-Mixer: a PatchMerge encoder and PatchExpand decoder
    with Mixer blocks at every scale and additive skip connections.

    NOTE(review): each mixerN / mixerNN block is applied twice in forward()
    with shared weights -- confirm this weight sharing is intended rather
    than a copy-paste of what should be distinct blocks.
    """
    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 3,
        embed_dim: int = 96,
    ):
        super().__init__()
        #mixer blocks
        # mixerN are used on the encoder path, mixerNN on the decoder path;
        # grid side length halves and channel width doubles at each scale.
        self.mixer1= Mixer( img_size//4, embed_dim)
        self.mixer2= Mixer( img_size//8, embed_dim*2)
        self.mixer3= Mixer( img_size//16, embed_dim*4)
        self.mixer4= Mixer( img_size//32, embed_dim*8)
        self.mixer11= Mixer( img_size//4, embed_dim)
        self.mixer22= Mixer( img_size//8, embed_dim*2)
        self.mixer33= Mixer( img_size//16, embed_dim*4)
        self.mixer44= Mixer( img_size//32, embed_dim*8)
        #encode
        self.patch_embed = PatchEmbeddings(4, embed_dim, img_channels)
        self.patch_merge1 = PatchMerge(embed_dim)
        self.patch_merge2= PatchMerge(embed_dim*2)
        self.patch_merge3= PatchMerge(embed_dim*4)
        #decode
        self.patch_expand1 = PatchExpand(embed_dim*8)
        self.patch_expand2 = PatchExpand(embed_dim*4)
        self.patch_expand3 = PatchExpand(embed_dim*2)
        self.final_expand = FinalPatchExpand(embed_dim, img_channels)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # y collects encoder activations for the additive skip connections.
        y=[]
        x = self.patch_embed(x)
        x = self.mixer1(x)
        x = self.mixer1(x)
        y.append(x)
        x = self.patch_merge1(x)
        x = self.mixer2(x)
        x = self.mixer2(x)
        y.append(x)
        x = self.patch_merge2(x)
        x = self.mixer3(x)
        x = self.mixer3(x)
        y.append(x)
        x = self.patch_merge3(x)
        x = self.mixer4(x)
        x = self.mixer4(x)
        # Bottleneck.
        x = self.mixer44(x)
        x = self.mixer44(x)
        x = self.patch_expand1(x)
        x = self.mixer33(x)
        x = self.mixer33(x) + y[2]  # skip connection from matching encoder scale
        x = self.patch_expand2(x)
        x = self.mixer22(x)
        x = self.mixer22(x) + y[1]
        x = self.patch_expand3(x)
        x = self.mixer11(x)
        x = self.mixer11(x) + y[0]
        x = self.final_expand(x)
        return x
imaging_MLPs | imaging_MLPs-master/SIDD/config.py | #Path to original dataset
original_path='/workspace/media/hdd1/SSID/SIDD_Small_sRGB_Only/Data/'
#Path to processed dataset
data_path='/workspace/media/hdd1/SSID/dataset/'
#Path to store trained models
models_path= "/workspace/media/hdd1/SSID/trained_models/"
#Path to store tensor board data
logs_path= './logs/' | 315 | 27.727273 | 69 | py |
imaging_MLPs | imaging_MLPs-master/SIDD/networks/img2img_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbedding(nn.Module):
    """Split an image into patch_size x patch_size patches and embed each one;
    output layout is channels-last (B, H', W', C)."""

    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        # Strided conv == per-patch linear projection.
        self.proj = nn.Sequential(
            nn.Conv2d(
                in_channels=channels,
                out_channels=embed_dim,
                kernel_size=patch_size,
                stride=patch_size
            ),
            Rearrange("b c h w -> b h w c")
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchExpansion(nn.Module):
    """Maps the token grid back to image space: each token is expanded into a
    dim_scale x dim_scale pixel block, normalised, then projected to image
    channels with a 1x1 convolution."""

    def __init__(self, dim_scale, channel_dim, img_channels, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        self.expand = nn.Linear(channel_dim, dim_scale**2* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)

    def forward(self, x):
        """x: (B, H, W, C) -> image (B, img_channels, H*dim_scale, W*dim_scale)."""
        expanded = self.expand(x)
        batch, height, width, channels = expanded.shape
        scale = self.dim_scale
        out = rearrange(expanded, 'b h w (p1 p2 c)-> b (h p1) (w p2) c',
                        p1=scale, p2=scale, c=channels // (scale ** 2))
        out = self.norm(out.view(batch, -1, self.output_dim))
        out = out.view(batch, height * scale, width * scale, -1)
        # (B, H', W', C) -> (B, C, H', W') for the convolution.
        return self.output(out.permute(0, 3, 1, 2))
class MLPBlock(nn.Module):
    """Linear -> GELU -> Linear feed-forward block returning to input_dim."""

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        up = nn.Linear(input_dim, hidden_dim)
        down = nn.Linear(hidden_dim, input_dim)
        self.model = nn.Sequential(up, nn.GELU(), down)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class Mixer(nn.Module):
    """Mixer layer with a hidden-width multiplier `f_hidden`: token-mixing
    MLPs along each spatial axis, then a channel-mixing MLP, each with a
    residual connection and pre-LayerNorm."""

    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        f_hidden: int
    ):
        super().__init__()
        # Transposes bring H, then W, into the last axis for token mixing.
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),
            MLPBlock(num_patches, num_patches*f_hidden),
            Rearrange("b c w h -> b c h w"),
            MLPBlock(num_patches, num_patches*f_hidden),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels*f_hidden)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        mixed = x + self.token_mixing(x)
        return mixed + self.channel_mixing(mixed)
class Img2Img_Mixer(nn.Module):
    """Image-to-image MLP-Mixer: patch embedding, `num_layers` Mixer blocks
    over the (img_size/patch_size)^2 token grid, and a patch-expansion head
    mapping tokens back to image space (same spatial size as the input)."""
    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 3,
        patch_size: int = 4,
        embed_dim: int = 128,
        num_layers: int = 16,
        f_hidden: int = 4,
    ):
        super().__init__()
        self.patch_embed = PatchEmbedding(patch_size, embed_dim, img_channels)
        # One token/channel mixer per layer; f_hidden widens the MLP hiddens.
        layers = [ Mixer(img_size//patch_size, embed_dim, f_hidden)
                  for _ in range(num_layers)]
        self.mixer_layers = nn.Sequential(*layers)
        self.patch_expand = PatchExpansion(patch_size, embed_dim, img_channels)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.patch_embed(x)
        x = self.mixer_layers(x)
        x = self.patch_expand(x)
        return x
imaging_MLPs | imaging_MLPs-master/SIDD/networks/vit.py | '''
This code is modified from https://github.com/facebookresearch/convit. To adapt the vit/convit to image reconstruction, variable input sizes, and patch sizes for both spatial dimensions.
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> Linear, with
    dropout after each linear layer and truncated-normal weight init."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal linear weights, zero biases, unit LayerNorm gain.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class GPSA(nn.Module):
    """Gated Positional Self-Attention (ConViT-style).

    Each head blends a content-based attention map with a positional map
    derived from relative patch offsets; a learned per-head sigmoid gate
    controls the blend. `local_init` biases the positional projection so
    heads start out attending to a convolution-like local neighbourhood.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 locality_strength=1., use_local_init=True, grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.k = nn.Linear(dim, dim, bias=qkv_bias)
        self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        # Projects relative-position features (dx, dy, d^2) to per-head scores.
        self.pos_proj = nn.Linear(3, num_heads)
        self.proj_drop = nn.Dropout(proj_drop)
        self.locality_strength = locality_strength
        # One gate per head: sigmoid(gate) weights positional vs. content attention.
        self.gating_param = nn.Parameter(1*torch.ones(self.num_heads))
        self.apply(self._init_weights)
        if use_local_init:
            self.local_init(locality_strength=locality_strength)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention(self, x):
        """Return the blended (B, num_heads, N, N) attention for tokens x (B, N, C)."""
        B, N, C = x.shape
        k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        pos_score = self.pos_proj(self.rel_indices).expand(B, -1, -1,-1).permute(0,3,1,2)
        patch_score = (q @ k.transpose(-2, -1)) * self.scale
        patch_score = patch_score.softmax(dim=-1)
        pos_score = pos_score.softmax(dim=-1)
        gating = self.gating_param.view(1,-1,1,1)
        # Blend the two softmaxed maps per head, then renormalise rows to sum to 1.
        attn = (1.-torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
        attn = attn / attn.sum(dim=-1).unsqueeze(-1)
        attn = self.attn_drop(attn)
        return attn
    def get_attention_map(self, x, return_map = False):
        """Per-head mean attention distance (and optionally the batch-averaged map)."""
        attn_map = self.get_attention(x).mean(0) # average over batch
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def local_init(self, locality_strength=1.):
        """Identity-initialise v and bias pos_proj so each head favours one
        offset of a sqrt(num_heads) x sqrt(num_heads) neighbourhood."""
        self.v.weight.data.copy_(torch.eye(self.dim))
        locality_distance = 1 #max(1,1/locality_strength**.5)
        kernel_size = int(self.num_heads**.5)
        center = (kernel_size-1)/2 if kernel_size%2==0 else kernel_size//2
        for h1 in range(kernel_size):
            for h2 in range(kernel_size):
                position = h1+kernel_size*h2
                self.pos_proj.weight.data[position,2] = -1
                self.pos_proj.weight.data[position,1] = 2*(h1-center)*locality_distance
                self.pos_proj.weight.data[position,0] = 2*(h2-center)*locality_distance
        self.pos_proj.weight.data *= locality_strength
    def get_rel_indices(self, ):
        """Cache (1, N, N, 3) relative offsets (dx, dy, d^2) for the current
        H x W token grid, on the same device as the weights."""
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.v.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        # Rebuild the cached relative indices when the token count changed.
        if not hasattr(self, 'rel_indices') or self.rel_indices.size(1)!=N:
            self.get_rel_indices()
        attn = self.get_attention(x)
        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class MHSA(nn.Module):
    """Vanilla multi-head self-attention over a flattened H x W token grid.

    `current_grid_size` is set externally (by the enclosing Block) before
    each forward pass so `get_rel_indices` can rebuild relative distances.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.apply(self._init_weights)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention_map(self, x, return_map = False):
        """Per-head mean attention distance (and optionally the batch-averaged map)."""
        self.get_rel_indices()
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn_map = (q @ k.transpose(-2, -1)) * self.scale
        attn_map = attn_map.softmax(dim=-1).mean(0) # average over batch
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def get_rel_indices(self, ):
        """Cache (1, N, N, 3) relative offsets (dx, dy, d^2) for the current grid."""
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.qkv.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Module):
    """Pre-norm transformer block: attention (GPSA or MHSA) + MLP, each with
    a residual connection and optional stochastic depth (DropPath)."""
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.use_gpsa = use_gpsa
        # Gated positional self-attention vs. plain multi-head self-attention.
        if self.use_gpsa:
            self.attn = GPSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
        else:
            self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
    def forward(self, x, grid_size):
        # The attention module needs the token grid shape for its positional logic.
        self.attn.current_grid_size = grid_size
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class PatchEmbed(nn.Module):
    """Image to patch embedding via a strided convolution (adapted from timm)."""

    def __init__(self, patch_size, in_chans, embed_dim):
        super().__init__()
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.apply(self._init_weights)

    def forward(self, x):
        # (B, C, H, W) -> (B, embed_dim, H/patch_h, W/patch_w)
        return self.proj(x)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
class VisionTransformer(nn.Module):
    """ Vision Transformer for image reconstruction (ConViT-style).

    A stack of `depth` transformer blocks encodes the patch tokens; blocks
    whose (1-based) index falls inside `gpsa_interval` use gated positional
    self-attention, the rest plain MHSA.  A linear head maps each token back
    to a patch of pixels and `seq2img` reassembles the image.
    """
    def __init__(self, avrg_img_size=320, patch_size=16, in_chans=1, embed_dim=64, depth=8,
                 num_heads=9, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, global_pool=None,
                 gpsa_interval=[-1, -1], locality_strength=1., use_pos_embed=True):
        super().__init__()
        self.depth = depth
        embed_dim *= num_heads  # per-head width -> total width
        self.num_features = embed_dim  # num_features for consistency with other models
        self.locality_strength = locality_strength
        self.use_pos_embed = use_pos_embed
        if isinstance(avrg_img_size, int):
            img_size = to_2tuple(avrg_img_size)
        if isinstance(patch_size, int):
            self.patch_size = to_2tuple(patch_size)
        else:
            self.patch_size = patch_size
        self.in_chans = in_chans
        self.patch_embed = PatchEmbed(
            patch_size=self.patch_size, in_chans=in_chans, embed_dim=embed_dim)
        self.pos_drop = nn.Dropout(p=drop_rate)
        if self.use_pos_embed:
            # Learned positional grid sized for the average image; interpolated
            # to the actual token grid in forward_features.
            self.pos_embed = nn.Parameter(
                torch.zeros(1, embed_dim,
                            img_size[0] // self.patch_size[0],
                            img_size[1] // self.patch_size[1])
            )
            trunc_normal_(self.pos_embed, std=.02)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=True,
                locality_strength=locality_strength)
            if i >= gpsa_interval[0] - 1 and i < gpsa_interval[1] else
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=False,)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # head: one linear layer from token features to patch pixels
        self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
        self.head = nn.Linear(self.num_features, in_chans * self.patch_size[0] * self.patch_size[1])
        # BUGFIX: this init call previously sat unreachable after the `return`
        # inside seq2img(), so the head was left with PyTorch's default init.
        self.head.apply(self._init_weights)

    def seq2img(self, x, img_size):
        """
        Transforms sequence back into image space, input dims: [batch_size, num_patches, channels]
        output dims: [batch_size, channels, H, W]
        """
        x = x.view(x.shape[0], x.shape[1], self.in_chans, self.patch_size[0], self.patch_size[1])
        # Lay the patches out in a row, then fold the row into H // patch_h rows.
        x = x.chunk(x.shape[1], dim=1)
        x = torch.cat(x, dim=4).permute(0, 1, 2, 4, 3)
        x = x.chunk(img_size[0] // self.patch_size[0], dim=3)
        x = torch.cat(x, dim=4).permute(0, 1, 2, 4, 3).squeeze(1)
        return x

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self,):
        return {'pos_embed'}

    def get_head(self,):
        return self.head

    def reset_head(self,):
        # BUGFIX: `in_chans` was an unbound local name here (NameError at call
        # time); use the attribute stored in __init__.
        self.head = nn.Linear(self.num_features, self.in_chans * self.patch_size[0] * self.patch_size[1])

    def forward_features(self, x, k=None):
        """Encode image `x` into token features (B, N, embed_dim).

        If `k` is given, the attention map of block `k` is stored in
        `self.attention_map` as a side effect.
        """
        x = self.patch_embed(x)
        _, _, H, W = x.shape
        if self.use_pos_embed:
            pos_embed = F.interpolate(self.pos_embed, size=[H, W], mode='bilinear', align_corners=False)
            x = x + pos_embed
        x = x.flatten(2).transpose(1, 2)  # (B, C, H, W) -> (B, N, C)
        x = self.pos_drop(x)
        for u, blk in enumerate(self.blocks):
            x = blk(x, (H, W))
            if k is not None and u == k:
                self.attention_map = blk.attn.get_attention_map(x, return_map=True)
        x = self.norm(x)
        return x

    def forward(self, x, k=None):
        """Reconstruct an image from `x` (B, C, H, W); output has the same shape."""
        _, _, H, W = x.shape
        x = self.forward_features(x, k)
        x = self.head(x)
        x = self.seq2img(x, (H, W))
        return x
imaging_MLPs | imaging_MLPs-master/SIDD/networks/recon_net.py | import torch.nn as nn
import torch.nn.functional as F
from math import ceil, floor
class ReconNet(nn.Module):
    """Wrapper that zero-pads inputs to a multiple of the wrapped network's
    patch size and crops the output back to the original spatial size."""
    def __init__(self, net):
        super().__init__()
        self.net = net  # wrapped model; must expose `patch_size` as (patch_h, patch_w)
    def pad(self, x):
        # x: (N, C, H, W). The deficit per axis is split as evenly as possible
        # (extra pixel goes to the right/bottom side).
        _, _, h, w = x.shape
        hp, wp = self.net.patch_size
        f1 = ( (wp - w % wp) % wp ) / 2
        f2 = ( (hp - h % hp) % hp ) / 2
        wpad = [floor(f1), ceil(f1)]
        hpad = [floor(f2), ceil(f2)]
        x = F.pad(x, wpad+hpad)
        return x, wpad, hpad
    def unpad(self, x, wpad, hpad):
        # Crop away the padding added by pad().
        return x[..., hpad[0] : x.shape[-2]-hpad[1], wpad[0] : x.shape[-1]-wpad[1]]
    def forward(self, x, k=None):
        # Pad -> wrapped net (k is forwarded unchanged) -> crop to input size.
        x, wpad, hpad = self.pad(x)
        x = self.net(x, k)
        x = self.unpad(x, wpad, hpad)
        return x
imaging_MLPs | imaging_MLPs-master/SIDD/networks/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234–241.
    Springer, 2015.
    """
    def __init__(
        self,
        in_chans: int = 3,
        out_chans: int = 3,
        chans: int = 21,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
            out_chans: Number of channels in the output to the U-Net model.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        # Encoder: channel width doubles at each level.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        ch = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
            ch *= 2
        # Bottleneck.
        self.conv = ConvBlock(ch, ch * 2, drop_prob)
        # Decoder: transpose convs upsample, conv blocks fuse the skip input.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
            self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
            ch //= 2
        self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(ch * 2, ch, drop_prob),
                nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
            )
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        stack = []
        output = image
        # apply down-sampling layers (pre-pool activations become skip inputs)
        for layer in self.down_sample_layers:
            output = layer(output)
            stack.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
        output = self.conv(output)
        # apply up-sampling layers
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            downsample_layer = stack.pop()
            output = transpose_conv(output)
            # reflect pad on the right/botton if needed to handle odd input dimensions
            padding = [0, 0, 0, 0]
            if output.shape[-1] != downsample_layer.shape[-1]:
                padding[1] = 1  # padding right
            if output.shape[-2] != downsample_layer.shape[-2]:
                padding[3] = 1  # padding bottom
            if torch.sum(torch.tensor(padding)) != 0:
                output = F.pad(output, padding, "reflect")
            output = torch.cat([output, downsample_layer], dim=1)
            output = conv(output)
        return output
class ConvBlock(nn.Module):
    """
    A Convolutional Block that consists of two convolution layers each followed by
    instance normalization, LeakyReLU activation and dropout.
    """
    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob
        # Conv bias is omitted because InstanceNorm immediately follows.
        self.layers = nn.Sequential(
            nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Dropout2d(drop_prob),
            nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Dropout2d(drop_prob),
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """
    A Transpose Convolutional Block that consists of one convolution transpose
    layers followed by instance normalization and LeakyReLU activation.
    """
    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        # kernel_size == stride == 2 exactly doubles both spatial dimensions.
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(
                in_chans, out_chans, kernel_size=2, stride=2, bias=False
            ),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H*2, W*2)`.
        """
        return self.layers(image)
imaging_MLPs | imaging_MLPs-master/SIDD/networks/__init__.py | from .img2img_mixer import *
from .u_mixer import *
from .unet import *
from .recon_net import ReconNet
from .vit import VisionTransformer
| 140 | 19.142857 | 34 | py |
imaging_MLPs | imaging_MLPs-master/SIDD/networks/u_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbeddings(nn.Module):
    """Split the image into patch_size x patch_size patches and linearly embed
    each via a strided convolution; output layout is (B, H', W', embed_dim)."""
    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Conv2d(
                in_channels=channels,
                out_channels=embed_dim,
                kernel_size=patch_size,
                stride=patch_size
            ),
            Rearrange("b c h w -> b h w c")
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchMerge(nn.Module):
    r""" Patch Merging Layer.

    Concatenates each 2x2 neighbourhood of patches channel-wise (C -> 4C),
    normalises, then reduces to 2C; spatial resolution halves per axis.

    Args:
        input_resolution (int): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """
    def __init__(self, channel_dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = channel_dim
        self.reduction = nn.Linear(4 * channel_dim, 2 * channel_dim, bias=False)
        self.norm = norm_layer(4 * channel_dim)
    def forward(self, x):
        """
        x: B, H, W, C
        """
        B, H, W, C = x.shape
        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        # The cat above already yields this shape; the view is a no-op reshape.
        x = x.view(B, H//2, W//2, 4 * C)  # B H/2*W/2 4*C
        x = self.norm(x)
        x = self.reduction(x)
        return x
class PatchExpand(nn.Module):
    """Upsample a channels-last feature map by 2x.

    Channels are first linearly expanded (C -> 2C), then redistributed into
    2x2 spatial neighbourhoods, leaving ``C // dim_scale`` channels at twice
    the resolution.
    """
    def __init__(self, channel_dim, dim_scale=2, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = channel_dim
        # Only the standard 2x expansion needs a projection; otherwise the
        # channels pass through untouched.
        if dim_scale == 2:
            self.expand = nn.Linear(channel_dim, 2* channel_dim, bias=False)
        else:
            self.expand = nn.Identity()
        self.norm = norm_layer(channel_dim // dim_scale)
    def forward(self, x):
        """Expand ``(B, H, W, C)`` to ``(B, 2H, 2W, C // dim_scale)``."""
        expanded = self.expand(x)
        batch, height, width, chans = expanded.shape
        expanded = rearrange(
            expanded, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=chans//4
        )
        expanded = expanded.view(batch, height*2, width*2, chans//4)
        return self.norm(expanded)
class FinalPatchExpand(nn.Module):
    """Final 4x upsampling back to pixel space.

    Channels are expanded 16x, rearranged into 4x4 spatial blocks,
    normalised, and projected to the image channel count by a 1x1 conv.
    """
    def __init__(self, channel_dim, img_channels, dim_scale=4, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        self.expand = nn.Linear(channel_dim, 16* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)
    def forward(self, x):
        """Map ``(B, H, W, C)`` to an image of shape ``(B, img_channels, 4H, 4W)``."""
        expanded = self.expand(x)
        batch, height, width, chans = expanded.shape
        scale = self.dim_scale
        expanded = rearrange(
            expanded, 'b h w (p1 p2 c)-> b (h p1) (w p2) c',
            p1=scale, p2=scale, c=chans//(scale**2)
        )
        # Flatten for the LayerNorm, then restore the upsampled spatial grid.
        expanded = expanded.view(batch, -1, self.output_dim)
        expanded = self.norm(expanded)
        expanded = expanded.view(batch, height*scale, width*scale, -1)
        # Back to channels-first for the 1x1 convolution.
        return self.output(expanded.permute(0, 3, 1, 2))
class MLPBlock(nn.Module):
    """Two-layer perceptron with a GELU non-linearity in between.

    Maps the last dimension ``input_dim -> hidden_dim -> input_dim``.
    """
    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim),
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the MLP over the last dimension of ``x``."""
        return self.model(x)
class Mixer(nn.Module):
    """MLP-Mixer block: token mixing over both spatial axes, then channel mixing.

    Token mixing transposes the channels-last feature map so the MLPs act
    along the width and then the height axis; channel mixing applies an MLP
    over the embedding axis. Both paths are residual.
    """
    def __init__(
        self,
        num_patches: int,
        num_channels: int
    ):
        super().__init__()
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),
            MLPBlock(num_patches, num_patches),
            Rearrange("b c w h -> b c h w"),
            MLPBlock(num_patches, num_patches),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels),
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply residual token mixing followed by residual channel mixing."""
        mixed = x + self.token_mixing(x)
        return mixed + self.channel_mixing(mixed)
class U_Mixer(nn.Module):
    """U-Net shaped MLP-Mixer: a mixer encoder/decoder with additive skips.

    The encoder alternates pairs of Mixer blocks with patch merging (2x
    downsampling); the decoder mirrors it with patch expansion and adds the
    encoder features back at the matching resolution.
    """
    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 3,
        embed_dim: int = 96,
    ):
        super().__init__()
        # Encoder-side mixer blocks (each applied twice in forward()).
        self.mixer1 = Mixer(img_size // 4, embed_dim)
        self.mixer2 = Mixer(img_size // 8, embed_dim * 2)
        self.mixer3 = Mixer(img_size // 16, embed_dim * 4)
        self.mixer4 = Mixer(img_size // 32, embed_dim * 8)
        # Decoder-side mixer blocks.
        self.mixer11 = Mixer(img_size // 4, embed_dim)
        self.mixer22 = Mixer(img_size // 8, embed_dim * 2)
        self.mixer33 = Mixer(img_size // 16, embed_dim * 4)
        self.mixer44 = Mixer(img_size // 32, embed_dim * 8)
        # Encoder: patch embedding followed by three merge (downsample) stages.
        self.patch_embed = PatchEmbeddings(4, embed_dim, img_channels)
        self.patch_merge1 = PatchMerge(embed_dim)
        self.patch_merge2 = PatchMerge(embed_dim * 2)
        self.patch_merge3 = PatchMerge(embed_dim * 4)
        # Decoder: three expand (upsample) stages plus the final 4x expansion.
        self.patch_expand1 = PatchExpand(embed_dim * 8)
        self.patch_expand2 = PatchExpand(embed_dim * 4)
        self.patch_expand3 = PatchExpand(embed_dim * 2)
        self.final_expand = FinalPatchExpand(embed_dim, img_channels)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the mixer U-Net; input and output are image-shaped tensors."""
        skips = []
        # --- encoder ---
        x = self.patch_embed(x)
        x = self.mixer1(self.mixer1(x))
        skips.append(x)
        x = self.patch_merge1(x)
        x = self.mixer2(self.mixer2(x))
        skips.append(x)
        x = self.patch_merge2(x)
        x = self.mixer3(self.mixer3(x))
        skips.append(x)
        x = self.patch_merge3(x)
        # --- bottleneck ---
        x = self.mixer4(self.mixer4(x))
        x = self.mixer44(self.mixer44(x))
        # --- decoder with additive skip connections ---
        x = self.patch_expand1(x)
        x = self.mixer33(self.mixer33(x)) + skips[2]
        x = self.patch_expand2(x)
        x = self.mixer22(self.mixer22(x)) + skips[1]
        x = self.patch_expand3(x)
        x = self.mixer11(self.mixer11(x)) + skips[0]
        return self.final_expand(x)
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/losses.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSIMLoss(nn.Module):
    """
    SSIM loss module: ``1 - mean SSIM`` over a uniform sliding window.
    """
    def __init__(self, win_size: int = 7, k1: float = 0.01, k2: float = 0.03):
        """
        Args:
            win_size: Window size for SSIM calculation.
            k1: k1 parameter for SSIM calculation.
            k2: k2 parameter for SSIM calculation.
        """
        super().__init__()
        self.win_size = win_size
        self.k1 = k1
        self.k2 = k2
        # Uniform averaging kernel; registered as a buffer so it follows the
        # module across devices and dtypes.
        self.register_buffer("w", torch.ones(1, 1, win_size, win_size) / win_size ** 2)
        n_pix = win_size ** 2
        # Bessel correction turning windowed moments into unbiased covariances.
        self.cov_norm = n_pix / (n_pix - 1)
    def forward(self, X: torch.Tensor, Y: torch.Tensor, data_range: torch.Tensor):
        """Return ``1 - SSIM(X, Y)``; *data_range* is the per-sample dynamic range."""
        assert isinstance(self.w, torch.Tensor)
        data_range = data_range[:, None, None, None]
        C1 = (self.k1 * data_range) ** 2
        C2 = (self.k2 * data_range) ** 2
        # Windowed first and second moments.
        ux = F.conv2d(X, self.w)
        uy = F.conv2d(Y, self.w)
        uxx = F.conv2d(X * X, self.w)
        uyy = F.conv2d(Y * Y, self.w)
        uxy = F.conv2d(X * Y, self.w)
        # Unbiased (co)variances.
        vx = self.cov_norm * (uxx - ux * ux)
        vy = self.cov_norm * (uyy - uy * uy)
        vxy = self.cov_norm * (uxy - ux * uy)
        numerator = (2 * ux * uy + C1) * (2 * vxy + C2)
        denominator = (ux ** 2 + uy ** 2 + C1) * (vx + vy + C2)
        ssim_map = numerator / denominator
        return 1 - ssim_map.mean()
| 1,671 | 28.857143 | 87 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/coil_combine.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import fastmri
def rss(data: torch.Tensor, dim: int = 0) -> torch.Tensor:
    """
    Compute the Root Sum of Squares (RSS).

    RSS is computed assuming that dim is the coil dimension.

    Args:
        data: The input tensor
        dim: The dimensions along which to apply the RSS transform

    Returns:
        The RSS value.
    """
    squared = data ** 2
    return squared.sum(dim).sqrt()
def rss_complex(data: torch.Tensor, dim: int = 0) -> torch.Tensor:
    """
    Compute the Root Sum of Squares (RSS) for complex inputs.

    RSS is computed assuming that dim is the coil dimension and that the last
    axis of ``data`` holds the real/imaginary pair.

    Args:
        data: The input tensor
        dim: The dimensions along which to apply the RSS transform

    Returns:
        The RSS value.
    """
    magnitudes_sq = fastmri.complex_abs_sq(data)
    return torch.sqrt(magnitudes_sq.sum(dim))
| 996 | 22.186047 | 66 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/evaluate.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import argparse
import pathlib
from argparse import ArgumentParser
from typing import Optional
import h5py
import numpy as np
from runstats import Statistics
from skimage.metrics import peak_signal_noise_ratio, structural_similarity
from fastmri.data import transforms
def mse(gt: np.ndarray, pred: np.ndarray) -> np.ndarray:
    """Compute Mean Squared Error (MSE) between *gt* and *pred*."""
    diff = gt - pred
    return np.mean(diff * diff)
def nmse(gt: np.ndarray, pred: np.ndarray) -> np.ndarray:
    """Compute Normalized Mean Squared Error: ||gt - pred||^2 / ||gt||^2."""
    err_energy = np.linalg.norm(gt - pred) ** 2
    ref_energy = np.linalg.norm(gt) ** 2
    return err_energy / ref_energy
def psnr(
    gt: np.ndarray, pred: np.ndarray, maxval: Optional[float] = None
) -> np.ndarray:
    """Compute Peak Signal to Noise Ratio metric (PSNR).

    If *maxval* is omitted, the ground-truth maximum is used as the range.
    """
    data_range = gt.max() if maxval is None else maxval
    return peak_signal_noise_ratio(gt, pred, data_range=data_range)
def ssim(
    gt: np.ndarray, pred: np.ndarray, maxval: Optional[float] = None
) -> np.ndarray:
    """Compute Structural Similarity Index Metric (SSIM), averaged over slices.

    Raises:
        ValueError: If *gt* is not 3D or *pred* has a different rank.
    """
    if gt.ndim != 3:
        raise ValueError("Unexpected number of dimensions in ground truth.")
    if gt.ndim != pred.ndim:
        raise ValueError("Ground truth dimensions does not match pred.")
    data_range = gt.max() if maxval is None else maxval
    total = sum(
        structural_similarity(gt[i], pred[i], data_range=data_range)
        for i in range(gt.shape[0])
    )
    return total / gt.shape[0]
# Registry mapping metric name -> metric function; used by Metrics and evaluate().
METRIC_FUNCS = dict(
    MSE=mse,
    NMSE=nmse,
    PSNR=psnr,
    SSIM=ssim,
)
class Metrics:
    """
    Maintains running statistics for a given collection of metrics.
    """
    def __init__(self, metric_funcs):
        """
        Args:
            metric_funcs (dict): A dict where the keys are metric names and the
                values are Python functions for evaluating that metric.
        """
        # Keep the functions so push() honours the collection passed in;
        # previously push() iterated the global METRIC_FUNCS, which broke
        # (KeyError) for any custom subset of metrics.
        self.metric_funcs = metric_funcs
        self.metrics = {metric: Statistics() for metric in metric_funcs}
    def push(self, target, recons):
        """Evaluate every metric on (target, recons) and update its statistics."""
        for metric, func in self.metric_funcs.items():
            self.metrics[metric].push(func(target, recons))
    def means(self):
        """Return {metric name: running mean}."""
        return {metric: stat.mean() for metric, stat in self.metrics.items()}
    def stddevs(self):
        """Return {metric name: running standard deviation}."""
        return {metric: stat.stddev() for metric, stat in self.metrics.items()}
    def __repr__(self):
        means = self.means()
        stddevs = self.stddevs()
        metric_names = sorted(list(means))
        return " ".join(
            f"{name} = {means[name]:.4g} +/- {2 * stddevs[name]:.4g}"
            for name in metric_names
        )
def evaluate(args, recons_key):
    """Compare reconstructions against ground-truth volumes on all metrics.

    Args:
        args: Namespace with target_path, predictions_path, and optional
            acquisition / acceleration filters (see the CLI below).
        recons_key: HDF5 dataset key of the ground-truth reconstruction.

    Returns:
        A Metrics object with running statistics over all evaluated volumes.
    """
    metrics = Metrics(METRIC_FUNCS)
    for tgt_file in args.target_path.iterdir():
        # Prediction file is expected to have the same name as the target.
        with h5py.File(tgt_file, "r") as target, h5py.File(
            args.predictions_path / tgt_file.name, "r"
        ) as recons:
            # Optional filtering by acquisition type / acceleration factor.
            if args.acquisition and args.acquisition != target.attrs["acquisition"]:
                continue
            if args.acceleration and target.attrs["acceleration"] != args.acceleration:
                continue
            target = target[recons_key][()]
            recons = recons["reconstruction"][()]
            # Crop both to a square of the target's width before comparing.
            target = transforms.center_crop(
                target, (target.shape[-1], target.shape[-1])
            )
            recons = transforms.center_crop(
                recons, (target.shape[-1], target.shape[-1])
            )
            metrics.push(target, recons)
    return metrics
if __name__ == "__main__":
    # CLI entry point: evaluate reconstructions against ground truth.
    parser = ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--target-path",
        type=pathlib.Path,
        required=True,
        help="Path to the ground truth data",
    )
    parser.add_argument(
        "--predictions-path",
        type=pathlib.Path,
        required=True,
        help="Path to reconstructions",
    )
    parser.add_argument(
        "--challenge",
        choices=["singlecoil", "multicoil"],
        required=True,
        help="Which challenge",
    )
    parser.add_argument("--acceleration", type=int, default=None)
    parser.add_argument(
        "--acquisition",
        choices=[
            "CORPD_FBK",
            "CORPDFS_FBK",
            "AXT1",
            "AXT1PRE",
            "AXT1POST",
            "AXT2",
            "AXFLAIR",
        ],
        default=None,
        help="If set, only volumes of the specified acquisition type are used "
        "for evaluation. By default, all volumes are included.",
    )
    args = parser.parse_args()
    # The ground-truth dataset key differs between the two challenges.
    recons_key = (
        "reconstruction_rss" if args.challenge == "multicoil" else "reconstruction_esc"
    )
    metrics = evaluate(args, recons_key)
    print(metrics)
| 4,903 | 27.847059 | 87 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/math.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def complex_mul(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Complex multiplication.

    This multiplies two complex tensors assuming that they are both stored as
    real arrays with the last dimension being the complex dimension.

    Args:
        x: A PyTorch tensor with the last dimension of size 2.
        y: A PyTorch tensor with the last dimension of size 2.

    Returns:
        A PyTorch tensor with the last dimension of size 2.
    """
    if not x.shape[-1] == y.shape[-1] == 2:
        raise ValueError("Tensors do not have separate complex dim.")
    x_re, x_im = x[..., 0], x[..., 1]
    y_re, y_im = y[..., 0], y[..., 1]
    # (a + bi)(c + di) = (ac - bd) + (ad + bc)i
    real_part = x_re * y_re - x_im * y_im
    imag_part = x_re * y_im + x_im * y_re
    return torch.stack((real_part, imag_part), dim=-1)
def complex_conj(x: torch.Tensor) -> torch.Tensor:
    """
    Complex conjugate.

    This applies the complex conjugate assuming that the input array has the
    last dimension as the complex dimension.

    Args:
        x: A PyTorch tensor with the last dimension of size 2.

    Returns:
        A PyTorch tensor with the last dimension of size 2.
    """
    # Docstring fix: the original documented a nonexistent parameter `y`
    # (copy-paste from complex_mul); this function takes only `x`.
    if not x.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Negate only the imaginary component.
    return torch.stack((x[..., 0], -x[..., 1]), dim=-1)
def complex_abs(data: torch.Tensor) -> torch.Tensor:
    """
    Compute the absolute value of a complex valued input tensor.

    Args:
        data: A complex valued tensor, where the size of the final dimension
            should be 2.

    Returns:
        Absolute value of data.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # sqrt(re^2 + im^2), reducing away the trailing complex dimension.
    return (data ** 2).sum(dim=-1).sqrt()
def complex_abs_sq(data: torch.Tensor) -> torch.Tensor:
    """
    Compute the squared absolute value of a complex tensor.

    Args:
        data: A complex valued tensor, where the size of the final dimension
            should be 2.

    Returns:
        Squared absolute value of data.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # re^2 + im^2; equivalent to (data ** 2).sum(dim=-1).
    return data[..., 0] ** 2 + data[..., 1] ** 2
def tensor_to_complex_np(data: torch.Tensor) -> np.ndarray:
    """
    Converts a complex torch tensor to numpy array.

    Args:
        data: Input data to be converted to numpy; the last dimension holds
            the (real, imaginary) pair.

    Returns:
        Complex numpy version of data.
    """
    arr = data.numpy()
    real, imag = arr[..., 0], arr[..., 1]
    return real + 1j * imag
| 2,728 | 25.754902 | 77 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from pathlib import Path
from typing import Dict
import h5py
import numpy as np
def save_reconstructions(reconstructions: Dict[str, np.ndarray], out_dir: Path):
    """
    Save reconstruction images.

    This function writes to h5 files that are appropriate for submission to the
    leaderboard.

    Args:
        reconstructions: A dictionary mapping input filenames to corresponding
            reconstructions.
        out_dir: Path to the output directory where the reconstructions should
            be saved.
    """
    out_dir.mkdir(exist_ok=True, parents=True)
    # One HDF5 file per input volume, keyed as the leaderboard expects.
    for volume_name, volume in reconstructions.items():
        with h5py.File(out_dir / volume_name, "w") as out_file:
            out_file.create_dataset("reconstruction", data=volume)
def convert_fnames_to_v2(path: Path):
    """
    Converts filenames to conform to `v2` standard for knee data.

    For a file with name file1000.h5 in `path`, this script simply renames it
    to file1000_v2.h5. This is for submission to the public knee leaderboards.

    Args:
        path: Path with files to be renamed.

    Raises:
        ValueError: If *path* does not exist.
    """
    if not path.exists():
        raise ValueError("Path does not exist")
    suffix = "_v2.h5"
    for h5_file in path.glob("*.h5"):
        if h5_file.name.endswith(suffix):
            continue  # already converted
        h5_file.rename(path / (h5_file.stem + suffix))
| 1,467 | 28.36 | 80 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from packaging import version
from .coil_combine import rss, rss_complex
from .fftc import fftshift, ifftshift, roll
from .losses import SSIMLoss
from .math import (
complex_abs,
complex_abs_sq,
complex_conj,
complex_mul,
tensor_to_complex_np,
)
from .utils import convert_fnames_to_v2, save_reconstructions
if version.parse(torch.__version__) >= version.parse("1.7.0"):
from .fftc import fft2c_new as fft2c
from .fftc import ifft2c_new as ifft2c
else:
from .fftc import fft2c_old as fft2c
from .fftc import ifft2c_old as ifft2c
| 758 | 26.107143 | 63 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/fftc.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import List, Optional
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse("1.7.0"):
import torch.fft # type: ignore
def fft2c_old(data: torch.Tensor) -> torch.Tensor:
    """
    Apply centered 2 dimensional Fast Fourier Transform.

    Args:
        data: Complex valued input data containing at least 3 dimensions:
            dimensions -3 & -2 are spatial dimensions and dimension -1 has size
            2. All other dimensions are assumed to be batch dimensions.

    Returns:
        The FFT of the input.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Shift the image centre to index 0, transform, then shift back.
    data = ifftshift(data, dim=[-3, -2])
    # NOTE: the callable-function form torch.fft(...) only exists in
    # torch < 1.8; this path is selected by the module-level version guard.
    data = torch.fft(data, 2, normalized=True)
    data = fftshift(data, dim=[-3, -2])
    return data
def ifft2c_old(data: torch.Tensor) -> torch.Tensor:
    """
    Apply centered 2-dimensional Inverse Fast Fourier Transform.

    Args:
        data: Complex valued input data containing at least 3 dimensions:
            dimensions -3 & -2 are spatial dimensions and dimension -1 has size
            2. All other dimensions are assumed to be batch dimensions.

    Returns:
        The IFFT of the input.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Shift the image centre to index 0, inverse-transform, then shift back.
    data = ifftshift(data, dim=[-3, -2])
    # NOTE: the callable-function form torch.ifft(...) only exists in
    # torch < 1.8; this path is selected by the module-level version guard.
    data = torch.ifft(data, 2, normalized=True)
    data = fftshift(data, dim=[-3, -2])
    return data
def fft2c_new(data: torch.Tensor) -> torch.Tensor:
    """
    Apply centered 2 dimensional Fast Fourier Transform.

    Args:
        data: Complex valued input data containing at least 3 dimensions:
            dimensions -3 & -2 are spatial dimensions and dimension -1 has size
            2. All other dimensions are assumed to be batch dimensions.

    Returns:
        The FFT of the input.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Shift so the image centre sits at index 0, transform, shift back.
    shifted = ifftshift(data, dim=[-3, -2])
    transformed = torch.fft.fftn(  # type: ignore
        torch.view_as_complex(shifted), dim=(-2, -1), norm="ortho"
    )
    return fftshift(torch.view_as_real(transformed), dim=[-3, -2])
def ifft2c_new(data: torch.Tensor) -> torch.Tensor:
    """
    Apply centered 2-dimensional Inverse Fast Fourier Transform.

    Args:
        data: Complex valued input data containing at least 3 dimensions:
            dimensions -3 & -2 are spatial dimensions and dimension -1 has size
            2. All other dimensions are assumed to be batch dimensions.

    Returns:
        The IFFT of the input.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Shift so the image centre sits at index 0, inverse-transform, shift back.
    shifted = ifftshift(data, dim=[-3, -2])
    transformed = torch.fft.ifftn(  # type: ignore
        torch.view_as_complex(shifted), dim=(-2, -1), norm="ortho"
    )
    return fftshift(torch.view_as_real(transformed), dim=[-3, -2])
# Helper functions
def roll_one_dim(x: torch.Tensor, shift: int, dim: int) -> torch.Tensor:
    """
    Similar to roll but for only one dim.

    Args:
        x: A PyTorch tensor.
        shift: Amount to roll.
        dim: Which dimension to roll.

    Returns:
        Rolled version of x.
    """
    size = x.size(dim)
    shift = shift % size
    if shift == 0:
        return x
    # Split at the pivot and swap the two pieces.
    head = x.narrow(dim, 0, size - shift)
    tail = x.narrow(dim, size - shift, shift)
    return torch.cat((tail, head), dim=dim)
def roll(
    x: torch.Tensor,
    shift: List[int],
    dim: List[int],
) -> torch.Tensor:
    """
    Similar to np.roll but applies to PyTorch Tensors.

    Args:
        x: A PyTorch tensor.
        shift: Amount to roll.
        dim: Which dimension to roll.

    Returns:
        Rolled version of x.
    """
    if len(shift) != len(dim):
        raise ValueError("len(shift) must match len(dim)")
    # Roll each requested dimension in turn.
    for amount, axis in zip(shift, dim):
        x = roll_one_dim(x, amount, axis)
    return x
def fftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
    """
    Similar to np.fft.fftshift but applies to PyTorch Tensors

    Args:
        x: A PyTorch tensor.
        dim: Which dimension to fftshift.

    Returns:
        fftshifted version of x.
    """
    if dim is None:
        # Explicit loops (not comprehensions) keep torch.jit.script happy.
        dim = [0] * (x.dim())
        for axis in range(1, x.dim()):
            dim[axis] = axis
    shift = [0] * len(dim)
    for i, axis in enumerate(dim):
        # Shift each axis by half its length (floor).
        shift[i] = x.shape[axis] // 2
    return roll(x, shift, dim)
def ifftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
    """
    Similar to np.fft.ifftshift but applies to PyTorch Tensors

    Args:
        x: A PyTorch tensor.
        dim: Which dimension to ifftshift.

    Returns:
        ifftshifted version of x.
    """
    if dim is None:
        # Explicit loops (not comprehensions) keep torch.jit.script happy.
        dim = [0] * (x.dim())
        for axis in range(1, x.dim()):
            dim[axis] = axis
    shift = [0] * len(dim)
    for i, axis in enumerate(dim):
        # Shift each axis by half its length (ceil) — the inverse of fftshift.
        shift[i] = (x.shape[axis] + 1) // 2
    return roll(x, shift, dim)
| 5,535 | 25.236967 | 80 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/data/volume_sampler.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import List, Optional, Union
import torch
import torch.distributed as dist
from fastmri.data.mri_data import CombinedSliceDataset, SliceDataset
from torch.utils.data import Sampler
class VolumeSampler(Sampler):
    """
    Sampler for volumetric MRI data.

    Based on pytorch DistributedSampler, the difference is that all instances
    from the same MRI volume need to go to the same node for distributed
    training. Dataset example is a list of tuples (fname, instance), where
    fname is essentially the volume name (actually a filename).
    """
    def __init__(
        self,
        dataset: Union[CombinedSliceDataset, SliceDataset],
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
        shuffle: bool = True,
        seed: int = 0,
    ):
        """
        Args:
            dataset: An MRI dataset (e.g., SliceData).
            num_replicas: Number of processes participating in distributed
                training. By default, :attr:`rank` is retrieved from the
                current distributed group.
            rank: Rank of the current process within :attr:`num_replicas`. By
                default, :attr:`rank` is retrieved from the current distributed
                group.
            shuffle: If ``True`` (default), sampler will shuffle the indices.
            seed: random seed used to shuffle the sampler if
                :attr:`shuffle=True`. This number should be identical across
                all processes in the distributed group.
        """
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.shuffle = shuffle
        self.seed = seed
        # get all file names and split them based on number of processes
        self.all_volume_names = sorted(
            set(str(example[0]) for example in self.dataset.examples)
        )
        # Volumes are dealt round-robin: rank r receives volumes
        # r, r + num_replicas, r + 2*num_replicas, ...
        self.all_volumes_split: List[List[str]] = []
        for rank_num in range(self.num_replicas):
            self.all_volumes_split.append(
                [
                    self.all_volume_names[i]
                    for i in range(
                        rank_num, len(self.all_volume_names), self.num_replicas
                    )
                ]
            )
        # get slice indices for each file name
        rank_indices: List[List[int]] = [[] for _ in range(self.num_replicas)]
        for i, example in enumerate(self.dataset.examples):
            vname = str(example[0])
            for rank_num in range(self.num_replicas):
                if vname in self.all_volumes_split[rank_num]:
                    rank_indices[rank_num].append(i)
                    break
        # need to send equal number of samples to each process - take the max
        self.num_samples = max(len(indices) for indices in rank_indices)
        self.total_size = self.num_samples * self.num_replicas
        self.indices = rank_indices[self.rank]
    def __iter__(self):
        if self.shuffle:
            # deterministically shuffle based on epoch and seed
            g = torch.Generator()
            g.manual_seed(self.seed + self.epoch)
            ordering = torch.randperm(len(self.indices), generator=g).tolist()
            indices = [self.indices[i] for i in ordering]
        else:
            indices = self.indices
        # add extra samples to match num_samples
        # NOTE(review): len(self.indices) == 0 would raise ZeroDivisionError
        # here — this assumes every rank receives at least one volume.
        repeat_times = self.num_samples // len(indices)
        indices = indices * repeat_times
        indices = indices + indices[: self.num_samples - len(indices)]
        assert len(indices) == self.num_samples
        return iter(indices)
    def __len__(self):
        """Number of samples served to this rank per epoch."""
        return self.num_samples
    def set_epoch(self, epoch):
        """Set the epoch number used to reseed the per-epoch shuffle."""
        self.epoch = epoch
| 4,332 | 36.678261 | 82 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/data/mri_data.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import pickle
import random
import xml.etree.ElementTree as etree
from pathlib import Path
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
from warnings import warn
import h5py
import numpy as np
import torch
import yaml
def et_query(
    root: etree.Element,
    qlist: Sequence[str],
    namespace: str = "http://www.ismrm.org/ISMRMRD",
) -> str:
    """
    ElementTree query function.

    This can be used to query an xml document via ElementTree. It uses qlist
    for nested queries.

    Args:
        root: Root of the xml to search through.
        qlist: A list of strings for nested searches, e.g. ["Encoding",
            "matrixSize"]
        namespace: Optional; xml namespace to prepend query.

    Returns:
        The retrieved data as a string.

    Raises:
        RuntimeError: If no element matches the query.
    """
    prefix = "ismrmrd_namespace"
    ns = {prefix: namespace}
    # Build a descendant query like ".//ns:a//ns:b" from the element names.
    query = "." + "".join(f"//{prefix}:{el}" for el in qlist)
    value = root.find(query, ns)
    if value is None:
        raise RuntimeError("Element not found")
    return str(value.text)
def fetch_dir(
    key: str, data_config_file: Union[str, Path, os.PathLike] = "fastmri_dirs.yaml"
) -> Path:
    """
    Data directory fetcher.

    This is a brute-force simple way to configure data directories for a
    project. Simply overwrite the variables for `knee_path` and `brain_path`
    and this function will retrieve the requested subsplit of the data for use.

    Args:
        key: key to retrieve path from data_config_file. Expected to be in
            ("knee_path", "brain_path", "log_path").
        data_config_file: Optional; Default path config file to fetch path
            from.

    Returns:
        The path to the specified directory.
    """
    data_config_file = Path(data_config_file)
    if data_config_file.is_file():
        with open(data_config_file, "r") as f:
            data_dir = yaml.safe_load(f)[key]
    else:
        # First run: write a template config, warn, and return a placeholder.
        default_config = {
            "knee_path": "/path/to/knee",
            "brain_path": "/path/to/brain",
            "log_path": ".",
        }
        with open(data_config_file, "w") as f:
            yaml.dump(default_config, f)
        data_dir = default_config[key]
        warn(
            f"Path config at {data_config_file.resolve()} does not exist. "
            "A template has been created for you. "
            "Please enter the directory paths for your system to have defaults."
        )
    return Path(data_dir)
class CombinedSliceDataset(torch.utils.data.Dataset):
    """
    A container for combining slice datasets.

    Wraps one SliceDataset per root and exposes them as a single
    concatenated dataset; ``examples`` aggregates all child examples.
    """
    def __init__(
        self,
        roots: Sequence[Path],
        challenges: Sequence[str],
        transforms: Optional[Sequence[Optional[Callable]]] = None,
        sample_rates: Optional[Sequence[Optional[float]]] = None,
        volume_sample_rates: Optional[Sequence[Optional[float]]] = None,
        use_dataset_cache: bool = False,
        dataset_cache_file: Union[str, Path, os.PathLike] = "dataset_cache.pkl",
        num_cols: Optional[Tuple[int]] = None,
    ):
        """
        Args:
            roots: Paths to the datasets.
            challenges: "singlecoil" or "multicoil" depending on which
                challenge to use.
            transforms: Optional; A sequence of callable objects that
                preprocesses the raw data into appropriate form. The transform
                function should take 'kspace', 'target', 'attributes',
                'filename', and 'slice' as inputs. 'target' may be null for
                test data.
            sample_rates: Optional; A sequence of floats between 0 and 1.
                This controls what fraction of the slices should be loaded.
                When creating subsampled datasets either set sample_rates
                (sample by slices) or volume_sample_rates (sample by volumes)
                but not both.
            volume_sample_rates: Optional; A sequence of floats between 0 and 1.
                This controls what fraction of the volumes should be loaded.
                When creating subsampled datasets either set sample_rates
                (sample by slices) or volume_sample_rates (sample by volumes)
                but not both.
            use_dataset_cache: Whether to cache dataset metadata. This is very
                useful for large datasets like the brain data.
            dataset_cache_file: Optional; A file in which to cache dataset
                information for faster load times.
            num_cols: Optional; If provided, only slices with the desired
                number of columns will be considered.
        """
        if sample_rates is not None and volume_sample_rates is not None:
            raise ValueError(
                "either set sample_rates (sample by slices) or volume_sample_rates (sample by volumes) but not both"
            )
        # Default every per-dataset option to "unset" for each root.
        if transforms is None:
            transforms = [None] * len(roots)
        if sample_rates is None:
            sample_rates = [None] * len(roots)
        if volume_sample_rates is None:
            volume_sample_rates = [None] * len(roots)
        if not (
            len(roots)
            == len(transforms)
            == len(challenges)
            == len(sample_rates)
            == len(volume_sample_rates)
        ):
            raise ValueError(
                "Lengths of roots, transforms, challenges, sample_rates do not match"
            )
        self.datasets = []
        self.examples: List[Tuple[Path, int, Dict[str, object]]] = []
        for i in range(len(roots)):
            self.datasets.append(
                SliceDataset(
                    root=roots[i],
                    transform=transforms[i],
                    challenge=challenges[i],
                    sample_rate=sample_rates[i],
                    volume_sample_rate=volume_sample_rates[i],
                    use_dataset_cache=use_dataset_cache,
                    dataset_cache_file=dataset_cache_file,
                    num_cols=num_cols,
                )
            )
            self.examples = self.examples + self.datasets[-1].examples
    def __len__(self):
        return sum(len(dataset) for dataset in self.datasets)
    def __getitem__(self, i):
        # Walk the child datasets in order; decrement i until it falls
        # within the dataset that contains it.
        # NOTE(review): an index >= len(self) falls off the end and
        # implicitly returns None — callers must keep i in range.
        for dataset in self.datasets:
            if i < len(dataset):
                return dataset[i]
            else:
                i = i - len(dataset)
class SliceDataset(torch.utils.data.Dataset):
"""
A PyTorch Dataset that provides access to MR image slices.
"""
def __init__(
self,
root: Union[str, Path, os.PathLike],
challenge: str,
transform: Optional[Callable] = None,
use_dataset_cache: bool = False,
sample_rate: Optional[float] = None,
volume_sample_rate: Optional[float] = None,
dataset_cache_file: Union[str, Path, os.PathLike] = "dataset_cache.pkl",
num_cols: Optional[Tuple[int]] = None,
):
"""
Args:
root: Path to the dataset.
challenge: "singlecoil" or "multicoil" depending on which challenge
to use.
transform: Optional; A callable object that pre-processes the raw
data into appropriate form. The transform function should take
'kspace', 'target', 'attributes', 'filename', and 'slice' as
inputs. 'target' may be null for test data.
use_dataset_cache: Whether to cache dataset metadata. This is very
useful for large datasets like the brain data.
sample_rate: Optional; A float between 0 and 1. This controls what fraction
of the slices should be loaded. Defaults to 1 if no value is given.
When creating a sampled dataset either set sample_rate (sample by slices)
or volume_sample_rate (sample by volumes) but not both.
volume_sample_rate: Optional; A float between 0 and 1. This controls what fraction
of the volumes should be loaded. Defaults to 1 if no value is given.
When creating a sampled dataset either set sample_rate (sample by slices)
or volume_sample_rate (sample by volumes) but not both.
dataset_cache_file: Optional; A file in which to cache dataset
information for faster load times.
num_cols: Optional; If provided, only slices with the desired
number of columns will be considered.
"""
if challenge not in ("singlecoil", "multicoil"):
raise ValueError('challenge should be either "singlecoil" or "multicoil"')
if sample_rate is not None and volume_sample_rate is not None:
raise ValueError(
"either set sample_rate (sample by slices) or volume_sample_rate (sample by volumes) but not both"
)
self.dataset_cache_file = Path(dataset_cache_file)
self.transform = transform
self.recons_key = (
"reconstruction_esc" if challenge == "singlecoil" else "reconstruction_rss"
)
self.examples = []
# set default sampling mode if none given
if sample_rate is None:
sample_rate = 1.0
if volume_sample_rate is None:
volume_sample_rate = 1.0
# load dataset cache if we have and user wants to use it
if self.dataset_cache_file.exists() and use_dataset_cache:
with open(self.dataset_cache_file, "rb") as f:
dataset_cache = pickle.load(f)
else:
dataset_cache = {}
# check if our dataset is in the cache
# if there, use that metadata, if not, then regenerate the metadata
if dataset_cache.get(root) is None or not use_dataset_cache:
files = list(Path(root).iterdir())
for fname in sorted(files):
metadata, num_slices = self._retrieve_metadata(fname)
self.examples += [
(fname, slice_ind, metadata) for slice_ind in range(num_slices)
]
if dataset_cache.get(root) is None and use_dataset_cache:
dataset_cache[root] = self.examples
logging.info(f"Saving dataset cache to {self.dataset_cache_file}.")
with open(self.dataset_cache_file, "wb") as f:
pickle.dump(dataset_cache, f)
else:
logging.info(f"Using dataset cache from {self.dataset_cache_file}.")
self.examples = dataset_cache[root]
# subsample if desired
if sample_rate < 1.0: # sample by slice
random.shuffle(self.examples)
num_examples = round(len(self.examples) * sample_rate)
self.examples = self.examples[:num_examples]
elif volume_sample_rate < 1.0: # sample by volume
vol_names = sorted(list(set([f[0].stem for f in self.examples])))
random.shuffle(vol_names)
num_volumes = round(len(vol_names) * volume_sample_rate)
sampled_vols = vol_names[:num_volumes]
self.examples = [
example for example in self.examples if example[0].stem in sampled_vols
]
if num_cols:
self.examples = [
ex
for ex in self.examples
if ex[2]["encoding_size"][1] in num_cols # type: ignore
]
    def _retrieve_metadata(self, fname):
        """Read per-volume metadata from an HDF5 file's ISMRMRD XML header.

        Args:
            fname: Path to the HDF5 volume.

        Returns:
            Tuple ``(metadata, num_slices)`` where ``metadata`` maps
            "padding_left"/"padding_right" (acquired k-space column bounds)
            and "encoding_size"/"recon_size" (matrix sizes), and
            ``num_slices`` is the first dimension of the k-space array.
        """
        with h5py.File(fname, "r") as hf:
            et_root = etree.fromstring(hf["ismrmrd_header"][()])
            # encoded k-space matrix size (x, y, z)
            enc = ["encoding", "encodedSpace", "matrixSize"]
            enc_size = (
                int(et_query(et_root, enc + ["x"])),
                int(et_query(et_root, enc + ["y"])),
                int(et_query(et_root, enc + ["z"])),
            )
            # reconstruction-space matrix size (x, y, z)
            rec = ["encoding", "reconSpace", "matrixSize"]
            recon_size = (
                int(et_query(et_root, rec + ["x"])),
                int(et_query(et_root, rec + ["y"])),
                int(et_query(et_root, rec + ["z"])),
            )
            # phase-encode limits give the acquired column range;
            # "maximum" is an inclusive index, hence the +1
            lims = ["encoding", "encodingLimits", "kspace_encoding_step_1"]
            enc_limits_center = int(et_query(et_root, lims + ["center"]))
            enc_limits_max = int(et_query(et_root, lims + ["maximum"])) + 1
            # columns outside [padding_left, padding_right) were not acquired
            padding_left = enc_size[1] // 2 - enc_limits_center
            padding_right = padding_left + enc_limits_max
            num_slices = hf["kspace"].shape[0]
        metadata = {
            "padding_left": padding_left,
            "padding_right": padding_right,
            "encoding_size": enc_size,
            "recon_size": recon_size,
        }
        return metadata, num_slices
    def __len__(self):
        """Return the number of (file, slice, metadata) examples."""
        return len(self.examples)
    def __getitem__(self, i: int):
        """Load one example: a single k-space slice plus its metadata.

        Returns either the raw tuple ``(kspace, mask, target, attrs,
        fname.name, dataslice)`` or, when a transform was supplied at
        construction, the transform applied to those same arguments.
        """
        fname, dataslice, metadata = self.examples[i]
        with h5py.File(fname, "r") as hf:
            kspace = hf["kspace"][dataslice]
            # mask/target are optional datasets; not every file contains them
            mask = np.asarray(hf["mask"]) if "mask" in hf else None
            target = hf[self.recons_key][dataslice] if self.recons_key in hf else None
            # file-level attributes, augmented with the cached header metadata
            attrs = dict(hf.attrs)
            attrs.update(metadata)
        if self.transform is None:
            sample = (kspace, mask, target, attrs, fname.name, dataslice)
        else:
            sample = self.transform(kspace, mask, target, attrs, fname.name, dataslice)
        return sample
| 13,630 | 36.759003 | 116 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/data/subsample.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import contextlib
from typing import Optional, Sequence, Tuple, Union
import numpy as np
import torch
@contextlib.contextmanager
def temp_seed(rng: np.random.RandomState, seed: Optional[Union[int, Tuple[int, ...]]]):
    """Context manager that temporarily reseeds ``rng``.

    If ``seed`` is None the generator is left untouched. Otherwise its state
    is saved, the generator is reseeded for the duration of the ``with``
    block, and the original state is restored on exit (even on exception),
    so later draws are unaffected by the temporary seed.

    Args:
        rng: The random state to reseed. (Fixed annotation: the original
            annotated this as the ``np.random`` module, not a type.)
        seed: Seed value, or None to leave ``rng`` unchanged.
    """
    if seed is None:
        try:
            yield
        finally:
            pass
    else:
        state = rng.get_state()
        rng.seed(seed)
        try:
            yield
        finally:
            # restore the pre-existing state even if the body raised
            rng.set_state(state)
class MaskFunc:
    """
    An object for GRAPPA-style sampling masks.

    This creates a sampling mask that densely samples the center while
    subsampling outer k-space regions based on the undersampling factor.
    (Fixed docstring typo: "crates" -> "creates".)
    """

    def __init__(self, center_fractions: Sequence[float], accelerations: Sequence[int]):
        """
        Args:
            center_fractions: Fraction of low-frequency columns to be retained.
                If multiple values are provided, then one of these numbers is
                chosen uniformly each time.
            accelerations: Amount of under-sampling. This should have the same
                length as center_fractions. If multiple values are provided,
                then one of these is chosen uniformly each time.

        Raises:
            ValueError: If ``center_fractions`` and ``accelerations`` differ
                in length.
        """
        if len(center_fractions) != len(accelerations):
            raise ValueError(
                "Number of center fractions should match number of accelerations"
            )
        self.center_fractions = center_fractions
        self.accelerations = accelerations
        self.rng = np.random.RandomState()  # pylint: disable=no-member

    def __call__(
        self, shape: Sequence[int], seed: Optional[Union[int, Tuple[int, ...]]] = None
    ) -> torch.Tensor:
        """Produce a mask for the given shape; implemented by subclasses."""
        raise NotImplementedError

    def choose_acceleration(self):
        """Uniformly pick one (center_fraction, acceleration) pair."""
        choice = self.rng.randint(0, len(self.accelerations))
        center_fraction = self.center_fractions[choice]
        acceleration = self.accelerations[choice]
        return center_fraction, acceleration
class RandomMaskFunc(MaskFunc):
    """
    Random Cartesian sub-sampling mask.

    For k-space with N columns the mask keeps:

    1. N_low_freqs = round(N * center_fraction) contiguous center columns
       (the low frequencies).
    2. Every other column independently with probability
       prob = (N / acceleration - N_low_freqs) / (N - N_low_freqs), so the
       expected number of kept columns is N / acceleration.

    When several (center_fraction, acceleration) pairs were supplied to the
    constructor, one pair is drawn uniformly at random on every call. E.g.
    with accelerations=[4, 8] and center_fractions=[0.08, 0.04], each of the
    two settings is used with probability 1/2.
    """

    def __call__(
        self, shape: Sequence[int], seed: Optional[Union[int, Tuple[int, ...]]] = None
    ) -> torch.Tensor:
        """
        Create the mask.

        Args:
            shape: Shape of the mask to create; needs at least 3 dimensions.
                Columns are drawn along the second-to-last dimension.
            seed: Optional seed so the same shape always yields the same
                mask; the RNG state is restored afterwards.

        Returns:
            A mask of the specified shape (singleton in all dims except -2).
        """
        if len(shape) < 3:
            raise ValueError("Shape should have 3 or more dimensions")
        with temp_seed(self.rng, seed):
            n_cols = shape[-2]
            frac, accel = self.choose_acceleration()
            n_center = int(round(n_cols * frac))
            # probability for the outer columns so that the expected total
            # number of kept columns equals n_cols / accel
            keep_prob = (n_cols / accel - n_center) / (n_cols - n_center)
            col_mask = self.rng.uniform(size=n_cols) < keep_prob
            # force the centered low-frequency band on
            left = (n_cols - n_center + 1) // 2
            col_mask[left : left + n_center] = True
            # broadcastable shape: singleton everywhere except the column dim
            broadcast_shape = [1] * len(shape)
            broadcast_shape[-2] = n_cols
            out = torch.from_numpy(
                col_mask.reshape(*broadcast_shape).astype(np.float32)
            )
        return out
class EquispacedMaskFunc(MaskFunc):
    """
    Equispaced Cartesian sub-sampling mask.

    For k-space with N columns the mask keeps:

    1. N_low_freqs = round(N * center_fraction) contiguous center columns
       corresponding to low frequencies.
    2. Further columns at (approximately) equal spacing, with the spacing
       adjusted for the densely sampled center so the expected number of
       kept columns equals N / acceleration.

    When several (center_fraction, acceleration) pairs were supplied to the
    constructor, one pair is drawn uniformly at random on every call.

    Note that this function may not give exactly equispaced samples
    (documented in https://github.com/facebookresearch/fastMRI/issues/54),
    which will require modifications to standard GRAPPA approaches. This
    behavior is preserved to match the public multicoil data.
    """

    def __call__(
        self, shape: Sequence[int], seed: Optional[Union[int, Tuple[int, ...]]] = None
    ) -> torch.Tensor:
        """
        Args:
            shape: Shape of the mask to create; needs at least 3 dimensions.
                Columns are drawn along the second-to-last dimension.
            seed: Optional seed so the same shape always yields the same
                mask; the RNG state is restored afterwards.

        Returns:
            A mask of the specified shape (singleton in all dims except -2).
        """
        if len(shape) < 3:
            raise ValueError("Shape should have 3 or more dimensions")
        with temp_seed(self.rng, seed):
            frac, accel = self.choose_acceleration()
            n_cols = shape[-2]
            n_center = int(round(n_cols * frac))
            # start with everything off, then switch on the center band
            col_mask = np.zeros(n_cols, dtype=np.float32)
            left = (n_cols - n_center + 1) // 2
            col_mask[left : left + n_center] = True
            # spacing between sampled outer columns, corrected for the
            # densely sampled center region
            spacing = (accel * (n_center - n_cols)) / (n_center * accel - n_cols)
            start = self.rng.randint(0, round(spacing))
            picks = np.around(np.arange(start, n_cols - 1, spacing)).astype(np.uint)
            col_mask[picks] = True
            # broadcastable shape: singleton everywhere except the column dim
            broadcast_shape = [1] * len(shape)
            broadcast_shape[-2] = n_cols
            out = torch.from_numpy(
                col_mask.reshape(*broadcast_shape).astype(np.float32)
            )
        return out
def create_mask_for_mask_type(
    mask_type_str: str,
    center_fractions: Sequence[float],
    accelerations: Sequence[int],
) -> MaskFunc:
    """
    Creates a mask function of the specified type.

    Args:
        mask_type_str: Which mask to build, either "random" or "equispaced".
        center_fractions: What fraction of the center of k-space to include.
        accelerations: What accelerations to apply.

    Returns:
        A ``MaskFunc`` subclass instance of the requested type.

    Raises:
        ValueError: If ``mask_type_str`` is not a supported mask type.
    """
    if mask_type_str == "random":
        return RandomMaskFunc(center_fractions, accelerations)
    elif mask_type_str == "equispaced":
        return EquispacedMaskFunc(center_fractions, accelerations)
    else:
        # ValueError is more precise than a bare Exception and remains
        # backward-compatible for callers that catch Exception
        raise ValueError(f"{mask_type_str} not supported")
| 8,448 | 36.887892 | 88 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/data/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .mri_data import SliceDataset, CombinedSliceDataset
from .volume_sampler import VolumeSampler
| 278 | 26.9 | 63 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/data/transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Dict, Optional, Sequence, Tuple, Union
import fastmri
import numpy as np
import torch
from .subsample import MaskFunc
def to_tensor(data: np.ndarray) -> torch.Tensor:
    """
    Convert a numpy array to a PyTorch tensor.

    Complex inputs become real tensors whose last dimension (size 2) holds
    the real and imaginary parts.

    Args:
        data: Input numpy array (real or complex).

    Returns:
        PyTorch version of the data.
    """
    array = data
    if np.iscomplexobj(array):
        # split complex values into a trailing (real, imag) axis
        array = np.stack((array.real, array.imag), axis=-1)
    return torch.from_numpy(array)
def tensor_to_complex_np(data: torch.Tensor) -> np.ndarray:
    """
    Convert a real (…, 2) torch tensor back to a complex numpy array.

    Args:
        data: Tensor whose last dimension holds (real, imag) pairs.

    Returns:
        Complex numpy version of the data (last dimension collapsed).
    """
    arr = data.numpy()
    real, imag = arr[..., 0], arr[..., 1]
    return real + 1j * imag
def apply_mask(
    data: torch.Tensor,
    mask_func: MaskFunc,
    seed: Optional[Union[int, Tuple[int, ...]]] = None,
    padding: Optional[Sequence[int]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Subsample the given k-space by multiplying it with a generated mask.

    Args:
        data: Input k-space with at least 3 dimensions; dimensions -3 and -2
            are spatial and the last dimension (size 2) holds real/imaginary
            parts.
        mask_func: Callable mapping a shape (tuple of ints) and a seed to a
            sampling mask.
        seed: Seed forwarded to ``mask_func``.
        padding: Optional (left, right) column bounds; mask columns before
            ``left`` and from ``right`` on are zeroed.

    Returns:
        tuple containing:
            masked data: Subsampled k-space data
            mask: The generated mask
    """
    mask_shape = np.array(data.shape)
    mask_shape[:-3] = 1  # mask broadcasts over all leading (e.g. coil) dims
    mask = mask_func(mask_shape, seed)
    if padding is not None:
        mask[:, :, : padding[0]] = 0
        mask[:, :, padding[1] :] = 0  # right bound is exclusive
    # "+ 0.0" removes the sign of zeros produced by the masking
    masked_data = data * mask + 0.0
    return masked_data, mask
def mask_center(x: torch.Tensor, mask_from: int, mask_to: int) -> torch.Tensor:
    """
    Build a tensor that keeps only the center band of ``x``.

    Args:
        x: Input tensor; the band is selected along its fourth dimension.
        mask_from: First index of the center band to keep.
        mask_to: One past the last index of the center band.

    Returns:
        A zero tensor shaped like ``x`` with indices ``mask_from:mask_to``
        of the fourth dimension copied from ``x``.
    """
    out = torch.zeros_like(x)
    out[:, :, :, mask_from:mask_to] = x[:, :, :, mask_from:mask_to]
    return out
def center_crop(data: torch.Tensor, shape: Tuple[int, int]) -> torch.Tensor:
    """
    Apply a center crop to the input real image or batch of real images.

    Args:
        data: Tensor with at least 2 dimensions; cropping is applied along
            the last two dimensions.
        shape: Target (height, width); each entry must be positive and no
            larger than the corresponding dimension of ``data``.

    Returns:
        The center-cropped image.

    Raises:
        ValueError: If ``shape`` is non-positive or exceeds ``data``'s size.
    """
    target_h, target_w = shape
    if not (0 < target_h <= data.shape[-2]) or not (0 < target_w <= data.shape[-1]):
        raise ValueError("Invalid shapes.")
    top = (data.shape[-2] - target_h) // 2
    left = (data.shape[-1] - target_w) // 2
    return data[..., top : top + target_h, left : left + target_w]
def complex_center_crop(data: torch.Tensor, shape: Tuple[int, int]) -> torch.Tensor:
    """
    Apply a center crop to the input image or batch of complex images.

    Args:
        data: Complex tensor with at least 3 dimensions; cropping is applied
            along dimensions -3 and -2, and the last dimension (size 2)
            holds real/imaginary parts.
        shape: Target (height, width); each entry must be positive and no
            larger than the corresponding dimension of ``data``.

    Returns:
        The center-cropped image.

    Raises:
        ValueError: If ``shape`` is non-positive or exceeds ``data``'s size.
    """
    target_h, target_w = shape
    if not (0 < target_h <= data.shape[-3]) or not (0 < target_w <= data.shape[-2]):
        raise ValueError("Invalid shapes.")
    top = (data.shape[-3] - target_h) // 2
    left = (data.shape[-2] - target_w) // 2
    return data[..., top : top + target_h, left : left + target_w, :]
def center_crop_to_smallest(
    x: torch.Tensor, y: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Center-crop both inputs to their shared minimal size.

    The target height is the minimum over dim -2 and the target width the
    minimum over dim -1, so the result can mix one dimension from each
    input (e.g. x smaller in width, y smaller in height).

    Args:
        x: The first image.
        y: The second image.

    Returns:
        The pair (x, y), each center-cropped to the common minimal size.
    """
    target = (min(x.shape[-2], y.shape[-2]), min(x.shape[-1], y.shape[-1]))
    return center_crop(x, target), center_crop(y, target)
def normalize(
    data: torch.Tensor,
    mean: Union[float, torch.Tensor],
    stddev: Union[float, torch.Tensor],
    eps: Union[float, torch.Tensor] = 0.0,
) -> torch.Tensor:
    """
    Standardize ``data`` as (data - mean) / (stddev + eps).

    Args:
        data: Input data to be normalized.
        mean: Value subtracted from every element.
        stddev: Scale divisor.
        eps: Added to ``stddev`` to prevent dividing by zero.

    Returns:
        The normalized tensor.
    """
    denom = stddev + eps
    return (data - mean) / denom
def normalize_instance(
    data: torch.Tensor, eps: Union[float, torch.Tensor] = 0.0
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Normalize the given tensor with instance norm.

    Applies the formula (data - mean) / (stddev + eps), where mean and
    stddev are computed from ``data`` itself. (Fixed the malformed return
    annotation ``Tuple[..., Union[torch.Tensor], Union[torch.Tensor]]`` and
    a docstring typo.)

    Args:
        data: Input data to be normalized.
        eps: Added to stddev to prevent dividing by zero.

    Returns:
        Tuple of (normalized tensor, mean, stddev).
    """
    mean = data.mean()
    std = data.std()
    # standardize with the instance statistics computed above
    return (data - mean) / (std + eps), mean, std
class UnetDataTransform:
    """
    Data Transformer for training U-Net models.

    Turns raw k-space (plus optional mask/target) into the normalized,
    cropped image tensors consumed by the U-Net.
    """
    def __init__(
        self,
        which_challenge: str,
        mask_func: Optional[MaskFunc] = None,
        use_seed: bool = True,
    ):
        """
        Args:
            which_challenge: Challenge from ("singlecoil", "multicoil").
            mask_func: Optional; A function that can create a mask of
                appropriate shape.
            use_seed: If true, this class computes a pseudo random number
                generator seed from the filename. This ensures that the same
                mask is used for all the slices of a given volume every time.

        Raises:
            ValueError: If ``which_challenge`` is not a recognized name.
        """
        if which_challenge not in ("singlecoil", "multicoil"):
            raise ValueError("Challenge should either be 'singlecoil' or 'multicoil'")
        self.mask_func = mask_func
        self.which_challenge = which_challenge
        self.use_seed = use_seed
    def __call__(
        self,
        kspace: np.ndarray,
        mask: np.ndarray,
        target: np.ndarray,
        attrs: Dict,
        fname: str,
        slice_num: int,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, str, int, float]:
        """
        Args:
            kspace: Input k-space of shape (num_coils, rows, cols) for
                multi-coil data or (rows, cols) for single coil data.
            mask: Mask from the test dataset.
            target: Target image.
            attrs: Acquisition related information stored in the HDF5 object.
            fname: File name.
            slice_num: Serial number of the slice.
        Returns:
            tuple containing:
                image: Zero-filled input image.
                target: Target image converted to a torch.Tensor.
                mean: Mean value used for normalization.
                std: Standard deviation value used for normalization.
                fname: File name.
                slice_num: Serial number of the slice.
                max_value: Maximum image value from attrs (0.0 if absent).
        """
        kspace = to_tensor(kspace)
        # check for max value (test files may not carry a "max" attribute)
        max_value = attrs["max"] if "max" in attrs.keys() else 0.0
        # apply mask; same filename -> same seed -> same mask per volume
        if self.mask_func:
            seed = None if not self.use_seed else tuple(map(ord, fname))
            masked_kspace, mask = apply_mask(kspace, self.mask_func, seed)
        else:
            masked_kspace = kspace
        # inverse Fourier transform to get zero filled solution
        image = fastmri.ifft2c(masked_kspace)
        # crop input to correct size: prefer the target's spatial size,
        # otherwise fall back to the recon size from the header metadata
        if target is not None:
            crop_size = (target.shape[-2], target.shape[-1])
        else:
            crop_size = (attrs["recon_size"][0], attrs["recon_size"][1])
        # check for FLAIR 203 (image narrower than the requested crop;
        # presumably a known acquisition quirk -- fall back to a square crop)
        if image.shape[-2] < crop_size[1]:
            crop_size = (image.shape[-2], image.shape[-2])
        image = complex_center_crop(image, crop_size)
        # absolute value (complex magnitude)
        image = fastmri.complex_abs(image)
        # apply Root-Sum-of-Squares if multicoil data
        if self.which_challenge == "multicoil":
            image = fastmri.rss(image)
        # normalize input with its own statistics, then clip outliers
        image, mean, std = normalize_instance(image, eps=1e-11)
        image = image.clamp(-6, 6)
        # normalize target with the INPUT's mean/std so both stay comparable
        if target is not None:
            target = to_tensor(target)
            target = center_crop(target, crop_size)
            target = normalize(target, mean, std, eps=1e-11)
            target = target.clamp(-6, 6)
        else:
            target = torch.Tensor([0])
        return image, target, mean, std, fname, slice_num, max_value
class VarNetDataTransform:
    """
    Data Transformer for training VarNet models.

    Produces the (masked k-space, mask, target, ...) tuple consumed by the
    VarNet training loop.
    """

    def __init__(self, mask_func: Optional[MaskFunc] = None, use_seed: bool = True):
        """
        Args:
            mask_func: Optional; A function that can create a mask of
                appropriate shape. Defaults to None.
            use_seed: If True, this class computes a pseudo random number
                generator seed from the filename. This ensures that the same
                mask is used for all the slices of a given volume every time.
        """
        self.mask_func = mask_func
        self.use_seed = use_seed

    def __call__(
        self,
        kspace: np.ndarray,
        mask: np.ndarray,
        target: np.ndarray,
        attrs: Dict,
        fname: str,
        slice_num: int,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, str, int, float, torch.Tensor]:
        """
        Args:
            kspace: Input k-space of shape (num_coils, rows, cols) for
                multi-coil data.
            mask: Mask from the test dataset; used only when no ``mask_func``
                was supplied.
            target: Target image.
            attrs: Acquisition related information stored in the HDF5 object.
            fname: File name.
            slice_num: Serial number of the slice.

        Returns:
            tuple containing:
                masked_kspace: k-space after applying sampling mask.
                mask: The applied sampling mask (as a byte tensor).
                target: The target image (if applicable).
                fname: File name.
                slice_num: The slice index.
                max_value: Maximum image value.
                crop_size: The size to crop the final image.
        """
        if target is not None:
            target = to_tensor(target)
            max_value = attrs["max"]
        else:
            target = torch.tensor(0)
            max_value = 0.0
        kspace = to_tensor(kspace)
        # same filename -> same seed -> same mask for all slices of a volume
        seed = None if not self.use_seed else tuple(map(ord, fname))
        acq_start = attrs["padding_left"]
        acq_end = attrs["padding_right"]
        crop_size = torch.tensor([attrs["recon_size"][0], attrs["recon_size"][1]])
        if self.mask_func:
            masked_kspace, mask = apply_mask(
                kspace, self.mask_func, seed, (acq_start, acq_end)
            )
        else:
            masked_kspace = kspace
            # Reshape the dataset-provided 1D column mask so it broadcasts
            # over k-space, then zero the unacquired padding columns.
            # (Removed a dead `shape[:-3] = 1` mutation and a redundant
            # second `mask.reshape(*mask_shape)` from the original.)
            num_cols = kspace.shape[-2]
            mask_shape = [1] * kspace.dim()
            mask_shape[-2] = num_cols
            mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
            mask[:, :, :acq_start] = 0
            mask[:, :, acq_end:] = 0
        return (
            masked_kspace,
            mask.byte(),
            target,
            fname,
            slice_num,
            max_value,
            crop_size,
        )
| 12,887 | 30.205811 | 88 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/networks/img2img_mixer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import torch.nn.init as init
import numpy as np
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbedding(nn.Module):
    """Split an image into non-overlapping patches and embed each one.

    A convolution whose kernel size equals its stride is exactly a
    per-patch linear projection; the result is then rearranged to
    channels-last layout (b, h, w, c).
    """

    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        projection = nn.Conv2d(
            in_channels=channels,
            out_channels=embed_dim,
            kernel_size=patch_size,
            stride=patch_size
        )
        # keep the attribute name "proj" so checkpoints stay compatible
        self.proj = nn.Sequential(projection, Rearrange("b c h w -> b h w c"))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Embed patches of ``x``: (b, c, H, W) -> (b, H/p, W/p, embed_dim)."""
        return self.proj(x)
class PatchExpansion(nn.Module):
    """Upsample a channels-last patch grid back to image resolution.

    Inverse of patch embedding: a linear layer expands each embedding by
    dim_scale**2, the extra factor is rearranged into spatial positions,
    the result is layer-normalized, and a 1x1 convolution maps the
    embedding channels down to the image channel count.
    """
    def __init__(self, dim_scale, channel_dim, img_channels, norm_layer=nn.LayerNorm):
        super().__init__()
        # spatial upscaling factor per axis (the original patch size)
        self.dim_scale = dim_scale
        # expand C -> dim_scale**2 * C so each position can be split into
        # a dim_scale x dim_scale block of C-dim vectors
        self.expand = nn.Linear(channel_dim, dim_scale**2* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        # 1x1 conv: embedding channels -> image channels
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)
    def forward(self, x):
        """
        x: B, H, W, C

        Returns a channels-first tensor of shape
        (B, img_channels, H * dim_scale, W * dim_scale).
        """
        x = self.expand(x)
        B, H, W, C = x.shape
        # distribute the expanded channel factor into spatial positions;
        # after this, the channel dim is back to C // dim_scale**2
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
        # flatten spatial dims so LayerNorm acts per position over channels
        x = x.view(B,-1,self.output_dim)
        x= self.norm(x)
        # restore the upscaled spatial grid, then go channels-first for conv
        x = x.view(B,H*self.dim_scale, W*self.dim_scale,-1)
        x = x.permute(0,3,1,2)
        x = self.output(x)
        return x
class MLPBlock(nn.Module):
    """Two-layer perceptron with a GELU nonlinearity in between.

    Maps the last dimension input_dim -> hidden_dim -> input_dim.
    """

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim),
        ]
        # keep the attribute name "model" so checkpoints stay compatible
        self.model = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the MLP along the last dimension of ``x``."""
        return self.model(x)
class Mixer(nn.Module):
    """One mixer block: spatial (token) mixing then channel mixing,
    each applied with a residual connection.

    Token mixing normalizes over channels, then runs an MLP along the
    patch-grid height and another along the width (each axis is rotated
    into the last position so the MLP can act on it).
    """

    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        f_hidden: int
    ):
        super().__init__()
        # attribute names preserved for checkpoint compatibility
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),
            MLPBlock(num_patches, num_patches * f_hidden),
            Rearrange("b c w h -> b c h w"),
            MLPBlock(num_patches, num_patches * f_hidden),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels * f_hidden),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply token mixing and channel mixing, each residually."""
        x = self.token_mixing(x) + x
        return self.channel_mixing(x) + x
class Img2Img_Mixer(nn.Module):
    """Image-to-image MLP-Mixer.

    Pipeline: patch embedding -> ``num_layers`` mixer blocks -> patch
    expansion back to the input resolution and channel count.
    """

    def __init__(
        self,
        img_size: int = 320,
        img_channels: int = 1,
        patch_size: int = 4,
        embed_dim: int = 128,
        num_layers: int = 16,
        f_hidden: int = 8,
    ):
        super().__init__()
        # side length of the patch grid each mixer block operates on
        grid_size = img_size // patch_size
        # attribute names preserved for checkpoint compatibility
        self.patch_embed = PatchEmbedding(patch_size, embed_dim, img_channels)
        self.mixer_layers = nn.Sequential(
            *(Mixer(grid_size, embed_dim, f_hidden) for _ in range(num_layers))
        )
        self.patch_expand = PatchExpansion(patch_size, embed_dim, img_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map an image batch through embed -> mix -> expand."""
        embedded = self.patch_embed(x)
        mixed = self.mixer_layers(embedded)
        return self.patch_expand(mixed)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.