repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
mmvae-public | mmvae-public/src/report/analyse_ms.py | """Calculate cross and joint coherence of trained model on MNIST-SVHN dataset.
Train and evaluate a linear model for latent space digit classification."""
import argparse
import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
# relative import hacks (sorry)
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir) # for bash user
os.chdir(parentdir) # for pycharm user
import models
from helper import Latent_Classifier, SVHN_Classifier, MNIST_Classifier
from utils import Logger, Timer
torch.backends.cudnn.benchmark = True  # autotune conv kernels for fixed input sizes
parser = argparse.ArgumentParser(description='Analysing MM-DGM results')
parser.add_argument('--save-dir', type=str, default="",
                    metavar='N', help='save directory of results')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA use')
cmds = parser.parse_args()
runPath = cmds.save_dir
# Tee all prints into <save-dir>/ms_acc.log via the project Logger.
sys.stdout = Logger('{}/ms_acc.log'.format(runPath))
# Restore the argparse Namespace saved at training time.
args = torch.load(runPath + '/args.rar')
# cuda stuff: if the model was trained on GPU but we run with --no-cuda,
# remap checkpoint tensors onto CPU at load time.
needs_conversion = cmds.no_cuda and args.cuda
conversion_kwargs = {'map_location': lambda st, loc: st} if needs_conversion else {}
args.cuda = not cmds.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
torch.manual_seed(args.seed)
# Instantiate the model class named in the checkpoint args (e.g. VAE_mnist_svhn).
modelC = getattr(models, 'VAE_{}'.format(args.model))
model = modelC(args)
if args.cuda:
    model.cuda()
# strict=False: tolerate extra/missing keys between checkpoint and model.
model.load_state_dict(torch.load(runPath + '/model.rar', **conversion_kwargs), strict=False)
B = 256  # rough batch size heuristic
train_loader, test_loader = model.getDataLoaders(B, device=device)
N = len(test_loader.dataset)  # number of test examples, used for averaging
def classify_latents(epochs, option):
    """Train and evaluate a linear digit classifier on latent codes.

    Args:
        epochs: number of passes over the training set (scaled x10 for
            uni-modal models, which see fewer paired examples).
        option: which modality's encoder to probe ('mnist' or 'svhn').

    Latents are drawn from q(z|x) with the encoder frozen (no_grad); only
    the linear classifier head is optimised. Prints test accuracy at the end.
    """
    model.eval()
    vae = unpack_model(option)
    if '_' not in args.model:
        epochs *= 10  # account for the fact the mnist-svhn has more examples (roughly x10)
    classifier = Latent_Classifier(args.latent_dim, 10).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(classifier.parameters(), lr=0.001)
    for epoch in range(epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        total_iters = len(train_loader)
        print('\n====> Epoch: {:03d} '.format(epoch))
        for i, data in enumerate(train_loader):
            # get the inputs
            x, targets = unpack_data_mlp(data, option)
            x, targets = x.to(device), targets.to(device)
            # Encode without tracking gradients: the VAE stays fixed.
            with torch.no_grad():
                qz_x_params = vae.enc(x)
                zs = vae.qz_x(*qz_x_params).rsample()
            optimizer.zero_grad()
            outputs = classifier(zs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            # print statistics (running average over the last 1000 iterations)
            running_loss += loss.item()
            if (i + 1) % 1000 == 0:
                print('iteration {:04d}/{:d}: loss: {:6.3f}'.format(i + 1, total_iters, running_loss / 1000))
                running_loss = 0.0
    print('Finished Training, calculating test loss...')
    classifier.eval()
    total = 0
    correct = 0
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            x, targets = unpack_data_mlp(data, option)
            x, targets = x.to(device), targets.to(device)
            qz_x_params = vae.enc(x)
            zs = vae.qz_x(*qz_x_params).rsample()
            outputs = classifier(zs)
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += (predicted == targets).sum().item()
    print('The classifier correctly classified {} out of {} examples. Accuracy: '
          '{:.2f}%'.format(correct, total, correct / total * 100))
def _maybe_train_or_load_digit_classifier_img(path, epochs):
    """Return (mnist_net, svhn_net) image-space digit classifiers.

    `path` is a format template (e.g. "../data/{}_model.pt"). Any classifier
    whose checkpoint is missing is trained from scratch on the paired loader
    and saved; both are then loaded from disk and returned.
    """
    # Only train the modalities whose checkpoint file does not exist yet.
    options = [o for o in ['mnist', 'svhn'] if not os.path.exists(path.format(o))]
    for option in options:
        print("Cannot find trained {} digit classifier in {}, training...".
              format(option, path.format(option)))
        # Resolve MNIST_Classifier / SVHN_Classifier by name from module globals.
        classifier = globals()['{}_Classifier'.format(option.upper())]().to(device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(classifier.parameters(), lr=0.001)
        for epoch in range(epochs):  # loop over the dataset multiple times
            running_loss = 0.0
            total_iters = len(train_loader)
            print('\n====> Epoch: {:03d} '.format(epoch))
            for i, data in enumerate(train_loader):
                # get the inputs for this modality only
                x, targets = unpack_data_mlp(data, option)
                x, targets = x.to(device), targets.to(device)
                optimizer.zero_grad()
                outputs = classifier(x)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
                # print statistics (running average over the last 1000 iterations)
                running_loss += loss.item()
                if (i + 1) % 1000 == 0:
                    print('iteration {:04d}/{:d}: loss: {:6.3f}'.format(i + 1, total_iters, running_loss / 1000))
                    running_loss = 0.0
        print('Finished Training, calculating test loss...')
        classifier.eval()
        total = 0
        correct = 0
        with torch.no_grad():
            for i, data in enumerate(test_loader):
                x, targets = unpack_data_mlp(data, option)
                x, targets = x.to(device), targets.to(device)
                outputs = classifier(x)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('The classifier correctly classified {} out of {} examples. Accuracy: '
              '{:.2f}%'.format(correct, total, correct / total * 100))
        torch.save(classifier.state_dict(), path.format(option))
    # Load (freshly trained or pre-existing) weights for both classifiers.
    mnist_net, svhn_net = MNIST_Classifier().to(device), SVHN_Classifier().to(device)
    mnist_net.load_state_dict(torch.load(path.format('mnist')))
    svhn_net.load_state_dict(torch.load(path.format('svhn')))
    return mnist_net, svhn_net
def cross_coherence(epochs):
    """Measure cross-modal generation coherence on the test set.

    For each paired (mnist, svhn) batch, reconstruct each modality FROM the
    other modality's encoding and check the digit label is preserved, using
    pretrained image classifiers as oracles.
    """
    model.eval()
    mnist_net, svhn_net = _maybe_train_or_load_digit_classifier_img("../data/{}_model.pt", epochs=epochs)
    mnist_net.eval()
    svhn_net.eval()
    total = 0
    corr_m = 0  # SVHN -> MNIST agreement count
    corr_s = 0  # MNIST -> SVHN agreement count
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            mnist, svhn, targets = unpack_data_mlp(data, option='both')
            mnist, svhn, targets = mnist.to(device), svhn.to(device), targets.to(device)
            _, px_zs, _ = model([mnist, svhn], 1)
            # px_zs is indexed [encoder][decoder]: [1][0] is the MNIST
            # reconstruction from the SVHN encoding, [0][1] the converse
            # (despite what the local variable names suggest).
            mnist_mnist = mnist_net(px_zs[1][0].mean.squeeze(0))
            svhn_svhn = svhn_net(px_zs[0][1].mean.squeeze(0))
            _, pred_m = torch.max(mnist_mnist.data, 1)
            _, pred_s = torch.max(svhn_svhn.data, 1)
            total += targets.size(0)
            corr_m += (pred_m == targets).sum().item()
            corr_s += (pred_s == targets).sum().item()
    print('Cross coherence: \n SVHN -> MNIST {:.2f}% \n MNIST -> SVHN {:.2f}%'.format(
        corr_m / total * 100, corr_s / total * 100))
def joint_coherence():
    """Measure joint generation coherence.

    Samples 10000 latents from the prior, decodes both modalities, and
    reports how often the two pretrained digit classifiers agree on the
    label of the generated pair. Requires ../data/{mnist,svhn}_model.pt.
    """
    model.eval()
    mnist_net, svhn_net = MNIST_Classifier().to(device), SVHN_Classifier().to(device)
    mnist_net.load_state_dict(torch.load('../data/mnist_model.pt'))
    svhn_net.load_state_dict(torch.load('../data/svhn_model.pt'))
    mnist_net.eval()
    svhn_net.eval()
    total = 0
    corr = 0
    with torch.no_grad():
        pzs = model.pz(*model.pz_params).sample([10000])
        # dec returns likelihood parameters; [0] is the mean/loc component.
        mnist = model.vaes[0].dec(pzs)
        svhn = model.vaes[1].dec(pzs)
        mnist_mnist = mnist_net(mnist[0].squeeze(1))
        svhn_svhn = svhn_net(svhn[0].squeeze(1))
        _, pred_m = torch.max(mnist_mnist.data, 1)
        _, pred_s = torch.max(svhn_svhn.data, 1)
        total += pred_m.size(0)
        corr += (pred_m == pred_s).sum().item()
    print('Joint coherence: {:.2f}%'.format(corr / total * 100))
def unpack_data_mlp(dataB, option='both'):
    """Extract (input, target) tensors for the requested modality.

    A paired MNIST-SVHN batch arrives as [(mnist_x, mnist_y), (svhn_x, svhn_y)];
    single-modality batches (whose first element is not a 2-tuple) are returned
    unchanged. `option` selects 'mnist', 'svhn', or 'both' (both inputs plus
    the shared digit target).
    """
    if len(dataB[0]) != 2:
        # Not a paired batch -- pass it straight through.
        return dataB
    if option == 'both':
        return dataB[0][0], dataB[1][0], dataB[1][1]
    if option == 'svhn':
        return dataB[1][0], dataB[1][1]
    if option == 'mnist':
        return dataB[0][0], dataB[0][1]
def unpack_model(option='svhn'):
    """Return the uni-modal VAE for `option` ('mnist' or 'svhn') from a
    multi-modal model, or the model itself when it is already uni-modal."""
    if 'mnist_svhn' in args.model:
        # vaes[0] is MNIST, vaes[1] is SVHN in the paired model.
        return model.vaes[1] if option == 'svhn' else model.vaes[0]
    else:
        return model
if __name__ == '__main__':
    # Full analysis pipeline: latent probing, then cross/joint coherence.
    with Timer('MM-VAE analysis') as t:
        print('-' * 25 + 'latent classification accuracy' + '-' * 25)
        print("Calculating latent classification accuracy for single MNIST VAE...")
        classify_latents(epochs=30, option='mnist')
        print("\n Calculating latent classification accuracy for single SVHN VAE...")
        classify_latents(epochs=30, option='svhn')
        print('\n' + '-' * 45 + 'cross coherence' + '-' * 45)
        cross_coherence(epochs=30)
        print('\n' + '-' * 45 + 'joint coherence' + '-' * 45)
        joint_coherence()
| 9,192 | 36.831276 | 113 | py |
mmvae-public | mmvae-public/src/report/helper.py | import json
import os
import pickle
from collections import Counter, OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gensim.models import FastText
from nltk.tokenize import sent_tokenize, word_tokenize
from scipy.linalg import eig
from skimage.filters import threshold_yen as threshold
class OrderedCounter(Counter, OrderedDict):
    """Counter that remembers the order elements are first encountered."""

    def __repr__(self):
        # Show the underlying OrderedDict so insertion order is visible.
        return '{}({!r})'.format(type(self).__name__, OrderedDict(self))

    def __reduce__(self):
        # Pickle through an OrderedDict so ordering survives round-trips.
        return type(self), (OrderedDict(self),)
def cca(views, k=None, eps=1e-12):
    """Compute (multi-view) CCA
    Args:
        views (list): list of views where each view `v_i` is of size `N x o_i`
        k (int): joint projection dimension | if None, find using Otsu
        eps (float): regulariser [default: 1e-12]
    Returns:
        correlations: correlations along each of the k dimensions
        projections: projection matrices for each view
    """
    V = len(views)  # number of views
    N = views[0].size(0)  # number of observations (same across views)
    os = [v.size(1) for v in views]  # per-view dimensionalities
    kmax = np.min(os)
    ocum = np.cumsum([0] + os)  # block offsets into the stacked matrices
    os_sum = sum(os)
    # A holds cross-view covariances (off-diagonal blocks), B the
    # within-view covariances (block diagonal), for the generalised
    # eigenproblem A w = lambda B w.
    A, B = np.zeros([os_sum, os_sum]), np.zeros([os_sum, os_sum])
    for i in range(V):
        v_i = views[i]
        v_i_bar = v_i - v_i.mean(0).expand_as(v_i)  # centered, N x o_i
        C_ij = (1.0 / (N - 1)) * torch.mm(v_i_bar.t(), v_i_bar)
        # A[ocum[i]:ocum[i + 1], ocum[i]:ocum[i + 1]] = C_ij
        B[ocum[i]:ocum[i + 1], ocum[i]:ocum[i + 1]] = C_ij
        for j in range(i + 1, V):
            v_j = views[j]  # N x o_j
            v_j_bar = v_j - v_j.mean(0).expand_as(v_j)  # centered
            C_ij = (1.0 / (N - 1)) * torch.mm(v_i_bar.t(), v_j_bar)
            A[ocum[i]:ocum[i + 1], ocum[j]:ocum[j + 1]] = C_ij
            A[ocum[j]:ocum[j + 1], ocum[i]:ocum[i + 1]] = C_ij.t()
    # Tikhonov-style regularisation keeps B invertible.
    A[np.diag_indices_from(A)] += eps
    B[np.diag_indices_from(B)] += eps
    eigenvalues, eigenvectors = eig(A, B)
    # TODO: sanity check to see that all eigenvalues are e+0i
    idx = eigenvalues.argsort()[::-1]  # sort descending
    eigenvalues = eigenvalues[idx]  # arrange in descending order
    if k is None:
        # Auto-select k: Yen threshold on the spectrum, snapped to the
        # nearest multiple of 10.
        t = threshold(eigenvalues.real[:kmax])
        k = np.abs(np.asarray(eigenvalues.real[0::10]) - t).argmin() * 10  # closest k % 10 == 0 idx
        print('k unspecified, (auto-)choosing:', k)
    # NOTE(review): eigenvalues was already reindexed by `idx` above, so
    # indexing again with idx[:k] applies the permutation twice, while
    # eigenvectors (below) applies it once -- verify this is intended.
    eigenvalues = eigenvalues[idx[:k]]
    eigenvectors = eigenvectors[:, idx[:k]]
    correlations = torch.from_numpy(eigenvalues.real).type_as(views[0])
    # Split the stacked eigenvectors back into one projection per view.
    proj_matrices = torch.split(torch.from_numpy(eigenvectors.real).type_as(views[0]), os)
    return correlations, proj_matrices
def fetch_emb(lenWindow, minOccur, emb_path, vocab_path, RESET):
    """Return word embeddings for the CUB caption vocabulary.

    Trains a 300-d FastText model on the caption corpus (or loads a cached
    pickle from `emb_path` unless RESET). Rows 0-2 are reserved special
    tokens filled with constant vectors (-1, 0, 1).
    """
    if not os.path.exists(emb_path) or RESET:
        with open('../data/cub/text_trainvalclasses.txt', 'r') as file:
            text = file.read()
        sentences = sent_tokenize(text)
        texts = []
        for i, line in enumerate(sentences):
            words = word_tokenize(line)
            texts.append(words)
        # NOTE(review): `size=` and `model[word]` are gensim<4 APIs
        # (gensim>=4 renamed them to `vector_size=` / `model.wv[word]`).
        model = FastText(size=300, window=lenWindow, min_count=minOccur)
        model.build_vocab(sentences=texts)
        model.train(sentences=texts, total_examples=len(texts), epochs=10)
        with open(vocab_path, 'rb') as file:
            vocab = json.load(file)
        i2w = vocab['i2w']
        base = np.ones((300,), dtype=np.float32)
        # Special tokens (indices 0..2) get constant vectors -1, 0, 1.
        emb = [base * (i - 1) for i in range(3)]
        for word in list(i2w.values())[3:]:
            emb.append(model[word])
        emb = np.array(emb)
        with open(emb_path, 'wb') as file:
            pickle.dump(emb, file)
    else:
        with open(emb_path, 'rb') as file:
            emb = pickle.load(file)
    return emb
def fetch_weights(weights_path, vocab_path, RESET, a=1e-3):
    """Return SIF-style word weights a / (a + p(w)) for the caption vocab.

    Word frequencies are counted over the CUB caption corpus; all words
    missing from the vocabulary contribute to the weight at index 0.
    Cached to `weights_path` unless RESET.
    """
    if not os.path.exists(weights_path) or RESET:
        with open('../data/cub/text_trainvalclasses.txt', 'r') as file:
            text = file.read()
        sentences = sent_tokenize(text)
        occ_register = OrderedCounter()
        for i, line in enumerate(sentences):
            words = word_tokenize(line)
            occ_register.update(words)
        with open(vocab_path, 'r') as file:
            vocab = json.load(file)
        w2i = vocab['w2i']
        weights = np.zeros(len(w2i))
        total_occ = sum(list(occ_register.values()))
        exc_occ = 0  # mass of out-of-vocabulary words
        for w, occ in occ_register.items():
            if w in w2i.keys():
                weights[w2i[w]] = a / (a + occ / total_occ)
            else:
                exc_occ += occ
        # Index 0 absorbs all out-of-vocab frequency mass.
        weights[0] = a / (a + exc_occ / total_occ)
        with open(weights_path, 'wb') as file:
            pickle.dump(weights, file)
    else:
        with open(weights_path, 'rb') as file:
            weights = pickle.load(file)
    return weights
def fetch_pc(emb, weights, train_loader, pc_path, RESET):
    """Return the first-principal-component projection matrix u = v v^T of the
    weighted sentence embeddings (SIF common-component removal).

    Computed via SVD of the centred embedding matrix and cached at `pc_path`
    unless RESET.
    """
    # d[1][0] picks the sentence tensor out of each paired batch.
    sentences = torch.cat([d[1][0] for d in train_loader]).int()
    emb_dataset = apply_weights(emb, weights, sentences)
    if not os.path.exists(pc_path) or RESET:
        _, _, V = torch.svd(emb_dataset - emb_dataset.mean(dim=0), some=True)
        v = V[:, 0].unsqueeze(-1)  # top right-singular vector
        u = v.mm(v.t())  # rank-1 projector onto that direction
        with open(pc_path, 'wb') as file:
            pickle.dump(u, file)
    else:
        with open(pc_path, 'rb') as file:
            u = pickle.load(file)
    return u
def apply_weights(emb, weights, data):
    """Build one embedding per sentence as the weight-scaled mean of its
    word embeddings.

    `data` is a batch of index sequences; each sentence is truncated at the
    first occurrence of token id 2 (inclusive) -- presumably the <eos>
    marker, TODO confirm against the vocabulary.
    """
    fn_trun = lambda s: s[:np.where(s == 2)[0][0] + 1] if 2 in s else s
    batch_emb = []
    for sent_i in data:
        emb_stacked = torch.stack([emb[idx] for idx in fn_trun(sent_i)])
        weights_stacked = torch.stack([weights[idx] for idx in fn_trun(sent_i)])
        # Weighted sum divided by sentence length = weighted mean embedding.
        batch_emb.append(torch.sum(emb_stacked * weights_stacked.unsqueeze(-1), dim=0) / emb_stacked.shape[0])
    return torch.stack(batch_emb, dim=0)
def apply_pc(weighted_emb, u):
    """Remove the common component captured by the projection matrix `u`.

    For each row e of `weighted_emb` computes e - u e, processing the matrix
    in chunks of 2048 rows to bound peak memory.
    """
    corrected = []
    for chunk in weighted_emb.split(2048, 0):
        projected = torch.matmul(u, chunk.unsqueeze(-1)).squeeze()
        corrected.append(chunk - projected)
    return torch.cat(corrected)
class Latent_Classifier(nn.Module):
    """Linear probe mapping latent codes to class logits."""

    def __init__(self, in_n, out_n):
        super().__init__()
        # Single affine layer: in_n latent dims -> out_n classes.
        self.mlp = nn.Linear(in_n, out_n)

    def forward(self, x):
        return self.mlp(x)
class SVHN_Classifier(nn.Module):
    """Small CNN digit classifier for 3x32x32 SVHN images.

    Returns per-class log-probabilities (log-softmax over 10 digits).
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(500, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # 3x32x32 -> 10x14x14 -> 20x5x5 = 500 flat features
        h = F.relu(F.max_pool2d(self.conv1(x), 2))
        h = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(h)), 2))
        flat = h.view(-1, 500)
        hidden = F.dropout(F.relu(self.fc1(flat)), training=self.training)
        return F.log_softmax(self.fc2(hidden), dim=-1)
class MNIST_Classifier(nn.Module):
    """Small CNN digit classifier for 1x28x28 MNIST images.

    Returns per-class log-probabilities (log-softmax over 10 digits).
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # 1x28x28 -> 10x12x12 -> 20x4x4 = 320 flat features
        h = F.relu(F.max_pool2d(self.conv1(x), 2))
        h = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(h)), 2))
        flat = h.view(-1, 320)
        hidden = F.dropout(F.relu(self.fc1(flat)), training=self.training)
        return F.log_softmax(self.fc2(hidden), dim=-1)
| 7,712 | 32.977974 | 110 | py |
mmvae-public | mmvae-public/src/report/calculate_likelihoods.py | """Calculate data marginal likelihood p(x) evaluated on the trained generative model."""
import os
import sys
import argparse
import numpy as np
import torch
from torchvision.utils import save_image
# relative import hacks (sorry)
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir) # for bash user
os.chdir(parentdir) # for pycharm user
import models
from utils import Logger, Timer, unpack_data, log_mean_exp
torch.backends.cudnn.benchmark = True  # autotune conv kernels for fixed input sizes
parser = argparse.ArgumentParser(description='Analysing MM-DGM results')
parser.add_argument('--save-dir', type=str, default="",
                    metavar='N', help='save directory of results')
parser.add_argument('--iwae-samples', type=int, default=1000, metavar='I',
                    help='number of samples to estimate marginal log likelihood (default: 1000)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA use')
cmds = parser.parse_args()
runPath = cmds.save_dir
# Tee all prints into <save-dir>/llik.log via the project Logger.
sys.stdout = Logger('{}/llik.log'.format(runPath))
args = torch.load(runPath + '/args.rar')
# cuda stuff: remap checkpoint tensors to CPU when trained on GPU but run with --no-cuda.
needs_conversion = cmds.no_cuda and args.cuda
conversion_kwargs = {'map_location': lambda st, loc: st} if needs_conversion else {}
args.cuda = not cmds.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
torch.manual_seed(args.seed)
modelC = getattr(models, 'VAE_{}'.format(args.model))
model = modelC(args)
if args.cuda:
    model.cuda()
model.load_state_dict(torch.load(runPath + '/model.rar', **conversion_kwargs), strict=False)
# NOTE(review): integer division -- if --iwae-samples > 12000 this gives B = 0.
B = 12000 // cmds.iwae_samples  # rough batch size heuristic
train_loader, test_loader = model.getDataLoaders(B, device=device)
N = len(test_loader.dataset)  # number of test examples, used for averaging
def m_iwae(qz_xs, px_zs, zss, x):
    r"""IWAE estimate for log p_\theta(x) for multi-modal vae -- fully vectorised.

    qz_xs: per-modality posteriors; px_zs: matrix of decoders indexed
    [encoder][decoder]; zss: per-modality latent samples; x: list of inputs.
    Returns the summed (not averaged) bound over the batch.
    """
    lws = []
    for r, qz_x in enumerate(qz_xs):
        lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum(-1)
        # Mixture proposal: average density of zss[r] under ALL posteriors
        # (the inner qz_x deliberately shadows the loop variable).
        lqz_x = log_mean_exp(torch.stack([qz_x.log_prob(zss[r]).sum(-1) for qz_x in qz_xs]))
        # Per-decoder likelihoods, each rescaled by its modality weight.
        lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1)
                     .mul(model.vaes[d].llik_scaling).sum(-1)
                 for d, px_z in enumerate(px_zs[r])]
        lpx_z = torch.stack(lpx_z).sum(0)
        lw = lpz + lpx_z - lqz_x
        lws.append(lw)
    return log_mean_exp(torch.cat(lws)).sum()
def iwae(qz_x, px_z, zs, x):
    r"""IWAE estimate for log p_\theta(x) -- fully vectorised (uni-modal).

    Standard importance-weighted bound: log-mean-exp over the K samples in
    `zs`, summed over the batch.
    """
    lpz = model.pz(*model.pz_params).log_prob(zs).sum(-1)
    lpx_z = px_z.log_prob(x).view(*px_z.batch_shape[:2], -1) * model.llik_scaling
    lqz_x = qz_x.log_prob(zs).sum(-1)
    return log_mean_exp(lpz + lpx_z.sum(-1) - lqz_x).sum()
@torch.no_grad()
def joint_elbo(K):
    """Estimate the joint marginal log-likelihood on the test set via IWAE.

    Picks `m_iwae` for multi-modal models (those with a `vaes` attribute)
    and `iwae` otherwise, runs the model once per batch with K importance
    samples, and prints the per-datapoint bound.
    """
    model.eval()
    llik = 0
    # BUGFIX: the objective was looked up in locals() -- which cannot contain
    # module-level functions -- and was immediately called with no arguments.
    # Resolve it from globals() and keep it uncalled here.
    obj = globals()[('m_' if hasattr(model, 'vaes') else '') + 'iwae']
    for dataT in test_loader:
        data = unpack_data(dataT, device=device)
        # BUGFIX: both iwae and m_iwae take (posteriors, likelihoods, latents,
        # data), not (model, data, K) -- run the model first, as m_llik_eval does.
        outs = model(data, K)
        llik += obj(*outs, data).item()
    print('Marginal Log Likelihood of joint {} (IWAE, K = {}): {:.4f}'
          .format(model.modelName, K, llik / N))
def cross_iwaes(qz_xs, px_zs, zss, x):
    """Per-(encoder, decoder) IWAE bounds for cross-modal generation.

    Returns a V x V nested list where entry [e][d] is the bound for
    reconstructing modality d from modality e's posterior.
    """
    lws = []
    for e, _px_zs in enumerate(px_zs):  # rows are encoders
        lpz = model.pz(*model.pz_params).log_prob(zss[e]).sum(-1)
        lqz_x = qz_xs[e].log_prob(zss[e]).sum(-1)
        _lpx_zs = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1).sum(-1)
                   for d, px_z in enumerate(_px_zs)]
        lws.append([log_mean_exp(_lpx_z + lpz - lqz_x).sum() for _lpx_z in _lpx_zs])
    return lws
def individual_iwaes(qz_xs, px_zs, zss, x):
    """Per-modality IWAE bounds under the mixture proposal.

    For each decoder d, pools importance weights from every encoder's
    latents (concatenated over e) with a mixture-of-posteriors proposal,
    and returns one summed bound per modality.
    """
    lws = []
    for d, _px_zs in enumerate(np.array(px_zs).T):  # rows are decoders now
        lw = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1).sum(-1)
              + model.pz(*model.pz_params).log_prob(zss[e]).sum(-1)
              - log_mean_exp(torch.stack([qz_x.log_prob(zss[e]).sum(-1) for qz_x in qz_xs]))
              for e, px_z in enumerate(_px_zs)]
        lw = torch.cat(lw)
        lws.append(log_mean_exp(lw).sum())
    return lws
@torch.no_grad()
def m_llik_eval(K):
    """Full likelihood report for a multi-modal model.

    Accumulates, over the test set: the joint IWAE bound, per-modality
    bounds under the mixture proposal, and the V x V cross-modal bounds;
    prints each averaged by the dataset size N.
    """
    model.eval()
    llik_joint = 0
    llik_synergy = np.array([0 for _ in model.vaes])
    lliks_cross = np.array([[0 for _ in model.vaes] for _ in model.vaes])
    for dataT in test_loader:
        data = unpack_data(dataT, device=device)
        qz_xs, px_zs, zss = model(data, K)
        objs = individual_iwaes(qz_xs, px_zs, zss, data)
        objs_cross = cross_iwaes(qz_xs, px_zs, zss, data)
        llik_joint += m_iwae(qz_xs, px_zs, zss, data)
        llik_synergy = llik_synergy + np.array(objs)
        lliks_cross = lliks_cross + np.array(objs_cross)
    print('Marginal Log Likelihood of joint {} (IWAE, K = {}): {:.4f}'
          .format(model.modelName, K, llik_joint / N))
    print('-' * 89)
    for i, llik in enumerate(llik_synergy):
        print('Marginal Log Likelihood of {} from {} (IWAE, K = {}): {:.4f}'
              .format(model.vaes[i].modelName, model.modelName, K, (llik / N).item()))
    print('-' * 89)
    for e, _lliks_cross in enumerate(lliks_cross):
        for d, llik_cross in enumerate(_lliks_cross):
            print('Marginal Log Likelihood of {} from {} (IWAE, K = {}): {:.4f}'
                  .format(model.vaes[d].modelName, model.vaes[e].modelName, K, (llik_cross / N).item()))
    print('-' * 89)
@torch.no_grad()
def llik_eval(K):
    """Likelihood report for a uni-modal model: joint IWAE bound only.

    (Despite the plural names, the uni-modal model's forward returns a
    single posterior/likelihood/sample triple here.)
    """
    model.eval()
    llik_joint = 0
    for dataT in test_loader:
        data = unpack_data(dataT, device=device)
        qz_xs, px_zs, zss = model(data, K)
        llik_joint += iwae(qz_xs, px_zs, zss, data)
    print('Marginal Log Likelihood of joint {} (IWAE, K = {}): {:.4f}'
          .format(model.modelName, K, llik_joint / N))
@torch.no_grad()
def generate_sparse(D, steps, J):
    """generate `steps` perturbations for all `D` latent dimensions on `J` datapoints. """
    model.eval()
    # NOTE(review): the inner `for i, (qz_x, zs)` shadows the outer batch
    # index `i`; since the outer loop breaks after one batch, the filenames
    # below use the modality index -- presumably intended, but verify.
    for i, dataT in enumerate(test_loader):
        data = unpack_data(dataT, require_length=(args.projection == 'Sft'), device=device)
        qz_xs, _, zss = model(data, args.K)
        for i, (qz_x, zs) in enumerate(zip(qz_xs, zss)):
            embs = []
            # for delta in torch.linspace(0.01, 0.99, steps=steps):
            for delta in torch.linspace(-5, 5, steps=steps):
                for d in range(D):
                    # Perturb one latent dimension by delta prior stddevs,
                    # starting from the posterior mean.
                    mod_emb = qz_x.mean + torch.zeros_like(qz_x.mean)
                    mod_emb[:, d] += model.vaes[i].pz(*model.vaes[i].pz_params).stddev[:, d] * delta
                    embs.append(mod_emb)
            embs = torch.stack(embs).transpose(0, 1).contiguous()
            # Decode the traversals through both modalities' decoders.
            for r in range(2):
                samples = model.vaes[r].px_z(*model.vaes[r].dec(embs.view(-1, D)[:((J) * steps * D)])).mean
                save_image(samples.cpu(), os.path.join(runPath, 'latent-traversals-{}x{}.png'.format(i, r)), nrow=D)
        break  # only the first test batch is visualised
if __name__ == '__main__':
    with Timer('MM-VAE analysis') as t:
        # likelihood evaluation: dispatch to m_llik_eval for multi-modal
        # models (those exposing `vaes`), llik_eval otherwise.
        print('-' * 89)
        eval = locals()[('m_' if hasattr(model, 'vaes') else '') + 'llik_eval']
        eval(cmds.iwae_samples)
        print('-' * 89)
| 7,240 | 38.785714 | 116 | py |
mmvae-public | mmvae-public/src/models/vae_svhn.py | # SVHN model specification
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
from numpy import sqrt
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from torchvision.utils import save_image, make_grid
from utils import Constants
from vis import plot_embeddings, plot_kls_df
from .vae import VAE
# Constants
dataSize = torch.Size([3, 32, 32])
imgChans = dataSize[0]
fBase = 32 # base size of filter channels
# Classes
class Enc(nn.Module):
    """ Generate latent parameters for SVHN image data. """

    def __init__(self, latent_dim):
        super(Enc, self).__init__()
        self.enc = nn.Sequential(
            # input size: 3 x 32 x 32
            nn.Conv2d(imgChans, fBase, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase) x 16 x 16
            nn.Conv2d(fBase, fBase * 2, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase * 2) x 8 x 8
            nn.Conv2d(fBase * 2, fBase * 4, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase * 4) x 4 x 4
        )
        # 4x4 kernel collapses the 4x4 feature map to a single spatial position.
        self.c1 = nn.Conv2d(fBase * 4, latent_dim, 4, 1, 0, bias=True)
        self.c2 = nn.Conv2d(fBase * 4, latent_dim, 4, 1, 0, bias=True)
        # c1, c2 size: latent_dim x 1 x 1

    def forward(self, x):
        """Return (loc, scale) parameters for the Laplace posterior q(z|x).

        The scale is a softmax over latent dims rescaled by the dimension
        count (so entries average to ~1) plus a small floor (Constants.eta).
        """
        e = self.enc(x)
        lv = self.c2(e).squeeze()
        return self.c1(e).squeeze(), F.softmax(lv, dim=-1) * lv.size(-1) + Constants.eta
class Dec(nn.Module):
    """ Generate a SVHN image given a sample from the latent space. """

    def __init__(self, latent_dim):
        super(Dec, self).__init__()
        self.dec = nn.Sequential(
            nn.ConvTranspose2d(latent_dim, fBase * 4, 4, 1, 0, bias=True),
            nn.ReLU(True),
            # size: (fBase * 4) x 4 x 4
            nn.ConvTranspose2d(fBase * 4, fBase * 2, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase * 2) x 8 x 8
            nn.ConvTranspose2d(fBase * 2, fBase, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase) x 16 x 16
            nn.ConvTranspose2d(fBase, imgChans, 4, 2, 1, bias=True),
            nn.Sigmoid()
            # Output size: 3 x 32 x 32
        )

    def forward(self, z):
        """Return (mean, scale) of the Laplace likelihood p(x|z).

        Accepts latents with arbitrary leading dims (e.g. K x B x D); they
        are flattened for the deconv stack and restored on the output.
        """
        z = z.unsqueeze(-1).unsqueeze(-1)  # fit deconv layers
        out = self.dec(z.view(-1, *z.size()[-3:]))
        out = out.view(*z.size()[:-3], *out.size()[1:])
        # consider also predicting the length scale
        return out, torch.tensor(0.75).to(z.device)  # mean, length scale
class SVHN(VAE):
    """ Derive a specific sub-class of a VAE for SVHN """

    def __init__(self, params):
        super(SVHN, self).__init__(
            dist.Laplace,  # prior
            dist.Laplace,  # likelihood
            dist.Laplace,  # posterior
            Enc(params.latent_dim),
            Dec(params.latent_dim),
            params
        )
        # Prior loc is fixed; the (pre-softmax) scale is learnable only when
        # --learn-prior is set.
        grad = {'requires_grad': params.learn_prior}
        self._pz_params = nn.ParameterList([
            nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False),  # mu
            nn.Parameter(torch.zeros(1, params.latent_dim), **grad)  # logvar
        ])
        self.modelName = 'svhn'
        self.dataSize = dataSize
        self.llik_scaling = 1.

    @property
    def pz_params(self):
        # Prior (loc, scale); scale via dimension-rescaled softmax of the
        # raw parameter (no eta floor here, unlike the CUB model -- note).
        return self._pz_params[0], F.softmax(self._pz_params[1], dim=1) * self._pz_params[1].size(-1)

    @staticmethod
    def getDataLoaders(batch_size, shuffle=True, device='cuda'):
        """Return (train, test) SVHN DataLoaders, downloading if needed."""
        kwargs = {'num_workers': 1, 'pin_memory': True} if device == 'cuda' else {}
        tx = transforms.ToTensor()
        train = DataLoader(datasets.SVHN('../data', split='train', download=True, transform=tx),
                           batch_size=batch_size, shuffle=shuffle, **kwargs)
        test = DataLoader(datasets.SVHN('../data', split='test', download=True, transform=tx),
                          batch_size=batch_size, shuffle=shuffle, **kwargs)
        return train, test

    def generate(self, runPath, epoch):
        """Save a grid of N prior samples (K draws each) as a PNG."""
        N, K = 64, 9
        samples = super(SVHN, self).generate(N, K).cpu()
        # wrangle things so they come out tiled
        samples = samples.view(K, N, *samples.size()[1:]).transpose(0, 1)
        s = [make_grid(t, nrow=int(sqrt(K)), padding=0) for t in samples]
        save_image(torch.stack(s),
                   '{}/gen_samples_{:03d}.png'.format(runPath, epoch),
                   nrow=int(sqrt(N)))

    def reconstruct(self, data, runPath, epoch):
        """Save originals stacked above reconstructions for 8 test images."""
        recon = super(SVHN, self).reconstruct(data[:8])
        comp = torch.cat([data[:8], recon]).data.cpu()
        save_image(comp, '{}/recon_{:03d}.png'.format(runPath, epoch))

    def analyse(self, data, runPath, epoch):
        """Save a UMAP latent embedding plot and per-dimension KL plot."""
        zemb, zsl, kls_df = super(SVHN, self).analyse(data, K=10)
        labels = ['Prior', self.modelName.lower()]
        plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
        plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
| 5,053 | 37 | 101 | py |
mmvae-public | mmvae-public/src/models/mmvae_cub_images_sentences.py | # cub multi-modal model specification
import matplotlib.pyplot as plt
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from numpy import sqrt, prod
from torch.utils.data import DataLoader
from torchnet.dataset import TensorDataset, ResampleDataset
from torchvision.utils import save_image, make_grid
from utils import Constants
from vis import plot_embeddings, plot_kls_df
from .mmvae import MMVAE
from .vae_cub_image import CUB_Image
from .vae_cub_sent import CUB_Sentence
# Constants
maxSentLen = 32
minOccur = 3
# This is required because there are 10 captions per image.
# Allows easier reuse of the same image for the corresponding set of captions.
def resampler(dataset, idx):
    """Map a caption index to its image index (10 consecutive captions per image)."""
    image_idx, _caption_slot = divmod(idx, 10)
    return image_idx
class CUB_Image_Sentence(MMVAE):
    """Multi-modal VAE pairing CUB bird images with their captions."""

    def __init__(self, params):
        super(CUB_Image_Sentence, self).__init__(dist.Laplace, params, CUB_Image, CUB_Sentence)
        grad = {'requires_grad': params.learn_prior}
        self._pz_params = nn.ParameterList([
            nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False),  # mu
            nn.Parameter(torch.zeros(1, params.latent_dim), **grad)  # logvar
        ])
        # Balance image vs sentence likelihood terms (sentence length per
        # image-pixel count) unless an explicit scaling was given.
        self.vaes[0].llik_scaling = self.vaes[1].maxSentLen / prod(self.vaes[0].dataSize) \
            if params.llik_scaling == 0 else params.llik_scaling
        # Share the joint prior parameters with both uni-modal VAEs.
        for vae in self.vaes:
            vae._pz_params = self._pz_params
        self.modelName = 'cubIS'
        self.i2w = self.vaes[1].load_vocab()  # index -> word, for rendering captions

    @property
    def pz_params(self):
        # Prior (loc, scale); scale is a dimension-rescaled softmax with a
        # small floor (Constants.eta) for numerical stability.
        return self._pz_params[0], \
            F.softmax(self._pz_params[1], dim=1) * self._pz_params[1].size(1) + Constants.eta

    def getDataLoaders(self, batch_size, shuffle=True, device='cuda'):
        """Return paired (image, caption) loaders; each image is resampled
        10x to line up with its 10 captions."""
        # load base datasets
        t1, s1 = self.vaes[0].getDataLoaders(batch_size, shuffle, device)
        t2, s2 = self.vaes[1].getDataLoaders(batch_size, shuffle, device)
        kwargs = {'num_workers': 2, 'pin_memory': True} if device == 'cuda' else {}
        train_loader = DataLoader(TensorDataset([
            ResampleDataset(t1.dataset, resampler, size=len(t1.dataset) * 10),
            t2.dataset]), batch_size=batch_size, shuffle=shuffle, **kwargs)
        test_loader = DataLoader(TensorDataset([
            ResampleDataset(s1.dataset, resampler, size=len(s1.dataset) * 10),
            s2.dataset]), batch_size=batch_size, shuffle=shuffle, **kwargs)
        return train_loader, test_loader

    def generate(self, runPath, epoch):
        """Render N joint prior samples as image+caption panels to a PNG."""
        N = 8
        samples = super(CUB_Image_Sentence, self).generate(N)
        images, captions = [sample.data.cpu() for sample in samples]
        captions = self._sent_preprocess(captions)
        fig = plt.figure(figsize=(8, 6))
        for i, (image, caption) in enumerate(zip(images, captions)):
            fig = self._imshow(image, caption, i, fig, N)
        plt.savefig('{}/gen_samples_{:03d}.png'.format(runPath, epoch))
        plt.close()

    def reconstruct(self, raw_data, runPath, epoch):
        """Save the full 2x2 reconstruction matrix for the first 8 pairs:
        same-modality image recon -> PNG, same-modality caption recon -> TXT,
        cross-modal recons -> combined image+caption figures."""
        N = 8
        recons_mat = super(CUB_Image_Sentence, self).reconstruct([d[:N] for d in raw_data])
        # Post-processing per modality: 0 = image tensors, 1 = token sequences.
        fns = [lambda images: images.data.cpu(), lambda sentences: self._sent_preprocess(sentences)]
        for r, recons_list in enumerate(recons_mat):  # r = source modality
            for o, recon in enumerate(recons_list):  # o = target modality
                data = fns[r](raw_data[r][:N])
                recon = fns[o](recon.squeeze())
                if r != o:
                    # Cross-modal: plot each (image, caption) pair side by side.
                    fig = plt.figure(figsize=(8, 6))
                    for i, (_data, _recon) in enumerate(zip(data, recon)):
                        image, caption = (_data, _recon) if r == 0 else (_recon, _data)
                        fig = self._imshow(image, caption, i, fig, N)
                    plt.savefig('{}/recon_{}x{}_{:03d}.png'.format(runPath, r, o, epoch))
                    plt.close()
                else:
                    if r == 0:
                        comp = torch.cat([data, recon])
                        save_image(comp, '{}/recon_{}x{}_{:03d}.png'.format(runPath, r, o, epoch))
                    else:
                        with open('{}/recon_{}x{}_{:03d}.txt'.format(runPath, r, o, epoch), "w+") as txt_file:
                            for r_sent, d_sent in zip(recon, data):
                                txt_file.write('[DATA] ==> {}\n'.format(' '.join(self.i2w[str(i)] for i in d_sent)))
                                txt_file.write('[RECON] ==> {}\n\n'.format(' '.join(self.i2w[str(i)] for i in r_sent)))

    def analyse(self, data, runPath, epoch):
        """Save a UMAP latent embedding plot and per-dimension KL plot."""
        zemb, zsl, kls_df = super(CUB_Image_Sentence, self).analyse(data, K=10)
        labels = ['Prior', *[vae.modelName.lower() for vae in self.vaes]]
        plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
        plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))

    def _sent_preprocess(self, sentences):
        """make sure raw data is always passed as dim=2 to avoid argmax.
        last dimension must always be word embedding."""
        if len(sentences.shape) > 2:
            sentences = sentences.argmax(-1).squeeze()
        # fn_2i: tensor -> int indices; fn_trun: truncate at end-of-sentence.
        return [self.vaes[1].fn_trun(s) for s in self.vaes[1].fn_2i(sentences)]

    def _imshow(self, image, caption, i, fig, N):
        """Add one (image, caption) pair to the figure grid; returns fig."""
        ax = fig.add_subplot(N // 2, 4, i * 2 + 1)
        ax.axis('off')
        image = image.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
        plt.imshow(image)
        ax = fig.add_subplot(N // 2, 4, i * 2 + 2)
        pos = ax.get_position()
        ax.axis('off')
        plt.text(
            x=0.5 * (pos.x0 + pos.x1),
            y=0.5 * (pos.y0 + pos.y1),
            ha='left',
            # Wrap the caption every 5 words.
            s='{}'.format(
                ' '.join(self.i2w[str(i)] + '\n' if (n + 1) % 5 == 0
                         else self.i2w[str(i)] for n, i in enumerate(caption))),
            fontsize=6,
            verticalalignment='center',
            horizontalalignment='center'
        )
        return fig
| 6,015 | 42.912409 | 119 | py |
mmvae-public | mmvae-public/src/models/vae.py | # Base VAE class definition
import torch
import torch.nn as nn
from utils import get_mean, kl_divergence
from vis import embed_umap, tensors_to_df
class VAE(nn.Module):
    """Base VAE: wires together prior/likelihood/posterior distribution
    classes with encoder and decoder networks. Subclasses set `modelName`,
    `_pz_params`, and `llik_scaling`."""

    def __init__(self, prior_dist, likelihood_dist, post_dist, enc, dec, params):
        super(VAE, self).__init__()
        self.pz = prior_dist  # p(z) distribution class
        self.px_z = likelihood_dist  # p(x|z) distribution class
        self.qz_x = post_dist  # q(z|x) distribution class
        self.enc = enc
        self.dec = dec
        self.modelName = None
        self.params = params
        self._pz_params = None  # defined in subclass
        self._qz_x_params = None  # populated in `forward`
        self.llik_scaling = 1.0

    @property
    def pz_params(self):
        return self._pz_params

    @property
    def qz_x_params(self):
        # Only valid after forward() has encoded at least one batch.
        if self._qz_x_params is None:
            raise NameError("qz_x params not initalised yet!")
        return self._qz_x_params

    @staticmethod
    def getDataLoaders(batch_size, shuffle=True, device="cuda"):
        # handle merging individual datasets appropriately in sub-class
        raise NotImplementedError

    def forward(self, x, K=1):
        """Encode x, draw K latent samples, decode; returns (qz_x, px_z, zs)."""
        self._qz_x_params = self.enc(x)
        qz_x = self.qz_x(*self._qz_x_params)
        zs = qz_x.rsample(torch.Size([K]))
        px_z = self.px_z(*self.dec(zs))
        return qz_x, px_z, zs

    def generate(self, N, K):
        """Draw N prior latents, then K observation samples per latent;
        returns a flattened (N*K, ...) batch."""
        self.eval()
        with torch.no_grad():
            pz = self.pz(*self.pz_params)
            latents = pz.rsample(torch.Size([N]))
            px_z = self.px_z(*self.dec(latents))
            data = px_z.sample(torch.Size([K]))
        return data.view(-1, *data.size()[3:])

    def reconstruct(self, data):
        """Single-sample reconstruction: encode, sample once, decode mean."""
        self.eval()
        with torch.no_grad():
            qz_x = self.qz_x(*self.enc(data))
            latents = qz_x.rsample()  # no dim expansion
            px_z = self.px_z(*self.dec(latents))
            recon = get_mean(px_z)
        return recon

    def analyse(self, data, K):
        """Return (UMAP embedding of prior+posterior samples, labels,
        per-dimension KL dataframe) for visualisation."""
        self.eval()
        with torch.no_grad():
            qz_x, _, zs = self.forward(data, K=K)
            pz = self.pz(*self.pz_params)
            zss = [pz.sample(torch.Size([K, data.size(0)])).view(-1, pz.batch_shape[-1]),
                   zs.view(-1, zs.size(-1))]
            zsl = [torch.zeros(zs.size(0)).fill_(i) for i, zs in enumerate(zss)]
            kls_df = tensors_to_df(
                [kl_divergence(qz_x, pz).cpu().numpy()],
                head='KL',
                keys=[r'KL$(q(z|x)\,||\,p(z))$'],
                ax_names=['Dimensions', r'KL$(q\,||\,p)$']
            )
        return embed_umap(torch.cat(zss, 0).cpu().numpy()), \
            torch.cat(zsl, 0).cpu().numpy(), \
            kls_df
| 2,674 | 32.024691 | 89 | py |
mmvae-public | mmvae-public/src/models/vae_cub_image.py | # CUB Image model specification
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from numpy import sqrt
from torchvision import datasets, transforms
from torchvision.utils import make_grid, save_image
from utils import Constants
from vis import plot_embeddings, plot_kls_df
from .vae import VAE
# Constants
imgChans = 3
fBase = 64
# Classes
class Enc(nn.Module):
    """ Generate latent parameters for CUB image data. """
    def __init__(self, latentDim):
        super(Enc, self).__init__()
        # NOTE(review): the original size comments assumed 128x128 inputs, but
        # CUB_Image.getDataLoaders resizes images to 64x64; the 64x64 trace is
        # shown below (each stride-2 conv halves the spatial size).
        modules = [
            # input size: 3 x 64 x 64
            nn.Conv2d(imgChans, fBase, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase) x 32 x 32
            nn.Conv2d(fBase, fBase * 2, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase * 2) x 16 x 16
            nn.Conv2d(fBase * 2, fBase * 4, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase * 4) x 8 x 8
            nn.Conv2d(fBase * 4, fBase * 8, 4, 2, 1, bias=True),
            nn.ReLU(True)]
        # size: (fBase * 8) x 4 x 4
        self.enc = nn.Sequential(*modules)
        self.c1 = nn.Conv2d(fBase * 8, latentDim, 4, 1, 0, bias=True)
        self.c2 = nn.Conv2d(fBase * 8, latentDim, 4, 1, 0, bias=True)
        # c1, c2 size: latentDim x 1 x 1
    def forward(self, x):
        e = self.enc(x)
        # mean and softplus-positive scale; eta keeps the scale bounded away from 0
        return self.c1(e).squeeze(), F.softplus(self.c2(e)).squeeze() + Constants.eta
class Dec(nn.Module):
    """ Generate an image given a sample from the latent space. """
    def __init__(self, latentDim):
        super(Dec, self).__init__()
        modules = [nn.ConvTranspose2d(latentDim, fBase * 8, 4, 1, 0, bias=True),
                   nn.ReLU(True), ]
        # NOTE(review): the original size comments assumed a 128x128 output; with
        # the 64x64 data used here, the trace is 4x4 doubled up to 64x64.
        modules.extend([
            nn.ConvTranspose2d(fBase * 8, fBase * 4, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase * 4) x 8 x 8
            nn.ConvTranspose2d(fBase * 4, fBase * 2, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase * 2) x 16 x 16
            nn.ConvTranspose2d(fBase * 2, fBase, 4, 2, 1, bias=True),
            nn.ReLU(True),
            # size: (fBase) x 32 x 32
            nn.ConvTranspose2d(fBase, imgChans, 4, 2, 1, bias=True),
            nn.Sigmoid()
            # Output size: 3 x 64 x 64
        ])
        self.dec = nn.Sequential(*modules)
    def forward(self, z):
        z = z.unsqueeze(-1).unsqueeze(-1)  # fit deconv layers
        out = self.dec(z.view(-1, *z.size()[-3:]))
        out = out.view(*z.size()[:-3], *out.size()[1:])  # restore leading sample dims
        return out, torch.tensor(0.01).to(z.device)  # mean, fixed likelihood scale
class CUB_Image(VAE):
    """ Derive a specific sub-class of a VAE for the CUB image modality. """
    def __init__(self, params):
        super(CUB_Image, self).__init__(
            dist.Laplace,  # prior
            dist.Laplace,  # likelihood
            dist.Laplace,  # posterior
            Enc(params.latent_dim),
            Dec(params.latent_dim),
            params
        )
        grad = {'requires_grad': params.learn_prior}
        self._pz_params = nn.ParameterList([
            nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False),  # mu
            nn.Parameter(torch.zeros(1, params.latent_dim), **grad)  # logvar
        ])
        self.modelName = 'cubI'
        self.dataSize = torch.Size([3, 64, 64])
        self.llik_scaling = 1.
    @property
    def pz_params(self):
        # zero prior location; softplus + eta keeps the scale strictly positive
        return self._pz_params[0], F.softplus(self._pz_params[1]) + Constants.eta
    # remember that when combining with captions, this should be x10
    def getDataLoaders(self, batch_size, shuffle=True, device="cuda"):
        """Return (train, test) loaders over 64x64-resized CUB images."""
        kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
        tx = transforms.Compose([transforms.Resize([64, 64]), transforms.ToTensor()])
        train_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder('../data/cub/train', transform=tx),
            batch_size=batch_size, shuffle=shuffle, **kwargs)
        test_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder('../data/cub/test', transform=tx),
            batch_size=batch_size, shuffle=shuffle, **kwargs)
        return train_loader, test_loader
    def generate(self, runPath, epoch):
        """Save a grid of N generations, each cell tiling K likelihood draws."""
        N, K = 64, 9
        samples = super(CUB_Image, self).generate(N, K).data.cpu()
        # wrangle things so they come out tiled
        samples = samples.view(K, N, *samples.size()[1:]).transpose(0, 1)
        s = [make_grid(t, nrow=int(sqrt(K)), padding=0) for t in samples.data.cpu()]
        save_image(torch.stack(s),
                   '{}/gen_samples_{:03d}.png'.format(runPath, epoch),
                   nrow=int(sqrt(N)))
    def reconstruct(self, data, runPath, epoch):
        """Save originals stacked with reconstructions for the first 8 images."""
        recon = super(CUB_Image, self).reconstruct(data[:8])
        comp = torch.cat([data[:8], recon])
        save_image(comp.data.cpu(), '{}/recon_{:03d}.png'.format(runPath, epoch))
    def analyse(self, data, runPath, epoch):
        """Save UMAP latent embedding and per-dimension KL plots."""
        zemb, zsl, kls_df = super(CUB_Image, self).analyse(data, K=10)
        labels = ['Prior', self.modelName.lower()]
        plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
        plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
| 5,350 | 37.221429 | 91 | py |
mmvae-public | mmvae-public/src/models/vae_mnist.py | # MNIST model specification
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
from numpy import prod, sqrt
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchvision.utils import save_image, make_grid
from utils import Constants
from vis import plot_embeddings, plot_kls_df
from .vae import VAE
# Constants
dataSize = torch.Size([1, 28, 28])
data_dim = int(prod(dataSize))
hidden_dim = 400
def extra_hidden_layer():
    """One hidden_dim -> hidden_dim fully-connected block with in-place ReLU."""
    linear = nn.Linear(hidden_dim, hidden_dim)
    activation = nn.ReLU(True)
    return nn.Sequential(linear, activation)
# Classes
class Enc(nn.Module):
    """ Generate latent parameters for MNIST image data. """
    def __init__(self, latent_dim, num_hidden_layers=1):
        super(Enc, self).__init__()
        modules = []
        modules.append(nn.Sequential(nn.Linear(data_dim, hidden_dim), nn.ReLU(True)))
        modules.extend([extra_hidden_layer() for _ in range(num_hidden_layers - 1)])
        self.enc = nn.Sequential(*modules)
        self.fc21 = nn.Linear(hidden_dim, latent_dim)  # posterior location head
        self.fc22 = nn.Linear(hidden_dim, latent_dim)  # pre-normalisation scale head
    def forward(self, x):
        e = self.enc(x.view(*x.size()[:-3], -1))  # flatten data
        lv = self.fc22(e)
        # scale = softmax over latent dims rescaled to sum to latent_dim,
        # plus eta so it stays strictly positive
        return self.fc21(e), F.softmax(lv, dim=-1) * lv.size(-1) + Constants.eta
class Dec(nn.Module):
    """Decode a latent sample into Laplace likelihood parameters for MNIST.

    Returns the clamped per-pixel mean together with a fixed scale of 0.75.
    """
    def __init__(self, latent_dim, num_hidden_layers=1):
        super(Dec, self).__init__()
        layers = [nn.Sequential(nn.Linear(latent_dim, hidden_dim), nn.ReLU(True))]
        for _ in range(num_hidden_layers - 1):
            layers.append(extra_hidden_layer())
        self.dec = nn.Sequential(*layers)
        self.fc3 = nn.Linear(hidden_dim, data_dim)
    def forward(self, z):
        logits = self.fc3(self.dec(z))
        mean = torch.sigmoid(logits.view(*z.size()[:-1], *dataSize))  # back to image shape
        mean = mean.clamp(Constants.eta, 1 - Constants.eta)  # keep strictly inside (0, 1)
        return mean, torch.tensor(0.75).to(z.device)  # mean, fixed likelihood scale
class MNIST(VAE):
    """ Derive a specific sub-class of a VAE for MNIST. """
    def __init__(self, params):
        super(MNIST, self).__init__(
            dist.Laplace,  # prior
            dist.Laplace,  # likelihood
            dist.Laplace,  # posterior
            Enc(params.latent_dim, params.num_hidden_layers),
            Dec(params.latent_dim, params.num_hidden_layers),
            params
        )
        grad = {'requires_grad': params.learn_prior}
        self._pz_params = nn.ParameterList([
            nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False),  # mu
            nn.Parameter(torch.zeros(1, params.latent_dim), **grad)  # logvar
        ])
        self.modelName = 'mnist'
        self.dataSize = dataSize
        self.llik_scaling = 1.
    @property
    def pz_params(self):
        # NOTE(review): unlike Enc.forward there is no `+ Constants.eta` added
        # here; dim=1 is equivalent to dim=-1 for the [1, latent_dim] parameter.
        # Confirm the missing eta is intentional.
        return self._pz_params[0], F.softmax(self._pz_params[1], dim=1) * self._pz_params[1].size(-1)
    @staticmethod
    def getDataLoaders(batch_size, shuffle=True, device="cuda"):
        """Return (train, test) MNIST loaders, downloading the data if needed."""
        kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
        tx = transforms.ToTensor()
        train = DataLoader(datasets.MNIST('../data', train=True, download=True, transform=tx),
                           batch_size=batch_size, shuffle=shuffle, **kwargs)
        test = DataLoader(datasets.MNIST('../data', train=False, download=True, transform=tx),
                          batch_size=batch_size, shuffle=shuffle, **kwargs)
        return train, test
    def generate(self, runPath, epoch):
        """Save a grid of N generations, each cell tiling K likelihood draws."""
        N, K = 64, 9
        samples = super(MNIST, self).generate(N, K).cpu()
        # wrangle things so they come out tiled
        samples = samples.view(K, N, *samples.size()[1:]).transpose(0, 1)  # N x K x 1 x 28 x 28
        s = [make_grid(t, nrow=int(sqrt(K)), padding=0) for t in samples]
        save_image(torch.stack(s),
                   '{}/gen_samples_{:03d}.png'.format(runPath, epoch),
                   nrow=int(sqrt(N)))
    def reconstruct(self, data, runPath, epoch):
        """Save originals stacked with reconstructions for the first 8 digits."""
        recon = super(MNIST, self).reconstruct(data[:8])
        comp = torch.cat([data[:8], recon]).data.cpu()
        save_image(comp, '{}/recon_{:03d}.png'.format(runPath, epoch))
    def analyse(self, data, runPath, epoch):
        """Save UMAP latent embedding and per-dimension KL plots."""
        zemb, zsl, kls_df = super(MNIST, self).analyse(data, K=10)
        labels = ['Prior', self.modelName.lower()]
        plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
        plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
| 4,623 | 37.857143 | 101 | py |
mmvae-public | mmvae-public/src/models/vae_cub_sent_ft.py | # Sentence model specification - CUB image feature version
import json
import os
import numpy as np
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torch.utils.data import DataLoader
from datasets import CUBSentences
from utils import Constants, FakeCategorical
from .vae import VAE
maxSentLen = 32 # max length of any description for birds dataset
minOccur = 3
embeddingDim = 128
lenWindow = 3
fBase = 32
vocabSize = 1590
vocab_path = '../data/cub/oc:{}_sl:{}_s:{}_w:{}/cub.vocab'.format(minOccur, maxSentLen, 300, lenWindow)
# Classes
class Enc(nn.Module):
    """ Generate latent parameters for sentence data. """
    def __init__(self, latentDim):
        super(Enc, self).__init__()
        self.embedding = nn.Embedding(vocabSize, embeddingDim, padding_idx=0)
        self.enc = nn.Sequential(
            # input size: 1 x 32 x 128 (1 x maxSentLen x embeddingDim)
            nn.Conv2d(1, fBase, 4, 2, 1, bias=True),
            nn.BatchNorm2d(fBase),
            nn.ReLU(True),
            # size: (fBase) x 16 x 64
            nn.Conv2d(fBase, fBase * 2, 4, 2, 1, bias=True),
            nn.BatchNorm2d(fBase * 2),
            nn.ReLU(True),
            # size: (fBase * 2) x 8 x 32
            nn.Conv2d(fBase * 2, fBase * 4, 4, 2, 1, bias=True),
            nn.BatchNorm2d(fBase * 4),
            nn.ReLU(True),
            # size: (fBase * 4) x 4 x 16
            nn.Conv2d(fBase * 4, fBase * 8, (1, 4), (1, 2), (0, 1), bias=True),
            nn.BatchNorm2d(fBase * 8),
            nn.ReLU(True),
            # size: (fBase * 8) x 4 x 8 -- (1, 4) kernels halve the width only
            nn.Conv2d(fBase * 8, fBase * 16, (1, 4), (1, 2), (0, 1), bias=True),
            nn.BatchNorm2d(fBase * 16),
            nn.ReLU(True),
            # size: (fBase * 16) x 4 x 4
        )
        self.c1 = nn.Conv2d(fBase * 16, latentDim, 4, 1, 0, bias=True)
        self.c2 = nn.Conv2d(fBase * 16, latentDim, 4, 1, 0, bias=True)
        # c1, c2 size: latentDim x 1 x 1
    def forward(self, x):
        # x holds token indices; embed, add a channel dim, then convolve
        e = self.enc(self.embedding(x.long()).unsqueeze(1))
        mu, logvar = self.c1(e).squeeze(), self.c2(e).squeeze()
        return mu, F.softplus(logvar) + Constants.eta
class Dec(nn.Module):
    """ Generate a sentence given a sample from the latent space. """
    def __init__(self, latentDim):
        super(Dec, self).__init__()
        # NOTE(review): size comments corrected -- the originals lagged one
        # layer and claimed a 1 x 64 x 256 output; the actual output is
        # 1 x 32 x 128 (maxSentLen x embeddingDim), matching `forward` below.
        self.dec = nn.Sequential(
            nn.ConvTranspose2d(latentDim, fBase * 16, 4, 1, 0, bias=True),
            nn.BatchNorm2d(fBase * 16),
            nn.ReLU(True),
            # size: (fBase * 16) x 4 x 4
            nn.ConvTranspose2d(fBase * 16, fBase * 8, (1, 4), (1, 2), (0, 1), bias=True),
            nn.BatchNorm2d(fBase * 8),
            nn.ReLU(True),
            # size: (fBase * 8) x 4 x 8
            nn.ConvTranspose2d(fBase * 8, fBase * 4, (1, 4), (1, 2), (0, 1), bias=True),
            nn.BatchNorm2d(fBase * 4),
            nn.ReLU(True),
            # size: (fBase * 4) x 4 x 16
            nn.ConvTranspose2d(fBase * 4, fBase * 2, 4, 2, 1, bias=True),
            nn.BatchNorm2d(fBase * 2),
            nn.ReLU(True),
            # size: (fBase * 2) x 8 x 32
            nn.ConvTranspose2d(fBase * 2, fBase, 4, 2, 1, bias=True),
            nn.BatchNorm2d(fBase),
            nn.ReLU(True),
            # size: (fBase) x 16 x 64
            nn.ConvTranspose2d(fBase, 1, 4, 2, 1, bias=True),
            nn.ReLU(True)
            # Output size: 1 x 32 x 128
        )
        # inverts the 'embedding' module upto one-hotness
        self.toVocabSize = nn.Linear(embeddingDim, vocabSize)
    def forward(self, z):
        z = z.unsqueeze(-1).unsqueeze(-1)  # fit deconv layers
        out = self.dec(z.view(-1, *z.size()[-3:])).view(-1, embeddingDim)
        # trailing comma: return a 1-tuple of likelihood parameters (logits)
        return self.toVocabSize(out).view(*z.size()[:-3], maxSentLen, vocabSize),
class CUB_Sentence_ft(VAE):
    """ Derive a specific sub-class of a VAE for a sentence model. """
    def __init__(self, params):
        super(CUB_Sentence_ft, self).__init__(
            prior_dist=dist.Normal,
            likelihood_dist=FakeCategorical,
            post_dist=dist.Normal,
            enc=Enc(params.latent_dim),
            dec=Dec(params.latent_dim),
            params=params)
        grad = {'requires_grad': params.learn_prior}
        self._pz_params = nn.ParameterList([
            nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False),  # mu
            nn.Parameter(torch.zeros(1, params.latent_dim), **grad)  # logvar
        ])
        self.modelName = 'cubSft'
        self.llik_scaling = 1.
        self.tie_modules()
        self.fn_2i = lambda t: t.cpu().numpy().astype(int)  # tensor -> int ndarray
        # truncate just after the first token id 2 (presumably <eos> -- TODO
        # confirm against the vocab file)
        self.fn_trun = lambda s: s[:np.where(s == 2)[0][0] + 1] if 2 in s else s
        self.vocab_file = vocab_path
        self.maxSentLen = maxSentLen
        self.vocabSize = vocabSize
    def tie_modules(self):
        # This looks dumb, but is actually dumber than you might realise.
        # A linear(a, b) module has a [b x a] weight matrix, but an embedding(a, b)
        # module has a [a x b] weight matrix. So when we want the transpose at
        # decoding time, we just use the weight matrix as is.
        self.dec.toVocabSize.weight = self.enc.embedding.weight
    @property
    def pz_params(self):
        # zero mean; softplus + eta keeps the prior scale strictly positive
        return self._pz_params[0], F.softplus(self._pz_params[1]) + Constants.eta
    @staticmethod
    def getDataLoaders(batch_size, shuffle=True, device="cuda"):
        """Return (train, test) loaders over tokenised CUB captions."""
        kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
        tx = lambda data: torch.Tensor(data)
        t_data = CUBSentences('../data', split='train', transform=tx, max_sequence_length=maxSentLen)
        s_data = CUBSentences('../data', split='test', transform=tx, max_sequence_length=maxSentLen)
        train_loader = DataLoader(t_data, batch_size=batch_size, shuffle=shuffle, **kwargs)
        test_loader = DataLoader(s_data, batch_size=batch_size, shuffle=shuffle, **kwargs)
        return train_loader, test_loader
    def reconstruct(self, data, runPath, epoch):
        """Print and save token-level reconstructions of the first 8 captions."""
        recon = super(CUB_Sentence_ft, self).reconstruct(data[:8]).argmax(dim=-1).squeeze()
        recon, data = self.fn_2i(recon), self.fn_2i(data[:8])
        recon, data = [self.fn_trun(r) for r in recon], [self.fn_trun(d) for d in data]
        i2w = self.load_vocab()
        print("\n Reconstruction examples (excluding <PAD>):")
        for r_sent, d_sent in zip(recon[:3], data[:3]):
            print('[DATA] ==> {}'.format(' '.join(i2w[str(i)] for i in d_sent)))
            print('[RECON] ==> {}\n'.format(' '.join(i2w[str(i)] for i in r_sent)))
        with open('{}/recon_{:03d}.txt'.format(runPath, epoch), "w+") as txt_file:
            for r_sent, d_sent in zip(recon, data):
                txt_file.write('[DATA] ==> {}\n'.format(' '.join(i2w[str(i)] for i in d_sent)))
                txt_file.write('[RECON] ==> {}\n\n'.format(' '.join(i2w[str(i)] for i in r_sent)))
    def generate(self, runPath, epoch):
        """Print and save N x K captions sampled from the prior."""
        N, K = 5, 4
        i2w = self.load_vocab()
        samples = super(CUB_Sentence_ft, self).generate(N, K).argmax(dim=-1).squeeze()
        samples = samples.view(K, N, samples.size(-1)).transpose(0, 1)  # N x K x 64
        samples = [[self.fn_trun(s) for s in ss] for ss in self.fn_2i(samples)]
        # samples = [self.fn_trun(s) for s in samples]
        print("\n Generated examples (excluding <PAD>):")
        for s_sent in samples[0][:3]:
            print('[GEN] ==> {}'.format(' '.join(i2w[str(i)] for i in s_sent if i != 0)))
        with open('{}/gen_samples_{:03d}.txt'.format(runPath, epoch), "w+") as txt_file:
            for s_sents in samples:
                for s_sent in s_sents:
                    txt_file.write('{}\n'.format(' '.join(i2w[str(i)] for i in s_sent)))
                txt_file.write('\n')
    def analyse(self, data, runPath, epoch):
        # latent-space analysis not implemented for the sentence model
        pass
    def load_vocab(self):
        """Return the index-to-word map, building the vocab file first if absent."""
        # call dataloader function to create vocab file
        if not os.path.exists(self.vocab_file):
            _, _ = self.getDataLoaders(256)
        with open(self.vocab_file, 'r') as vocab_file:
            vocab = json.load(vocab_file)
        return vocab['i2w']
| 8,185 | 40.135678 | 103 | py |
mmvae-public | mmvae-public/src/models/mmvae.py | # Base MMVAE class definition
from itertools import combinations
import torch
import torch.nn as nn
from utils import get_mean, kl_divergence
from vis import embed_umap, tensors_to_df
class MMVAE(nn.Module):
    """Multi-modal VAE base: one uni-modal VAE per modality plus a shared prior."""
    def __init__(self, prior_dist, params, *vaes):
        super(MMVAE, self).__init__()
        self.pz = prior_dist  # shared prior distribution constructor
        self.vaes = nn.ModuleList([vae(params) for vae in vaes])
        self.modelName = None  # filled-in per sub-class
        self.params = params
        self._pz_params = None  # defined in subclass
    @property
    def pz_params(self):
        return self._pz_params
    @staticmethod
    def getDataLoaders(batch_size, shuffle=True, device="cuda"):
        # handle merging individual datasets appropriately in sub-class
        raise NotImplementedError
    def forward(self, x, K=1):
        """Encode every modality and decode each latent with every decoder.

        Returns (qz_xs, px_zs, zss): per-modality posteriors, an MxM matrix of
        likelihoods (row = encoding modality, column = decoding modality), and
        the per-modality latent samples.
        """
        qz_xs, zss = [], []
        # initialise cross-modal matrix
        px_zs = [[None for _ in range(len(self.vaes))] for _ in range(len(self.vaes))]
        for m, vae in enumerate(self.vaes):
            qz_x, px_z, zs = vae(x[m], K=K)
            qz_xs.append(qz_x)
            zss.append(zs)
            px_zs[m][m] = px_z  # fill-in diagonal
        for e, zs in enumerate(zss):
            for d, vae in enumerate(self.vaes):
                if e != d:  # fill-in off-diagonal
                    px_zs[e][d] = vae.px_z(*vae.dec(zs))
        return qz_xs, px_zs, zss
    def generate(self, N):
        """Decode N shared-prior samples with each modality's decoder."""
        self.eval()
        with torch.no_grad():
            data = []
            pz = self.pz(*self.pz_params)
            latents = pz.rsample(torch.Size([N]))
            for d, vae in enumerate(self.vaes):
                px_z = vae.px_z(*vae.dec(latents))
                data.append(px_z.mean.view(-1, *px_z.mean.size()[2:]))
        return data  # list of generations---one for each modality
    def reconstruct(self, data):
        """Return the MxM cross-modal matrix of reconstructions (likelihood means)."""
        self.eval()
        with torch.no_grad():
            _, px_zs, _ = self.forward(data)
            # cross-modal matrix of reconstructions
            recons = [[get_mean(px_z) for px_z in r] for r in px_zs]
        return recons
    def analyse(self, data, K):
        """UMAP embedding of prior + per-modality posteriors, with KL statistics."""
        self.eval()
        with torch.no_grad():
            qz_xs, _, zss = self.forward(data, K=K)
            pz = self.pz(*self.pz_params)
            zss = [pz.sample(torch.Size([K, data[0].size(0)])).view(-1, pz.batch_shape[-1]),
                   *[zs.view(-1, zs.size(-1)) for zs in zss]]
            zsl = [torch.zeros(zs.size(0)).fill_(i) for i, zs in enumerate(zss)]
            # per-posterior KL to the prior, plus symmetrised KL between posteriors
            kls_df = tensors_to_df(
                [*[kl_divergence(qz_x, pz).cpu().numpy() for qz_x in qz_xs],
                 *[0.5 * (kl_divergence(p, q) + kl_divergence(q, p)).cpu().numpy()
                   for p, q in combinations(qz_xs, 2)]],
                head='KL',
                keys=[*[r'KL$(q(z|x_{})\,||\,p(z))$'.format(i) for i in range(len(qz_xs))],
                      *[r'J$(q(z|x_{})\,||\,q(z|x_{}))$'.format(i, j)
                        for i, j in combinations(range(len(qz_xs)), 2)]],
                ax_names=['Dimensions', r'KL$(q\,||\,p)$']
            )
        return embed_umap(torch.cat(zss, 0).cpu().numpy()), \
            torch.cat(zsl, 0).cpu().numpy(), \
            kls_df
| 3,238 | 37.105882 | 92 | py |
mmvae-public | mmvae-public/src/models/__init__.py | from .mmvae_cub_images_sentences import CUB_Image_Sentence as VAE_cubIS
from .mmvae_cub_images_sentences_ft import CUB_Image_Sentence_ft as VAE_cubISft
from .mmvae_mnist_svhn import MNIST_SVHN as VAE_mnist_svhn
from .vae_cub_image import CUB_Image as VAE_cubI
from .vae_cub_image_ft import CUB_Image_ft as VAE_cubIft
from .vae_cub_sent import CUB_Sentence as VAE_cubS
from .vae_mnist import MNIST as VAE_mnist
from .vae_svhn import SVHN as VAE_svhn
# __all__ must list export *names* as strings (PEP 8 / import system); putting
# the class objects themselves here makes `from models import *` raise a
# TypeError when the star-import iterates __all__.
__all__ = ['VAE_mnist_svhn', 'VAE_mnist', 'VAE_svhn', 'VAE_cubIS', 'VAE_cubS',
           'VAE_cubI', 'VAE_cubISft', 'VAE_cubIft']
| 565 | 46.166667 | 79 | py |
mmvae-public | mmvae-public/src/models/vae_cub_sent.py | # Sentence model specification - real CUB image version
import os
import json
import numpy as np
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torch.utils.data import DataLoader
from datasets import CUBSentences
from utils import Constants, FakeCategorical
from .vae import VAE
# Constants
maxSentLen = 32 # max length of any description for birds dataset
minOccur = 3
embeddingDim = 128
lenWindow = 3
fBase = 32
vocabSize = 1590
vocab_path = '../data/cub/oc:{}_sl:{}_s:{}_w:{}/cub.vocab'.format(minOccur, maxSentLen, 300, lenWindow)
# Classes
class Enc(nn.Module):
    """ Generate latent parameters for sentence data. """
    def __init__(self, latentDim):
        super(Enc, self).__init__()
        self.embedding = nn.Embedding(vocabSize, embeddingDim, padding_idx=0)
        self.enc = nn.Sequential(
            # input size: 1 x 32 x 128 (1 x maxSentLen x embeddingDim)
            nn.Conv2d(1, fBase, 4, 2, 1, bias=False),
            nn.BatchNorm2d(fBase),
            nn.ReLU(True),
            # size: (fBase) x 16 x 64
            nn.Conv2d(fBase, fBase * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(fBase * 2),
            nn.ReLU(True),
            # size: (fBase * 2) x 8 x 32
            nn.Conv2d(fBase * 2, fBase * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(fBase * 4),
            nn.ReLU(True),
            # size: (fBase * 4) x 4 x 16
            nn.Conv2d(fBase * 4, fBase * 4, (1, 4), (1, 2), (0, 1), bias=False),
            nn.BatchNorm2d(fBase * 4),
            nn.ReLU(True),
            # size: (fBase * 4) x 4 x 8 -- (1, 4) kernels halve the width only
            nn.Conv2d(fBase * 4, fBase * 4, (1, 4), (1, 2), (0, 1), bias=False),
            nn.BatchNorm2d(fBase * 4),
            nn.ReLU(True),
            # size: (fBase * 4) x 4 x 4
        )
        self.c1 = nn.Conv2d(fBase * 4, latentDim, 4, 1, 0, bias=False)
        self.c2 = nn.Conv2d(fBase * 4, latentDim, 4, 1, 0, bias=False)
        # c1, c2 size: latentDim x 1 x 1
    def forward(self, x):
        # x holds token indices; embed, add a channel dim, then convolve
        e = self.enc(self.embedding(x.long()).unsqueeze(1))
        mu, logvar = self.c1(e).squeeze(), self.c2(e).squeeze()
        return mu, F.softplus(logvar) + Constants.eta
class Dec(nn.Module):
    """ Generate a sentence given a sample from the latent space. """
    def __init__(self, latentDim):
        super(Dec, self).__init__()
        # NOTE(review): size comments corrected -- the originals lagged one
        # layer and claimed a 1 x 64 x 256 output; the actual output is
        # 1 x 32 x 128 (maxSentLen x embeddingDim), matching `forward` below.
        self.dec = nn.Sequential(
            nn.ConvTranspose2d(latentDim, fBase * 4, 4, 1, 0, bias=False),
            nn.BatchNorm2d(fBase * 4),
            nn.ReLU(True),
            # size: (fBase * 4) x 4 x 4
            nn.ConvTranspose2d(fBase * 4, fBase * 4, (1, 4), (1, 2), (0, 1), bias=False),
            nn.BatchNorm2d(fBase * 4),
            nn.ReLU(True),
            # size: (fBase * 4) x 4 x 8
            nn.ConvTranspose2d(fBase * 4, fBase * 4, (1, 4), (1, 2), (0, 1), bias=False),
            nn.BatchNorm2d(fBase * 4),
            nn.ReLU(True),
            # size: (fBase * 4) x 4 x 16
            nn.ConvTranspose2d(fBase * 4, fBase * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(fBase * 2),
            nn.ReLU(True),
            # size: (fBase * 2) x 8 x 32
            nn.ConvTranspose2d(fBase * 2, fBase, 4, 2, 1, bias=False),
            nn.BatchNorm2d(fBase),
            nn.ReLU(True),
            # size: (fBase) x 16 x 64
            nn.ConvTranspose2d(fBase, 1, 4, 2, 1, bias=False),
            nn.ReLU(True)
            # Output size: 1 x 32 x 128
        )
        # inverts the 'embedding' module upto one-hotness
        self.toVocabSize = nn.Linear(embeddingDim, vocabSize)
    def forward(self, z):
        z = z.unsqueeze(-1).unsqueeze(-1)  # fit deconv layers
        out = self.dec(z.view(-1, *z.size()[-3:])).view(-1, embeddingDim)
        # trailing comma: return a 1-tuple of likelihood parameters (logits)
        return self.toVocabSize(out).view(*z.size()[:-3], maxSentLen, vocabSize),
class CUB_Sentence(VAE):
    """ Derive a specific sub-class of a VAE for a sentence model. """
    def __init__(self, params):
        super(CUB_Sentence, self).__init__(
            prior_dist=dist.Normal,
            likelihood_dist=FakeCategorical,
            post_dist=dist.Normal,
            enc=Enc(params.latent_dim),
            dec=Dec(params.latent_dim),
            params=params)
        grad = {'requires_grad': params.learn_prior}
        self._pz_params = nn.ParameterList([
            nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False),  # mu
            nn.Parameter(torch.zeros(1, params.latent_dim), **grad)  # logvar
        ])
        self.modelName = 'cubS'
        self.llik_scaling = 1.
        self.tie_modules()
        self.fn_2i = lambda t: t.cpu().numpy().astype(int)  # tensor -> int ndarray
        # truncate just after the first token id 2 (presumably <eos> -- TODO
        # confirm against the vocab file)
        self.fn_trun = lambda s: s[:np.where(s == 2)[0][0] + 1] if 2 in s else s
        self.vocab_file = vocab_path
        self.maxSentLen = maxSentLen
        self.vocabSize = vocabSize
    def tie_modules(self):
        # This looks dumb, but is actually dumber than you might realise.
        # A linear(a, b) module has a [b x a] weight matrix, but an embedding(a, b)
        # module has a [a x b] weight matrix. So when we want the transpose at
        # decoding time, we just use the weight matrix as is.
        self.dec.toVocabSize.weight = self.enc.embedding.weight
    @property
    def pz_params(self):
        # zero mean; softplus + eta keeps the prior scale strictly positive
        return self._pz_params[0], F.softplus(self._pz_params[1]) + Constants.eta
    @staticmethod
    def getDataLoaders(batch_size, shuffle=True, device="cuda"):
        """Return (train, test) loaders over tokenised CUB captions."""
        kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
        tx = lambda data: torch.Tensor(data)
        t_data = CUBSentences('../data', split='train', transform=tx, max_sequence_length=maxSentLen)
        s_data = CUBSentences('../data', split='test', transform=tx, max_sequence_length=maxSentLen)
        train_loader = DataLoader(t_data, batch_size=batch_size, shuffle=shuffle, **kwargs)
        test_loader = DataLoader(s_data, batch_size=batch_size, shuffle=shuffle, **kwargs)
        return train_loader, test_loader
    def reconstruct(self, data, runPath, epoch):
        """Print and save token-level reconstructions of the first 8 captions."""
        recon = super(CUB_Sentence, self).reconstruct(data[:8]).argmax(dim=-1).squeeze()
        recon, data = self.fn_2i(recon), self.fn_2i(data[:8])
        recon, data = [self.fn_trun(r) for r in recon], [self.fn_trun(d) for d in data]
        i2w = self.load_vocab()
        print("\n Reconstruction examples (excluding <PAD>):")
        for r_sent, d_sent in zip(recon[:3], data[:3]):
            print('[DATA] ==> {}'.format(' '.join(i2w[str(i)] for i in d_sent)))
            print('[RECON] ==> {}\n'.format(' '.join(i2w[str(i)] for i in r_sent)))
        with open('{}/recon_{:03d}.txt'.format(runPath, epoch), "w+") as txt_file:
            for r_sent, d_sent in zip(recon, data):
                txt_file.write('[DATA] ==> {}\n'.format(' '.join(i2w[str(i)] for i in d_sent)))
                txt_file.write('[RECON] ==> {}\n\n'.format(' '.join(i2w[str(i)] for i in r_sent)))
    def generate(self, runPath, epoch):
        """Print and save N x K captions sampled from the prior."""
        N, K = 5, 4
        i2w = self.load_vocab()
        samples = super(CUB_Sentence, self).generate(N, K).argmax(dim=-1).squeeze()
        samples = samples.view(K, N, samples.size(-1)).transpose(0, 1)  # N x K x 64
        samples = [[self.fn_trun(s) for s in ss] for ss in self.fn_2i(samples)]
        # samples = [self.fn_trun(s) for s in samples]
        print("\n Generated examples (excluding <PAD>):")
        for s_sent in samples[0][:3]:
            print('[GEN] ==> {}'.format(' '.join(i2w[str(i)] for i in s_sent if i != 0)))
        with open('{}/gen_samples_{:03d}.txt'.format(runPath, epoch), "w+") as txt_file:
            for s_sents in samples:
                for s_sent in s_sents:
                    txt_file.write('{}\n'.format(' '.join(i2w[str(i)] for i in s_sent)))
                txt_file.write('\n')
    def analyse(self, data, runPath, epoch):
        # latent-space analysis not implemented for the sentence model
        pass
    def load_vocab(self):
        """Return the index-to-word map, building the vocab file first if absent."""
        # call dataloader function to create vocab file
        if not os.path.exists(self.vocab_file):
            _, _ = self.getDataLoaders(256)
        with open(self.vocab_file, 'r') as vocab_file:
            vocab = json.load(vocab_file)
        return vocab['i2w']
| 8,186 | 39.935 | 103 | py |
mmvae-public | mmvae-public/src/models/mmvae_cub_images_sentences_ft.py | # cub multi-modal model specification
import matplotlib.pyplot as plt
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from numpy import sqrt, prod
from torch.utils.data import DataLoader
from torchnet.dataset import TensorDataset, ResampleDataset
from torchvision.utils import save_image, make_grid
from utils import Constants
from vis import plot_embeddings, plot_kls_df
from .mmvae import MMVAE
from .vae_cub_image_ft import CUB_Image_ft
from .vae_cub_sent_ft import CUB_Sentence_ft
# Constants
maxSentLen = 32
minOccur = 3
# This is required because there are 10 captions per image.
# Allows easier reuse of the same image for the corresponding set of captions.
def resampler(dataset, idx):
    """Map a caption index to its image index (10 captions share one image)."""
    image_idx, _ = divmod(idx, 10)
    return image_idx
class CUB_Image_Sentence_ft(MMVAE):
    """Multi-modal VAE over CUB image *features* and captions."""
    def __init__(self, params):
        super(CUB_Image_Sentence_ft, self).__init__(dist.Normal, params, CUB_Image_ft, CUB_Sentence_ft)
        grad = {'requires_grad': params.learn_prior}
        self._pz_params = nn.ParameterList([
            nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False),  # mu
            nn.Parameter(torch.zeros(1, params.latent_dim), **grad)  # logvar
        ])
        # balance modality log-likelihoods by dimensionality unless overridden
        self.vaes[0].llik_scaling = self.vaes[1].maxSentLen / prod(self.vaes[0].dataSize) \
            if params.llik_scaling == 0 else params.llik_scaling
        for vae in self.vaes:
            vae._pz_params = self._pz_params  # share the joint prior across modalities
        self.modelName = 'cubISft'
        self.i2w = self.vaes[1].load_vocab()  # index -> word map for pretty-printing
    @property
    def pz_params(self):
        return self._pz_params[0], \
            F.softplus(self._pz_params[1]) + Constants.eta
    def getDataLoaders(self, batch_size, shuffle=True, device='cuda'):
        """Return paired (image-feature, caption) loaders; each image repeats 10x."""
        # load base datasets
        t1, s1 = self.vaes[0].getDataLoaders(batch_size, shuffle, device)
        t2, s2 = self.vaes[1].getDataLoaders(batch_size, shuffle, device)
        kwargs = {'num_workers': 2, 'pin_memory': True} if device == 'cuda' else {}
        train_loader = DataLoader(TensorDataset([
            ResampleDataset(t1.dataset, resampler, size=len(t1.dataset) * 10),
            t2.dataset]), batch_size=batch_size, shuffle=shuffle, **kwargs)
        test_loader = DataLoader(TensorDataset([
            ResampleDataset(s1.dataset, resampler, size=len(s1.dataset) * 10),
            s2.dataset]), batch_size=batch_size, shuffle=shuffle, **kwargs)
        return train_loader, test_loader
    def generate(self, runPath, epoch):
        """Save a figure pairing generated images with generated captions."""
        N = 8
        samples = super(CUB_Image_Sentence_ft, self).generate(N)
        # map generated features back to a real training image for display
        samples[0] = self.vaes[0].unproject(samples[0], search_split='train')
        images, captions = [sample.data.cpu() for sample in samples]
        captions = self._sent_preprocess(captions)
        fig = plt.figure(figsize=(8, 6))
        for i, (image, caption) in enumerate(zip(images, captions)):
            fig = self._imshow(image, caption, i, fig, N)
        plt.savefig('{}/gen_samples_{:03d}.png'.format(runPath, epoch))
        plt.close()
    def reconstruct(self, raw_data, runPath, epoch):
        """Save within- and cross-modal reconstructions for the first 8 pairs."""
        N = 8
        recons_mat = super(CUB_Image_Sentence_ft, self).reconstruct([d[:N] for d in raw_data])
        fns = [lambda images: images.data.cpu(), lambda sentences: self._sent_preprocess(sentences)]
        for r, recons_list in enumerate(recons_mat):  # r: source modality
            for o, recon in enumerate(recons_list):  # o: target modality
                data = fns[r](raw_data[r][:N])
                recon = fns[o](recon.squeeze())
                if r != o:
                    # cross-modal: render image and caption side by side
                    fig = plt.figure(figsize=(8, 6))
                    for i, (_data, _recon) in enumerate(zip(data, recon)):
                        image, caption = (_data, _recon) if r == 0 else (_recon, _data)
                        search_split = 'test' if r == 0 else 'train'
                        image = self.vaes[0].unproject(image.unsqueeze(0), search_split=search_split)
                        fig = self._imshow(image, caption, i, fig, N)
                    plt.savefig('{}/recon_{}x{}_{:03d}.png'.format(runPath, r, o, epoch))
                    plt.close()
                else:
                    if r == 0:
                        data_ = self.vaes[0].unproject(data, search_split='test')
                        recon_ = self.vaes[0].unproject(recon, search_split='train')
                        comp = torch.cat([data_, recon_])
                        save_image(comp, '{}/recon_{}x{}_{:03d}.png'.format(runPath, r, o, epoch))
                    else:
                        with open('{}/recon_{}x{}_{:03d}.txt'.format(runPath, r, o, epoch), "w+") as txt_file:
                            for r_sent, d_sent in zip(recon, data):
                                txt_file.write('[DATA] ==> {}\n'.format(' '.join(self.i2w[str(i)] for i in d_sent)))
                                txt_file.write('[RECON] ==> {}\n\n'.format(' '.join(self.i2w[str(i)] for i in r_sent)))
    def analyse(self, data, runPath, epoch):
        """Save UMAP latent embedding and KL-distance plots."""
        zemb, zsl, kls_df = super(CUB_Image_Sentence_ft, self).analyse(data, K=10)
        labels = ['Prior', *[vae.modelName.lower() for vae in self.vaes]]
        plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
        plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
    def _sent_preprocess(self, sentences):
        """make sure raw data is always passed as dim=2 to avoid argmax.
        last dimension must always be word embedding."""
        if len(sentences.shape) > 2:
            sentences = sentences.argmax(-1).squeeze()
        return [self.vaes[1].fn_trun(s) for s in self.vaes[1].fn_2i(sentences)]
    def _imshow(self, image, caption, i, fig, N):
        """Imshow for Tensor."""
        ax = fig.add_subplot(N // 2, 4, i * 2 + 1)
        ax.axis('off')
        image = image.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
        plt.imshow(image)
        ax = fig.add_subplot(N // 2, 4, i * 2 + 2)
        pos = ax.get_position()
        ax.axis('off')
        # NOTE: the generator expression's `i` shadows the `i` parameter here
        plt.text(
            x=0.5 * (pos.x0 + pos.x1),
            y=0.5 * (pos.y0 + pos.y1),
            ha='left',
            s='{}'.format(
                ' '.join(self.i2w[str(i)] + '\n' if (n + 1) % 5 == 0
                         else self.i2w[str(i)] for n, i in enumerate(caption))),
            fontsize=6,
            verticalalignment='center',
            horizontalalignment='center'
        )
        return fig
| 6,432 | 44.302817 | 119 | py |
mmvae-public | mmvae-public/src/models/vae_cub_image_ft.py | # CUB Image feature model specification
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from numpy import sqrt
from torchvision.utils import make_grid, save_image
from datasets import CUBImageFt
from utils import Constants, NN_lookup
from vis import plot_embeddings, plot_kls_df
from .vae import VAE
# Constants
imgChans = 3
fBase = 64
class Enc(nn.Module):
    """ Generate latent parameters for CUB image feature. """
    def __init__(self, latent_dim, n_c):
        super(Enc, self).__init__()
        dim_hidden = 256
        self.enc = nn.Sequential()
        # Halve the feature width each layer until reaching dim_hidden; `n_c`
        # is assumed to be dim_hidden * 2**k.  Integer bit arithmetic replaces
        # the original float round-trip `int(torch.tensor(n_c / dim_hidden).log2())`
        # and gives the exact same layer count for all power-of-two ratios.
        n_layers = (n_c // dim_hidden).bit_length() - 1
        for i in range(n_layers):
            self.enc.add_module("layer" + str(i), nn.Sequential(
                nn.Linear(n_c // (2 ** i), n_c // (2 ** (i + 1))),
                nn.ELU(inplace=True),
            ))
        # relies on above terminating at dim_hidden
        self.fc21 = nn.Linear(dim_hidden, latent_dim)  # posterior mean head
        self.fc22 = nn.Linear(dim_hidden, latent_dim)  # pre-softplus scale head
    def forward(self, x):
        e = self.enc(x)
        # softplus + eta keeps the posterior scale strictly positive
        return self.fc21(e), F.softplus(self.fc22(e)) + Constants.eta
class Dec(nn.Module):
    """ Generate a CUB image feature given a sample from the latent space. """

    def __init__(self, latent_dim, n_c):
        super(Dec, self).__init__()
        self.n_c = n_c
        dim_hidden = 256
        self.dec = nn.Sequential()
        # Mirror of Enc: doubling linear layers latent_dim -> dim_hidden -> ... -> n_c/2.
        # Module names ("out_t", "layer1_t", ...) are part of saved checkpoints - do not rename.
        for i in range(int(torch.tensor(n_c / dim_hidden).log2())):
            indim = latent_dim if i == 0 else dim_hidden * i
            outdim = dim_hidden if i == 0 else dim_hidden * (2 * i)
            self.dec.add_module("out_t" if i == 0 else "layer" + str(i) + "_t", nn.Sequential(
                nn.Linear(indim, outdim),
                nn.ELU(inplace=True),
            ))
        # relies on above terminating at n_c // 2
        self.fc31 = nn.Linear(n_c // 2, n_c)

    def forward(self, z):
        # Flatten leading sample/batch dims, decode, then restore them.
        p = self.dec(z.view(-1, z.size(-1)))
        mean = self.fc31(p).view(*z.size()[:-1], -1)
        # Fixed likelihood scale of 0.01 (Laplace b in the enclosing VAE).
        return mean, torch.tensor([0.01]).to(mean.device)
class CUB_Image_ft(VAE):
    """ Derive a specific sub-class of a VAE for a CNN sentence model. """

    def __init__(self, params):
        super(CUB_Image_ft, self).__init__(
            dist.Normal,  # prior
            dist.Laplace,  # likelihood
            dist.Normal,  # posterior
            Enc(params.latent_dim, 2048),
            Dec(params.latent_dim, 2048),
            params
        )
        grad = {'requires_grad': params.learn_prior}
        self._pz_params = nn.ParameterList([
            nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False),  # mu
            nn.Parameter(torch.zeros(1, params.latent_dim), **grad)  # logvar
        ])
        self.modelName = 'cubIft'
        self.dataSize = torch.Size([2048])  # input feature dimensionality
        self.llik_scaling = 1.

    @property
    def pz_params(self):
        # Prior mean and strictly-positive scale (softplus + eta).
        return self._pz_params[0], \
            F.softplus(self._pz_params[1]) + Constants.eta

    # remember that when combining with captions, this should be x10
    def getDataLoaders(self, batch_size, shuffle=True, device="cuda"):
        """Return train/test loaders over pre-extracted CUB image features."""
        kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
        train_dataset = CUBImageFt('../data', 'train', device)
        test_dataset = CUBImageFt('../data', 'test', device)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=batch_size, shuffle=shuffle, **kwargs)
        test_loader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=batch_size, shuffle=shuffle, **kwargs)
        train_dataset._load_data()
        test_dataset._load_data()
        # Map a feature-space embedding back to an image via nearest-neighbour
        # lookup in the requested split's feature matrix.
        self.unproject = lambda emb_h, search_split='train', \
            te=train_dataset.ft_mat, td=train_dataset.data_mat, \
            se=test_dataset.ft_mat, sd=test_dataset.data_mat: \
            NN_lookup(emb_h, te, td) if search_split == 'train' else NN_lookup(emb_h, se, sd)
        return train_loader, test_loader

    def generate(self, runPath, epoch):
        """Sample N x K latents, unproject to images and save a tiled grid."""
        N, K = 64, 9
        samples = super(CUB_Image_ft, self).generate(N, K).data.cpu()
        samples = self.unproject(samples, search_split='train')
        samples = samples.view(K, N, *samples.size()[1:]).transpose(0, 1)
        s = [make_grid(t, nrow=int(sqrt(K)), padding=0) for t in samples.data.cpu()]
        save_image(torch.stack(s),
                   '{}/gen_samples_{:03d}.png'.format(runPath, epoch),
                   nrow=int(sqrt(N)))

    def reconstruct(self, data, runPath, epoch):
        """Reconstruct 8 test features; save data/reconstruction images stacked."""
        recon = super(CUB_Image_ft, self).reconstruct(data[:8])
        data_ = self.unproject(data[:8], search_split='test')
        recon_ = self.unproject(recon, search_split='train')
        comp = torch.cat([data_, recon_])
        save_image(comp.data.cpu(), '{}/recon_{:03d}.png'.format(runPath, epoch))

    def analyse(self, data, runPath, epoch):
        """Save latent-embedding and KL-distance plots for this epoch."""
        zemb, zsl, kls_df = super(CUB_Image_ft, self).analyse(data, K=10)
        labels = ['Prior', self.modelName.lower()]
        plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
        plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
| 5,311 | 38.348148 | 100 | py |
mmvae-public | mmvae-public/src/models/mmvae_mnist_svhn.py | # MNIST-SVHN multi-modal model specification
import os
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
from numpy import sqrt, prod
from torch.utils.data import DataLoader
from torchnet.dataset import TensorDataset, ResampleDataset
from torchvision.utils import save_image, make_grid
from vis import plot_embeddings, plot_kls_df
from .mmvae import MMVAE
from .vae_mnist import MNIST
from .vae_svhn import SVHN
class MNIST_SVHN(MMVAE):
    """Multi-modal VAE over label-paired MNIST and SVHN digits (Laplace prior)."""

    def __init__(self, params):
        super(MNIST_SVHN, self).__init__(dist.Laplace, params, MNIST, SVHN)
        grad = {'requires_grad': params.learn_prior}
        self._pz_params = nn.ParameterList([
            nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False),  # mu
            nn.Parameter(torch.zeros(1, params.latent_dim), **grad)  # logvar
        ])
        # Rescale the MNIST log-likelihood by the ratio of data dimensionalities
        # so both modalities contribute comparably, unless a scaling is given.
        self.vaes[0].llik_scaling = prod(self.vaes[1].dataSize) / prod(self.vaes[0].dataSize) \
            if params.llik_scaling == 0 else params.llik_scaling
        self.modelName = 'mnist-svhn'

    @property
    def pz_params(self):
        # Mean plus a positive softmax-normalised scale (scaled to sum to latent_dim).
        return self._pz_params[0], F.softmax(self._pz_params[1], dim=1) * self._pz_params[1].size(-1)

    def getDataLoaders(self, batch_size, shuffle=True, device='cuda'):
        """Return train/test loaders over index-paired MNIST-SVHN samples.

        Requires the paired-index files produced by bin/make-mnist-svhn-idx.py.
        """
        if not (os.path.exists('../data/train-ms-mnist-idx.pt')
                and os.path.exists('../data/train-ms-svhn-idx.pt')
                and os.path.exists('../data/test-ms-mnist-idx.pt')
                and os.path.exists('../data/test-ms-svhn-idx.pt')):
            raise RuntimeError('Generate transformed indices with the script in bin')
        # get transformed indices
        t_mnist = torch.load('../data/train-ms-mnist-idx.pt')
        t_svhn = torch.load('../data/train-ms-svhn-idx.pt')
        s_mnist = torch.load('../data/test-ms-mnist-idx.pt')
        s_svhn = torch.load('../data/test-ms-svhn-idx.pt')
        # load base datasets
        t1, s1 = self.vaes[0].getDataLoaders(batch_size, shuffle, device)
        t2, s2 = self.vaes[1].getDataLoaders(batch_size, shuffle, device)
        # Pair the modalities by resampling each base dataset through the indices.
        train_mnist_svhn = TensorDataset([
            ResampleDataset(t1.dataset, lambda d, i: t_mnist[i], size=len(t_mnist)),
            ResampleDataset(t2.dataset, lambda d, i: t_svhn[i], size=len(t_svhn))
        ])
        test_mnist_svhn = TensorDataset([
            ResampleDataset(s1.dataset, lambda d, i: s_mnist[i], size=len(s_mnist)),
            ResampleDataset(s2.dataset, lambda d, i: s_svhn[i], size=len(s_svhn))
        ])
        kwargs = {'num_workers': 2, 'pin_memory': True} if device == 'cuda' else {}
        train = DataLoader(train_mnist_svhn, batch_size=batch_size, shuffle=shuffle, **kwargs)
        test = DataLoader(test_mnist_svhn, batch_size=batch_size, shuffle=shuffle, **kwargs)
        return train, test

    def generate(self, runPath, epoch):
        """Sample N latents and save one decoded image grid per modality."""
        N = 64
        samples_list = super(MNIST_SVHN, self).generate(N)
        for i, samples in enumerate(samples_list):
            samples = samples.data.cpu()
            # wrangle things so they come out tiled
            samples = samples.view(N, *samples.size()[1:])
            save_image(samples,
                       '{}/gen_samples_{}_{:03d}.png'.format(runPath, i, epoch),
                       nrow=int(sqrt(N)))

    def reconstruct(self, data, runPath, epoch):
        """Save self/cross reconstruction grids for the first 8 items per modality."""
        recons_mat = super(MNIST_SVHN, self).reconstruct([d[:8] for d in data])
        for r, recons_list in enumerate(recons_mat):
            for o, recon in enumerate(recons_list):
                _data = data[r][:8].cpu()
                recon = recon.squeeze(0).cpu()
                # resize mnist to 32 and colour. 0 => mnist, 1 => svhn
                _data = _data if r == 1 else resize_img(_data, self.vaes[1].dataSize)
                recon = recon if o == 1 else resize_img(recon, self.vaes[1].dataSize)
                comp = torch.cat([_data, recon])
                save_image(comp, '{}/recon_{}x{}_{:03d}.png'.format(runPath, r, o, epoch))

    def analyse(self, data, runPath, epoch):
        """Save latent-embedding and KL-distance plots for this epoch."""
        zemb, zsl, kls_df = super(MNIST_SVHN, self).analyse(data, K=10)
        labels = ['Prior', *[vae.modelName.lower() for vae in self.vaes]]
        plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
        plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
def resize_img(img, refsize):
    """Zero-pad each image by 2px per side and broadcast channels up to `refsize`."""
    padded = F.pad(img, (2, 2, 2, 2))
    return padded.expand(img.size(0), *refsize)
| 4,479 | 45.185567 | 101 | py |
mmvae-public | mmvae-public/bin/make-mnist-svhn-idx.py | import torch
from torchvision import datasets, transforms
def rand_match_on_idx(l1, idx1, l2, idx2, max_d=10000, dm=10):
"""
l*: sorted labels
idx*: indices of sorted labels in original list
"""
_idx1, _idx2 = [], []
for l in l1.unique(): # assuming both have same idxs
l_idx1, l_idx2 = idx1[l1 == l], idx2[l2 == l]
n = min(l_idx1.size(0), l_idx2.size(0), max_d)
l_idx1, l_idx2 = l_idx1[:n], l_idx2[:n]
for _ in range(dm):
_idx1.append(l_idx1[torch.randperm(n)])
_idx2.append(l_idx2[torch.randperm(n)])
return torch.cat(_idx1), torch.cat(_idx2)
if __name__ == '__main__':
    max_d = 10000  # maximum number of datapoints per class
    dm = 30        # data multiplier: random permutations to match
    # get the individual datasets
    tx = transforms.ToTensor()
    train_mnist = datasets.MNIST('../data', train=True, download=True, transform=tx)
    test_mnist = datasets.MNIST('../data', train=False, download=True, transform=tx)
    train_svhn = datasets.SVHN('../data', split='train', download=True, transform=tx)
    test_svhn = datasets.SVHN('../data', split='test', download=True, transform=tx)

    # svhn labels need extra work
    # (the % 10 maps SVHN's label 10 back to digit 0)
    train_svhn.labels = torch.LongTensor(train_svhn.labels.squeeze().astype(int)) % 10
    test_svhn.labels = torch.LongTensor(test_svhn.labels.squeeze().astype(int)) % 10

    # Build and save matched index pairs for the training split.
    mnist_l, mnist_li = train_mnist.targets.sort()
    svhn_l, svhn_li = train_svhn.labels.sort()
    idx1, idx2 = rand_match_on_idx(mnist_l, mnist_li, svhn_l, svhn_li, max_d=max_d, dm=dm)
    print('len train idx:', len(idx1), len(idx2))
    torch.save(idx1, '../data/train-ms-mnist-idx.pt')
    torch.save(idx2, '../data/train-ms-svhn-idx.pt')

    # Same for the test split.
    mnist_l, mnist_li = test_mnist.targets.sort()
    svhn_l, svhn_li = test_svhn.labels.sort()
    idx1, idx2 = rand_match_on_idx(mnist_l, mnist_li, svhn_l, svhn_li, max_d=max_d, dm=dm)
    print('len test idx:', len(idx1), len(idx2))
    torch.save(idx1, '../data/test-ms-mnist-idx.pt')
    torch.save(idx2, '../data/test-ms-svhn-idx.pt')
MinkLoc3D | MinkLoc3D-master/training/train.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import argparse
import torch
from training.trainer import do_train
from misc.utils import MinkLocParams
from datasets.dataset_utils import make_dataloaders
if __name__ == '__main__':
    # Command-line entry point: parse configs, build dataloaders, run training.
    parser = argparse.ArgumentParser(description='Train Minkowski Net embeddings using BatchHard negative mining')
    parser.add_argument('--config', type=str, required=True, help='Path to configuration file')
    parser.add_argument('--model_config', type=str, required=True, help='Path to the model-specific configuration file')
    parser.add_argument('--debug', dest='debug', action='store_true')
    parser.set_defaults(debug=False)
    parser.add_argument('--visualize', dest='visualize', action='store_true')
    parser.set_defaults(visualize=False)

    args = parser.parse_args()
    print('Training config path: {}'.format(args.config))
    print('Model config path: {}'.format(args.model_config))
    print('Debug mode: {}'.format(args.debug))
    print('Visualize: {}'.format(args.visualize))

    params = MinkLocParams(args.config, args.model_config)
    params.print()

    if args.debug:
        # Surface the op that produced NaN/Inf instead of failing later in backward.
        torch.autograd.set_detect_anomaly(True)

    dataloaders = make_dataloaders(params, debug=args.debug)
    do_train(dataloaders, params, debug=args.debug, visualize=args.visualize)
MinkLoc3D | MinkLoc3D-master/training/trainer.py | # Author: Jacek Komorowski
# Warsaw University of Technology
# Train on Oxford dataset (from PointNetVLAD paper) using BatchHard hard negative mining.
import os
from datetime import datetime
import numpy as np
import torch
import pickle
import tqdm
import pathlib
from torch.utils.tensorboard import SummaryWriter
from eval.evaluate import evaluate, print_eval_stats
from misc.utils import MinkLocParams, get_datetime
from models.loss import make_loss
from models.model_factory import model_factory
def print_stats(stats, phase):
    """Pretty-print one epoch's aggregated statistics for `phase` ('train'/'val').

    The first printed line depends on which loss the stats came from
    (contrastive pairs, triplets, or plain pos/neg counts); optional
    distance and partial-loss diagnostics follow on a second line.
    """
    if 'num_pairs' in stats:
        # For batch hard contrastive loss
        s = '{} - Mean loss: {:.6f}    Avg. embedding norm: {:.4f}   Pairs per batch (all/non-zero pos/non-zero neg): {:.1f}/{:.1f}/{:.1f}'
        print(s.format(phase, stats['loss'], stats['avg_embedding_norm'], stats['num_pairs'],
                       stats['pos_pairs_above_threshold'], stats['neg_pairs_above_threshold']))
    elif 'num_triplets' in stats:
        # For triplet loss
        s = '{} - Mean loss: {:.6f}    Avg. embedding norm: {:.4f}   Triplets per batch (all/non-zero): {:.1f}/{:.1f}'
        print(s.format(phase, stats['loss'], stats['avg_embedding_norm'], stats['num_triplets'],
                       stats['num_non_zero_triplets']))
    elif 'num_pos' in stats:
        s = '{} - Mean loss: {:.6f}    Avg. embedding norm: {:.4f}   #positives/negatives: {:.1f}/{:.1f}'
        print(s.format(phase, stats['loss'], stats['avg_embedding_norm'], stats['num_pos'], stats['num_neg']))
    # Build the optional diagnostics line only from keys that are present.
    s = ''
    l = []
    if 'mean_pos_pair_dist' in stats:
        s += 'Pos dist (min/mean/max): {:.4f}/{:.4f}/{:.4f}   Neg dist (min/mean/max): {:.4f}/{:.4f}/{:.4f}'
        l += [stats['min_pos_pair_dist'], stats['mean_pos_pair_dist'], stats['max_pos_pair_dist'],
              stats['min_neg_pair_dist'], stats['mean_neg_pair_dist'], stats['max_neg_pair_dist']]
    if 'pos_loss' in stats:
        if len(s) > 0:
            s += '   '
        s += 'Pos loss: {:.4f}  Neg loss: {:.4f}'
        l += [stats['pos_loss'], stats['neg_loss']]
    if len(l) > 0:
        print(s.format(*l))
def tensors_to_numbers(stats):
    """Return a copy of `stats` with every tensor value unwrapped to a Python scalar."""
    converted = {}
    for key in stats:
        value = stats[key]
        converted[key] = value.item() if torch.is_tensor(value) else value
    return converted
def do_train(dataloaders, params: MinkLocParams, debug=False, visualize=False):
    """Train a MinkLoc model end-to-end and evaluate/pickle the final weights.

    Args:
        dataloaders: dict with a 'train' loader and optionally a 'val' loader;
            each yields (batch, positives_mask, negatives_mask).
        params: full training/model configuration.
        debug: if True, process only 2 batches per epoch.
        visualize: placeholder flag (batch visualisation is currently disabled).
    """
    # Create model class
    s = get_datetime()
    model = model_factory(params)
    model_name = 'model_' + params.model_params.model + '_' + s
    print('Model name: {}'.format(model_name))
    weights_path = create_weights_folder()
    model_pathname = os.path.join(weights_path, model_name)
    if hasattr(model, 'print_info'):
        model.print_info()
    else:
        n_params = sum([param.nelement() for param in model.parameters()])
        print('Number of model parameters: {}'.format(n_params))

    # Move the model to the proper device before configuring the optimizer
    if torch.cuda.is_available():
        device = "cuda"
        model.to(device)
    else:
        device = "cpu"
    print('Model device: {}'.format(device))

    loss_fn = make_loss(params)

    # Training elements
    if params.weight_decay is None or params.weight_decay == 0:
        optimizer = torch.optim.Adam(model.parameters(), lr=params.lr)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=params.lr, weight_decay=params.weight_decay)
    if params.scheduler is None:
        scheduler = None
    else:
        if params.scheduler == 'CosineAnnealingLR':
            scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=params.epochs+1,
                                                                   eta_min=params.min_lr)
        elif params.scheduler == 'MultiStepLR':
            scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, params.scheduler_milestones, gamma=0.1)
        else:
            raise NotImplementedError('Unsupported LR scheduler: {}'.format(params.scheduler))

    ###########################################################################
    # Initialize TensorBoard writer
    ###########################################################################
    now = datetime.now()
    logdir = os.path.join("../tf_logs", now.strftime("%Y%m%d-%H%M%S"))
    writer = SummaryWriter(logdir)

    ###########################################################################
    #
    ###########################################################################
    is_validation_set = 'val' in dataloaders
    if is_validation_set:
        phases = ['train', 'val']
    else:
        phases = ['train']

    # Training statistics
    stats = {'train': [], 'val': [], 'eval': []}
    for epoch in tqdm.tqdm(range(1, params.epochs + 1)):
        for phase in phases:
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_stats = []  # running stats for the current epoch
            count_batches = 0
            for batch, positives_mask, negatives_mask in dataloaders[phase]:
                # batch is (batch_size, n_points, 3) tensor
                # labels is list with indexes of elements forming a batch
                count_batches += 1
                batch_stats = {}
                if debug and count_batches > 2:
                    break
                batch = {e: batch[e].to(device) for e in batch}
                n_positives = torch.sum(positives_mask).item()
                n_negatives = torch.sum(negatives_mask).item()
                if n_positives == 0 or n_negatives == 0:
                    # Skip a batch without positives or negatives
                    print('WARNING: Skipping batch without positive or negative examples')
                    continue
                optimizer.zero_grad()
                if visualize:
                    #visualize_batch(batch)
                    pass
                # Gradients are only needed in the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    # Compute embeddings of all elements
                    embeddings = model(batch)
                    loss, temp_stats, _ = loss_fn(embeddings, positives_mask, negatives_mask)
                    temp_stats = tensors_to_numbers(temp_stats)
                    batch_stats.update(temp_stats)
                    batch_stats['loss'] = loss.item()
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_stats.append(batch_stats)
                torch.cuda.empty_cache()  # Prevent excessive GPU memory consumption by SparseTensors
            # ******* PHASE END *******
            # Compute mean stats for the epoch
            epoch_stats = {}
            for key in running_stats[0].keys():
                temp = [e[key] for e in running_stats]
                epoch_stats[key] = np.mean(temp)
            stats[phase].append(epoch_stats)
            print_stats(epoch_stats, phase)
        # ******* EPOCH END *******
        if scheduler is not None:
            scheduler.step()
        # Log loss (and non-zero pair/triplet counts) to TensorBoard.
        loss_metrics = {'train': stats['train'][-1]['loss']}
        if 'val' in phases:
            loss_metrics['val'] = stats['val'][-1]['loss']
        writer.add_scalars('Loss', loss_metrics, epoch)
        if 'num_triplets' in stats['train'][-1]:
            nz_metrics = {'train': stats['train'][-1]['num_non_zero_triplets']}
            if 'val' in phases:
                nz_metrics['val'] = stats['val'][-1]['num_non_zero_triplets']
            writer.add_scalars('Non-zero triplets', nz_metrics, epoch)
        elif 'num_pairs' in stats['train'][-1]:
            nz_metrics = {'train_pos': stats['train'][-1]['pos_pairs_above_threshold'],
                          'train_neg': stats['train'][-1]['neg_pairs_above_threshold']}
            if 'val' in phases:
                nz_metrics['val_pos'] = stats['val'][-1]['pos_pairs_above_threshold']
                nz_metrics['val_neg'] = stats['val'][-1]['neg_pairs_above_threshold']
            writer.add_scalars('Non-zero pairs', nz_metrics, epoch)
        if params.batch_expansion_th is not None:
            # Dynamic batch expansion
            epoch_train_stats = stats['train'][-1]
            if 'num_non_zero_triplets' not in epoch_train_stats:
                print('WARNING: Batch size expansion is enabled, but the loss function is not supported')
            else:
                # Ratio of non-zero triplets
                rnz = epoch_train_stats['num_non_zero_triplets'] / epoch_train_stats['num_triplets']
                # Grow the batch when too few triplets remain active.
                if rnz < params.batch_expansion_th:
                    dataloaders['train'].batch_sampler.expand_batch()
    print('')

    # Save final model weights
    final_model_path = model_pathname + '_final.pth'
    torch.save(model.state_dict(), final_model_path)
    stats = {'train_stats': stats, 'params': params}
    # Evaluate the final model
    model.eval()
    final_eval_stats = evaluate(model, device, params)
    print('Final model:')
    print_eval_stats(final_eval_stats)
    stats['eval'] = {'final': final_eval_stats}
    print('')

    # Pickle training stats and parameters
    pickle_path = model_pathname + '_stats.pickle'
    pickle.dump(stats, open(pickle_path, "wb"))

    # Append key experimental metrics to experiment summary file
    model_params_name = os.path.split(params.model_params.model_params_path)[1]
    config_name = os.path.split(params.params_path)[1]
    _, model_name = os.path.split(model_pathname)
    prefix = "{}, {}, {}".format(model_params_name, config_name, model_name)
    export_eval_stats("experiment_results.txt", prefix, final_eval_stats)
def export_eval_stats(file_name, prefix, eval_stats):
    """Append one CSV-style line (per-dataset and mean recalls) to `file_name`."""
    datasets = ['oxford', 'university', 'residential', 'business']
    one_percent_recalls = [eval_stats[ds]['ave_one_percent_recall'] for ds in datasets]
    top1_recalls = [eval_stats[ds]['ave_recall'][0] for ds in datasets]

    fields = [prefix]
    for r1p, r1 in zip(one_percent_recalls, top1_recalls):
        fields.append("{:0.2f}, {:0.2f}".format(r1p, r1))
    # Trailing field carries the across-dataset means and the newline.
    fields.append("{:0.2f}, {:0.2f}\n".format(np.mean(one_percent_recalls), np.mean(top1_recalls)))

    with open(file_name, "a") as f:
        f.write(", ".join(fields))
def create_weights_folder():
    """Ensure a 'weights' folder exists one level above this module; return its path."""
    # Create a folder to save weights of trained models
    this_dir = pathlib.Path(__file__).parent.absolute()
    parent_dir, _ = os.path.split(this_dir)
    weights_path = os.path.join(parent_dir, 'weights')
    if not os.path.exists(weights_path):
        os.mkdir(weights_path)
    assert os.path.exists(weights_path), 'Cannot create weights folder: {}'.format(weights_path)
    return weights_path
MinkLoc3D | MinkLoc3D-master/eval/evaluate.py | # Author: Jacek Komorowski
# Warsaw University of Technology
# Evaluation code adapted from PointNetVlad code: https://github.com/mikacuy/pointnetvlad
from sklearn.neighbors import KDTree
import numpy as np
import pickle
import os
import argparse
import torch
import tqdm
import MinkowskiEngine as ME
import random
from misc.utils import MinkLocParams
from models.model_factory import model_factory
def evaluate(model, device, params, silent=True):
    """Evaluate `model` on every (database, query) pickle pair listed in params.

    Returns a dict mapping location name (e.g. 'oxford') to its stats dict.
    """
    # Run evaluation on all eval datasets
    assert len(params.eval_database_files) == len(params.eval_query_files)
    stats = {}
    for database_file, query_file in zip(params.eval_database_files, params.eval_query_files):
        # Extract location name from query and database files
        # (files are named '<location>_...', so both prefixes must agree)
        location_name = database_file.split('_')[0]
        temp = query_file.split('_')[0]
        assert location_name == temp, 'Database location: {} does not match query location: {}'.format(database_file,
                                                                                                       query_file)
        p = os.path.join(params.dataset_folder, database_file)
        with open(p, 'rb') as f:
            database_sets = pickle.load(f)
        p = os.path.join(params.dataset_folder, query_file)
        with open(p, 'rb') as f:
            query_sets = pickle.load(f)
        temp = evaluate_dataset(model, device, params, database_sets, query_sets, silent=silent)
        stats[location_name] = temp
    return stats
def evaluate_dataset(model, device, params, database_sets, query_sets, silent=True):
    """Run place-recognition evaluation on a single dataset.

    Computes embeddings for every database and query traversal, then averages
    recall@1..25, top-1 similarity and top-1% recall over all cross pairs of
    traversals (i != j).

    Returns:
        dict with 'ave_one_percent_recall', 'ave_recall' (length-25 array)
        and 'average_similarity'.
    """
    recall = np.zeros(25)
    count = 0
    similarity = []
    one_percent_recall = []

    database_embeddings = []
    query_embeddings = []

    model.eval()

    # Embed every traversal. (Loop variable renamed from `set`, which shadowed
    # the builtin.)
    for elem_set in tqdm.tqdm(database_sets, disable=silent):
        database_embeddings.append(get_latent_vectors(model, elem_set, device, params))
    for elem_set in tqdm.tqdm(query_sets, disable=silent):
        query_embeddings.append(get_latent_vectors(model, elem_set, device, params))

    # Cross-evaluate every query traversal against every other database traversal.
    for i in range(len(query_sets)):
        for j in range(len(query_sets)):
            if i == j:
                continue
            pair_recall, pair_similarity, pair_opr = get_recall(i, j, database_embeddings, query_embeddings, query_sets,
                                                                database_sets)
            recall += np.array(pair_recall)
            count += 1
            one_percent_recall.append(pair_opr)
            for x in pair_similarity:
                similarity.append(x)

    ave_recall = recall / count
    average_similarity = np.mean(similarity)
    ave_one_percent_recall = np.mean(one_percent_recall)
    stats = {'ave_one_percent_recall': ave_one_percent_recall, 'ave_recall': ave_recall,
             'average_similarity': average_similarity}
    return stats
def load_pc(file_name, params):
    """Read a raw float64 point cloud file and return an (N, 3) float32 tensor.

    Coordinates are within the -1..1 range in each dimension.
    """
    path = os.path.join(params.dataset_folder, file_name)
    raw = np.fromfile(path, dtype=np.float64)
    assert raw.shape[0] == params.num_points * 3, "Error in point cloud shape: {}".format(path)
    points = raw.reshape((raw.shape[0] // 3, 3))
    return torch.tensor(points, dtype=torch.float)
def get_latent_vectors(model, set, device, params):
    """Embed every cloud in one traversal; returns a (len(set), dim) numpy array."""
    # Adapted from original PointNetVLAD code
    # NOTE(review): the parameter name `set` shadows the builtin; it is a dict
    # mapping element index -> {'query': path, ...}.
    model.eval()
    embeddings_l = []
    for elem_ndx in set:
        x = load_pc(set[elem_ndx]["query"], params)
        with torch.no_grad():
            # coords are (n_clouds, num_points, channels) tensor
            coords = ME.utils.sparse_quantize(coordinates=x,
                                              quantization_size=params.model_params.mink_quantization_size)
            bcoords = ME.utils.batched_coordinates([coords])
            # Assign a dummy feature equal to 1 to each point
            # Coords must be on CPU, features can be on GPU - see MinkowskiEngine documentation
            feats = torch.ones((bcoords.shape[0], 1), dtype=torch.float32)
            batch = {'coords': bcoords.to(device), 'features': feats.to(device)}
            embedding = model(batch)
            # embedding is (1, 1024) tensor
            if params.normalize_embeddings:
                embedding = torch.nn.functional.normalize(embedding, p=2, dim=1)  # Normalize embeddings
            embedding = embedding.detach().cpu().numpy()
        embeddings_l.append(embedding)
    embeddings = np.vstack(embeddings_l)
    return embeddings
def get_recall(m, n, database_vectors, query_vectors, query_sets, database_sets):
    """Recall@1..25 and top-1% recall of query set `n` against database set `m`."""
    # Original PointNetVLAD code
    database_output = database_vectors[m]
    queries_output = query_vectors[n]

    # When embeddings are normalized, using Euclidean distance gives the same
    # nearest neighbour search results as using cosine distance
    database_nbrs = KDTree(database_output)
    num_neighbors = 25
    recall = [0] * num_neighbors

    top1_similarity_score = []
    one_percent_retrieved = 0
    # Top 1% of the database, but at least one element.
    threshold = max(int(round(len(database_output)/100.0)), 1)

    num_evaluated = 0
    for i in range(len(queries_output)):
        # i is query element ndx
        query_details = query_sets[n][i]    # {'query': path, 'northing': , 'easting': }
        true_neighbors = query_details[m]
        if len(true_neighbors) == 0:
            # No ground-truth matches for this query in database m - skip it.
            continue
        num_evaluated += 1
        distances, indices = database_nbrs.query(np.array([queries_output[i]]), k=num_neighbors)
        # recall[j] counts queries whose first correct match is at rank j.
        for j in range(len(indices[0])):
            if indices[0][j] in true_neighbors:
                if j == 0:
                    similarity = np.dot(queries_output[i], database_output[indices[0][j]])
                    top1_similarity_score.append(similarity)
                recall[j] += 1
                break
        if len(list(set(indices[0][0:threshold]).intersection(set(true_neighbors)))) > 0:
            one_percent_retrieved += 1

    # NOTE(review): raises ZeroDivisionError when no query had ground-truth
    # matches (num_evaluated == 0) - confirm inputs always contain matches.
    one_percent_recall = (one_percent_retrieved/float(num_evaluated))*100
    # Cumulative sum converts rank histogram into recall@1..25.
    recall = (np.cumsum(recall)/float(num_evaluated))*100
    return recall, top1_similarity_score, one_percent_recall
def print_eval_stats(stats):
    """Print per-dataset recall and similarity summaries to stdout."""
    for database_name in stats:
        ds_stats = stats[database_name]
        print('Dataset: {}'.format(database_name))
        t = 'Avg. top 1% recall: {:.2f}   Avg. similarity: {:.4f}   Avg. recall @N:'
        print(t.format(ds_stats['ave_one_percent_recall'], ds_stats['average_similarity']))
        print(ds_stats['ave_recall'])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Evaluate model on PointNetVLAD (Oxford) dataset')
parser.add_argument('--config', type=str, required=True, help='Path to configuration file')
parser.add_argument('--model_config', type=str, required=True, help='Path to the model-specific configuration file')
parser.add_argument('--weights', type=str, required=False, help='Trained model weights')
args = parser.parse_args()
print('Config path: {}'.format(args.config))
print('Model config path: {}'.format(args.model_config))
if args.weights is None:
w = 'RANDOM WEIGHTS'
else:
w = args.weights
print('Weights: {}'.format(w))
print('')
params = MinkLocParams(args.config, args.model_config)
params.print()
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
print('Device: {}'.format(device))
model = model_factory(params)
if args.weights is not None:
assert os.path.exists(args.weights), 'Cannot open network weights: {}'.format(args.weights)
print('Loading weights: {}'.format(args.weights))
model.load_state_dict(torch.load(args.weights, map_location=device))
model.to(device)
stats = evaluate(model, device, params, silent=False)
print_eval_stats(stats)
| 7,883 | 36.542857 | 120 | py |
MinkLoc3D | MinkLoc3D-master/models/minkloc.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import torch
import MinkowskiEngine as ME
from models.minkfpn import MinkFPN
from models.netvlad import MinkNetVladWrapper
import layers.pooling as pooling
class MinkLoc(torch.nn.Module):
    """Point-cloud descriptor network: MinkFPN backbone plus a global pooling head."""

    def __init__(self, model, in_channels, feature_size, output_dim, planes, layers, num_top_down, conv0_kernel_size):
        super().__init__()
        self.model = model
        self.in_channels = in_channels
        self.feature_size = feature_size    # Size of local features produced by local feature extraction block
        self.output_dim = output_dim    # Dimensionality of the global descriptor
        self.backbone = MinkFPN(in_channels=in_channels, out_channels=self.feature_size, num_top_down=num_top_down,
                                conv0_kernel_size=conv0_kernel_size, layers=layers, planes=planes)
        self.n_backbone_features = output_dim
        # The pooling head is selected by the model-name suffix.
        if model == 'MinkFPN_Max':
            assert self.feature_size == self.output_dim, 'output_dim must be the same as feature_size'
            self.pooling = pooling.MAC()
        elif model == 'MinkFPN_GeM':
            assert self.feature_size == self.output_dim, 'output_dim must be the same as feature_size'
            self.pooling = pooling.GeM()
        elif model == 'MinkFPN_NetVlad':
            self.pooling = MinkNetVladWrapper(feature_size=self.feature_size, output_dim=self.output_dim,
                                              cluster_size=64, gating=False)
        elif model == 'MinkFPN_NetVlad_CG':
            self.pooling = MinkNetVladWrapper(feature_size=self.feature_size, output_dim=self.output_dim,
                                              cluster_size=64, gating=True)
        else:
            raise NotImplementedError('Model not implemented: {}'.format(model))

    def forward(self, batch):
        """Compute a (batch_size, output_dim) global descriptor from a sparse batch."""
        # Coords must be on CPU, features can be on GPU - see MinkowskiEngine documentation
        x = ME.SparseTensor(batch['features'], coordinates=batch['coords'])
        x = self.backbone(x)
        # x is (num_points, n_features) tensor
        assert x.shape[1] == self.feature_size, 'Backbone output tensor has: {} channels. Expected: {}'.format(x.shape[1], self.feature_size)
        x = self.pooling(x)
        assert x.dim() == 2, 'Expected 2-dimensional tensor (batch_size,output_dim). Got {} dimensions.'.format(x.dim())
        assert x.shape[1] == self.output_dim, 'Output tensor has: {} channels. Expected: {}'.format(x.shape[1], self.output_dim)
        # x is (batch_size, output_dim) tensor
        return x

    def print_info(self):
        """Print parameter counts for the whole model, backbone and pooling head."""
        print('Model class: MinkLoc')
        n_params = sum([param.nelement() for param in self.parameters()])
        print('Total parameters: {}'.format(n_params))
        n_params = sum([param.nelement() for param in self.backbone.parameters()])
        print('Backbone parameters: {}'.format(n_params))
        n_params = sum([param.nelement() for param in self.pooling.parameters()])
        print('Aggregation parameters: {}'.format(n_params))
        if hasattr(self.backbone, 'print_info'):
            self.backbone.print_info()
        if hasattr(self.pooling, 'print_info'):
            self.pooling.print_info()
MinkLoc3D | MinkLoc3D-master/models/model_factory.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import models.minkloc as minkloc
def model_factory(params):
    """Instantiate the network described by `params.model_params`.

    Raises:
        NotImplementedError: if the configured model name is not a MinkFPN variant.
    """
    mp = params.model_params
    in_channels = 1  # each quantized point carries a single dummy feature

    if 'MinkFPN' not in mp.model:
        raise NotImplementedError('Model not implemented: {}'.format(mp.model))

    return minkloc.MinkLoc(mp.model, in_channels=in_channels,
                           feature_size=mp.feature_size,
                           output_dim=mp.output_dim, planes=mp.planes,
                           layers=mp.layers, num_top_down=mp.num_top_down,
                           conv0_kernel_size=mp.conv0_kernel_size)
| 793 | 38.7 | 113 | py |
MinkLoc3D | MinkLoc3D-master/models/resnet.py | # Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
class ResNetBase(nn.Module):
    # Subclasses override these class attributes to configure the network.
    block = None  # residual block class (e.g. BasicBlock or Bottleneck); must be set
    layers = ()  # number of residual blocks per stage
    init_dim = 64  # channel count after the stem convolution
    planes = (64, 128, 256, 512)  # channel counts of the four residual stages
    def __init__(self, in_channels, out_channels, D=3):
        # D: spatial dimensionality of the sparse tensors (3 for point clouds).
        nn.Module.__init__(self)
        self.D = D
        # Subclasses must define `block` before instantiation.
        assert self.block is not None

        self.network_initialization(in_channels, out_channels, D)
        self.weight_initialization()
def network_initialization(self, in_channels, out_channels, D):
self.inplanes = self.init_dim
self.conv1 = ME.MinkowskiConvolution(
in_channels, self.inplanes, kernel_size=5, stride=2, dimension=D)
self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = ME.MinkowskiAvgPooling(kernel_size=2, stride=2, dimension=D)
self.layer1 = self._make_layer(
self.block, self.planes[0], self.layers[0], stride=2)
self.layer2 = self._make_layer(
self.block, self.planes[1], self.layers[1], stride=2)
self.layer3 = self._make_layer(
self.block, self.planes[2], self.layers[2], stride=2)
self.layer4 = self._make_layer(
self.block, self.planes[3], self.layers[3], stride=2)
self.conv5 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=3, stride=3, dimension=D)
self.bn5 = ME.MinkowskiBatchNorm(self.inplanes)
self.glob_avg = ME.MinkowskiGlobalMaxPooling()
self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiConvolution):
ME.utils.kaiming_normal_(m.kernel, mode='fan_out', nonlinearity='relu')
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self,
block,
planes,
blocks,
stride=1,
dilation=1,
bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
ME.MinkowskiConvolution(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
dimension=self.D),
ME.MinkowskiBatchNorm(planes * block.expansion))
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
dimension=self.D))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
dimension=self.D))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
x = self.glob_avg(x)
return self.final(x)
class ResNet14(ResNetBase):
    """ResNet-14: BasicBlock, (1, 1, 1, 1) blocks per stage.

    BUGFIX: ResNetBase reads the lowercase ``block``/``layers`` class
    attributes (and asserts ``self.block is not None``), so defining only the
    uppercase names left ``block = None`` and made instantiation fail with
    AssertionError. The lowercase attributes are now set; the uppercase
    aliases are kept for backward compatibility.
    """
    BLOCK = BasicBlock
    LAYERS = (1, 1, 1, 1)
    block = BLOCK
    layers = LAYERS
class ResNet18(ResNetBase):
    """ResNet-18: BasicBlock, (2, 2, 2, 2) blocks per stage.

    BUGFIX: ResNetBase reads lowercase ``block``/``layers`` (asserting
    ``self.block is not None``); uppercase-only attributes made instantiation
    fail. Lowercase attributes added; uppercase kept for compatibility.
    """
    BLOCK = BasicBlock
    LAYERS = (2, 2, 2, 2)
    block = BLOCK
    layers = LAYERS
class ResNet34(ResNetBase):
    """ResNet-34: BasicBlock, (3, 4, 6, 3) blocks per stage.

    BUGFIX: ResNetBase reads lowercase ``block``/``layers`` (asserting
    ``self.block is not None``); uppercase-only attributes made instantiation
    fail. Lowercase attributes added; uppercase kept for compatibility.
    """
    BLOCK = BasicBlock
    LAYERS = (3, 4, 6, 3)
    block = BLOCK
    layers = LAYERS
class ResNet50(ResNetBase):
    """ResNet-50: Bottleneck, (3, 4, 6, 3) blocks per stage.

    BUGFIX: ResNetBase reads lowercase ``block``/``layers`` (asserting
    ``self.block is not None``); uppercase-only attributes made instantiation
    fail. Lowercase attributes added; uppercase kept for compatibility.
    """
    BLOCK = Bottleneck
    LAYERS = (3, 4, 6, 3)
    block = BLOCK
    layers = LAYERS
class ResNet101(ResNetBase):
    """ResNet-101: Bottleneck, (3, 4, 23, 3) blocks per stage.

    BUGFIX: ResNetBase reads lowercase ``block``/``layers`` (asserting
    ``self.block is not None``); uppercase-only attributes made instantiation
    fail. Lowercase attributes added; uppercase kept for compatibility.
    """
    BLOCK = Bottleneck
    LAYERS = (3, 4, 23, 3)
    block = BLOCK
    layers = LAYERS
| 5,315 | 31.814815 | 87 | py |
MinkLoc3D | MinkLoc3D-master/models/minkfpn.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock
from models.resnet import ResNetBase
class MinkFPN(ResNetBase):
    # Feature Pyramid Network (FPN) architecture implementation using Minkowski ResNet building blocks
    def __init__(self, in_channels, out_channels, num_top_down=1, conv0_kernel_size=5, block=BasicBlock,
                 layers=(1, 1, 1), planes=(32, 64, 64)):
        # num_top_down: number of top-down (upsampling) steps; 0 <= num_top_down <= len(layers).
        # out_channels doubles as the channel width of every lateral/top-down feature map.
        assert len(layers) == len(planes)
        assert 1 <= len(layers)
        assert 0 <= num_top_down <= len(layers)
        self.num_bottom_up = len(layers)
        self.num_top_down = num_top_down
        self.conv0_kernel_size = conv0_kernel_size
        self.block = block
        self.layers = layers
        self.planes = planes
        self.lateral_dim = out_channels
        self.init_dim = planes[0]
        # ResNetBase.__init__ calls network_initialization() and weight_initialization()
        ResNetBase.__init__(self, in_channels, out_channels, D=3)
    def network_initialization(self, in_channels, out_channels, D):
        """Build the bottom-up trunk, the lateral 1x1 convs and the top-down path."""
        assert len(self.layers) == len(self.planes)
        assert len(self.planes) == self.num_bottom_up
        self.convs = nn.ModuleList()  # Bottom-up convolutional blocks with stride=2
        self.bn = nn.ModuleList()  # Bottom-up BatchNorms
        self.blocks = nn.ModuleList()  # Bottom-up residual blocks
        self.tconvs = nn.ModuleList()  # Top-down transposed convolutions
        self.conv1x1 = nn.ModuleList()  # 1x1 convolutions in lateral connections
        # The first convolution is a special case: configurable (larger) kernel, no stride
        self.inplanes = self.planes[0]
        self.conv0 = ME.MinkowskiConvolution(in_channels, self.inplanes, kernel_size=self.conv0_kernel_size,
                                             dimension=D)
        self.bn0 = ME.MinkowskiBatchNorm(self.inplanes)
        # One (strided conv, BN, residual block) triple per bottom-up stage;
        # _make_layer updates self.inplanes to each stage's output width
        for plane, layer in zip(self.planes, self.layers):
            self.convs.append(ME.MinkowskiConvolution(self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D))
            self.bn.append(ME.MinkowskiBatchNorm(self.inplanes))
            self.blocks.append(self._make_layer(self.block, plane, layer))
        # Lateral connections: project each skipped bottom-up map to lateral_dim,
        # paired with a stride-2 transposed conv that upsamples the top-down path
        for i in range(self.num_top_down):
            self.conv1x1.append(ME.MinkowskiConvolution(self.planes[-1 - i], self.lateral_dim, kernel_size=1,
                                                        stride=1, dimension=D))
            self.tconvs.append(ME.MinkowskiConvolutionTranspose(self.lateral_dim, self.lateral_dim, kernel_size=2,
                                                                stride=2, dimension=D))
        # There's one more lateral connection than top-down TConv blocks:
        # conv1x1[0] is applied to the deepest bottom-up map before the top-down pass
        if self.num_top_down < self.num_bottom_up:
            # Lateral connection from Conv block 1 or above
            self.conv1x1.append(ME.MinkowskiConvolution(self.planes[-1 - self.num_top_down], self.lateral_dim, kernel_size=1,
                                                        stride=1, dimension=D))
        else:
            # Lateral connection from Conv0 block (which outputs planes[0] channels)
            self.conv1x1.append(ME.MinkowskiConvolution(self.planes[0], self.lateral_dim, kernel_size=1,
                                                        stride=1, dimension=D))
        self.relu = ME.MinkowskiReLU(inplace=True)
    def forward(self, x):
        # *** BOTTOM-UP PASS ***
        # First bottom-up convolution is special (configurable kernel, no stride)
        feature_maps = []
        x = self.conv0(x)
        x = self.bn0(x)
        x = self.relu(x)
        # When every stage is mirrored top-down, the conv0 output itself is a skip map
        if self.num_top_down == self.num_bottom_up:
            feature_maps.append(x)
        # BOTTOM-UP PASS: save the maps of the last num_top_down stages
        # (except the deepest one, which seeds the top-down pass directly)
        for ndx, (conv, bn, block) in enumerate(zip(self.convs, self.bn, self.blocks)):
            x = conv(x)     # Decreases spatial resolution (conv stride=2)
            x = bn(x)
            x = self.relu(x)
            x = block(x)
            if self.num_bottom_up - 1 - self.num_top_down <= ndx < len(self.convs) - 1:
                feature_maps.append(x)
        assert len(feature_maps) == self.num_top_down
        x = self.conv1x1[0](x)
        # TOP-DOWN PASS: upsample and add the matching lateral projection
        for ndx, tconv in enumerate(self.tconvs):
            x = tconv(x)  # Upsample using transposed convolution
            x = x + self.conv1x1[ndx + 1](feature_maps[-ndx - 1])
        return x
| 4,364 | 44.947368 | 125 | py |
MinkLoc3D | MinkLoc3D-master/models/loss.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import numpy as np
import torch
from pytorch_metric_learning import losses, miners, reducers
from pytorch_metric_learning.distances import LpDistance
def make_loss(params):
    """Construct the metric-learning loss selected by ``params.loss``.

    Both supported losses use batch-hard mining and are called as
    ``loss_fn(embeddings, positives_mask, negatives_mask)``.

    :param params: configuration object; reads ``loss`` plus the margin and
        ``normalize_embeddings`` fields relevant to the selected loss
    :return: a callable loss object
    :raises NotImplementedError: if ``params.loss`` names an unknown loss
    """
    if params.loss == 'BatchHardTripletMarginLoss':
        # BatchHard mining with triplet margin loss
        # Expects input: embeddings, positives_mask, negatives_mask
        loss_fn = BatchHardTripletLossWithMasks(params.margin, params.normalize_embeddings)
    elif params.loss == 'BatchHardContrastiveLoss':
        loss_fn = BatchHardContrastiveLossWithMasks(params.pos_margin, params.neg_margin, params.normalize_embeddings)
    else:
        # Put the offending name in the exception itself (previously it was only
        # print()'d and a bare NotImplementedError raised, losing the message
        # wherever the exception is caught or logged).
        raise NotImplementedError('Unknown loss: {}'.format(params.loss))
    return loss_fn
class HardTripletMinerWithMasks:
    """Batch-hard miner working on boolean masks instead of labels.

    For every anchor it selects the hardest (farthest) positive and the
    hardest (closest) negative under the supplied distance, keeping only
    anchors that have at least one positive and one negative. Pair-distance
    statistics from the last call are kept on the instance.
    """

    def __init__(self, distance):
        self.distance = distance
        # Statistics of the most recent mining pass
        self.max_pos_pair_dist = None
        self.max_neg_pair_dist = None
        self.mean_pos_pair_dist = None
        self.mean_neg_pair_dist = None
        self.min_pos_pair_dist = None
        self.min_neg_pair_dist = None

    def __call__(self, embeddings, positives_mask, negatives_mask):
        assert embeddings.dim() == 2
        detached = embeddings.detach()
        with torch.no_grad():
            return self.mine(detached, positives_mask, negatives_mask)

    def mine(self, embeddings, positives_mask, negatives_mask):
        # Based on pytorch-metric-learning implementation
        dist_mat = self.distance(embeddings)
        (pos_dist, pos_idx), has_pos = get_max_per_row(dist_mat, positives_mask)
        (neg_dist, neg_idx), has_neg = get_min_per_row(dist_mat, negatives_mask)
        # Keep only anchors with both a valid positive and a valid negative
        keep = torch.where(has_pos & has_neg)
        anchors = torch.arange(dist_mat.size(0)).to(pos_idx.device)[keep]
        positives = pos_idx[keep]
        negatives = neg_idx[keep]
        self.max_pos_pair_dist = torch.max(pos_dist).item()
        self.mean_pos_pair_dist = torch.mean(pos_dist).item()
        self.min_pos_pair_dist = torch.min(pos_dist).item()
        self.max_neg_pair_dist = torch.max(neg_dist).item()
        self.mean_neg_pair_dist = torch.mean(neg_dist).item()
        self.min_neg_pair_dist = torch.min(neg_dist).item()
        return anchors, positives, negatives
def get_max_per_row(mat, mask):
    """Row-wise max of ``mat`` restricted to entries where ``mask`` is True.

    Masked-out entries are treated as 0. Returns ``(torch.max(..., dim=1)
    result, valid_rows)`` where ``valid_rows[i]`` is True iff row i has at
    least one unmasked entry.
    """
    valid_rows = mask.any(dim=1)
    masked_mat = torch.where(mask, mat, torch.zeros_like(mat))
    return torch.max(masked_mat, dim=1), valid_rows
def get_min_per_row(mat, mask):
    """Row-wise min of ``mat`` restricted to entries where ``mask`` is True.

    Masked-out entries are treated as +inf. Returns ``(torch.min(..., dim=1)
    result, valid_rows)`` where ``valid_rows[i]`` is True iff row i has at
    least one unmasked entry.
    """
    valid_rows = mask.any(dim=1)
    inf_fill = torch.full_like(mat, float('inf'))
    masked_mat = torch.where(mask, mat, inf_fill)
    return torch.min(masked_mat, dim=1), valid_rows
class BatchHardTripletLossWithMasks:
    """Triplet-margin loss (with swap) over batch-hard mined triplets.

    Positives/negatives per anchor are given by boolean masks rather than
    labels; Euclidean (Lp) distance is used for both mining and the loss.
    Calling the object returns ``(loss, stats_dict, hard_triplets)``.
    """

    def __init__(self, margin, normalize_embeddings):
        self.margin = margin
        self.normalize_embeddings = normalize_embeddings
        # We use triplet loss with Euclidean distance
        self.distance = LpDistance(normalize_embeddings=normalize_embeddings, collect_stats=True)
        self.miner_fn = HardTripletMinerWithMasks(distance=self.distance)
        avg_nonzero_reducer = reducers.AvgNonZeroReducer(collect_stats=True)
        self.loss_fn = losses.TripletMarginLoss(margin=self.margin, swap=True, distance=self.distance,
                                                reducer=avg_nonzero_reducer, collect_stats=True)

    def __call__(self, embeddings, positives_mask, negatives_mask):
        triplets = self.miner_fn(embeddings, positives_mask, negatives_mask)
        # pytorch-metric-learning needs labels; with explicit triplets any
        # unique per-element labels will do
        dummy_labels = torch.arange(embeddings.shape[0]).to(embeddings.device)
        loss = self.loss_fn(embeddings, dummy_labels, triplets)
        miner = self.miner_fn
        stats = {
            'loss': loss.item(),
            'avg_embedding_norm': self.loss_fn.distance.final_avg_query_norm,
            'num_non_zero_triplets': self.loss_fn.reducer.triplets_past_filter,
            'num_triplets': len(triplets[0]),
            'mean_pos_pair_dist': miner.mean_pos_pair_dist,
            'mean_neg_pair_dist': miner.mean_neg_pair_dist,
            'max_pos_pair_dist': miner.max_pos_pair_dist,
            'max_neg_pair_dist': miner.max_neg_pair_dist,
            'min_pos_pair_dist': miner.min_pos_pair_dist,
            'min_neg_pair_dist': miner.min_neg_pair_dist,
        }
        return loss, stats, triplets
class BatchHardContrastiveLossWithMasks:
    """Contrastive loss over batch-hard mined pairs defined by masks.

    Uses the same batch-hard miner as the triplet variant; the mined
    (anchor, positive, negative) triplets yield 2 pairs each. Calling the
    object returns ``(loss, stats_dict, hard_triplets)``.
    """

    def __init__(self, pos_margin, neg_margin, normalize_embeddings):
        self.pos_margin = pos_margin
        self.neg_margin = neg_margin
        self.distance = LpDistance(normalize_embeddings=normalize_embeddings, collect_stats=True)
        self.miner_fn = HardTripletMinerWithMasks(distance=self.distance)
        # We use contrastive loss with squared Euclidean distance
        avg_nonzero_reducer = reducers.AvgNonZeroReducer(collect_stats=True)
        self.loss_fn = losses.ContrastiveLoss(pos_margin=self.pos_margin, neg_margin=self.neg_margin,
                                              distance=self.distance, reducer=avg_nonzero_reducer, collect_stats=True)

    def __call__(self, embeddings, positives_mask, negatives_mask):
        triplets = self.miner_fn(embeddings, positives_mask, negatives_mask)
        dummy_labels = torch.arange(embeddings.shape[0]).to(embeddings.device)
        loss = self.loss_fn(embeddings, dummy_labels, triplets)
        miner = self.miner_fn
        pos_reducer = self.loss_fn.reducer.reducers['pos_loss']
        neg_reducer = self.loss_fn.reducer.reducers['neg_loss']
        stats = {
            'loss': loss.item(),
            'avg_embedding_norm': self.loss_fn.distance.final_avg_query_norm,
            'pos_pairs_above_threshold': pos_reducer.pos_pairs_above_threshold,
            'neg_pairs_above_threshold': neg_reducer.neg_pairs_above_threshold,
            'pos_loss': pos_reducer.pos_loss.item(),
            'neg_loss': neg_reducer.neg_loss.item(),
            'num_pairs': 2 * len(triplets[0]),
            'mean_pos_pair_dist': miner.mean_pos_pair_dist,
            'mean_neg_pair_dist': miner.mean_neg_pair_dist,
            'max_pos_pair_dist': miner.max_pos_pair_dist,
            'max_neg_pair_dist': miner.max_neg_pair_dist,
            'min_pos_pair_dist': miner.min_pos_pair_dist,
            'min_neg_pair_dist': miner.min_neg_pair_dist,
        }
        return loss, stats, triplets
| 6,696 | 49.353383 | 118 | py |
MinkLoc3D | MinkLoc3D-master/models/netvlad.py | # Code taken from PointNetVLAD Pytorch implementation: https://github.com/cattaneod/PointNetVlad-Pytorch
# Adapted by Jacek Komorowski
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import math
# NOTE: The toolbox can only pool lists of features of the same length. It was specifically optimized to efficiently
# o so. One way to handle multiple lists of features of variable length is to create, via a data augmentation
# technique, a tensor of shape: 'batch_size'x'max_samples'x'feature_size'. Where 'max_samples' would be the maximum
# number of feature per list. Then for each list, you would fill the tensor with 0 values.
class NetVLADLoupe(nn.Module):
def __init__(self, feature_size, cluster_size, output_dim, gating=True, add_batch_norm=True):
super().__init__()
self.feature_size = feature_size
self.output_dim = output_dim
self.gating = gating
self.add_batch_norm = add_batch_norm
self.cluster_size = cluster_size
self.softmax = nn.Softmax(dim=-1)
self.cluster_weights = nn.Parameter(torch.randn(feature_size, cluster_size) * 1 / math.sqrt(feature_size))
self.cluster_weights2 = nn.Parameter(torch.randn(1, feature_size, cluster_size) * 1 / math.sqrt(feature_size))
self.hidden1_weights = nn.Parameter(
torch.randn(cluster_size * feature_size, output_dim) * 1 / math.sqrt(feature_size))
if add_batch_norm:
self.cluster_biases = None
self.bn1 = nn.BatchNorm1d(cluster_size)
else:
self.cluster_biases = nn.Parameter(torch.randn(cluster_size) * 1 / math.sqrt(feature_size))
self.bn1 = None
self.bn2 = nn.BatchNorm1d(output_dim)
if gating:
self.context_gating = GatingContext(output_dim, add_batch_norm=add_batch_norm)
def forward(self, x):
# Expects (batch_size, num_points, channels) tensor
assert x.dim() == 3
num_points = x.shape[1]
activation = torch.matmul(x, self.cluster_weights)
if self.add_batch_norm:
# activation = activation.transpose(1,2).contiguous()
activation = activation.view(-1, self.cluster_size)
activation = self.bn1(activation)
activation = activation.view(-1, num_points, self.cluster_size)
# activation = activation.transpose(1,2).contiguous()
else:
activation = activation + self.cluster_biases
activation = self.softmax(activation)
activation = activation.view((-1, num_points, self.cluster_size))
a_sum = activation.sum(-2, keepdim=True)
a = a_sum * self.cluster_weights2
activation = torch.transpose(activation, 2, 1)
x = x.view((-1, num_points, self.feature_size))
vlad = torch.matmul(activation, x)
vlad = torch.transpose(vlad, 2, 1)
vlad = vlad - a
vlad = F.normalize(vlad, dim=1, p=2)
vlad = vlad.reshape((-1, self.cluster_size * self.feature_size))
vlad = F.normalize(vlad, dim=1, p=2)
vlad = torch.matmul(vlad, self.hidden1_weights)
vlad = self.bn2(vlad)
if self.gating:
vlad = self.context_gating(vlad)
return vlad
class GatingContext(nn.Module):
def __init__(self, dim, add_batch_norm=True):
super(GatingContext, self).__init__()
self.dim = dim
self.add_batch_norm = add_batch_norm
self.gating_weights = nn.Parameter(
torch.randn(dim, dim) * 1 / math.sqrt(dim))
self.sigmoid = nn.Sigmoid()
if add_batch_norm:
self.gating_biases = None
self.bn1 = nn.BatchNorm1d(dim)
else:
self.gating_biases = nn.Parameter(
torch.randn(dim) * 1 / math.sqrt(dim))
self.bn1 = None
def forward(self, x):
gates = torch.matmul(x, self.gating_weights)
if self.add_batch_norm:
gates = self.bn1(gates)
else:
gates = gates + self.gating_biases
gates = self.sigmoid(gates)
activation = x * gates
return activation
class MinkNetVladWrapper(torch.nn.Module):
    """Adapter that lets NetVLADLoupe consume a MinkowskiEngine SparseTensor.

    The sparse tensor's per-cloud feature lists (of variable length) are
    zero-padded into one dense batch before NetVLAD pooling.
    """

    def __init__(self, feature_size, output_dim, cluster_size=64, gating=True):
        super().__init__()
        self.feature_size = feature_size
        self.output_dim = output_dim
        self.net_vlad = NetVLADLoupe(feature_size=feature_size, cluster_size=cluster_size, output_dim=output_dim,
                                     gating=gating, add_batch_norm=True)

    def forward(self, x):
        # x is a SparseTensor
        assert x.F.shape[1] == self.feature_size
        # List of (n_points_i, feature_size) tensors, one per point cloud
        per_cloud_feats = x.decomposed_features
        n_clouds = len(per_cloud_feats)
        # Zero-pad to a dense (batch_size, max_points, feature_size) tensor
        padded = torch.nn.utils.rnn.pad_sequence(per_cloud_feats, batch_first=True)
        pooled = self.net_vlad(padded)
        assert pooled.shape[0] == n_clouds
        assert pooled.shape[1] == self.output_dim
        return pooled  # (batch_size, output_dim) tensor
| 5,287 | 38.17037 | 118 | py |
MinkLoc3D | MinkLoc3D-master/datasets/dataset_utils.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import numpy as np
import torch
from torch.utils.data import DataLoader
import MinkowskiEngine as ME
from datasets.oxford import OxfordDataset, TrainTransform, TrainSetTransform
from datasets.samplers import BatchSampler
from misc.utils import MinkLocParams
def make_datasets(params: MinkLocParams, debug=False):
    """Create the training (and optionally validation) OxfordDataset objects.

    :param params: configuration; uses dataset_folder, train_file, val_file, aug_mode
    :param debug: unused here; kept for interface compatibility with callers
    :return: dict with key 'train' and, when params.val_file is set, 'val'
    """
    datasets = {}
    train_transform = TrainTransform(params.aug_mode)
    train_set_transform = TrainSetTransform(params.aug_mode)
    # BUGFIX: pass transforms by keyword. OxfordDataset's third positional
    # parameter is image_path, so passing the transform positionally silently
    # assigned it to image_path and left transform=None (no augmentation).
    datasets['train'] = OxfordDataset(params.dataset_folder, params.train_file, transform=train_transform,
                                      set_transform=train_set_transform)
    val_transform = None  # validation clouds are not augmented
    if params.val_file is not None:
        datasets['val'] = OxfordDataset(params.dataset_folder, params.val_file, transform=val_transform)
    return datasets
def make_collate_fn(dataset: OxfordDataset, mink_quantization_size=None):
    """Build a DataLoader collate function for ``dataset``.

    The returned function stacks the clouds, applies the dataset-level
    set_transform to the whole batch, optionally quantizes the clouds for a
    MinkowskiEngine model, and derives positives/negatives masks from the
    dataset's ground truth.
    """
    # set_transform: the transform to be applied to all batch elements
    def collate_fn(data_list):
        # Constructs a batch object; data_list items are (cloud, dataset index) pairs
        clouds = [e[0] for e in data_list]
        labels = [e[1] for e in data_list]
        batch = torch.stack(clouds, dim=0)       # Produces (batch_size, n_points, 3) tensor
        if dataset.set_transform is not None:
            # Apply the same transformation on all dataset elements
            batch = dataset.set_transform(batch)
        if mink_quantization_size is None:
            # Not a MinkowskiEngine based model
            batch = {'cloud': batch}
        else:
            # Quantize each cloud separately, then merge into batched coordinates
            coords = [ME.utils.sparse_quantize(coordinates=e, quantization_size=mink_quantization_size)
                      for e in batch]
            coords = ME.utils.batched_coordinates(coords)
            # Assign a dummy feature equal to 1 to each point
            # Coords must be on CPU, features can be on GPU - see MinkowskiEngine documentation
            feats = torch.ones((coords.shape[0], 1), dtype=torch.float32)
            batch = {'coords': coords, 'features': feats}
        # Compute positives and negatives masks: entry [i][j] tells whether batch
        # element j is a positive (resp. negative) of batch element i
        positives_mask = [[in_sorted_array(e, dataset.queries[label].positives) for e in labels] for label in labels]
        negatives_mask = [[not in_sorted_array(e, dataset.queries[label].non_negatives) for e in labels] for label in labels]
        positives_mask = torch.tensor(positives_mask)
        negatives_mask = torch.tensor(negatives_mask)
        # Returns (batch_size, n_points, 3) tensor and positives_mask and
        # negatives_mask which are batch_size x batch_size boolean tensors
        return batch, positives_mask, negatives_mask
    return collate_fn
def make_dataloaders(params: MinkLocParams, debug=False):
    """Create training and validation dataloaders returning groups of k=2 similar elements.

    :param params: training/model configuration (MinkLocParams)
    :param debug: forwarded to make_datasets
    :return: dict with a 'train' DataLoader and, when a validation set exists, a 'val' one
    """
    datasets = make_datasets(params, debug=debug)
    dataloders = {}
    train_sampler = BatchSampler(datasets['train'], batch_size=params.batch_size,
                                 batch_size_limit=params.batch_size_limit,
                                 batch_expansion_rate=params.batch_expansion_rate)
    # Collate function collates items into a batch and applies a 'set transform' on the entire batch
    train_collate_fn = make_collate_fn(datasets['train'], params.model_params.mink_quantization_size)
    dataloders['train'] = DataLoader(datasets['train'], batch_sampler=train_sampler,
                                     collate_fn=train_collate_fn,
                                     num_workers=params.num_workers, pin_memory=True)
    if 'val' not in datasets:
        return dataloders
    # Validation currently has an empty set_transform, but the same collate path is used
    val_sampler = BatchSampler(datasets['val'], batch_size=params.batch_size)
    val_collate_fn = make_collate_fn(datasets['val'], params.model_params.mink_quantization_size)
    dataloders['val'] = DataLoader(datasets['val'], batch_sampler=val_sampler,
                                   collate_fn=val_collate_fn,
                                   num_workers=params.num_workers, pin_memory=True)
    return dataloders
def in_sorted_array(e: int, array: np.ndarray) -> bool:
    """Return True iff ``e`` occurs in the ascending-sorted ``array``.

    Binary search via np.searchsorted (O(log n)); handles empty arrays and
    values outside the array's range.
    """
    # np.searchsorted returns the left insertion point in [0, len(array)];
    # the previous `pos == -1` guard was dead code and has been removed.
    pos = np.searchsorted(array, e)
    return pos < len(array) and array[pos] == e
| 4,547 | 44.48 | 125 | py |
MinkLoc3D | MinkLoc3D-master/datasets/oxford.py | # Author: Jacek Komorowski
# Warsaw University of Technology
# Dataset wrapper for Oxford laser scans dataset from PointNetVLAD project
# For information on dataset see: https://github.com/mikacuy/pointnetvlad
import os
import pickle
import numpy as np
import math
from scipy.linalg import expm, norm
import random
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import tqdm
class OxfordDataset(Dataset):
    """
    Dataset wrapper for Oxford laser scans dataset from PointNetVLAD project.
    """
    def __init__(self, dataset_path: str, query_filename: str, image_path: str = None,
                 lidar2image_ndx=None, transform=None, set_transform=None, image_transform=None, use_cloud=True):
        """
        :param dataset_path: root folder of the dataset
        :param query_filename: pickle file (relative to dataset_path) with the query dict
        :param image_path: optional root folder of the RGB images
        :param lidar2image_ndx: optional lidar-scan -> image index mapping
        :param transform: per-element point cloud transform
        :param set_transform: transform applied to a whole batch at collate time
        :param image_transform: per-element image transform
        :param use_cloud: whether point clouds should be loaded
        """
        assert os.path.exists(dataset_path), 'Cannot access dataset path: {}'.format(dataset_path)
        self.dataset_path = dataset_path
        self.query_filepath = os.path.join(dataset_path, query_filename)
        assert os.path.exists(self.query_filepath), 'Cannot access query file: {}'.format(self.query_filepath)
        self.transform = transform
        self.set_transform = set_transform
        # Mapping: element index -> TrainingTuple.
        # BUGFIX: the annotation `Dict[int, TrainingTuple]` previously used here
        # raised NameError at runtime (typing.Dict was never imported, and
        # annotations on attribute targets ARE evaluated). Also close the
        # pickle file instead of leaking the handle.
        with open(self.query_filepath, 'rb') as f:
            self.queries = pickle.load(f)  # dict: int -> TrainingTuple
        self.image_path = image_path
        self.lidar2image_ndx = lidar2image_ndx
        self.image_transform = image_transform
        self.n_points = 4096    # pointclouds in the dataset are downsampled to 4096 points
        self.image_ext = '.png'
        self.use_cloud = use_cloud
        print('{} queries in the dataset'.format(len(self)))

    def __len__(self):
        return len(self.queries)

    def __getitem__(self, ndx):
        # Load point cloud and apply transform
        file_pathname = os.path.join(self.dataset_path, self.queries[ndx].rel_scan_filepath)
        query_pc = self.load_pc(file_pathname)
        if self.transform is not None:
            query_pc = self.transform(query_pc)
        return query_pc, ndx

    def get_positives(self, ndx):
        # Sorted ndarray of ids of elements that are positives of element ndx
        return self.queries[ndx].positives

    def get_non_negatives(self, ndx):
        # Sorted ndarray of ids of elements that are non-negatives of element ndx
        return self.queries[ndx].non_negatives

    def load_pc(self, filename):
        # Load point cloud, does not apply any transform
        # Returns Nx3 matrix
        file_path = os.path.join(self.dataset_path, filename)
        pc = np.fromfile(file_path, dtype=np.float64)
        # coords are within -1..1 range in each dimension
        assert pc.shape[0] == self.n_points * 3, "Error in point cloud shape: {}".format(file_path)
        pc = np.reshape(pc, (pc.shape[0] // 3, 3))
        pc = torch.tensor(pc, dtype=torch.float)
        return pc
class TrainingTuple:
    """Metadata describing one element used for training/validation.

    Fields:
        id: element id (ids start from 0 and are consecutive numbers)
        timestamp: acquisition timestamp
        rel_scan_filepath: relative path to the scan file
        positives: sorted ndarray of ids of positive elements
        non_negatives: sorted ndarray of ids of non-negative elements
        position: (2,) ndarray — x, y position in meters (northing, easting)
    """

    def __init__(self, id: int, timestamp: int, rel_scan_filepath: str, positives: np.ndarray,
                 non_negatives: np.ndarray, position: np.ndarray):
        assert position.shape == (2,)
        self.id = id
        self.timestamp = timestamp
        self.rel_scan_filepath = rel_scan_filepath
        self.positives = positives
        self.non_negatives = non_negatives
        self.position = position
class TrainTransform:
    """Per-element augmentation applied to a single training point cloud."""

    def __init__(self, aug_mode):
        # aug_mode=1 is the only supported mode: point jitter, random point
        # removal, small random translation and random block removal
        self.aug_mode = aug_mode
        if self.aug_mode != 1:
            raise NotImplementedError('Unknown aug_mode: {}'.format(self.aug_mode))
        self.transform = transforms.Compose([
            JitterPoints(sigma=0.001, clip=0.002),
            RemoveRandomPoints(r=(0.0, 0.1)),
            RandomTranslation(max_delta=0.01),
            RemoveRandomBlock(p=0.4),
        ])

    def __call__(self, e):
        return self.transform(e) if self.transform is not None else e
class TrainSetTransform:
    """Batch-level augmentation: one random rotation around Z plus a random
    axis flip, applied identically to every cloud in the batch (so positive
    pairs stay aligned with each other).

    Note: ``aug_mode`` is stored but not interpreted — the same transform
    chain is used for any value (unlike TrainTransform, which validates it).
    """

    def __init__(self, aug_mode):
        self.aug_mode = aug_mode
        # (removed a dead `self.transform = None` that was immediately overwritten)
        t = [RandomRotation(max_theta=5, max_theta2=0, axis=np.array([0, 0, 1])),
             RandomFlip([0.25, 0.25, 0.])]
        self.transform = transforms.Compose(t)

    def __call__(self, e):
        if self.transform is not None:
            e = self.transform(e)
        return e
class RandomFlip:
    """Randomly mirror a point cloud along at most one coordinate axis.

    p = [p_x, p_y, p_z] gives per-axis flip probabilities; probabilities must
    sum to a value in (0, 1], and at most one axis is flipped per call.
    """

    def __init__(self, p):
        assert len(p) == 3
        assert 0 < sum(p) <= 1, 'sum(p) must be in (0, 1] range, is: {}'.format(sum(p))
        self.p = p
        self.p_cum_sum = np.cumsum(p)

    def __call__(self, coords):
        draw = random.random()
        # Flip (in place) the first axis whose cumulative probability covers the draw
        for axis in range(3):
            if draw <= self.p_cum_sum[axis]:
                coords[..., axis] = -coords[..., axis]
                break
        return coords
class RandomRotation:
def __init__(self, axis=None, max_theta=180, max_theta2=15):
self.axis = axis
self.max_theta = max_theta # Rotation around axis
self.max_theta2 = max_theta2 # Smaller rotation in random direction
def _M(self, axis, theta):
return expm(np.cross(np.eye(3), axis / norm(axis) * theta)).astype(np.float32)
def __call__(self, coords):
if self.axis is not None:
axis = self.axis
else:
axis = np.random.rand(3) - 0.5
R = self._M(axis, (np.pi * self.max_theta / 180) * 2 * (np.random.rand(1) - 0.5))
if self.max_theta2 is None:
coords = coords @ R
else:
R_n = self._M(np.random.rand(3) - 0.5, (np.pi * self.max_theta2 / 180) * 2 * (np.random.rand(1) - 0.5))
coords = coords @ R @ R_n
return coords
class RandomTranslation:
    """Translate the whole cloud by one random offset max_delta * N(0, I)."""

    def __init__(self, max_delta=0.05):
        self.max_delta = max_delta

    def __call__(self, coords):
        offset = (self.max_delta * np.random.randn(1, 3)).astype(np.float32)
        return coords + offset
class RandomScale:
    """Scale the whole cloud by a single factor drawn uniformly from [min, max)."""

    def __init__(self, min, max):
        # Parameter names shadow builtins but are kept for interface compatibility
        self.scale = max - min
        self.bias = min

    def __call__(self, coords):
        factor = (self.scale * np.random.rand(1) + self.bias).astype(np.float32)
        return coords * factor
class RandomShear:
    """Apply a random shear: multiply coords by (I + delta * N(0, I)_{3x3})."""

    def __init__(self, delta=0.1):
        self.delta = delta

    def __call__(self, coords):
        shear = (np.eye(3) + self.delta * np.random.randn(3, 3)).astype(np.float32)
        return coords @ shear
class JitterPoints:
    """Add independent Gaussian noise (optionally clipped) to each point.

    With probability (1 - p) a point is left untouched; jitter is applied in
    place and the (mutated) input tensor is returned.
    """

    def __init__(self, sigma=0.01, clip=None, p=1.):
        assert 0 < p <= 1.
        assert sigma > 0.
        self.sigma = sigma
        self.clip = clip
        self.p = p

    def __call__(self, e):
        """Jitter an (N, 3) cloud in place and return it."""
        n_points = e.shape[0]
        if self.p < 1.:
            # Bernoulli(p) mask selecting which points get jittered
            bern = torch.distributions.categorical.Categorical(probs=torch.tensor([1 - self.p, self.p]))
            sel = bern.sample(sample_shape=(n_points,)) == 1
        else:
            sel = torch.ones((n_points,), dtype=torch.int64) == 1
        noise = self.sigma * torch.randn_like(e[sel])
        if self.clip is not None:
            noise = torch.clamp(noise, min=-self.clip, max=self.clip)
        e[sel] = e[sel] + noise
        return e
class RemoveRandomPoints:
    """Zero-out a random subset of points (in place).

    The removal ratio is either fixed (scalar ``r``) or drawn uniformly from
    ``[r[0], r[1]]`` on every call when ``r`` is a 2-element list/tuple.
    """

    def __init__(self, r):
        if type(r) in (list, tuple):
            assert len(r) == 2
            assert 0 <= r[0] <= 1
            assert 0 <= r[1] <= 1
            self.r_min = float(r[0])
            self.r_max = float(r[1])
        else:
            assert 0 <= r <= 1
            self.r_min = None
            self.r_max = float(r)

    def __call__(self, e):
        n = len(e)
        # Fixed ratio, or one drawn uniformly from [r_min, r_max]
        ratio = self.r_max if self.r_min is None else random.uniform(self.r_min, self.r_max)
        drop = np.random.choice(range(n), size=int(n * ratio), replace=False)
        e[drop] = torch.zeros_like(e[drop])
        return e
class RemoveRandomBlock:
    """
    Randomly remove part of the point cloud. Similar to PyTorch RandomErasing but operating on 3D point clouds.
    Erases a fronto-parallel cuboid; instead of dropping points, their coords
    are set to (0, 0, 0) so the point count stays constant.
    """

    def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3)):
        self.p = p          # probability of erasing anything at all
        self.scale = scale  # erased area as a fraction of the XY bounding-box area
        self.ratio = ratio  # aspect-ratio range of the erased rectangle

    def get_params(self, coords):
        """Draw a random (x, y, w, h) rectangle inside the cloud's XY bounding box."""
        pts = coords.view(-1, 3)
        bb_min, _ = torch.min(pts, dim=0)
        bb_max, _ = torch.max(pts, dim=0)
        extent = bb_max - bb_min
        area = extent[0] * extent[1]
        erase_area = random.uniform(self.scale[0], self.scale[1]) * area
        aspect_ratio = random.uniform(self.ratio[0], self.ratio[1])
        h = math.sqrt(erase_area * aspect_ratio)
        w = math.sqrt(erase_area / aspect_ratio)
        x = bb_min[0] + random.uniform(0, 1) * (extent[0] - w)
        y = bb_min[1] + random.uniform(0, 1) * (extent[1] - h)
        return x, y, w, h

    def __call__(self, coords):
        if random.random() < self.p:
            x, y, w, h = self.get_params(coords)  # Fronto-parallel cuboid to remove
            inside = (x < coords[..., 0]) & (coords[..., 0] < x + w) & (y < coords[..., 1]) & (coords[..., 1] < y + h)
            coords[inside] = torch.zeros_like(coords[inside])
        return coords
| 10,148 | 33.40339 | 115 | py |
MinkLoc3D | MinkLoc3D-master/datasets/samplers.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import random
import copy
from torch.utils.data import Sampler
from datasets.oxford import OxfordDataset
class ListDict(object):
    """Set-like container with O(1) add, O(1) remove and uniform random choice.

    Items live in a list (for random.choice) while a dict maps each item to
    its list position; removal swaps the removed slot with the last element
    so no list shifting is needed.
    """

    def __init__(self, items=None):
        if items is None:
            self.items = []
            self.item_to_position = {}
        else:
            self.items = copy.deepcopy(items)
            self.item_to_position = {item: ndx for ndx, item in enumerate(items)}

    def add(self, item):
        # No-op when the item is already present
        if item not in self.item_to_position:
            self.items.append(item)
            self.item_to_position[item] = len(self.items) - 1

    def remove(self, item):
        # Swap-with-last removal: move the tail element into the freed slot
        position = self.item_to_position.pop(item)
        tail = self.items.pop()
        if position != len(self.items):
            self.items[position] = tail
            self.item_to_position[tail] = position

    def choose_random(self):
        return random.choice(self.items)

    def __contains__(self, item):
        return item in self.item_to_position

    def __iter__(self):
        return iter(self.items)

    def __len__(self):
        return len(self.items)
class BatchSampler(Sampler):
    # Sampler returning list of indices to form a mini-batch
    # Samples elements in groups consisting of k=2 similar elements (positives)
    # Batch has the following structure: item1_1, ..., item1_k, item2_1, ... item2_k, itemn_1, ..., itemn_k
    def __init__(self, dataset: OxfordDataset, batch_size: int, batch_size_limit: int = None,
                 batch_expansion_rate: float = None, max_batches: int = None):
        """
        :param dataset: dataset providing `queries` and `get_positives`
        :param batch_size: initial batch size (raised to at least 2*k)
        :param batch_size_limit: upper bound used by expand_batch()
        :param batch_expansion_rate: multiplicative growth factor (> 1) for expand_batch()
        :param max_batches: optional cap on the number of batches per epoch
        """
        if batch_expansion_rate is not None:
            assert batch_expansion_rate > 1., 'batch_expansion_rate must be greater than 1'
            assert batch_size <= batch_size_limit, 'batch_size_limit must be greater or equal to batch_size'
        self.batch_size = batch_size
        self.batch_size_limit = batch_size_limit
        self.batch_expansion_rate = batch_expansion_rate
        self.max_batches = max_batches
        self.dataset = dataset
        self.k = 2  # Number of positive examples per group must be 2
        if self.batch_size < 2 * self.k:
            self.batch_size = 2 * self.k
            print('WARNING: Batch too small. Batch size increased to {}.'.format(self.batch_size))
        self.batch_idx = []  # Index of elements in each batch (re-generated every epoch)
        self.elems_ndx = list(self.dataset.queries)  # List of point cloud indexes

    def __iter__(self):
        # Re-generate batches every epoch
        self.generate_batches()
        for batch in self.batch_idx:
            yield batch

    def __len__(self):
        # BUGFIX: this was defined as `__len(self)` (missing trailing
        # underscores); the name was mangled to _BatchSampler__len, so
        # len(sampler) raised TypeError instead of returning the batch count.
        return len(self.batch_idx)

    def expand_batch(self):
        """Grow batch_size by batch_expansion_rate, clamped to batch_size_limit."""
        if self.batch_expansion_rate is None:
            print('WARNING: batch_expansion_rate is None')
            return
        if self.batch_size >= self.batch_size_limit:
            return
        old_batch_size = self.batch_size
        self.batch_size = int(self.batch_size * self.batch_expansion_rate)
        self.batch_size = min(self.batch_size, self.batch_size_limit)
        print('=> Batch size increased from: {} to {}'.format(old_batch_size, self.batch_size))

    def generate_batches(self):
        # Generate training/evaluation batches.
        # batch_idx holds indexes of elements in each batch as a list of lists
        self.batch_idx = []
        unused_elements_ndx = ListDict(self.elems_ndx)
        current_batch = []
        assert self.k == 2, 'sampler can sample only k=2 elements from the same class'
        while True:
            if len(current_batch) >= self.batch_size or len(unused_elements_ndx) == 0:
                # Flush out batch, when it has a desired size, or a smaller batch, when there's no more
                # elements to process
                if len(current_batch) >= 2 * self.k:
                    # Ensure there're at least two groups of similar elements, otherwise, it would not be possible
                    # to find negative examples in the batch
                    assert len(current_batch) % self.k == 0, 'Incorrect batch size: {}'.format(len(current_batch))
                    self.batch_idx.append(current_batch)
                    current_batch = []
                    if (self.max_batches is not None) and (len(self.batch_idx) >= self.max_batches):
                        break
                if len(unused_elements_ndx) == 0:
                    break
            # Add k=2 similar elements to the batch
            selected_element = unused_elements_ndx.choose_random()
            unused_elements_ndx.remove(selected_element)
            positives = self.dataset.get_positives(selected_element)
            if len(positives) == 0:
                # Broken dataset element without any positives
                continue
            unused_positives = [e for e in positives if e in unused_elements_ndx]
            # If there're unused elements similar to selected_element, sample from them
            # otherwise sample from all similar elements
            if len(unused_positives) > 0:
                second_positive = random.choice(unused_positives)
                unused_elements_ndx.remove(second_positive)
            else:
                second_positive = random.choice(list(positives))
            current_batch += [selected_element, second_positive]
        for batch in self.batch_idx:
            assert len(batch) % self.k == 0, 'Incorrect batch size: {}'.format(len(batch))
| 5,532 | 38.805755 | 114 | py |
MinkLoc3D | MinkLoc3D-master/layers/pooling.py | # Code taken from: https://github.com/filipradenovic/cnnimageretrieval-pytorch
# and ported to MinkowskiEngine by Jacek Komorowski
import torch
import torch.nn as nn
import MinkowskiEngine as ME
class MAC(nn.Module):
    """Maximum Activations of Convolutions: global max-pooling over a sparse tensor."""

    def __init__(self):
        super().__init__()
        self.f = ME.MinkowskiGlobalMaxPooling()

    def forward(self, x: ME.SparseTensor):
        # Return (batch_size, n_features) tensor
        return self.f(x).F
class SPoC(nn.Module):
    """Sum-pooling of Convolutions: global average-pooling over a sparse tensor."""

    def __init__(self):
        super().__init__()
        self.f = ME.MinkowskiGlobalAvgPooling()

    def forward(self, x: ME.SparseTensor):
        # Return (batch_size, n_features) tensor
        return self.f(x).F
class GeM(nn.Module):
    """Generalized-mean pooling with a learnable exponent p."""

    def __init__(self, p=3, eps=1e-6):
        super(GeM, self).__init__()
        self.p = nn.Parameter(torch.ones(1) * p)
        self.eps = eps
        self.f = ME.MinkowskiGlobalAvgPooling()

    def forward(self, x: ME.SparseTensor):
        # Clamping to eps implicitly applies ReLU on x (negative values are cut off)
        powered = ME.SparseTensor(x.F.clamp(min=self.eps).pow(self.p), coordinates=x.C)
        pooled = self.f(powered)  # Apply ME.MinkowskiGlobalAvgPooling
        # Return (batch_size, n_features) tensor
        return pooled.F.pow(1. / self.p)
| 1,280 | 30.243902 | 84 | py |
MinkLoc3D | MinkLoc3D-master/misc/utils.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import os
import configparser
import time
import numpy as np
class ModelParams:
    """Network-architecture parameters parsed from a model config file ([MODEL] section)."""

    def __init__(self, model_params_path):
        parser = configparser.ConfigParser()
        parser.read(model_params_path)
        section = parser['MODEL']

        self.model_params_path = model_params_path
        self.model = section.get('model')
        # Size of the final descriptor
        self.output_dim = section.getint('output_dim', 256)

        # NetVLAD head: cluster count plus optional context gating as the last step
        if 'vlad' in self.model.lower():
            self.cluster_size = section.getint('cluster_size', 64)   # Size of NetVLAD cluster
            self.gating = section.getboolean('gating', True)         # Use gating after the NetVlad

        # Settings specific to models built on MinkowskiEngine (MinkFPN backbone)
        if 'MinkFPN' in self.model:
            self.mink_quantization_size = section.getfloat('mink_quantization_size')
            # Size of the local features from backbone network (only for MinkNet based models)
            # For PointNet-based models we always use 1024 intermediary features
            self.feature_size = section.getint('feature_size', 256)
            if 'planes' in section:
                self.planes = [int(e) for e in section['planes'].split(',')]
            else:
                self.planes = [32, 64, 64]
            if 'layers' in section:
                self.layers = [int(e) for e in section['layers'].split(',')]
            else:
                self.layers = [1, 1, 1]
            self.num_top_down = section.getint('num_top_down', 1)
            self.conv0_kernel_size = section.getint('conv0_kernel_size', 5)

    def print(self):
        """Dump all parsed parameters to stdout."""
        print('Model parameters:')
        for name, value in vars(self).items():
            print('{}: {}'.format(name, value))
        print('')
def get_datetime():
    """Return the current local time as a compact 'YYYYMMDD_HHMM' string."""
    timestamp_format = "%Y%m%d_%H%M"
    return time.strftime(timestamp_format)
def xyz_from_depth(depth_image, depth_intrinsic, depth_scale=1000.):
    """Back-project a depth map to per-pixel (X, Y, Z) camera coordinates.

    Mimics OpenCV's cv2.rgbd.depthTo3d(); pixels with zero depth become NaN.
    """
    fx = depth_intrinsic[0, 0]
    fy = depth_intrinsic[1, 1]
    cx = depth_intrinsic[0, 2]
    cy = depth_intrinsic[1, 2]
    # Per-pixel coordinate grids: v holds row (image-y) indices, u holds column (image-x) indices
    v, u = np.meshgrid(range(depth_image.shape[0]), range(depth_image.shape[1]),
                       sparse=False, indexing='ij')
    X = (u - cx) * depth_image / (fx * depth_scale)
    Y = (v - cy) * depth_image / (fy * depth_scale)
    xyz = np.stack([X, Y, depth_image / depth_scale], axis=2)
    # Zero depth means "no reading" - mark those pixels as NaN
    xyz[depth_image == 0] = np.nan
    return xyz
class MinkLocParams:
    """
    Params for training MinkLoc models on Oxford dataset
    """
    def __init__(self, params_path, model_params_path):
        """
        Configuration files
        :param params_path: General configuration file
        :param model_params_path: Model-specific configuration
        """
        assert os.path.exists(params_path), 'Cannot find configuration file: {}'.format(params_path)
        assert os.path.exists(model_params_path), 'Cannot find model-specific configuration file: {}'.format(model_params_path)
        self.params_path = params_path
        # NOTE: this assignment was previously duplicated; one is enough.
        self.model_params_path = model_params_path

        config = configparser.ConfigParser()
        config.read(self.params_path)
        params = config['DEFAULT']
        self.num_points = params.getint('num_points', 4096)
        self.dataset_folder = params.get('dataset_folder')

        params = config['TRAIN']
        self.num_workers = params.getint('num_workers', 0)
        self.batch_size = params.getint('batch_size', 128)
        # Set batch_expansion_th to turn on dynamic batch sizing
        # When number of non-zero triplets falls below batch_expansion_th, expand batch size
        self.batch_expansion_th = params.getfloat('batch_expansion_th', None)
        if self.batch_expansion_th is not None:
            assert 0. < self.batch_expansion_th < 1., 'batch_expansion_th must be between 0 and 1'
            self.batch_size_limit = params.getint('batch_size_limit', 256)
            # Batch size expansion rate
            self.batch_expansion_rate = params.getfloat('batch_expansion_rate', 1.5)
            assert self.batch_expansion_rate > 1., 'batch_expansion_rate must be greater than 1'
        else:
            self.batch_size_limit = self.batch_size
            self.batch_expansion_rate = None

        self.lr = params.getfloat('lr', 1e-3)

        self.scheduler = params.get('scheduler', 'MultiStepLR')
        if self.scheduler is not None:
            if self.scheduler == 'CosineAnnealingLR':
                self.min_lr = params.getfloat('min_lr')
            elif self.scheduler == 'MultiStepLR':
                scheduler_milestones = params.get('scheduler_milestones')
                self.scheduler_milestones = [int(e) for e in scheduler_milestones.split(',')]
            else:
                raise NotImplementedError('Unsupported LR scheduler: {}'.format(self.scheduler))

        self.epochs = params.getint('epochs', 20)
        self.weight_decay = params.getfloat('weight_decay', None)
        self.normalize_embeddings = params.getboolean('normalize_embeddings', True)  # Normalize embeddings during training and evaluation
        self.loss = params.get('loss')
        if 'Contrastive' in self.loss:
            self.pos_margin = params.getfloat('pos_margin', 0.2)
            self.neg_margin = params.getfloat('neg_margin', 0.65)
        elif 'Triplet' in self.loss:
            self.margin = params.getfloat('margin', 0.4)  # Margin used in loss function
        else:
            # BUGFIX: `raise '<string>'` is a TypeError in Python 3 (exceptions
            # must derive from BaseException) - raise a proper exception,
            # matching the scheduler branch above.
            raise NotImplementedError('Unsupported loss function: {}'.format(self.loss))

        self.aug_mode = params.getint('aug_mode', 1)  # Augmentation mode (1 is default)

        self.train_file = params.get('train_file')
        self.val_file = params.get('val_file', None)

        self.eval_database_files = ['oxford_evaluation_database.pickle', 'business_evaluation_database.pickle',
                                    'residential_evaluation_database.pickle', 'university_evaluation_database.pickle']
        self.eval_query_files = ['oxford_evaluation_query.pickle', 'business_evaluation_query.pickle',
                                 'residential_evaluation_query.pickle', 'university_evaluation_query.pickle']
        assert len(self.eval_database_files) == len(self.eval_query_files)

        # Read model parameters
        self.model_params = ModelParams(self.model_params_path)
        self._check_params()

    def _check_params(self):
        # Fail fast when the dataset folder is not accessible.
        assert os.path.exists(self.dataset_folder), 'Cannot access dataset: {}'.format(self.dataset_folder)

    def print(self):
        """Dump all parameters (including nested model params) to stdout."""
        print('Parameters:')
        param_dict = vars(self)
        for e in param_dict:
            if e != 'model_params':
                print('{}: {}'.format(e, param_dict[e]))

        self.model_params.print()
        print('')
| 7,153 | 39.88 | 140 | py |
MinkLoc3D | MinkLoc3D-master/generating_queries/generate_test_sets.py | # PointNetVLAD datasets: based on Oxford RobotCar and Inhouse
# Code adapted from PointNetVLAD repo: https://github.com/mikacuy/pointnetvlad
import numpy as np
import os
import pandas as pd
from sklearn.neighbors import KDTree
import pickle
import argparse
# For training and test data splits: half-widths of the rectangular test regions
X_WIDTH = 150
Y_WIDTH = 150

# For Oxford
P1 = [5735712.768124, 620084.402381]
P2 = [5735611.299219, 620540.270327]
P3 = [5735237.358209, 620543.094379]
P4 = [5734749.303802, 619932.693364]

# For University Sector
P5 = [363621.292362, 142864.19756]
P6 = [364788.795462, 143125.746609]
P7 = [363597.507711, 144011.414174]

# For Residential Area
P8 = [360895.486453, 144999.915143]
P9 = [362357.024536, 144894.825301]
P10 = [361368.907155, 145209.663042]

P_DICT = {"oxford": [P1, P2, P3, P4], "university": [P5, P6, P7], "residential": [P8, P9, P10], "business": []}


def check_in_test_set(northing, easting, points):
    # A location belongs to the test split when it lies inside the
    # (2*X_WIDTH x 2*Y_WIDTH) rectangle centred on any of the given points.
    return any(
        point[0] - X_WIDTH < northing < point[0] + X_WIDTH and
        point[1] - Y_WIDTH < easting < point[1] + Y_WIDTH
        for point in points
    )
def output_to_file(output, base_path, filename):
    """Pickle `output` into base_path/filename using the highest protocol."""
    target = os.path.join(base_path, filename)
    with open(target, 'wb') as handle:
        pickle.dump(output, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print("Done ", filename)
def construct_query_and_database_sets(base_path, runs_folder, folders, pointcloud_fols, filename, p, output_name):
    """Build and pickle database/query evaluation sets for one region.

    For each run (folder) two sets are built: the database (all submaps) and
    the query/test set (submaps inside the test region `p`; the entire
    business district is treated as test).  For every query in run j the
    positive matches (database elements of run i != j within a 25 m radius)
    are precomputed and stored under key i.
    """
    database_trees = []
    test_trees = []
    for folder in folders:
        print(folder)
        df_database = pd.DataFrame(columns=['file', 'northing', 'easting'])
        df_test = pd.DataFrame(columns=['file', 'northing', 'easting'])
        df_locations = pd.read_csv(os.path.join(base_path, runs_folder, folder, filename), sep=',')
        for index, row in df_locations.iterrows():
            # NOTE: DataFrame.append() was removed in pandas 2.0 - concatenate
            # a one-row frame instead.
            # entire business district is in the test set
            if output_name == "business":
                df_test = pd.concat([df_test, row.to_frame().T], ignore_index=True)
            elif check_in_test_set(row['northing'], row['easting'], p):
                df_test = pd.concat([df_test, row.to_frame().T], ignore_index=True)
            df_database = pd.concat([df_database, row.to_frame().T], ignore_index=True)

        database_tree = KDTree(df_database[['northing', 'easting']])
        test_tree = KDTree(df_test[['northing', 'easting']])
        database_trees.append(database_tree)
        test_trees.append(test_tree)

    test_sets = []
    database_sets = []
    for folder in folders:
        database = {}
        test = {}
        df_locations = pd.read_csv(os.path.join(base_path, runs_folder, folder, filename), sep=',')
        df_locations['timestamp'] = runs_folder + folder + pointcloud_fols + \
                                    df_locations['timestamp'].astype(str) + '.bin'
        df_locations = df_locations.rename(columns={'timestamp': 'file'})
        for index, row in df_locations.iterrows():
            # entire business district is in the test set
            if output_name == "business":
                test[len(test.keys())] = {'query': row['file'], 'northing': row['northing'], 'easting': row['easting']}
            elif check_in_test_set(row['northing'], row['easting'], p):
                test[len(test.keys())] = {'query': row['file'], 'northing': row['northing'], 'easting': row['easting']}
            database[len(database.keys())] = {'query': row['file'], 'northing': row['northing'],
                                              'easting': row['easting']}
        database_sets.append(database)
        test_sets.append(test)

    # Precompute ground-truth positives across runs (i != j)
    for i in range(len(database_sets)):
        tree = database_trees[i]
        for j in range(len(test_sets)):
            if i == j:
                continue
            for key in range(len(test_sets[j].keys())):
                coor = np.array([[test_sets[j][key]["northing"], test_sets[j][key]["easting"]]])
                index = tree.query_radius(coor, r=25)
                # indices of the positive matches in database i of each query (key) in test set j
                test_sets[j][key][i] = index[0].tolist()

    output_to_file(database_sets, base_path, output_name + '_evaluation_database.pickle')
    output_to_file(test_sets, base_path, output_name + '_evaluation_query.pickle')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate evaluation datasets')
    parser.add_argument('--dataset_root', type=str, required=True, help='Dataset root folder')
    args = parser.parse_args()
    print('Dataset root: {}'.format(args.dataset_root))
    assert os.path.exists(args.dataset_root), f"Cannot access dataset root folder: {args.dataset_root}"
    base_path = args.dataset_root
    # For Oxford
    folders = []
    runs_folder = "oxford/"
    all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
    # Hand-picked evaluation runs: indices into the sorted list of run folders
    index_list = [5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 22, 24, 31, 32, 33, 38, 39, 43, 44]
    print(len(index_list))
    for index in index_list:
        folders.append(all_folders[index])
    print(folders)
    construct_query_and_database_sets(base_path, runs_folder, folders, "/pointcloud_20m/",
                                      "pointcloud_locations_20m.csv", P_DICT["oxford"], "oxford")
    # For University Sector (in-house runs 10..14)
    folders = []
    runs_folder = "inhouse_datasets/"
    all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
    uni_index = range(10, 15)
    for index in uni_index:
        folders.append(all_folders[index])
    print(folders)
    construct_query_and_database_sets(base_path, runs_folder, folders, "/pointcloud_25m_25/",
                                      "pointcloud_centroids_25.csv", P_DICT["university"], "university")
    # For Residential Area (in-house runs 5..9)
    folders = []
    runs_folder = "inhouse_datasets/"
    all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
    res_index = range(5, 10)
    for index in res_index:
        folders.append(all_folders[index])
    print(folders)
    construct_query_and_database_sets(base_path, runs_folder, folders, "/pointcloud_25m_25/",
                                      "pointcloud_centroids_25.csv", P_DICT["residential"], "residential")
    # For Business District (in-house runs 0..4; P_DICT["business"] is empty,
    # so the whole district goes into the test set)
    folders = []
    runs_folder = "inhouse_datasets/"
    all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
    bus_index = range(5)
    for index in bus_index:
        folders.append(all_folders[index])
    print(folders)
    construct_query_and_database_sets(base_path, runs_folder, folders, "/pointcloud_25m_25/",
                                      "pointcloud_centroids_25.csv", P_DICT["business"], "business")
| 6,937 | 40.54491 | 119 | py |
MinkLoc3D | MinkLoc3D-master/generating_queries/generate_training_tuples_baseline.py | # PointNetVLAD datasets: based on Oxford RobotCar and Inhouse
# Code adapted from PointNetVLAD repo: https://github.com/mikacuy/pointnetvlad
import numpy as np
import os
import pandas as pd
from sklearn.neighbors import KDTree
import pickle
import argparse
import tqdm
from datasets.oxford import TrainingTuple
# Import test set boundaries
from generating_queries.generate_test_sets import P1, P2, P3, P4, check_in_test_set
# Test set boundaries (Oxford test-region centres)
P = [P1, P2, P3, P4]

# Oxford run layout: each run folder contains this CSV of submap centre
# locations plus a folder of 20 m submaps (10 m overlap).
RUNS_FOLDER = "oxford/"
FILENAME = "pointcloud_locations_20m_10overlap.csv"
POINTCLOUD_FOLS = "/pointcloud_20m_10overlap/"
def construct_query_dict(df_centroids, base_path, filename, ind_nn_r, ind_r_r=50):
    # ind_nn_r: threshold for positive examples
    # ind_r_r: threshold for negative examples
    # Baseline dataset parameters in the original PointNetVLAD code: ind_nn_r=10, ind_r=50
    # Refined dataset parameters in the original PointNetVLAD code: ind_nn_r=12.5, ind_r=50
    coords = df_centroids[['northing', 'easting']]
    tree = KDTree(coords)
    ind_nn = tree.query_radius(coords, r=ind_nn_r)
    ind_r = tree.query_radius(coords, r=ind_r_r)
    queries = {}
    for anchor_ndx, (nn_ndx, r_ndx) in enumerate(zip(ind_nn, ind_r)):
        anchor_pos = np.array(df_centroids.iloc[anchor_ndx][['northing', 'easting']])
        query = df_centroids.iloc[anchor_ndx]["file"]
        # Extract timestamp from the filename
        scan_filename = os.path.split(query)[1]
        assert os.path.splitext(scan_filename)[1] == '.bin', f"Expected .bin file: {scan_filename}"
        timestamp = int(os.path.splitext(scan_filename)[0])

        # Drop the anchor itself from its positives, then sort ascending
        positives = np.sort(nn_ndx[nn_ndx != anchor_ndx])
        non_negatives = np.sort(r_ndx)

        # Tuple(id: int, timestamp: int, rel_scan_filepath: str, positives: List[int], non_negatives: List[int])
        queries[anchor_ndx] = TrainingTuple(id=anchor_ndx, timestamp=timestamp, rel_scan_filepath=query,
                                            positives=positives, non_negatives=non_negatives, position=anchor_pos)

    with open(os.path.join(base_path, filename), 'wb') as handle:
        pickle.dump(queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print("Done ", filename)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate Baseline training dataset')
    parser.add_argument('--dataset_root', type=str, required=True, help='Dataset root folder')
    args = parser.parse_args()
    print('Dataset root: {}'.format(args.dataset_root))
    assert os.path.exists(args.dataset_root), f"Cannot access dataset root folder: {args.dataset_root}"
    base_path = args.dataset_root

    all_folders = sorted(os.listdir(os.path.join(base_path, RUNS_FOLDER)))
    folders = []

    # All runs are used for training (both full and partial)
    index_list = range(len(all_folders) - 1)
    print("Number of runs: " + str(len(index_list)))
    for index in index_list:
        folders.append(all_folders[index])
    print(folders)

    df_train = pd.DataFrame(columns=['file', 'northing', 'easting'])
    df_test = pd.DataFrame(columns=['file', 'northing', 'easting'])

    for folder in tqdm.tqdm(folders):
        df_locations = pd.read_csv(os.path.join(base_path, RUNS_FOLDER, folder, FILENAME), sep=',')
        df_locations['timestamp'] = RUNS_FOLDER + folder + POINTCLOUD_FOLS + df_locations['timestamp'].astype(str) + '.bin'
        df_locations = df_locations.rename(columns={'timestamp': 'file'})
        for index, row in df_locations.iterrows():
            # BUGFIX: DataFrame.append() was removed in pandas 2.0 - use
            # pd.concat with a one-row frame instead.
            if check_in_test_set(row['northing'], row['easting'], P):
                df_test = pd.concat([df_test, row.to_frame().T], ignore_index=True)
            else:
                df_train = pd.concat([df_train, row.to_frame().T], ignore_index=True)

    print("Number of training submaps: " + str(len(df_train['file'])))
    print("Number of non-disjoint test submaps: " + str(len(df_test['file'])))
    # ind_nn_r is a threshold for positive elements - 10 is in original PointNetVLAD code for refined dataset
    construct_query_dict(df_train, base_path, "training_queries_baseline.pickle", ind_nn_r=10)
    construct_query_dict(df_test, base_path, "test_queries_baseline.pickle", ind_nn_r=10)
| 4,386 | 43.765306 | 123 | py |
MinkLoc3D | MinkLoc3D-master/generating_queries/generate_training_tuples_refine.py | # PointNetVLAD datasets: based on Oxford RobotCar and Inhouse
# Code adapted from PointNetVLAD repo: https://github.com/mikacuy/pointnetvlad
import os
import pandas as pd
import argparse
import tqdm
# Import test set boundaries
from generating_queries.generate_test_sets import P1, P2, P3, P4, P5, P6, P7, P8, P9, P10, check_in_test_set
from generating_queries.generate_training_tuples_baseline import construct_query_dict
# Test set boundaries: union of the Oxford (P1-P4) and in-house (P5-P10) region centres
P = [P1, P2, P3, P4, P5, P6, P7, P8, P9, P10]
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate Refined training dataset')
    parser.add_argument('--dataset_root', type=str, required=True, help='Dataset root folder')
    args = parser.parse_args()
    print('Dataset root: {}'.format(args.dataset_root))
    assert os.path.exists(args.dataset_root), f"Cannot access dataset root folder: {args.dataset_root}"
    base_path = args.dataset_root

    # In-house runs (5..14): university + residential sectors
    runs_folder = "inhouse_datasets/"
    filename = "pointcloud_centroids_10.csv"
    pointcloud_fols = "/pointcloud_25m_10/"
    all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
    folders = []
    index_list = range(5, 15)
    for index in index_list:
        folders.append(all_folders[index])
    print(folders)

    ####Initialize pandas DataFrame
    df_train = pd.DataFrame(columns=['file', 'northing', 'easting'])

    for folder in tqdm.tqdm(folders):
        df_locations = pd.read_csv(os.path.join(base_path, runs_folder, folder, filename), sep=',')
        df_locations['timestamp'] = runs_folder + folder + pointcloud_fols + df_locations['timestamp'].astype(str) + '.bin'
        df_locations = df_locations.rename(columns={'timestamp': 'file'})
        for index, row in df_locations.iterrows():
            if check_in_test_set(row['northing'], row['easting'], P):
                continue
            # BUGFIX: DataFrame.append() was removed in pandas 2.0 - use pd.concat
            df_train = pd.concat([df_train, row.to_frame().T], ignore_index=True)
    print(len(df_train['file']))

    ##Combine with Oxford data
    runs_folder = "oxford/"
    filename = "pointcloud_locations_20m_10overlap.csv"
    pointcloud_fols = "/pointcloud_20m_10overlap/"
    all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
    folders = []
    index_list = range(len(all_folders) - 1)
    for index in index_list:
        folders.append(all_folders[index])
    print(folders)

    for folder in folders:
        df_locations = pd.read_csv(os.path.join(base_path, runs_folder, folder, filename), sep=',')
        df_locations['timestamp'] = runs_folder + folder + pointcloud_fols + df_locations['timestamp'].astype(str) + '.bin'
        df_locations = df_locations.rename(columns={'timestamp': 'file'})
        for index, row in df_locations.iterrows():
            if check_in_test_set(row['northing'], row['easting'], P):
                continue
            df_train = pd.concat([df_train, row.to_frame().T], ignore_index=True)

    print("Number of training submaps: " + str(len(df_train['file'])))
    # ind_nn_r is a threshold for positive elements - 12.5 is in original PointNetVLAD code for refined dataset
    construct_query_dict(df_train, base_path, "training_queries_refine.pickle", ind_nn_r=12.5)
| 3,216 | 38.716049 | 123 | py |
RadarCommDataset | RadarCommDataset-main/load_dataset.py | import h5py
dkeys = []  # labels of each entry in the h5
W = []  # dataset container
# The `with` statement closes the file on exit, so the explicit f.close()
# that used to follow the loop was redundant and has been removed.
with h5py.File('RadComDynamic.hdf5', 'r') as f:
    for key in f.keys():
        dkeys.append(key)
        W.append(f[key][:])
# W can be now split into training, validation,and testing sets to run your ML algorithm on it
| 324 | 24 | 94 | py |
RadarCommDataset | RadarCommDataset-main/visualize.py | import h5py
import sys
import argparse
import matplotlib.pyplot as plt
def parse_args():
    "Parse the command line arguments"
    parser = argparse.ArgumentParser()
    # Defaults are given as real ints now; the old string defaults ("0", "10")
    # only worked because argparse applies `type` to string defaults.
    parser.add_argument("-num", type=int, default=0,
                        help="Which sample to pick. 0 to 699")
    parser.add_argument("-snr", type=int, default=10,
                        help="SNR: -20 to 18 in 2 step increments")
    parser.add_argument("-mod", default="pulsed",
                        help="Modulation options: pulsed,fmcw,bpsk,amdsb,amssb,ask")
    parser.add_argument("-sig", default="Airborne-detection",
                        help="Signal type options: Airborne-detection,Airborne-range,Air-Ground-MTI,Ground mapping,Radar-Altimeter,Satcom,AM radio,short-range")
    return parser.parse_args()
def main():
    """Load the selected sample from RadComDynamic.hdf5 and plot its I/Q parts."""
    args = parse_args()
    with h5py.File('RadComDynamic.hdf5', 'r') as f:
        key = args.mod, args.sig, args.snr, args.num
        waveform = f[str(key)][:]
        # First 128 values are the in-phase (I) samples, last 128 the quadrature (Q)
        real = waveform[0:128]
        imag = waveform[128:]
    # NOTE: the redundant f.close() after the `with` block was removed -
    # the context manager already closed the file.

    # Plot and visualize the selected sample
    plt.figure(figsize=[8, 6])
    plt.plot(real, '-go')
    plt.plot(imag, '-bo')
    plt.title(str(key), fontsize=16)
    plt.show()
if __name__ == "__main__":
    # BUGFIX: main() returns None, so the previous `sys.exit(not main())`
    # evaluated to sys.exit(True) and always exited with status 1 even on
    # success. Exiting with main()'s return value (None) yields status 0.
    sys.exit(main())
| 1,281 | 32.736842 | 160 | py |
pydeps | pydeps-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""pydeps - Python module dependency visualization
"""
# pragma: nocover
import io
import sys
import setuptools
from setuptools.command.test import test as TestCommand
# Package version string, passed to setuptools.setup() below.
version='1.12.12'
class PyTest(TestCommand):
    """`python setup.py test` command that delegates to pytest."""
    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        sys.exit(pytest.main(self.pytest_args))
setuptools.setup(
    name='pydeps',
    version=version,
    packages=setuptools.find_packages(exclude=['tests*']),
    install_requires=[
        # enum backport, only pulled in on interpreters older than 3.4
        'enum34; python_version < "3.4"',
        'stdlib_list',
    ],
    long_description=io.open('README.rst', encoding='utf8').read(),
    entry_points={
        'console_scripts': [
            # installs the `pydeps` command-line tool
            'pydeps = pydeps.pydeps:pydeps',
        ]
    },
    url='https://github.com/thebjorn/pydeps',
    # `python setup.py test` runs pytest via the PyTest command above
    cmdclass={'test': PyTest},
    license='BSD',
    author='bjorn',
    author_email='bp@datakortet.no',
    description='Display module dependencies',
    keywords='Python Module Dependency graphs',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| 1,918 | 26.811594 | 74 | py |
pydeps | pydeps-master/tasks.py | # pragma: nocover
from invoke import Collection, task
from dktasklib import version
from dktasklib import upversion
from dktasklib import publish
from dktasklib import docs
from dktasklib.package import Package, package
@task
def freeze(ctx):
    "pip freeze, but without -e installed packages"
    # `--format freeze` mirrors `pip freeze` output; --exclude-editable drops -e installs
    ctx.run("pip list --exclude-editable --format freeze")
@task
def outdated(ctx):
    "list all outdated requirements"
    ctx.run("pip list --outdated")
# Root invoke namespace: local tasks plus the shared dktasklib task modules.
ns = Collection(
    'pydeps',
    freeze,
    outdated,
    version,
    upversion,
    publish,
    docs,
    package
)
ns.configure({
    'pkg': Package(),
    'run': {
        'echo': True  # echo shell commands before running them
    }
})
| 659 | 16.368421 | 58 | py |
pydeps | pydeps-master/pydeps/target.py | # -*- coding: utf-8 -*-
"""
Abstracting the target for pydeps to work on.
"""
from __future__ import print_function
import json
import os
import re
import shutil
import sys
import tempfile
from contextlib import contextmanager
import logging
log = logging.getLogger(__name__)
class Target(object):
    """The compilation target.

    Wraps the path given on the command line and derives everything pydeps
    needs from it: whether it is a directory/package/python source file, the
    directory that must be put on sys.path, and the dotted module path.
    """
    def __init__(self, path):
        # log.debug("CURDIR: %s, path: %s, exists: %s", os.getcwd(), path, os.path.exists(path))
        # print("Target::CURDIR: %s, path: %s, exists: %s" % (os.getcwd(), path, os.path.exists(path)))
        self.calling_fname = path          # path exactly as given by the caller
        self.calling_dir = os.getcwd()     # cwd to restore in chdir_work()
        self.exists = os.path.exists(path)
        if self.exists:
            self.path = os.path.realpath(path)
        else:  # pragma: nocover
            print("No such file or directory:", repr(path), file=sys.stderr)
            # common mistake: passing the module name without the .py extension
            if os.path.exists(path + '.py'):
                print("..did you mean:", path + '.py', '?', file=sys.stderr)
            sys.exit(1)

        self.is_dir = os.path.isdir(self.path)
        # a directory containing __init__.py is a python package
        self.is_module = self.is_dir and '__init__.py' in os.listdir(self.path)
        self.is_pysource = os.path.splitext(self.path)[1] in ('.py', '.pyc', '.pyo', '.pyw')

        self.fname = os.path.basename(self.path)
        if self.is_dir:
            self.dirname = self.fname
            self.modname = self.fname
        else:
            self.dirname = os.path.dirname(self.path)
            self.modname = os.path.splitext(self.fname)[0]

        if self.is_pysource:
            # we will work directly on the file (in-situ)
            self.workdir = os.path.dirname(self.path)
        else:
            # non-source targets get a private temp dir (removed in close())
            self.workdir = os.path.realpath(tempfile.mkdtemp())

        self.syspath_dir = self.get_package_root()

        # split path such that syspath_dir + relpath == path
        self.relpath = self.path[len(self.syspath_dir):].lstrip(os.path.sep)
        # dotted module path, e.g. "pkg.sub.mod"
        if self.is_dir:
            self.modpath = self.relpath.replace(os.path.sep, '.')
        else:
            self.modpath = os.path.splitext(self.relpath)[0].replace(os.path.sep, '.')
        # absolute path of the top-level package directory
        self.package_root = os.path.join(
            self.syspath_dir,
            self._path_parts(self.relpath)[0]
        )

    @contextmanager
    def chdir_work(self):
        """Run with cwd set to the workdir and syspath_dir prepended to
           sys.path; restore both and clean up on exit.
        """
        try:
            os.chdir(self.workdir)
            sys.path.insert(0, self.syspath_dir)
            yield
        finally:
            os.chdir(self.calling_dir)
            if sys.path[0] == self.syspath_dir:
                sys.path = sys.path[1:]
            self.close()

    def get_package_root(self):
        """Return the first ancestor directory that is not itself a package
           (i.e. has no __init__.py) - the directory to put on sys.path.
        """
        for d in self.get_parents():
            if '__init__.py' not in os.listdir(d):
                return d
        raise Exception(
            "do you have an __init__.py file at the "
            "root of the drive..?")  # pragma: nocover

    def get_parents(self):
        """Return all ancestor directories of self.path, nearest first."""
        def _parent_iter():
            parts = self._path_parts(self.path)
            for i in range(1, len(parts)):
                yield os.path.join(*parts[:-i])
        return list(_parent_iter())

    def _path_parts(self, pth):
        """Return a list of all directories in the path ``pth``.
        """
        res = re.split(r"[\\/]", pth)
        # re-attach the path separator to a bare drive letter (e.g. "c:")
        if res and os.path.splitdrive(res[0]) == (res[0], ''):
            res[0] += os.path.sep
        return res

    def __del__(self):
        self.close()

    def close(self):
        """Clean up after ourselves.
        """
        try:
            # make sure we don't delete the user's source file if we're working
            # on it in-situ.
            if not self.is_pysource and hasattr(self, 'workdir'):
                shutil.rmtree(self.workdir)
        except OSError:
            pass

    def __repr__(self):  # pragma: nocover
        return json.dumps(
            {k: v for k, v in self.__dict__.items() if not k.startswith('_')},
            indent=4, sort_keys=True
        )
| 3,933 | 32.058824 | 103 | py |
pydeps | pydeps-master/pydeps/dot.py | # -*- coding: utf-8 -*-
"""
Graphviz interface.
"""
import os
import platform
import sys
from subprocess import Popen
import subprocess
import shlex
from . import cli
# True on native Windows; used below to pick between passing Popen a raw
# command string (with shell=True) and an argv list.
win32 = sys.platform == 'win32'
def is_unicode(s):  # pragma: nocover
    """Test unicode with py3 support.
    """
    try:
        unicode_type = unicode
    except NameError:
        # py3: the `unicode` builtin does not exist
        return False
    return isinstance(s, unicode_type)
def to_bytes(s):  # pragma: nocover
    """Convert an item into bytes.
    """
    if isinstance(s, bytes):
        return s
    if isinstance(s, str) or is_unicode(s):
        return s.encode("utf-8")
    # anything else: stringify first (py2 `unicode`, py3 `str`), then encode
    try:
        text = unicode(s)
    except NameError:
        text = str(s)
    return text.encode("utf-8")
def cmd2args(cmd):
    """Prepare a command line for execution by Popen.
    """
    if not isinstance(cmd, str):
        # already an argv list - pass through unchanged
        return cmd
    # Windows lets Popen handle the raw command string; elsewhere split it
    return cmd if win32 else shlex.split(cmd)
def pipe(cmd, txt):
    """Pipe `txt` into the command `cmd` and return the output.
    """
    proc = Popen(
        cmd2args(cmd),
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        shell=win32
    )
    stdout, _stderr = proc.communicate(txt)
    return stdout
def dot(src, **kw):
    """Execute the dot command to create an svg output.
    """
    fmt = kw.pop('T', 'svg')
    cmd = "dot -Gstart=1 -T%s" % fmt
    # remaining keyword args become dot flags: True -> bare flag, else -<k><v>
    for flag, value in list(kw.items()):
        if value is True:
            cmd += " -%s" % flag
        else:
            cmd += " -%s%s" % (flag, value)
    return pipe(cmd, to_bytes(src))
def call_graphviz_dot(src, fmt):
    """Call dot command, and provide helpful error message if we
       cannot find it.
    """
    try:
        svg = dot(src, T=fmt)
    except OSError as e:  # pragma: nocover
        if e.errno == 2:  # ENOENT: the `dot` executable was not found on PATH
            cli.error("""
    cannot find 'dot'
    pydeps calls dot (from graphviz) to create svg diagrams,
    please make sure that the dot executable is available
    on your path.
            """)
        raise
    return svg
def in_wsl():
    """Are we running under wsl?
    """
    release = platform.uname().release
    return 'microsoft-standard' in release
def display_svg(kw, fname):  # pragma: nocover
    """Try to display the svg file on this platform.

    Note that this is also used to display PNG files, despite the name.
    """
    display = kw['display']
    if not display:
        # fall back to environment configuration
        display = os.getenv('PYDEPS_DISPLAY', os.getenv('BROWSER', None))
    if not display:
        # no viewer configured: use the platform default opener
        cli.verbose("Displaying:", fname)
        if sys.platform == 'win32':
            os.startfile(fname)
        else:
            if sys.platform == "darwin":
                display = "open"
            elif in_wsl():
                # this is still borked...
                display = "/usr/bin/wslview"
            else:
                display = "xdg-open"
            subprocess.check_call([display, fname])
    else:
        # explicit viewer: run it directly with the file as argument
        cli.verbose(display + " " + fname)
        subprocess.check_call([display, fname])
| 2,906 | 23.024793 | 74 | py |
pydeps | pydeps-master/pydeps/__main__.py | from .pydeps import pydeps
pydeps()
| 36 | 11.333333 | 26 | py |
pydeps | pydeps-master/pydeps/pystdlib.py | # -*- coding: utf-8 -*-
import sys
import stdlib_list
import warnings
def pystdlib():
"""Return a set of all module-names in the Python standard library.
"""
if sys.version_info[:2] >= (3, 10):
# Python 3.10 has this functionality built-in.
return list(sys.stdlib_module_names | set(sys.builtin_module_names))
curver = '.'.join(str(x) for x in sys.version_info[:2])
if curver not in stdlib_list.short_versions:
# if stdlib_list doesn't know about our version, then use the last
# version that stdlib_list knows about (not perfect, but it will
# allow downstream packages to test on pre-release versions of
# Python - which are difficult for stdlib_list to support).
warnings.warn(
("stdlib_list does't support Python %s yet, "
"pydeps will use symbols from %s for now.") % (
curver, stdlib_list.long_versions[-1]
)
)
curver = stdlib_list.long_versions[-1]
return (set(stdlib_list.stdlib_list(curver)) | {
'_LWPCookieJar', '_MozillaCookieJar', '_abcoll', 'email._parseaddr',
'email.base64mime', 'email.feedparser', 'email.quoprimime',
'encodings', 'genericpath', 'ntpath', 'nturl2path', 'os2emxpath',
'posixpath', 'sre_compile', 'sre_parse', 'unittest.case',
'unittest.loader', 'unittest.main', 'unittest.result',
'unittest.runner', 'unittest.signals', 'unittest.suite',
'unittest.util', '_threading_local', 'sre_constants', 'strop',
'repr', 'opcode', 'nt', 'encodings.aliases',
'_bisect', '_codecs', '_collections', '_functools', '_hashlib',
'_heapq', '_io', '_locale', '_LWPCookieJar', '_md5',
'_MozillaCookieJar', '_random', '_sha', '_sha256', '_sha512',
'_socket', '_sre', '_ssl', '_struct', '_subprocess',
'_threading_local', '_warnings', '_weakref', '_weakrefset',
'_winreg'
}) - {'__main__'}
| 1,969 | 43.772727 | 76 | py |
pydeps | pydeps-master/pydeps/depgraph2dot.py | # Based on original code, Copyright 2004 Toby Dickenson,
# with changes 2014 (c) Bjorn Pettersen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from .render_context import RenderBuffer
from . import colors
class PyDepGraphDot(object):
    """Render a dependency graph as graphviz dot source (full graph)."""
    def __init__(self, **kw):
        # kw: command line option dict (only 'rmprefix' is consulted here).
        self.kw = kw
    def render(self, depgraph, ctx):
        """Write ``depgraph`` into the render context ``ctx`` and return
        the accumulated dot source text.
        """
        with ctx.graph():
            visited = set()
            drawn = set()
            # First emit the edges participating in import cycles, so the
            # normal pass below skips them as duplicates.
            for aname, bname in depgraph.cyclerelations:
                try:
                    a = depgraph.sources[aname]
                    b = depgraph.sources[bname]
                except KeyError:
                    # endpoint was excluded from depgraph.sources
                    continue
                drawn.add((bname, aname))
                ctx.write_rule(
                    aname, bname,
                    weight=depgraph.proximity_metric(a, b),
                    minlen=depgraph.dissimilarity_metric(a, b),
                )
            # Sorted iteration keeps the emitted dot source deterministic.
            for a, b in sorted(depgraph):
                # b imports a
                aname = a.name
                bname = b.name
                if (bname, aname) in drawn:
                    continue
                drawn.add((bname, aname))
                ctx.write_rule(
                    aname, bname,
                    weight=depgraph.proximity_metric(a, b),
                    minlen=depgraph.dissimilarity_metric(a, b))
                visited.add(a)
                visited.add(b)
            # Assign colors per module family; cycle members get octagons.
            space = colors.ColorSpace(visited)
            for src in sorted(visited):
                bg, fg = depgraph.get_colors(src, space)
                kwargs = {}
                if src.name in depgraph.cyclenodes:
                    kwargs['shape'] = 'octagon'
                ctx.write_node(
                    src.name,
                    label=src.get_label(splitlength=14,
                                        rmprefix=self.kw.get('rmprefix')),
                    fillcolor=colors.rgb2css(bg),
                    fontcolor=colors.rgb2css(fg),
                    **kwargs
                )
        return ctx.text()
class CycleGraphDot(object):
    """Render only the import cycles of a dependency graph as dot source."""

    def __init__(self, **kw):
        self.kw = kw

    def render(self, depgraph, ctx):
        """Emit dot source for ``depgraph``'s cycle relations into ``ctx``
        and return the resulting text.
        """
        with ctx.graph(concentrate=False):
            seen_nodes = set()
            emitted = set()
            for src_name, dst_name in depgraph.cyclerelations:
                try:
                    src = depgraph.sources[src_name]
                    dst = depgraph.sources[dst_name]
                except KeyError:
                    # endpoint was excluded from depgraph.sources
                    continue
                emitted.add((dst_name, src_name))
                ctx.write_rule(
                    dst_name, src_name,
                    weight=depgraph.proximity_metric(src, dst),
                    minlen=depgraph.dissimilarity_metric(src, dst),
                )
                seen_nodes.add(src)
                seen_nodes.add(dst)
            space = colors.ColorSpace(seen_nodes)
            for node in seen_nodes:
                bg, fg = depgraph.get_colors(node, space)
                extra = {}
                if node.name in depgraph.cyclenodes:
                    extra['shape'] = 'octagon'
                ctx.write_node(
                    node.name, label=node.label,
                    fillcolor=colors.rgb2css(bg),
                    fontcolor=colors.rgb2css(fg),
                    **extra
                )
        return ctx.text()
def dep2dot(target, depgraph, **kw):
    """Convert *depgraph* to dot source using the full-graph renderer."""
    buf = RenderBuffer(target, **kw)
    return PyDepGraphDot(**kw).render(depgraph, buf)
def cycles2dot(target, depgraph, **kw):
    """Convert the import cycles of *depgraph* to dot source."""
    buf = RenderBuffer(target, **kw)
    return CycleGraphDot(**kw).render(depgraph, buf)
| 4,638 | 33.110294 | 74 | py |
pydeps | pydeps-master/pydeps/arguments.py | from __future__ import print_function, unicode_literals
from io import StringIO
import textwrap
import json
import argparse
# from devtools import debug
from .configs import Config, typefns, identity
DEFAULT_NONE = '____'
class Argument(object):
    """A single command line argument, recorded so it can later be added
    to an argparse parser and mirrored into the config-file machinery.
    """

    def __init__(self, *flags, **args):
        # argparse chokes on an explicit ``choices=None``
        if args.get('choices', False) is None:
            del args['choices']
        # 'container' sneaks in when we mirror parent parser actions
        args.pop('container', None)
        self._args = args
        self._flags = flags

    def __json__(self):
        return vars(self)

    def typename(self):
        """Uppercase type tag used for config value coercion."""
        opts = self._args
        if 'kind' in opts:
            return opts['kind']
        if opts.get('action') in {'store_true', 'store_false'}:
            return 'BOOL'
        declared = opts.get('type')
        if declared is not None:
            return declared.__name__.upper()
        dflt = opts.get('default')
        if dflt is None:
            return DEFAULT_NONE
        return dflt.__class__.__name__.upper()

    def typefn(self):
        """Coercion function matching :meth:`typename`."""
        return typefns.get(self.typename(), identity)

    def pytype(self):
        """Python type name used when generating the Config class source."""
        if self.argname() == 'fname':
            return 'str'
        opts = self._args
        if 'choices' in opts:
            return 'Literal[%s]' % ', '.join(repr(c) for c in opts['choices'])
        if 'type' in opts:
            result = opts['type']
        elif opts.get('action') in {'store_true', 'store_false'}:
            result = bool
        elif opts.get('kind', 'unknown') is None:
            return 'None'
        elif opts.get('kind', '').startswith("FNAME"):
            result = str
        else:
            result = opts.get('default').__class__
        if result is None:
            return 'None'
        return result.__name__

    def argname(self):
        """The attribute name this argument binds to (argparse 'dest')."""
        if 'dest' in self._args:
            return self._args['dest']
        return self._flags[0].lstrip('-').replace('-', '_')

    def help(self):
        if 'help' in self._args:
            return self._args['help']
        return ''

    def default(self):
        """Default value; boolean actions imply one, otherwise DEFAULT_NONE."""
        opts = self._args
        if 'default' in opts:
            return opts['default']
        action = opts.get('action')
        if action == 'store_true':
            return False
        if action == 'store_false':
            return True
        return DEFAULT_NONE

    def add_to_parser(self, parser):
        """Register with *parser*, stripping our private attributes."""
        opts = self._args
        opts.pop('default', None)  # defaults come from the config layer
        opts.pop('kind', None)     # our own annotation, not argparse's
        if opts.get('action') in {'store_true', 'store_false'}:
            # None lets us detect "flag not given" when merging configs
            opts['default'] = None
        parser.add_argument(*self._flags, **opts)
class Namespace(object):
    """Dict-like convenience wrapper around an ``argparse.Namespace``."""

    def __init__(self, ns):
        self.ns = ns

    def __repr__(self):
        return json.dumps(vars(self.ns), indent=4, sort_keys=True)

    def items(self):
        """All (name, value) pairs as a list."""
        return list(vars(self.ns).items())

    def __getitem__(self, name):
        return getattr(self.ns, name)

    def __setitem__(self, name, value):
        setattr(self.ns, name, value)

    def __delitem__(self, name):
        delattr(self.ns, name)

    def __getattr__(self, name):
        # only called for attributes not found on the wrapper itself
        return getattr(self.ns, name)
class Arguments(object):
    """Collects :class:`Argument` definitions, then parses the command line
    and merges the result with any configuration files.
    """

    def __init__(self, config_files=None, debug=False, *posargs, parents=None, **kwargs):
        if config_files is None:
            config_files = []
        # passthrough to argparse
        self.posargs = posargs
        self.kwargs = kwargs
        self.debug = debug
        self.arglist = []       # Argument instances, declaration order
        self.args = {}          # argname -> Argument
        self.config_files = config_files
        self.argtypes = {}      # argname -> typename (for config coercion)
        self.defaults = {}      # argname -> default value
        self.parents = parents  # parent parsers (argparse 'parents' feature)

    def load_config_files(self):
        """Load any .pydeps-style config files we were given."""
        for filename in self.config_files:
            if filename == '.pydeps' or filename.endswith('.pydeps'):
                self.load_pydeps_config(filename)

    def write_default_config(self):
        """Utility function to create .configs.Config
        # XXX: a more general utility to create configs from argparse
        # would be nice...
        """
        fp = StringIO()
        print("class Config(object):", file=fp)
        arglist = self.arglist
        if self.parents:
            # mirror the parent parsers' actions so they show up too
            parent_actions = []
            for parent in self.parents:
                for action in parent._actions:
                    parent_actions.append(Argument(
                        *action.option_strings, **action.__dict__
                    ))
            arglist = parent_actions + arglist
        for arg in arglist:
            # BUG FIX: this used to call ``debug(arg._args)`` with the
            # ``from devtools import debug`` import commented out, raising
            # NameError unconditionally.  Gate the dump on self.debug.
            if self.debug:
                print(arg._args)
            default = arg.default()
            if default == DEFAULT_NONE:
                default = None
            elif isinstance(default, str):
                default = repr(default)
            helplines = textwrap.wrap(arg.help(), 80 - 7)  # 7 = len("    #: ")
            for helpline in helplines:
                print("", file=fp)
                print("    #: {help}".format(help=helpline), end="", file=fp)
            print("", file=fp)
            typename = arg.pytype()
            if typename == 'list' and default is None:
                typename = 'Optional[List[str]]'
            elif default is None:
                typename = f'Optional[{typename}]'
            elif default == [] and typename == 'list':
                typename = 'List[str]'
            # (typename computed for possible future type annotations;
            #  output is currently written without them)
            print("    {argname} = {default}".format(argname=arg.argname(), default=default), file=fp)
        print("", file=fp)
        print("    def set_field(self, field, value):", file=fp)
        for arg in self.arglist:
            print("        if field == '{argname}':".format(argname=arg.argname()), file=fp)
            print("            self.{argname} = {typefn}(value)".format(argname=arg.argname(), typefn=arg.typefn().__name__), file=fp)
        print("", file=fp)
        res = fp.getvalue()
        print(res)
        return res

    def parse_args(self, argv):
        """Parse *argv*, merge with config files, and return a Config."""
        self.kwargs['parents'] = self.parents
        p = argparse.ArgumentParser(*self.posargs, **self.kwargs)
        for arg in self.arglist:
            arg.add_to_parser(p)
        args = Namespace(p.parse_args(argv))
        config = Config.load(self.config_files)
        # command line flags that were actually given override config values
        config.update({k: v for k, v in args.items() if v is not None})
        return config

    def add(self, *flags, **kwargs):
        """Declare one argument (same signature as parser.add_argument)."""
        arg = Argument(*flags, **kwargs)
        self.arglist.append(arg)
        argname = arg.argname()
        self.args[argname] = arg
        self.argtypes[argname] = arg.typename()
        self.defaults[argname] = arg.default()

    def __repr__(self):
        return json.dumps(dict(
            types=self.argtypes,
            defaults=self.defaults,
        ), indent=4, sort_keys=True)
| 7,599 | 31.067511 | 134 | py |
pydeps | pydeps-master/pydeps/mfimp.py | """
Python's modulefinder._find_module has a bug that breaks a number of popular
packages.
This is a copy of the standard lib's imp._find_module which sort of does the
right thing, ie. ignores namespace packages instead of crashing.
This is vendorized/copied here to prevent the warning error that the regular
imp module causes.
"""
from _imp import is_builtin, is_frozen
from importlib import machinery
import sys
import os
import tokenize
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
PKG_DIRECTORY = 5
C_BUILTIN = 6
PY_FROZEN = 7
def _get_suffixes():
extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES]
source = [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
return extensions + source + bytecode
def find_module(name, path=None):
    """Locate module *name*, searching *path* (or sys.path).

    Returns an ``(file, path, (suffix, mode, type))`` triple like the old
    ``imp.find_module``, but skips namespace packages instead of crashing
    (the reason this copy exists; see module docstring).
    Raises ImportError if the module cannot be found.
    """
    if not isinstance(name, str):
        raise TypeError("'name' must be a str, not {}".format(type(name)))
    elif not isinstance(path, (type(None), list)):
        # Backwards-compatibility
        raise RuntimeError("'path' must be None or a list, "
                           "not {}".format(type(path)))
    if path is None:
        # builtins and frozen modules have no file on disk
        if is_builtin(name):
            return None, None, ('', '', C_BUILTIN)
        elif is_frozen(name):
            return None, None, ('', '', PY_FROZEN)
        else:
            path = sys.path
    for entry in path:
        package_directory = os.path.join(entry, name)
        # a package: directory containing an __init__ source/bytecode file
        for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
            package_file_name = '__init__' + suffix
            file_path = os.path.join(package_directory, package_file_name)
            if os.path.isfile(file_path):
                return None, package_directory, ('', '', PKG_DIRECTORY)
        # a plain module: try each importable suffix in this directory
        for suffix, mode, type_ in _get_suffixes():
            file_name = name + suffix
            file_path = os.path.join(entry, file_name)
            if os.path.isfile(file_path):
                break
        else:
            continue
        break  # Break out of outer loop when breaking out of inner loop.
    else:
        # exhausted every path entry without a hit
        raise ImportError('No module named {!r}'.format(name), name=name)
    encoding = None
    if 'b' not in mode:
        # text-mode source file: honour its declared encoding (PEP 263)
        with open(file_path, 'rb') as file:
            encoding = tokenize.detect_encoding(file.readline)[0]
    file = open(file_path, mode, encoding=encoding)
    return file, file_path, (suffix, mode, type_)
| 2,445 | 31.613333 | 79 | py |
pydeps | pydeps-master/pydeps/render_context.py | # -*- coding: utf-8 -*-
from collections import defaultdict
from io import StringIO
from contextlib import contextmanager
import textwrap
import enum
def to_unicode(s):
    """Coerce *s* to unicode on Python 2; on Python 3 (where the
    ``unicode`` builtin does not exist) return it untouched."""
    try:
        coerce_fn = unicode
    except NameError:  # Python 3
        return s
    return coerce_fn(s)
class Rankdir(enum.Enum):
    """Graphviz ``rankdir`` values controlling graph layout direction."""
    BOTTOM_TOP = 'BT'
    TOP_BOTTOM = 'TB'
    LEFT_RIGHT = 'LR'
    RIGHT_LEFT = 'RL'

    def reverse(self):
        """Return the Rankdir pointing the opposite way."""
        flipped = self.value[::-1]
        return type(self)(flipped)
class RenderContext(object):
    """Low-level writer that accumulates graphviz dot source text."""
    def __init__(self, out=None, reverse=False, rankdir=Rankdir.TOP_BOTTOM):
        # out: optional secondary stream that mirrors everything written
        self.out = out
        self.fp = StringIO()
        self.fillcolor = '#ffffff'
        self.fontcolor = '#000000'
        self.name = None
        self.concentrate = None
        self.compound = None
        self.width = 0.75
        self.reverse = reverse
        self.rankdir = rankdir
    @contextmanager
    def graph(self, **kw):
        """Set up a graphviz graph context.
        """
        self.name = kw.get('name', 'G')
        self.fillcolor = kw.get('fillcolor', '#ffffff')
        self.fontcolor = kw.get('fontcolor', '#000000')
        if kw.get('concentrate', True):
            self.concentrate = 'concentrate = true;'
        else:
            self.concentrate = ''
        self.compound = 'compound = true;' if kw.get('compound') else ''
        # header of the digraph; body is written by the caller inside the
        # with-block, and the closing brace follows the yield below
        self.dedent("""
            digraph {self.name} {{
                {self.concentrate}
                {self.compound}
                rankdir = {self.rankdir.value};
                node [style=filled,fillcolor="{self.fillcolor}",fontcolor="{self.fontcolor}",fontname=Helvetica,fontsize=10];
        """.format(self=self))
        yield
        self.writeln('}')
    def text(self):
        """Get value of output stream (StringIO).
        """
        if self.out:
            self.out.close()  # pragma: nocover
        return self.fp.getvalue()
    def write_rule(self, a, b, **attrs):
        """a -> b [a1=x,a2=y];
        """
        if self.reverse:
            a, b = b, a
        with self.rule():
            self.write('%s -> %s' % (self._nodename(a), self._nodename(b)))
            # remove default values from output
            self._delattr(attrs, 'weight', 1)
            self._delattr(attrs, 'minlen', 1)
            self._delattr(attrs, 'len', 1)
            self.write_attributes(attrs)
    def write_node(self, a, **attrs):
        """a [a1=x,a2=y];
        """
        with self.rule():
            nodename = self._nodename(a)
            self.write(nodename)
            # remove default values from output
            self._delattr(attrs, 'label', nodename)
            self._delattr(attrs, 'fillcolor', self.fillcolor)
            self._delattr(attrs, 'fontcolor', self.fontcolor)
            self._delattr(attrs, 'width', self.width)
            self.write_attributes(attrs)
    # -- end of external/public interface --
    def write(self, txt):
        """Write ``txt`` to file and output stream (StringIO).
        """
        self.fp.write(to_unicode(txt))
        if self.out:
            self.out.write(txt)  # pragma: nocover
    def writeln(self, txt):
        """Write ``txt`` and add newline.
        """
        self.write(txt + '\n')
    def dedent(self, txt):
        """Write ``txt`` dedented.
        """
        self.write(textwrap.dedent(txt))
    def write_attributes(self, attrs):
        """Write comma separated attribute values (if exists).
        """
        if attrs:
            self.write(
                ' [' + ','.join('%s="%s"' % kv for kv in sorted(attrs.items())) + ']'
            )
        else:  # pragma: nocover
            pass
    def _nodename(self, x):
        """Return a valid node name.
        """
        return x.replace('.', '_')
    def _delattr(self, attr, key, value):
        # drop attribute if it equals graphviz's (or our) default
        if attr.get(key) == value:
            del attr[key]
    @contextmanager
    def rule(self):
        """Write indented rule.
        """
        self.write('    ')
        yield
        self.writeln(';')
class RenderBuffer(object):
    """Buffers nodes/rules, performs cluster triage, then renders the
    final dot text through a :class:`RenderContext`."""
    def __init__(self, target,
                 reverse=False,
                 rankdir=Rankdir.TOP_BOTTOM,
                 cluster=False,
                 min_cluster_size=0,
                 max_cluster_size=1,
                 keep_target_cluster=False,
                 collapse_target_cluster=False, **kw):
        self.target = target
        self.nodes = []                   # non-clustered (name, attrs) pairs
        self.clusters = defaultdict(list) # clusterid -> [(name, attrs), ...]
        self.rules = {}                   # (a, b) -> edge attrs
        self.reverse = reverse
        self.rankdir = Rankdir(rankdir)
        if self.reverse:
            # reversing arrows also flips the layout direction
            self.rankdir = self.rankdir.reverse()
        self.cluster = cluster
        self.min_cluster_size = min_cluster_size
        self.max_cluster_size = max_cluster_size
        self.graph_attrs = {}
        self.keep_target_cluster = keep_target_cluster
        self.collapse_target_cluster = collapse_target_cluster
    def _nodecolor(self, n):
        # fillcolor of node *n* among the non-clustered nodes (fallback black)
        for node, attrs in self.nodes:
            if node == n:
                return attrs['fillcolor']
        return '#000000'
    def cluster_stats(self):
        """Return (smallest, largest) cluster sizes."""
        maxnodes = max(len(v) for v in self.clusters.values())
        minnodes = min(len(v) for v in self.clusters.values())
        return minnodes, maxnodes
    def _remove_small_clusters(self):
        # remove clusters that are too small
        target_cluster = self._target_clusterid()
        _remove = []
        for clusterid, nodes in sorted(self.clusters.items()):
            if clusterid == target_cluster:
                # Target cluster must always be there, don't remove it even if it's small. We can get here
                # when --collapse-target-cluster flag is used.
                continue
            if len(nodes) < self.min_cluster_size:
                # demote the cluster's nodes to plain (non-clustered) nodes
                self.nodes += nodes
                _remove.append(clusterid)
        for _r in _remove:
            del self.clusters[_r]
    def _collapse_cluster(self, clusterid, nodes):
        """Add a single cluster node (with a label listing contents?)
        and change all rules to reference this node instead.
        """
        first_node, first_attrs = nodes[0]
        first_attrs['shape'] = 'folder'
        first_attrs['label'] = clusterid
        self.nodes.append((clusterid, first_attrs))
        for node, attrs in nodes:  # for each node in this cluster
            # check all rules for in/out relations
            rules = list(self.rules.items())
            self.rules = {}
            for (a, b), rule_attrs in rules:
                # rewrite edges touching this node to touch the cluster node
                if a == node:
                    a = clusterid
                if b == node:
                    b = clusterid
                self.rules[(a, b)] = rule_attrs
        del self.clusters[clusterid]
    def triage_clusters(self):
        """Apply the cluster size/keep/collapse policies before rendering."""
        target_cluster = self._target_clusterid()
        if not self.collapse_target_cluster and not self.keep_target_cluster:
            # don't put nodes from the target into a cluster
            self.nodes += self.clusters[target_cluster]
            del self.clusters[target_cluster]
        self._remove_small_clusters()
        # collapse target cluster if requested
        if self.collapse_target_cluster:
            self._collapse_cluster(target_cluster, self.clusters[target_cluster])
        # collapse clusters that are too big
        for clusterid, nodes in sorted(self.clusters.items()):
            if len(nodes) > self.max_cluster_size and clusterid != target_cluster:
                self._collapse_cluster(clusterid, nodes)
    def text(self):
        """Render everything buffered so far and return the dot source."""
        ctx = RenderContext(reverse=self.reverse, rankdir=self.rankdir)
        if self.cluster:
            self.triage_clusters()
        if self.clusters:  # are there any clusters left after triage?
            # compound is required for edges between clusters (lhead/ltail)
            self.graph_attrs['compound'] = True
            self.graph_attrs['concentrate'] = False
        with ctx.graph(**self.graph_attrs):
            clusters = set()
            for clusterid, nodes in sorted(self.clusters.items()):
                clusters.add(clusterid)
                ctx.writeln('subgraph cluster_%s {' % clusterid)
                ctx.writeln('    label = %s;' % clusterid)
                for n, attrs in nodes:
                    ctx.write_node(n, **attrs)
                ctx.writeln('}')
            # non-clustered nodes
            for n, attrs in self.nodes:
                ctx.write_node(n, **attrs)
            intercluster = set()
            for (a, b), attrs in sorted(self.rules.items()):
                if a == b:
                    continue
                cida = self._clusterid(a)
                cidb = self._clusterid(b)
                if cida == cidb:
                    # edge inside one cluster family: color from source node
                    if self.reverse:
                        attrs['fillcolor'] = self._nodecolor(b)
                    else:
                        attrs['fillcolor'] = self._nodecolor(a)
                    ctx.write_rule(a, b, **attrs)
                elif cida in clusters and cidb in clusters:
                    # only one representative edge between any two clusters
                    if (cida, cidb) not in intercluster:
                        intercluster.add((cida, cidb))
                        if self.reverse:
                            attrs['lhead'] = 'cluster_' + cida
                            attrs['ltail'] = 'cluster_' + cidb
                            attrs['fillcolor'] = self._nodecolor(b)
                        else:
                            attrs['ltail'] = 'cluster_' + cida
                            attrs['lhead'] = 'cluster_' + cidb
                            attrs['fillcolor'] = self._nodecolor(a)
                        ctx.write_rule(a, b, **attrs)
                else:
                    # mixed edge: one endpoint clustered, the other not
                    if cida in clusters:
                        if self.reverse:
                            attrs['lhead'] = 'cluster_' + cida
                        else:
                            attrs['ltail'] = 'cluster_' + cida
                    if cidb in clusters:
                        if self.reverse:
                            attrs['ltail'] = 'cluster_' + cidb
                        else:
                            attrs['lhead'] = 'cluster_' + cidb
                    if self.reverse:
                        attrs['fillcolor'] = self._nodecolor(b)
                    else:
                        attrs['fillcolor'] = self._nodecolor(a)
                    ctx.write_rule(a, b, **attrs)
        return ctx.text()
    @contextmanager
    def graph(self, **kw):
        self.graph_attrs.update(kw)
        yield
    def _clusterid(self, n):
        # cluster by top-level package name
        return n.split('.')[0]
    def write_node(self, n, **attrs):
        clusterid = self._clusterid(n)
        if self.cluster:
            self.clusters[clusterid].append((n, attrs))
        else:
            self.nodes.append((n, attrs))
    def write_rule(self, a, b, **attrs):
        self.rules[(a, b)] = attrs
    def _target_clusterid(self):
        return self._clusterid(self.target.fname)
| 11,072 | 33.388199 | 125 | py |
pydeps | pydeps-master/pydeps/dummymodule.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import textwrap
import logging
from . import cli
log = logging.getLogger(__name__)
def is_module(directory):
    """True iff *directory* is a python package, i.e. it contains an
    ``__init__.py`` file."""
    if not os.path.isdir(directory):
        return False
    return '__init__.py' in os.listdir(directory)
def is_pysource(fname):
    """A file name is a python source file iff it ends with '.py' or
    '.pyw' and is not a dot-file."""
    if fname.startswith('.'):
        return False
    return fname.endswith(('.py', '.pyw'))


def fname2modname(fname, package_root):
    """Translate a file path below *package_root* into a dotted module name."""
    stem = os.path.splitext(fname)[0]
    subpath = stem[len(package_root):]
    return subpath.lstrip(os.path.sep).replace(os.path.sep, '.')


def python_sources_below(directory, package=True):
    """Yield python source files below *directory* (packages themselves
    are yielded as the absolute path of their directory).

    With ``package=True``, directories lacking an ``__init__.py`` are
    skipped (but still descended into).  Hidden directories and django
    ``migrations`` directories of package dirs are pruned from traversal.
    """
    for root, dirs, files in os.walk(directory):
        if package and '__init__.py' not in files:
            continue
        # prune hidden dirs (in-place so os.walk honours it)
        for hidden in [d for d in dirs if d.startswith('.')]:
            dirs.remove(hidden)
        if 'migrations' in dirs:
            dirs.remove('migrations')
        for fname in files:
            if not is_pysource(fname):
                continue
            if fname == '__init__.py':
                yield os.path.abspath(root)
            else:
                yield os.path.abspath(os.path.join(root, fname))
class DummyModule(object):
    """We create a file that imports the module to be investigated.

    The generated file (one import statement per module found below the
    target) is what the module finder is later pointed at.
    """
    def __init__(self, target, **args):
        self._legal_mnames = {}  # cache: module name -> is it importable
        self.target = target
        self.fname = '_dummy_' + target.modpath.replace('.', '_') + '.py'
        self.absname = os.path.join(target.workdir, self.fname)
        if target.is_module:
            cli.verbose(1, "target is a PACKAGE")
            with open(self.fname, 'w') as fp:
                for fname in python_sources_below(target.package_root):
                    modname = fname2modname(fname, target.syspath_dir)
                    self.print_import(fp, modname)
        elif target.is_dir:
            # FIXME?: not sure what the intended semantics was here, as it is
            # this will almost certainly not do the right thing...
            cli.verbose(1, "target is a DIRECTORY")
            log.debug('curdir: %r', os.getcwd())
            log.debug('fname: %r', self.fname)
            log.debug('target.dirname: %r', target.dirname)
            with open(self.fname, 'w') as fp:
                dirname = os.path.abspath(os.path.join(target.calling_dir, target.calling_fname))
                for fname in os.listdir(dirname):
                    fname = os.path.join(dirname, fname)
                    log.debug("fname: %r", fname)
                    if is_pysource(fname):
                        self.print_import(fp, fname2modname(fname, ''))
                    elif is_module(fname):
                        log.debug("fname is a module: %r", fname)
                        for fnamea in python_sources_below(fname):
                            modname = fname2modname(fnamea, target.syspath_dir)
                            self.print_import(fp, modname)
        else:
            assert target.is_pysource
            cli.verbose(1, "target is a FILE")
            # if working on a single file, we don't need to create a dummy
            # module, this also avoids problems with file names that are
            # not importable (e.g. `foo.bar.py)
            # self.fname = target.calling_fname
            self.fname = target.fname
            self.absname = target.package_root
            # with open(self.fname, 'w') as fp:
            #     self.print_import(fp, target.modpath)
        log.debug("dummy-filename: %r (%s)[module=%s, dir=%s, file=%s]",
                  self.fname, self.absname, target.is_module, target.is_dir, target.is_pysource)
    def text(self):
        """Return the content of the dummy module.
        """
        log.debug("Getting text from %r", self.fname)
        with open(self.fname) as fp:
            return fp.read()
    def legal_module_name(self, name):
        """Legal module names are dotted strings where each part
           is a valid Python identifier.
           (and not a keyword, and support unicode identifiers in
           Python3, ..)
        """
        if name in self._legal_mnames:
            return self._legal_mnames[name]
        # each dotted part must be assignable, i.e. a legal identifier
        for part in name.split('.'):
            try:
                exec("%s = 42" % part, {}, {})
            except:  # pragma: nocover # noqa
                self._legal_mnames[name] = False
                return False
        self._legal_mnames[name] = True
        return True
    def print_header(self, fp):  # pragma: nocover
        # we're not executing the file in fp, so really not necessary to
        # catch import errors
        print(textwrap.dedent("""
            import sys
            import traceback
        """), file=fp)
    def print_import(self, fp, module):
        """Write an import statement for *module* to *fp* (skipping names
        that are not legal python identifiers)."""
        if not self.legal_module_name(module):
            log.warning("SKIPPING ILLEGAL MODULE_NAME: %s", module)
            return
        mparts = module.rsplit('.', 1)
        # we're not executing the file in fp, so really not necessary to
        # catch import errors
        if len(mparts) == 1:
            print(textwrap.dedent("""\
                import {module}
            """).format(module=module), file=fp)
        else:
            print(textwrap.dedent("""\
                from {prefix} import {mname}
            """).format(prefix=mparts[0], mname=mparts[1]), file=fp)
| 5,638 | 36.344371 | 97 | py |
pydeps | pydeps-master/pydeps/cli.py | # -*- coding: utf-8 -*-
"""
command line interface (cli) code.
"""
# pylint: disable=line-too-long
from __future__ import print_function
import argparse
from pydeps.configs import Config
from .arguments import Arguments
# import json
# from .pycompat import configparser
import logging
import os
import sys
import subprocess
import textwrap
from . import __version__
def error(*args, **kwargs):  # pragma: nocover
    """Print an error message to stderr and terminate with exit code 1.
    """
    kwargs['file'] = sys.stderr  # errors always go to stderr
    print("\n\tERROR:", *args, **kwargs)
    missing_file = bool(args) and args[0].startswith("[Errno 2] No such file or directory")
    if missing_file:
        # most common cause when pydeps can't import the target
        print("\t(Did you forget to include an __init__.py?)")
    sys.exit(1)
#: the (will become) verbose function
verbose = None
def _not_verbose(*args, **kwargs):  # pragma: nocover
    """Do-nothing placeholder installed until a verbosity level is chosen."""
    pass
# default: silent until parse_args installs a real printer via _mkverbose
verbose = _not_verbose
def _mkverbose(level):
def _verbose(n, *args, **kwargs):
if not isinstance(n, int): # we're only interested in small integers
# this allows the simpler usage cli.verbose(msg)
args = (n,) + args
n = 1
if 0 < level <= n:
print(*args, **kwargs)
return _verbose
def _find_current_package():
    """Walk up from the cwd until a directory containing setup.py is
    found, chdir into it, and ask setuptools for the package name."""
    here = os.getcwd()
    while 'setup.py' not in os.listdir(here) and here != os.path.dirname(here):
        here = os.path.dirname(here)
    if 'setup.py' not in os.listdir(here):
        raise Exception("--find-package didn't find setup.py in current, or any parent, directory")
    os.chdir(here)
    name = subprocess.check_output("python setup.py --name", shell=True)
    return name.decode('u8').strip()
def base_argparser(argv=()):
    """Initial parser that can set values for the rest of the parsing process.

    Handles the options that must be known before the main argument list
    is built (--config/--no-config, logging, --version, --find-package)
    and returns ``(parser, parsed_args, remaining_argv)``.
    """
    global verbose
    verbose = _not_verbose
    _p = argparse.ArgumentParser(add_help=False)
    _p.add_argument('--debug', action='store_true', dest='debug', help="turn on all the show and verbose options (mainly for debugging pydeps itself)")
    _p.add_argument('--config', help="specify config file", metavar="FILE")
    _p.add_argument('--no-config', help="disable processing of config files", action='store_true')
    _p.add_argument('--version', action='store_true', help='print pydeps version')
    _p.add_argument('-L', '--log', help=textwrap.dedent('''
        set log-level to one of CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET.
    '''))
    _p.add_argument('--find-package', action='store_true', help="tries to automatically find the name of the current package.")
    _args, argv = _p.parse_known_args(argv)
    if _args.log:
        # BUG FIX: this used to be a *substring* test against the string
        # "CRITICAL DEBUG ERROR FATAL INFO WARN", which accepted bogus
        # values like "RIT" (then crashed in getattr) and rejected the
        # documented WARNING/NOTSET spellings.
        loglevels = {'CRITICAL', 'ERROR', 'WARNING', 'WARN', 'INFO',
                     'DEBUG', 'FATAL', 'NOTSET'}
        if _args.log not in loglevels:  # pragma: nocover
            error('legal values for the -L parameter are:',
                  ' '.join(sorted(loglevels)))
        loglevel = getattr(logging, _args.log)
    else:
        loglevel = None
    logging.basicConfig(
        level=loglevel,
        format='%(filename)s:%(lineno)d: %(levelname)s: %(message)s'
    )
    if _args.version:  # pragma: nocover
        print("pydeps v" + __version__)
        sys.exit(0)
    return _p, _args, argv  # return parsed and remaining args
def parse_args(argv=()):
    """Parse command line arguments, and return a dict.

    Precedence (highest first): command line flags, ``--config`` file,
    local ``.pydeps``/``pydeps.yml``, ``pyproject.toml``, ``setup.cfg``,
    then ``~/.pydeps``.
    """
    global verbose
    _p, _args, argv = base_argparser(argv)
    find_package = _args.find_package
    config_files = []
    if not _args.no_config:  # process config files
        # extra config file specified with --config <fname> has highest precedence
        if _args.config:
            config_files.append(_args.config)
        # .pydeps file specified in current directory is next
        local_pydeps = os.path.join(os.getcwd(), '.pydeps')
        if os.path.exists(local_pydeps):
            config_files.append(local_pydeps)
        # pydeps.yml file specified in current directory is next
        local_pydeps = os.path.join(os.getcwd(), 'pydeps.yml')
        if os.path.exists(local_pydeps):
            config_files.append(local_pydeps)
        # is there a pyproject.toml file?
        pyproj = os.path.join(os.getcwd(), 'pyproject.toml')
        if os.path.exists(pyproj):
            config_files.append(pyproj)
        # is there a setup.cfg file?
        pyproj = os.path.join(os.getcwd(), 'setup.cfg')
        if os.path.exists(pyproj):
            config_files.append(pyproj)
        # finally the .pydeps file in the the user's homedir
        home = os.environ['USERPROFILE' if sys.platform == 'win32' else 'HOME']
        home_pydeps = os.path.join(home, '.pydeps')
        if os.path.exists(home_pydeps):
            config_files.append(home_pydeps)
    args = Arguments(config_files, debug=True, parents=[_p])
    # fname is positional unless --find-package will supply it
    if not find_package:
        args.add('fname', kind="FNAME:input", help='filename')
    else:
        args.add('--fname', kind="FNAME:input", help='filename')
    args.add('-v', '--verbose', default=0, dest='verbose', action='count', help="be more verbose (-vv, -vvv for more verbosity)")
    args.add('-o', default=None, kind="FNAME:output", dest='output', metavar="file", help="write output to 'file'")
    args.add('-T', default='svg', dest='format', help="output format (svg|png)")
    args.add('--display', kind="FNAME:exe", default=None, help="program to use to display the graph (png or svg file depending on the T parameter)", metavar="PROGRAM")
    args.add('--noshow', '--no-show', action='store_true', default=False, dest='no_show', help="don't call external program to display graph")
    args.add('--show-deps', action='store_true', help="show output of dependency analysis")
    args.add('--show-raw-deps', action='store_true', help="show output of dependency analysis before removing skips")
    args.add('--deps-output', dest='deps_out', default=None, kind="FNAME:output", help="write output of dependency analysis to 'file'")
    args.add('--show-dot', action='store_true', help="show output of dot conversion")
    args.add('--dot-output', dest='dot_out', default=None, kind="FNAME:output", help="write dot code to 'file'")
    args.add('--nodot', '--no-dot', action='store_true', help="skip dot conversion")
    args.add('--no-output', action='store_true', help="don't create .svg/.png file, implies --no-show (-t/-o will be ignored)")
    args.add('--show-cycles', action='store_true', help="show only import cycles")
    args.add('--debug-mf', default=0, type=int, metavar="INT", help="set the ModuleFinder.debug flag to this value")
    args.add('--noise-level', default=200, type=int, metavar="INT", help="exclude sources or sinks with degree greater than noise-level")
    args.add('--max-bacon', default=2, type=int, metavar="INT", help="exclude nodes that are more than n hops away (default=2, 0 -> infinite)")
    args.add('--max-module-depth', default=0, type=int, metavar="INT", help="coalesce deep modules to at most n levels")
    args.add('--pylib', action='store_true', help="include python std lib modules")
    args.add('--pylib-all', action='store_true', help="include python all std lib modules (incl. C modules)")
    args.add('--include-missing', action='store_true', help="include modules that are not installed (or can't be found on sys.path)")
    args.add('-x', '--exclude', default=[], nargs="+", metavar="PATTERN", help="input files to skip (e.g. `foo.*`), multiple file names can be provided")
    args.add('-xx', '--exclude-exact', default=[], nargs="+", metavar="MODULE", help="same as --exclude, except requires the full match. `-xx foo.bar` will exclude foo.bar, but not foo.bar.blob")
    args.add('--only', default=[], nargs="+", metavar="MODULE_PATH", help="only include modules that start with MODULE_PATH")
    args.add('--externals', action='store_true', help='create list of direct external dependencies')
    args.add('--reverse', action='store_true', help="draw arrows to (instead of from) imported modules")
    # BUG FIX: the help text used to repeat "BT (opposite direction of TB)"
    args.add('--rankdir', default='TB', type=str, choices=['TB', 'BT', 'LR', 'RL'], help="set the direction of the graph, legal values are TB (default, imported modules above importing modules), "
             "BT (opposite direction of TB), LR (left-to-right) and RL (right-to-left)")
    args.add('--cluster', action='store_true', help="draw external dependencies as separate clusters")
    args.add('--min-cluster-size', default=0, type=int, metavar="INT", help="the minimum number of nodes a dependency must have before being clustered (default=0)")
    args.add('--max-cluster-size', default=0, type=int, metavar="INT", help="the maximum number of nodes a dependency can have before the cluster is collapsed to a single node (default=0)")
    args.add('--keep-target-cluster', action='store_true', help="draw target module as a cluster")
    args.add('--collapse-target-cluster', action='store_true', help="collapse target module (--keep-target-cluster will be ignored)")
    args.add('--rmprefix', default=[], nargs="+", metavar="PREFIX", help="remove PREFIX from the displayed name of the nodes")
    args.add('--start-color', default=0, type=int, metavar="INT", help="starting value for hue from 0 (red/default) to 360.")
    _args = args.parse_args(argv)
    if _args.externals:
        # --externals short-circuits: fixed settings for listing externals
        return Config(externals=True, fname=_args.fname, max_bacon=10,
                      include_missing=True, no_show=True)
    if _args.no_output:
        _args.no_show = True
    _args.show = not _args.no_show
    if _args.no_dot and _args.show_cycles:
        # BUG FIX: the message used to read "--no=dot"
        error("Can't use --no-dot and --show-cycles together")  # pragma: nocover
    if _args.no_dot:
        _args.show_dot = False
    if _args.max_bacon == 0:
        # 0 means unlimited hops
        _args.max_bacon = sys.maxsize
    if (
        _args.keep_target_cluster
        or _args.min_cluster_size > 0
        or _args.max_cluster_size > 0
        or _args.collapse_target_cluster
    ):
        # any cluster tuning flag implies clustering
        _args.cluster = True
    if find_package:
        _args.fname = _find_current_package()
    _args.format = getattr(_args, 'format', 'svg')
    verbose = _mkverbose(max(_args.verbose, int(_args.debug)))
    verbose(2, _args, '\n')
    if _args.debug:  # pragma: nocover
        _args.verbose = 1
        _args.show = True
        _args.show_deps = True
        _args.show_dot = True
    return vars(_args)
| 10,311 | 46.302752 | 196 | py |
pydeps | pydeps-master/pydeps/pycompat.py | # -*- coding: utf-8 -*-
"""
Compatibility imports between py2/py3
"""
# pragma: nocover
# py2/py3 name bridging: expose a single name for each renamed stdlib facility.
try:
    from itertools import zip_longest # noqa
except ImportError:
    # Python 2 only has izip_longest; re-export it under the py3 name.
    from itertools import izip_longest as zip_longest # noqa
try:
    import configparser # noqa
except ImportError:
    # Python 2 named the module ConfigParser; alias it to the py3 name.
    import ConfigParser as configparser # noqa
| 391 | 25.133333 | 62 | py |
pydeps | pydeps-master/pydeps/package_names.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import site
def _find_top_level_file(site_pkg_dir, pth):
if pth.endswith('.dist-info') or pth.endswith('.egg-info'):
top_level_fname = os.path.join(site_pkg_dir, pth, 'top_level.txt')
elif pth.endswith('.egg'):
top_level_fname = os.path.join(site_pkg_dir, pth, 'EGG-INFO', 'top_level.txt')
else:
top_level_fname = None
return top_level_fname
def _extract_pkg_name(pth):
name = pth.rsplit('.', 1)[0]
name_no_version = name.split('-', 1)[0]
return name_no_version.replace('_', '-')
def find_package_names():
    """Map importable module names to their distribution (pip) package names.

    Scans every ``top_level.txt`` found in ``*.dist-info`` / ``*.egg-info`` /
    ``*.egg`` entries of the user and system site-packages directories.
    Directories are walked in reversed order so entries earlier in the
    site-packages list override later ones.  A couple of well-known packages
    that ship without a ``top_level.txt`` are pre-seeded.

    Returns:
        dict[str, str]: module name -> pip package name.
    """
    # initialize with well-known packages that don't seem to have a top_level.txt
    res = {
        'yaml': 'PyYAML',
        'Crypto': 'pycrypto',
    }
    site_package_dirs = [site.getusersitepackages()]
    site_package_dirs += site.getsitepackages()
    for site_packages in reversed(site_package_dirs):
        if not os.path.isdir(site_packages):
            continue
        for pth in os.listdir(site_packages):
            top_level_fname = _find_top_level_file(site_packages, pth)
            if top_level_fname is None:
                # not a dist-info/egg-info/egg entry
                continue
            pkgname = _extract_pkg_name(pth)
            if not os.path.exists(top_level_fname):
                if pkgname not in res.values():
                    # fixed message grammar ("has not" -> "has no")
                    print("ERR:", pth, 'has no top_level.txt')
                continue
            with open(top_level_fname) as fp:
                modnames = fp.read().split()
            for modname in modnames:
                modname = modname.replace('/', '.')
                if modname.startswith(r'win32\lib'):
                    # win32 lists sub-paths like win32\lib\foo; keep only 'lib'-level name
                    modname = modname.rsplit('\\')[1]
                res[modname] = pkgname
    return res
if __name__ == "__main__":
    # ad-hoc debugging entry point: dump the module -> package map as JSON
    import json
    print(json.dumps(find_package_names(), indent=4, sort_keys=True))
| 1,892 | 29.047619 | 86 | py |
pydeps | pydeps-master/pydeps/__init__.py | # -*- coding: utf-8 -*-
"""
Python module dependency visualization. This package installs the ``pydeps``
command, and normal usage will be to use it from the command line.
"""
__version__ = "1.12.12"  # pydeps package version string
| 200 | 27.714286 | 76 | py |
pydeps | pydeps-master/pydeps/mf27.py |
# from .mf.mf_next import * # for debugging next version
import modulefinder
from modulefinder import (
ModuleFinder as NativeModuleFinder
)
from importlib.util import MAGIC_NUMBER
import marshal
import dis
from . import mfimp
# Re-exported bytecode/dispatch constants used by the module-finder machinery.
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
# from stdlib's modulefinder
_PY_SOURCE = mfimp.PY_SOURCE
_PY_COMPILED = mfimp.PY_COMPILED
_PKG_DIRECTORY = mfimp.PKG_DIRECTORY
# monkey-patch broken modulefinder._find_module
# (https://github.com/python/cpython/issues/84530)
# in Python 3.8-3.10
if hasattr(modulefinder, '_find_module'):
    modulefinder._find_module = mfimp.find_module
class ModuleFinder(NativeModuleFinder):
    """pydeps' specialization of the stdlib ``modulefinder.ModuleFinder``.

    Differences from the stdlib version (marked ``[pydeps]`` below):
    skips the ``__future__``/``future`` packages to avoid recursion
    overflow, and compiles sources with ``dont_inherit=True``.
    """
    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """Resolve one import statement (possibly dotted, possibly relative)."""
        self.msg(3, "import_hook: name(%s) caller(%s) fromlist(%s) level(%s)" % (name, caller, fromlist, level))
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        if q.shortname in ('__future__', 'future'):  # [pydeps] the future package causes recursion overflow
            return None
        m = self.load_tail(q, tail)
        if not fromlist:
            return q
        if m.__path__:
            # package import: make sure the listed submodules are loaded too
            self.ensure_fromlist(m, fromlist)
        return None

    def load_module(self, fqname, fp, pathname, file_info):
        """Load (compile/unmarshal) one module and scan its code for imports.

        ``file_info`` is the (suffix, mode, kind) triple from find_module.
        """
        # fqname = dotted module name we're loading
        suffix, mode, kind = file_info
        # human-readable kind tag, used only for the debug trace below
        kstr = {
            _PKG_DIRECTORY: 'PKG_DIRECTORY',
            _PY_SOURCE: 'PY_SOURCE',
            _PY_COMPILED: 'PY_COMPILED',
        }.get(kind, 'unknown-kind')
        self.msgin(2, "load_module(%s) fqname=%s, fp=%s, pathname=%s" % (kstr, fqname, fp and "fp", pathname))
        if kind == _PKG_DIRECTORY:
            module = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", module)
            return module
        if kind == _PY_SOURCE:
            txt = fp.read()
            # compile() requires a trailing newline; fp may be binary or text
            txt += b'\n' if isinstance(txt, bytes) else '\n'
            co = compile(
                txt,
                pathname,
                'exec',  # compile code block
                dont_inherit=True  # [pydeps] don't inherit future statements from current environment
            )
        elif kind == _PY_COMPILED:
            # a .pyc file is a binary file containing only three things:
            # 1. a four-byte magic number
            # 2. a four byte modification timestamp, and
            # 3. a Marshalled code object
            # from: https://nedbatchelder.com/blog/200804/the_structure_of_pyc_files.html
            # NOTE(review): py3.7+ .pyc headers are 16 bytes (magic + flags +
            # two more words), so this 8-byte skip looks py2-era -- confirm
            # whether the PY_COMPILED path is still exercised.
            if fp.read(4) != MAGIC_NUMBER:
                self.msgout(2, "raise ImportError: Bad magic number", pathname)
                raise ImportError("Bad magic number in %s" % pathname)
            fp.read(4)  # skip modification timestamp
            co = marshal.load(fp)  # load marshalled code object.
        else:
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m

    def scan_code(self, co, m):
        """Walk the opcodes of code object *co*, recording stores and imports on *m*.

        Recurses into nested code objects (functions, classes, comprehensions).
        """
        code = co.co_code  # noqa
        # if sys.version_info >= (3, 4):
        #     scanner = self.scan_opcodes
        # elif sys.version_info >= (2, 5):
        #     scanner = self.scan_opcodes_25
        # else:
        #     scanner = self.scan_opcodes_24
        scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what in ("import", "absolute_import"):
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                if what == "absolute_import":
                    level = 0
                else:
                    level = -1
                self._safe_import_hook(name, m, fromlist, level=level)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    parent = self.determine_parent(m, level=level)
                    # m is still the caller here... [bp]
                    self._safe_import_hook(parent.__name__, m, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)
        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)
| 5,842 | 38.47973 | 112 | py |
pydeps | pydeps-master/pydeps/pydeps.py | # -*- coding: utf-8 -*-
"""cli entrypoints.
"""
from __future__ import print_function
import json
import os
import sys
from pydeps.configs import Config
from . import py2depgraph, cli, dot, target
from .depgraph2dot import dep2dot, cycles2dot
import logging
from . import colors
log = logging.getLogger(__name__)
def _pydeps(trgt, **kw):
    """Worker for the ``pydeps`` command.

    Computes the dependency graph for *trgt*, optionally writes/prints the
    raw deps and the dot source, renders via graphviz, writes the output
    file, and optionally opens it.  OSErrors from rendering/writing/opening
    are re-raised as RuntimeError with context.
    """
    # Pass args as a **kw dict since we need to pass it down to functions
    # called, but extract locally relevant parameters first to make the
    # code prettier (and more fault tolerant).
    # print("KW:", kw, '\n', os.getcwd())
    # print('abspath:', os.path.abspath(kw.get('deps_out')))
    # print('target', trgt.workdir)
    # print('target', trgt)
    colors.START_COLOR = kw.get('start_color')
    # show_cycles = kw.get('show_cycles')
    nodot = kw.get('no_dot')
    no_output = kw.get('no_output')
    output = kw.get('output')
    fmt = kw['format']
    show_svg = kw.get('show')
    deps_out = kw.get('deps_out')
    dot_out = kw.get('dot_out')
    # reverse = kw.get('reverse')
    if os.getcwd() != trgt.workdir:
        # the tests are calling _pydeps directoy
        os.chdir(trgt.workdir)
    dep_graph = py2depgraph.py2dep(trgt, **kw)
    if kw.get('show_deps'):
        cli.verbose("DEPS:")
        if deps_out:
            # make sure output files are written to sensible directories
            directory, _fname = os.path.split(deps_out)
            if not directory:
                deps_out = os.path.join(trgt.calling_dir, deps_out)
            with open(deps_out, 'w') as fp:
                fp.write(dep_graph.__json__())
        else:
            print(dep_graph.__json__())
    dotsrc = depgraph_to_dotsrc(trgt, dep_graph, **kw)
    # dotsrc is None when no_dot was requested (and show_cycles was not)
    if not nodot:
        if kw.get('show_dot'):
            cli.verbose("DOTSRC:")
            if dot_out:
                # make sure output files are written to sensible directories
                directory, _fname = os.path.split(dot_out)
                if not directory:
                    dot_out = os.path.join(trgt.calling_dir, dot_out)
                with open(dot_out, 'w') as fp:
                    fp.write(dotsrc)
            else:
                print(dotsrc)
        if not no_output:
            try:
                svg = dot.call_graphviz_dot(dotsrc, fmt)
            except OSError as cause:
                raise RuntimeError("While rendering {!r}: {}".format(output, cause))
            if fmt == 'svg':
                # widen edge paths on hover to make them easier to follow
                svg = svg.replace(b'</title>', b'</title><style>.edge>path:hover{stroke-width:8}</style>')
            try:
                with open(output, 'wb') as fp:
                    cli.verbose("Writing output to:", output)
                    fp.write(svg)
            except OSError as cause:
                raise RuntimeError("While writing {!r}: {}".format(output, cause))
            if show_svg:
                try:
                    dot.display_svg(kw, output)
                except OSError as cause:
                    helpful = ""
                    if cause.errno == 2:
                        helpful = " (can be caused by not finding the program to open this file)"
                    raise RuntimeError("While opening {!r}: {}{}".format(output, cause, helpful))
def depgraph_to_dotsrc(target, dep_graph, **kw):
    """Render a dependency graph (DepGraph instance) as graphviz dot source.

    Returns the dot source string, or None when ``no_dot`` is requested
    (and ``show_cycles`` is not).
    """
    if kw.get('show_cycles'):
        return cycles2dot(target, dep_graph, **kw)
    if kw.get('no_dot'):
        return None
    return dep2dot(target, dep_graph, **kw)
def externals(trgt, **kwargs):
    """Return a sorted list of direct external dependencies of the target.

    Called for the ``pydeps --externals`` command.
    """
    # baseline options for an externals run; caller kwargs override
    opts = dict(
        T='svg', config=None, debug=False, display=None, exclude=[], exclude_exact=[],
        externals=True, format='svg', max_bacon=2**65, no_config=True, nodot=False,
        noise_level=2**65, no_show=True, output=None, pylib=True, pylib_all=True,
        show=False, show_cycles=False, show_deps=False, show_dot=False,
        show_raw_deps=False, verbose=0, include_missing=True, start_color=0
    )
    opts.update(kwargs)
    depgraph = py2depgraph.py2dep(trgt, **opts)
    log.info("DEPGRAPH: %s", depgraph)
    pkgname = os.path.splitext(trgt.fname)[0]
    per_module = {}      # module -> its external imports (kept for debugging)
    ext_roots = set()    # top-level names of external packages
    for modname, src in list(depgraph.sources.items()):
        if modname.startswith('_'):
            continue
        if not modname.startswith(pkgname):
            continue
        foreign = [imp for imp in src.imports if not imp.startswith(pkgname)]
        if foreign:
            ext_roots.update(imp.split('.')[0] for imp in foreign)
            per_module[modname] = foreign
    # return per_module  # debug
    return sorted(ext_roots)
def pydeps(**args):
    """Entry point for the ``pydeps`` command.

    This function should do all the initial parameter and environment
    munging before calling ``_pydeps`` (so that function has a clean
    execution path).

    With keyword args it builds options from a Config; without, it parses
    sys.argv.  Dispatches to ``externals()`` or ``_pydeps()``.
    """
    # deep dependency graphs recurse heavily; raise the default limit
    sys.setrecursionlimit(10000)
    _args = dict(iter(Config(**args))) if args else cli.parse_args(sys.argv[1:])
    _args['curdir'] = os.getcwd()
    inp = target.Target(_args['fname'])
    log.debug("Target: %r", inp)
    if _args.get('output'):
        _args['output'] = os.path.abspath(_args['output'])
    else:
        # default output: <calling_dir>/<mod_path_with_underscores>.<format>
        _args['output'] = os.path.join(
            inp.calling_dir,
            inp.modpath.replace('.', '_') + '.' + _args.get('format', 'svg')
        )
    with inp.chdir_work():
        # log.debug("Current directory: %s", os.getcwd())
        _args['fname'] = inp.fname
        _args['isdir'] = inp.is_dir
        if _args.get('externals'):
            del _args['fname']
            exts = externals(inp, **_args)
            print(json.dumps(exts, indent=4))
            # return exts  # so the tests can assert
        else:
            # this is the call you're looking for :-)
            try:
                return _pydeps(inp, **_args)
            except (OSError, RuntimeError) as cause:
                if log.isEnabledFor(logging.DEBUG):
                    # we only want to log the exception if we're in debug mode
                    log.exception("While running pydeps:")
                cli.error(str(cause))
def call_pydeps(file_or_dir, **kwargs):
    """Programatic entry point for pydeps.

    See :class:`pydeps.configs.Config` class for the available options.

    Unlike :func:`pydeps`, this takes the target as an explicit argument and
    returns the result (the externals list, or whatever ``_pydeps`` returns).
    """
    sys.setrecursionlimit(10000)
    inp = target.Target(file_or_dir)
    log.debug("Target: %r", inp)
    config = Config(**kwargs)
    if config.output:
        config.output = os.path.abspath(config.output)
    else:
        # default output: <calling_dir>/<mod_path_with_underscores>.<format>
        config.output = os.path.join(
            inp.calling_dir,
            inp.modpath.replace('.', '_') + '.' + config.format
        )
    ctx = dict(iter(config))
    with inp.chdir_work():
        ctx['fname'] = inp.fname
        ctx['isdir'] = inp.is_dir
        if config.externals:
            del ctx['fname']
            return externals(inp, **ctx)
        return _pydeps(inp, **ctx)
if __name__ == '__main__':  # pragma: nocover
    # console entry point when run as a script (normally installed via setup)
    pydeps()
| 7,175 | 32.376744 | 106 | py |
pydeps | pydeps-master/pydeps/py2depgraph.py | # Copyright 2004,2009 Toby Dickenson
# Changes 2014 (c) Bjorn Pettersen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import json
import os
import sys
from collections import defaultdict
import enum
from .dummymodule import DummyModule
from .pystdlib import pystdlib
from . import depgraph
from . import mf27
import logging
log = logging.getLogger(__name__)
PYLIB_PATH = depgraph.PYLIB_PATH
class imp(enum.Enum):
    # Module-type codes; names/values mirror the constants of the removed
    # py2-era ``imp`` stdlib module.
    C_BUILTIN = 6
    C_EXTENSION = 3
    IMP_HOOK = 9
    PKG_DIRECTORY = 5
    PY_CODERESOURCE = 8
    PY_COMPILED = 2
    PY_FROZEN = 7
    PY_RESOURCE = 4
    PY_SOURCE = 1
class Module(object):
    """Lightweight stand-in for modulefinder's module record.

    Tracks the module's name/file/path plus the global names assigned in it
    and any star-imports that could not be resolved.
    """
    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned in the module.  Includes names brought in
        # through star-imports of Python modules.
        self.globalnames = {}
        # Star-imports this module did that could not be resolved,
        # i.e. a star-import from a non-Python module.
        self.starimports = {}

    @property
    def shortname(self):
        """The module name, with '__main__' mapped to its dotted file name."""
        if self.__name__ != "__main__":
            return self.__name__
        return self.__file__[:-3].replace('\\', '.')

    def __repr__(self):
        normalized = (self.__file__ or "").replace('\\', '/')
        return "Module(name=%s, file=%r, path=%r)" % (
            self.__name__,
            normalized,
            self.__path__
        )
class MyModuleFinder(mf27.ModuleFinder):
    """Module finder that records a dependency graph.

    Builds ``self._depgraph`` as dict[caller_name][imported_name] -> file,
    and ``self._types`` as dict[module_name] -> module-kind code.
    """
    def __init__(self, syspath, *args, **kwargs):
        self.args = kwargs
        self.verbose = kwargs.get('verbose', 0)
        # include all of python std lib (incl. C modules)
        self.include_pylib_all = kwargs.pop('pylib_all', False)
        # include python std lib modules.
        # self.include_pylib = kwargs.pop('pylib', self.include_pylib_all)
        self.include_pylib = kwargs.pop('pylib', self.include_pylib_all)
        self._depgraph = defaultdict(dict)
        self._types = {}
        self._last_caller = None
        # path=None, debug=0, excludes=[], replace_paths=[]
        debug = 5 if self.verbose >= 4 else 0
        mf27.ModuleFinder.__init__(self,
                                   path=syspath,
                                   debug=debug,
                                   # debug=3,
                                   excludes=kwargs.get('excludes', []))

    def add_module(self, fqname):
        """Return existing module record or register a fresh :class:`Module`."""
        if fqname in self.modules:
            return self.modules[fqname]
        self.modules[fqname] = m = Module(fqname)
        return m

    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        # track who is importing so _add_import can attribute the edge;
        # restore the previous caller afterwards (import hooks recurse)
        old_last_caller = self._last_caller
        try:
            self._last_caller = caller
            # print "  last_CALLER:", caller, "OLD-lastcaller:", old_last_caller
            return mf27.ModuleFinder.import_hook(self, name, caller, fromlist, level)
        finally:
            self._last_caller = old_last_caller

    def _add_import(self, module):
        """Record an edge from the current caller to *module* (maybe).

        Python-stdlib modules are skipped unless pylib/pylib_all is set.
        """
        if module is not None:
            if self._last_caller:
                # self._depgraph[self._last_caller.__name__][module.__name__] = module.__file__
                if hasattr(module, '__file__') or self.include_pylib_all:
                    pylib_p = []
                    if not module.__file__:
                        pass
                    else:
                        rpath = os.path.split(module.__file__)[0].lower()
                        if 'site-packages' not in rpath:
                            # under a stdlib directory? (list of bools, any() below)
                            pylib_p = [rpath.startswith(pp) for pp in PYLIB_PATH]
                    if self.include_pylib or not any(pylib_p):
                        # if self._last_caller.__name__ != module.__name__:
                        #     self._depgraph[self._last_caller.__name__][module.__name__] = module.__file__
                        self._depgraph[self._last_caller.__name__][module.__name__] = module.__file__

    def import_module(self, partnam, fqname, parent):
        """Import one module and record the dependency edge."""
        module = mf27.ModuleFinder.import_module(self, partnam, fqname, parent)
        self._add_import(module)
        return module

    def load_module(self, fqname, fp, pathname, suffix_mode_kind):
        """Load a module, tolerating broken third-party sources.

        Records the module kind in self._types on success; returns None on
        failure instead of propagating SyntaxError/AttributeError.
        """
        (suffix, mode, kind) = suffix_mode_kind
        try:
            module = mf27.ModuleFinder.load_module(
                self, fqname, fp, pathname, (suffix, mode, kind)
            )
        except SyntaxError:
            # this happened first when pyinvoke tried to load yaml3/2 based on
            # an `if six.PY3`
            # print "SYNTAX_ERROR"
            module = None
        except AttributeError as e:
            # See issues #139 and #140...
            print("ERROR trying to load {} from {} (py38+ _find_module reimplementation of imp.find_module called .is_package on None...)\n{}".format(
                fqname, pathname,
                e
            ))
            module = None
        if module is not None:
            self._types[module.__name__] = kind
        return module

    def ensure_fromlist(self, module, fromlist, recursive=0):
        """Import the names in ``from module import <fromlist>``, expanding '*'."""
        self.msg(4, "ensure_fromlist", module, fromlist, recursive)
        for sub in fromlist:
            # print "    for sub:", sub, "in fromlisst:", fromlist, "hasattr(module, sub):", hasattr(module, sub)
            if sub == "*":
                # print "STAR"
                if not recursive:
                    submodules = self.find_all_submodules(module)
                    if submodules:
                        self.ensure_fromlist(module, submodules, 1)
            elif not hasattr(module, sub):
                # print "ELIF......"
                subname = "%s.%s" % (module.__name__, sub)
                submod = self.import_module(sub, subname, module)
                if not submod:
                    raise ImportError("No module named " + subname)
            else:
                self._add_import(getattr(module, sub))
                # print "    SUB:", sub, "lastcaller:", self._last_caller
class RawDependencies(object):
    """Convenience wrapper: run :class:`MyModuleFinder` over a script and
    keep the resulting dependency graph and module-type map.
    """
    def __init__(self, fname, **kw):
        path = sys.path[:]
        exclude = []
        # Pass the exclude list by keyword: MyModuleFinder's signature is
        # (syspath, *args, **kwargs), so passing it positionally (as before)
        # silently discarded it into *args.  setdefault keeps any explicit
        # 'excludes' from **kw.
        kw.setdefault('excludes', exclude)
        mf = MyModuleFinder(path, **kw)
        mf.run_script(fname)
        self.depgraph = mf._depgraph
        self.types = mf._types
def py2dep(target, **kw) -> depgraph.DepGraph:
    """"Calculate dependencies for ``pattern`` and return a DepGraph.

    Runs MyModuleFinder over a generated dummy module that imports the
    target, optionally folds in unresolved ("bad") modules, optionally
    strips stdlib modules, and wraps the result in a DepGraph.
    """
    log.info("py2dep(%r)", target)
    dummy = DummyModule(target, **kw)
    kw['dummyname'] = dummy.fname
    syspath = sys.path[:]
    syspath.insert(0, target.syspath_dir)
    # remove exclude so we don't pass it twice to modulefinder
    exclude = ['migrations'] + kw.pop('exclude', [])
    log.debug("Exclude: %r", exclude)
    log.debug("KW: %r", kw)
    if 'fname' in kw:
        del kw['fname']
    mf = MyModuleFinder(
        syspath,           # module search path for this module finder
        excludes=exclude,  # folders to exclude
        **kw
    )
    mf.debug = max(mf.debug, kw.get('debug_mf', 0))
    if log.isEnabledFor(logging.DEBUG):
        log.debug("CURDIR: %s", os.getcwd())
        log.debug("FNAME: %r, CONTENT:\n%s\n", dummy.fname, dummy.text())
    mf.run_script(dummy.fname)
    log.info("mf._depgraph:\n%s", json.dumps(dict(mf._depgraph), indent=4))
    log.info("mf.badmodules:\n%s", json.dumps(mf.badmodules, indent=4))
    if kw.get('include_missing'):
        # fold unresolved modules into the graph so they show up as nodes
        for k, vdict in list(mf.badmodules.items()):
            if k not in mf._depgraph:
                mf._depgraph[k] = {}
            for v in vdict:
                if not target.is_pysource and v not in mf._depgraph['__main__']:
                    mf._depgraph['__main__'][v] = None
                if v in mf._depgraph:
                    mf._depgraph[v][k] = None
                else:
                    mf._depgraph[v] = {k: None}
    log.info("mf._depgraph:\n%s", json.dumps(dict(mf._depgraph), indent=4))
    kw['exclude'] = exclude
    if kw.get('pylib'):
        # keep stdlib modules in the graph
        mf_depgraph = mf._depgraph
        for k, v in list(mf._depgraph.items()):
            log.debug('depgraph item: %r %r', k, v)
        # mf_modules = {k: os.syspath.abspath(v.__file__)
        #               for k, v in mf.modules.items()}
    else:
        # filter out stdlib modules (both as nodes and as import targets)
        pylib = pystdlib()
        mf_depgraph = {}
        for k, v in list(mf._depgraph.items()):
            log.debug('depgraph item: %r %r', k, v)
            if k in pylib:
                continue
            vals = {vk: vv for vk, vv in v.items() if vk not in pylib}
            mf_depgraph[k] = vals
        # mf_modules = {k: os.syspath.abspath(v.__file__)
        #               for k, v in mf.modules.items()
        #               if k not in pylib}
    try:
        import yaml
        log.info("mf_depgraph:\n%s",
                 yaml.dump(dict(mf_depgraph), default_flow_style=False))
        # log.error("mf._types:\n%s", yaml.dump(mf._types, default_flow_style=False))
        # log.debug("mf_modules:\n%s", yaml.dump(mf_modules, default_flow_style=False))
    except ImportError:
        log.info("mf_depgraph:\n%s", json.dumps(dict(mf_depgraph), indent=4))
    return depgraph.DepGraph(mf_depgraph, mf._types, target, **kw)
def py2depgraph():
    """CLI helper: dump the raw dependencies of sys.argv[1] as JSON to stdout."""
    script = sys.argv[1]
    raw = RawDependencies(script)
    sys.stdout.write(
        json.dumps(raw.__dict__, indent=4)
    )
if __name__ == '__main__':
    # script entry: dump dependencies of the file given on the command line
    py2depgraph()
| 10,490 | 35.681818 | 150 | py |
pydeps | pydeps-master/pydeps/depgraph.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from collections import defaultdict
import fnmatch
from .pycompat import zip_longest
import json
import os
import pprint
import re
import enum
from . import colors, cli
import sys
import logging
log = logging.getLogger(__name__)
# we're normally not interested in imports of std python packages.
# Lowercased directories holding the python standard library, derived from
# where pprint/os actually live (robust in virtualenvs that see system libs).
PYLIB_PATH = {
    # in virtualenvs that see the system libs, these will be different.
    os.path.split(os.path.split(pprint.__file__)[0])[0].lower(),
    os.path.split(os.__file__)[0].lower()
}
class imp(enum.Enum):
    # Module-type codes; names/values mirror the constants of the removed
    # py2-era ``imp`` stdlib module (UNKNOWN added as a fallback).
    C_BUILTIN = 6
    C_EXTENSION = 3
    IMP_HOOK = 9
    PKG_DIRECTORY = 5
    PY_CODERESOURCE = 8
    PY_COMPILED = 2
    PY_FROZEN = 7
    PY_RESOURCE = 4
    PY_SOURCE = 1
    UNKNOWN = 0
class Source(object):
    """A node (contained) in the dependency graph.

    It contains info about which modules are imported by this source,
    and which modules import this source.  Ordering/equality/hash are all
    by ``name``, so Sources can live in sets and be sorted.
    """
    def __init__(self, name, path=None, imports=(), exclude=False, args=None):
        self.args = args or {}
        self.name = name
        # self.kind = kind
        self.path = path  # needed here..?
        self.imports = set(imports)   # modules we import
        self.imported_by = set()      # modules that import us
        self.bacon = sys.maxsize      # bacon distance (hops from __main__)
        self.excluded = exclude

    @property
    def name_parts(self):
        # dotted name split into its components
        return self.name.split('.')

    @property
    def module_depth(self):
        # number of dots, i.e. nesting depth of the module
        return self.name.count('.')

    @property
    def path_parts(self):
        # normalized, lowercased file path split on '/'
        p = self.path or ""
        return p.replace('\\', '/').lower().split('/')

    @property
    def in_degree(self):
        """Number of incoming arrows.
        """
        return len(self.imports)

    @property
    def out_degree(self):
        """Number of outgoing arrows.
        """
        return len(self.imported_by)

    @property
    def degree(self):
        return self.in_degree + self.out_degree

    def is_noise(self):
        """Is this module just noise?  (too common either at top or bottom of
           the graph).
        """
        noise = self.args['noise_level']
        if not (self.in_degree and self.out_degree):
            # only pure sources/sinks can be noise
            return self.degree > noise
        return False

    def __json__(self):
        # dict form used by DepGraph.__json__/__repr__
        res = dict(
            name=self.name,
            path=self.path,
            # kind=str(self.kind),
            bacon=self.bacon,
        )
        if self.excluded:
            res['excluded'] = 'EXCLUDED'
        if self.imports:
            res['imports'] = list(sorted(self.imports))
        if self.imported_by:
            res['imported_by'] = list(sorted(self.imported_by))
        return res

    def __str__(self):
        return "%s (%s)" % (self.name, self.path)

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        return self.name == other.name

    def __lt__(self, other):
        return self.name < other.name

    def __le__(self, other):
        return self.name <= other.name

    def __gt__(self, other):
        return self.name > other.name

    def __ge__(self, other):
        return self.name >= other.name

    def __repr__(self):
        return json.dumps(self.__json__(), indent=4)

    def __iadd__(self, other):
        """Merge other into self.
        """
        if self.name == other.name and self.imports == other.imports and self.bacon == other.bacon:
            # nothing new to merge
            return self
        log.debug("iadd lhs: %r", self)
        log.debug("iadd rhs: %r", other)
        assert self.name == other.name
        self.path = self.path or other.path
        self.imports |= other.imports
        self.imported_by |= other.imported_by
        self.bacon = min(self.bacon, other.bacon)
        self.excluded = self.excluded or other.excluded
        log.debug("iadd result: %r", self)
        return self

    def get_label(self, splitlength=0, rmprefix=None):
        """Display name for the node: optionally strip a prefix and break
        long dotted names across lines (dot-escaped newlines).
        """
        name = self.name
        if rmprefix:
            for prefix in rmprefix:
                if self.name.startswith(prefix):
                    name = self.name[len(prefix):]
                    break
        if splitlength and len(name) > splitlength and '.' in name:
            return '\\.\\n'.join(name.split('.'))  # pragma: nocover
        return name

    @property
    def label(self):
        """Convert a module name to a formatted node label.  This is a default
           policy - please override.
        """
        return self.get_label(splitlength=14)
class DepGraph(object):
    """The dependency graph.

    It is the output of :func:`pydeps.py2depgraph.py2dep`

    Construction converts the raw name->imports mapping into ``self.sources``
    (name -> :class:`Source`), wires up reverse edges, computes bacon
    distances, and applies the noise/bacon/only/exclude filters.
    """
    # always-skipped module names (whitespace-split, so formatting is free)
    skip_modules = """
        os sys qt time __future__ types re string bdb pdb __main__
        south
    """.split()

    def __init__(self, depgraf, types, target, **args):
        # depgraph is py2depgraph.MyModulefinder._depgraph
        log.debug("DepGraph: depgraf=%r", depgraf)
        self.curhue = 150  # start with a green-ish color
        self.colors = {}
        self.cycles = []
        self.cyclenodes = set()
        self.cyclerelations = set()
        self.max_module_depth = args.get('max_module_depth', 0)
        self.target = target
        self.args = args
        #: dict[module_name] -> Source object
        self.sources = {}
        # exclusion patterns: globs for --exclude, anchored for --exclude-exact
        self.skiplist = [re.compile(fnmatch.translate(arg)) for arg in args['exclude']]
        self.skiplist += [re.compile('^%s$' % fnmatch.translate(arg)) for arg in args['exclude_exact']]
        # depgraf = {name: imports for (name, imports) in depgraf.items()}
        for name, imports in depgraf.items():
            log.debug("depgraph name=%r imports=%r", name, imports)
            src = Source(
                name=self.source_name(name),
                imports=[self.source_name(n) for n in imports.keys()],  # values handled below
                args=args,
                exclude=self._exclude(name),
            )
            log.debug("depgraph src=%r", src)
            self.add_source(src)
            # register each imported module as a node too (with its path)
            for iname, path in imports.items():
                src = Source(
                    name=self.source_name(iname, path),
                    path=path,
                    args=args,
                    exclude=self._exclude(iname)
                )
                self.add_source(src)
        self.module_count = len(self.sources)
        cli.verbose(1, "there are", self.module_count, "total modules")
        self.connect_generations()
        if self.args['show_cycles']:
            self.find_import_cycles()
        self.calculate_bacon()
        if self.args['show_raw_deps']:
            print(self)
        self.exclude_noise()
        self.exclude_bacon(self.args['max_bacon'])
        self.only_filter(self.args.get('only'))
        excluded = [v for v in list(self.sources.values()) if v.excluded]
        # print "EXCLUDED:", excluded
        self.skip_count = len(excluded)
        cli.verbose(1, "skipping", self.skip_count, "modules")
        for module in excluded:
            # print 'exclude:', module.name
            cli.verbose(2, " ", module.name)
        self.remove_excluded()
        if not self.args['show_deps']:
            cli.verbose(3, self)

    def source_name(self, name, path=None):
        """Returns the module name, possibly limited by --max-module-depth.
        """
        res = name
        if name == "__main__" and self.target.is_pysource:
            # use the target file name directly if we're working on a
            # single file
            return self.target.fname
        if name == "__main__" and path:
            # use the path to the main module if we're working on a module.
            res = path.replace('\\', '/').replace('/', '.')
            if self.args.get('verbose', 0) >= 2:  # pragma: nocover
                print("changing __main__ =>", res)
        if self.max_module_depth > 0:
            # truncate dotted name to at most max_module_depth components
            res = '.'.join(res.split('.')[:self.max_module_depth])
        return res

    def __json__(self):
        """JSON text for all sources (used by --show-deps / --deps-output)."""
        return json.dumps(self.sources, indent=4, sort_keys=True,
                          default=lambda obj: obj.__json__() if hasattr(obj, '__json__') else obj)

    def levelcounts(self):
        # placeholder -- not implemented
        pass

    def get_colors(self, src, colorspace=None):
        """Return (bg, fg) colors for *src*, cycling the hue per basename.

        NOTE(review): relies on ``src.basename``, which Source in this file
        does not define -- presumably provided by a rendering subclass/mixin;
        confirm.
        """
        if colorspace is None:
            if src.basename not in self.colors:
                h = self.curhue
                # self.curhue += 7  # relative prime with 360
                self.curhue += 37  # relative prime with 360
                self.curhue %= 360
                # print "NAME:", src.name, "BASENAME:", src.basename
                bg = colors.name2rgb(h)
                black = (0, 0, 0)
                white = (255, 255, 255)
                fg = colors.foreground(bg, black, white)
                self.colors[src.basename] = bg, fg
            return self.colors[src.basename]
        else:
            return colorspace.color(src)

    def _is_pylib(self, path):
        # is this directory one of the stdlib directories?
        log.info('path %r in PYLIB_PATH %r => %s', path, PYLIB_PATH, path in PYLIB_PATH)
        return path in PYLIB_PATH

    def proximity_metric(self, a, b):
        """Return the weight of the dependency from a to b.  Higher weights
           usually have shorter straighter edges.  Return 1 if it has normal
           weight.  A value of 4 is usually good for ensuring that a related
           pair of modules are drawn next to each other.

           Returns an int between 1 (unknown, default), and 4 (very related).
        """
        # count how many leading name components the two modules share (max 4)
        res = 0
        for ap, bp, n in zip(a.name_parts, b.name_parts, list(range(4))):
            res += ap == bp
            if n >= 3:
                break
        if res == 0: res = 1  # noqa
        return 4 if res > 4 else res

    def dissimilarity_metric(self, a, b):
        """Return non-zero if references to this module are strange, and
           should be drawn extra-long.  The value defines the length, in
           rank.  This is also good for putting some vertical space between
           seperate subsystems.

           Returns an int between 1 (default) and 4 (highly unrelated).
        """
        # inverse of proximity: subtract one per shared leading component
        res = 4
        for an, bn, n in zip_longest(a.name_parts, b.name_parts, list(range(4))):
            res -= an == bn
            if n >= 3:
                break
        return 4 if res > 4 else res

    def _exclude(self, name):
        # does any exclusion pattern match this module name?
        return any(skip.match(name) for skip in self.skiplist)

    def add_source(self, src):
        """Register *src*, merging (via Source.__iadd__) if already present."""
        if src.name in self.sources:
            log.debug("ADD-SOURCE[+=]\n%r", src)
            self.sources[src.name] += src  # merge
        else:
            log.debug("ADD-SOURCE[=]\n%r", src)
            self.sources[src.name] = src

    def __getitem__(self, item):
        return self.sources[item]

    def __iter__(self):
        """Yield (imported_module, importing_module) edges, skipping
        skip_modules/excluded names and parent-package self-edges.
        """
        visited = set(self.skip_modules) | set(self.args['exclude'])

        def visit(src):
            if src.name in visited:
                return
            visited.add(src.name)
            for name in src.imports:
                impmod = self.sources[name]
                # FIXME: why do we want to exclude **/*/__init__.py? This line
                #        causes `collections` package in py3 to be excluded.
                # if impmod.path and not impmod.path.endswith('__init__.py'):
                if not src.name.startswith(impmod.name + "."):
                    yield impmod, src
                # NOTE(review): visit() is a generator, so this call creates a
                # generator that is never iterated -- the recursion into
                # impmod's imports appears to never run; confirm intended.
                visit(impmod)

        for _src in self.sources.values():
            for source in visit(_src):
                cli.verbose(4, "Yielding", source[0], source[1])
                yield source

    def __repr__(self):
        return json.dumps(self.sources, indent=4, sort_keys=True,
                          default=lambda obj: obj.__json__() if hasattr(obj, '__json__') else obj)

    def find_import_cycles(self):
        """DFS over imports, recording each first-found cycle in self.cycles
        and marking its nodes/edges in cyclenodes/cyclerelations.
        """
        def traverse(node, path):
            if node.name in self.cyclenodes:
                # already known to be part of a cycle; don't re-traverse
                return
            if node.name in path:
                # found cycle
                cycle = path[path.index(node.name):] + [node.name]
                self.cycles.append(cycle)
                for nodename in cycle:
                    self.cyclenodes.add(nodename)
                for i in range(len(cycle) - 1):
                    self.cyclerelations.add(
                        (cycle[i], cycle[i + 1])
                    )
                return
            for impmod in node.imports:
                traverse(self.sources[impmod], path + [node.name])
        for src in list(self.sources.values()):
            traverse(src, [])

    def connect_generations(self):
        """Traverse depth-first adding imported_by.
        """
        for src in self.sources.values():
            for _child in src.imports:
                if _child in self.sources:
                    child = self.sources[_child]
                    child.imported_by.add(src.name)

    def calculate_bacon(self):
        """Compute each source's bacon distance (import hops from the root).

        The root is '__main__' if present, otherwise the generated dummy
        module.
        """
        count = defaultdict(int)

        def bacon(src, n):
            count[src.name] += 1
            if src.bacon <= n:
                # already reached by a shorter (or equal) path
                return
            src.bacon = min(src.bacon, n)
            for imp in src.imports:
                bacon(self.sources[imp], n + 1)
        if '__main__' in self.sources:
            bacon(self.sources['__main__'], 0)
        elif self.args['dummyname'] in self.sources:
            bacon(self.sources[self.args['dummyname']], 0)

    def exclude_noise(self):
        """Mark overly-connected sources/sinks (per --noise-level) as excluded."""
        for src in list(self.sources.values()):
            if src.excluded:
                continue
            if src.is_noise():
                cli.verbose(2, "excluding", src, "because it is noisy:", src.degree)
                src.excluded = True
                self._add_skip(src.name)

    def exclude_bacon(self, limit):
        """Exclude modules that are more than `limit` hops away from __main__.
        """
        for src in list(self.sources.values()):
            if src.bacon > limit:
                src.excluded = True
                self._add_skip(src.name)

    def only_filter(self, paths):
        """Exclude nodes that have a prefix in paths.
        """
        if not paths:
            return
        paths = set(paths)

        def should_include(node):
            for p in paths:
                if node.name.startswith(p):
                    return True
            return False

        for src in list(self.sources.values()):
            if not should_include(src):
                src.excluded = True
                # print "Excluding bacon:", src.name
                self._add_skip(src.name)

    def remove_excluded(self):
        """Remove all sources marked as excluded.
        """
        sources = list(self.sources.values())
        for src in sources:
            if src.excluded:
                del self.sources[src.name]
            # also drop edges pointing at excluded modules
            src.imports = [m for m in src.imports if not self._exclude(m)]
            src.imported_by = [m for m in src.imported_by if not self._exclude(m)]

    def _add_skip(self, name):
        # print 'add skip:', name
        self.skiplist.append(re.compile(fnmatch.translate(name)))
| 15,089 | 32.019694 | 103 | py |
pydeps | pydeps-master/pydeps/configs.py |
from io import StringIO
import json
import warnings
import logging
log = logging.getLogger(__name__)
# from devtools import debug
# Locate a TOML parser, preferring the stdlib: tomllib (3.11+), then the
# third-party tomlkit, then the legacy toml package.  All three expose a
# compatible ``loads(str)``.  HAVE_TOML stays False when none is available,
# and load_toml() then degrades to returning {}.
HAVE_TOML = False
try:
    import tomllib as toml
    HAVE_TOML = True
except ImportError:
    try:
        import tomlkit as toml
        HAVE_TOML = True
    except ImportError:
        try:
            import toml
            HAVE_TOML = True
        except ImportError:
            pass
def is_string(v):
    """Return True if ``v`` is a text string."""
    return isinstance(v, str)
def boolval(v):
    """Coerce ``v`` (a bool, an int, or a yes/no-style string, including
    Norwegian ja/nei) to a bool.  Raises ValueError for anything else.
    """
    if isinstance(v, bool):
        return v
    if isinstance(v, int):
        return bool(v)
    if isinstance(v, str):
        # NB: rebinds v so a failed match reports the lowered value,
        # exactly as before
        v = v.lower()
        truthy = {'j', 'y', 'ja', 'yes', '1', 'true'}
        falsy = {'n', 'nei', 'no', '0', 'false'}
        if v in truthy:
            return True
        if v in falsy:
            return False
    raise ValueError("Don't know how to convert %r to bool" % v)
def listval(v):
    """Convert ``v`` to a list-ish value: strings are split on whitespace,
    lists/tuples are returned unchanged.  Raises ValueError otherwise.
    """
    if isinstance(v, (list, tuple)):
        return v
    if isinstance(v, str):
        return [token for token in v.split() if token.strip()]
    raise ValueError("Don't know how to convert %r to list" % v)
def identity(v):
    """No-op converter: return ``v`` unchanged."""
    return v
# Map option-type tags (as declared by the cli argument definitions) to the
# converter that coerces a raw config/cli value to that type.
typefns = {
    'BOOL': boolval,
    'INT': int,
    'LIST': listval,
    'STR': str,
}
def load_toml(filename):
    """Read the [tool.pydeps] table from a toml file.

    Returns {} when no toml library is available, the file cannot be
    read/parsed, or the table is missing (any failure is logged at
    debug level and swallowed).
    """
    if not HAVE_TOML:
        return {}
    try:
        with open(filename) as fp:
            res = toml.loads(fp.read())
        return res['tool']['pydeps']
    except Exception as e:
        log.debug("Couldn't load toml file %s: %s", filename, e)
        return {}
def load_json(filename):
    """Read the "pydeps" key from a json config file.

    Returns {} when the file is missing/unreadable, is not valid json, or
    has no "pydeps" key.  Previously a missing file raised
    FileNotFoundError, unlike the forgiving load_toml/load_ini siblings;
    OSError is now caught for consistency.
    """
    try:
        with open(filename) as fp:
            res = json.loads(fp.read())
        return res['pydeps']
    except (OSError, json.JSONDecodeError, KeyError):
        return {}
def load_yaml(filename):
    """Read the "pydeps" key from a yaml config file.

    Returns {} when pyyaml is not installed, the file is missing or
    invalid yaml, or the "pydeps" key is absent.

    Bug fixed: the original put ``import yaml`` inside the try whose
    except clause referenced ``yaml.YAMLError`` — when the import failed,
    evaluating that except tuple raised NameError instead of returning {}.
    """
    try:
        import yaml
        from yaml import Loader
    except ImportError:
        # pyyaml is an optional dependency
        return {}
    try:
        with open(filename) as fp:
            res = yaml.load(fp.read(), Loader=Loader)
        return res['pydeps']
    except (yaml.YAMLError, KeyError, TypeError, OSError):
        # TypeError covers an empty file (yaml.load -> None)
        return {}
def load_ini(filename):
    """Read the [pydeps] section from an ini/cfg file.

    Warns and returns {} when the section is missing or empty.
    """
    import configparser
    parser = configparser.ConfigParser()
    parser.read(filename)
    try:
        return dict(parser.items("pydeps"))
    except (configparser.NoOptionError, configparser.NoSectionError):
        message = ' '.join("""
            Couldn't find a [pydeps] section in your config files
            %r -- or it was empty
        """.split())
        warnings.warn(message % filename)
        return {}
def load_config(filename):
    """Dispatch to the loader matching the config file's extension.

    Generalized to also accept .yaml/.yml and .ini, which filetype() and
    Config.load already support but this function used to reject with
    ValueError.  Unknown extensions still raise ValueError.
    """
    if filename.endswith('.toml'):
        return load_toml(filename)
    if filename.endswith('.json'):
        return load_json(filename)
    if filename.endswith(('.yaml', '.yml')):
        return load_yaml(filename)
    if filename.endswith('.ini'):
        return load_ini(filename)
    raise ValueError("Unknown config file type: %s" % filename)
# this class is generated from pydeps.arguments.Arguments.write_default_config()
class Config(object):
    """Holder for all pydeps options with their default values.

    ``__init__`` copies every class-level default into the instance
    ``__dict__`` so that ``iter(config)`` and the ``write_*`` serializers
    see the full option set.

    Fixes applied:
    - list-valued defaults are copied per instance (they used to be the
      shared class attribute, so mutating one instance leaked into all);
    - ``__getattr__`` raises AttributeError (it raised KeyError, breaking
      ``hasattr``/``getattr(obj, name, default)``);
    - ``write_toml`` nests ``tool -> pydeps`` so its output round-trips
      through ``load_toml`` (it used the literal key ``'tools.pydeps'``).
    """
    #: turn on all the show and verbose options (mainly for debugging pydeps
    #: itself)
    debug = False
    #: specify config file
    config = None
    #: disable processing of config files
    no_config = False
    #: print pydeps version
    version = False
    #: set log-level to one of CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET.
    log = None
    #: tries to automatically find the name of the current package.
    find_package = False
    #: filename
    fname = None
    #: be more verbose (-vv, -vvv for more verbosity)
    verbose = 0
    #: write output to 'file'
    output = None
    #: output format (svg|png)
    format = 'svg'
    #: program to use to display the graph (png or svg file depending on the T
    #: parameter)
    display = None
    #: don't call external program to display graph
    no_show = False
    #: show output of dependency analysis
    show_deps = False
    #: show output of dependency analysis before removing skips
    show_raw_deps = False
    #: write output of dependency analysis to 'file'
    deps_out = None
    #: show output of dot conversion
    show_dot = False
    #: write dot code to 'file'
    dot_out = None
    #: skip dot conversion
    no_dot = False
    #: don't create .svg/.png file, implies --no-show (-t/-o will be ignored)
    no_output = False
    #: show only import cycles
    show_cycles = False
    #: set the ModuleFinder.debug flag to this value
    debug_mf = 0
    #: exclude sources or sinks with degree greater than noise-level
    noise_level = 200
    #: exclude nodes that are more than n hops away (default=2, 0 -> infinite)
    max_bacon = 2
    #: coalesce deep modules to at most n levels
    max_module_depth = 0
    #: include python std lib modules
    pylib = False
    #: include python all std lib modules (incl. C modules)
    pylib_all = False
    #: include modules that are not installed (or can't be found on sys.path)
    include_missing = False
    #: input files to skip (e.g. `foo.*`), multiple file names can be provided
    exclude = []
    #: same as --exclude, except requires the full match. `-xx foo.bar` will
    #: exclude foo.bar, but not foo.bar.blob
    exclude_exact = []
    #: only include modules that start with MODULE_PATH
    only = []
    #: create list of direct external dependencies
    externals = False
    #: draw arrows to (instead of from) imported modules
    reverse = False
    #: set the direction of the graph, legal values are TB (default, imported
    #: modules above importing modules), BT (opposite direction of TB), BT
    #: (opposite direction of TB), LR (left-to-right) and RL (right-to-left)
    rankdir = 'TB'
    #: draw external dependencies as separate clusters
    cluster = False
    #: the minimum number of nodes a dependency must have before being clustered
    #: (default=0)
    min_cluster_size = 0
    #: the maximum number of nodes a dependency can have before the cluster is
    #: collapsed to a single node (default=0)
    max_cluster_size = 0
    #: draw target module as a cluster
    keep_target_cluster = False
    #: collapse target module (--keep-target-cluster will be ignored)
    collapse_target_cluster = False
    #: remove PREFIX from the displayed name of the nodes
    rmprefix = []
    #: starting value for hue from 0 (red/default) to 360.
    start_color = 0
    def __init__(self, **kwargs):
        """Copy all non-callable public class defaults into the instance,
        then apply keyword overrides verbatim.
        """
        for key in dir(self.__class__):
            if not key.startswith('_'):
                val = getattr(self.__class__, key)
                if not callable(val):
                    # copy list defaults so instances never share the
                    # mutable class attribute
                    if isinstance(val, list):
                        val = list(val)
                    setattr(self, key, val)
        self.__dict__.update(kwargs)
    def __eq__(self, other):
        """Field-by-field comparison that prints each mismatch (intended
        as a debugging aid; relies on both sides iterating their fields in
        the same order).
        """
        res = True
        for (lkey, lval), (rkey, fval) in zip(iter(self), iter(other)):
            if lkey != rkey:
                print(f"key mismatch: {lkey} != {rkey}")
                res = False
            if lval != fval:
                print(f"val mismatch {lkey}: {lval} != {fval}")
                res = False
        return res
    def set_field(self, field, value):
        """Set one option, coercing ``value`` with the field's converter.

        Fields without a branch here (debug, config, no_config, version,
        log, find_package) are cli-only and silently ignored.
        """
        if field == 'fname':
            self.fname = identity(value)
        if field == 'verbose':
            self.verbose = int(value)
        if field == 'output':
            self.output = identity(value)
        if field == 'format':
            self.format = str(value)
        if field == 'display':
            self.display = identity(value)
        if field == 'no_show':
            self.no_show = boolval(value)
        if field == 'show_deps':
            self.show_deps = boolval(value)
        if field == 'show_raw_deps':
            self.show_raw_deps = boolval(value)
        if field == 'deps_out':
            self.deps_out = identity(value)
        if field == 'show_dot':
            self.show_dot = boolval(value)
        if field == 'dot_out':
            self.dot_out = identity(value)
        if field == 'no_dot':
            self.no_dot = boolval(value)
        if field == 'no_output':
            self.no_output = boolval(value)
        if field == 'show_cycles':
            self.show_cycles = boolval(value)
        if field == 'debug_mf':
            self.debug_mf = int(value)
        if field == 'noise_level':
            self.noise_level = int(value)
        if field == 'max_bacon':
            self.max_bacon = int(value)
        if field == 'max_module_depth':
            self.max_module_depth = int(value)
        if field == 'pylib':
            self.pylib = boolval(value)
        if field == 'pylib_all':
            self.pylib_all = boolval(value)
        if field == 'include_missing':
            self.include_missing = boolval(value)
        if field == 'exclude':
            self.exclude = listval(value)
        if field == 'exclude_exact':
            self.exclude_exact = listval(value)
        if field == 'only':
            self.only = listval(value)
        if field == 'externals':
            self.externals = boolval(value)
        if field == 'reverse':
            self.reverse = boolval(value)
        if field == 'rankdir':
            self.rankdir = str(value)
        if field == 'cluster':
            self.cluster = boolval(value)
        if field == 'min_cluster_size':
            self.min_cluster_size = int(value)
        if field == 'max_cluster_size':
            self.max_cluster_size = int(value)
        if field == 'keep_target_cluster':
            self.keep_target_cluster = boolval(value)
        if field == 'collapse_target_cluster':
            self.collapse_target_cluster = boolval(value)
        if field == 'rmprefix':
            self.rmprefix = listval(value)
        if field == 'start_color':
            self.start_color = int(value)
    def __iter__(self):
        """Yield (field, value) pairs for all options."""
        return iter(self.__dict__.items())
    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails.  Raise
        # AttributeError (not KeyError) so hasattr()/getattr(.., default)
        # work as documented.
        try:
            return self.__dict__[name]
        except KeyError:
            raise AttributeError(name)
    def update(self, data):
        """Apply a dict of option values, warning about unknown keys."""
        if set(data.keys()) - set(self.__dict__.keys()):
            warnings.warn("Unknown config keys: %s" % (set(data.keys()) - set(self.__dict__.keys())))
        for k, v in data.items():
            if k in self.__dict__:
                self.set_field(k, v)
    def write_ini(self):
        """Serialize all options as an ini [pydeps] section."""
        import configparser
        cfg = configparser.ConfigParser()
        cfg['pydeps'] = {k : str(v) for k, v in self}
        with StringIO() as fp:
            cfg.write(fp)
            return fp.getvalue()
    def write_toml(self):
        """Serialize non-None options under [tool.pydeps] — the table
        load_toml reads (the old flat 'tools.pydeps' key did not
        round-trip)."""
        import tomlkit
        return tomlkit.dumps({
            'tool': {'pydeps': {k : v for k, v in self if v is not None}}
        })
    def write_json(self):
        """Serialize all options as json under a top-level "pydeps" key."""
        data = {'pydeps': self.__dict__}
        return json.dumps(data, indent=4)
    def write_yaml(self):
        """Serialize all options as yaml under a top-level "pydeps" key."""
        import yaml
        data = {'pydeps': self.__dict__}
        return yaml.dump(data, default_flow_style=False)
    @classmethod
    def load(cls, fnames):
        """Load config from file.

        Later files in ``fnames`` override earlier ones.
        """
        conf = cls()
        for fname in fnames:
            ftype = filetype(fname)
            if ftype == 'toml':
                conf.update(load_toml(fname))
            if ftype == 'json':
                conf.update(load_json(fname))
            if ftype == 'yaml':
                conf.update(load_yaml(fname))
            if ftype == 'ini':
                conf.update(load_ini(fname))
        return conf
def filetype(fname):
    """Classify a config filename by its extension; 'ini' is the fallback
    for anything unrecognized."""
    suffix_map = {
        '.toml': 'toml',
        '.json': 'json',
        '.yaml': 'yaml',
        '.yml': 'yaml',
        '.ini': 'ini',
    }
    for suffix, kind in suffix_map.items():
        if fname.endswith(suffix):
            return kind
    return 'ini'
| 11,463 | 26.757869 | 101 | py |
pydeps | pydeps-master/pydeps/colors.py | # -*- coding: utf-8 -*-
"""Color calculations.
"""
import colorsys
# noinspection PyAugmentAssignment
# import hashlib
START_COLOR = 0 # Value can be changed from command-line argument
def frange(start, end, step):
    """Float analogue of range(): yield start, start+step, ... while < end.

    Deliberately uses repeated addition (not start + i*step) so rounding
    accumulates exactly as callers have always seen it.
    """
    current = start
    while current < end:
        yield current
        current += step
def distinct_hues(count):
    """Yield ``count`` hue values in [0, 1), equidistantly spaced around
    the color wheel and rotated by the global START_COLOR offset
    (degrees).
    """
    step = 360. / count
    # same float accumulation as frange(0., 360., step)
    angle = 0.
    while angle < 360.:
        yield ((angle + START_COLOR) % 360) / 360.
        angle += step
class ColorSpace(object):
    """Assign a distinct base hue to each top-level package and derive
    per-node background/foreground colors from it.
    """
    def __init__(self, nodes):
        # Build a tree of dotted-name components; only the number of
        # distinct top-level names matters for hue assignment.
        self.nodes = {}
        for node in nodes:
            parts = node.name.split('.')
            self.add_to_tree(parts, self.nodes)
        self.basecolors = distinct_hues(len(self.nodes))
        # hue per top-level package, assigned in sorted-name order
        self.colors = dict(zip(sorted(self.nodes.keys()), self.basecolors))
    def add_to_tree(self, parts, tree):
        """Recursively insert the name components into the nested dict."""
        if not parts:
            return
        first, rest = parts[0], parts[1:]
        if first not in tree:
            tree[first] = {}
        self.add_to_tree(rest, tree[first])
    def color(self, src):
        """Return (background, foreground) rgb tuples for a Source.

        Saturation grows with out-degree (capped at 0.95), lightness
        shrinks with in-degree (floored at 0.3), so busier nodes stand
        out; foreground is black or white, whichever contrasts more.
        """
        nodename = src.name
        parts = nodename.split('.')
        hue = self.colors[parts[0]]
        saturation = min(0.95, 0.4 + 0.1 * (src.out_degree - 1))
        lightness = max(0.3, 0.5 - 0.02 * (src.in_degree - 1))
        bg = rgb2eightbit(colorsys.hls_to_rgb(hue, lightness, saturation))
        black = (0, 0, 0)
        white = (255, 255, 255)
        fg = foreground(bg, black, white)
        return bg, fg
    def __str__(self):  # pragma: nocover
        import pprint
        return pprint.pformat(self.colors)
def rgb2eightbit(rgb):
    """Convert floats in [0..1] to integers in [0..255].

    ``int(x * 256)`` buckets [0, 1) uniformly but maps x == 1.0 to 256,
    which is out of range for an 8-bit channel and yields malformed
    '#...' strings in rgb2css -- clamp to 255.
    """
    return tuple(min(int(x * 256), 255) for x in rgb)
def name2rgb(hue):
    """Map a hue angle in degrees to an rgb tuple at fixed saturation 0.8
    and value 0.7.

    Historical name: the hue was originally derived from a module name.
    """
    rgb = colorsys.hsv_to_rgb(hue / 360.0, .8, .7)
    return tuple(int(channel * 256) for channel in rgb)
def brightness(r, g, b):
    """Perceived brightness per the w3c formula; result in 0..255."""
    return (299 * r + 587 * g + 114 * b) / 1000
def brightnessdiff(a, b):
    """Absolute perceived-brightness difference of two rgb tuples.

    The w3c guideline considers a difference greater than 125 good.
    """
    return abs(brightness(*a) - brightness(*b))
def colordiff(rgb1, rgb2):
    """Sum of absolute per-channel differences of two rgb tuples
    (range 0..765).

    The w3c guideline considers a difference greater than 500 good.
    """
    return sum(abs(c1 - c2) for c1, c2 in zip(rgb1, rgb2))
def foreground(background, *options):
    """Pick the option color that contrasts most with ``background``.

    Contrast is the w3c perceived-brightness difference (formula inlined
    from ``brightness``); ties are broken in favor of the 'largest' rgb
    tuple, matching the original reverse-sorted ranking.
    """
    def _lum(rgb):
        r, g, b = rgb
        return (r * 299 + g * 587 + b * 114) / 1000
    bg_lum = _lum(background)
    ranked = [(abs(bg_lum - _lum(color)), color) for color in options]
    return max(ranked)[1]
def rgb2css(rgb):
    """Format an rgb 3-tuple as a css hex color string ('#rrggbb')."""
    return '#{:02x}{:02x}{:02x}'.format(*rgb)
| 2,957 | 24.282051 | 75 | py |
pydeps | pydeps-master/pydeps/tools/pydeps2requirements.py | # -*- coding: utf-8 -*-
"""
Generate requirements.txt from pydeps output...
Usage::
pydeps <packagename> --max-bacon=0 \
--show-raw-deps --nodot \
--noshow | python pydeps2requirements.py
"""
import json
import os
import site
import sys
from collections import defaultdict
from pydeps.package_names import find_package_names
WIDTH = 80
# packages that are difficult to eliminate but shouldn't ever be part
# of a package's requirements.
skiplist = {
'_markerlib', 'pkg_resources'
}
def dep2req(name, package, imported_by):
    """Format one requirements.txt line: the package name padded to 15
    chars, followed by a '# from:' comment listing the importers
    (truncated with '...' so the line stays under WIDTH chars).

    Importers belonging to the package itself (name-prefixed) are
    dropped from the comment.
    """
    importers = [mod for mod in sorted(imported_by) if not mod.startswith(name)]
    prefix = '%-15s # from: ' % package
    comment = ', '.join(importers)
    room = WIDTH - 24
    if len(comment) < room:
        return prefix + comment
    return prefix + comment[:room - 3] + '...'
# lazily-computed cache for site_packages()
_SITE_PACKAGE_DIRS = None
def site_packages():
    """Return (and cache) all existing site-packages directories,
    user-site candidates considered last (the candidate list is
    reversed before filtering, as in the original)."""
    global _SITE_PACKAGE_DIRS
    if _SITE_PACKAGE_DIRS is None:
        candidates = [site.getusersitepackages()] + site.getsitepackages()
        _SITE_PACKAGE_DIRS = [
            path for path in reversed(candidates) if os.path.isdir(path)
        ]
    return _SITE_PACKAGE_DIRS
def is_site_package(p):
    """True if path ``p`` lies under any site-packages directory."""
    return any(p.startswith(prefix) for prefix in site_packages())
def pydeps2reqs(deps):
    """Convert a deps instance into requirements.

    ``deps`` maps module names to dicts with at least 'path' and
    'imported_by'.  Modules under the interpreter's base prefix
    (stdlib) are skipped; only site-packages modules count, keyed by
    their top-level package name.
    """
    reqs = defaultdict(set)
    # sys.real_prefix exists only under old-style virtualenv
    baseprefix = sys.real_prefix if hasattr(sys, 'real_prefix') else sys.base_prefix
    pkgnames = find_package_names()
    for k, v in list(deps.items()):
        # not a built-in
        p = v['path']
        if p and not p.startswith(baseprefix):
            if is_site_package(p):
                # skip compiled extension stubs
                if not p.endswith('.pyd'):
                    if '/win32/' in p.replace('\\', '/'):
                        # pywin32 modules all map to the 'win32' requirement
                        reqs['win32'] |= set(v['imported_by'])
                    else:
                        name = k.split('.', 1)[0]
                        if name not in skiplist:
                            reqs[name] |= set(v['imported_by'])
    # presumably pydeps' placeholder module -- never a real requirement
    if '_dummy' in reqs:
        del reqs['_dummy']
    return '\n'.join(dep2req(name, pkgnames[name], reqs[name]) for name in sorted(reqs))
def main():
    """Cli entrypoint: read pydeps' json dependency output from the file
    given as the single argument (or from stdin) and print the generated
    requirements."""
    if len(sys.argv) == 2:
        with open(sys.argv[1], 'rb') as fp:
            data = json.load(fp)
    else:
        data = json.loads(sys.stdin.read())
    print(pydeps2reqs(data))
if __name__ == "__main__":
    main()
| 2,587 | 24.623762 | 88 | py |
pydeps | pydeps-master/tests/test_dep2dot.py | # -*- coding: utf-8 -*-
import os
import sys
import pydeps.cli
from pydeps import pydeps
from pydeps.target import Target
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
def test_dep2dot():
    """End-to-end: dependency analysis finds the relative import, and the
    cli renders an svg next to the package.
    """
    files = """
        foo:
            - __init__.py
            - a.py: |
                from . import b
            - b.py
    """
    with create_files(files) as workdir:
        assert simpledeps('foo', '-LDEBUG -vv') == {
            'foo.b -> foo.a'
        }
        args = pydeps.cli.parse_args(["foo", "--noshow"])
        pydeps.pydeps(**args)
        # default output name: <package>.svg in the work dir
        assert os.path.exists(os.path.join(workdir, 'foo.svg'))
| 648 | 22.178571 | 63 | py |
pydeps | pydeps-master/tests/test_externals.py | # -*- coding: utf-8 -*-
import ast
from pydeps.pydeps import pydeps
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
def test_relative_imports(capsys):
    """--externals should print the list of external packages (bar) that
    foo depends on.  NOTE(review): the name is misleading -- this tests
    the externals feature, not relative imports.
    """
    files = """
        foo:
            - __init__.py
            - a.py: |
                from bar import b
        bar:
            - __init__.py
            - b.py
    """
    with create_files(files) as workdir:
        assert simpledeps('foo') == {
            'bar -> foo.a',
            'bar.b -> foo.a'
        }
        pydeps(fname='foo', externals=True)
        io = capsys.readouterr()
        # externals mode prints a python-literal list of package names
        assert ast.literal_eval(io.out) == ['bar']
| 638 | 22.666667 | 50 | py |
pydeps | pydeps-master/tests/test_dot.py | # -*- coding: utf-8 -*-
from pydeps.dot import dot, cmd2args
def test_svg(tmpdir):
    """dot() should render graph source given as a unicode string to svg."""
    tmpdir.chdir()
    ab = tmpdir.join('ab.svg')
    dot(u"""
    digraph G {
        a -> b
    }
    """, o=ab.basename)
    assert ab.exists()
def test_svg_str(tmpdir):
    """dot() should render graph source given as a plain str to svg."""
    tmpdir.chdir()
    ab = tmpdir.join('ab.svg')
    dot("""
    digraph G {
        a -> b
    }
    """, o=ab.basename)
    assert ab.exists()
def test_boolopt(tmpdir):
    """Boolean keyword options (x=True) must be forwarded as bare flags
    without breaking the render."""
    tmpdir.chdir()
    ab = tmpdir.join('ab.svg')
    dot("""
    digraph G {
        a -> b
    }
    """, x=True, o=ab.basename)
    print(tmpdir.listdir())
    assert ab.exists()
def test_obj(tmpdir): # pragma: nocover
    """dot() should accept any object whose __str__ (py3) / __unicode__
    (py2) yields dot source."""
    GRAPH = u"""
    digraph G {
        a -> b
    }
    """
    import sys
    if sys.version_info >= (3,):
        class MyClass:
            def __str__(self):
                return GRAPH
    else:
        class MyClass(object):
            def __unicode__(self):
                return GRAPH
    tmpdir.chdir()
    ab = tmpdir.join('ab.svg')
    dot(MyClass(), x=True, o=ab.basename)
    print(tmpdir.listdir())
    assert ab.exists()
def test_cmd2args():
    """cmd2args passes lists through unchanged."""
    assert cmd2args([1, 2]) == [1, 2]
| 1,161 | 17.444444 | 41 | py |
pydeps | pydeps-master/tests/test_skinny_package.py | # -*- coding: utf-8 -*-
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
def test_from_html5lib():
    """A 'skinny' package (only an __init__.py) is still reported as a
    dependency when imported from.  NOTE(review): the name suggests this
    reproduces an html5lib-shaped layout -- TODO confirm the original
    issue it covers.
    """
    files = """
        foo:
            - __init__.py
            - a.py: |
                from bar import py
        bar:
            - __init__.py
    """
    with create_files(files) as workdir:
        assert simpledeps('foo', '--show-deps -LINFO -vv') == {
            'bar -> foo.a',
        }
| 435 | 21.947368 | 63 | py |
pydeps | pydeps-master/tests/test_cli.py | # -*- coding: utf-8 -*-
import os
from pydeps.cli import error
from pydeps.pydeps import pydeps
from tests.filemaker import create_files
from tests.simpledeps import simpledeps, empty
def test_output(tmpdir):
    """--output should write the svg to the requested path."""
    files = """
        unrelated: []
        foo:
            - __init__.py
            - a.py: |
                from bar import b
        bar:
            - __init__.py
            - b.py
    """
    with create_files(files) as workdir:
        assert os.getcwd() == workdir
        outname = os.path.join('unrelated', 'foo.svg')
        assert not os.path.exists(outname)
        pydeps(fname='foo', **empty('--noshow --show-dot', output=outname))
        assert os.path.exists(outname)
def test_rankdir_default(tmpdir, capsys):
    """Without --rankdir the emitted dot source uses the TB default."""
    files = """
        unrelated: []
        foo:
            - __init__.py
            - a.py: |
                from bar import b
        bar:
            - __init__.py
            - b.py
    """
    with create_files(files) as workdir:
        assert os.getcwd() == workdir
        outname = os.path.join('unrelated', 'foo.svg')
        pydeps(fname='foo', **empty('--noshow --show-dot', output=outname))
        captured_stdout = capsys.readouterr().out
        assert 'rankdir = TB' in captured_stdout
def test_error(capsys):
    """Test that error function prints reminder about missing inits on FileNotFoundErrors."""
    try:
        error("[Errno 2] No such file or directory: 'foo'")
    except SystemExit:
        # because error invokes sys.exit(1), we have to catch it here, otherwise the test would always fail.
        pass
    else:  # test should fail if error function doesn't raise
        assert False
    captured_stdout = capsys.readouterr().out
    assert "(Did you forget to include an __init__.py?)" in captured_stdout
def test_rankdir_BT(tmpdir, capsys):
    """--rankdir=BT must be propagated into the emitted dot source."""
    files = """
        unrelated: []
        foo:
            - __init__.py
            - a.py: |
                from bar import b
        bar:
            - __init__.py
            - b.py
    """
    with create_files(files) as workdir:
        assert os.getcwd() == workdir
        outname = os.path.join('unrelated', 'foo.svg')
        pydeps(fname='foo', **empty('--noshow --show-dot --rankdir=BT', output=outname))
        captured_stdout = capsys.readouterr().out
        assert 'rankdir = BT' in captured_stdout
| 2,357 | 28.111111 | 108 | py |
pydeps | pydeps-master/tests/test_dirtree.py | # -*- coding: utf-8 -*-
from pydeps.pydeps import pydeps
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
import pytest
@pytest.mark.skip(reason="TODO: fix this (issue #174)")
def test_dirtree():
    """Dependencies across sibling sub-packages of a plain (non-package)
    directory -- currently skipped, tracked by issue #174.
    """
    files = """
        foo:
            - a:
                - __init__.py: ''
                - a.py: |
                    from b.b import bval
            - b:
                - __init__.py: ''
                - b.py: |
                    bval = 42
    """
    with create_files(files) as workdir:
        assert simpledeps('foo', '--show-deps -LINFO -vv') == {
            'b.b -> a.a',
        }
| 633 | 24.36 | 63 | py |
pydeps | pydeps-master/tests/test_json.py | # -*- coding: utf-8 -*-
import json
import os
from pydeps import pydeps
from tests.filemaker import create_files
from tests.simpledeps import simpledeps, depgrf
def test_dep2dot():
    """The DepGraph repr is valid json and Source objects have sane
    __eq__/__str__/__repr__.  NOTE(review): misnamed -- this exercises the
    json representation, not dot output.
    """
    files = """
        foo:
            - __init__.py
            - a.py: |
                from . import b
            - b.py
    """
    with create_files(files) as workdir:
        g = depgrf("foo")
        d = json.loads(repr(g))
        print(d)
        assert '__main__' in d['foo']['imported_by']
        # trivially-true comparison: exercises Source.__eq__ on itself
        assert g.sources['foo.a'] == g.sources['foo.a']
        assert str(g.sources['foo.a']).startswith('foo.a')
        assert 'foo.b' in repr(g.sources['foo.a'])
| 654 | 25.2 | 58 | py |
pydeps | pydeps-master/tests/test_config.py | # -*- coding: utf-8 -*-
# from devtools import debug
from pydeps.cli import parse_args
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
def test_pydeps_config_ini():
    """An explicit --config=<ini file> feeds [pydeps] values into the args."""
    files = """
        config.ini: |
            [pydeps]
            rankdir = BT
            exclude =
                a
                c
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        args = parse_args(['relimp', '--config=config.ini'])
        assert args['rankdir'] == "BT"
        assert args['exclude'] == ['a', 'c']
def test_pydeps_config_setupcfg_ini():
    """setup.cfg's [pydeps] section is picked up without --config."""
    files = """
        setup.cfg: |
            [pydeps]
            rankdir = BT
            exclude =
                a
                c
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        args = parse_args(['relimp'])
        assert args['rankdir'] == "BT"
        assert args['exclude'] == ['a', 'c']
def test_pydeps_config_json():
    """--config with a json file reads the top-level "pydeps" key."""
    files = """
        pydeps.json: |
            {
                "pydeps": {
                    "rankdir": "BT",
                    "exclude": ["a", "c"]
                }
            }
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        args = parse_args(['relimp', '--config=pydeps.json'])
        assert args['rankdir'] == "BT"
        assert args['exclude'] == ['a', 'c']
def test_pydeps_config_pydeps_yaml():
    """pydeps.yml is auto-discovered without --config."""
    files = """
        pydeps.yml: |
            pydeps:
                rankdir: BT
                exclude:
                    - a
                    - c
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        args = parse_args(['relimp'])
        # debug(args)
        assert args['rankdir'] == "BT"
        assert args['exclude'] == ['a', 'c']
def test_pydeps_config_yaml():
    """--config with an arbitrary yaml file reads the "pydeps" key."""
    files = """
        config.yml: |
            pydeps:
                rankdir: BT
                exclude:
                    - a
                    - c
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        args = parse_args(['relimp', '--config=config.yml'])
        # debug(args)
        assert args['rankdir'] == "BT"
        assert args['exclude'] == ['a', 'c']
def test_pydeps_config_pyproject_toml():
    """pyproject.toml's [tool.pydeps] table is auto-discovered."""
    files = """
        pyproject.toml: |
            [tool.pydeps]
            rankdir = "BT"
            exclude = ["a", "c"]
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        args = parse_args(['relimp'])
        assert args['rankdir'] == "BT"
        assert args['exclude'] == ['a', 'c']
def test_pydeps_config_toml():
    """--config with an arbitrary toml file reads [tool.pydeps]."""
    files = """
        config.toml: |
            [tool.pydeps]
            rankdir = "BT"
            exclude = ["a", "c"]
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        args = parse_args(['relimp', '--config=config.toml'])
        assert args['rankdir'] == "BT"
        assert args['exclude'] == ['a', 'c']
| 4,035 | 23.760736 | 61 | py |
pydeps | pydeps-master/tests/filemaker.py | # -*- coding: utf-8 -*-
from contextlib import contextmanager
import os
import shutil
import tempfile
import yaml
class FilemakerBase(object):  # pragma: nocover
    """Walk a yaml-style file-tree definition, emitting one action per node.

    Tree grammar: a dict key mapping to a list is a directory; a key
    mapping to a string is a file with that content; a bare string is an
    empty file (the special name 'empty' is skipped).  The base class
    only prints the steps (dry run); subclasses override the marked
    methods to actually create directories/files.
    """
    def __init__(self, root, fdef):
        self.fdef = fdef
        self.goto_root(root)
        self._makefiles(fdef)
    def goto_root(self, dirname):
        """override
        """
        print("pushd", dirname)
    def makedir(self, dirname, content):
        """override, but call self.make_list(content)
        """
        print("mkdir " + dirname)
        print("pushd " + dirname)
        self.make_list(content)
        print("popd")
    def make_file(self, filename, content):
        """override
        """
        print("create file: %s %r" % (filename, content))
    def make_empty_file(self, fname):
        """override
        """
        print("touch", fname)
    def _make_empty_file(self, fname):
        # 'empty' is the magic name for "no file at all"
        if fname != 'empty':
            self.make_empty_file(fname)
    def make_list(self, lst):
        """Process each node in a directory-content list."""
        for item in lst:
            self._makefiles(item)
    def _makefiles(self, f):
        """Dispatch one tree node to the right action by its shape."""
        if isinstance(f, dict):
            for k, v in list(f.items()):
                if isinstance(v, list):
                    self.makedir(dirname=k, content=v)
                elif isinstance(v, str):
                    self.make_file(filename=k, content=v)
                else:  # pragma: nocover
                    raise ValueError("Unexpected:", k, v)
        elif isinstance(f, str):
            self._make_empty_file(f)
        elif isinstance(f, list):
            self.make_list(f)
        else:  # pragma: nocover
            raise ValueError("Unknown type:", f)
class Filemaker(FilemakerBase):
    """FilemakerBase subclass that really creates the directories/files.

    Note: goto_root chdirs into the tree and does NOT restore the
    previous cwd -- callers (create_files) are responsible for that.
    """
    def goto_root(self, dirname):
        os.chdir(dirname)
    def makedir(self, dirname, content):
        cwd = os.getcwd()
        os.mkdir(dirname)
        os.chdir(dirname)
        self.make_list(content)
        os.chdir(cwd)
    def make_file(self, filename, content):
        # use a context manager so the handle is closed deterministically
        # (the original open(...).write(...) relied on CPython refcounting)
        with open(filename, 'w') as fp:
            fp.write(content)
    def make_empty_file(self, fname):
        open(fname, 'w').close()
@contextmanager
def create_files(filedef, cleanup=True):
    """Context manager: materialize the yaml tree ``filedef`` in a fresh
    temp directory, chdir into it, and yield the directory path.

    On exit the original cwd is restored and (unless ``cleanup`` is
    False, useful for debugging) the temp tree is removed.
    """
    fdef = yaml.safe_load(filedef)
    cwd = os.getcwd()
    # realpath: on macOS mkdtemp may return a /var -> /private/var symlink
    tmpdir = os.path.realpath(tempfile.mkdtemp())
    try:
        Filemaker(tmpdir, fdef)
        if not cleanup:
            print("TMPDIR =", tmpdir)
        yield tmpdir
    finally:
        os.chdir(cwd)
        if cleanup:
            shutil.rmtree(tmpdir, ignore_errors=True)
| 2,606 | 25.333333 | 76 | py |
pydeps | pydeps-master/tests/test_skip.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from tests.filemaker import create_files
from tests.simpledeps import simpledeps, depgrf
def test_no_skip():
    """Baseline: all deps of the relimp chain a <- b <- c are reported."""
    files = """
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        print("plain", simpledeps('relimp'))
        assert simpledeps('relimp') == {
            'relimp.b -> relimp.a',
            'relimp.c -> relimp.b'
        }
def test_skip_module_pattern():
    """-x with a glob matching every module removes all deps."""
    files = """
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        print("-x", simpledeps('relimp', '-x relimp.*'))
        assert simpledeps('relimp', '-x relimp.*') == set()
def test_skip_exact_pattern():
    """-xx (exact) excludes the named package but not its submodules."""
    files = """
        relimp:
            - __init__.py
            - a.py: |
                from . import b
                from . import c
            - b.py: |
                from .c import d
            - c:
                - __init__.py
                - d.py
    """
    with create_files(files) as workdir:
        print('plain', simpledeps('relimp'))
        assert simpledeps('relimp') == {
            'relimp.b -> relimp.a',
            'relimp.c.d -> relimp.b',
            'relimp.c -> relimp.b',
            'relimp.c -> relimp.a',
        }
        print('plain', simpledeps('relimp', '-xx relimp.c'))
        assert simpledeps('relimp', '-xx relimp.c') == {
            'relimp.b -> relimp.a',
            'relimp.c.d -> relimp.b',  # this should remain
            # 'relimp.c -> relimp.b',   # these should be filtered away..
            # 'relimp.c -> relimp.a',
        }
def test_skip_exact():
    """-xx on a leaf module removes only that module's edges."""
    files = """
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        print('-xx', simpledeps('relimp', '-xx relimp.c'))
        assert simpledeps('relimp', '-xx relimp.c') == {
            'relimp.b -> relimp.a'
        }
def test_skip_modules():
    """-x on a leaf module removes only that module's edges."""
    files = """
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        assert simpledeps('relimp', '-x relimp.c') == {
            'relimp.b -> relimp.a'
        }
        # g = depgrf('relimp', '-x relimp.c relimp.b')
        # print g
        # print simpledeps('relimp', '-x relimp.c relimp.b')
        # assert 1
        # assert g == 42
def test_rawdeps():
    """--show-raw-deps must still honor -x exclusions in the result."""
    files = """
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        assert simpledeps('relimp', '--show-raw-deps -x relimp.c') == {
            'relimp.b -> relimp.a'
        }
| 3,232 | 25.284553 | 72 | py |
pydeps | pydeps-master/tests/test_pyw.py | # -*- coding: utf-8 -*-
import sys
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
import pytest
@pytest.mark.skipif(sys.platform != 'win32', reason=".pyw files only exist on windows")
def test_from_pyw():
    """A .pyw entry script's imports are analysed like a .py script
    (windows-only, since .pyw is a windows convention)."""
    files = """
        baz.pyw: |
            import foo.a
            import bar
        foo:
            - __init__.py
            - a.py: |
                from bar import py
        bar:
            - __init__.py
    """
    with create_files(files) as workdir:
        assert simpledeps('baz.pyw', '--show-deps -LINFO -vv') == {
            'foo.a -> baz.pyw',
            'foo -> baz.pyw',
            'bar -> baz.pyw',
            'bar -> foo.a',
        }
| 706 | 24.25 | 87 | py |
pydeps | pydeps-master/tests/test_cycles.py | # -*- coding: utf-8 -*-
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
def test_cycle():
    """A mutual import (a <-> b) must produce edges in both directions."""
    files = """
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import a
    """
    # cleanup=False leaves the temp tree around for debugging
    with create_files(files, cleanup=False) as workdir:
        print("WORKDIR:", workdir)
        deps = simpledeps('relimp')
        assert 'relimp.a -> relimp.b' in deps
        assert 'relimp.b -> relimp.a' in deps
| 518 | 24.95 | 55 | py |
pydeps | pydeps-master/tests/test_colors.py | # -*- coding: utf-8 -*-
import os
from pydeps.colors import rgb2css, brightness, brightnessdiff, colordiff, name2rgb, foreground
# Shared rgb fixtures for the color tests.
# NOTE(review): 'yellow' is actually cyan (0, 255, 255) -- the test
# expectations below are written against this value, so it is kept.
red = (255, 0, 0)
green = (0, 255, 0)
yellow = (0, 255, 255)
blue = (0, 0, 255)
black = (0, 0, 0)
white = (255, 255, 255)
def test_rgb2css():
    """rgb tuples format as lowercase '#rrggbb' css strings."""
    assert rgb2css(red) == '#ff0000'
    assert rgb2css(green) == '#00ff00'
    assert rgb2css(yellow) == '#00ffff'
    assert rgb2css(blue) == '#0000ff'
    assert rgb2css(black) == '#000000'
    assert rgb2css(white) == '#ffffff'
def test_brightness():
    """'yellow' (cyan) is perceptually closer to white than to black."""
    assert brightnessdiff(yellow, white) < brightnessdiff(yellow, black)
def test_colordiff():
    """Channel-sum distance: blue is closer to 'yellow' (cyan, shares the
    blue channel) than to red."""
    assert colordiff(blue, yellow) < colordiff(blue, red)
def test_foreground():
    """foreground picks the candidate with the best contrast; each line
    removes the previous winner from the option set."""
    assert foreground(black, red, green, yellow, blue, black, white) == white
    assert foreground(black, red, green, yellow, blue, black) == yellow
    assert foreground(black, red, green, blue, black) == green
    assert foreground(black, red, blue, black) == red
    assert foreground(black, blue, black) == blue
def test_name2rgb():
    """Modules in the same top-level package get the same colors.

    Bug fixed: the inner helper ignored its ``name`` argument and passed
    a hard-coded hue (13), so the assertion compared two identical calls
    and was vacuous.  The hue is now derived from the top-level package
    name, which is what the assertion was written to check.
    """
    def fg(name):
        hue = hash(name.split('.')[0]) % 360
        return foreground(
            name2rgb(hue),
            black, white)
    assert fg('hello') == fg('hello.world')
| 1,194 | 24.978261 | 94 | py |
pydeps | pydeps-master/tests/test_package_names.py |
from pydeps.package_names import find_package_names
def test_find_package_names():
    """find_package_names discovers installed distributions; pip and
    pytest are assumed present in any environment running this suite."""
    packages = find_package_names()
    assert 'pip' in packages
    assert 'pytest' in packages
| 184 | 17.5 | 51 | py |
pydeps | pydeps-master/tests/test_relative_imports.py | # -*- coding: utf-8 -*-
import os
import sys
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
import pytest
def test_relative_imports():
    """A single `from . import b` produces one intra-package edge."""
    files = """
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py
    """
    with create_files(files) as workdir:
        assert simpledeps('relimp') == {'relimp.b -> relimp.a'}
def test_relative_imports2():
    """Chained relative imports produce one edge per import."""
    files = """
        relimp:
            - __init__.py
            - a.py: |
                from . import b
            - b.py: |
                from . import c
            - c.py
    """
    with create_files(files) as workdir:
        deps = simpledeps('relimp')
        assert 'relimp.c -> relimp.b' in deps
        assert 'relimp.b -> relimp.a' in deps
def test_relative_imports3():
    """`from .b import c` still records the dependency on module b."""
    files = """
        relimp:
            - __init__.py
            - a.py: |
                from .b import c
            - b.py
    """
    with create_files(files) as workdir:
        assert simpledeps('relimp') == {'relimp.b -> relimp.a'}
def test_relative_imports_same_name_with_std():
    """A module shadowing a stdlib name (io): py2 resolved the implicit
    relative import to itself, py3 resolves it to the stdlib module."""
    files = """
        relimp:
            - __init__.py
            - io.py: |
                import io
    """
    with create_files(files) as workdir:
        if sys.version_info < (3,):  # pragma: nocover
            deps = {'relimp.io -> relimp.io'}
        else:  # pragma: nocover
            deps = {'io -> relimp.io'}
        assert simpledeps('relimp', '--pylib') == deps
def test_relative_imports_same_name_with_std_future():
    """With absolute_import from __future__, the stdlib io always wins."""
    files = """
        relimp:
            - __init__.py
            - io.py: |
                from __future__ import absolute_import
                import io
    """
    with create_files(files) as workdir:
        deps = {
            '__future__ -> relimp.io',
            'io -> relimp.io'
        }
        assert simpledeps('relimp', '--pylib') == deps
def test_pydeps_colors():
    """Stdlib imports are skipped by default; only intra-package edges
    remain (regression fixture mirroring pydeps' own colors module)."""
    files = """
        pdeps:
            - __init__.py
            - colors.py: |
                import colorsys
            - depgraph.py: |
                import json
                import pprint
                import enum
                from . import colors
    """
    with create_files(files, cleanup=False) as workdir:
        assert simpledeps('pdeps', '-x enum') == {
            'pdeps.colors -> pdeps.depgraph',
        }
def test_hierarchy():
    """`from ..b import bmodule` records both the sibling package and the
    sibling module as dependencies."""
    files = """
        relimp:
            - __init__.py
            - a:
                - __init__.py
                - amodule.py: |
                    from ..b import bmodule
            - b:
                - __init__.py
                - bmodule.py
    """
    with create_files(files, cleanup=True) as workdir:
        os.system("tree " + workdir)
        deps = simpledeps('relimp')
        assert 'relimp.b -> relimp.a.amodule' in deps
        assert 'relimp.b.bmodule -> relimp.a.amodule' in deps
| 2,990 | 24.784483 | 68 | py |
pydeps | pydeps-master/tests/simpledeps.py | import pydeps.cli
from pydeps import pydeps
from pydeps.py2depgraph import py2dep
from pydeps.target import Target
def empty(args="", **kw):
    """Parse ``args`` as pydeps cli flags (config files disabled) and
    return the resulting option dict without 'fname', with ``kw``
    overrides applied -- ready to splat into pydeps(fname=..., **opts).
    """
    opts = pydeps.cli.parse_args(['foo', '--no-config'] + args.split())
    del opts['fname']
    opts.update(kw)
    return opts
def depgrf(item, args=""):
    """Build and return the DepGraph for ``item`` (a module/package name
    or path), using default options plus the cli flag string ``args``.
    """
    t = Target(item)
    # print("TARGET:", t)
    with t.chdir_work():
        res = py2dep(t, **empty(args))
    # print("DEPGRPH:", res)
    return res
def simpledeps(item, args=""):
    """Return the dependency graph of ``item`` as a set of
    '<imported> -> <importer>' strings (handy for test assertions).
    """
    return {"{} -> {}".format(imp.name, mod.name)
            for imp, mod in depgrf(item, args)}
| 572 | 21.92 | 73 | py |
pydeps | pydeps-master/tests/__init__.py | # -*- coding: utf-8 -*-
| 26 | 5.75 | 23 | py |
pydeps | pydeps-master/tests/test_file.py | # -*- coding: utf-8 -*-
import os
from pydeps.py2depgraph import py2dep
from pydeps.pydeps import _pydeps
from pydeps.target import Target
from tests.filemaker import create_files
from tests.simpledeps import empty, simpledeps
def test_file():
    """A single file importing only stdlib yields no edges by default
    (stdlib deps are hidden unless --pylib is given)."""
    files = """
    a.py: |
        import collections
    """
    with create_files(files):
        deps = simpledeps('a.py')
        assert deps == set()
def test_file_in_sub_directory():
    """Targeting a file two directories down (foo/a/b.py) still resolves
    its sibling import, so the edge ``c -> b.py`` is reported.

    Fix: removed the large block of commented-out debugging code (manual
    Target/py2dep walkthrough) -- ``simpledeps()`` performs exactly those
    steps, so the dead code only obscured the test.
    """
    files = """
    foo:
    - a:
      - b.py: |
            import c
      - c.py: ""
    """
    with create_files(files):
        assert 'c -> b.py' in simpledeps('foo/a/b.py')
def test_file_in_directory():
    """Targeting a file one directory down (a/b.py) resolves its sibling
    import and reports the edge ``c -> b.py``."""
    files = """
    - a:
      - b.py: |
            import c
      - c.py: ""
    """
    with create_files(files):
        deps = simpledeps('a/b.py')
        assert 'c -> b.py' in deps
def test_file_pylib():
    """With --pylib, stdlib imports are included as graph edges."""
    files = """
    a.py: |
        import collections
    """
    with create_files(files):
        deps = simpledeps('a.py', '--pylib')
        assert 'collections -> a.py' in deps
def test_file_pyliball():
    """--pylib together with --pylib-all also reports stdlib imports."""
    files = """
    a.py: |
        import collections
    """
    with create_files(files):
        deps = simpledeps('a.py', '--pylib --pylib-all')
        assert 'collections -> a.py' in deps
| 1,816 | 24.591549 | 83 | py |
pydeps | pydeps-master/tests/test_py2dep.py | # -*- coding: utf-8 -*-
import os
# from devtools import debug
from pydeps.pydeps import call_pydeps
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
def test_py2depgraph():
    """A plain import between two sibling files yields exactly one edge.

    Fix: removed the unused ``capsys`` fixture parameter -- this test
    never inspects captured output, so requesting the fixture only added
    setup cost and confusion.
    """
    files = """
    - a.py: |
        import b
    - b.py
    """
    with create_files(files):
        assert simpledeps('a.py') == {'b -> a.py'}
def test_pydeps_api():
    """Calling the public ``call_pydeps`` API writes a dot file that
    contains the expected edge (module names are dot-mangled: ``a.py``
    becomes ``a_py``).

    Fix: removed the commented-out ``debug(...)`` calls -- dead debugging
    code referencing a helper that is not even imported here.
    """
    files = """
    - a.py: |
        import b
    - b.py
    """
    with create_files(files):
        call_pydeps('a.py', no_show=True, show_dot=True, dot_out='a.dot')
        with open('a.dot') as f:
            dot = f.read()
        assert 'b -> a_py' in dot
| 783 | 22.757576 | 73 | py |
pydeps | pydeps-master/tests/test_cluster.py | import os
from pydeps.cli import parse_args
from pydeps.pydeps import pydeps
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
def test_cluster():
    """--cluster groups foreign packages into dot ``subgraph cluster_*``
    blocks, and inter-cluster edges carry an ``ltail`` attribute.

    Fix: ``dot_output = open('output.dot').read()`` leaked the file
    handle (ResourceWarning under -W, delayed close on non-CPython);
    the file is now read inside a ``with`` block.
    """
    files = """
    - bar_module:
        - __init__.py: ''
        - bar_a:
            - __init__.py: ''
            - a.py: |
                from bar_module.bar_a import aa
            - aa.py: ''
        - bar_b:
            - __init__.py: ''
            - b.py: from bar_module.bar_a import aa
        - bar_c:
            - __init__.py: ''
            - c.py: |
                from bar_module.bar_a import a
    - foo_module:
        - __init__.py: ''
        - foo_a:
            - __init__.py: ''
            - a.py: |
                from . import aa
            - aa.py: ''
        - foo_c:
            - __init__.py: ''
            - c.py: |-
                from foo_module.foo_a import aa
                from bar_module.bar_c import c
    """
    with create_files(files) as workdir:
        args = parse_args(['foo_module', '--no-config', '--show-deps', '--cluster', '--max-cluster-size=100',
                           '--show-dot', '--dot-output', 'output.dot', '--no-show', '-LINFO', '-vv'])
        pydeps(**args)
        assert 'output.dot' in os.listdir(workdir)
        with open('output.dot') as f:
            dot_output = f.read()
        assert 'subgraph cluster_bar_module' in dot_output
        assert 'ltail="cluster_bar_module"' in dot_output
| 1,535 | 32.391304 | 109 | py |
pydeps | pydeps-master/tests/test_funny_names.py | # -*- coding: utf-8 -*-
from pydeps.pydeps import pydeps
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
def test_from_html5lib():
    """Importing a module from a sibling package records edges from both
    the package itself and the imported module."""
    files = """
        foo:
            - __init__.py
            - a.py: |
                from bar import py
        bar:
            - __init__.py
            - py.py: |
                barpy = 42
    """
    expected = {
        'bar -> foo.a',
        'bar.py -> foo.a'
    }
    with create_files(files):
        assert simpledeps('foo', '--show-deps -LINFO -vv') == expected
def test_multidot():
    """A file name containing extra dots (foo.bar.py) is handled and its
    stdlib import is reported under --pylib."""
    files = """
        foo.bar.py: |
            from math import pi
    """
    with create_files(files):
        deps = simpledeps('foo.bar.py', '--show-deps --pylib -LINFO -vv')
        assert deps == {'math -> foo.bar.py'}
| 813 | 22.941176 | 78 | py |
pydeps | pydeps-master/tests/test_render_context.py | # -*- coding: utf-8 -*-
from pydeps.render_context import RenderContext, Rankdir
def test_render_context():
    """Default context: rules are emitted in source order and the default
    rank direction is top-to-bottom."""
    ctx = RenderContext()
    with ctx.graph():
        ctx.write_rule('a', 'b')
    text = ctx.text()
    assert 'a -> b' in text
    assert 'b -> a' not in text
    assert 'rankdir = TB' in text
def test_render_context_reverse():
    """reverse=True flips edge direction in the emitted text, while the
    requested rankdir is reported unchanged (the rankdir reversal happens
    in RenderBuffer, not in RenderContext)."""
    ctx = RenderContext(reverse=True, rankdir=Rankdir.BOTTOM_TOP)
    with ctx.graph():
        ctx.write_rule('a', 'b')
    text = ctx.text()
    assert 'b -> a' in text
    assert 'a -> b' not in text
    assert 'rankdir = BT' in text
def test_render_context_rankdir():
    """rankdir=LEFT_RIGHT is rendered as 'rankdir = LR'."""
    ctx = RenderContext(rankdir=Rankdir.LEFT_RIGHT)
    with ctx.graph():
        pass
    assert 'rankdir = LR' in ctx.text()
| 827 | 26.6 | 73 | py |
pydeps | pydeps-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# pydeps documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 13 18:59:19 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# (Adds the repository root so autodoc can import the pydeps package itself.)
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pydeps'
# Copyright year range is computed at build time so it never goes stale.
copyright = u'2014 - %s Bjorn Pettersen' % datetime.date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.12.12'
# The full version, including alpha/beta/rc tags.
# NOTE(review): version/release appear to be maintained by hand here --
# keep them in sync with the package version declared in setup.py.
release = '1.12.12'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pydepsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'pydeps.tex', u'pydeps Documentation',
   u'', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pydeps', u'pydeps Documentation',
     [u''], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'pydeps', u'pydeps Documentation',
   u'', 'pydeps', 'command line access to ConfigParser.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}
| 8,225 | 30.760618 | 80 | py |
pydeps | pydeps-master/docs/module-finder-archive/mf_36.py | """Find modules used by a script, using introspection."""
import dis
import importlib._bootstrap_external
import importlib.machinery
import marshal
import os
import sys
import types
import struct
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import imp
LOAD_CONST = dis.opmap['LOAD_CONST']
IMPORT_NAME = dis.opmap['IMPORT_NAME']
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
STORE_OPS = STORE_NAME, STORE_GLOBAL
EXTENDED_ARG = dis.EXTENDED_ARG
# Modulefinder does a good job at simulating Python's, but it can not
# handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping is lists of paths.
packagePathMap = {}
# A Public interface
def AddPackagePath(packagename, path):
    """Public API: register an extra search *path* for *packagename*.

    Used to tell ModuleFinder about ``__path__`` entries a package would
    normally only add at runtime (see module-level comment above).
    """
    packagePathMap.setdefault(packagename, []).append(path)
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around
# situations in which a package injects itself under the name
# of another package into sys.modules at runtime by calling
# ReplacePackage("real_package_name", "faked_package_name")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    """Public API: record that *oldname* is really provided as *newname*.

    Works around packages that inject themselves under another package's
    name in sys.modules at runtime; must be called before running
    ModuleFinder (see module-level comment above).
    """
    replacePackageMap[oldname] = newname
class Module:
    """A module record produced by ModuleFinder.

    Mirrors the attributes of a real module object (__name__, __file__,
    __path__, __code__) and adds bookkeeping for global names and
    unresolved star-imports.
    """

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned to in the module, including names that
        # arrive via star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports that could not be resolved, i.e. star-imports
        # from a non-Python module.
        self.starimports = {}

    def __repr__(self):
        # Render as Module('name'[, 'file'][, path]) -- file and path are
        # only shown when set.
        parts = [repr(self.__name__)]
        if self.__file__ is not None:
            parts.append(repr(self.__file__))
        if self.__path__ is not None:
            parts.append(repr(self.__path__))
        return "Module(%s)" % ", ".join(parts)
class ModuleFinder:
    """Determine the set of modules a script uses by scanning compiled
    bytecode for import operations, without executing the code.
    (Vendored copy of the stdlib ``modulefinder`` module, Python 3.6 era.)
    """
    def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
        # NOTE: the mutable default arguments are kept from the stdlib
        # original; neither list is mutated by this class.
        if path is None:
            path = sys.path
        self.path = path
        self.modules = {}
        self.badmodules = {}
        self.debug = debug
        self.indent = 0
        self.excludes = excludes
        self.replace_paths = replace_paths
        self.processed_paths = []   # Used in debugging only
    def msg(self, level, str, *args):
        # Emit a debug message when *level* is within the configured
        # verbosity, indented to the current nesting depth.
        if level <= self.debug:
            for i in range(self.indent):
                print("    ", end=' ')
            print(str, end=' ')
            for arg in args:
                print(repr(arg), end=' ')
            print()
    def msgin(self, *args):
        # Like msg(), but first increase the indent (entering a call).
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent + 1
            self.msg(*args)
    def msgout(self, *args):
        # Like msg(), but first decrease the indent (leaving a call).
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent - 1
            self.msg(*args)
    def run_script(self, pathname):
        """Load and scan *pathname* as the ``__main__`` module."""
        self.msg(2, "run_script", pathname)
        with open(pathname) as fp:
            stuff = ("", "r", imp.PY_SOURCE)
            self.load_module('__main__', fp, pathname, stuff)
    def load_file(self, pathname):
        """Load and scan a single source file under its bare module name."""
        dir, name = os.path.split(pathname)
        name, ext = os.path.splitext(name)
        with open(pathname) as fp:
            stuff = (ext, "r", imp.PY_SOURCE)
            self.load_module(name, fp, pathname, stuff)
    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """Simulate ``import name`` (with optional *fromlist*) as executed
        from *caller* at relative-import *level*."""
        self.msg(3, "import_hook", name, caller, fromlist, level)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        if not fromlist:
            return q
        if m.__path__:
            self.ensure_fromlist(m, fromlist)
        return None
    def determine_parent(self, caller, level=-1):
        """Return the package Module that relative imports resolve
        against, or None for absolute imports / top-level callers."""
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError("relative importpath too deep")
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None
    def find_head_package(self, parent, name):
        """Import the first dotted component of *name* and return
        (head_module, remaining_tail)."""
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            i = name.find('.')
            head = name[:i]
            tail = name[i+1:]
        else:
            head = name
            tail = ""
        if parent:
            qname = "%s.%s" % (parent.__name__, head)
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            # Not found below parent: retry as a top-level module.
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError("No module named " + qname)
    def load_tail(self, q, tail):
        """Import the remaining dotted components of a name, one by one."""
        self.msgin(4, "load_tail", q, tail)
        m = q
        while tail:
            i = tail.find('.')
            if i < 0: i = len(tail)
            head, tail = tail[:i], tail[i+1:]
            mname = "%s.%s" % (m.__name__, head)
            m = self.import_module(head, mname, m)
            if not m:
                self.msgout(4, "raise ImportError: No module named", mname)
                raise ImportError("No module named " + mname)
        self.msgout(4, "load_tail ->", m)
        return m
    def ensure_fromlist(self, m, fromlist, recursive=0):
        """Import every submodule named in *fromlist* of package *m*;
        a '*' entry expands to all of the package's submodules."""
        self.msg(4, "ensure_fromlist", m, fromlist, recursive)
        for sub in fromlist:
            if sub == "*":
                if not recursive:
                    all = self.find_all_submodules(m)
                    if all:
                        self.ensure_fromlist(m, all, 1)
            elif not hasattr(m, sub):
                subname = "%s.%s" % (m.__name__, sub)
                submod = self.import_module(sub, subname, m)
                if not submod:
                    raise ImportError("No module named " + subname)
    def find_all_submodules(self, m):
        """Return the names of all submodules found on *m*'s __path__."""
        if not m.__path__:
            return
        modules = {}
        # 'suffixes' used to be a list hardcoded to [".py", ".pyc"].
        # But we must also collect Python extension modules - although
        # we cannot separate normal dlls from Python extensions.
        suffixes = []
        suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
        suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
        suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
        for dir in m.__path__:
            try:
                names = os.listdir(dir)
            except OSError:
                self.msg(2, "can't list directory", dir)
                continue
            for name in names:
                mod = None
                for suff in suffixes:
                    n = len(suff)
                    if name[-n:] == suff:
                        mod = name[:-n]
                        break
                if mod and mod != "__init__":
                    modules[mod] = mod
        return modules.keys()
    def import_module(self, partname, fqname, parent):
        """Locate and load one module; returns the Module or None if it
        cannot be found (never raises for a missing module)."""
        self.msgin(3, "import_module", partname, fqname, parent)
        try:
            m = self.modules[fqname]
        except KeyError:
            pass
        else:
            # Already loaded: return the cached Module.
            self.msgout(3, "import_module ->", m)
            return m
        if fqname in self.badmodules:
            self.msgout(3, "import_module -> None")
            return None
        if parent and parent.__path__ is None:
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                                                   parent and parent.__path__, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None
        try:
            m = self.load_module(fqname, fp, pathname, stuff)
        finally:
            if fp:
                fp.close()
        if parent:
            setattr(parent, partname, m)
        self.msgout(3, "import_module ->", m)
        return m
    def load_module(self, fqname, fp, pathname, file_info):
        """Compile/unmarshal the module's code and scan it for imports."""
        suffix, mode, type = file_info
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == imp.PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == imp.PY_SOURCE:
            co = compile(fp.read()+'\n', pathname, 'exec')
        elif type == imp.PY_COMPILED:
            try:
                marshal_data = importlib._bootstrap_external._validate_bytecode_header(fp.read())
            except ImportError as exc:
                self.msgout(2, "raise ImportError: " + str(exc), pathname)
                raise
            co = marshal.loads(marshal_data)
        else:
            # Extension/builtin module: no bytecode to scan.
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m
    def _add_badmodule(self, name, caller):
        # Record that *caller* failed to import *name* ("-" when unknown).
        if name not in self.badmodules:
            self.badmodules[name] = {}
        if caller:
            self.badmodules[name][caller.__name__] = 1
        else:
            self.badmodules[name]["-"] = 1
    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        # wrapper for self.import_hook() that won't raise ImportError
        if name in self.badmodules:
            self._add_badmodule(name, caller)
            return
        try:
            self.import_hook(name, caller, level=level)
        except ImportError as msg:
            self.msg(2, "ImportError:", str(msg))
            self._add_badmodule(name, caller)
        else:
            if fromlist:
                for sub in fromlist:
                    if sub in self.badmodules:
                        self._add_badmodule(sub, caller)
                        continue
                    try:
                        self.import_hook(name, caller, [sub], level=level)
                    except ImportError as msg:
                        self.msg(2, "ImportError:", str(msg))
                        fullname = name + "." + sub
                        self._add_badmodule(fullname, caller)
    def scan_opcodes(self, co):
        # Scan the code, and yield 'interesting' opcode combinations
        # ("store" for assignments, "absolute_import"/"relative_import"
        # for IMPORT_NAME preceded by its two LOAD_CONST arguments).
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        opargs = [(op, arg) for _, op, arg in dis._unpack_opargs(code)
                  if op != EXTENDED_ARG]
        for i, (op, oparg) in enumerate(opargs):
            if op in STORE_OPS:
                yield "store", (names[oparg],)
                continue
            if (op == IMPORT_NAME and i >= 2
                    and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST):
                level = consts[opargs[i-2][1]]
                fromlist = consts[opargs[i-1][1]]
                if level == 0: # absolute import
                    yield "absolute_import", (fromlist, names[oparg])
                else: # relative import
                    yield "relative_import", (level, fromlist, names[oparg])
                continue
    def scan_code(self, co, m):
        """Walk *co*'s opcodes (and nested code objects) recording global
        names and triggering imports on behalf of module *m*."""
        code = co.co_code
        scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what == "absolute_import":
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                self._safe_import_hook(name, m, fromlist, level=0)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    parent = self.determine_parent(m, level=level)
                    self._safe_import_hook(parent.__name__, None, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)
        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)
    def load_package(self, fqname, pathname):
        """Load a package directory: set up __path__ (honoring the
        packagePathMap/replacePackageMap hooks) and scan its __init__."""
        self.msgin(2, "load_package", fqname, pathname)
        newname = replacePackageMap.get(fqname)
        if newname:
            fqname = newname
        m = self.add_module(fqname)
        m.__file__ = pathname
        m.__path__ = [pathname]
        # As per comment at top of file, simulate runtime __path__ additions.
        m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
        fp, buf, stuff = self.find_module("__init__", m.__path__)
        try:
            self.load_module(fqname, fp, buf, stuff)
            self.msgout(2, "load_package ->", m)
            return m
        finally:
            if fp:
                fp.close()
    def add_module(self, fqname):
        """Return the Module for *fqname*, creating and caching it if new."""
        if fqname in self.modules:
            return self.modules[fqname]
        self.modules[fqname] = m = Module(fqname)
        return m
    def find_module(self, name, path, parent=None):
        """imp.find_module wrapper that honors self.excludes and falls
        back to builtins / self.path when *path* is None."""
        if parent is not None:
            # assert path is not None
            fullname = parent.__name__+'.'+name
        else:
            fullname = name
        if fullname in self.excludes:
            self.msgout(3, "find_module -> Excluded", fullname)
            raise ImportError(name)
        if path is None:
            if name in sys.builtin_module_names:
                return (None, None, ("", "", imp.C_BUILTIN))
            path = self.path
        return imp.find_module(name, path)
    def report(self):
        """Print a report to stdout, listing the found modules with their
        paths, as well as modules that are missing, or seem to be missing.
        """
        print()
        print("  %-25s %s" % ("Name", "File"))
        print("  %-25s %s" % ("----", "----"))
        # Print modules found
        keys = sorted(self.modules.keys())
        for key in keys:
            m = self.modules[key]
            if m.__path__:
                print("P", end=' ')
            else:
                print("m", end=' ')
            print("%-25s" % key, m.__file__ or "")
        # Print missing modules
        missing, maybe = self.any_missing_maybe()
        if missing:
            print()
            print("Missing modules:")
            for name in missing:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))
        # Print modules that may be missing, but then again, maybe not...
        if maybe:
            print()
            print("Submodules that appear to be missing, but could also be", end=' ')
            print("global names in the parent package:")
            for name in maybe:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))
    def any_missing(self):
        """Return a list of modules that appear to be missing. Use
        any_missing_maybe() if you want to know which modules are
        certain to be missing, and which *may* be missing.
        """
        missing, maybe = self.any_missing_maybe()
        return missing + maybe
    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.
        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                continue
            i = name.rfind(".")
            if i < 0:
                missing.append(name)
                continue
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # It's not a global in the package, the package didn't
                    # do funny star imports, it's very likely to be missing.
                    # The symbol could be inserted into the package from the
                    # outside, but since that's not good style we simply list
                    # it missing.
                    missing.append(name)
            else:
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe
    def replace_paths_in_code(self, co):
        """Return *co* with filename prefixes rewritten per
        self.replace_paths (recursively, including nested code objects)."""
        new_filename = original_filename = os.path.normpath(co.co_filename)
        for f, r in self.replace_paths:
            if original_filename.startswith(f):
                new_filename = r + original_filename[len(f):]
                break
        if self.debug and original_filename not in self.processed_paths:
            if new_filename != original_filename:
                self.msgout(2, "co_filename %r changed to %r" \
                                    % (original_filename,new_filename,))
            else:
                self.msgout(2, "co_filename %r remains unchanged" \
                                    % (original_filename,))
            self.processed_paths.append(original_filename)
        consts = list(co.co_consts)
        for i in range(len(consts)):
            if isinstance(consts[i], type(co)):
                consts[i] = self.replace_paths_in_code(consts[i])
        return types.CodeType(co.co_argcount, co.co_kwonlyargcount,
                              co.co_nlocals, co.co_stacksize, co.co_flags,
                              co.co_code, tuple(consts), co.co_names,
                              co.co_varnames, new_filename, co.co_name,
                              co.co_firstlineno, co.co_lnotab, co.co_freevars,
                              co.co_cellvars)
def test():
    """Command-line self-test driver for ModuleFinder.

    Options: -d increase debug, -q quiet, -m treat remaining args as
    module names, -p add search paths, -x exclude a module.  Runs the
    finder over the given script (default "hello.py"), prints a report,
    and returns the finder instance (handy under ``python -i``).
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error as msg:
        print(msg)
        return
    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)
    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]
    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print("path:")
        for item in path:
            print("   ", repr(item))
    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            # everything after a bare -m is treated as a module name
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
# Allow running this module directly as a script for a quick self-test.
if __name__ == '__main__':
    try:
        mf = test()
    except KeyboardInterrupt:
        print("\n[interrupted]")
| 23,027 | 35.321767 | 97 | py |
pydeps | pydeps-master/docs/module-finder-archive/mf_310.py | """Find modules used by a script, using introspection."""
import dis
import importlib._bootstrap_external
import importlib.machinery
import marshal
import os
import io
import sys
LOAD_CONST = dis.opmap['LOAD_CONST']
IMPORT_NAME = dis.opmap['IMPORT_NAME']
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
STORE_OPS = STORE_NAME, STORE_GLOBAL
EXTENDED_ARG = dis.EXTENDED_ARG
# Old imp constants:
_SEARCH_ERROR = 0
_PY_SOURCE = 1
_PY_COMPILED = 2
_C_EXTENSION = 3
_PKG_DIRECTORY = 5
_C_BUILTIN = 6
_PY_FROZEN = 7
# Modulefinder does a good job at simulating Python's, but it can not
# handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping is lists of paths.
packagePathMap = {}
# A Public interface
def AddPackagePath(packagename, path):
    """Public API: register an extra search *path* for *packagename*,
    simulating a ``__path__`` entry the package would add at runtime
    (see module-level comment above)."""
    packagePathMap.setdefault(packagename, []).append(path)
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around
# situations in which a package injects itself under the name
# of another package into sys.modules at runtime by calling
# ReplacePackage("real_package_name", "faked_package_name")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    """Public API: record that *oldname* is really provided as *newname*;
    must be called before running ModuleFinder (see comment above)."""
    replacePackageMap[oldname] = newname
def _find_module(name, path=None):
    """An importlib reimplementation of imp.find_module (for our purposes).

    Returns a 3-tuple ``(file, file_path, (suffix, mode, kind))`` matching
    imp.find_module's contract, where *kind* is one of the ``_PY_*``/``_C_*``
    constants defined above.  For builtins, frozen modules and packages,
    *file* is None; otherwise it is an open binary file the caller must close.
    Raises ImportError when the module cannot be found.
    """
    # It's necessary to clear the caches for our Finder first, in case any
    # modules are being added/deleted/modified at runtime. In particular,
    # test_modulefinder.py changes file tree contents in a cache-breaking way:
    importlib.machinery.PathFinder.invalidate_caches()
    spec = importlib.machinery.PathFinder.find_spec(name, path)
    if spec is None:
        raise ImportError("No module named {name!r}".format(name=name), name=name)
    # Some special cases:
    if spec.loader is importlib.machinery.BuiltinImporter:
        return None, None, ("", "", _C_BUILTIN)
    if spec.loader is importlib.machinery.FrozenImporter:
        return None, None, ("", "", _PY_FROZEN)
    file_path = spec.origin
    if spec.loader.is_package(name):
        # Packages are reported by directory, not by their __init__ file.
        return None, os.path.dirname(file_path), ("", "", _PKG_DIRECTORY)
    if isinstance(spec.loader, importlib.machinery.SourceFileLoader):
        kind = _PY_SOURCE
    elif isinstance(spec.loader, importlib.machinery.ExtensionFileLoader):
        kind = _C_EXTENSION
    elif isinstance(spec.loader, importlib.machinery.SourcelessFileLoader):
        kind = _PY_COMPILED
    else:  # Should never happen.
        return None, None, ("", "", _SEARCH_ERROR)
    file = io.open_code(file_path)
    suffix = os.path.splitext(file_path)[-1]
    return file, file_path, (suffix, "rb", kind)
class Module:
    """A module (or package) discovered by ModuleFinder.

    Mirrors the dunder attributes of a real module object (__name__,
    __file__, __path__, __code__) without ever importing it.
    """
    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned in the module, including those pulled in
        # through star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports performed by this module that could not be resolved,
        # i.e. a star-import from a non-Python module.
        self.starimports = {}
    def __repr__(self):
        parts = [repr(self.__name__)]
        if self.__file__ is not None:
            parts.append(repr(self.__file__))
        if self.__path__ is not None:
            parts.append(repr(self.__path__))
        return "Module(%s)" % ", ".join(parts)
class ModuleFinder:
    """Find the modules used by a script without importing them.

    Imports are discovered by scanning compiled bytecode for IMPORT_NAME
    opcodes; discovered modules are recorded in self.modules, and names
    that could not be imported in self.badmodules.
    """
    def __init__(self, path=None, debug=0, excludes=None, replace_paths=None):
        """path: module search path (defaults to sys.path);
        debug: verbosity for the msg*() tracing helpers;
        excludes: fully qualified module names to skip entirely;
        replace_paths: (oldprefix, newprefix) pairs applied to co_filename
        in scanned code objects.
        """
        if path is None:
            path = sys.path
        self.path = path
        self.modules = {}
        self.badmodules = {}
        self.debug = debug
        self.indent = 0
        self.excludes = excludes if excludes is not None else []
        self.replace_paths = replace_paths if replace_paths is not None else []
        self.processed_paths = [] # Used in debugging only
    def msg(self, level, str, *args):
        """Emit one debug trace line if *level* is within the debug threshold."""
        if level <= self.debug:
            for i in range(self.indent):
                print(" ", end=' ')
            print(str, end=' ')
            for arg in args:
                print(repr(arg), end=' ')
            print()
    def msgin(self, *args):
        """Trace entry into a nested call: indent one level, then log."""
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent + 1
            self.msg(*args)
    def msgout(self, *args):
        """Trace exit from a nested call: dedent one level, then log."""
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent - 1
            self.msg(*args)
    def run_script(self, pathname):
        """Scan the script at *pathname* as the __main__ module."""
        self.msg(2, "run_script", pathname)
        with io.open_code(pathname) as fp:
            stuff = ("", "rb", _PY_SOURCE)
            self.load_module('__main__', fp, pathname, stuff)
    def load_file(self, pathname):
        """Scan a single source file, registered under its base name."""
        dir, name = os.path.split(pathname)
        name, ext = os.path.splitext(name)
        with io.open_code(pathname) as fp:
            stuff = (ext, "rb", _PY_SOURCE)
            self.load_module(name, fp, pathname, stuff)
    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """Simulate 'import name' (or 'from name import fromlist') as seen
        from module *caller*; *level* mirrors __import__'s relative level."""
        self.msg(3, "import_hook", name, caller, fromlist, level)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        if not fromlist:
            return q
        if m.__path__:
            self.ensure_fromlist(m, fromlist)
        return None
    def determine_parent(self, caller, level=-1):
        """Return the package Module that an import in *caller* is relative
        to, or None for absolute imports."""
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                # caller is itself a package: it counts as its own parent.
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError("relative importpath too deep")
            # Strip one trailing component per remaining level.
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None
    def find_head_package(self, parent, name):
        """Import the first dotted component of *name*; return (module, rest).
        Falls back from a parent-relative lookup to a top-level one."""
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            i = name.find('.')
            head = name[:i]
            tail = name[i+1:]
        else:
            head = name
            tail = ""
        if parent:
            qname = "%s.%s" % (parent.__name__, head)
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError("No module named " + qname)
    def load_tail(self, q, tail):
        """Import each remaining dotted component of *tail* beneath *q*."""
        self.msgin(4, "load_tail", q, tail)
        m = q
        while tail:
            i = tail.find('.')
            if i < 0: i = len(tail)
            head, tail = tail[:i], tail[i+1:]
            mname = "%s.%s" % (m.__name__, head)
            m = self.import_module(head, mname, m)
            if not m:
                self.msgout(4, "raise ImportError: No module named", mname)
                raise ImportError("No module named " + mname)
        self.msgout(4, "load_tail ->", m)
        return m
    def ensure_fromlist(self, m, fromlist, recursive=0):
        """Import the submodules named in *fromlist* from package *m*;
        a "*" entry expands to all discoverable submodules (one level)."""
        self.msg(4, "ensure_fromlist", m, fromlist, recursive)
        for sub in fromlist:
            if sub == "*":
                if not recursive:
                    all = self.find_all_submodules(m)
                    if all:
                        self.ensure_fromlist(m, all, 1)
            elif not hasattr(m, sub):
                subname = "%s.%s" % (m.__name__, sub)
                submod = self.import_module(sub, subname, m)
                if not submod:
                    raise ImportError("No module named " + subname)
    def find_all_submodules(self, m):
        """Return the names of all submodules found on package m's __path__."""
        if not m.__path__:
            return
        modules = {}
        # 'suffixes' used to be a list hardcoded to [".py", ".pyc"].
        # But we must also collect Python extension modules - although
        # we cannot separate normal dlls from Python extensions.
        suffixes = []
        suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
        suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
        suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
        for dir in m.__path__:
            try:
                names = os.listdir(dir)
            except OSError:
                self.msg(2, "can't list directory", dir)
                continue
            for name in names:
                mod = None
                for suff in suffixes:
                    n = len(suff)
                    if name[-n:] == suff:
                        mod = name[:-n]
                        break
                if mod and mod != "__init__":
                    modules[mod] = mod
        return modules.keys()
    def import_module(self, partname, fqname, parent):
        """Locate and scan a single module; return its Module object, or
        None if it cannot be imported.  Results are cached in self.modules
        and failures in self.badmodules."""
        self.msgin(3, "import_module", partname, fqname, parent)
        try:
            m = self.modules[fqname]
        except KeyError:
            pass
        else:
            self.msgout(3, "import_module ->", m)
            return m
        if fqname in self.badmodules:
            self.msgout(3, "import_module -> None")
            return None
        if parent and parent.__path__ is None:
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                                                   parent and parent.__path__, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None
        try:
            m = self.load_module(fqname, fp, pathname, stuff)
        finally:
            if fp:
                fp.close()
        if parent:
            setattr(parent, partname, m)
        self.msgout(3, "import_module ->", m)
        return m
    def load_module(self, fqname, fp, pathname, file_info):
        """Compile (or unmarshal) the module's code object and scan it for
        imports; packages are delegated to load_package()."""
        suffix, mode, type = file_info
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == _PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == _PY_SOURCE:
            co = compile(fp.read(), pathname, 'exec')
        elif type == _PY_COMPILED:
            try:
                data = fp.read()
                importlib._bootstrap_external._classify_pyc(data, fqname, {})
            except ImportError as exc:
                self.msgout(2, "raise ImportError: " + str(exc), pathname)
                raise
            # Skip the 16-byte pyc header and unmarshal the code object.
            co = marshal.loads(memoryview(data)[16:])
        else:
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m
    def _add_badmodule(self, name, caller):
        """Record that *name* failed to import, as seen from *caller*
        ("-" when there is no caller)."""
        if name not in self.badmodules:
            self.badmodules[name] = {}
        if caller:
            self.badmodules[name][caller.__name__] = 1
        else:
            self.badmodules[name]["-"] = 1
    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        # wrapper for self.import_hook() that won't raise ImportError
        if name in self.badmodules:
            self._add_badmodule(name, caller)
            return
        try:
            self.import_hook(name, caller, level=level)
        except ImportError as msg:
            self.msg(2, "ImportError:", str(msg))
            self._add_badmodule(name, caller)
        except SyntaxError as msg:
            self.msg(2, "SyntaxError:", str(msg))
            self._add_badmodule(name, caller)
        else:
            if fromlist:
                for sub in fromlist:
                    fullname = name + "." + sub
                    if fullname in self.badmodules:
                        self._add_badmodule(fullname, caller)
                        continue
                    try:
                        self.import_hook(name, caller, [sub], level=level)
                    except ImportError as msg:
                        self.msg(2, "ImportError:", str(msg))
                        self._add_badmodule(fullname, caller)
    def scan_opcodes(self, co):
        # Scan the code, and yield 'interesting' opcode combinations:
        # "store" for global-name assignments, and "absolute_import" /
        # "relative_import" for IMPORT_NAME preceded by its two LOAD_CONST
        # operands (level, fromlist).
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        opargs = [(op, arg) for _, op, arg in dis._unpack_opargs(code)
                  if op != EXTENDED_ARG]
        for i, (op, oparg) in enumerate(opargs):
            if op in STORE_OPS:
                yield "store", (names[oparg],)
                continue
            if (op == IMPORT_NAME and i >= 2
                    and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST):
                level = consts[opargs[i-2][1]]
                fromlist = consts[opargs[i-1][1]]
                if level == 0: # absolute import
                    yield "absolute_import", (fromlist, names[oparg])
                else: # relative import
                    yield "relative_import", (level, fromlist, names[oparg])
                continue
    def scan_code(self, co, m):
        """Walk code object *co* (and, recursively, any code objects in its
        constants), recording global stores and following every import it
        performs on behalf of module *m*."""
        code = co.co_code
        scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what == "absolute_import":
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                self._safe_import_hook(name, m, fromlist, level=0)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    parent = self.determine_parent(m, level=level)
                    self._safe_import_hook(parent.__name__, None, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)
        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)
    def load_package(self, fqname, pathname):
        """Scan a package: register it, set up __path__ (honoring
        replacePackageMap and packagePathMap), then load its __init__."""
        self.msgin(2, "load_package", fqname, pathname)
        newname = replacePackageMap.get(fqname)
        if newname:
            fqname = newname
        m = self.add_module(fqname)
        m.__file__ = pathname
        m.__path__ = [pathname]
        # As per comment at top of file, simulate runtime __path__ additions.
        m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
        fp, buf, stuff = self.find_module("__init__", m.__path__)
        try:
            self.load_module(fqname, fp, buf, stuff)
            self.msgout(2, "load_package ->", m)
            return m
        finally:
            if fp:
                fp.close()
    def add_module(self, fqname):
        """Return the Module registered under *fqname*, creating it if new."""
        if fqname in self.modules:
            return self.modules[fqname]
        self.modules[fqname] = m = Module(fqname)
        return m
    def find_module(self, name, path, parent=None):
        """imp.find_module() work-alike that honors self.excludes and
        resolves builtins when no explicit *path* is given."""
        if parent is not None:
            # assert path is not None
            fullname = parent.__name__+'.'+name
        else:
            fullname = name
        if fullname in self.excludes:
            self.msgout(3, "find_module -> Excluded", fullname)
            raise ImportError(name)
        if path is None:
            if name in sys.builtin_module_names:
                return (None, None, ("", "", _C_BUILTIN))
            path = self.path
        return _find_module(name, path)
    def report(self):
        """Print a report to stdout, listing the found modules with their
        paths, as well as modules that are missing, or seem to be missing.
        """
        print()
        print(" %-25s %s" % ("Name", "File"))
        print(" %-25s %s" % ("----", "----"))
        # Print modules found
        keys = sorted(self.modules.keys())
        for key in keys:
            m = self.modules[key]
            if m.__path__:
                print("P", end=' ')
            else:
                print("m", end=' ')
            print("%-25s" % key, m.__file__ or "")
        # Print missing modules
        missing, maybe = self.any_missing_maybe()
        if missing:
            print()
            print("Missing modules:")
            for name in missing:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))
        # Print modules that may be missing, but then again, maybe not...
        if maybe:
            print()
            print("Submodules that appear to be missing, but could also be", end=' ')
            print("global names in the parent package:")
            for name in maybe:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))
    def any_missing(self):
        """Return a list of modules that appear to be missing. Use
        any_missing_maybe() if you want to know which modules are
        certain to be missing, and which *may* be missing.
        """
        missing, maybe = self.any_missing_maybe()
        return missing + maybe
    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.
        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                continue
            i = name.rfind(".")
            if i < 0:
                missing.append(name)
                continue
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # It's not a global in the package, the package didn't
                    # do funny star imports, it's very likely to be missing.
                    # The symbol could be inserted into the package from the
                    # outside, but since that's not good style we simply list
                    # it missing.
                    missing.append(name)
            else:
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe
    def replace_paths_in_code(self, co):
        """Return a copy of code object *co* with co_filename prefixes
        rewritten per self.replace_paths, applied recursively to nested
        code objects."""
        new_filename = original_filename = os.path.normpath(co.co_filename)
        for f, r in self.replace_paths:
            if original_filename.startswith(f):
                new_filename = r + original_filename[len(f):]
                break
        if self.debug and original_filename not in self.processed_paths:
            if new_filename != original_filename:
                self.msgout(2, "co_filename %r changed to %r" \
                            % (original_filename,new_filename,))
            else:
                self.msgout(2, "co_filename %r remains unchanged" \
                            % (original_filename,))
            self.processed_paths.append(original_filename)
        consts = list(co.co_consts)
        for i in range(len(consts)):
            if isinstance(consts[i], type(co)):
                consts[i] = self.replace_paths_in_code(consts[i])
        return co.replace(co_consts=tuple(consts), co_filename=new_filename)
def test():
    """Command-line driver: modulefinder.py [options] script [extra args].

    Options: -d raise debug level, -q quiet, -m treat remaining args as
    module names, -p PATH prepend entries to the search path, -x NAME
    exclude a module.  Returns the ModuleFinder for interactive use.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error as msg:
        print(msg)
        return
    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)
    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]
    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print("path:")
        for item in path:
            print(" ", repr(item))
    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            # A later "-m" switches the remaining arguments to module mode.
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf # for -i debugging
if __name__ == '__main__':
    # Demo entry point: run the CLI driver; Ctrl-C aborts cleanly.
    try:
        mf = test()
    except KeyboardInterrupt:
        print("\n[interrupted]")
| 24,401 | 34.571429 | 86 | py |
pydeps | pydeps-master/docs/module-finder-archive/mf_27.py | """Find modules used by a script, using introspection."""
from __future__ import generators
import dis
import imp
import marshal
import os
import sys
import types
import struct
# Open source files in universal-newline mode ("U") when this Python build
# supports it (detected via the file "newlines" attribute on stdout).
if hasattr(sys.__stdout__, "newlines"):
    READ_MODE = "U" # universal line endings
else:
    # Python < 2.3 compatibility, no longer strictly required
    READ_MODE = "r"
# Bytecode opcodes (and the set of store opcodes) the scanner cares about.
LOAD_CONST = dis.opmap['LOAD_CONST']
IMPORT_NAME = dis.opmap['IMPORT_NAME']
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
STORE_OPS = STORE_NAME, STORE_GLOBAL
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
EXTENDED_ARG = dis.EXTENDED_ARG
def _unpack_opargs(code):
    """Yield (offset, op, arg) triples for a Python 2 bytecode string,
    folding EXTENDED_ARG prefixes into the following instruction's arg
    (equivalent to Python 3's dis._unpack_opargs)."""
    # enumerate() is not an option, since we sometimes process
    # multiple elements on a single pass through the loop
    extended_arg = 0
    n = len(code)
    i = 0
    while i < n:
        op = ord(code[i])  # code is a str in Python 2; indexing yields 1-char strs
        offset = i
        i = i+1
        arg = None
        if op >= HAVE_ARGUMENT:
            # Two-byte little-endian argument, extended by any pending
            # EXTENDED_ARG prefix.
            arg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
            extended_arg = 0
            i = i+2
        if op == EXTENDED_ARG:
            extended_arg = arg*65536
        yield (offset, op, arg)
# Modulefinder does a good job at simulating Python's import machinery, but
# it can not handle __path__ modifications packages make at runtime.
# Therefore there is a mechanism whereby you can register extra paths in
# this map for a package, and it will be honored.
# Note: this is a mapping from package names to lists of extra paths.
packagePathMap = {}
# A Public interface
def AddPackagePath(packagename, path):
    """Register *path* as an extra runtime __path__ entry for *packagename*."""
    paths = packagePathMap.get(packagename, [])
    paths.append(path)
    packagePathMap[packagename] = paths
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around the
# way the _xmlplus package injects itself under the name "xml" into
# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    """Record that package *oldname* masquerades as *newname* at runtime."""
    replacePackageMap[oldname] = newname
class Module:
    """A module (or package) discovered by ModuleFinder, mirroring the
    dunder attributes of a real module without ever importing it."""
    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned in the module, including names brought in
        # via star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports that could not be resolved, i.e. a star-import
        # from a non-Python module.
        self.starimports = {}
    def __repr__(self):
        pieces = [repr(self.__name__)]
        if self.__file__ is not None:
            pieces.append(repr(self.__file__))
        if self.__path__ is not None:
            pieces.append(repr(self.__path__))
        return "Module(" + ", ".join(pieces) + ")"
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
if path is None:
path = sys.path
self.path = path
self.modules = {}
self.badmodules = {}
self.debug = debug
self.indent = 0
self.excludes = excludes
self.replace_paths = replace_paths
self.processed_paths = [] # Used in debugging only
def msg(self, level, str, *args):
if level <= self.debug:
for i in range(self.indent):
print " ",
print str,
for arg in args:
print repr(arg),
print
def msgin(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent + 1
self.msg(*args)
def msgout(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent - 1
self.msg(*args)
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
with open(pathname, READ_MODE) as fp:
stuff = ("", "r", imp.PY_SOURCE)
self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
with open(pathname, READ_MODE) as fp:
stuff = (ext, "r", imp.PY_SOURCE)
self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None, level=-1):
self.msg(3, "import_hook", name, caller, fromlist, level)
parent = self.determine_parent(caller, level=level)
q, tail = self.find_head_package(parent, name)
m = self.load_tail(q, tail)
if not fromlist:
return q
if m.__path__:
self.ensure_fromlist(m, fromlist)
return None
def determine_parent(self, caller, level=-1):
self.msgin(4, "determine_parent", caller, level)
if not caller or level == 0:
self.msgout(4, "determine_parent -> None")
return None
pname = caller.__name__
if level >= 1: # relative import
if caller.__path__:
level -= 1
if level == 0:
parent = self.modules[pname]
assert parent is caller
self.msgout(4, "determine_parent ->", parent)
return parent
if pname.count(".") < level:
raise ImportError, "relative importpath too deep"
pname = ".".join(pname.split(".")[:-level])
parent = self.modules[pname]
self.msgout(4, "determine_parent ->", parent)
return parent
if caller.__path__:
parent = self.modules[pname]
assert caller is parent
self.msgout(4, "determine_parent ->", parent)
return parent
if '.' in pname:
i = pname.rfind('.')
pname = pname[:i]
parent = self.modules[pname]
assert parent.__name__ == pname
self.msgout(4, "determine_parent ->", parent)
return parent
self.msgout(4, "determine_parent -> None")
return None
def find_head_package(self, parent, name):
self.msgin(4, "find_head_package", parent, name)
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
if parent:
qname = head
parent = None
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
self.msgout(4, "raise ImportError: No module named", qname)
raise ImportError, "No module named " + qname
def load_tail(self, q, tail):
self.msgin(4, "load_tail", q, tail)
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_module(head, mname, m)
if not m:
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError, "No module named " + mname
self.msgout(4, "load_tail ->", m)
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
self.msg(4, "ensure_fromlist", m, fromlist, recursive)
for sub in fromlist:
if sub == "*":
if not recursive:
all = self.find_all_submodules(m)
if all:
self.ensure_fromlist(m, all, 1)
elif not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.import_module(sub, subname, m)
if not submod:
raise ImportError, "No module named " + subname
def find_all_submodules(self, m):
if not m.__path__:
return
modules = {}
# 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
# But we must also collect Python extension modules - although
# we cannot separate normal dlls from Python extensions.
suffixes = []
for triple in imp.get_suffixes():
suffixes.append(triple[0])
for dir in m.__path__:
try:
names = os.listdir(dir)
except os.error:
self.msg(2, "can't list directory", dir)
continue
for name in names:
mod = None
for suff in suffixes:
n = len(suff)
if name[-n:] == suff:
mod = name[:-n]
break
if mod and mod != "__init__":
modules[mod] = mod
return modules.keys()
def import_module(self, partname, fqname, parent):
self.msgin(3, "import_module", partname, fqname, parent)
try:
m = self.modules[fqname]
except KeyError:
pass
else:
self.msgout(3, "import_module ->", m)
return m
if fqname in self.badmodules:
self.msgout(3, "import_module -> None")
return None
if parent and parent.__path__ is None:
self.msgout(3, "import_module -> None")
return None
try:
fp, pathname, stuff = self.find_module(partname,
parent and parent.__path__, parent)
except ImportError:
self.msgout(3, "import_module ->", None)
return None
try:
m = self.load_module(fqname, fp, pathname, stuff)
finally:
if fp: fp.close()
if parent:
setattr(parent, partname, m)
self.msgout(3, "import_module ->", m)
return m
def load_module(self, fqname, fp, pathname, file_info):
suffix, mode, type = file_info
self.msgin(2, "load_module", fqname, fp and "fp", pathname)
if type == imp.PKG_DIRECTORY:
m = self.load_package(fqname, pathname)
self.msgout(2, "load_module ->", m)
return m
if type == imp.PY_SOURCE:
co = compile(fp.read()+'\n', pathname, 'exec')
elif type == imp.PY_COMPILED:
if fp.read(4) != imp.get_magic():
self.msgout(2, "raise ImportError: Bad magic number", pathname)
raise ImportError, "Bad magic number in %s" % pathname
fp.read(4)
co = marshal.load(fp)
else:
co = None
m = self.add_module(fqname)
m.__file__ = pathname
if co:
if self.replace_paths:
co = self.replace_paths_in_code(co)
m.__code__ = co
self.scan_code(co, m)
self.msgout(2, "load_module ->", m)
return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
def _safe_import_hook(self, name, caller, fromlist, level=-1):
# wrapper for self.import_hook() that won't raise ImportError
if name in self.badmodules:
self._add_badmodule(name, caller)
return
try:
self.import_hook(name, caller, level=level)
except ImportError, msg:
self.msg(2, "ImportError:", str(msg))
self._add_badmodule(name, caller)
else:
if fromlist:
for sub in fromlist:
if sub in self.badmodules:
self._add_badmodule(sub, caller)
continue
try:
self.import_hook(name, caller, [sub], level=level)
except ImportError, msg:
self.msg(2, "ImportError:", str(msg))
fullname = name + "." + sub
self._add_badmodule(fullname, caller)
def scan_opcodes(self, co,
unpack = struct.unpack):
# Scan the code, and yield 'interesting' opcode combinations
# Version for Python 2.4 and older
code = co.co_code
names = co.co_names
consts = co.co_consts
opargs = [(op, arg) for _, op, arg in _unpack_opargs(code)
if op != EXTENDED_ARG]
for i, (op, oparg) in enumerate(opargs):
if c in STORE_OPS:
yield "store", (names[oparg],)
continue
if (op == IMPORT_NAME and i >= 1
and opargs[i-1][0] == LOAD_CONST):
fromlist = consts[opargs[i-1][1]]
yield "import", (fromlist, names[oparg])
continue
def scan_opcodes_25(self, co):
# Scan the code, and yield 'interesting' opcode combinations
code = co.co_code
names = co.co_names
consts = co.co_consts
opargs = [(op, arg) for _, op, arg in _unpack_opargs(code)
if op != EXTENDED_ARG]
for i, (op, oparg) in enumerate(opargs):
if op in STORE_OPS:
yield "store", (names[oparg],)
continue
if (op == IMPORT_NAME and i >= 2
and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST):
level = consts[opargs[i-2][1]]
fromlist = consts[opargs[i-1][1]]
if level == -1: # normal import
yield "import", (fromlist, names[oparg])
elif level == 0: # absolute import
yield "absolute_import", (fromlist, names[oparg])
else: # relative import
yield "relative_import", (level, fromlist, names[oparg])
continue
def scan_code(self, co, m):
code = co.co_code
if sys.version_info >= (2, 5):
scanner = self.scan_opcodes_25
else:
scanner = self.scan_opcodes
for what, args in scanner(co):
if what == "store":
name, = args
m.globalnames[name] = 1
elif what in ("import", "absolute_import"):
fromlist, name = args
have_star = 0
if fromlist is not None:
if "*" in fromlist:
have_star = 1
fromlist = [f for f in fromlist if f != "*"]
if what == "absolute_import": level = 0
else: level = -1
self._safe_import_hook(name, m, fromlist, level=level)
if have_star:
# We've encountered an "import *". If it is a Python module,
# the code has already been parsed and we can suck out the
# global names.
mm = None
if m.__path__:
# At this point we don't know whether 'name' is a
# submodule of 'm' or a global module. Let's just try
# the full name first.
mm = self.modules.get(m.__name__ + "." + name)
if mm is None:
mm = self.modules.get(name)
if mm is not None:
m.globalnames.update(mm.globalnames)
m.starimports.update(mm.starimports)
if mm.__code__ is None:
m.starimports[name] = 1
else:
m.starimports[name] = 1
elif what == "relative_import":
level, fromlist, name = args
if name:
self._safe_import_hook(name, m, fromlist, level=level)
else:
parent = self.determine_parent(m, level=level)
self._safe_import_hook(parent.__name__, None, fromlist, level=0)
else:
# We don't expect anything else from the generator.
raise RuntimeError(what)
for c in co.co_consts:
if isinstance(c, type(co)):
self.scan_code(c, m)
def load_package(self, fqname, pathname):
self.msgin(2, "load_package", fqname, pathname)
newname = replacePackageMap.get(fqname)
if newname:
fqname = newname
m = self.add_module(fqname)
m.__file__ = pathname
m.__path__ = [pathname]
# As per comment at top of file, simulate runtime __path__ additions.
m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
fp, buf, stuff = self.find_module("__init__", m.__path__)
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
if fp:
fp.close()
return m
def add_module(self, fqname):
if fqname in self.modules:
return self.modules[fqname]
self.modules[fqname] = m = Module(fqname)
return m
def find_module(self, name, path, parent=None):
if parent is not None:
# assert path is not None
fullname = parent.__name__+'.'+name
else:
fullname = name
if fullname in self.excludes:
self.msgout(3, "find_module -> Excluded", fullname)
raise ImportError, name
if path is None:
if name in sys.builtin_module_names:
return (None, None, ("", "", imp.C_BUILTIN))
path = self.path
return imp.find_module(name, path)
def report(self):
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print
print " %-25s %s" % ("Name", "File")
print " %-25s %s" % ("----", "----")
# Print modules found
keys = self.modules.keys()
keys.sort()
for key in keys:
m = self.modules[key]
if m.__path__:
print "P",
else:
print "m",
print "%-25s" % key, m.__file__ or ""
# Print missing modules
missing, maybe = self.any_missing_maybe()
if missing:
print
print "Missing modules:"
for name in missing:
mods = self.badmodules[name].keys()
mods.sort()
print "?", name, "imported from", ', '.join(mods)
# Print modules that may be missing, but then again, maybe not...
if maybe:
print
print "Submodules that appear to be missing, but could also be",
print "global names in the parent package:"
for name in maybe:
mods = self.badmodules[name].keys()
mods.sort()
print "?", name, "imported from", ', '.join(mods)
def any_missing(self):
"""Return a list of modules that appear to be missing. Use
any_missing_maybe() if you want to know which modules are
certain to be missing, and which *may* be missing.
"""
missing, maybe = self.any_missing_maybe()
return missing + maybe
def any_missing_maybe(self):
"""Return two lists, one with modules that are certainly missing
and one with modules that *may* be missing. The latter names could
either be submodules *or* just global names in the package.
The reason it can't always be determined is that it's impossible to
tell which names are imported when "from module import *" is done
with an extension module, short of actually importing it.
"""
missing = []
maybe = []
for name in self.badmodules:
if name in self.excludes:
continue
i = name.rfind(".")
if i < 0:
missing.append(name)
continue
subname = name[i+1:]
pkgname = name[:i]
pkg = self.modules.get(pkgname)
if pkg is not None:
if pkgname in self.badmodules[name]:
# The package tried to import this module itself and
# failed. It's definitely missing.
missing.append(name)
elif subname in pkg.globalnames:
# It's a global in the package: definitely not missing.
pass
elif pkg.starimports:
# It could be missing, but the package did an "import *"
# from a non-Python module, so we simply can't be sure.
maybe.append(name)
else:
# It's not a global in the package, the package didn't
# do funny star imports, it's very likely to be missing.
# The symbol could be inserted into the package from the
# outside, but since that's not good style we simply list
# it missing.
missing.append(name)
else:
missing.append(name)
missing.sort()
maybe.sort()
return missing, maybe
def replace_paths_in_code(self, co):
new_filename = original_filename = os.path.normpath(co.co_filename)
for f, r in self.replace_paths:
if original_filename.startswith(f):
new_filename = r + original_filename[len(f):]
break
if self.debug and original_filename not in self.processed_paths:
if new_filename != original_filename:
self.msgout(2, "co_filename %r changed to %r" \
% (original_filename,new_filename,))
else:
self.msgout(2, "co_filename %r remains unchanged" \
% (original_filename,))
self.processed_paths.append(original_filename)
consts = list(co.co_consts)
for i in range(len(consts)):
if isinstance(consts[i], type(co)):
consts[i] = self.replace_paths_in_code(consts[i])
return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
co.co_flags, co.co_code, tuple(consts), co.co_names,
co.co_varnames, new_filename, co.co_name,
co.co_firstlineno, co.co_lnotab,
co.co_freevars, co.co_cellvars)
def test():
    """Command-line driver (Python 2 syntax): parse options, run the finder.

    Options: -d increase debug, -q quiet, -m treat remaining args as module
    names, -p PATH prepend search paths, -x NAME exclude a module.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error, msg:
        print msg
        return

    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)

    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]

    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print "path:"
        for item in path:
            print "   ", repr(item)

    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    # Args after the script: module names (after a literal '-m') or files.
    for arg in args[1:]:
        if arg == '-m':
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
if __name__ == '__main__':
    # Run the CLI; a Ctrl-C prints a short notice instead of a traceback.
    try:
        mf = test()
    except KeyboardInterrupt:
        print "\n[interrupt]"
| 24,461 | 34.973529 | 86 | py |
pydeps | pydeps-master/docs/module-finder-archive/mf_39.py | """Find modules used by a script, using introspection."""
import dis
import importlib._bootstrap_external
import importlib.machinery
import marshal
import os
import io
import sys
LOAD_CONST = dis.opmap['LOAD_CONST']
IMPORT_NAME = dis.opmap['IMPORT_NAME']
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
STORE_OPS = STORE_NAME, STORE_GLOBAL
EXTENDED_ARG = dis.EXTENDED_ARG
# Old imp constants:
_SEARCH_ERROR = 0
_PY_SOURCE = 1
_PY_COMPILED = 2
_C_EXTENSION = 3
_PKG_DIRECTORY = 5
_C_BUILTIN = 6
_PY_FROZEN = 7
# Modulefinder does a good job at simulating Python's, but it can not
# handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping is lists of paths.
packagePathMap = {}
# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra directory to search for *packagename*'s submodules."""
    extra_dirs = packagePathMap.setdefault(packagename, [])
    extra_dirs.append(path)
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around
# situations in which a package injects itself under the name
# of another package into sys.modules at runtime by calling
# ReplacePackage("real_package_name", "faked_package_name")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    """Record that modules imported as *oldname* should be loaded as *newname*."""
    replacePackageMap.update({oldname: newname})
def _find_module(name, path=None):
    """An importlib reimplementation of imp.find_module (for our purposes).

    Returns an (open file or None, path, (suffix, mode, kind)) triple, where
    kind is one of the legacy imp-style constants defined above.
    """
    # It's necessary to clear the caches for our Finder first, in case any
    # modules are being added/deleted/modified at runtime. In particular,
    # test_modulefinder.py changes file tree contents in a cache-breaking way:
    importlib.machinery.PathFinder.invalidate_caches()

    spec = importlib.machinery.PathFinder.find_spec(name, path)
    if spec is None:
        raise ImportError("No module named {name!r}".format(name=name), name=name)

    # Built-in and frozen modules have no file on disk.
    if spec.loader is importlib.machinery.BuiltinImporter:
        return None, None, ("", "", _C_BUILTIN)
    if spec.loader is importlib.machinery.FrozenImporter:
        return None, None, ("", "", _PY_FROZEN)

    file_path = spec.origin

    # A package: report its directory, not a file.
    if spec.loader.is_package(name):
        return None, os.path.dirname(file_path), ("", "", _PKG_DIRECTORY)

    loader = spec.loader
    if isinstance(loader, importlib.machinery.SourceFileLoader):
        kind = _PY_SOURCE
    elif isinstance(loader, importlib.machinery.ExtensionFileLoader):
        kind = _C_EXTENSION
    elif isinstance(loader, importlib.machinery.SourcelessFileLoader):
        kind = _PY_COMPILED
    else:  # Should never happen.
        return None, None, ("", "", _SEARCH_ERROR)

    suffix = os.path.splitext(file_path)[-1]
    return io.open_code(file_path), file_path, (suffix, "rb", kind)
class Module:
    """A module (or package) discovered by ModuleFinder."""

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned to in the module.  This includes names
        # that arrive via star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports performed by this module that could not be
        # resolved, i.e. a star-import from a non-Python module.
        self.starimports = {}

    def __repr__(self):
        parts = [repr(self.__name__)]
        if self.__file__ is not None:
            parts.append(repr(self.__file__))
        if self.__path__ is not None:
            parts.append(repr(self.__path__))
        return "Module(%s)" % ", ".join(parts)
class ModuleFinder:
    """Simulate Python's import machinery to find all modules a script uses.

    Modules found are recorded in self.modules (name -> Module); imports
    that failed are recorded in self.badmodules (name -> {importer: 1}).
    """

    def __init__(self, path=None, debug=0, excludes=None, replace_paths=None):
        # path: module search path (defaults to sys.path).
        # excludes: module names to refuse to import.
        # replace_paths: (old_prefix, new_prefix) pairs for co_filename rewriting.
        if path is None:
            path = sys.path
        self.path = path
        self.modules = {}
        self.badmodules = {}
        self.debug = debug
        self.indent = 0
        self.excludes = excludes if excludes is not None else []
        self.replace_paths = replace_paths if replace_paths is not None else []
        self.processed_paths = []   # Used in debugging only

    def msg(self, level, str, *args):
        """Emit a trace line at the current indent if level <= self.debug."""
        if level <= self.debug:
            for i in range(self.indent):
                print("    ", end=' ')
            print(str, end=' ')
            for arg in args:
                print(repr(arg), end=' ')
            print()

    def msgin(self, *args):
        """Trace entry into a nested operation: indent one level, then msg()."""
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent + 1
            self.msg(*args)

    def msgout(self, *args):
        """Trace exit from a nested operation: dedent one level, then msg()."""
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent - 1
            self.msg(*args)

    def run_script(self, pathname):
        """Load *pathname* as the __main__ module and scan its imports."""
        self.msg(2, "run_script", pathname)
        with io.open_code(pathname) as fp:
            stuff = ("", "rb", _PY_SOURCE)
            self.load_module('__main__', fp, pathname, stuff)

    def load_file(self, pathname):
        """Load a single source file as a module named after its basename."""
        dir, name = os.path.split(pathname)
        name, ext = os.path.splitext(name)
        with io.open_code(pathname) as fp:
            stuff = (ext, "rb", _PY_SOURCE)
            self.load_module(name, fp, pathname, stuff)

    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """Simulate 'import name' (or 'from name import fromlist') by *caller*."""
        self.msg(3, "import_hook", name, caller, fromlist, level)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        if not fromlist:
            return q
        # Only packages can supply submodules named in a fromlist.
        if m.__path__:
            self.ensure_fromlist(m, fromlist)
        return None

    def determine_parent(self, caller, level=-1):
        """Return the package Module an import is relative to, or None."""
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                # Caller is itself a package: one level of relativity is
                # consumed by the package itself.
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError("relative importpath too deep")
            # Climb 'level' package levels up from the caller.
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None

    def find_head_package(self, parent, name):
        """Import the first dotted component of *name*; return (module, rest)."""
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            i = name.find('.')
            head = name[:i]
            tail = name[i+1:]
        else:
            head = name
            tail = ""
        if parent:
            qname = "%s.%s" % (parent.__name__, head)
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            # Not found as a submodule of parent; retry as a top-level module.
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError("No module named " + qname)

    def load_tail(self, q, tail):
        """Import each remaining dotted component of *tail* under package *q*."""
        self.msgin(4, "load_tail", q, tail)
        m = q
        while tail:
            i = tail.find('.')
            if i < 0: i = len(tail)
            head, tail = tail[:i], tail[i+1:]
            mname = "%s.%s" % (m.__name__, head)
            m = self.import_module(head, mname, m)
            if not m:
                self.msgout(4, "raise ImportError: No module named", mname)
                raise ImportError("No module named " + mname)
        self.msgout(4, "load_tail ->", m)
        return m

    def ensure_fromlist(self, m, fromlist, recursive=0):
        """Import every submodule named in *fromlist* from package *m*."""
        self.msg(4, "ensure_fromlist", m, fromlist, recursive)
        for sub in fromlist:
            if sub == "*":
                # Expand '*' once (not recursively) into all submodules.
                if not recursive:
                    all = self.find_all_submodules(m)
                    if all:
                        self.ensure_fromlist(m, all, 1)
            elif not hasattr(m, sub):
                subname = "%s.%s" % (m.__name__, sub)
                submod = self.import_module(sub, subname, m)
                if not submod:
                    raise ImportError("No module named " + subname)

    def find_all_submodules(self, m):
        """Return the names of all submodules found in package *m*'s dirs."""
        if not m.__path__:
            return
        modules = {}
        # 'suffixes' used to be a list hardcoded to [".py", ".pyc"].
        # But we must also collect Python extension modules - although
        # we cannot separate normal dlls from Python extensions.
        suffixes = []
        suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
        suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
        suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
        for dir in m.__path__:
            try:
                names = os.listdir(dir)
            except OSError:
                self.msg(2, "can't list directory", dir)
                continue
            for name in names:
                mod = None
                for suff in suffixes:
                    n = len(suff)
                    if name[-n:] == suff:
                        mod = name[:-n]
                        break
                if mod and mod != "__init__":
                    modules[mod] = mod
        return modules.keys()

    def import_module(self, partname, fqname, parent):
        """Find and load one module; return its Module, or None on failure."""
        self.msgin(3, "import_module", partname, fqname, parent)
        try:
            m = self.modules[fqname]
        except KeyError:
            pass
        else:
            # Already loaded: return the cached Module.
            self.msgout(3, "import_module ->", m)
            return m
        if fqname in self.badmodules:
            self.msgout(3, "import_module -> None")
            return None
        if parent and parent.__path__ is None:
            # Parent is not a package, so it cannot have submodules.
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                                                   parent and parent.__path__, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None

        try:
            m = self.load_module(fqname, fp, pathname, stuff)
        finally:
            if fp:
                fp.close()
        if parent:
            setattr(parent, partname, m)
        self.msgout(3, "import_module ->", m)
        return m

    def load_module(self, fqname, fp, pathname, file_info):
        """Compile/record one found module and scan its bytecode for imports."""
        suffix, mode, type = file_info
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == _PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == _PY_SOURCE:
            co = compile(fp.read(), pathname, 'exec')
        elif type == _PY_COMPILED:
            try:
                data = fp.read()
                importlib._bootstrap_external._classify_pyc(data, fqname, {})
            except ImportError as exc:
                self.msgout(2, "raise ImportError: " + str(exc), pathname)
                raise
            # Skip the 16-byte .pyc header and unmarshal the code object.
            co = marshal.loads(memoryview(data)[16:])
        else:
            # Extension/builtin/frozen: no bytecode to scan.
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m

    def _add_badmodule(self, name, caller):
        """Record that *name* failed to import, tagged with who imported it."""
        if name not in self.badmodules:
            self.badmodules[name] = {}
        if caller:
            self.badmodules[name][caller.__name__] = 1
        else:
            self.badmodules[name]["-"] = 1

    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        # wrapper for self.import_hook() that won't raise ImportError
        if name in self.badmodules:
            self._add_badmodule(name, caller)
            return
        try:
            self.import_hook(name, caller, level=level)
        except ImportError as msg:
            self.msg(2, "ImportError:", str(msg))
            self._add_badmodule(name, caller)
        except SyntaxError as msg:
            self.msg(2, "SyntaxError:", str(msg))
            self._add_badmodule(name, caller)
        else:
            if fromlist:
                for sub in fromlist:
                    fullname = name + "." + sub
                    if fullname in self.badmodules:
                        self._add_badmodule(fullname, caller)
                        continue
                    try:
                        self.import_hook(name, caller, [sub], level=level)
                    except ImportError as msg:
                        self.msg(2, "ImportError:", str(msg))
                        self._add_badmodule(fullname, caller)

    def scan_opcodes(self, co):
        # Scan the code, and yield 'interesting' opcode combinations
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        # EXTENDED_ARG is dropped: _unpack_opargs already folds its value
        # into the following instruction's argument.
        opargs = [(op, arg) for _, op, arg in dis._unpack_opargs(code)
                  if op != EXTENDED_ARG]
        for i, (op, oparg) in enumerate(opargs):
            if op in STORE_OPS:
                yield "store", (names[oparg],)
                continue
            # An import compiles to LOAD_CONST(level), LOAD_CONST(fromlist),
            # IMPORT_NAME(name).
            if (op == IMPORT_NAME and i >= 2
                    and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST):
                level = consts[opargs[i-2][1]]
                fromlist = consts[opargs[i-1][1]]
                if level == 0: # absolute import
                    yield "absolute_import", (fromlist, names[oparg])
                else: # relative import
                    yield "relative_import", (level, fromlist, names[oparg])
                continue

    def scan_code(self, co, m):
        """Walk *co*'s opcodes (and nested code objects), simulating imports
        and recording global-name stores into Module *m*."""
        code = co.co_code
        scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what == "absolute_import":
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                self._safe_import_hook(name, m, fromlist, level=0)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    # 'from . import x' style: import relative to m's package.
                    parent = self.determine_parent(m, level=level)
                    self._safe_import_hook(parent.__name__, None, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)

        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)

    def load_package(self, fqname, pathname):
        """Load a package directory: register it and load its __init__."""
        self.msgin(2, "load_package", fqname, pathname)
        newname = replacePackageMap.get(fqname)
        if newname:
            fqname = newname
        m = self.add_module(fqname)
        m.__file__ = pathname
        m.__path__ = [pathname]

        # As per comment at top of file, simulate runtime __path__ additions.
        m.__path__ = m.__path__ + packagePathMap.get(fqname, [])

        fp, buf, stuff = self.find_module("__init__", m.__path__)
        try:
            self.load_module(fqname, fp, buf, stuff)
            self.msgout(2, "load_package ->", m)
            return m
        finally:
            if fp:
                fp.close()

    def add_module(self, fqname):
        """Return the Module registered under *fqname*, creating it if needed."""
        if fqname in self.modules:
            return self.modules[fqname]
        self.modules[fqname] = m = Module(fqname)
        return m

    def find_module(self, name, path, parent=None):
        """Locate one module, honoring self.excludes; wraps _find_module()."""
        if parent is not None:
            # assert path is not None
            fullname = parent.__name__+'.'+name
        else:
            fullname = name
        if fullname in self.excludes:
            self.msgout(3, "find_module -> Excluded", fullname)
            raise ImportError(name)

        if path is None:
            if name in sys.builtin_module_names:
                return (None, None, ("", "", _C_BUILTIN))

            path = self.path

        return _find_module(name, path)

    def report(self):
        """Print a report to stdout, listing the found modules with their
        paths, as well as modules that are missing, or seem to be missing.
        """
        print()
        print("  %-25s %s" % ("Name", "File"))
        print("  %-25s %s" % ("----", "----"))
        # Print modules found
        keys = sorted(self.modules.keys())
        for key in keys:
            m = self.modules[key]
            if m.__path__:
                print("P", end=' ')
            else:
                print("m", end=' ')
            print("%-25s" % key, m.__file__ or "")

        # Print missing modules
        missing, maybe = self.any_missing_maybe()
        if missing:
            print()
            print("Missing modules:")
            for name in missing:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))
        # Print modules that may be missing, but then again, maybe not...
        if maybe:
            print()
            print("Submodules that appear to be missing, but could also be", end=' ')
            print("global names in the parent package:")
            for name in maybe:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))

    def any_missing(self):
        """Return a list of modules that appear to be missing. Use
        any_missing_maybe() if you want to know which modules are
        certain to be missing, and which *may* be missing.
        """
        missing, maybe = self.any_missing_maybe()
        return missing + maybe

    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.

        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                continue
            i = name.rfind(".")
            if i < 0:
                missing.append(name)
                continue
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # It's not a global in the package, the package didn't
                    # do funny star imports, it's very likely to be missing.
                    # The symbol could be inserted into the package from the
                    # outside, but since that's not good style we simply list
                    # it missing.
                    missing.append(name)
            else:
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe

    def replace_paths_in_code(self, co):
        """Return a copy of code object *co* with co_filename rewritten
        according to self.replace_paths; recurses into nested code objects."""
        new_filename = original_filename = os.path.normpath(co.co_filename)
        for f, r in self.replace_paths:
            if original_filename.startswith(f):
                new_filename = r + original_filename[len(f):]
                break

        if self.debug and original_filename not in self.processed_paths:
            if new_filename != original_filename:
                self.msgout(2, "co_filename %r changed to %r" \
                                    % (original_filename,new_filename,))
            else:
                self.msgout(2, "co_filename %r remains unchanged" \
                                    % (original_filename,))
            self.processed_paths.append(original_filename)

        consts = list(co.co_consts)
        for i in range(len(consts)):
            if isinstance(consts[i], type(co)):
                consts[i] = self.replace_paths_in_code(consts[i])

        return co.replace(co_consts=tuple(consts), co_filename=new_filename)
def test():
    """Command-line driver: parse options, build a ModuleFinder, report.

    Options: -d increase debug, -q quiet, -m treat remaining args as module
    names, -p PATH prepend search paths, -x NAME exclude a module.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error as msg:
        print(msg)
        return

    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for opt, val in opts:
        if opt == '-d':
            debug = debug + 1
        if opt == '-m':
            domods = 1
        if opt == '-p':
            addpath = addpath + val.split(os.pathsep)
        if opt == '-q':
            debug = 0
        if opt == '-x':
            exclude.append(val)

    # Provide default arguments
    script = args[0] if args else "hello.py"

    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print("path:")
        for entry in path:
            print("   ", repr(entry))

    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for extra in args[1:]:
        if extra == '-m':
            domods = 1
            continue
        if domods:
            if extra[-2:] == '.*':
                mf.import_hook(extra[:-2], None, ["*"])
            else:
                mf.import_hook(extra)
        else:
            mf.load_file(extra)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
if __name__ == '__main__':
    # Run the CLI; a Ctrl-C prints a short notice instead of a traceback.
    try:
        mf = test()
    except KeyboardInterrupt:
        print("\n[interrupted]")
| 24,401 | 34.571429 | 86 | py |
pydeps | pydeps-master/docs/module-finder-archive/mf_pydeps_orig.py | """Find modules used by a script, using introspection."""
# This module should be kept compatible with Python 2.2, see PEP 291.
from __future__ import print_function
from __future__ import generators
import dis
import imp
import marshal
import os
import sys
import types
import struct
READ_MODE = "r"
LOAD_CONST = dis.opmap['LOAD_CONST']
IMPORT_NAME = dis.opmap['IMPORT_NAME']
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
STORE_OPS = STORE_NAME, STORE_GLOBAL
EXTENDED_ARG = dis.EXTENDED_ARG
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
# Modulefinder does a good job at simulating Python's, but it can not
# handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping is lists of paths.
packagePathMap = {}
# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra directory to search for *packagename*'s submodules."""
    bucket = packagePathMap.setdefault(packagename, [])
    bucket.append(path)
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around
# situations in which a package injects itself under the name
# of another package into sys.modules at runtime by calling
# ReplacePackage("real_package_name", "faked_package_name")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    """Record that modules imported as *oldname* should be loaded as *newname*."""
    replacePackageMap.update({oldname: newname})
class Module:
    """A module (or package) discovered by ModuleFinder."""

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned to in the module.  This includes names
        # that arrive via star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports performed by this module that could not be
        # resolved, i.e. a star-import from a non-Python module.
        self.starimports = {}

    def __repr__(self):
        parts = [repr(self.__name__)]
        if self.__file__ is not None:
            parts.append(repr(self.__file__))
        if self.__path__ is not None:
            parts.append(repr(self.__path__))
        return "Module(%s)" % ", ".join(parts)
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
if path is None:
path = sys.path
self.path = path
self.modules = {}
self.badmodules = {}
self.debug = debug
self.indent = 0
self.excludes = excludes
self.replace_paths = replace_paths
self.processed_paths = [] # Used in debugging only
    def msg(self, level, str, *args):
        """Emit a trace line at the current indent if level <= self.debug."""
        if level <= self.debug:
            for i in range(self.indent):
                print("    ", end=' ')
            print(str, end=' ')
            for arg in args:
                print(repr(arg), end=' ')
            print()
    def msgin(self, *args):
        """Trace entry into a nested operation: indent one level, then msg()."""
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent + 1
            self.msg(*args)
    def msgout(self, *args):
        """Trace exit from a nested operation: dedent one level, then msg()."""
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent - 1
            self.msg(*args)
    def run_script(self, pathname):
        """Load *pathname* as the __main__ module and scan its imports."""
        self.msg(2, "run_script", pathname)
        with open(pathname) as fp:
            stuff = ("", "r", imp.PY_SOURCE)
            self.load_module('__main__', fp, pathname, stuff)
    def load_file(self, pathname):
        """Load a single source file as a module named after its basename."""
        dir, name = os.path.split(pathname)
        name, ext = os.path.splitext(name)
        with open(pathname) as fp:
            stuff = (ext, "r", imp.PY_SOURCE)
            self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None, level=-1):
self.msg(3, "import_hook: name(%s) caller(%s) fromlist(%s) level(%s)" % (name, caller, fromlist, level))
parent = self.determine_parent(caller, level=level)
q, tail = self.find_head_package(parent, name)
if q.shortname in ('__future__', 'future'): # the future package causes recursion overflow
return None
m = self.load_tail(q, tail)
if not fromlist:
return q
if m.__path__:
self.ensure_fromlist(m, fromlist)
return None
    def determine_parent(self, caller, level=-1):
        """Return the package Module an import is relative to, or None."""
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                # Caller is itself a package: one level of relativity is
                # consumed by the package itself.
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError("relative importpath too deep")
            # Climb 'level' package levels up from the caller.
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None
    def find_head_package(self, parent, name):
        """Import the first dotted component of *name*; return (module, rest)."""
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            i = name.find('.')
            head = name[:i]
            tail = name[i+1:]
        else:
            head = name
            tail = ""
        if parent:
            qname = "%s.%s" % (parent.__name__, head)
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            # Not found as a submodule of parent; retry as a top-level module.
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError("No module named " + qname)
    def load_tail(self, q, tail):
        """Import each remaining dotted component of *tail* under package *q*."""
        self.msgin(4, "load_tail", q, tail)
        m = q
        while tail:
            i = tail.find('.')
            if i < 0: i = len(tail)
            head, tail = tail[:i], tail[i+1:]
            mname = "%s.%s" % (m.__name__, head)
            m = self.import_module(head, mname, m)
            if not m:
                self.msgout(4, "raise ImportError: No module named", mname)
                raise ImportError("No module named " + mname)
        self.msgout(4, "load_tail ->", m)
        return m
    def ensure_fromlist(self, m, fromlist, recursive=0):
        """Import every submodule named in *fromlist* from package *m*."""
        self.msg(4, "ensure_fromlist", m, fromlist, recursive)
        for sub in fromlist:
            if sub == "*":
                # Expand '*' once (not recursively) into all submodules.
                if not recursive:
                    all = self.find_all_submodules(m)
                    if all:
                        self.ensure_fromlist(m, all, 1)
            elif not hasattr(m, sub):
                subname = "%s.%s" % (m.__name__, sub)
                submod = self.import_module(sub, subname, m)
                if not submod:
                    raise ImportError("No module named " + subname)
    def find_all_submodules(self, m):
        """Return the names of all submodules found in package *m*'s dirs."""
        if not m.__path__:
            return
        modules = {}
        # 'suffixes' used to be a list hardcoded to [".py", ".pyc"].
        # But we must also collect Python extension modules - although
        # we cannot separate normal dlls from Python extensions.
        suffixes = []
        for triple in imp.get_suffixes():
            suffixes.append(triple[0])
        for dir in m.__path__:
            try:
                names = os.listdir(dir)
            except os.error:
                self.msg(2, "can't list directory", dir)
                continue
            for name in names:
                mod = None
                for suff in suffixes:
                    n = len(suff)
                    if name[-n:] == suff:
                        mod = name[:-n]
                        break
                if mod and mod != "__init__":
                    modules[mod] = mod
        return modules.keys()
    def import_module(self, partname, fqname, parent):
        """Find and load one module; return its Module, or None on failure."""
        self.msgin(3, "import_module: partname(%s) fqname(%s) parent(%s)" % (partname, fqname, parent))
        try:
            m = self.modules[fqname]
        except KeyError:
            pass
        else:
            # Already loaded: return the cached Module.
            self.msgout(3, "import_module ->", m)
            return m
        if fqname in self.badmodules:
            self.msgout(3, "import_module -> None")
            return None
        if parent and parent.__path__ is None:
            # Parent is not a package, so it cannot have submodules.
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                                                   parent and parent.__path__, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None
        try:
            m = self.load_module(fqname, fp, pathname, stuff)
        finally:
            if fp:
                fp.close()
        if parent:
            setattr(parent, partname, m)
        self.msgout(3, "import_module ->", m)
        return m
    def load_module(self, fqname, fp, pathname, file_info):
        """Compile/record one found module and scan its bytecode for imports.

        file_info is the (suffix, mode, kind) triple produced by find_module.
        """
        # fqname = dotted module name we're loading
        suffix, mode, kind = file_info
        kstr = {
            imp.PKG_DIRECTORY: 'PKG_DIRECTORY',
            imp.PY_SOURCE: 'PY_SOURCE',
            imp.PY_COMPILED: 'PY_COMPILED',
        }.get(kind, 'unknown-kind')
        self.msgin(2, "load_module(%s) fqname=%s, fp=%s, pathname=%s" % (kstr, fqname, fp and "fp", pathname))
        if kind == imp.PKG_DIRECTORY:
            module = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", module)
            return module
        if kind == imp.PY_SOURCE:
            co = compile(
                fp.read() + '\n',
                pathname,
                'exec',             # compile code block
                dont_inherit=True   # don't inherit future statements from current environment
            )
        elif kind == imp.PY_COMPILED:
            # a .pyc file is a binary file containing only thee things:
            # 1. a four-byte magic number
            # 2. a four byte modification timestamp, and
            # 3. a Marshalled code object
            # from: https://nedbatchelder.com/blog/200804/the_structure_of_pyc_files.html
            if fp.read(4) != imp.get_magic():
                self.msgout(2, "raise ImportError: Bad magic number", pathname)
                raise ImportError("Bad magic number in %s" % pathname)
            fp.read(4)              # skip modification timestamp
            co = marshal.load(fp)   # load marshalled code object.
        else:
            # Extension/builtin/frozen: no bytecode to scan.
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
def _safe_import_hook(self, name, caller, fromlist, level=-1):
# wrapper for self.import_hook() that won't raise ImportError
if name in self.badmodules:
self._add_badmodule(name, caller)
return
try:
self.import_hook(name, caller, level=level)
except ImportError as msg:
self.msg(2, "ImportError:", str(msg))
self._add_badmodule(name, caller)
else:
if fromlist:
for sub in fromlist:
if sub in self.badmodules:
self._add_badmodule(sub, caller)
continue
try:
self.import_hook(name, caller, [sub], level=level)
except ImportError as msg:
self.msg(2, "ImportError:", str(msg))
fullname = name + "." + sub
self._add_badmodule(fullname, caller)
    def scan_opcodes(self, co,
                     unpack=struct.unpack):  # pragma: nocover
        """Yield 'interesting' opcode events ("store"/"import") from *co*.

        Version for Python 2.4 and older (3-byte opcode wire format).
        NOTE(review): this path indexes co_code with code[0] and compares it
        to integer opcodes — valid on Python 2 str bytecode; on Python 3 this
        legacy branch is not selected by scan_code.  Verify against the
        Python 2 original before reusing.
        """
        # Scan the code, and yield 'interesting' opcode combinations
        # Version for Python 2.4 and older
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        while code:
            c = code[0]
            if c in STORE_OPS:
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            if c == LOAD_CONST and code[3] == IMPORT_NAME:
                oparg_1, oparg_2 = unpack('<xHxH', code[:6])
                yield "import", (consts[oparg_1], names[oparg_2])
                code = code[6:]
                continue
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]
    def scan_opcodes_25(self, co,
                        unpack = struct.unpack):
        """Yield 'interesting' opcode events from *co*.

        Python 2.5 version (has absolute and relative imports).
        NOTE(review): LOAD_LOAD_AND_IMPORT sums integer opcode values here,
        but the Python 2 original summed chr() byte strings; comparing the
        int sum against a bytecode slice can never match, so this detection
        appears dead as written — confirm against the upstream py2 source.
        """
        # Scan the code, and yield 'interesting' opcode combinations
        # Python 2.5 version (has absolute and relative imports)
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        LOAD_LOAD_AND_IMPORT = LOAD_CONST + LOAD_CONST + IMPORT_NAME
        while code:
            c = code[0]
            if c in STORE_OPS:
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            if code[:9:3] == LOAD_LOAD_AND_IMPORT:
                oparg_1, oparg_2, oparg_3 = unpack('<xHxHxH', code[:9])
                level = consts[oparg_1]
                if level == -1: # normal import
                    yield "import", (consts[oparg_2], names[oparg_3])
                elif level == 0: # absolute import
                    yield "absolute_import", (consts[oparg_2], names[oparg_3])
                else: # relative import
                    yield "relative_import", (level, consts[oparg_2], names[oparg_3])
                code = code[9:]
                continue
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]
def scan_opcodes_34(self, co):
i = 0
bytecode = list(dis.Bytecode(co))
while i < len(bytecode):
if (
bytecode[i].opname == "LOAD_CONST" and
bytecode[i + 1].opname == "LOAD_CONST" and
bytecode[i + 2].opname == "IMPORT_NAME"
):
level = bytecode[i].argval
fromlist = bytecode[i + 1].argval
import_name = bytecode[i + 2].argval
if level == 0:
yield "absolute_import", (fromlist, import_name)
else:
yield "relative_import", (level, fromlist, import_name)
i += 2
i += 1
    def scan_code(self, co, m):
        """Walk *co*'s opcodes (and nested code objects), simulating imports
        and recording global-name stores into Module *m*."""
        code = co.co_code
        # Select the scanner matching the running interpreter's bytecode.
        if sys.version_info >= (3, 4):
            scanner = self.scan_opcodes_34
        elif sys.version_info >= (2, 5):
            scanner = self.scan_opcodes_25
        else:
            scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what in ("import", "absolute_import"):
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                if what == "absolute_import": level = 0
                else: level = -1
                self._safe_import_hook(name, m, fromlist, level=level)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    # 'from . import x' style: import relative to m's package.
                    parent = self.determine_parent(m, level=level)
                    # m is still the caller here... [bp]
                    self._safe_import_hook(parent.__name__, m, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)
        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)
def load_package(self, fqname, pathname):
self.msgin(2, "load_package", fqname, pathname)
newname = replacePackageMap.get(fqname)
if newname:
fqname = newname
m = self.add_module(fqname)
m.__file__ = pathname
m.__path__ = [pathname]
# As per comment at top of file, simulate runtime __path__ additions.
m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
fp, buf, stuff = self.find_module("__init__", m.__path__)
try:
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
return m
finally:
if fp:
fp.close()
def add_module(self, fqname):
if fqname in self.modules:
return self.modules[fqname]
self.modules[fqname] = m = Module(fqname)
return m
def find_module(self, name, path, parent=None):
if parent is not None:
# assert path is not None
fullname = parent.__name__+'.'+name
else:
fullname = name
if fullname in self.excludes:
self.msgout(3, "find_module -> Excluded", fullname)
raise ImportError(name)
if path is None:
if name in sys.builtin_module_names:
return (None, None, ("", "", imp.C_BUILTIN))
path = self.path
return imp.find_module(name, path)
    def report(self):
        """Print a report to stdout, listing the found modules with their
        paths, as well as modules that are missing, or seem to be missing.
        """
        print()
        print(" %-25s %s" % ("Name", "File"))
        print(" %-25s %s" % ("----", "----"))
        # Print modules found ('P' marks a package, 'm' a plain module).
        keys = sorted(self.modules.keys())
        for key in keys:
            m = self.modules[key]
            if m.__path__:
                print("P", end=' ')
            else:
                print("m", end=' ')
            print("%-25s" % key, m.__file__ or "")
        # Print missing modules, with the modules that imported them.
        missing, maybe = self.any_missing_maybe()
        if missing:
            print()
            print("Missing modules:")
            for name in missing:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))
        # Print modules that may be missing, but then again, maybe not...
        if maybe:
            print()
            print("Submodules that appear to be missing, but could also be", end=' ')
            print("global names in the parent package:")
            for name in maybe:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))
def any_missing(self):
"""Return a list of modules that appear to be missing. Use
any_missing_maybe() if you want to know which modules are
certain to be missing, and which *may* be missing.
"""
missing, maybe = self.any_missing_maybe()
return missing + maybe
    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.
        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                # Explicitly excluded names are not reported at all.
                continue
            i = name.rfind(".")
            if i < 0:
                # Top-level name that failed to import: definitely missing.
                missing.append(name)
                continue
            # Dotted name: classify using what we know about the parent.
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # It's not a global in the package, the package didn't
                    # do funny star imports, it's very likely to be missing.
                    # The symbol could be inserted into the package from the
                    # outside, but since that's not good style we simply list
                    # it missing.
                    missing.append(name)
            else:
                # The parent package itself was never found.
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe
def replace_paths_in_code(self, co):
new_filename = original_filename = os.path.normpath(co.co_filename)
for f, r in self.replace_paths:
if original_filename.startswith(f):
new_filename = r + original_filename[len(f):]
break
if self.debug and original_filename not in self.processed_paths:
if new_filename != original_filename:
self.msgout(2, "co_filename %r changed to %r" \
% (original_filename,new_filename,))
else:
self.msgout(2, "co_filename %r remains unchanged" \
% (original_filename,))
self.processed_paths.append(original_filename)
consts = list(co.co_consts)
for i in range(len(consts)):
if isinstance(consts[i], type(co)):
consts[i] = self.replace_paths_in_code(consts[i])
return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
co.co_flags, co.co_code, tuple(consts), co.co_names,
co.co_varnames, new_filename, co.co_name,
co.co_firstlineno, co.co_lnotab,
co.co_freevars, co.co_cellvars)
def test():
    """Command-line driver: scan a script and print the module report.

    Options: -d increase debug level, -q quiet, -m treat extra args as
    module names, -p PATH prepend path entries, -x NAME exclude a module.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error as msg:
        print(msg)
        return
    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)
    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]
    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print("path:")
        for item in path:
            print(" ", repr(item))
    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            # A bare '-m' among the extra args switches to module mode.
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf # for -i debugging
# Run the demo driver when executed directly; Ctrl-C aborts cleanly.
if __name__ == '__main__':
    try:
        mf = test()
    except KeyboardInterrupt:
        print("\n[interrupted]")
| 26,278 | 35.651325 | 112 | py |
pydeps | pydeps-master/docs/module-finder-archive/mf_next.py | """Find modules used by a script, using introspection."""
import dis
import importlib._bootstrap_external
import importlib.machinery
import marshal
import os
import io
import sys
# Old imp constants:
# These mirror the module-type constants of the removed ``imp`` module so
# that find_module()-style (suffix, mode, kind) result tuples keep their
# historical meaning.  (Value 4 is intentionally absent here -- presumably
# the obsolete imp.PY_RESOURCE; TODO confirm.)
_SEARCH_ERROR = 0
_PY_SOURCE = 1
_PY_COMPILED = 2
_C_EXTENSION = 3
_PKG_DIRECTORY = 5
_C_BUILTIN = 6
_PY_FROZEN = 7
# Modulefinder does a good job at simulating Python's, but it can not
# handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping is lists of paths.
# Maps package name -> list of extra __path__ entries (see comment above).
packagePathMap = {}

# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra search-path entry for *packagename*."""
    entries = packagePathMap.get(packagename)
    if entries is None:
        entries = packagePathMap[packagename] = []
    entries.append(path)
# Maps real package name -> replacement name (see ReplacePackage below).
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around
# situations in which a package injects itself under the name
# of another package into sys.modules at runtime by calling
# ReplacePackage("real_package_name", "faked_package_name")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    """Record that imports of *oldname* should be scanned as *newname*."""
    replacePackageMap[oldname] = newname
def _find_module(name, path=None):
    """An importlib reimplementation of imp.find_module (for our purposes).

    Returns (file, pathname, (suffix, mode, kind)) where *kind* is one of
    the _PY_SOURCE/_C_EXTENSION/... constants above; *file* is an open
    binary file for source/compiled/extension modules and None otherwise.
    Raises ImportError when nothing is found.
    """
    # It's necessary to clear the caches for our Finder first, in case any
    # modules are being added/deleted/modified at runtime. In particular,
    # test_modulefinder.py changes file tree contents in a cache-breaking way:
    importlib.machinery.PathFinder.invalidate_caches()
    spec = importlib.machinery.PathFinder.find_spec(name, path)
    if spec is None:
        raise ImportError("No module named {name!r}".format(name=name), name=name)
    # Some special cases: built-in and frozen modules have no file at all.
    if spec.loader is importlib.machinery.BuiltinImporter:
        return None, None, ("", "", _C_BUILTIN)
    if spec.loader is importlib.machinery.FrozenImporter:
        return None, None, ("", "", _PY_FROZEN)
    file_path = spec.origin
    # Packages are reported as their directory, not their __init__ file.
    if spec.loader.is_package(name):
        return None, os.path.dirname(file_path), ("", "", _PKG_DIRECTORY)
    # Classify the module kind by the loader type.
    if isinstance(spec.loader, importlib.machinery.SourceFileLoader):
        kind = _PY_SOURCE
    elif isinstance(spec.loader, importlib.machinery.ExtensionFileLoader):
        kind = _C_EXTENSION
    elif isinstance(spec.loader, importlib.machinery.SourcelessFileLoader):
        kind = _PY_COMPILED
    else: # Should never happen.
        return None, None, ("", "", _SEARCH_ERROR)
    # io.open_code (3.8+) opens the file the way the import system would.
    file = io.open_code(file_path)
    suffix = os.path.splitext(file_path)[-1]
    return file, file_path, (suffix, "rb", kind)
class Module:
    """A module discovered (or attempted) during the scan.

    Mirrors the attributes of a real module object (__name__, __file__,
    __path__) and carries the bookkeeping ModuleFinder needs.
    """

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned in the module, including those pulled in
        # through star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports that could not be resolved, i.e. a star-import from
        # a non-Python module.
        self.starimports = {}

    def __repr__(self):
        parts = [repr(self.__name__)]
        if self.__file__ is not None:
            parts.append(repr(self.__file__))
        if self.__path__ is not None:
            parts.append(repr(self.__path__))
        return "Module(%s)" % ", ".join(parts)
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=None, replace_paths=None):
if path is None:
path = sys.path
self.path = path
self.modules = {}
self.badmodules = {}
self.debug = debug
self.indent = 0
self.excludes = excludes if excludes is not None else []
self.replace_paths = replace_paths if replace_paths is not None else []
self.processed_paths = [] # Used in debugging only
def msg(self, level, str, *args):
if level <= self.debug:
for i in range(self.indent):
print(" ", end=' ')
print(str, end=' ')
for arg in args:
print(repr(arg), end=' ')
print()
def msgin(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent + 1
self.msg(*args)
def msgout(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent - 1
self.msg(*args)
    def run_script(self, pathname):
        """Scan *pathname* as the program entry point (module __main__)."""
        self.msg(2, "run_script", pathname)
        with io.open_code(pathname) as fp:
            stuff = ("", "rb", _PY_SOURCE)
            self.load_module('__main__', fp, pathname, stuff)
    def load_file(self, pathname):
        """Scan the source file *pathname* under its own (stem) module name."""
        dir, name = os.path.split(pathname)
        name, ext = os.path.splitext(name)
        with io.open_code(pathname) as fp:
            stuff = (ext, "rb", _PY_SOURCE)
            self.load_module(name, fp, pathname, stuff)
    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """Simulate "import name" / "from name import fromlist" by *caller*.

        Mirrors __import__'s contract: returns the head package for a plain
        import, None once a fromlist has been processed.
        """
        self.msg(3, "import_hook", name, caller, fromlist, level)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        if not fromlist:
            return q
        if m.__path__:
            self.ensure_fromlist(m, fromlist)
        return None
    def determine_parent(self, caller, level=-1):
        """Return the package Module that imports in *caller* resolve against.

        *level* follows __import__ semantics: 0 means absolute import (no
        parent), a positive value counts the leading dots of an explicit
        relative import, and -1 means unspecified/classic behaviour.
        Returns None when there is no enclosing package.
        """
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                # Caller is a package: the first dot names the package itself.
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError("relative importpath too deep")
            # Strip one trailing name component per remaining dot.
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            # Classic import issued from a package: the package is the parent.
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            # Plain module inside a package: use the containing package.
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None
def find_head_package(self, parent, name):
self.msgin(4, "find_head_package", parent, name)
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
if parent:
qname = head
parent = None
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
self.msgout(4, "raise ImportError: No module named", qname)
raise ImportError("No module named " + qname)
def load_tail(self, q, tail):
self.msgin(4, "load_tail", q, tail)
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_module(head, mname, m)
if not m:
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError("No module named " + mname)
self.msgout(4, "load_tail ->", m)
return m
    def ensure_fromlist(self, m, fromlist, recursive=0):
        """Import every name in *fromlist* that is a submodule of package *m*.

        "*" expands (once, guarded by *recursive*) to every submodule found
        on disk.  Names that are already attributes of *m* are skipped.
        """
        self.msg(4, "ensure_fromlist", m, fromlist, recursive)
        for sub in fromlist:
            if sub == "*":
                if not recursive:
                    all = self.find_all_submodules(m)
                    if all:
                        self.ensure_fromlist(m, all, 1)
            elif not hasattr(m, sub):
                subname = "%s.%s" % (m.__name__, sub)
                submod = self.import_module(sub, subname, m)
                if not submod:
                    raise ImportError("No module named " + subname)
def find_all_submodules(self, m):
if not m.__path__:
return
modules = {}
# 'suffixes' used to be a list hardcoded to [".py", ".pyc"].
# But we must also collect Python extension modules - although
# we cannot separate normal dlls from Python extensions.
suffixes = []
suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
for dir in m.__path__:
try:
names = os.listdir(dir)
except OSError:
self.msg(2, "can't list directory", dir)
continue
for name in names:
mod = None
for suff in suffixes:
n = len(suff)
if name[-n:] == suff:
mod = name[:-n]
break
if mod and mod != "__init__":
modules[mod] = mod
return modules.keys()
def import_module(self, partname, fqname, parent):
self.msgin(3, "import_module", partname, fqname, parent)
try:
m = self.modules[fqname]
except KeyError:
pass
else:
self.msgout(3, "import_module ->", m)
return m
if fqname in self.badmodules:
self.msgout(3, "import_module -> None")
return None
if parent and parent.__path__ is None:
self.msgout(3, "import_module -> None")
return None
try:
fp, pathname, stuff = self.find_module(partname,
parent and parent.__path__, parent)
except ImportError:
self.msgout(3, "import_module ->", None)
return None
try:
m = self.load_module(fqname, fp, pathname, stuff)
finally:
if fp:
fp.close()
if parent:
setattr(parent, partname, m)
self.msgout(3, "import_module ->", m)
return m
    def load_module(self, fqname, fp, pathname, file_info):
        """Compile and scan the module *fqname* found at *pathname*.

        *file_info* is the (suffix, mode, kind) triple from find_module().
        Packages recurse through load_package(); source is compiled and
        bytecode unmarshalled; anything else (extensions, builtins) is
        registered without a code object.
        """
        suffix, mode, type = file_info
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == _PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == _PY_SOURCE:
            co = compile(fp.read(), pathname, 'exec')
        elif type == _PY_COMPILED:
            try:
                data = fp.read()
                # NOTE(review): private importlib helper that validates the
                # pyc header -- confirm availability on interpreter bumps.
                importlib._bootstrap_external._classify_pyc(data, fqname, {})
            except ImportError as exc:
                self.msgout(2, "raise ImportError: " + str(exc), pathname)
                raise
            # Skip the 16-byte pyc header (3.7+ layout) before unmarshalling.
            co = marshal.loads(memoryview(data)[16:])
        else:
            # Extension/builtin/frozen module: no bytecode available to scan.
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        """Wrapper for self.import_hook() that records failures instead of
        raising: failed names land in self.badmodules for report().
        """
        # wrapper for self.import_hook() that won't raise ImportError
        if name in self.badmodules:
            self._add_badmodule(name, caller)
            return
        try:
            self.import_hook(name, caller, level=level)
        except ImportError as msg:
            self.msg(2, "ImportError:", str(msg))
            self._add_badmodule(name, caller)
        except SyntaxError as msg:
            # A module whose source fails to compile is treated as missing.
            self.msg(2, "SyntaxError:", str(msg))
            self._add_badmodule(name, caller)
        else:
            if fromlist:
                for sub in fromlist:
                    # Track submodules by their fully qualified name.
                    fullname = name + "." + sub
                    if fullname in self.badmodules:
                        self._add_badmodule(fullname, caller)
                        continue
                    try:
                        self.import_hook(name, caller, [sub], level=level)
                    except ImportError as msg:
                        self.msg(2, "ImportError:", str(msg))
                        self._add_badmodule(fullname, caller)
    def scan_opcodes(self, co):
        """Yield ('store' | 'absolute_import' | 'relative_import', args)
        events for the bytecode of *co*.
        """
        # Scan the code, and yield 'interesting' opcode combinations
        # NOTE(review): dis._find_store_names / dis._find_imports are private
        # CPython helpers -- confirm they exist on the targeted interpreters.
        for name in dis._find_store_names(co):
            yield "store", (name,)
        for name, level, fromlist in dis._find_imports(co):
            if level == 0: # absolute import
                yield "absolute_import", (fromlist, name)
            else: # relative import
                yield "relative_import", (level, fromlist, name)
def scan_code(self, co, m):
code = co.co_code
scanner = self.scan_opcodes
for what, args in scanner(co):
if what == "store":
name, = args
m.globalnames[name] = 1
elif what == "absolute_import":
fromlist, name = args
have_star = 0
if fromlist is not None:
if "*" in fromlist:
have_star = 1
fromlist = [f for f in fromlist if f != "*"]
self._safe_import_hook(name, m, fromlist, level=0)
if have_star:
# We've encountered an "import *". If it is a Python module,
# the code has already been parsed and we can suck out the
# global names.
mm = None
if m.__path__:
# At this point we don't know whether 'name' is a
# submodule of 'm' or a global module. Let's just try
# the full name first.
mm = self.modules.get(m.__name__ + "." + name)
if mm is None:
mm = self.modules.get(name)
if mm is not None:
m.globalnames.update(mm.globalnames)
m.starimports.update(mm.starimports)
if mm.__code__ is None:
m.starimports[name] = 1
else:
m.starimports[name] = 1
elif what == "relative_import":
level, fromlist, name = args
if name:
self._safe_import_hook(name, m, fromlist, level=level)
else:
parent = self.determine_parent(m, level=level)
self._safe_import_hook(parent.__name__, None, fromlist, level=0)
else:
# We don't expect anything else from the generator.
raise RuntimeError(what)
for c in co.co_consts:
if isinstance(c, type(co)):
self.scan_code(c, m)
def load_package(self, fqname, pathname):
self.msgin(2, "load_package", fqname, pathname)
newname = replacePackageMap.get(fqname)
if newname:
fqname = newname
m = self.add_module(fqname)
m.__file__ = pathname
m.__path__ = [pathname]
# As per comment at top of file, simulate runtime __path__ additions.
m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
fp, buf, stuff = self.find_module("__init__", m.__path__)
try:
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
return m
finally:
if fp:
fp.close()
def add_module(self, fqname):
if fqname in self.modules:
return self.modules[fqname]
self.modules[fqname] = m = Module(fqname)
return m
def find_module(self, name, path, parent=None):
if parent is not None:
# assert path is not None
fullname = parent.__name__+'.'+name
else:
fullname = name
if fullname in self.excludes:
self.msgout(3, "find_module -> Excluded", fullname)
raise ImportError(name)
if path is None:
if name in sys.builtin_module_names:
return (None, None, ("", "", _C_BUILTIN))
path = self.path
return _find_module(name, path)
def report(self):
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print()
print(" %-25s %s" % ("Name", "File"))
print(" %-25s %s" % ("----", "----"))
# Print modules found
keys = sorted(self.modules.keys())
for key in keys:
m = self.modules[key]
if m.__path__:
print("P", end=' ')
else:
print("m", end=' ')
print("%-25s" % key, m.__file__ or "")
# Print missing modules
missing, maybe = self.any_missing_maybe()
if missing:
print()
print("Missing modules:")
for name in missing:
mods = sorted(self.badmodules[name].keys())
print("?", name, "imported from", ', '.join(mods))
# Print modules that may be missing, but then again, maybe not...
if maybe:
print()
print("Submodules that appear to be missing, but could also be", end=' ')
print("global names in the parent package:")
for name in maybe:
mods = sorted(self.badmodules[name].keys())
print("?", name, "imported from", ', '.join(mods))
def any_missing(self):
"""Return a list of modules that appear to be missing. Use
any_missing_maybe() if you want to know which modules are
certain to be missing, and which *may* be missing.
"""
missing, maybe = self.any_missing_maybe()
return missing + maybe
def any_missing_maybe(self):
"""Return two lists, one with modules that are certainly missing
and one with modules that *may* be missing. The latter names could
either be submodules *or* just global names in the package.
The reason it can't always be determined is that it's impossible to
tell which names are imported when "from module import *" is done
with an extension module, short of actually importing it.
"""
missing = []
maybe = []
for name in self.badmodules:
if name in self.excludes:
continue
i = name.rfind(".")
if i < 0:
missing.append(name)
continue
subname = name[i+1:]
pkgname = name[:i]
pkg = self.modules.get(pkgname)
if pkg is not None:
if pkgname in self.badmodules[name]:
# The package tried to import this module itself and
# failed. It's definitely missing.
missing.append(name)
elif subname in pkg.globalnames:
# It's a global in the package: definitely not missing.
pass
elif pkg.starimports:
# It could be missing, but the package did an "import *"
# from a non-Python module, so we simply can't be sure.
maybe.append(name)
else:
# It's not a global in the package, the package didn't
# do funny star imports, it's very likely to be missing.
# The symbol could be inserted into the package from the
# outside, but since that's not good style we simply list
# it missing.
missing.append(name)
else:
missing.append(name)
missing.sort()
maybe.sort()
return missing, maybe
def replace_paths_in_code(self, co):
new_filename = original_filename = os.path.normpath(co.co_filename)
for f, r in self.replace_paths:
if original_filename.startswith(f):
new_filename = r + original_filename[len(f):]
break
if self.debug and original_filename not in self.processed_paths:
if new_filename != original_filename:
self.msgout(2, "co_filename %r changed to %r" \
% (original_filename,new_filename,))
else:
self.msgout(2, "co_filename %r remains unchanged" \
% (original_filename,))
self.processed_paths.append(original_filename)
consts = list(co.co_consts)
for i in range(len(consts)):
if isinstance(consts[i], type(co)):
consts[i] = self.replace_paths_in_code(consts[i])
return co.replace(co_consts=tuple(consts), co_filename=new_filename)
def test():
# Parse command line
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
except getopt.error as msg:
print(msg)
return
# Process options
debug = 1
domods = 0
addpath = []
exclude = []
for o, a in opts:
if o == '-d':
debug = debug + 1
if o == '-m':
domods = 1
if o == '-p':
addpath = addpath + a.split(os.pathsep)
if o == '-q':
debug = 0
if o == '-x':
exclude.append(a)
# Provide default arguments
if not args:
script = "hello.py"
else:
script = args[0]
# Set the path based on sys.path and the script directory
path = sys.path[:]
path[0] = os.path.dirname(script)
path = addpath + path
if debug > 1:
print("path:")
for item in path:
print(" ", repr(item))
# Create the module finder and turn its crank
mf = ModuleFinder(path, debug, exclude)
for arg in args[1:]:
if arg == '-m':
domods = 1
continue
if domods:
if arg[-2:] == '.*':
mf.import_hook(arg[:-2], None, ["*"])
else:
mf.import_hook(arg)
else:
mf.load_file(arg)
mf.run_script(script)
mf.report()
return mf # for -i debugging
if __name__ == '__main__':
try:
mf = test()
except KeyboardInterrupt:
print("\n[interrupted]")
| 23,699 | 34.532234 | 86 | py |
pydeps | pydeps-master/docs/module-finder-archive/mf_35.py | """Find modules used by a script, using introspection."""
import dis
import importlib._bootstrap_external
import importlib.machinery
import marshal
import os
import sys
import types
import struct
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', PendingDeprecationWarning)
import imp
# Opcode values used by the bytecode scanner (scan_opcodes_25) below.
LOAD_CONST = dis.opmap['LOAD_CONST']
IMPORT_NAME = dis.opmap['IMPORT_NAME']
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
# Either store opcode binds a global name at module level.
STORE_OPS = STORE_NAME, STORE_GLOBAL
EXTENDED_ARG = dis.EXTENDED_ARG
# Modulefinder does a good job at simulating Python's, but it can not
# handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping is lists of paths.
# Maps package name -> list of extra __path__ entries (see comment above).
packagePathMap = {}

# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra search-path entry for *packagename*."""
    entries = packagePathMap.get(packagename)
    if entries is None:
        entries = packagePathMap[packagename] = []
    entries.append(path)
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around
# situations in which a package injects itself under the name
# of another package into sys.modules at runtime by calling
# ReplacePackage("real_package_name", "faked_package_name")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
replacePackageMap[oldname] = newname
class Module:
    """A module discovered (or attempted) during the scan.

    Mirrors the attributes of a real module object (__name__, __file__,
    __path__) and carries the bookkeeping ModuleFinder needs.
    """

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned in the module, including those pulled in
        # through star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports that could not be resolved, i.e. a star-import from
        # a non-Python module.
        self.starimports = {}

    def __repr__(self):
        parts = [repr(self.__name__)]
        if self.__file__ is not None:
            parts.append(repr(self.__file__))
        if self.__path__ is not None:
            parts.append(repr(self.__path__))
        return "Module(%s)" % ", ".join(parts)
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
if path is None:
path = sys.path
self.path = path
self.modules = {}
self.badmodules = {}
self.debug = debug
self.indent = 0
self.excludes = excludes
self.replace_paths = replace_paths
self.processed_paths = [] # Used in debugging only
def msg(self, level, str, *args):
if level <= self.debug:
for i in range(self.indent):
print(" ", end=' ')
print(str, end=' ')
for arg in args:
print(repr(arg), end=' ')
print()
def msgin(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent + 1
self.msg(*args)
def msgout(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent - 1
self.msg(*args)
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
with open(pathname) as fp:
stuff = ("", "r", imp.PY_SOURCE)
self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
with open(pathname) as fp:
stuff = (ext, "r", imp.PY_SOURCE)
self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None, level=-1):
self.msg(3, "import_hook", name, caller, fromlist, level)
parent = self.determine_parent(caller, level=level)
q, tail = self.find_head_package(parent, name)
m = self.load_tail(q, tail)
if not fromlist:
return q
if m.__path__:
self.ensure_fromlist(m, fromlist)
return None
def determine_parent(self, caller, level=-1):
self.msgin(4, "determine_parent", caller, level)
if not caller or level == 0:
self.msgout(4, "determine_parent -> None")
return None
pname = caller.__name__
if level >= 1: # relative import
if caller.__path__:
level -= 1
if level == 0:
parent = self.modules[pname]
assert parent is caller
self.msgout(4, "determine_parent ->", parent)
return parent
if pname.count(".") < level:
raise ImportError("relative importpath too deep")
pname = ".".join(pname.split(".")[:-level])
parent = self.modules[pname]
self.msgout(4, "determine_parent ->", parent)
return parent
if caller.__path__:
parent = self.modules[pname]
assert caller is parent
self.msgout(4, "determine_parent ->", parent)
return parent
if '.' in pname:
i = pname.rfind('.')
pname = pname[:i]
parent = self.modules[pname]
assert parent.__name__ == pname
self.msgout(4, "determine_parent ->", parent)
return parent
self.msgout(4, "determine_parent -> None")
return None
def find_head_package(self, parent, name):
self.msgin(4, "find_head_package", parent, name)
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
if parent:
qname = head
parent = None
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
self.msgout(4, "raise ImportError: No module named", qname)
raise ImportError("No module named " + qname)
def load_tail(self, q, tail):
self.msgin(4, "load_tail", q, tail)
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_module(head, mname, m)
if not m:
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError("No module named " + mname)
self.msgout(4, "load_tail ->", m)
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
self.msg(4, "ensure_fromlist", m, fromlist, recursive)
for sub in fromlist:
if sub == "*":
if not recursive:
all = self.find_all_submodules(m)
if all:
self.ensure_fromlist(m, all, 1)
elif not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.import_module(sub, subname, m)
if not submod:
raise ImportError("No module named " + subname)
def find_all_submodules(self, m):
if not m.__path__:
return
modules = {}
# 'suffixes' used to be a list hardcoded to [".py", ".pyc"].
# But we must also collect Python extension modules - although
# we cannot separate normal dlls from Python extensions.
suffixes = []
suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
for dir in m.__path__:
try:
names = os.listdir(dir)
except OSError:
self.msg(2, "can't list directory", dir)
continue
for name in names:
mod = None
for suff in suffixes:
n = len(suff)
if name[-n:] == suff:
mod = name[:-n]
break
if mod and mod != "__init__":
modules[mod] = mod
return modules.keys()
def import_module(self, partname, fqname, parent):
self.msgin(3, "import_module", partname, fqname, parent)
try:
m = self.modules[fqname]
except KeyError:
pass
else:
self.msgout(3, "import_module ->", m)
return m
if fqname in self.badmodules:
self.msgout(3, "import_module -> None")
return None
if parent and parent.__path__ is None:
self.msgout(3, "import_module -> None")
return None
try:
fp, pathname, stuff = self.find_module(partname,
parent and parent.__path__, parent)
except ImportError:
self.msgout(3, "import_module ->", None)
return None
try:
m = self.load_module(fqname, fp, pathname, stuff)
finally:
if fp:
fp.close()
if parent:
setattr(parent, partname, m)
self.msgout(3, "import_module ->", m)
return m
def load_module(self, fqname, fp, pathname, file_info):
suffix, mode, type = file_info
self.msgin(2, "load_module", fqname, fp and "fp", pathname)
if type == imp.PKG_DIRECTORY:
m = self.load_package(fqname, pathname)
self.msgout(2, "load_module ->", m)
return m
if type == imp.PY_SOURCE:
co = compile(fp.read()+'\n', pathname, 'exec')
elif type == imp.PY_COMPILED:
try:
marshal_data = importlib._bootstrap_external._validate_bytecode_header(fp.read())
except ImportError as exc:
self.msgout(2, "raise ImportError: " + str(exc), pathname)
raise
co = marshal.loads(marshal_data)
else:
co = None
m = self.add_module(fqname)
m.__file__ = pathname
if co:
if self.replace_paths:
co = self.replace_paths_in_code(co)
m.__code__ = co
self.scan_code(co, m)
self.msgout(2, "load_module ->", m)
return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
def _safe_import_hook(self, name, caller, fromlist, level=-1):
    """Wrapper for self.import_hook() that records ImportError in
    self.badmodules instead of raising.

    Bug fix: the fromlist loop previously tested and recorded the bare
    submodule name (``sub``) in self.badmodules, so a failing submodule
    of one package would shadow identically named submodules of other
    packages.  Track the fully qualified ``name.sub`` instead, and also
    tolerate SyntaxError from compiling a broken module, matching the
    behaviour of later modulefinder revisions.
    """
    if name in self.badmodules:
        self._add_badmodule(name, caller)
        return
    try:
        self.import_hook(name, caller, level=level)
    except ImportError as msg:
        self.msg(2, "ImportError:", str(msg))
        self._add_badmodule(name, caller)
    except SyntaxError as msg:
        self.msg(2, "SyntaxError:", str(msg))
        self._add_badmodule(name, caller)
    else:
        if fromlist:
            for sub in fromlist:
                # Key badmodules on the fully qualified submodule name.
                fullname = name + "." + sub
                if fullname in self.badmodules:
                    self._add_badmodule(fullname, caller)
                    continue
                try:
                    self.import_hook(name, caller, [sub], level=level)
                except ImportError as msg:
                    self.msg(2, "ImportError:", str(msg))
                    self._add_badmodule(fullname, caller)
def scan_opcodes_25(self, co,
                    unpack = struct.unpack):
    """Scan a code object and yield 'interesting' opcode combinations.

    Yields ("store", (name,)) for STORE_NAME/STORE_GLOBAL, and
    ("absolute_import", ...) / ("relative_import", ...) for IMPORT_NAME
    opcodes preceded by the two LOAD_CONSTs pushing level and fromlist.
    """
    # Scan the code, and yield 'interesting' opcode combinations
    code = co.co_code
    names = co.co_names
    consts = co.co_consts
    # Flatten bytecode to (opcode, arg) pairs, dropping EXTENDED_ARG
    # markers (their value is folded into the following instruction).
    opargs = [(op, arg) for _, op, arg in dis._unpack_opargs(code)
              if op != EXTENDED_ARG]
    for i, (op, oparg) in enumerate(opargs):
        if op in STORE_OPS:
            yield "store", (names[oparg],)
            continue
        if (op == IMPORT_NAME and i >= 2
                and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST):
            # IMPORT_NAME consumes: LOAD_CONST level, LOAD_CONST fromlist.
            level = consts[opargs[i-2][1]]
            fromlist = consts[opargs[i-1][1]]
            if level == 0: # absolute import
                yield "absolute_import", (fromlist, names[oparg])
            else: # relative import
                yield "relative_import", (level, fromlist, names[oparg])
            continue
def scan_code(self, co, m):
    """Walk code object *co*, recording global stores and simulating the
    imports it performs, attributing everything to Module *m*.

    Recurses into nested code objects (functions, classes, ...).
    """
    code = co.co_code  # NOTE(review): unused here; scanner reads co directly
    scanner = self.scan_opcodes_25
    for what, args in scanner(co):
        if what == "store":
            name, = args
            m.globalnames[name] = 1
        elif what == "absolute_import":
            fromlist, name = args
            have_star = 0
            if fromlist is not None:
                if "*" in fromlist:
                    have_star = 1
                fromlist = [f for f in fromlist if f != "*"]
            self._safe_import_hook(name, m, fromlist, level=0)
            if have_star:
                # We've encountered an "import *". If it is a Python module,
                # the code has already been parsed and we can suck out the
                # global names.
                mm = None
                if m.__path__:
                    # At this point we don't know whether 'name' is a
                    # submodule of 'm' or a global module. Let's just try
                    # the full name first.
                    mm = self.modules.get(m.__name__ + "." + name)
                if mm is None:
                    mm = self.modules.get(name)
                if mm is not None:
                    m.globalnames.update(mm.globalnames)
                    m.starimports.update(mm.starimports)
                    if mm.__code__ is None:
                        # Star-import from a non-Python module: names unknown.
                        m.starimports[name] = 1
                else:
                    m.starimports[name] = 1
        elif what == "relative_import":
            level, fromlist, name = args
            if name:
                self._safe_import_hook(name, m, fromlist, level=level)
            else:
                # "from . import x" style: import from the parent package.
                parent = self.determine_parent(m, level=level)
                self._safe_import_hook(parent.__name__, None, fromlist, level=0)
        else:
            # We don't expect anything else from the generator.
            raise RuntimeError(what)
    for c in co.co_consts:
        if isinstance(c, type(co)):
            self.scan_code(c, m)
def load_package(self, fqname, pathname):
    """Simulate loading package *fqname* from directory *pathname*."""
    self.msgin(2, "load_package", fqname, pathname)
    # Honor ReplacePackage() registrations.
    newname = replacePackageMap.get(fqname)
    if newname:
        fqname = newname
    m = self.add_module(fqname)
    m.__file__ = pathname
    m.__path__ = [pathname]

    # As per comment at top of file, simulate runtime __path__ additions.
    m.__path__ = m.__path__ + packagePathMap.get(fqname, [])

    fp, buf, stuff = self.find_module("__init__", m.__path__)
    try:
        # Scanning __init__ populates the package's names and imports.
        self.load_module(fqname, fp, buf, stuff)
        self.msgout(2, "load_package ->", m)
        return m
    finally:
        if fp:
            fp.close()
def add_module(self, fqname):
    """Return the cached Module for *fqname*, creating it on first use."""
    try:
        return self.modules[fqname]
    except KeyError:
        m = self.modules[fqname] = Module(fqname)
        return m
def find_module(self, name, path, parent=None):
    """Locate module *name* on *path*, returning imp.find_module()'s
    (file, pathname, description) triple.

    Raises ImportError for excluded modules and lookup failures.
    """
    if parent is not None:
        # assert path is not None
        fullname = parent.__name__+'.'+name
    else:
        fullname = name
    if fullname in self.excludes:
        self.msgout(3, "find_module -> Excluded", fullname)
        raise ImportError(name)

    if path is None:
        if name in sys.builtin_module_names:
            return (None, None, ("", "", imp.C_BUILTIN))
        # Fall back to the finder's configured search path.
        path = self.path
    return imp.find_module(name, path)
def report(self):
    """Print a report to stdout, listing the found modules with their
    paths, as well as modules that are missing, or seem to be missing.
    """
    print()
    print(" %-25s %s" % ("Name", "File"))
    print(" %-25s %s" % ("----", "----"))
    # Found modules: 'P' marks packages, 'm' plain modules.
    for key in sorted(self.modules):
        mod = self.modules[key]
        print("P" if mod.__path__ else "m", end=' ')
        print("%-25s" % key, mod.__file__ or "")
    missing, maybe = self.any_missing_maybe()
    if missing:
        print()
        print("Missing modules:")
        for name in missing:
            importers = sorted(self.badmodules[name].keys())
            print("?", name, "imported from", ', '.join(importers))
    if maybe:
        print()
        print("Submodules that appear to be missing, but could also be", end=' ')
        print("global names in the parent package:")
        for name in maybe:
            importers = sorted(self.badmodules[name].keys())
            print("?", name, "imported from", ', '.join(importers))
def any_missing(self):
    """Return a list of modules that appear to be missing. Use
    any_missing_maybe() if you want to know which modules are
    certain to be missing, and which *may* be missing.
    """
    certain, possible = self.any_missing_maybe()
    return certain + possible
def any_missing_maybe(self):
    """Return two lists, one with modules that are certainly missing
    and one with modules that *may* be missing. The latter names could
    either be submodules *or* just global names in the package.

    The reason it can't always be determined is that it's impossible to
    tell which names are imported when "from module import *" is done
    with an extension module, short of actually importing it.
    """
    missing = []
    maybe = []
    for name in self.badmodules:
        if name in self.excludes:
            continue
        # Dotless names are plain modules: if they failed, they're missing.
        i = name.rfind(".")
        if i < 0:
            missing.append(name)
            continue
        subname = name[i+1:]
        pkgname = name[:i]
        pkg = self.modules.get(pkgname)
        if pkg is not None:
            if pkgname in self.badmodules[name]:
                # The package tried to import this module itself and
                # failed. It's definitely missing.
                missing.append(name)
            elif subname in pkg.globalnames:
                # It's a global in the package: definitely not missing.
                pass
            elif pkg.starimports:
                # It could be missing, but the package did an "import *"
                # from a non-Python module, so we simply can't be sure.
                maybe.append(name)
            else:
                # It's not a global in the package, the package didn't
                # do funny star imports, it's very likely to be missing.
                # The symbol could be inserted into the package from the
                # outside, but since that's not good style we simply list
                # it missing.
                missing.append(name)
        else:
            missing.append(name)
    missing.sort()
    maybe.sort()
    return missing, maybe
def replace_paths_in_code(self, co):
    """Return a copy of code object *co* with co_filename rewritten
    according to the (prefix, replacement) pairs in self.replace_paths.

    Recurses into nested code objects found in co_consts.
    """
    new_filename = original_filename = os.path.normpath(co.co_filename)
    # First matching prefix wins.
    for f, r in self.replace_paths:
        if original_filename.startswith(f):
            new_filename = r + original_filename[len(f):]
            break

    if self.debug and original_filename not in self.processed_paths:
        if new_filename != original_filename:
            self.msgout(2, "co_filename %r changed to %r" \
                                % (original_filename,new_filename,))
        else:
            self.msgout(2, "co_filename %r remains unchanged" \
                                % (original_filename,))
        self.processed_paths.append(original_filename)

    consts = list(co.co_consts)
    for i in range(len(consts)):
        if isinstance(consts[i], type(co)):
            consts[i] = self.replace_paths_in_code(consts[i])

    # Pre-3.8 style: rebuild the code object with all fields positional
    # (CodeType.replace() did not exist yet).
    return types.CodeType(co.co_argcount, co.co_kwonlyargcount,
                          co.co_nlocals, co.co_stacksize, co.co_flags,
                          co.co_code, tuple(consts), co.co_names,
                          co.co_varnames, new_filename, co.co_name,
                          co.co_firstlineno, co.co_lnotab, co.co_freevars,
                          co.co_cellvars)
def test():
    """Command-line driver: scan the script named on the command line with
    a ModuleFinder and print its report.

    Options: -d increase debug level, -q quiet, -m treat remaining args
    as module names, -p PATH prepend search paths, -x NAME exclude NAME.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error as msg:
        print(msg)
        return

    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)

    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]

    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print("path:")
        for item in path:
            print(" ", repr(item))

    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            # A literal '-m' among the extra args switches to module mode.
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
if __name__ == '__main__':
    try:
        mf = test()
    except KeyboardInterrupt:
        # Let Ctrl-C stop a long scan without a traceback.
        print("\n[interrupted]")
| 23,085 | 35.355906 | 97 | py |
pydeps | pydeps-master/docs/module-finder-archive/__init__.py | 0 | 0 | 0 | py | |
pydeps | pydeps-master/docs/module-finder-archive/mf_38.py | """Find modules used by a script, using introspection."""
import dis
import importlib._bootstrap_external
import importlib.machinery
import marshal
import os
import io
import sys
import types
import warnings
# Opcodes of interest when scanning compiled code for imports.
LOAD_CONST = dis.opmap['LOAD_CONST']
IMPORT_NAME = dis.opmap['IMPORT_NAME']
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
STORE_OPS = STORE_NAME, STORE_GLOBAL
EXTENDED_ARG = dis.EXTENDED_ARG

# Old imp constants: module-type codes formerly exposed by the imp
# module, kept so the tuple-based find_module protocol still works.
_SEARCH_ERROR = 0
_PY_SOURCE = 1
_PY_COMPILED = 2
_C_EXTENSION = 3
_PKG_DIRECTORY = 5
_C_BUILTIN = 6
_PY_FROZEN = 7
# Modulefinder does a good job at simulating Python's import machinery,
# but it cannot handle __path__ modifications that packages make at
# runtime.  Extra paths registered here for a package name will be
# honored.  Maps package name -> list of extra path entries.
packagePathMap = {}

# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra runtime __path__ entry for *packagename*."""
    entries = packagePathMap.get(packagename)
    if entries is None:
        entries = packagePathMap[packagename] = []
    entries.append(path)
# Maps real package name -> name it should be reported under.
replacePackageMap = {}

# This ReplacePackage mechanism allows modulefinder to work around
# situations in which a package injects itself under the name
# of another package into sys.modules at runtime by calling
# ReplacePackage("real_package_name", "faked_package_name")
# before running ModuleFinder.

def ReplacePackage(oldname, newname):
    """Register that package *oldname* should be treated as *newname*."""
    replacePackageMap[oldname] = newname
def _find_module(name, path=None):
    """An importlib reimplementation of imp.find_module (for our purposes).

    Returns the legacy (file, pathname, (suffix, mode, kind)) triple,
    where kind is one of the _PY_*/_C_*/_PKG_DIRECTORY codes above.
    Raises ImportError when no spec is found.
    """
    # It's necessary to clear the caches for our Finder first, in case any
    # modules are being added/deleted/modified at runtime. In particular,
    # test_modulefinder.py changes file tree contents in a cache-breaking way:
    importlib.machinery.PathFinder.invalidate_caches()

    spec = importlib.machinery.PathFinder.find_spec(name, path)

    if spec is None:
        raise ImportError("No module named {name!r}".format(name=name), name=name)

    # Some special cases:

    if spec.loader is importlib.machinery.BuiltinImporter:
        return None, None, ("", "", _C_BUILTIN)

    if spec.loader is importlib.machinery.FrozenImporter:
        return None, None, ("", "", _PY_FROZEN)

    file_path = spec.origin

    if spec.loader.is_package(name):
        return None, os.path.dirname(file_path), ("", "", _PKG_DIRECTORY)

    # Map the loader class onto the legacy module-type code.
    if isinstance(spec.loader, importlib.machinery.SourceFileLoader):
        kind = _PY_SOURCE

    elif isinstance(spec.loader, importlib.machinery.ExtensionFileLoader):
        kind = _C_EXTENSION

    elif isinstance(spec.loader, importlib.machinery.SourcelessFileLoader):
        kind = _PY_COMPILED

    else:  # Should never happen.
        return None, None, ("", "", _SEARCH_ERROR)

    # io.open_code() opens in binary mode with import-style audit hooks.
    file = io.open_code(file_path)
    suffix = os.path.splitext(file_path)[-1]

    return file, file_path, (suffix, "rb", kind)
class Module:
    """Placeholder describing a module discovered (never executed) by
    ModuleFinder."""

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned to in the module, including names that
        # arrived through star-imports of Python modules.
        self.globalnames = {}
        # Star-imports this module did that could not be resolved,
        # i.e. a star-import from a non-Python module.
        self.starimports = {}

    def __repr__(self):
        parts = [repr(self.__name__)]
        if self.__file__ is not None:
            parts.append(repr(self.__file__))
        if self.__path__ is not None:
            parts.append(repr(self.__path__))
        return "Module(%s)" % ", ".join(parts)
class ModuleFinder:
    """Simulate Python's import machinery to discover every module a
    script (transitively) imports, without executing any of them.

    Found modules accumulate in self.modules; modules that failed to
    import accumulate in self.badmodules, keyed by the importing module.
    """

    def __init__(self, path=None, debug=0, excludes=None, replace_paths=None):
        if path is None:
            path = sys.path
        self.path = path
        self.modules = {}
        self.badmodules = {}
        self.debug = debug
        self.indent = 0
        self.excludes = excludes if excludes is not None else []
        self.replace_paths = replace_paths if replace_paths is not None else []
        self.processed_paths = []   # Used in debugging only

    def msg(self, level, str, *args):
        """Emit a debug message if *level* is within the debug threshold."""
        if level <= self.debug:
            for i in range(self.indent):
                print(" ", end=' ')
            print(str, end=' ')
            for arg in args:
                print(repr(arg), end=' ')
            print()

    def msgin(self, *args):
        """msg() variant that also increases the indent (entering a call)."""
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent + 1
            self.msg(*args)

    def msgout(self, *args):
        """msg() variant that also decreases the indent (leaving a call)."""
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent - 1
            self.msg(*args)

    def run_script(self, pathname):
        """Scan the script at *pathname* as the __main__ module."""
        self.msg(2, "run_script", pathname)
        with io.open_code(pathname) as fp:
            stuff = ("", "rb", _PY_SOURCE)
            self.load_module('__main__', fp, pathname, stuff)

    def load_file(self, pathname):
        """Scan a single source file; module name derives from the filename."""
        dir, name = os.path.split(pathname)
        name, ext = os.path.splitext(name)
        with io.open_code(pathname) as fp:
            stuff = (ext, "rb", _PY_SOURCE)
            self.load_module(name, fp, pathname, stuff)

    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """Simulate ``import name`` (or ``from name import fromlist``)
        performed by Module *caller*; may raise ImportError."""
        self.msg(3, "import_hook", name, caller, fromlist, level)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        if not fromlist:
            return q
        if m.__path__:
            self.ensure_fromlist(m, fromlist)
        return None

    def determine_parent(self, caller, level=-1):
        """Determine the package Module an import is relative to, or None."""
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            # Absolute import (or no importer): no parent package.
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                # The caller is itself a package: one level less to climb.
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError("relative importpath too deep")
            # Strip *level* trailing components to reach the ancestor package.
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        # level < 0 (classic implicit-relative semantics) from here on.
        if caller.__path__:
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None

    def find_head_package(self, parent, name):
        """Import the first dotted component of *name*; return (module, tail)."""
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            i = name.find('.')
            head = name[:i]
            tail = name[i+1:]
        else:
            head = name
            tail = ""
        if parent:
            qname = "%s.%s" % (parent.__name__, head)
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            # Retry as a top-level module (implicit-relative fallback).
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError("No module named " + qname)

    def load_tail(self, q, tail):
        """Import each remaining dotted component of *tail* under module *q*."""
        self.msgin(4, "load_tail", q, tail)
        m = q
        while tail:
            i = tail.find('.')
            if i < 0: i = len(tail)
            head, tail = tail[:i], tail[i+1:]
            mname = "%s.%s" % (m.__name__, head)
            m = self.import_module(head, mname, m)
            if not m:
                self.msgout(4, "raise ImportError: No module named", mname)
                raise ImportError("No module named " + mname)
        self.msgout(4, "load_tail ->", m)
        return m

    def ensure_fromlist(self, m, fromlist, recursive=0):
        """Import every submodule named in *fromlist* of package *m*;
        "*" expands to all discoverable submodules."""
        self.msg(4, "ensure_fromlist", m, fromlist, recursive)
        for sub in fromlist:
            if sub == "*":
                if not recursive:
                    all = self.find_all_submodules(m)
                    if all:
                        self.ensure_fromlist(m, all, 1)
            elif not hasattr(m, sub):
                subname = "%s.%s" % (m.__name__, sub)
                submod = self.import_module(sub, subname, m)
                if not submod:
                    raise ImportError("No module named " + subname)

    def find_all_submodules(self, m):
        """Return the names of all submodules found on package *m*'s __path__."""
        if not m.__path__:
            return
        modules = {}
        # 'suffixes' used to be a list hardcoded to [".py", ".pyc"].
        # But we must also collect Python extension modules - although
        # we cannot separate normal dlls from Python extensions.
        suffixes = []
        suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
        suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
        suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
        for dir in m.__path__:
            try:
                names = os.listdir(dir)
            except OSError:
                self.msg(2, "can't list directory", dir)
                continue
            for name in names:
                mod = None
                for suff in suffixes:
                    n = len(suff)
                    if name[-n:] == suff:
                        mod = name[:-n]
                        break
                if mod and mod != "__init__":
                    modules[mod] = mod
        return modules.keys()

    def import_module(self, partname, fqname, parent):
        """Simulate importing a single module; return its Module or None."""
        self.msgin(3, "import_module", partname, fqname, parent)
        try:
            m = self.modules[fqname]
        except KeyError:
            pass
        else:
            # Already processed: return the cached Module.
            self.msgout(3, "import_module ->", m)
            return m
        if fqname in self.badmodules:
            self.msgout(3, "import_module -> None")
            return None
        if parent and parent.__path__ is None:
            # Parent is not a package, so it cannot contain submodules.
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                                                   parent and parent.__path__, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None

        try:
            m = self.load_module(fqname, fp, pathname, stuff)
        finally:
            # find_module may have returned an open file; always close it.
            if fp:
                fp.close()
        if parent:
            setattr(parent, partname, m)
        self.msgout(3, "import_module ->", m)
        return m

    def load_module(self, fqname, fp, pathname, file_info):
        """Compile/scan one module (source, bytecode or package), register it."""
        suffix, mode, type = file_info
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == _PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == _PY_SOURCE:
            co = compile(fp.read(), pathname, 'exec')
        elif type == _PY_COMPILED:
            try:
                data = fp.read()
                # Validates the .pyc magic/header; raises ImportError if bad.
                importlib._bootstrap_external._classify_pyc(data, fqname, {})
            except ImportError as exc:
                self.msgout(2, "raise ImportError: " + str(exc), pathname)
                raise
            # Skip the 16-byte pyc header to reach the marshalled code.
            co = marshal.loads(memoryview(data)[16:])
        else:
            # Extension modules etc. have no bytecode we can scan.
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m

    def _add_badmodule(self, name, caller):
        """Record that *name* failed to import, noting which module tried."""
        if name not in self.badmodules:
            self.badmodules[name] = {}
        if caller:
            self.badmodules[name][caller.__name__] = 1
        else:
            self.badmodules[name]["-"] = 1

    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        # wrapper for self.import_hook() that won't raise ImportError
        if name in self.badmodules:
            self._add_badmodule(name, caller)
            return
        try:
            self.import_hook(name, caller, level=level)
        except ImportError as msg:
            self.msg(2, "ImportError:", str(msg))
            self._add_badmodule(name, caller)
        except SyntaxError as msg:
            self.msg(2, "SyntaxError:", str(msg))
            self._add_badmodule(name, caller)
        else:
            if fromlist:
                for sub in fromlist:
                    fullname = name + "." + sub
                    if fullname in self.badmodules:
                        self._add_badmodule(fullname, caller)
                        continue
                    try:
                        self.import_hook(name, caller, [sub], level=level)
                    except ImportError as msg:
                        self.msg(2, "ImportError:", str(msg))
                        self._add_badmodule(fullname, caller)

    def scan_opcodes(self, co):
        """Scan a code object, yielding ("store", ...) for name stores and
        ("absolute_import"/"relative_import", ...) for IMPORT_NAME sites."""
        # Scan the code, and yield 'interesting' opcode combinations
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        # Flatten bytecode to (opcode, arg) pairs, dropping EXTENDED_ARG
        # markers (their value is folded into the following instruction).
        opargs = [(op, arg) for _, op, arg in dis._unpack_opargs(code)
                  if op != EXTENDED_ARG]
        for i, (op, oparg) in enumerate(opargs):
            if op in STORE_OPS:
                yield "store", (names[oparg],)
                continue
            if (op == IMPORT_NAME and i >= 2
                    and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST):
                # IMPORT_NAME consumes: LOAD_CONST level, LOAD_CONST fromlist.
                level = consts[opargs[i-2][1]]
                fromlist = consts[opargs[i-1][1]]
                if level == 0: # absolute import
                    yield "absolute_import", (fromlist, names[oparg])
                else: # relative import
                    yield "relative_import", (level, fromlist, names[oparg])
                continue

    def scan_code(self, co, m):
        """Walk code object *co*, recording global stores and simulating
        the imports it performs, attributing everything to Module *m*."""
        code = co.co_code  # NOTE(review): unused; scanner reads co directly
        scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what == "absolute_import":
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                self._safe_import_hook(name, m, fromlist, level=0)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            # Star-import from a non-Python module: unknown names.
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    # "from . import x" style: import from the parent package.
                    parent = self.determine_parent(m, level=level)
                    self._safe_import_hook(parent.__name__, None, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)

        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)

    def load_package(self, fqname, pathname):
        """Simulate loading package *fqname* from directory *pathname*."""
        self.msgin(2, "load_package", fqname, pathname)
        # Honor ReplacePackage() registrations.
        newname = replacePackageMap.get(fqname)
        if newname:
            fqname = newname
        m = self.add_module(fqname)
        m.__file__ = pathname
        m.__path__ = [pathname]

        # As per comment at top of file, simulate runtime __path__ additions.
        m.__path__ = m.__path__ + packagePathMap.get(fqname, [])

        fp, buf, stuff = self.find_module("__init__", m.__path__)
        try:
            self.load_module(fqname, fp, buf, stuff)
            self.msgout(2, "load_package ->", m)
            return m
        finally:
            if fp:
                fp.close()

    def add_module(self, fqname):
        """Return the cached Module for *fqname*, creating it on first use."""
        if fqname in self.modules:
            return self.modules[fqname]
        self.modules[fqname] = m = Module(fqname)
        return m

    def find_module(self, name, path, parent=None):
        """Locate module *name* on *path*; returns the legacy
        (file, pathname, description) triple.  Raises ImportError for
        excluded modules and lookup failures."""
        if parent is not None:
            # assert path is not None
            fullname = parent.__name__+'.'+name
        else:
            fullname = name
        if fullname in self.excludes:
            self.msgout(3, "find_module -> Excluded", fullname)
            raise ImportError(name)

        if path is None:
            if name in sys.builtin_module_names:
                return (None, None, ("", "", _C_BUILTIN))
            # Fall back to the finder's configured search path.
            path = self.path
        return _find_module(name, path)

    def report(self):
        """Print a report to stdout, listing the found modules with their
        paths, as well as modules that are missing, or seem to be missing.
        """
        print()
        print(" %-25s %s" % ("Name", "File"))
        print(" %-25s %s" % ("----", "----"))
        # Print modules found ('P' = package, 'm' = plain module)
        keys = sorted(self.modules.keys())
        for key in keys:
            m = self.modules[key]
            if m.__path__:
                print("P", end=' ')
            else:
                print("m", end=' ')
            print("%-25s" % key, m.__file__ or "")

        # Print missing modules
        missing, maybe = self.any_missing_maybe()
        if missing:
            print()
            print("Missing modules:")
            for name in missing:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))
        # Print modules that may be missing, but then again, maybe not...
        if maybe:
            print()
            print("Submodules that appear to be missing, but could also be", end=' ')
            print("global names in the parent package:")
            for name in maybe:
                mods = sorted(self.badmodules[name].keys())
                print("?", name, "imported from", ', '.join(mods))

    def any_missing(self):
        """Return a list of modules that appear to be missing. Use
        any_missing_maybe() if you want to know which modules are
        certain to be missing, and which *may* be missing.
        """
        missing, maybe = self.any_missing_maybe()
        return missing + maybe

    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.

        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                continue
            # Dotless names are plain modules: if they failed, they're missing.
            i = name.rfind(".")
            if i < 0:
                missing.append(name)
                continue
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # It's not a global in the package, the package didn't
                    # do funny star imports, it's very likely to be missing.
                    # The symbol could be inserted into the package from the
                    # outside, but since that's not good style we simply list
                    # it missing.
                    missing.append(name)
            else:
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe

    def replace_paths_in_code(self, co):
        """Return a copy of code object *co* with co_filename rewritten
        per self.replace_paths; recurses into nested code constants."""
        new_filename = original_filename = os.path.normpath(co.co_filename)
        # First matching prefix wins.
        for f, r in self.replace_paths:
            if original_filename.startswith(f):
                new_filename = r + original_filename[len(f):]
                break

        if self.debug and original_filename not in self.processed_paths:
            if new_filename != original_filename:
                self.msgout(2, "co_filename %r changed to %r" \
                                    % (original_filename,new_filename,))
            else:
                self.msgout(2, "co_filename %r remains unchanged" \
                                    % (original_filename,))
            self.processed_paths.append(original_filename)

        consts = list(co.co_consts)
        for i in range(len(consts)):
            if isinstance(consts[i], type(co)):
                consts[i] = self.replace_paths_in_code(consts[i])

        return co.replace(co_consts=tuple(consts), co_filename=new_filename)
def test():
    """Command-line driver: scan the script named on the command line with
    a ModuleFinder and print its report.

    Options: -d increase debug level, -q quiet, -m treat remaining args
    as module names, -p PATH prepend search paths, -x NAME exclude NAME.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error as msg:
        print(msg)
        return

    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)

    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]

    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print("path:")
        for item in path:
            print(" ", repr(item))

    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            # A literal '-m' among the extra args switches to module mode.
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
if __name__ == '__main__':
    try:
        mf = test()
    except KeyboardInterrupt:
        # Let Ctrl-C stop a long scan without a traceback.
        print("\n[interrupted]")
| 24,430 | 34.510174 | 86 | py |
pydeps | pydeps-master/docs/module-finder-archive/mf_37.py | """Find modules used by a script, using introspection."""
import dis
import importlib._bootstrap_external
import importlib.machinery
import marshal
import os
import sys
import types
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import imp
LOAD_CONST = dis.opmap['LOAD_CONST']
IMPORT_NAME = dis.opmap['IMPORT_NAME']
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
STORE_OPS = STORE_NAME, STORE_GLOBAL
EXTENDED_ARG = dis.EXTENDED_ARG
# Modulefinder does a good job at simulating Python's import machinery,
# but it cannot handle __path__ modifications that packages make at
# runtime.  Extra paths registered here for a package name will be
# honored.  Maps package name -> list of extra path entries.
packagePathMap = {}

# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra runtime __path__ entry for *packagename*."""
    entries = packagePathMap.get(packagename)
    if entries is None:
        entries = packagePathMap[packagename] = []
    entries.append(path)
# Maps real package name -> name it should be reported under.
replacePackageMap = {}

# This ReplacePackage mechanism allows modulefinder to work around
# situations in which a package injects itself under the name
# of another package into sys.modules at runtime by calling
# ReplacePackage("real_package_name", "faked_package_name")
# before running ModuleFinder.

def ReplacePackage(oldname, newname):
    """Register that package *oldname* should be treated as *newname*."""
    replacePackageMap[oldname] = newname
class Module:
    """Placeholder describing a module discovered (never executed) by
    ModuleFinder."""

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned to in the module, including names that
        # arrived through star-imports of Python modules.
        self.globalnames = {}
        # Star-imports this module did that could not be resolved,
        # i.e. a star-import from a non-Python module.
        self.starimports = {}

    def __repr__(self):
        parts = [repr(self.__name__)]
        if self.__file__ is not None:
            parts.append(repr(self.__file__))
        if self.__path__ is not None:
            parts.append(repr(self.__path__))
        return "Module(%s)" % ", ".join(parts)
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
if path is None:
path = sys.path
self.path = path
self.modules = {}
self.badmodules = {}
self.debug = debug
self.indent = 0
self.excludes = excludes
self.replace_paths = replace_paths
self.processed_paths = [] # Used in debugging only
def msg(self, level, str, *args):
if level <= self.debug:
for i in range(self.indent):
print(" ", end=' ')
print(str, end=' ')
for arg in args:
print(repr(arg), end=' ')
print()
def msgin(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent + 1
self.msg(*args)
def msgout(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent - 1
self.msg(*args)
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
with open(pathname) as fp:
stuff = ("", "r", imp.PY_SOURCE)
self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
with open(pathname) as fp:
stuff = (ext, "r", imp.PY_SOURCE)
self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None, level=-1):
self.msg(3, "import_hook", name, caller, fromlist, level)
parent = self.determine_parent(caller, level=level)
q, tail = self.find_head_package(parent, name)
m = self.load_tail(q, tail)
if not fromlist:
return q
if m.__path__:
self.ensure_fromlist(m, fromlist)
return None
def determine_parent(self, caller, level=-1):
self.msgin(4, "determine_parent", caller, level)
if not caller or level == 0:
self.msgout(4, "determine_parent -> None")
return None
pname = caller.__name__
if level >= 1: # relative import
if caller.__path__:
level -= 1
if level == 0:
parent = self.modules[pname]
assert parent is caller
self.msgout(4, "determine_parent ->", parent)
return parent
if pname.count(".") < level:
raise ImportError("relative importpath too deep")
pname = ".".join(pname.split(".")[:-level])
parent = self.modules[pname]
self.msgout(4, "determine_parent ->", parent)
return parent
if caller.__path__:
parent = self.modules[pname]
assert caller is parent
self.msgout(4, "determine_parent ->", parent)
return parent
if '.' in pname:
i = pname.rfind('.')
pname = pname[:i]
parent = self.modules[pname]
assert parent.__name__ == pname
self.msgout(4, "determine_parent ->", parent)
return parent
self.msgout(4, "determine_parent -> None")
return None
def find_head_package(self, parent, name):
self.msgin(4, "find_head_package", parent, name)
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
if parent:
qname = head
parent = None
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
self.msgout(4, "raise ImportError: No module named", qname)
raise ImportError("No module named " + qname)
def load_tail(self, q, tail):
self.msgin(4, "load_tail", q, tail)
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_module(head, mname, m)
if not m:
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError("No module named " + mname)
self.msgout(4, "load_tail ->", m)
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
    """Ensure every name in *fromlist* that is a submodule of *m* is loaded.

    A "*" entry expands to all submodules of *m*; *recursive* guards
    against expanding "*" again inside that expansion.
    """
    self.msg(4, "ensure_fromlist", m, fromlist, recursive)
    for sub in fromlist:
        if sub == "*":
            if not recursive:
                all = self.find_all_submodules(m)
                if all:
                    self.ensure_fromlist(m, all, 1)
        elif not hasattr(m, sub):
            # Not an attribute of m, so it must be a submodule: import it.
            subname = "%s.%s" % (m.__name__, sub)
            submod = self.import_module(sub, subname, m)
            if not submod:
                raise ImportError("No module named " + subname)
def find_all_submodules(self, m):
    """Return the names of all submodules found in package *m*'s directories.

    Returns a dict keys view (insertion-ordered, duplicate-free), or None
    when *m* has no __path__.
    """
    if not m.__path__:
        return
    # Dict used as an ordered set of discovered module names.
    modules = {}
    # 'suffixes' used to be a list hardcoded to [".py", ".pyc"].
    # But we must also collect Python extension modules - although
    # we cannot separate normal dlls from Python extensions.
    suffixes = (importlib.machinery.EXTENSION_SUFFIXES
                + importlib.machinery.SOURCE_SUFFIXES
                + importlib.machinery.BYTECODE_SUFFIXES)
    for directory in m.__path__:
        try:
            entries = os.listdir(directory)
        except OSError:
            self.msg(2, "can't list directory", directory)
            continue
        for entry in entries:
            for suffix in suffixes:
                if entry.endswith(suffix):
                    candidate = entry[:-len(suffix)]
                    if candidate and candidate != "__init__":
                        modules[candidate] = candidate
                    break
    return modules.keys()
def import_module(self, partname, fqname, parent):
    """Import a single module component and record it in self.modules.

    *partname* is the unqualified name, *fqname* the fully dotted name,
    *parent* the parent package Module (or None).  Returns the Module,
    or None if it cannot be found or was previously recorded as bad.
    """
    self.msgin(3, "import_module", partname, fqname, parent)
    try:
        m = self.modules[fqname]
    except KeyError:
        pass
    else:
        # Already imported earlier in this run.
        self.msgout(3, "import_module ->", m)
        return m
    if fqname in self.badmodules:
        self.msgout(3, "import_module -> None")
        return None
    if parent and parent.__path__ is None:
        # Parent is not a package, so it cannot contain submodules.
        self.msgout(3, "import_module -> None")
        return None
    try:
        fp, pathname, stuff = self.find_module(partname,
                                               parent and parent.__path__, parent)
    except ImportError:
        self.msgout(3, "import_module ->", None)
        return None
    try:
        m = self.load_module(fqname, fp, pathname, stuff)
    finally:
        # Always close the file handle returned by find_module().
        if fp:
            fp.close()
    if parent:
        # Bind the submodule as an attribute of its parent package.
        setattr(parent, partname, m)
    self.msgout(3, "import_module ->", m)
    return m
def load_module(self, fqname, fp, pathname, file_info):
    """Create a Module for *fqname* and scan its code for imports.

    *file_info* is the (suffix, mode, type) triple from find_module().
    Packages are delegated to load_package(); source is compiled and
    bytecode unmarshalled; anything else (e.g. extension modules)
    yields a Module with no code object.
    """
    suffix, mode, type = file_info
    self.msgin(2, "load_module", fqname, fp and "fp", pathname)
    if type == imp.PKG_DIRECTORY:
        m = self.load_package(fqname, pathname)
        self.msgout(2, "load_module ->", m)
        return m
    if type == imp.PY_SOURCE:
        co = compile(fp.read()+'\n', pathname, 'exec')
    elif type == imp.PY_COMPILED:
        try:
            # Validate the .pyc header before trusting its payload.
            data = fp.read()
            importlib._bootstrap_external._classify_pyc(data, fqname, {})
        except ImportError as exc:
            self.msgout(2, "raise ImportError: " + str(exc), pathname)
            raise
        # Skip the 16-byte pyc header; the rest is the marshalled code.
        co = marshal.loads(memoryview(data)[16:])
    else:
        co = None
    m = self.add_module(fqname)
    m.__file__ = pathname
    if co:
        if self.replace_paths:
            co = self.replace_paths_in_code(co)
        m.__code__ = co
        self.scan_code(co, m)
    self.msgout(2, "load_module ->", m)
    return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
def _safe_import_hook(self, name, caller, fromlist, level=-1):
    """Wrapper for self.import_hook() that won't raise ImportError.

    Failed imports are recorded in self.badmodules instead, both for
    the top-level *name* and for each failing *fromlist* entry.
    """
    if name in self.badmodules:
        self._add_badmodule(name, caller)
        return
    try:
        self.import_hook(name, caller, level=level)
    except ImportError as msg:
        self.msg(2, "ImportError:", str(msg))
        self._add_badmodule(name, caller)
    else:
        if fromlist:
            for sub in fromlist:
                # Already known bad: just record the new caller.
                if sub in self.badmodules:
                    self._add_badmodule(sub, caller)
                    continue
                try:
                    self.import_hook(name, caller, [sub], level=level)
                except ImportError as msg:
                    self.msg(2, "ImportError:", str(msg))
                    fullname = name + "." + sub
                    self._add_badmodule(fullname, caller)
def scan_opcodes(self, co):
    """Scan code object *co*, yielding 'interesting' opcode combinations.

    Yields ("store", (name,)), ("absolute_import", (fromlist, name)) and
    ("relative_import", (level, fromlist, name)) tuples.  Relies on the
    module-level opcode constants STORE_OPS, LOAD_CONST, IMPORT_NAME and
    EXTENDED_ARG defined elsewhere in this file.
    """
    code = co.co_code
    names = co.co_names
    consts = co.co_consts
    # Drop EXTENDED_ARG entries: dis has already folded their value into
    # the following instruction's argument.
    opargs = [(op, arg) for _, op, arg in dis._unpack_opargs(code)
              if op != EXTENDED_ARG]
    for i, (op, oparg) in enumerate(opargs):
        if op in STORE_OPS:
            yield "store", (names[oparg],)
            continue
        # An IMPORT_NAME is preceded by LOAD_CONST level, LOAD_CONST fromlist.
        if (op == IMPORT_NAME and i >= 2
                and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST):
            level = consts[opargs[i-2][1]]
            fromlist = consts[opargs[i-1][1]]
            if level == 0: # absolute import
                yield "absolute_import", (fromlist, names[oparg])
            else: # relative import
                yield "relative_import", (level, fromlist, names[oparg])
            continue
def scan_code(self, co, m):
    """Scan code object *co*, recording global stores and imports on Module *m*.

    Dispatches on the events yielded by scan_opcodes() and recurses into
    nested code objects (functions, classes, comprehensions).
    """
    code = co.co_code
    scanner = self.scan_opcodes
    for what, args in scanner(co):
        if what == "store":
            name, = args
            m.globalnames[name] = 1
        elif what == "absolute_import":
            fromlist, name = args
            have_star = 0
            if fromlist is not None:
                if "*" in fromlist:
                    have_star = 1
                fromlist = [f for f in fromlist if f != "*"]
            self._safe_import_hook(name, m, fromlist, level=0)
            if have_star:
                # We've encountered an "import *". If it is a Python module,
                # the code has already been parsed and we can suck out the
                # global names.
                mm = None
                if m.__path__:
                    # At this point we don't know whether 'name' is a
                    # submodule of 'm' or a global module. Let's just try
                    # the full name first.
                    mm = self.modules.get(m.__name__ + "." + name)
                if mm is None:
                    mm = self.modules.get(name)
                if mm is not None:
                    m.globalnames.update(mm.globalnames)
                    m.starimports.update(mm.starimports)
                    if mm.__code__ is None:
                        # No code object (e.g. extension module): its names
                        # are unknowable, so remember the star-import.
                        m.starimports[name] = 1
                else:
                    m.starimports[name] = 1
        elif what == "relative_import":
            level, fromlist, name = args
            if name:
                self._safe_import_hook(name, m, fromlist, level=level)
            else:
                # "from . import x": resolve against the parent package.
                parent = self.determine_parent(m, level=level)
                self._safe_import_hook(parent.__name__, None, fromlist, level=0)
        else:
            # We don't expect anything else from the generator.
            raise RuntimeError(what)
    for c in co.co_consts:
        if isinstance(c, type(co)):
            self.scan_code(c, m)
def load_package(self, fqname, pathname):
    """Create and scan the package located at *pathname* under *fqname*.

    Applies the module-level replacePackageMap / packagePathMap
    remappings, then loads and scans the package's __init__ module.
    """
    self.msgin(2, "load_package", fqname, pathname)
    newname = replacePackageMap.get(fqname)
    if newname:
        fqname = newname
    m = self.add_module(fqname)
    m.__file__ = pathname
    m.__path__ = [pathname]

    # As per comment at top of file, simulate runtime __path__ additions.
    m.__path__ = m.__path__ + packagePathMap.get(fqname, [])

    fp, buf, stuff = self.find_module("__init__", m.__path__)
    try:
        self.load_module(fqname, fp, buf, stuff)
        self.msgout(2, "load_package ->", m)
        return m
    finally:
        if fp:
            fp.close()
def add_module(self, fqname):
    """Return the Module registered under *fqname*, creating it if absent."""
    try:
        return self.modules[fqname]
    except KeyError:
        module = self.modules[fqname] = Module(fqname)
        return module
def find_module(self, name, path, parent=None):
    """Locate module *name* on *path*; return (fp, pathname, description).

    Names listed in self.excludes raise ImportError immediately.  With no
    path, builtin modules are reported without a file and self.path is
    searched instead.
    """
    if parent is not None:
        # assert path is not None
        fullname = parent.__name__+'.'+name
    else:
        fullname = name
    if fullname in self.excludes:
        self.msgout(3, "find_module -> Excluded", fullname)
        raise ImportError(name)

    if path is None:
        if name in sys.builtin_module_names:
            return (None, None, ("", "", imp.C_BUILTIN))

        path = self.path
    return imp.find_module(name, path)
def report(self):
    """Print a report to stdout, listing the found modules with their
    paths, as well as modules that are missing, or seem to be missing.
    """
    print()
    print("  %-25s %s" % ("Name", "File"))
    print("  %-25s %s" % ("----", "----"))
    # Print modules found: 'P' marks packages, 'm' plain modules.
    keys = sorted(self.modules.keys())
    for key in keys:
        m = self.modules[key]
        if m.__path__:
            print("P", end=' ')
        else:
            print("m", end=' ')
        print("%-25s" % key, m.__file__ or "")

    # Print missing modules
    missing, maybe = self.any_missing_maybe()
    if missing:
        print()
        print("Missing modules:")
        for name in missing:
            mods = sorted(self.badmodules[name].keys())
            print("?", name, "imported from", ', '.join(mods))
    # Print modules that may be missing, but then again, maybe not...
    if maybe:
        print()
        print("Submodules that appear to be missing, but could also be", end=' ')
        print("global names in the parent package:")
        for name in maybe:
            mods = sorted(self.badmodules[name].keys())
            print("?", name, "imported from", ', '.join(mods))
def any_missing(self):
    """Return a list of modules that appear to be missing. Use
    any_missing_maybe() if you want to know which modules are
    certain to be missing, and which *may* be missing.
    """
    certain, possible = self.any_missing_maybe()
    return certain + possible
def any_missing_maybe(self):
    """Return two lists, one with modules that are certainly missing
    and one with modules that *may* be missing. The latter names could
    either be submodules *or* just global names in the package.

    The reason it can't always be determined is that it's impossible to
    tell which names are imported when "from module import *" is done
    with an extension module, short of actually importing it.
    """
    missing = []
    maybe = []
    for name in self.badmodules:
        if name in self.excludes:
            continue
        i = name.rfind(".")
        if i < 0:
            # Top-level name that failed to import: definitely missing.
            missing.append(name)
            continue
        subname = name[i+1:]
        pkgname = name[:i]
        pkg = self.modules.get(pkgname)
        if pkg is not None:
            if pkgname in self.badmodules[name]:
                # The package tried to import this module itself and
                # failed. It's definitely missing.
                missing.append(name)
            elif subname in pkg.globalnames:
                # It's a global in the package: definitely not missing.
                pass
            elif pkg.starimports:
                # It could be missing, but the package did an "import *"
                # from a non-Python module, so we simply can't be sure.
                maybe.append(name)
            else:
                # It's not a global in the package, the package didn't
                # do funny star imports, it's very likely to be missing.
                # The symbol could be inserted into the package from the
                # outside, but since that's not good style we simply list
                # it missing.
                missing.append(name)
        else:
            missing.append(name)
    missing.sort()
    maybe.sort()
    return missing, maybe
def replace_paths_in_code(self, co):
    """Return a copy of code object *co* with co_filename rewritten.

    Applies the first matching (from, to) prefix pair found in
    self.replace_paths and recurses into nested code objects in co_consts.
    """
    new_filename = original_filename = os.path.normpath(co.co_filename)
    for f, r in self.replace_paths:
        if original_filename.startswith(f):
            new_filename = r + original_filename[len(f):]
            break

    # Report each source path only once.
    if self.debug and original_filename not in self.processed_paths:
        if new_filename != original_filename:
            self.msgout(2, "co_filename %r changed to %r" \
                                    % (original_filename,new_filename,))
        else:
            self.msgout(2, "co_filename %r remains unchanged" \
                                    % (original_filename,))
        self.processed_paths.append(original_filename)

    consts = list(co.co_consts)
    for i in range(len(consts)):
        if isinstance(consts[i], type(co)):
            consts[i] = self.replace_paths_in_code(consts[i])

    # NOTE(review): this CodeType signature predates Python 3.8's
    # co_posonlyargcount parameter -- confirm the targeted Python version.
    return types.CodeType(co.co_argcount, co.co_kwonlyargcount,
                          co.co_nlocals, co.co_stacksize, co.co_flags,
                          co.co_code, tuple(consts), co.co_names,
                          co.co_varnames, new_filename, co.co_name,
                          co.co_firstlineno, co.co_lnotab, co.co_freevars,
                          co.co_cellvars)
def test():
    """Command-line demo driver.

    Usage: modulefinder.py [-d] [-m] [-p path] [-q] [-x exclude] script [args]
    Builds a ModuleFinder over *script*, prints its report, and returns
    the finder (handy under python -i).
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error as msg:
        print(msg)
        return

    # Process options: -d more debug, -m treat extra args as modules,
    # -p prepend search-path entries, -q quiet, -x exclude a module.
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)

    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]

    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print("path:")
        for item in path:
            print("   ", repr(item))

    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
if __name__ == '__main__':
    # Run the demo driver; Ctrl-C prints a note instead of a traceback.
    try:
        mf = test()
    except KeyboardInterrupt:
        print("\n[interrupted]")
| 23,035 | 35.334385 | 86 | py |
MRE-ISE | MRE-ISE-main/run.py | import argparse
import logging
import sys
sys.path.append("..")
import torch
import numpy as np
import random
from torchvision import transforms
from torch.utils.data import DataLoader
from cores.gene.model import MRE
from transformers import CLIPProcessor, CLIPModel
from transformers import CLIPConfig
from processor.dataset import MREProcessor, MREDataset
from cores.gene.model import Trainer
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from torch.utils.tensorboard import SummaryWriter
# Shared logging configuration for the training script.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)

# Registry mapping model-name keys to (data processor, dataset) classes.
MODEL_CLASS = {
    'bert': (MREProcessor, MREDataset),
}

# Per-dataset annotation / scene-graph / bag-of-words file locations.
DATA_PATH = {
    'MRE': {'train': 'data/vsg_tsg/ours_train.json',
            'dev': 'data/vsg_tsg/ours_valid.json',
            'test': 'data/vsg_tsg/ours_test.json',
            'vbow': 'data/vsg_tsg/vbow.pk',
            'tbow': 'data/vsg_tsg/tbow.pk'
            }
}

# Per-dataset image directories, one per split.
IMG_PATH = {
    'MRE': {'train': 'data/img_org/train/',
            'dev': 'data/img_org/val/',
            'test': 'data/img_org/test'}}
def set_seed(seed=2021):
    """Seed every RNG used in training for reproducible runs.

    Seeds Python's `random`, NumPy, and PyTorch (CPU + all CUDA devices),
    and configures cuDNN for deterministic kernels.

    Args:
        seed: the seed value (default 2021).
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.enabled = True
    # BUG FIX: benchmark mode autotunes per-shape algorithms and makes
    # results non-reproducible; the original enabled it together with
    # deterministic=True, which is contradictory.  It must be off.
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed)
    random.seed(seed)
def main():
    """Entry point: parse CLI arguments, build data loaders, and train MRE."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrain_name', default='openai/clip-vit-base-patch32', type=str, help="The name of pretrained model")
    parser.add_argument('--dataset_name', default='MRE', type=str, help="The name of example_dataset.")
    parser.add_argument('--num_epochs', default=30, type=int, help="Training epochs")
    parser.add_argument('--device', default='cuda', type=str, help="cuda or cpu")
    parser.add_argument('--batch_size', default=32, type=int, help="batch size")
    parser.add_argument('--lr_pretrained', default=2e-5, type=float, help="pre-trained learning rate")
    parser.add_argument('--lr_main', default=2e-4, type=float, help="learning rate")
    parser.add_argument('--warmup_ratio', default=0.01, type=float)
    parser.add_argument('--eval_begin_epoch', default=1, type=int)
    parser.add_argument('--seed', default=1234, type=int, help="random seed, default is 1")
    parser.add_argument('--load_path', default=None, type=str, help="Load model from load_path")
    parser.add_argument('--save_path', default='ckpt', type=str, help="save model at save_path")
    parser.add_argument('--write_path', default=None, type=str, help="do_test=True, predictions will be write in write_path")
    parser.add_argument('--notes', default="", type=str, help="input some remarks for making save path dir.")
    parser.add_argument('--do_train', action='store_true')
    parser.add_argument('--do_test', action='store_true')
    parser.add_argument('--do_predict', action='store_true')
    parser.add_argument('--max_seq', default=40, type=int)
    parser.add_argument('--max_obj', default=40, type=int)
    parser.add_argument('--hid_size', default=768, type=int, help="hidden state size")
    parser.add_argument('--num_layers', default=2, type=int, help="number of refine layers")
    parser.add_argument('--beta', default=0.01, type=float, help="Default is 1e-2")
    parser.add_argument("--num_per", type=int, default=16, help="Default is 16")
    parser.add_argument("--feature_denoise", type=bool, default=True, help="Default is False.")
    parser.add_argument("--top_k", type=int, default=10, help="Default is 10.")
    parser.add_argument("--epsilon", type=float, default=0.3, help="Default is 0.3.")
    parser.add_argument("--temperature", type=float, default=0.1, help="Default is 0.1.")
    parser.add_argument("--graph_skip_conn", type=float, default=0.0, help="Default is 0.0.")
    parser.add_argument("--graph_include_self", type=bool, default=True, help="Default is True.")
    parser.add_argument("--dropout", type=float, default=0.5, help="Default is 0.0")
    parser.add_argument("--graph_type", type=str, default="epsilonNN", help="epsilonNN, KNN, prob")
    parser.add_argument("--graph_metric_type", type=str, default="multi_mlp")
    parser.add_argument("--repar", type=bool, default=True, help="Default is True.")
    parser.add_argument("--threshold", type=float, default=0.2, help="Default is 0.2.")
    parser.add_argument("--prior_mode", type=str, default="Gaussian", help="Default is Gaussian.")
    parser.add_argument("--is_IB", type=bool, default=True, help="Default is True.")
    parser.add_argument("--eta1", type=float, default=0.7, help="Default is 0.7")
    # BUG FIX: this option was declared as --eta1 twice, which makes argparse
    # raise ArgumentError("conflicting option string") at startup; the help
    # text ("Default is 0.9") shows the second one was meant to be --eta2.
    parser.add_argument("--eta2", type=float, default=0.9, help="Default is 0.9")
    parser.add_argument("--text_bow_size", type=int, default=2000, help="Default is 2000")
    parser.add_argument("--visual_bow_size", type=int, default=2000, help="Default is 2000")
    parser.add_argument("--neighbor_num", type=int, default=2, help="Default is 2")
    parser.add_argument("--topic_keywords_number", type=int, default=10, help="Default is 10")
    parser.add_argument("--topic_number", type=int, default=10, help="Default is 10")
    # The options below were referenced later in this function but never
    # declared (the original crashed with AttributeError); defaults are best
    # guesses -- TODO confirm against the released training configuration.
    parser.add_argument('--model_name', default='bert', type=str, help="Key into MODEL_CLASS")
    parser.add_argument('--vit_name', default='openai/clip-vit-base-patch32', type=str, help="CLIP checkpoint name")
    parser.add_argument('--bert_name', default='bert-base-uncased', type=str, help="Text encoder name")
    parser.add_argument('--aux_size', default=128, type=int, help="Aux image crop size")
    parser.add_argument('--rcnn_size', default=64, type=int, help="RCNN crop size")
    parser.add_argument('--max_obj_num', default=40, type=int, help="Max number of visual objects")

    args = parser.parse_args()

    # NOTE(review): AUX_PATH was referenced here but never defined at module
    # level (NameError); fall back to None so the script can start.
    data_path, img_path = DATA_PATH[args.dataset_name], IMG_PATH[args.dataset_name]
    aux_path = globals().get('AUX_PATH', {}).get(args.dataset_name)
    data_process, dataset_class = MODEL_CLASS[args.model_name]
    re_path = 'data/ours_rel2id.json'

    # Standard ImageNet preprocessing for the raw images.
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])

    set_seed(args.seed)  # set seed, default is 1234
    if args.save_path is not None:  # make save_path dir
        # BUG FIX: args.lr does not exist (only lr_pretrained / lr_main);
        # use the main learning rate in the run-directory name.
        args.save_path = os.path.join(args.save_path, args.model_name, args.dataset_name+"_"+str(args.batch_size)+"_"+str(args.lr_main)+"_"+args.notes)
        if not os.path.exists(args.save_path):
            os.makedirs(args.save_path, exist_ok=True)
    print(args)
    logdir = "logs/" + args.model_name + "_"+args.dataset_name + "_"+str(args.batch_size) + "_" + str(args.lr_main) + args.notes
    writer = SummaryWriter(log_dir=logdir)

    if args.do_train:
        # Build the CLIP processors: full-image, aux-crop and rcnn-crop
        # variants differ only in their feature-extractor sizes.
        clip_vit, clip_processor, aux_processor, rcnn_processor = None, None, None, None
        clip_processor = CLIPProcessor.from_pretrained(args.vit_name)
        aux_processor = CLIPProcessor.from_pretrained(args.vit_name)
        aux_processor.feature_extractor.size, aux_processor.feature_extractor.crop_size = args.aux_size, args.aux_size
        rcnn_processor = CLIPProcessor.from_pretrained(args.vit_name)
        rcnn_processor.feature_extractor.size, rcnn_processor.feature_extractor.crop_size = args.rcnn_size, args.rcnn_size
        clip_model = CLIPModel.from_pretrained(args.vit_name)
        clip_vit = clip_model.vision_model
        clip_text = clip_model.text_model

        processor = data_process(data_path, re_path, args.bert_name, args.vit_name, clip_processor=clip_processor, aux_processor=aux_processor, rcnn_processor=rcnn_processor)
        train_dataset = dataset_class(processor, transform, img_path, aux_path, args.max_seq, aux_size=args.aux_size, rcnn_size=args.rcnn_size, mode='train', max_obj_num=args.max_obj_num)
        train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)

        dev_dataset = dataset_class(processor, transform, img_path, aux_path, args.max_seq, aux_size=args.aux_size, rcnn_size=args.rcnn_size, mode='dev', max_obj_num=args.max_obj_num)
        dev_dataloader = DataLoader(dev_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)

        test_dataset = dataset_class(processor, transform, img_path, aux_path, args.max_seq, aux_size=args.aux_size, rcnn_size=args.rcnn_size, mode='test', max_obj_num=args.max_obj_num)
        test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)

        re_dict = processor.get_relation_dict()
        num_labels = len(re_dict)
        tokenizer = processor.tokenizer

        vision_config = CLIPConfig.from_pretrained(args.vit_name).vision_config
        text_config = CLIPConfig.from_pretrained(args.vit_name).text_config

        model = MRE(args, vision_config, text_config, clip_vit, clip_text, num_labels,
                    args.text_bow_size, args.visual_bow_size, tokenizer, processor)

        trainer = Trainer(train_data=train_dataloader, dev_data=dev_dataloader, test_data=test_dataloader, re_dict=re_dict, model=model, args=args, logger=logger, writer=writer)
        trainer.train()

        torch.cuda.empty_cache()
        writer.close()
if __name__ == "__main__":
    # Script entry point.
    main()
| 9,056 | 52.276471 | 187 | py |
MRE-ISE | MRE-ISE-main/VSG/RelTR_parser/visual_scene_graph.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from PIL import Image
import requests
import matplotlib.pyplot as plt
import json
import pickle
import ast
from tqdm import tqdm
from transformers import CLIPProcessor, CLIPModel
import cv2
from models.backbone import Backbone, Joiner
from models.position_encoding import PositionEmbeddingSine
from models.transformer import Transformer
from models.reltr import RelTR
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# The Visual Genome object categories predicted by RelTR
# (index 0 is the 'N/A' placeholder).
CLASSES = ['N/A', 'airplane', 'animal', 'arm', 'bag', 'banana', 'basket', 'beach', 'bear', 'bed', 'bench', 'bike',
           'bird', 'board', 'boat', 'book', 'boot', 'bottle', 'bowl', 'box', 'boy', 'branch', 'building',
           'bus', 'cabinet', 'cap', 'car', 'cat', 'chair', 'child', 'clock', 'coat', 'counter', 'cow', 'cup',
           'curtain', 'desk', 'dog', 'door', 'drawer', 'ear', 'elephant', 'engine', 'eye', 'face', 'fence',
           'finger', 'flag', 'flower', 'food', 'fork', 'fruit', 'giraffe', 'girl', 'glass', 'glove', 'guy',
           'hair', 'hand', 'handle', 'hat', 'head', 'helmet', 'hill', 'horse', 'house', 'jacket', 'jean',
           'kid', 'kite', 'lady', 'lamp', 'laptop', 'leaf', 'leg', 'letter', 'light', 'logo', 'man', 'men',
           'motorcycle', 'mountain', 'mouth', 'neck', 'nose', 'number', 'orange', 'pant', 'paper', 'paw',
           'people', 'person', 'phone', 'pillow', 'pizza', 'plane', 'plant', 'plate', 'player', 'pole', 'post',
           'pot', 'racket', 'railing', 'rock', 'roof', 'room', 'screen', 'seat', 'sheep', 'shelf', 'shirt',
           'shoe', 'short', 'sidewalk', 'sign', 'sink', 'skateboard', 'ski', 'skier', 'sneaker', 'snow',
           'sock', 'stand', 'street', 'surfboard', 'table', 'tail', 'tie', 'tile', 'tire', 'toilet', 'towel',
           'tower', 'track', 'train', 'tree', 'truck', 'trunk', 'umbrella', 'vase', 'vegetable', 'vehicle',
           'wave', 'wheel', 'window', 'windshield', 'wing', 'wire', 'woman', 'zebra']

# The Visual Genome relation (predicate) categories predicted by RelTR
# (index 0 is '__background__').
REL_CLASSES = ['__background__', 'above', 'across', 'against', 'along', 'and', 'at', 'attached to', 'behind',
               'belonging to', 'between', 'carrying', 'covered in', 'covering', 'eating', 'flying in', 'for',
               'from', 'growing on', 'hanging from', 'has', 'holding', 'in', 'in front of', 'laying on',
               'looking at', 'lying on', 'made of', 'mounted on', 'near', 'of', 'on', 'on back of', 'over',
               'painted on', 'parked on', 'part of', 'playing', 'riding', 'says', 'sitting on', 'standing on',
               'to', 'under', 'using', 'walking in', 'walking on', 'watching', 'wearing', 'wears', 'with']
# for output bounding box post-processing
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x0, y0, x1, y1) corner format.

    Args:
        x: (N, 4) tensor of center-format boxes.
    """
    center_x, center_y, width, height = x.unbind(1)
    half_w = 0.5 * width
    half_h = 0.5 * height
    corners = [center_x - half_w, center_y - half_h,
               center_x + half_w, center_y + half_h]
    return torch.stack(corners, dim=1)
def rescale_bboxes(out_bbox, size):
    """Scale normalized (cx, cy, w, h) boxes to absolute (x0, y0, x1, y1) pixels.

    Args:
        out_bbox: (N, 4) tensor of boxes normalized to [0, 1].
        size: (width, height) of the target image.
    """
    img_w, img_h = size
    b = box_cxcywh_to_xyxy(out_bbox)
    # BUG FIX: build the scale factor directly on the input's device;
    # the original `torch.get_device(out_bbox)` is only defined for CUDA
    # tensors and breaks when boxes live on CPU.
    scale = torch.tensor([img_w, img_h, img_w, img_h],
                         dtype=out_bbox.dtype, device=out_bbox.device)
    return b * scale
def find_repeat(new_bbox, bbox):
    """Return (1, index) if *new_bbox* nearly matches a box in *bbox*, else (0, 0).

    Two boxes are considered duplicates when every coordinate of new_bbox
    lies within [max(0, c - 5), c + 5) of the corresponding coordinate c
    of an existing box.  The first match wins.
    """
    for index, existing in enumerate(bbox):
        is_duplicate = all(
            new_coord in range(max(0, coord - 5), coord + 5)
            for new_coord, coord in zip(new_bbox, existing)
        )
        if is_duplicate:
            return 1, index
    return 0, 0
def construct_scene_graph(data, original_img_dir, target_file, mode='train'):
    """Run RelTR over every image in *data* and attach a visual scene graph.

    For each sample, keeps subject/object/relation triplet predictions whose
    confidences all exceed a threshold, deduplicates near-identical boxes via
    find_repeat(), and stores {'bbox', 'bbox_attri', 'rel'} under the sample's
    'VSG' key.  The augmented list is written to *target_file* as JSON.

    Args:
        data: list of sample dicts, each carrying an 'img_id'.
        original_img_dir: root directory holding <mode>/<img_id> images.
        target_file: output JSON path.
        mode: dataset split subdirectory ('train' / 'val' / 'test').
    """
    # Build the RelTR model: ResNet-50 backbone + encoder/decoder transformer.
    position_embedding = PositionEmbeddingSine(128, normalize=True)
    backbone = Backbone('resnet50', False, False, False)
    backbone = Joiner(backbone, position_embedding)
    backbone.num_channels = 2048

    transformer = Transformer(d_model=256, dropout=0.1, nhead=8,
                              dim_feedforward=2048,
                              num_encoder_layers=6,
                              num_decoder_layers=6,
                              normalize_before=False,
                              return_intermediate_dec=True)

    model = RelTR(backbone, transformer, num_classes=151, num_rel_classes=51,
                  num_entities=100, num_triplets=200)

    # The checkpoint is pretrained on Visual Genome
    ckpt = torch.hub.load_state_dict_from_url(
        url='https://cloud.tnt.uni-hannover.de/index.php/s/PB8xTKspKZF7fyK/download/checkpoint0149.pth',
        check_hash=True)
    model.load_state_dict(ckpt['model'])
    model.to(torch.device('cuda'))

    # Standard ImageNet preprocessing.
    transform = T.Compose([
        T.Resize(800),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    res_list = []
    with torch.no_grad():
        model.eval()
        for d in tqdm(data, total=len(data)):
            res = dict()
            img_id = d['img_id']
            im = Image.open(os.path.join(original_img_dir, mode, img_id))
            img = transform(im).unsqueeze(0)
            img = img.to(torch.device('cuda'))

            # propagate through the model
            outputs = model(img)

            # Keep only triplets whose subject, object and relation are all
            # predicted with confidence above keep_thresh.
            keep_thresh = 0.35
            probas = outputs['rel_logits'].softmax(-1)[0, :, :-1]
            probas_sub = outputs['sub_logits'].softmax(-1)[0, :, :-1]
            probas_obj = outputs['obj_logits'].softmax(-1)[0, :, :-1]
            keep = torch.logical_and(probas.max(-1).values > keep_thresh,
                                     torch.logical_and(probas_sub.max(-1).values > keep_thresh,
                                                       probas_obj.max(-1).values > keep_thresh))

            # convert boxes from [0; 1] to image scales
            sub_bboxes_scaled = rescale_bboxes(outputs['sub_boxes'][0, keep], im.size)
            obj_bboxes_scaled = rescale_bboxes(outputs['obj_boxes'][0, keep], im.size)

            # Order the surviving triplets by joint confidence.
            keep_queries = torch.nonzero(keep, as_tuple=True)[0]
            topk = keep_queries.size(0)
            indices = torch.argsort(
                -probas[keep_queries].max(-1)[0] * probas_sub[keep_queries].max(-1)[0] *
                probas_obj[keep_queries].max(-1)[
                    0])[
                      :topk]
            keep_queries = keep_queries[indices]

            # Node 0 is the whole image, related to itself by 'self'.
            bbox = [[0, 0, im.width, im.height]]
            bbox_attri = ['img']
            rel = [{'s_index': 0, 'o_index': 0, 'name': 'self'}]
            for idx, s, o in zip(keep_queries, sub_bboxes_scaled[indices], obj_bboxes_scaled[indices]):
                # Clamp both boxes to the image bounds after rounding.
                (sxmin, symin, sxmax, symax) = [max(0, round(x)) for x in s.tolist()]
                sxmax, symax = min(im.width, sxmax), min(im.height, symax)
                (oxmin, oymin, oxmax, oymax) = [max(0, round(x)) for x in o.tolist()]
                oxmax, oymax = min(im.width, oxmax), min(im.height, oymax)

                # Reuse an existing node when the subject box duplicates one.
                _flag, _idx = find_repeat((sxmin, symin, sxmax, symax), bbox)
                if not _flag:
                    bbox.append((sxmin, symin, sxmax, symax))
                    bbox_attri.append(CLASSES[probas_sub[idx].argmax()])
                    s_index = len(bbox) - 1
                else:
                    s_index = _idx

                _flag, _idx = find_repeat((oxmin, oymin, oxmax, oymax), bbox)
                if not _flag:
                    bbox.append((oxmin, oymin, oxmax, oymax))
                    # BUG FIX: the object node was labelled with the *subject*
                    # class distribution (probas_sub); use probas_obj instead.
                    bbox_attri.append(CLASSES[probas_obj[idx].argmax()])
                    o_index = len(bbox) - 1
                else:
                    o_index = _idx

                rel_attri = REL_CLASSES[probas[idx].argmax()]
                rel.append({'s_index': s_index, 'o_index': o_index, 'name': rel_attri})

            res['bbox'] = bbox
            res['bbox_attri'] = bbox_attri
            res['rel'] = rel
            d['VSG'] = res
            res_list.append(d)

    assert len(data) == len(res_list)
    with open(target_file, 'w', encoding='utf-8') as f:
        json.dump(res_list, f)
def get_obj_features(dirname):
    """Extract CLIP visual features for every bounding box in a VSG file.

    Loads <stem>.json next to *dirname*, crops each stored bbox from its
    source image, encodes the crop with CLIP ViT-B/32, and pickles the
    augmented records to <stem>.pk.

    NOTE(review): expects records keyed 'img' and images under
    '../../data/img_org/<mode>/'; also assumes a CUDA device (inputs are
    moved to 'cuda' unconditionally) -- confirm before running on CPU.
    """
    vision_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    if torch.cuda.is_available():
        vision_model.to(torch.device('cuda'))
    with open(os.path.join(os.path.dirname(dirname), os.path.basename(dirname).split('.')[0] + '.json')) as f:
        data = json.load(f)
    # The split name ('train'/'val'/'test') is encoded in the file stem.
    mode = os.path.basename(dirname).split('.')[0].split('_')[1]
    with torch.no_grad():
        vision_model.eval()
        for d in tqdm(data, total=len(data)):
            imgid = d['img']
            img_path = os.path.join('../../data/img_org', mode, imgid)
            bbox = d['bbox']
            features = []
            for b in bbox:
                crop_img = cv2.imread(img_path)
                try:
                    # Crop rows y0:y1 and columns x0:x1 from the image array.
                    crop_region = crop_img[b[1]:b[3], b[0]:b[2]]
                except TypeError as e:
                    print(e)
                    print(bbox)
                    print(b)
                    print(imgid)
                    exit(0)
                im = Image.fromarray(crop_region, mode="RGB")
                images = processor(images=im, return_tensors="pt")
                images = images.to(torch.device('cuda'))
                image_features = vision_model.get_image_features(**images).squeeze()
                features.append(image_features.tolist())
            d['features'] = features
    with open(os.path.join(os.path.dirname(dirname), os.path.basename(dirname).split('.')[0] + '.pk'), 'wb') as fout:
        pickle.dump(data, fout)
if __name__ == '__main__':
    # Parse each split's TSG file into a visual scene graph JSON.
    FILE_DIR = '../data/tsg/'
    IMG_DIR = '../data/img_org/'
    DIST_DIR = '../data/vsg_tsg/'
    # NOTE(review): run.py reads 'ours_valid.json' while this loop uses
    # 'ours_val.json' -- confirm the intended split file names match.
    for i in ['ours_train.json', 'ours_val.json', 'ours_test.json']:
        print(f'parsing {i} ... ')
        with open(os.path.join(FILE_DIR, i)) as f:
            data = json.load(f)
        base_name = os.path.basename(i).split('.')[0]
        # Split name is the second underscore-separated token ('train', ...).
        mode = i.split('.')[0].split('_')[1]
        print(mode)
        construct_scene_graph(data, original_img_dir=IMG_DIR, target_file=os.path.join(DIST_DIR, f'{base_name}.json'),
                              mode=mode)
| 12,857 | 45.086022 | 341 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/config.py | """
Configuration file!
"""
import os
from argparse import ArgumentParser
import numpy as np
# Repository root (the directory containing this file) and its data folder.
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(ROOT_PATH, 'data')
def path(fn):
    """Return *fn* resolved inside the data directory DATA_PATH."""
    return os.path.join(DATA_PATH, fn)
def stanford_path(fn):
    """Return *fn* resolved inside DATA_PATH/stanford_filtered."""
    return os.path.join(DATA_PATH, 'stanford_filtered', fn)
# =============================================================================
# Update these with where your data is stored ~~~~~~~~~~~~~~~~~~~~~~~~~
VG_IMAGES = '/datasets2/VG_100K_2/VG_100K'
RCNN_CHECKPOINT_FN = path('faster_rcnn_500k.h5')

IM_DATA_FN = stanford_path('image_data.json')
VG_SGG_FN = stanford_path('VG-SGG.h5')
VG_SGG_DICT_FN = stanford_path('VG-SGG-dicts.json')
PROPOSAL_FN = stanford_path('proposals.h5')

COCO_PATH = 'datasets/mscoco'
# =============================================================================
# =============================================================================

# Valid training/evaluation modes: scene-graph detection, scene-graph
# classification, and predicate classification.
MODES = ('sgdet', 'sgcls', 'predcls')

BOX_SCALE = 1024  # Scale at which we have the boxes
IM_SCALE = 592  # Our images will be resized to this res without padding

# Proposal assignments: IoU thresholds for background sampling.
BG_THRESH_HI = 0.5
BG_THRESH_LO = 0.0

RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
RPN_NEGATIVE_OVERLAP = 0.3

# Max number of foreground examples
RPN_FG_FRACTION = 0.5
FG_FRACTION = 0.25
# Total number of examples
RPN_BATCHSIZE = 256
ROIS_PER_IMG = 256
REL_FG_FRACTION = 0.25
RELS_PER_IMG = 256

RELS_PER_IMG_REFINE = 64

BATCHNORM_MOMENTUM = 0.01
ANCHOR_SIZE = 16

# Anchor shapes; the commented values are the plain-vanilla alternatives.
ANCHOR_RATIOS = (0.23232838, 0.63365731, 1.28478321, 3.15089189) #(0.5, 1, 2)
ANCHOR_SCALES = (2.22152954, 4.12315647, 7.21692515, 12.60263013, 22.7102731) #(4, 8, 16, 32)
class ModelConfig(object):
    """Wrapper class for model hyperparameters.

    Parses the command line exactly once at construction time; every parsed
    option becomes an attribute of the instance (via ``self.__dict__.update``).
    The path-like options ``ckpt``, ``cache`` and ``save_dir`` are resolved
    relative to ROOT_PATH, or normalised to ``None`` when left empty.

    Raises:
        ValueError: if ``mode`` or ``model`` is invalid, or ``ckpt`` names a
            file that does not exist.
    """
    def __init__(self):
        """
        Defaults
        """
        self.coco = None
        self.ckpt = None
        self.save_dir = None
        self.lr = None
        self.batch_size = None
        self.val_size = None
        self.l2 = None
        self.clip = None
        self.num_gpus = None
        self.num_workers = None
        self.print_interval = None
        self.gt_box = None
        self.mode = None
        self.refine = None
        self.ad3 = False
        self.test = False
        self.adam = False
        self.multi_pred = False
        self.cache = None
        self.model = None
        self.use_proposals = False
        self.use_resnet = False
        self.use_tanh = False
        self.use_bias = False
        self.limit_vision = False
        self.num_epochs = None
        self.old_feats = False
        self.order = None
        self.det_ckpt = None
        self.nl_edge = None
        self.nl_obj = None
        self.hidden_dim = None
        self.pass_in_obj_feats_to_decoder = None
        self.pass_in_obj_feats_to_edge = None
        self.pooling_dim = None
        self.rec_dropout = None
        self.parser = self.setup_parser()
        self.args = vars(self.parser.parse_args())
        print("~~~~~~~~ Hyperparameters used: ~~~~~~~")
        for x, y in self.args.items():
            print("{} : {}".format(x, y))
        # Promote every parsed option to an instance attribute.
        self.__dict__.update(self.args)
        # An empty string means "not supplied" for these path options.
        if len(self.ckpt) != 0:
            self.ckpt = os.path.join(ROOT_PATH, self.ckpt)
        else:
            self.ckpt = None
        if len(self.cache) != 0:
            self.cache = os.path.join(ROOT_PATH, self.cache)
        else:
            self.cache = None
        if len(self.save_dir) == 0:
            self.save_dir = None
        else:
            self.save_dir = os.path.join(ROOT_PATH, self.save_dir)
            if not os.path.exists(self.save_dir):
                # makedirs (rather than mkdir) so missing intermediate
                # directories are created too; exist_ok guards against a
                # concurrent run creating the directory first.
                os.makedirs(self.save_dir, exist_ok=True)
        assert self.val_size >= 0
        if self.mode not in MODES:
            raise ValueError("Invalid mode: mode must be in {}".format(MODES))
        if self.model not in ('motifnet', 'stanford'):
            raise ValueError("Invalid model {}".format(self.model))
        if self.ckpt is not None and not os.path.exists(self.ckpt):
            raise ValueError("Ckpt file ({}) doesnt exist".format(self.ckpt))
    def setup_parser(self):
        """
        Sets up an argument parser
        :return: the configured (not yet parsed) ArgumentParser
        """
        parser = ArgumentParser(description='training code')
        # Options to deprecate
        parser.add_argument('-coco', dest='coco', help='Use COCO (default to VG)', action='store_true')
        parser.add_argument('-ckpt', dest='ckpt', help='Filename to load from', type=str, default='')
        parser.add_argument('-det_ckpt', dest='det_ckpt', help='Filename to load detection parameters from', type=str, default='')
        parser.add_argument('-save_dir', dest='save_dir',
                            help='Directory to save things to, such as checkpoints/save', default='', type=str)
        parser.add_argument('-ngpu', dest='num_gpus', help='cuantos GPUs tienes', type=int, default=3)
        parser.add_argument('-nwork', dest='num_workers', help='num processes to use as workers', type=int, default=1)
        parser.add_argument('-lr', dest='lr', help='learning rate', type=float, default=1e-3)
        parser.add_argument('-b', dest='batch_size', help='batch size per GPU', type=int, default=2)
        parser.add_argument('-val_size', dest='val_size', help='val size to use (if 0 we wont use val)', type=int, default=5000)
        parser.add_argument('-l2', dest='l2', help='weight decay', type=float, default=1e-4)
        parser.add_argument('-clip', dest='clip', help='gradients will be clipped to have norm less than this', type=float, default=5.0)
        parser.add_argument('-p', dest='print_interval', help='print during training', type=int,
                            default=100)
        parser.add_argument('-m', dest='mode', help='mode \in {sgdet, sgcls, predcls}', type=str,
                            default='sgdet')
        parser.add_argument('-model', dest='model', help='which model to use? (motifnet, stanford). If you want to use the baseline (NoContext) model, then pass in motifnet here, and nl_obj, nl_edge=0', type=str,
                            default='motifnet')
        parser.add_argument('-old_feats', dest='old_feats', help='Use the original image features for the edges', action='store_true')
        parser.add_argument('-order', dest='order', help='Linearization order for Rois (confidence -default, size, random)',
                            type=str, default='confidence')
        parser.add_argument('-cache', dest='cache', help='where should we cache predictions', type=str,
                            default='')
        parser.add_argument('-gt_box', dest='gt_box', help='use gt boxes during training', action='store_true')
        parser.add_argument('-adam', dest='adam', help='use adam. Not recommended', action='store_true')
        parser.add_argument('-test', dest='test', help='test set', action='store_true')
        parser.add_argument('-multipred', dest='multi_pred', help='Allow multiple predicates per pair of box0, box1.', action='store_true')
        parser.add_argument('-nepoch', dest='num_epochs', help='Number of epochs to train the model for', type=int, default=25)
        parser.add_argument('-resnet', dest='use_resnet', help='use resnet instead of VGG', action='store_true')
        parser.add_argument('-proposals', dest='use_proposals', help='Use Xu et als proposals', action='store_true')
        parser.add_argument('-nl_obj', dest='nl_obj', help='Num object layers', type=int, default=1)
        parser.add_argument('-nl_edge', dest='nl_edge', help='Num edge layers', type=int, default=2)
        parser.add_argument('-hidden_dim', dest='hidden_dim', help='Num edge layers', type=int, default=256)
        parser.add_argument('-pooling_dim', dest='pooling_dim', help='Dimension of pooling', type=int, default=4096)
        parser.add_argument('-pass_in_obj_feats_to_decoder', dest='pass_in_obj_feats_to_decoder', action='store_true')
        parser.add_argument('-pass_in_obj_feats_to_edge', dest='pass_in_obj_feats_to_edge', action='store_true')
        parser.add_argument('-rec_dropout', dest='rec_dropout', help='recurrent dropout to add', type=float, default=0.1)
        parser.add_argument('-use_bias', dest='use_bias', action='store_true')
        parser.add_argument('-use_tanh', dest='use_tanh', action='store_true')
        parser.add_argument('-limit_vision', dest='limit_vision', action='store_true')
        return parser
| 8,445 | 41.656566 | 212 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/models/eval_rels.py |
from dataloaders.visual_genome import VGDataLoader, VG
import numpy as np
import torch
from config import ModelConfig
from lib.pytorch_misc import optimistic_restore
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from config import BOX_SCALE, IM_SCALE
import dill as pkl
import os
# Parse CLI options, pick the relation model implementation, build the data
# loaders, and restore the trained weights before evaluation.
conf = ModelConfig()
if conf.model == 'motifnet':
    from lib.rel_model import RelModel
elif conf.model == 'stanford':
    from lib.rel_model_stanford import RelModelStanford as RelModel
else:
    raise ValueError()
train, val, test = VG.splits(num_val_im=conf.val_size, filter_duplicate_rels=True,
                          use_proposals=conf.use_proposals,
                          filter_non_overlap=conf.mode == 'sgdet')
if conf.test:
    val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
                                               batch_size=conf.batch_size,
                                               num_workers=conf.num_workers,
                                               num_gpus=conf.num_gpus)
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
                    num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
                    use_resnet=conf.use_resnet, order=conf.order,
                    nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, hidden_dim=conf.hidden_dim,
                    use_proposals=conf.use_proposals,
                    pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
                    pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
                    pooling_dim=conf.pooling_dim,
                    rec_dropout=conf.rec_dropout,
                    use_bias=conf.use_bias,
                    use_tanh=conf.use_tanh,
                    limit_vision=conf.limit_vision
                    )
detector.cuda()
ckpt = torch.load(conf.ckpt)
optimistic_restore(detector, ckpt['state_dict'])
# if conf.mode == 'sgdet':
#     det_ckpt = torch.load('checkpoints/new_vgdet/vg-19.tar')['state_dict']
#     detector.detector.bbox_fc.weight.data.copy_(det_ckpt['bbox_fc.weight'])
#     detector.detector.bbox_fc.bias.data.copy_(det_ckpt['bbox_fc.bias'])
#     detector.detector.score_fc.weight.data.copy_(det_ckpt['score_fc.weight'])
#     detector.detector.score_fc.bias.data.copy_(det_ckpt['score_fc.bias'])
# Accumulates one prediction entry per image, for optional caching below.
all_pred_entries = []
def val_batch(batch_num, b, evaluator, thrs=(20, 50, 100)):
    """Run the detector on one batch and feed GT/prediction pairs to the evaluator.

    :param batch_num: index of the first image in this batch (already scaled by
        num_gpus at the call site)
    :param b: a loader batch, indexable by the (DataParallel-style) detector
    :param evaluator: dict of BasicSceneGraphEvaluator keyed by mode
    :param thrs: NOTE(review): currently unused
    """
    det_res = detector[b]
    # With a single GPU the detector returns one tuple rather than a list of them.
    if conf.num_gpus == 1:
        det_res = [det_res]
    for i, (boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i) in enumerate(det_res):
        gt_entry = {
            'gt_classes': val.gt_classes[batch_num + i].copy(),
            'gt_relations': val.relationships[batch_num + i].copy(),
            'gt_boxes': val.gt_boxes[batch_num + i].copy(),
        }
        # Relations must never point at the background class (index 0).
        assert np.all(objs_i[rels_i[:,0]] > 0) and np.all(objs_i[rels_i[:,1]] > 0)
        # assert np.all(rels_i[:,2] > 0)
        pred_entry = {
            # Rescale boxes from the network's IM_SCALE resolution back to
            # the BOX_SCALE coordinate frame the GT boxes use.
            'pred_boxes': boxes_i * BOX_SCALE/IM_SCALE,
            'pred_classes': objs_i,
            'pred_rel_inds': rels_i,
            'obj_scores': obj_scores_i,
            'rel_scores': pred_scores_i,
        }
        all_pred_entries.append(pred_entry)
        evaluator[conf.mode].evaluate_scene_graph_entry(
            gt_entry,
            pred_entry,
        )
evaluator = BasicSceneGraphEvaluator.all_modes(multiple_preds=conf.multi_pred)
# If a prediction cache exists, score it directly; otherwise run the detector
# over the validation set and (optionally) write the cache afterwards.
if conf.cache is not None and os.path.exists(conf.cache):
    print("Found {}! Loading from it".format(conf.cache))
    with open(conf.cache,'rb') as f:
        all_pred_entries = pkl.load(f)
    for i, pred_entry in enumerate(tqdm(all_pred_entries)):
        gt_entry = {
            'gt_classes': val.gt_classes[i].copy(),
            'gt_relations': val.relationships[i].copy(),
            'gt_boxes': val.gt_boxes[i].copy(),
        }
        evaluator[conf.mode].evaluate_scene_graph_entry(
            gt_entry,
            pred_entry,
        )
    evaluator[conf.mode].print_stats()
else:
    detector.eval()
    for val_b, batch in enumerate(tqdm(val_loader)):
        val_batch(conf.num_gpus*val_b, batch, evaluator)
    evaluator[conf.mode].print_stats()
    if conf.cache is not None:
        with open(conf.cache,'wb') as f:
            pkl.dump(all_pred_entries, f)
| 4,353 | 37.530973 | 89 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/models/train_detector.py | """
Training script 4 Detection
"""
from dataloaders.mscoco import CocoDetection, CocoDataLoader
from dataloaders.visual_genome import VGDataLoader, VG
from lib.object_detector import ObjectDetector
import numpy as np
from torch import optim
import torch
import pandas as pd
import time
import os
from config import ModelConfig, FG_FRACTION, RPN_FG_FRACTION, IM_SCALE, BOX_SCALE
from torch.nn import functional as F
from lib.fpn.box_utils import bbox_loss
import torch.backends.cudnn as cudnn
from pycocotools.cocoeval import COCOeval
from lib.pytorch_misc import optimistic_restore, clip_grad_norm
from torch.optim.lr_scheduler import ReduceLROnPlateau
cudnn.benchmark = True
conf = ModelConfig()
# Choose the dataset (COCO or Visual Genome) and build the matching loaders.
if conf.coco:
    train, val = CocoDetection.splits()
    val.ids = val.ids[:conf.val_size]
    train.ids = train.ids
    train_loader, val_loader = CocoDataLoader.splits(train, val, batch_size=conf.batch_size,
                                                     num_workers=conf.num_workers,
                                                     num_gpus=conf.num_gpus)
else:
    train, val, _ = VG.splits(num_val_im=conf.val_size, filter_non_overlap=False,
                              filter_empty_rels=False, use_proposals=conf.use_proposals)
    train_loader, val_loader = VGDataLoader.splits(train, val, batch_size=conf.batch_size,
                                                   num_workers=conf.num_workers,
                                                   num_gpus=conf.num_gpus)
detector = ObjectDetector(classes=train.ind_to_classes, num_gpus=conf.num_gpus,
                          mode='rpntrain' if not conf.use_proposals else 'proposals', use_resnet=conf.use_resnet)
detector.cuda()
# Note: if you're doing the stanford setup, you'll need to change this to freeze the lower layers
if conf.use_proposals:
    for n, param in detector.named_parameters():
        if n.startswith('features'):
            param.requires_grad = False
# LR is scaled linearly with the effective batch size (GPUs * per-GPU batch).
optimizer = optim.SGD([p for p in detector.parameters() if p.requires_grad],
                      weight_decay=conf.l2, lr=conf.lr * conf.num_gpus * conf.batch_size, momentum=0.9)
scheduler = ReduceLROnPlateau(optimizer, 'max', patience=3, factor=0.1,
                              verbose=True, threshold=0.001, threshold_mode='abs', cooldown=1)
# Resume from a checkpoint when one is given and its weights load cleanly.
start_epoch = -1
if conf.ckpt is not None:
    ckpt = torch.load(conf.ckpt)
    if optimistic_restore(detector, ckpt['state_dict']):
        start_epoch = ckpt['epoch']
def train_epoch(epoch_num):
    """Run one full pass over train_loader; return all per-batch losses concatenated."""
    detector.train()
    per_batch_losses = []
    tick = time.time()
    for step, batch in enumerate(train_loader):
        per_batch_losses.append(train_batch(batch))
        # Periodically report the mean loss over the last print_interval batches.
        if step >= conf.print_interval and step % conf.print_interval == 0:
            recent = pd.concat(per_batch_losses[-conf.print_interval:], axis=1).mean(1)
            time_per_batch = (time.time() - tick) / conf.print_interval
            print("\ne{:2d}b{:5d}/{:5d} {:.3f}s/batch, {:.1f}m/epoch".format(
                epoch_num, step, len(train_loader), time_per_batch, len(train_loader) * time_per_batch / 60))
            print(recent)
            print('-----------', flush=True)
            tick = time.time()
    return pd.concat(per_batch_losses, axis=1)
def train_batch(b):
    """
    Run one optimisation step of the object detector and return its losses.

    :param b: contains:
        :param imgs: the image, [batch_size, 3, IM_SIZE, IM_SIZE]
        :param all_anchors: [num_anchors, 4] the boxes of all anchors that we'll be using
        :param all_anchor_inds: [num_anchors, 2] array of the indices into the concatenated
                                RPN feature vector that give us all_anchors,
                                each one (img_ind, fpn_idx)
        :param im_sizes: a [batch_size, 4] numpy array of (h, w, scale, num_good_anchors) for each image.
        :param num_anchors_per_img: int, number of anchors in total over the feature pyramid per img
        Training parameters:
        :param train_anchor_inds: a [num_train, 5] array of indices for the anchors that will
                                  be used to compute the training loss (img_ind, fpn_idx)
        :param gt_boxes: [num_gt, 4] GT boxes over the batch.
        :param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
    :return: pd.Series of scalar losses (includes RPN terms unless proposals are used)
    """
    result = detector[b]
    scores = result.od_obj_dists
    box_deltas = result.od_box_deltas
    labels = result.od_obj_labels
    roi_boxes = result.od_box_priors
    bbox_targets = result.od_box_targets
    rpn_scores = result.rpn_scores
    rpn_box_deltas = result.rpn_box_deltas
    # detector loss
    # Foreground ROIs are those with a non-background label.
    valid_inds = (labels.data != 0).nonzero().squeeze(1)
    fg_cnt = valid_inds.size(0)
    bg_cnt = labels.size(0) - fg_cnt
    class_loss = F.cross_entropy(scores, labels)
    # No gather_nd in pytorch so instead convert first 2 dims of tensor to 1d
    box_reg_mult = 2 * (1. / FG_FRACTION) * fg_cnt / (fg_cnt + bg_cnt + 1e-4)
    twod_inds = valid_inds * box_deltas.size(1) + labels[valid_inds].data
    box_loss = bbox_loss(roi_boxes[valid_inds], box_deltas.view(-1, 4)[twod_inds],
                         bbox_targets[valid_inds]) * box_reg_mult
    loss = class_loss + box_loss
    # RPN loss
    if not conf.use_proposals:
        train_anchor_labels = b.train_anchor_labels[:, -1]
        train_anchors = b.train_anchors[:, :4]
        train_anchor_targets = b.train_anchors[:, 4:]
        train_valid_inds = (train_anchor_labels.data == 1).nonzero().squeeze(1)
        rpn_class_loss = F.cross_entropy(rpn_scores, train_anchor_labels)
        # print("{} fg {} bg, ratio of {:.3f} vs {:.3f}. RPN {}fg {}bg ratio of {:.3f} vs {:.3f}".format(
        #     fg_cnt, bg_cnt, fg_cnt / (fg_cnt + bg_cnt + 1e-4), FG_FRACTION,
        #     train_valid_inds.size(0), train_anchor_labels.size(0)-train_valid_inds.size(0),
        #     train_valid_inds.size(0) / (train_anchor_labels.size(0) + 1e-4), RPN_FG_FRACTION), flush=True)
        rpn_box_mult = 2 * (1. / RPN_FG_FRACTION) * train_valid_inds.size(0) / (train_anchor_labels.size(0) + 1e-4)
        rpn_box_loss = bbox_loss(train_anchors[train_valid_inds],
                                 rpn_box_deltas[train_valid_inds],
                                 train_anchor_targets[train_valid_inds]) * rpn_box_mult
        loss += rpn_class_loss + rpn_box_loss
        # NOTE: `.data[0]` is pre-0.4 PyTorch scalar extraction (today's .item()).
        res = pd.Series([rpn_class_loss.data[0], rpn_box_loss.data[0],
                         class_loss.data[0], box_loss.data[0], loss.data[0]],
                        ['rpn_class_loss', 'rpn_box_loss', 'class_loss', 'box_loss', 'total'])
    else:
        res = pd.Series([class_loss.data[0], box_loss.data[0], loss.data[0]],
                        ['class_loss', 'box_loss', 'total'])
    optimizer.zero_grad()
    loss.backward()
    clip_grad_norm(
        [(n, p) for n, p in detector.named_parameters() if p.grad is not None],
        max_norm=conf.clip, clip=True)
    optimizer.step()
    return res
def val_epoch():
    """Evaluate the detector on the validation set; return COCO-style mAP@0.5."""
    detector.eval()
    # all_boxes is a list of length number-of-classes.
    # Each list element is a list of length number-of-images.
    # Each of those list elements is either an empty list []
    # or a numpy array of detection.
    vr = []
    for val_b, batch in enumerate(val_loader):
        vr.append(val_batch(val_b, batch))
    vr = np.concatenate(vr, 0)
    if vr.shape[0] == 0:
        print("No detections anywhere")
        return 0.0
    val_coco = val.coco
    coco_dt = val_coco.loadRes(vr)
    coco_eval = COCOeval(val_coco, coco_dt, 'bbox')
    coco_eval.params.imgIds = val.ids if conf.coco else [x for x in range(len(val))]
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    # stats[1] is AP at IoU=0.50 in the COCOeval summary.
    mAp = coco_eval.stats[1]
    return mAp
def val_batch(batch_num, b):
    """Detect on one validation batch.

    Returns an [n_det, 7] array of rows (image_id, x1, y1, w, h, score, class)
    in COCO loadRes format; empty (0, 7) when the detector returns nothing.
    """
    result = detector[b]
    if result is None:
        return np.zeros((0, 7))
    scores_np = result.obj_scores.data.cpu().numpy()
    cls_preds_np = result.obj_preds.data.cpu().numpy()
    boxes_np = result.boxes_assigned.data.cpu().numpy()
    im_inds_np = result.im_inds.data.cpu().numpy()
    im_scales = b.im_sizes.reshape((-1, 3))[:, 2]
    if conf.coco:
        # Undo the per-image resize, convert x2/y2 to width/height, and map
        # local class / image indices back to COCO ids.
        boxes_np /= im_scales[im_inds_np][:, None]
        boxes_np[:, 2:4] = boxes_np[:, 2:4] - boxes_np[:, 0:2] + 1
        cls_preds_np[:] = [val.ind_to_id[c_ind] for c_ind in cls_preds_np]
        im_inds_np[:] = [val.ids[im_ind + batch_num * conf.batch_size * conf.num_gpus]
                         for im_ind in im_inds_np]
    else:
        # VG: rescale to the BOX_SCALE coordinate frame instead.
        boxes_np *= BOX_SCALE / IM_SCALE
        boxes_np[:, 2:4] = boxes_np[:, 2:4] - boxes_np[:, 0:2] + 1
        im_inds_np += batch_num * conf.batch_size * conf.num_gpus
    return np.column_stack((im_inds_np, boxes_np, scores_np, cls_preds_np))
print("Training starts now!")
# Main loop: train an epoch, evaluate mAP, step the LR scheduler on it, and
# checkpoint model + optimizer state after every epoch.
for epoch in range(start_epoch + 1, start_epoch + 1 + conf.num_epochs):
    rez = train_epoch(epoch)
    print("overall{:2d}: ({:.3f})\n{}".format(epoch, rez.mean(1)['total'], rez.mean(1)), flush=True)
    mAp = val_epoch()
    scheduler.step(mAp)
    torch.save({
        'epoch': epoch,
        'state_dict': detector.state_dict(),
        'optimizer': optimizer.state_dict(),
    }, os.path.join(conf.save_dir, '{}-{}.tar'.format('coco' if conf.coco else 'vg', epoch)))
| 9,155 | 40.808219 | 115 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/models/eval_rel_count.py | """
Baseline model that works by simply iterating through the training set to make a dictionary.
Also, caches this (we can use this for training).
The model is quite simple, so we don't use the base train/test code
"""
from dataloaders.visual_genome import VGDataLoader, VG
from lib.object_detector import ObjectDetector
import numpy as np
import torch
import os
from lib.get_dataset_counts import get_counts, box_filter
from config import ModelConfig, FG_FRACTION, RPN_FG_FRACTION, DATA_PATH, BOX_SCALE, IM_SCALE, PROPOSAL_FN
import torch.backends.cudnn as cudnn
from lib.pytorch_misc import optimistic_restore, nonintersecting_2d_inds
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from copy import deepcopy
import dill as pkl
cudnn.benchmark = True
conf = ModelConfig()
MUST_OVERLAP=False
train, val, test = VG.splits(num_val_im=conf.val_size, filter_non_overlap=MUST_OVERLAP,
                             filter_duplicate_rels=True,
                             use_proposals=conf.use_proposals)
if conf.test:
    print("test data!")
    val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
                                               batch_size=conf.batch_size,
                                               num_workers=conf.num_workers,
                                               num_gpus=conf.num_gpus)
# Training-set (fg, bg) co-occurrence counts that drive the frequency baseline.
fg_matrix, bg_matrix = get_counts(train_data=train, must_overlap=MUST_OVERLAP)
# Two detector instances sharing weights: one proposes boxes (sgdet), the
# other classifies ground-truth boxes (sgcls/predcls).
detector = ObjectDetector(classes=train.ind_to_classes, num_gpus=conf.num_gpus,
                          mode='rpntrain' if not conf.use_proposals else 'proposals', use_resnet=conf.use_resnet,
                          nms_filter_duplicates=True, thresh=0.01)
detector.eval()
detector.cuda()
classifier = ObjectDetector(classes=train.ind_to_classes, num_gpus=conf.num_gpus,
                            mode='gtbox', use_resnet=conf.use_resnet,
                            nms_filter_duplicates=True, thresh=0.01)
classifier.eval()
classifier.cuda()
ckpt = torch.load(conf.ckpt)
mismatch = optimistic_restore(detector, ckpt['state_dict'])
mismatch = optimistic_restore(classifier, ckpt['state_dict'])
MOST_COMMON_MODE = True
if MOST_COMMON_MODE:
    # Normalise counts (with add-one smoothing on the background column) into
    # per-pair predicate distributions, then zero out the background predicate.
    prob_matrix = fg_matrix.astype(np.float32)
    prob_matrix[:,:,0] = bg_matrix
    # TRYING SOMETHING NEW.
    prob_matrix[:,:,0] += 1
    prob_matrix /= np.sum(prob_matrix, 2)[:,:,None]
    # prob_matrix /= float(fg_matrix.max())
    np.save(os.path.join(DATA_PATH, 'pred_stats.npy'), prob_matrix)
    prob_matrix[:,:,0] = 0 # Zero out BG
else:
    prob_matrix = fg_matrix.astype(np.float64)
    prob_matrix = prob_matrix / prob_matrix.max(2)[:,:,None]
    np.save(os.path.join(DATA_PATH, 'pred_dist.npy'), prob_matrix)
def predict(boxes, classes):
    """Score every candidate object pair with the training-set frequency prior.

    Returns an [n_box, n_box, num_predicates] array: entry (i, j) is the
    predicate distribution looked up from the class pair (classes[i],
    classes[j]); pairs not proposed by box_filter stay all-zero, and the
    background predicate column is zeroed for every pair.
    """
    candidate_pairs = np.array(box_filter(boxes, must_overlap=MUST_OVERLAP), dtype=int)
    n_boxes = boxes.shape[0]
    pair_scores = np.zeros((n_boxes, n_boxes, train.num_predicates))
    for subj, obj in candidate_pairs:
        subj_cls, obj_cls = classes[[subj, obj]]
        pair_scores[subj, obj] = prob_matrix[subj_cls, obj_cls]
    pair_scores[:, :, 0] = 0.0  # never score the background relation
    return pair_scores
# ##########################################################################################
# ##########################################################################################
# For visualizing / exploring
# Class name -> index map for the interactive helper below.
c_to_ind = {c: i for i, c in enumerate(train.ind_to_classes)}
def gimme_the_dist(c1name, c2name):
    """Print the predicate distribution for the (c1name, c2name) class pair,
    sorted by descending probability (zero-probability predicates skipped)."""
    c1 = c_to_ind[c1name]
    c2 = c_to_ind[c2name]
    dist = prob_matrix[c1, c2]
    argz = np.argsort(-dist)
    for i, a in enumerate(argz):
        if dist[a] > 0.0:
            print("{:3d}: {:10s} ({:.4f})".format(i, train.ind_to_predicates[a], dist[a]))
# Tally GT (subject class, object class, predicate) triples over the val set,
# then report how often the most-frequent predicate would be right, and what
# fraction of GT triples are zero-shot under the frequency prior.
counts = np.zeros((train.num_classes, train.num_classes, train.num_predicates), dtype=np.int64)
for ex_ind in tqdm(range(len(val))):
    gt_relations = val.relationships[ex_ind].copy()
    gt_classes = val.gt_classes[ex_ind].copy()
    o1o2 = gt_classes[gt_relations[:, :2]].tolist()
    for (o1, o2), pred in zip(o1o2, gt_relations[:, 2]):
        counts[o1, o2, pred] += 1
zeroshot_case = counts[np.where(prob_matrix == 0)].sum() / float(counts.sum())
# NOTE(review): 51 is hard-coded here — presumably train.num_predicates; confirm.
max_inds = prob_matrix.argmax(2).ravel()
max_counts = counts.reshape(-1, 51)[np.arange(max_inds.shape[0]), max_inds]
most_freq_port = max_counts.sum()/float(counts.sum())
print(" Rel acc={:.2f}%, {:.2f}% zsl".format(
    most_freq_port*100, zeroshot_case*100))
# ##########################################################################################
# ##########################################################################################
T = len(val)
evaluator = BasicSceneGraphEvaluator.all_modes(multiple_preds=conf.multi_pred)
# First do detection results
img_offset = 0
all_pred_entries = {'sgdet':[], 'sgcls':[], 'predcls':[]}
# sgdet: detect boxes, then rank all non-intersecting pairs by
# subject-score * object-score * best predicate prior.
for val_b, b in enumerate(tqdm(val_loader)):
    det_result = detector[b]
    img_ids = b.gt_classes_primary.data.cpu().numpy()[:,0]
    scores_np = det_result.obj_scores.data.cpu().numpy()
    cls_preds_np = det_result.obj_preds.data.cpu().numpy()
    boxes_np = det_result.boxes_assigned.data.cpu().numpy()* BOX_SCALE/IM_SCALE
    # boxpriors_np = det_result.box_priors.data.cpu().numpy()
    im_inds_np = det_result.im_inds.data.cpu().numpy() + img_offset
    for img_i in np.unique(img_ids + img_offset):
        gt_entry = {
            'gt_classes': val.gt_classes[img_i].copy(),
            'gt_relations': val.relationships[img_i].copy(),
            'gt_boxes': val.gt_boxes[img_i].copy(),
        }
        pred_boxes = boxes_np[im_inds_np == img_i]
        pred_classes = cls_preds_np[im_inds_np == img_i]
        obj_scores = scores_np[im_inds_np == img_i]
        all_rels = nonintersecting_2d_inds(pred_boxes.shape[0])
        fp = predict(pred_boxes, pred_classes)
        fp_pred = fp[all_rels[:,0], all_rels[:,1]]
        scores = np.column_stack((
            obj_scores[all_rels[:,0]],
            obj_scores[all_rels[:,1]],
            fp_pred.max(1)
        )).prod(1)
        sorted_inds = np.argsort(-scores)
        # Drop zero-scoring pairs (pairs the prior has never seen).
        sorted_inds = sorted_inds[scores[sorted_inds] > 0] #[:100]
        pred_entry = {
            'pred_boxes': pred_boxes,
            'pred_classes': pred_classes,
            'obj_scores': obj_scores,
            'pred_rel_inds': all_rels[sorted_inds],
            'rel_scores': fp_pred[sorted_inds],
        }
        all_pred_entries['sgdet'].append(pred_entry)
        evaluator['sgdet'].evaluate_scene_graph_entry(
            gt_entry,
            pred_entry,
        )
    img_offset += img_ids.max() + 1
evaluator['sgdet'].print_stats()
# -----------------------------------------------------------------------------------------
# EVAL CLS AND SG
# Second pass: classify ground-truth boxes (sgcls), then reuse the same pairs
# with GT classes to evaluate predicate classification (predcls).
img_offset = 0
for val_b, b in enumerate(tqdm(val_loader)):
    det_result = classifier[b]
    # Max over non-background classes; +1 restores the original class index.
    scores, cls_preds = det_result.rm_obj_dists[:,1:].data.max(1)
    scores_np = scores.cpu().numpy()
    cls_preds_np = (cls_preds+1).cpu().numpy()
    img_ids = b.gt_classes_primary.data.cpu().numpy()[:,0]
    boxes_np = b.gt_boxes_primary.data.cpu().numpy()
    im_inds_np = det_result.im_inds.data.cpu().numpy() + img_offset
    for img_i in np.unique(img_ids + img_offset):
        gt_entry = {
            'gt_classes': val.gt_classes[img_i].copy(),
            'gt_relations': val.relationships[img_i].copy(),
            'gt_boxes': val.gt_boxes[img_i].copy(),
        }
        pred_boxes = boxes_np[im_inds_np == img_i]
        pred_classes = cls_preds_np[im_inds_np == img_i]
        obj_scores = scores_np[im_inds_np == img_i]
        all_rels = nonintersecting_2d_inds(pred_boxes.shape[0])
        fp = predict(pred_boxes, pred_classes)
        fp_pred = fp[all_rels[:,0], all_rels[:,1]]
        sg_cls_scores = np.column_stack((
            obj_scores[all_rels[:,0]],
            obj_scores[all_rels[:,1]],
            fp_pred.max(1)
        )).prod(1)
        sg_cls_inds = np.argsort(-sg_cls_scores)
        sg_cls_inds = sg_cls_inds[sg_cls_scores[sg_cls_inds] > 0] #[:100]
        pred_entry = {
            'pred_boxes': pred_boxes,
            'pred_classes': pred_classes,
            'obj_scores': obj_scores,
            'pred_rel_inds': all_rels[sg_cls_inds],
            'rel_scores': fp_pred[sg_cls_inds],
        }
        # deepcopy because the same dict is mutated below for predcls.
        all_pred_entries['sgcls'].append(deepcopy(pred_entry))
        evaluator['sgcls'].evaluate_scene_graph_entry(
            gt_entry,
            pred_entry,
        )
        ########################################################
        # predcls: rescore with GT classes and perfect object scores.
        fp = predict(gt_entry['gt_boxes'], gt_entry['gt_classes'])
        fp_pred = fp[all_rels[:, 0], all_rels[:, 1]]
        pred_cls_scores = fp_pred.max(1)
        pred_cls_inds = np.argsort(-pred_cls_scores)
        pred_cls_inds = pred_cls_inds[pred_cls_scores[pred_cls_inds] > 0][:100]
        pred_entry['pred_rel_inds'] = all_rels[pred_cls_inds]
        pred_entry['rel_scores'] = fp_pred[pred_cls_inds]
        pred_entry['pred_classes'] = gt_entry['gt_classes']
        pred_entry['obj_scores'] = np.ones(pred_entry['pred_classes'].shape[0])
        all_pred_entries['predcls'].append(pred_entry)
        evaluator['predcls'].evaluate_scene_graph_entry(
            gt_entry,
            pred_entry,
        )
    img_offset += img_ids.max() + 1
evaluator['predcls'].print_stats()
evaluator['sgcls'].print_stats()
# Persist the predictions for all three modes for later reuse.
for mode, entries in all_pred_entries.items():
    with open('caches/freqbaseline-{}-{}.pkl'.format('overlap' if MUST_OVERLAP else 'nonoverlap', mode), 'wb') as f:
        pkl.dump(entries, f)
| 9,552 | 36.758893 | 116 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/models/_visualize.py | """
Visualization script. I used this to create the figures in the paper.
WARNING: I haven't tested this in a while. It's possible that some later features I added break things here, but hopefully there should be easy fixes. I'm uploading this in the off chance it might help someone. If you get it to work, let me know (and also send a PR with bugs/etc)
"""
from dataloaders.visual_genome import VGDataLoader, VG
from lib.rel_model import RelModel
import numpy as np
import torch
from config import ModelConfig
from lib.pytorch_misc import optimistic_restore
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from config import BOX_SCALE, IM_SCALE
from lib.fpn.box_utils import bbox_overlaps
from collections import defaultdict
from PIL import Image, ImageDraw, ImageFont
import os
from functools import reduce
# Build the data loaders and the relation model, then restore trained weights.
conf = ModelConfig()
train, val, test = VG.splits(num_val_im=conf.val_size)
if conf.test:
    val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
                                               batch_size=conf.batch_size,
                                               num_workers=conf.num_workers,
                                               num_gpus=conf.num_gpus)
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
                    num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
                    use_resnet=conf.use_resnet, order=conf.order,
                    nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, hidden_dim=conf.hidden_dim,
                    use_proposals=conf.use_proposals,
                    pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
                    pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
                    pooling_dim=conf.pooling_dim,
                    rec_dropout=conf.rec_dropout,
                    use_bias=conf.use_bias,
                    use_tanh=conf.use_tanh,
                    limit_vision=conf.limit_vision
                    )
detector.cuda()
ckpt = torch.load(conf.ckpt)
optimistic_restore(detector, ckpt['state_dict'])
############################################ HELPER FUNCTIONS ###################################
def get_cmap(N):
    """Returns a function that maps each index in 0, 1, ... N-1 to a distinct RGB color."""
    import matplotlib.cm as cmx
    import matplotlib.colors as colors
    normalizer = colors.Normalize(vmin=0, vmax=N - 1)
    mappable = cmx.ScalarMappable(norm=normalizer, cmap='hsv')
    def index_to_rgb(index):
        # Pad values away from 0 so no color is fully dark.
        pad = 40
        rgba = np.array(mappable.to_rgba(index))
        return np.round(rgba * (255 - pad) + pad)
    return index_to_rgb
# One distinct color per object class (plus one spare).
cmap = get_cmap(len(train.ind_to_classes) + 1)
def load_unscaled(fn):
    """ Loads and scales images so that it's 1024 max-dimension"""
    img = Image.open(fn).convert('RGB')
    scale = 1024.0 / max(img.size)
    target_size = (int(scale * img.size[0]), int(scale * img.size[1]))
    return img.resize(target_size, resample=Image.BICUBIC)
# Label font for the drawn boxes; path assumes a Debian/Ubuntu freefont install.
font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf', 32)
def draw_box(draw, boxx, cls_ind, text_str):
    """Draw a labelled bounding box onto a PIL ImageDraw canvas.

    GT boxes (label containing '-GT') are orange, predicted boxes green.
    NOTE(review): cls_ind is currently unused (only the commented-out cmap
    line referenced it).
    """
    box = tuple([float(b) for b in boxx])
    if '-GT' in text_str:
        color = (255, 128, 0, 255)
    else:
        color = (0, 128, 0, 255)
    # color = tuple([int(x) for x in cmap(cls_ind)])
    # Draw the four edges manually so we control the line width.
    draw.line([(box[0], box[1]), (box[2], box[1])], fill=color, width=8)
    draw.line([(box[2], box[1]), (box[2], box[3])], fill=color, width=8)
    draw.line([(box[2], box[3]), (box[0], box[3])], fill=color, width=8)
    draw.line([(box[0], box[3]), (box[0], box[1])], fill=color, width=8)
    # draw.rectangle(box, outline=color)
    # Filled label strip above the box, clamped to the image bounds.
    w, h = draw.textsize(text_str, font=font)
    x1text = box[0]
    y1text = max(box[1] - h, 0)
    x2text = min(x1text + w, draw.im.size[0])
    y2text = y1text + h
    print("drawing {}x{} rectangle at {:.1f} {:.1f} {:.1f} {:.1f}".format(
        h, w, x1text, y1text, x2text, y2text))
    draw.rectangle((x1text, y1text, x2text, y2text), fill=color)
    draw.text((x1text, y1text), text_str, fill='black', font=font)
    return draw
def val_epoch():
    """Run the detector over the whole val loader and print evaluator stats."""
    detector.eval()
    evaluator = BasicSceneGraphEvaluator.all_modes()
    for val_b, batch in enumerate(tqdm(val_loader)):
        val_batch(conf.num_gpus * val_b, batch, evaluator)
    evaluator[conf.mode].print_stats()
def val_batch(batch_num, b, evaluator, thrs=(20, 50, 100)):
    """Evaluate one validation image and dump qualitative results to disk.

    Draws predicted and unmatched-GT boxes on the image, then writes three
    edge lists: correctly predicted relations ('good'), missed GT relations
    ('fn'), and confident-but-wrong predictions ('shit').

    :param batch_num: index of the image within the validation split
    :param b: collated single-image batch (single-GPU only)
    :param evaluator: dict of BasicSceneGraphEvaluator keyed by mode
    :param thrs: recall thresholds (kept for interface compatibility; the
        hard-coded top-20 cutoff below is what is actually applied)
    """
    det_res = detector[b]
    # Multi-GPU would return a list of per-image results; this qualitative
    # dump only supports one image at a time.
    assert conf.num_gpus == 1
    boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i = det_res

    gt_entry = {
        'gt_classes': val.gt_classes[batch_num].copy(),
        'gt_relations': val.relationships[batch_num].copy(),
        'gt_boxes': val.gt_boxes[batch_num].copy(),
    }
    # No relation endpoint may be the background class (index 0).
    assert np.all(objs_i[rels_i[:, 0]] > 0) and np.all(objs_i[rels_i[:, 1]] > 0)
    pred_entry = {
        # Predictions are in IM_SCALE space; GT boxes are stored at BOX_SCALE.
        'pred_boxes': boxes_i * BOX_SCALE / IM_SCALE,
        'pred_classes': objs_i,
        'pred_rel_inds': rels_i,
        'obj_scores': obj_scores_i,
        'rel_scores': pred_scores_i,
    }

    pred_to_gt, pred_5ples, rel_scores = evaluator[conf.mode].evaluate_scene_graph_entry(
        gt_entry,
        pred_entry,
    )

    # SET RECALL THRESHOLD HERE
    pred_to_gt = pred_to_gt[:20]
    pred_5ples = pred_5ples[:20]

    # Predicted boxes that match a GT box: IoU >= 0.5 AND same class label.
    objs_match = (bbox_overlaps(pred_entry['pred_boxes'], gt_entry['gt_boxes']) >= 0.5) & (
        objs_i[:, None] == gt_entry['gt_classes'][None]
    )

    has_seen = defaultdict(int)
    has_seen_gt = defaultdict(int)
    pred_ind2name = {}
    gt_ind2name = {}
    edges = {}
    missededges = {}
    badedges = {}

    def query_pred(pred_ind):
        # Lazily assign a unique display name like "dog-2" per predicted box.
        if pred_ind not in pred_ind2name:
            has_seen[objs_i[pred_ind]] += 1
            pred_ind2name[pred_ind] = '{}-{}'.format(train.ind_to_classes[objs_i[pred_ind]],
                                                     has_seen[objs_i[pred_ind]])
        return pred_ind2name[pred_ind]

    def query_gt(gt_ind):
        # Same naming scheme for GT boxes with no matching prediction ("dog-GT1").
        gt_cls = gt_entry['gt_classes'][gt_ind]
        if gt_ind not in gt_ind2name:
            has_seen_gt[gt_cls] += 1
            gt_ind2name[gt_ind] = '{}-GT{}'.format(train.ind_to_classes[gt_cls], has_seen_gt[gt_cls])
        return gt_ind2name[gt_ind]

    # True positives: predicted 5-tuples that matched at least one GT relation.
    matching_mask = np.array([len(x) > 0 for x in pred_to_gt])
    matching_pred5ples = pred_5ples[matching_mask]
    for fiveple in matching_pred5ples:
        head_name = query_pred(fiveple[0])
        tail_name = query_pred(fiveple[1])
        edges[(head_name, tail_name)] = train.ind_to_predicates[fiveple[4]]

    gt_5ples = np.column_stack((gt_entry['gt_relations'][:, :2],
                                gt_entry['gt_classes'][gt_entry['gt_relations'][:, 0]],
                                gt_entry['gt_classes'][gt_entry['gt_relations'][:, 1]],
                                gt_entry['gt_relations'][:, 2],
                                ))
    # False negatives: GT relations that no top prediction was matched to.
    has_match = reduce(np.union1d, pred_to_gt)
    for gt in gt_5ples[np.setdiff1d(np.arange(gt_5ples.shape[0]), has_match)]:
        # Head and tail: prefer a matching predicted box's name, else a GT name.
        namez = []
        for i in range(2):
            matching_obj = np.where(objs_match[:, gt[i]])[0]
            if matching_obj.size > 0:
                name = query_pred(matching_obj[0])
            else:
                name = query_gt(gt[i])
            namez.append(name)
        missededges[tuple(namez)] = train.ind_to_predicates[gt[4]]

    # False positives among the top predictions. FIX: the original used
    # np.setdiff1d(np.arange(N), matching_pred5ples), which flattens the
    # matched 5-tuples and treats their *values* as row indices to exclude;
    # the inverted boolean mask is what was intended.
    for fiveple in pred_5ples[~matching_mask]:
        # Only report pairs whose endpoints were already named (i.e. drawn).
        if fiveple[0] in pred_ind2name and fiveple[1] in pred_ind2name:
            badedges[(pred_ind2name[fiveple[0]], pred_ind2name[fiveple[1]])] = train.ind_to_predicates[fiveple[4]]

    theimg = load_unscaled(val.filenames[batch_num])
    theimg2 = theimg.copy()
    draw2 = ImageDraw.Draw(theimg2)

    # Draw every named box: predicted boxes first, then unmatched GT boxes.
    for pred_ind in pred_ind2name.keys():
        draw2 = draw_box(draw2, pred_entry['pred_boxes'][pred_ind],
                         cls_ind=objs_i[pred_ind],
                         text_str=pred_ind2name[pred_ind])
    for gt_ind in gt_ind2name.keys():
        draw2 = draw_box(draw2, gt_entry['gt_boxes'][gt_ind],
                         cls_ind=gt_entry['gt_classes'][gt_ind],
                         text_str=gt_ind2name[gt_ind])

    recall = int(100 * len(reduce(np.union1d, pred_to_gt)) / gt_entry['gt_relations'].shape[0])

    id = '{}-{}'.format(val.filenames[batch_num].split('/')[-1][:-4], recall)
    pathname = os.path.join('qualitative', id)
    # FIX: makedirs also creates the 'qualitative' parent on first use;
    # os.mkdir failed when it did not exist yet.
    os.makedirs(pathname, exist_ok=True)
    theimg.save(os.path.join(pathname, 'img.jpg'), quality=100, subsampling=0)
    theimg2.save(os.path.join(pathname, 'imgbox.jpg'), quality=100, subsampling=0)

    with open(os.path.join(pathname, 'shit.txt'), 'w') as f:
        f.write('good:\n')
        for (o1, o2), p in edges.items():
            f.write('{} - {} - {}\n'.format(o1, p, o2))
        f.write('fn:\n')
        for (o1, o2), p in missededges.items():
            f.write('{} - {} - {}\n'.format(o1, p, o2))
        f.write('shit:\n')
        for (o1, o2), p in badedges.items():
            f.write('{} - {} - {}\n'.format(o1, p, o2))
mAp = val_epoch()
| 9,693 | 36.867188 | 280 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/models/train_rels.py | """
Training script for scene graph detection. Integrated with my faster rcnn setup
"""
from dataloaders.visual_genome import VGDataLoader, VG
import numpy as np
from torch import optim
import torch
import pandas as pd
import time
import os
from config import ModelConfig, BOX_SCALE, IM_SCALE
from torch.nn import functional as F
from lib.pytorch_misc import optimistic_restore, de_chunkize, clip_grad_norm
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from lib.pytorch_misc import print_para
from torch.optim.lr_scheduler import ReduceLROnPlateau
# Parse command-line / config options for this training run.
conf = ModelConfig()

# Select the relation-model implementation by name.
if conf.model == 'motifnet':
    from lib.rel_model import RelModel
elif conf.model == 'stanford':
    from lib.rel_model_stanford import RelModelStanford as RelModel
else:
    raise ValueError()

# Build train/val splits; sgdet mode additionally drops non-overlapping GT pairs.
train, val, _ = VG.splits(num_val_im=conf.val_size, filter_duplicate_rels=True,
                          use_proposals=conf.use_proposals,
                          filter_non_overlap=conf.mode == 'sgdet')
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
                                               batch_size=conf.batch_size,
                                               num_workers=conf.num_workers,
                                               num_gpus=conf.num_gpus)

# Instantiate the scene-graph relation model with all configured knobs.
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
                    num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
                    use_resnet=conf.use_resnet, order=conf.order,
                    nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, hidden_dim=conf.hidden_dim,
                    use_proposals=conf.use_proposals,
                    pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
                    pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
                    pooling_dim=conf.pooling_dim,
                    rec_dropout=conf.rec_dropout,
                    use_bias=conf.use_bias,
                    use_tanh=conf.use_tanh,
                    limit_vision=conf.limit_vision
                    )

# Freeze the detector: only the relation head is trained in this script.
for n, param in detector.detector.named_parameters():
    param.requires_grad = False

print(print_para(detector), flush=True)
def get_optim(lr):
    """Build the optimizer and LR scheduler for the relation model.

    The VGG fully-connected ('roi_fmap') layers get 1/10th of the base
    learning rate. It's a hack, but it helps stabilize training.
    """
    fc_params, non_fc_params = [], []
    for name, p in detector.named_parameters():
        if not p.requires_grad:
            continue
        (fc_params if name.startswith('roi_fmap') else non_fc_params).append(p)
    params = [{'params': fc_params, 'lr': lr / 10.0}, {'params': non_fc_params}]

    if conf.adam:
        optimizer = optim.Adam(params, weight_decay=conf.l2, lr=lr, eps=1e-3)
    else:
        optimizer = optim.SGD(params, weight_decay=conf.l2, lr=lr, momentum=0.9)

    # Decay the LR when the tracked metric (val recall, hence 'max') plateaus.
    scheduler = ReduceLROnPlateau(optimizer, 'max', patience=3, factor=0.1,
                                  verbose=True, threshold=0.0001, threshold_mode='abs', cooldown=1)
    return optimizer, scheduler
ckpt = torch.load(conf.ckpt)
# A 'vgrel-N.tar' checkpoint is a full relation-model snapshot; anything else
# is treated as a detector-only checkpoint whose FC weights seed both roi heads.
if conf.ckpt.split('-')[-2].split('/')[-1] == 'vgrel':
    print("Loading EVERYTHING")
    start_epoch = ckpt['epoch']

    # optimistic_restore loads what it can; on a mismatch, restart epoch count.
    if not optimistic_restore(detector, ckpt['state_dict']):
        start_epoch = -1
        # optimistic_restore(detector.detector, torch.load('checkpoints/vgdet/vg-28.tar')['state_dict'])
else:
    start_epoch = -1
    optimistic_restore(detector.detector, ckpt['state_dict'])

    # Copy the detector's VGG fc layers into both the edge roi_fmap and the
    # object roi_fmap so they start from the same pretrained weights.
    detector.roi_fmap[1][0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
    detector.roi_fmap[1][3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
    detector.roi_fmap[1][0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
    detector.roi_fmap[1][3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
    detector.roi_fmap_obj[0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
    detector.roi_fmap_obj[3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
    detector.roi_fmap_obj[0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
    detector.roi_fmap_obj[3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])

detector.cuda()
def train_epoch(epoch_num):
    """Train for one epoch; return a DataFrame of per-batch loss series."""
    detector.train()
    batch_stats = []
    tick = time.time()
    for step, batch in enumerate(train_loader):
        verbose = step % (conf.print_interval * 10) == 0  # b == 0))
        batch_stats.append(train_batch(batch, verbose=verbose))

        # Periodically report the mean of the most recent window of losses.
        if step >= conf.print_interval and step % conf.print_interval == 0:
            window_mean = pd.concat(batch_stats[-conf.print_interval:], axis=1).mean(1)
            time_per_batch = (time.time() - tick) / conf.print_interval
            print("\ne{:2d}b{:5d}/{:5d} {:.3f}s/batch, {:.1f}m/epoch".format(
                epoch_num, step, len(train_loader), time_per_batch, len(train_loader) * time_per_batch / 60))
            print(window_mean)
            print('-----------', flush=True)
            tick = time.time()
    return pd.concat(batch_stats, axis=1)
def train_batch(b, verbose=False):
    """
    Run one optimization step on a single collated batch.

    :param b: contains:
        :param imgs: the image, [batch_size, 3, IM_SIZE, IM_SIZE]
        :param all_anchors: [num_anchors, 4] the boxes of all anchors that we'll be using
        :param all_anchor_inds: [num_anchors, 2] array of the indices into the concatenated
                                RPN feature vector that give us all_anchors,
                                each one (img_ind, fpn_idx)
        :param im_sizes: a [batch_size, 4] numpy array of (h, w, scale, num_good_anchors) for each image.

        :param num_anchors_per_img: int, number of anchors in total over the feature pyramid per img

        Training parameters:
        :param train_anchor_inds: a [num_train, 5] array of indices for the anchors that will
                                  be used to compute the training loss (img_ind, fpn_idx)
        :param gt_boxes: [num_gt, 4] GT boxes over the batch.
        :param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
    :param verbose: if True, clip_grad_norm prints per-parameter gradient norms
    :return: pd.Series of scalar losses (class_loss, rel_loss, total)
    """
    result = detector[b]

    # Object classification loss + relation (predicate) classification loss.
    losses = {}
    losses['class_loss'] = F.cross_entropy(result.rm_obj_dists, result.rm_obj_labels)
    losses['rel_loss'] = F.cross_entropy(result.rel_dists, result.rel_labels[:, -1])
    loss = sum(losses.values())

    optimizer.zero_grad()
    loss.backward()
    # Clip only parameters that actually received gradients this step.
    clip_grad_norm(
        [(n, p) for n, p in detector.named_parameters() if p.grad is not None],
        max_norm=conf.clip, verbose=verbose, clip=True)
    losses['total'] = loss
    optimizer.step()
    # NOTE(review): `.data[0]` is pre-0.4 PyTorch scalar indexing; on modern
    # torch this would need `.item()` instead.
    res = pd.Series({x: y.data[0] for x, y in losses.items()})
    return res
def val_epoch():
    """Evaluate on the validation set; return mean recall@100 for the current mode."""
    detector.eval()
    sg_eval = BasicSceneGraphEvaluator.all_modes()
    for step, batch in enumerate(val_loader):
        # Scale by num_gpus so per-GPU chunks map to distinct dataset indices.
        val_batch(conf.num_gpus * step, batch, sg_eval)
    sg_eval[conf.mode].print_stats()
    recalls = sg_eval[conf.mode].result_dict[conf.mode + '_recall'][100]
    return np.mean(recalls)
def val_batch(batch_num, b, evaluator):
    """Run the detector on one collated batch and feed each image's
    predictions into the scene-graph evaluator.

    :param batch_num: dataset index of the first image in this batch
    :param b: collated batch (per-GPU chunks when num_gpus > 1)
    :param evaluator: dict of BasicSceneGraphEvaluator keyed by mode
    """
    det_res = detector[b]
    # Single-GPU returns one tuple; wrap it so the loop below is uniform.
    if conf.num_gpus == 1:
        det_res = [det_res]

    for i, (boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i) in enumerate(det_res):
        gt_entry = {
            'gt_classes': val.gt_classes[batch_num + i].copy(),
            'gt_relations': val.relationships[batch_num + i].copy(),
            'gt_boxes': val.gt_boxes[batch_num + i].copy(),
        }
        # No relation endpoint may be the background class (index 0).
        assert np.all(objs_i[rels_i[:, 0]] > 0) and np.all(objs_i[rels_i[:, 1]] > 0)

        pred_entry = {
            # Predictions are in IM_SCALE space; GT boxes are stored at BOX_SCALE.
            'pred_boxes': boxes_i * BOX_SCALE/IM_SCALE,
            'pred_classes': objs_i,
            'pred_rel_inds': rels_i,
            'obj_scores': obj_scores_i,
            'rel_scores': pred_scores_i,  # hack for now.
        }

        evaluator[conf.mode].evaluate_scene_graph_entry(
            gt_entry,
            pred_entry,
        )
print("Training starts now!")
optimizer, scheduler = get_optim(conf.lr * conf.num_gpus * conf.batch_size)
for epoch in range(start_epoch + 1, start_epoch + 1 + conf.num_epochs):
rez = train_epoch(epoch)
print("overall{:2d}: ({:.3f})\n{}".format(epoch, rez.mean(1)['total'], rez.mean(1)), flush=True)
if conf.save_dir is not None:
torch.save({
'epoch': epoch,
'state_dict': detector.state_dict(), #{k:v for k,v in detector.state_dict().items() if not k.startswith('detector.')},
# 'optimizer': optimizer.state_dict(),
}, os.path.join(conf.save_dir, '{}-{}.tar'.format('vgrel', epoch)))
mAp = val_epoch()
scheduler.step(mAp)
if any([pg['lr'] <= (conf.lr * conf.num_gpus * conf.batch_size)/99.0 for pg in optimizer.param_groups]):
print("exiting training early", flush=True)
break
| 8,782 | 41.225962 | 130 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/dataloaders/visual_genome.py | """
File that involves dataloaders for the Visual Genome example_dataset.
"""
import json
import os
import h5py
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
from dataloaders.blob import Blob
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from config import VG_IMAGES, IM_DATA_FN, VG_SGG_FN, VG_SGG_DICT_FN, BOX_SCALE, IM_SCALE, PROPOSAL_FN
from dataloaders.image_transforms import SquarePad, Grayscale, Brightness, Sharpness, Contrast, \
RandomOrder, Hue, random_crop
from collections import defaultdict
from pycocotools.coco import COCO
class VG(Dataset):
    """Torch Dataset for the VisualGenome scene-graph annotations."""

    def __init__(self, mode, roidb_file=VG_SGG_FN, dict_file=VG_SGG_DICT_FN,
                 image_file=IM_DATA_FN, filter_empty_rels=True, num_im=-1, num_val_im=5000,
                 filter_duplicate_rels=True, filter_non_overlap=True,
                 use_proposals=False):
        """
        Torch dataset for VisualGenome
        :param mode: Must be train, test, or val
        :param roidb_file: HDF5 containing the GT boxes, classes, and relationships
        :param dict_file: JSON Contains mapping of classes/relationships to words
        :param image_file: HDF5 containing image filenames
        :param filter_empty_rels: True if we filter out images without relationships between
                                  boxes. One might want to set this to false if training a detector.
        :param filter_duplicate_rels: Whenever we see a duplicate relationship we'll sample instead
        :param num_im: Number of images in the entire dataset. -1 for all images.
        :param num_val_im: Number of images in the validation set (must be less than num_im
                           unless num_im is -1.)
        :param use_proposals: If False, we don't provide proposals. Otherwise load precomputed
                              RPN proposals from PROPOSAL_FN.
        """
        if mode not in ('test', 'train', 'val'):
            raise ValueError("Mode must be in test, train, or val. Supplied {}".format(mode))
        self.mode = mode

        # Initialize
        self.roidb_file = roidb_file
        self.dict_file = dict_file
        self.image_file = image_file
        self.filter_non_overlap = filter_non_overlap
        # Duplicate-relation filtering is only applied while training.
        self.filter_duplicate_rels = filter_duplicate_rels and self.mode == 'train'

        self.split_mask, self.gt_boxes, self.gt_classes, self.relationships = load_graphs(
            self.roidb_file, self.mode, num_im, num_val_im=num_val_im,
            filter_empty_rels=filter_empty_rels,
            filter_non_overlap=self.filter_non_overlap and self.is_train,
        )

        # Keep only the filenames belonging to this split.
        self.filenames = load_image_filenames(image_file)
        self.filenames = [self.filenames[i] for i in np.where(self.split_mask)[0]]

        self.ind_to_classes, self.ind_to_predicates = load_info(dict_file)

        if use_proposals:
            # Slice the flat proposal arrays per image using the (offset, count)
            # index tables, prepending each proposal's score as column 0.
            print("Loading proposals", flush=True)
            p_h5 = h5py.File(PROPOSAL_FN, 'r')
            rpn_rois = p_h5['rpn_rois']
            rpn_scores = p_h5['rpn_scores']
            rpn_im_to_roi_idx = np.array(p_h5['im_to_roi_idx'][self.split_mask])
            rpn_num_rois = np.array(p_h5['num_rois'][self.split_mask])

            self.rpn_rois = []
            for i in range(len(self.filenames)):
                rpn_i = np.column_stack((
                    rpn_scores[rpn_im_to_roi_idx[i]:rpn_im_to_roi_idx[i] + rpn_num_rois[i]],
                    rpn_rois[rpn_im_to_roi_idx[i]:rpn_im_to_roi_idx[i] + rpn_num_rois[i]],
                ))
                self.rpn_rois.append(rpn_i)
        else:
            self.rpn_rois = None

        # You could add data augmentation here. But we didn't.
        # tform = []
        # if self.is_train:
        #     tform.append(RandomOrder([
        #         Grayscale(),
        #         Brightness(),
        #         Contrast(),
        #         Sharpness(),
        #         Hue(),
        #     ]))

        # Pad to square, resize to IM_SCALE, then ImageNet normalization.
        tform = [
            SquarePad(),
            Resize(IM_SCALE),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
        self.transform_pipeline = Compose(tform)

    @property
    def coco(self):
        """
        :return: a Coco-like object that we can use to evaluate detection!
        """
        anns = []
        for i, (cls_array, box_array) in enumerate(zip(self.gt_classes, self.gt_boxes)):
            for cls, box in zip(cls_array.tolist(), box_array.tolist()):
                anns.append({
                    'area': (box[3] - box[1] + 1) * (box[2] - box[0] + 1),
                    # COCO bbox format is (x, y, width, height).
                    'bbox': [box[0], box[1], box[2] - box[0] + 1, box[3] - box[1] + 1],
                    'category_id': cls,
                    'id': len(anns),
                    'image_id': i,
                    'iscrowd': 0,
                })
        fauxcoco = COCO()
        fauxcoco.dataset = {
            'info': {'description': 'ayy lmao'},
            'images': [{'id': i} for i in range(self.__len__())],
            'categories': [{'supercategory': 'person',
                            'id': i, 'name': name} for i, name in enumerate(self.ind_to_classes) if name != '__background__'],
            'annotations': anns,
        }
        fauxcoco.createIndex()
        return fauxcoco

    @property
    def is_train(self):
        return self.mode.startswith('train')

    @classmethod
    def splits(cls, *args, **kwargs):
        """ Helper method to generate (train, val, test) splits of the dataset"""
        train = cls('train', *args, **kwargs)
        val = cls('val', *args, **kwargs)
        test = cls('test', *args, **kwargs)
        return train, val, test

    def __getitem__(self, index):
        """Return one entry: transformed image, scaled GT boxes/classes/relations."""
        image_unpadded = Image.open(self.filenames[index]).convert('RGB')

        # Optionally flip the image if we're doing training
        flipped = self.is_train and np.random.random() > 0.5
        gt_boxes = self.gt_boxes[index].copy()

        # Boxes are already at BOX_SCALE
        if self.is_train:
            # crop boxes that are too large. This seems to be only a problem for image heights, but whatevs
            gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]].clip(
                None, BOX_SCALE / max(image_unpadded.size) * image_unpadded.size[1])
            gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]].clip(
                None, BOX_SCALE / max(image_unpadded.size) * image_unpadded.size[0])

            # # crop the image for data augmentation
            # image_unpadded, gt_boxes = random_crop(image_unpadded, gt_boxes, BOX_SCALE, round_boxes=True)

        w, h = image_unpadded.size
        box_scale_factor = BOX_SCALE / max(w, h)

        if flipped:
            # Mirror the boxes horizontally in BOX_SCALE coordinates.
            scaled_w = int(box_scale_factor * float(w))
            # print("Scaled w is {}".format(scaled_w))
            image_unpadded = image_unpadded.transpose(Image.FLIP_LEFT_RIGHT)
            gt_boxes[:, [0, 2]] = scaled_w - gt_boxes[:, [2, 0]]

        # Longest side becomes IM_SCALE; im_size is (h, w, scale) after resize.
        img_scale_factor = IM_SCALE / max(w, h)
        if h > w:
            im_size = (IM_SCALE, int(w * img_scale_factor), img_scale_factor)
        elif h < w:
            im_size = (int(h * img_scale_factor), IM_SCALE, img_scale_factor)
        else:
            im_size = (IM_SCALE, IM_SCALE, img_scale_factor)

        gt_rels = self.relationships[index].copy()
        if self.filter_duplicate_rels:
            # Filter out dupes!
            assert self.mode == 'train'
            old_size = gt_rels.shape[0]
            all_rel_sets = defaultdict(list)
            for (o0, o1, r) in gt_rels:
                all_rel_sets[(o0, o1)].append(r)
            # Keep one randomly sampled predicate per (subject, object) pair.
            gt_rels = [(k[0], k[1], np.random.choice(v)) for k,v in all_rel_sets.items()]
            gt_rels = np.array(gt_rels)

        entry = {
            'img': self.transform_pipeline(image_unpadded),
            'img_size': im_size,
            'gt_boxes': gt_boxes,
            'gt_classes': self.gt_classes[index].copy(),
            'gt_relations': gt_rels,
            'scale': IM_SCALE / BOX_SCALE,  # Multiply the boxes by this.
            'index': index,
            'flipped': flipped,
            'fn': self.filenames[index],
        }

        if self.rpn_rois is not None:
            entry['proposals'] = self.rpn_rois[index]

        assertion_checks(entry)
        return entry

    def __len__(self):
        return len(self.filenames)

    @property
    def num_predicates(self):
        return len(self.ind_to_predicates)

    @property
    def num_classes(self):
        return len(self.ind_to_classes)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MISC. HELPER FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def assertion_checks(entry):
    """Sanity-check a dataset entry: 3-channel image tensor, matching GT
    box/class counts, and well-formed box coordinates."""
    img_shape = tuple(entry['img'].size())
    if len(img_shape) != 3:
        raise ValueError("Img must be dim-3")

    channels = entry['img'].size()[0]
    if channels != 3:
        raise ValueError("Must have 3 color channels")

    boxes = entry['gt_boxes']
    if entry['gt_classes'].shape[0] != boxes.shape[0]:
        raise ValueError("GT classes and GT boxes must have same number of examples")

    # Boxes must satisfy x2 >= x1 and not be (much) out of frame.
    assert (boxes[:, 2] >= boxes[:, 0]).all()
    assert (boxes >= -1).all()
def load_image_filenames(image_file, image_dir=VG_IMAGES):
    """
    Loads the image filenames from visual genome from the JSON file that contains them.
    This matches the preprocessing in scene-graph-TF-release/data_tools/vg_to_imdb.py.
    :param image_file: JSON file. Elements contain the param "image_id".
    :param image_dir: directory where the VisualGenome images are located
    :return: List of filenames corresponding to the good images
    """
    with open(image_file, 'r') as f:
        im_data = json.load(f)

    # These four images are known-corrupted in the VG distribution and are
    # skipped to stay consistent with the reference preprocessing.
    corrupted_ims = ['1592.jpg', '1722.jpg', '4616.jpg', '4617.jpg']
    fns = []
    for i, img in enumerate(im_data):
        basename = '{}.jpg'.format(img['image_id'])
        if basename in corrupted_ims:
            continue

        filename = os.path.join(image_dir, basename)
        if os.path.exists(filename):
            fns.append(filename)
    # NOTE(review): hard-coded full-dataset count (all VG images minus the 4
    # corrupted ones) -- this assert fails on partial downloads.
    assert len(fns) == 108073
    return fns
def load_graphs(graphs_file, mode='train', num_im=-1, num_val_im=0, filter_empty_rels=True,
                filter_non_overlap=False):
    """
    Load the file containing the GT boxes and relations, as well as the dataset split
    :param graphs_file: HDF5
    :param mode: (train, val, or test)
    :param num_im: Number of images we want
    :param num_val_im: Number of validation images
    :param filter_empty_rels: (will be filtered otherwise.)
    :param filter_non_overlap: If training, filter images that dont overlap.
    :return: image_index: numpy array corresponding to the index of images we're using
             boxes: List where each element is a [num_gt, 4] array of ground
                    truth boxes (x1, y1, x2, y2)
             gt_classes: List where each element is a [num_gt] array of classes
             relationships: List where each element is a [num_r, 3] array of
                    (box_ind_1, box_ind_2, predicate) relationships
    """
    if mode not in ('train', 'val', 'test'):
        raise ValueError('{} invalid'.format(mode))

    roi_h5 = h5py.File(graphs_file, 'r')
    data_split = roi_h5['split'][:]
    # The HDF5 only has splits 0 (train) and 2 (test); val is carved out of
    # the train split below via num_val_im.
    split = 2 if mode == 'test' else 0
    split_mask = data_split == split

    # Filter out images without bounding boxes
    split_mask &= roi_h5['img_to_first_box'][:] >= 0
    if filter_empty_rels:
        split_mask &= roi_h5['img_to_first_rel'][:] >= 0

    image_index = np.where(split_mask)[0]
    if num_im > -1:
        image_index = image_index[:num_im]
    if num_val_im > 0:
        # First num_val_im train-split images become val; train skips them.
        if mode == 'val':
            image_index = image_index[:num_val_im]
        elif mode == 'train':
            image_index = image_index[num_val_im:]

    # Rebuild the mask so it reflects exactly the selected image indices.
    split_mask = np.zeros_like(data_split).astype(bool)
    split_mask[image_index] = True

    # Get box information
    all_labels = roi_h5['labels'][:, 0]
    all_boxes = roi_h5['boxes_{}'.format(BOX_SCALE)][:]  # will index later
    assert np.all(all_boxes[:, :2] >= 0)  # sanity check
    assert np.all(all_boxes[:, 2:] > 0)  # no empty box

    # convert from xc, yc, w, h to x1, y1, x2, y2
    all_boxes[:, :2] = all_boxes[:, :2] - all_boxes[:, 2:] / 2
    all_boxes[:, 2:] = all_boxes[:, :2] + all_boxes[:, 2:]

    # Per-image (first, last) index ranges into the flat box/rel arrays.
    im_to_first_box = roi_h5['img_to_first_box'][split_mask]
    im_to_last_box = roi_h5['img_to_last_box'][split_mask]
    im_to_first_rel = roi_h5['img_to_first_rel'][split_mask]
    im_to_last_rel = roi_h5['img_to_last_rel'][split_mask]

    # load relation labels
    _relations = roi_h5['relationships'][:]
    _relation_predicates = roi_h5['predicates'][:, 0]
    assert (im_to_first_rel.shape[0] == im_to_last_rel.shape[0])
    assert (_relations.shape[0] == _relation_predicates.shape[0])  # sanity check

    # Get everything by image.
    boxes = []
    gt_classes = []
    relationships = []
    for i in range(len(image_index)):
        boxes_i = all_boxes[im_to_first_box[i]:im_to_last_box[i] + 1, :]
        gt_classes_i = all_labels[im_to_first_box[i]:im_to_last_box[i] + 1]

        if im_to_first_rel[i] >= 0:
            predicates = _relation_predicates[im_to_first_rel[i]:im_to_last_rel[i] + 1]
            # Relation endpoints are global box indices; make them image-local.
            obj_idx = _relations[im_to_first_rel[i]:im_to_last_rel[i] + 1] - im_to_first_box[i]
            assert np.all(obj_idx >= 0)
            assert np.all(obj_idx < boxes_i.shape[0])
            rels = np.column_stack((obj_idx, predicates))
        else:
            assert not filter_empty_rels
            rels = np.zeros((0, 3), dtype=np.int32)

        if filter_non_overlap:
            assert mode == 'train'
            # Keep only relations whose subject/object boxes overlap; drop the
            # whole image (and unset its mask bit) if none remain.
            inters = bbox_overlaps(boxes_i, boxes_i)
            rel_overs = inters[rels[:, 0], rels[:, 1]]
            inc = np.where(rel_overs > 0.0)[0]

            if inc.size > 0:
                rels = rels[inc]
            else:
                split_mask[image_index[i]] = 0
                continue

        boxes.append(boxes_i)
        gt_classes.append(gt_classes_i)
        relationships.append(rels)

    return split_mask, boxes, gt_classes, relationships
def load_info(info_file):
    """
    Loads the file containing the visual genome label meanings
    :param info_file: JSON with 'label_to_idx' and 'predicate_to_idx' maps
    :return: ind_to_classes: list of class names sorted by index
             ind_to_predicates: list of predicate names sorted by index
    """
    # FIX: use a context manager -- the original json.load(open(...)) leaked
    # the file handle.
    with open(info_file, 'r') as f:
        info = json.load(f)
    # Index 0 is reserved for the background class in both vocabularies.
    info['label_to_idx']['__background__'] = 0
    info['predicate_to_idx']['__background__'] = 0

    class_to_ind = info['label_to_idx']
    predicate_to_ind = info['predicate_to_idx']
    ind_to_classes = sorted(class_to_ind, key=lambda k: class_to_ind[k])
    ind_to_predicates = sorted(predicate_to_ind, key=lambda k: predicate_to_ind[k])

    return ind_to_classes, ind_to_predicates
def vg_collate(data, num_gpus=3, is_train=False, mode='det'):
    """Collate a list of dataset entries into a single Blob, split across GPUs."""
    assert mode in ('det', 'rel')
    per_gpu = len(data) // num_gpus
    blob = Blob(mode=mode, is_train=is_train, num_gpus=num_gpus,
                batch_size_per_gpu=per_gpu)
    for entry in data:
        blob.append(entry)
    blob.reduce()
    return blob
class VGDataLoader(torch.utils.data.DataLoader):
    """
    Iterates through the data, filtering out None,
    but also loads everything as a (cuda) variable
    """

    @classmethod
    def splits(cls, train_data, val_data, batch_size=3, num_workers=1, num_gpus=3, mode='det',
               **kwargs):
        """Build a (train_loader, val_loader) pair sharing the vg_collate collate fn."""
        assert mode in ('det', 'rel')

        def build(dataset, bsize, shuffle, is_train):
            # Late-binding is avoided by passing is_train as an argument here.
            return cls(
                dataset=dataset,
                batch_size=bsize,
                shuffle=shuffle,
                num_workers=num_workers,
                collate_fn=lambda x: vg_collate(x, mode=mode, num_gpus=num_gpus, is_train=is_train),
                drop_last=True,
                **kwargs,
            )

        train_load = build(train_data, batch_size * num_gpus, True, True)
        # Relation-mode validation runs one image per GPU.
        val_bsize = batch_size * num_gpus if mode == 'det' else num_gpus
        val_load = build(val_data, val_bsize, False, False)
        return train_load, val_load
| 16,373 | 37.527059 | 129 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/dataloaders/blob.py | """
Data blob, hopefully to make collating less painful and MGPU training possible
"""
from lib.fpn.anchor_targets import anchor_target_layer
import numpy as np
import torch
from torch.autograd import Variable
class Blob(object):
    """Accumulates per-image entries into flat batched arrays, with helpers to
    chunk and scatter them across GPUs. (Pre-0.4 PyTorch: uses Variable,
    volatile, and `async=` kwargs throughout.)"""

    def __init__(self, mode='det', is_train=False, num_gpus=1, primary_gpu=0, batch_size_per_gpu=3):
        """
        Initializes an empty Blob object.
        :param mode: 'det' for detection and 'rel' for det+relationship
        :param is_train: True if it's training
        """
        assert mode in ('det', 'rel')
        assert num_gpus >= 1
        self.mode = mode
        self.is_train = is_train
        self.num_gpus = num_gpus
        self.batch_size_per_gpu = batch_size_per_gpu
        self.primary_gpu = primary_gpu

        self.imgs = []  # [num_images, 3, IM_SCALE, IM_SCALE] array
        self.im_sizes = []  # [num_images, 4] array of (h, w, scale, num_valid_anchors)
        self.all_anchor_inds = []  # [all_anchors, 2] array of (img_ind, anchor_idx). Only has valid
        # boxes (meaning some are gonna get cut out)
        self.all_anchors = []  # [num_im, IM_SCALE/4, IM_SCALE/4, num_anchors, 4] shapes. Anchors outside get squashed
        # to 0
        self.gt_boxes = []  # [num_gt, 4] boxes
        self.gt_classes = []  # [num_gt,2] array of img_ind, class
        self.gt_rels = []  # [num_rels, 3]. Each row is (gtbox0, gtbox1, rel).
        self.gt_sents = []
        self.gt_nodes = []
        self.sent_lengths = []

        self.train_anchor_labels = []  # [train_anchors, 5] array of (img_ind, h, w, A, labels)
        self.train_anchors = []  # [train_anchors, 8] shapes with anchor, target

        self.train_anchor_inds = None  # This will be split into GPUs, just (img_ind, h, w, A).

        self.batch_size = None
        self.gt_box_chunks = None
        self.anchor_chunks = None
        self.train_chunks = None
        self.proposal_chunks = None
        self.proposals = []

    @property
    def is_flickr(self):
        # NOTE(review): mode is asserted to be 'det'/'rel' in __init__, so
        # this is always False here; kept for interface compatibility.
        return self.mode == 'flickr'

    @property
    def is_rel(self):
        return self.mode == 'rel'

    @property
    def volatile(self):
        # Legacy torch: inference-mode Variables were marked volatile.
        return not self.is_train

    def append(self, d):
        """
        Adds a single image to the blob
        :param d: one dataset entry dict (img, img_size, gt_boxes, gt_classes, ...)
        :return: None
        """
        i = len(self.imgs)
        self.imgs.append(d['img'])

        h, w, scale = d['img_size']

        # all anchors
        self.im_sizes.append((h, w, scale))

        gt_boxes_ = d['gt_boxes'].astype(np.float32) * d['scale']
        self.gt_boxes.append(gt_boxes_)
        # Prefix every GT class with its image index within the batch.
        self.gt_classes.append(np.column_stack((
            i * np.ones(d['gt_classes'].shape[0], dtype=np.int64),
            d['gt_classes'],
        )))

        # Add relationship info
        if self.is_rel:
            self.gt_rels.append(np.column_stack((
                i * np.ones(d['gt_relations'].shape[0], dtype=np.int64),
                d['gt_relations'])))

        # Augment with anchor targets
        if self.is_train:
            train_anchors_, train_anchor_inds_, train_anchor_targets_, train_anchor_labels_ = \
                anchor_target_layer(gt_boxes_, (h, w))

            self.train_anchors.append(np.hstack((train_anchors_, train_anchor_targets_)))
            self.train_anchor_labels.append(np.column_stack((
                i * np.ones(train_anchor_inds_.shape[0], dtype=np.int64),
                train_anchor_inds_,
                train_anchor_labels_,
            )))

        if 'proposals' in d:
            # Proposal rows become (img_ind, score, x1, y1, x2, y2) at IM_SCALE.
            self.proposals.append(np.column_stack((i * np.ones(d['proposals'].shape[0], dtype=np.float32),
                                                   d['scale'] * d['proposals'].astype(np.float32))))

    def _chunkize(self, datom, tensor=torch.LongTensor):
        """
        Turn data list into chunks, one per GPU
        :param datom: List of lists of numpy arrays that will be concatenated.
        :return: (concatenated Variable, per-GPU row counts)
        """
        chunk_sizes = [0] * self.num_gpus
        for i in range(self.num_gpus):
            for j in range(self.batch_size_per_gpu):
                chunk_sizes[i] += datom[i * self.batch_size_per_gpu + j].shape[0]
        return Variable(tensor(np.concatenate(datom, 0)), volatile=self.volatile), chunk_sizes

    def reduce(self):
        """ Merges all the detections into flat lists + numbers of how many are in each"""
        if len(self.imgs) != self.batch_size_per_gpu * self.num_gpus:
            raise ValueError("Wrong batch size? imgs len {} bsize/gpu {} numgpus {}".format(
                len(self.imgs), self.batch_size_per_gpu, self.num_gpus
            ))

        self.imgs = Variable(torch.stack(self.imgs, 0), volatile=self.volatile)
        self.im_sizes = np.stack(self.im_sizes).reshape(
            (self.num_gpus, self.batch_size_per_gpu, 3))

        if self.is_rel:
            self.gt_rels, self.gt_rel_chunks = self._chunkize(self.gt_rels)

        self.gt_boxes, self.gt_box_chunks = self._chunkize(self.gt_boxes, tensor=torch.FloatTensor)
        self.gt_classes, _ = self._chunkize(self.gt_classes)
        if self.is_train:
            self.train_anchor_labels, self.train_chunks = self._chunkize(self.train_anchor_labels)
            self.train_anchors, _ = self._chunkize(self.train_anchors, tensor=torch.FloatTensor)
            # Drop the label column; keep (img_ind, h, w, A) index tuples.
            self.train_anchor_inds = self.train_anchor_labels[:, :-1].contiguous()

        if len(self.proposals) != 0:
            self.proposals, self.proposal_chunks = self._chunkize(self.proposals, tensor=torch.FloatTensor)

    def _scatter(self, x, chunk_sizes, dim=0):
        """ Helper function: scatter x across GPUs by chunk_sizes (or just move
        to the primary GPU in the single-GPU case).
        NOTE(review): `async=` is the pre-0.4 torch spelling and a reserved
        keyword from Python 3.7; modern torch uses `non_blocking=`."""
        if self.num_gpus == 1:
            return x.cuda(self.primary_gpu, async=True)
        return torch.nn.parallel.scatter_gather.Scatter.apply(
            list(range(self.num_gpus)), chunk_sizes, dim, x)

    def scatter(self):
        """ Assigns everything to the GPUs"""
        self.imgs = self._scatter(self.imgs, [self.batch_size_per_gpu] * self.num_gpus)

        # Keep full copies on the primary GPU in addition to the per-GPU chunks.
        self.gt_classes_primary = self.gt_classes.cuda(self.primary_gpu, async=True)
        self.gt_boxes_primary = self.gt_boxes.cuda(self.primary_gpu, async=True)

        # Predcls might need these
        self.gt_classes = self._scatter(self.gt_classes, self.gt_box_chunks)
        self.gt_boxes = self._scatter(self.gt_boxes, self.gt_box_chunks)

        if self.is_train:
            self.train_anchor_inds = self._scatter(self.train_anchor_inds,
                                                   self.train_chunks)
            self.train_anchor_labels = self.train_anchor_labels.cuda(self.primary_gpu, async=True)
            self.train_anchors = self.train_anchors.cuda(self.primary_gpu, async=True)

            if self.is_rel:
                self.gt_rels = self._scatter(self.gt_rels, self.gt_rel_chunks)
        else:
            if self.is_rel:
                self.gt_rels = self.gt_rels.cuda(self.primary_gpu, async=True)

        if self.proposal_chunks is not None:
            self.proposals = self._scatter(self.proposals, self.proposal_chunks)

    def __getitem__(self, index):
        """
        Returns a tuple containing data
        :param index: Which GPU we're on, or 0 if no GPUs
        :return: If training:
        (image, im_size, img_start_ind, anchor_inds, anchors, gt_boxes, gt_classes,
        train_anchor_inds)
        test:
        (image, im_size, img_start_ind, anchor_inds, anchors)
        """
        if index not in list(range(self.num_gpus)):
            raise ValueError("Out of bounds with index {} and {} gpus".format(index, self.num_gpus))

        # Pick the relation payload for this GPU (rels_i is only bound on the
        # multi-GPU path; the single-GPU return below uses `rels` directly).
        if self.is_rel:
            rels = self.gt_rels
            if index > 0 or self.num_gpus != 1:
                rels_i = rels[index] if self.is_rel else None
        elif self.is_flickr:
            rels = (self.gt_sents, self.gt_nodes)
            if index > 0 or self.num_gpus != 1:
                rels_i = (self.gt_sents[index], self.gt_nodes[index])
        else:
            rels = None
            rels_i = None

        if self.proposal_chunks is None:
            proposals = None
        else:
            proposals = self.proposals

        if index == 0 and self.num_gpus == 1:
            image_offset = 0
            if self.is_train:
                return (self.imgs, self.im_sizes[0], image_offset,
                        self.gt_boxes, self.gt_classes, rels, proposals, self.train_anchor_inds)
            return self.imgs, self.im_sizes[0], image_offset, self.gt_boxes, self.gt_classes, rels, proposals

        # Otherwise proposals is None
        assert proposals is None

        image_offset = self.batch_size_per_gpu * index
        # TODO: Return a namedtuple
        if self.is_train:
            return (
                self.imgs[index], self.im_sizes[index], image_offset,
                self.gt_boxes[index], self.gt_classes[index], rels_i, None, self.train_anchor_inds[index])
        return (self.imgs[index], self.im_sizes[index], image_offset,
                self.gt_boxes[index], self.gt_classes[index], rels_i, None)
| 9,073 | 38.281385 | 118 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.