repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
risk-slim | risk-slim-master/riskslim/__init__.py | from .coefficient_set import CoefficientSet
from .lattice_cpa import run_lattice_cpa, setup_lattice_cpa, finish_lattice_cpa
from .utils import load_data_from_csv, print_model | 174 | 57.333333 | 79 | py |
risk-slim | risk-slim-master/riskslim/mip.py | from math import ceil, floor
import numpy as np
from cplex import Cplex, SparsePair, infinity as CPX_INFINITY
from .coefficient_set import CoefficientSet
from .utils import print_log
#todo: add loss cut
#todo: add constraint function
#todo: default cplex parameters
#todo: check cores
#todo: pass compute_loss to convert_risk_slim_cplex_solution
def create_risk_slim(coef_set, input):
    """
    create RiskSLIM MIP object

    Parameters
    ----------
    coef_set - CoefficientSet with bounds / signs / L0 penalties for each coefficient
    input - dictionary of RiskSLIM parameters and formulation

    Returns
    -------
    mip - RiskSLIM surrogate MIP without 0 cuts
    indices - dict mapping variable groups (loss, rho, alpha, ...) to names and CPLEX indices

    Issues
    ----
    no support for non-integer Lset "values"
    only drops intercept index for variable_names that match '(Intercept)'
    """
    assert isinstance(coef_set, CoefficientSet)
    assert isinstance(input, dict)

    # setup printing and loading
    function_print_flag = input.get('print_flag', False)
    print_from_function = lambda msg: print_log(msg) if function_print_flag else lambda msg: None

    # set default parameters (mutates the caller's dict in place)
    input.setdefault('w_pos', 1.0)
    input.setdefault('w_neg', 2.0 - input['w_pos'])
    input.setdefault('C_0', 0.01)
    input.setdefault('include_auxillary_variable_for_objval', True)
    input.setdefault('include_auxillary_variable_for_L0_norm', True)
    input.setdefault('loss_min', 0.00)
    input.setdefault('loss_max', float(CPX_INFINITY))
    input.setdefault('L0_min', 0)
    input.setdefault('L0_max', len(coef_set))
    input.setdefault('objval_min', 0.00)
    input.setdefault('objval_max', float(CPX_INFINITY))
    input.setdefault('relax_integer_variables', False)
    input.setdefault('drop_variables', True)
    input.setdefault('tight_formulation', False)
    input.setdefault('set_cplex_cutoffs', True)

    # variables
    P = len(coef_set)
    w_pos, w_neg = input['w_pos'], input['w_neg']
    C_0j = np.copy(coef_set.c0)
    # NaN in c0 marks "use the default penalty"; those are the L0-regularized coefficients
    L0_reg_ind = np.isnan(C_0j)
    C_0j[L0_reg_ind] = input['C_0']
    C_0j = C_0j.tolist()
    C_0_rho = np.copy(C_0j)
    trivial_L0_min = 0
    trivial_L0_max = np.sum(L0_reg_ind)
    rho_ub = list(coef_set.ub)
    rho_lb = list(coef_set.lb)
    rho_type = ''.join(list(coef_set.vtype))

    # calculate min/max values for loss
    loss_min = max(0.0, float(input['loss_min']))
    loss_max = min(CPX_INFINITY, float(input['loss_max']))

    # calculate min/max values for model size
    L0_min = max(input['L0_min'], 0.0)
    L0_max = min(input['L0_max'], trivial_L0_max)
    L0_min = ceil(L0_min)
    L0_max = floor(L0_max)
    assert L0_min <= L0_max

    # calculate min/max values for objval
    objval_min = max(input['objval_min'], 0.0)
    objval_max = min(input['objval_max'], CPX_INFINITY)
    assert objval_min <= objval_max

    # include constraint on min/max model size?
    nontrivial_L0_min = L0_min > trivial_L0_min
    nontrivial_L0_max = L0_max < trivial_L0_max
    include_auxillary_variable_for_L0_norm = input['include_auxillary_variable_for_L0_norm'] or \
                                             nontrivial_L0_min or \
                                             nontrivial_L0_max

    # include constraint on min/max objective value?
    nontrivial_objval_min = objval_min > 0.0
    nontrivial_objval_max = objval_max < CPX_INFINITY
    include_auxillary_variable_for_objval = input['include_auxillary_variable_for_objval'] or \
                                            nontrivial_objval_min or \
                                            nontrivial_objval_max

    has_intercept = '(Intercept)' in coef_set.variable_names

    """
    RiskSLIM MIP Formulation

    minimize w_pos*loss_pos + w_neg *loss_minus + 0*rho_j + C_0j*alpha_j

    such that

    L0_min <= L0 <= L0_max
    -rho_min * alpha_j < lambda_j < rho_max * alpha_j
    L_0 in 0 to P
    rho_j in [rho_min_j, rho_max_j]
    alpha_j in {0,1}

    x = [loss_pos, loss_neg, rho_j, alpha_j]

    optional constraints:
    objval = w_pos * loss_pos + w_neg * loss_min + sum(C_0j * alpha_j) (required for callback)
    L0_norm = sum(alpha_j) (required for callback)

    Changes for Tight Formulation (included when input['tight_formulation'] = True):
    sigma_j in {0,1} for j s.t. lambda_j has free sign and alpha_j exists
    lambda_j >= delta_pos_j if alpha_j = 1 and sigma_j = 1
    lambda_j >= -delta_neg_j if alpha_j = 1 and sigma_j = 0
    lambda_j >= alpha_j for j such that lambda_j >= 0
    lambda_j <= -alpha_j for j such that lambda_j <= 0
    """

    # create MIP object
    mip = Cplex()
    vars = mip.variables
    cons = mip.linear_constraints

    # set sense
    mip.objective.set_sense(mip.objective.sense.minimize)

    # add main variables: x = [loss, rho_0..rho_{P-1}, alpha_0..alpha_{P-1}]
    loss_obj = [w_pos]
    loss_ub = [loss_max]
    loss_lb = [loss_min]
    loss_type = 'C'
    loss_names = ['loss']
    obj = loss_obj + [0.0] * P + C_0j
    ub = loss_ub + rho_ub + [1.0] * P
    lb = loss_lb + rho_lb + [0.0] * P
    ctype = loss_type + rho_type + 'B' * P
    rho_names = ['rho_%d' % j for j in range(P)]
    alpha_names = ['alpha_%d' % j for j in range(P)]
    varnames = loss_names + rho_names + alpha_names

    if include_auxillary_variable_for_objval:
        objval_auxillary_name = ['objval']
        objval_auxillary_ub = [objval_max]
        objval_auxillary_lb = [objval_min]
        objval_type = 'C'
        print_from_function("adding auxiliary variable for objval s.t. %1.4f <= objval <= %1.4f" % (objval_min, objval_max))
        obj += [0.0]
        ub += objval_auxillary_ub
        lb += objval_auxillary_lb
        varnames += objval_auxillary_name
        ctype += objval_type

    if include_auxillary_variable_for_L0_norm:
        L0_norm_auxillary_name = ['L0_norm']
        L0_norm_auxillary_ub = [L0_max]
        L0_norm_auxillary_lb = [L0_min]
        L0_norm_type = 'I'
        print_from_function("adding auxiliary variable for L0_norm s.t. %d <= L0_norm <= %d" % (L0_min, L0_max))
        obj += [0.0]
        ub += L0_norm_auxillary_ub
        lb += L0_norm_auxillary_lb
        varnames += L0_norm_auxillary_name
        ctype += L0_norm_type

    if input['relax_integer_variables']:
        # LP relaxation: make every integer/binary variable continuous
        ctype = ctype.replace('I', 'C')
        ctype = ctype.replace('B', 'C')

    vars.add(obj = obj, lb = lb, ub = ub, types = ctype, names = varnames)

    # 0-Norm LB Constraints:
    # lambda_j,lb * alpha_j <= lambda_j <= Inf
    # 0 <= lambda_j - lambda_j,lb * alpha_j < Inf
    for j in range(P):
        cons.add(names = ["L0_norm_lb_" + str(j)],
                 lin_expr = [SparsePair(ind=[rho_names[j], alpha_names[j]], val=[1.0, -rho_lb[j]])],
                 senses = "G",
                 rhs = [0.0])

    # 0-Norm UB Constraints:
    # lambda_j <= lambda_j,ub * alpha_j
    # 0 <= -lambda_j + lambda_j,ub * alpha_j
    for j in range(P):
        cons.add(names = ["L0_norm_ub_" + str(j)],
                 lin_expr =[SparsePair(ind=[rho_names[j], alpha_names[j]], val=[-1.0, rho_ub[j]])],
                 senses = "G",
                 rhs = [0.0])

    # objval_max constraint
    # loss_var + sum(C_0j .* alpha_j) <= objval_max
    if include_auxillary_variable_for_objval:
        print_from_function("adding constraint so that objective value <= " + str(objval_max))
        cons.add(names = ["objval_def"],
                 lin_expr = [SparsePair(ind = objval_auxillary_name + loss_names + alpha_names, val=[-1.0] + loss_obj + C_0j)],
                 senses = "E",
                 rhs = [0.0])

    # Auxiliary L0_norm variable definition:
    # L0_norm = sum(alpha_j)
    # L0_norm - sum(alpha_j) = 0
    if include_auxillary_variable_for_L0_norm:
        cons.add(names = ["L0_norm_def"],
                 lin_expr = [SparsePair(ind = L0_norm_auxillary_name + alpha_names, val = [1.0] + [-1.0] * P)],
                 senses = "E",
                 rhs = [0.0])

    # drop L0_norm_lb constraint for any variable with rho_lb >= 0
    dropped_variables = []
    constraints_to_drop = []

    # drop alpha / L0_norm_ub / L0_norm_lb for ('Intercept')
    if input['drop_variables']:
        # drop L0_norm_ub/lb constraint for any variable with rho_ub/rho_lb >= 0
        sign_pos_ind = np.flatnonzero(coef_set.sign > 0)
        sign_neg_ind = np.flatnonzero(coef_set.sign < 0)
        constraints_to_drop.extend(["L0_norm_lb_" + str(j) for j in sign_pos_ind])
        constraints_to_drop.extend(["L0_norm_ub_" + str(j) for j in sign_neg_ind])

        # drop alpha for any variable where rho_ub = rho_lb = 0
        fixed_value_ind = np.flatnonzero(coef_set.ub == coef_set.lb)
        variables_to_drop = ["alpha_" + str(j) for j in fixed_value_ind]
        vars.delete(variables_to_drop)
        dropped_variables += variables_to_drop
        alpha_names = [alpha_names[j] for j in range(P) if alpha_names[j] not in dropped_variables]

    if has_intercept:
        # the intercept is never penalized, so it needs no L0 indicator
        intercept_idx = coef_set.variable_names.index('(Intercept)')
        intercept_alpha_name = 'alpha_' + str(intercept_idx)
        vars.delete([intercept_alpha_name])
        alpha_names.remove(intercept_alpha_name)
        dropped_variables.append(intercept_alpha_name)
        print_from_function("dropped L0 indicator for '(Intercept)'")
        constraints_to_drop.extend(["L0_norm_ub_" + str(intercept_idx), "L0_norm_lb_" + str(intercept_idx)])

    if len(constraints_to_drop) > 0:
        constraints_to_drop = list(set(constraints_to_drop))
        cons.delete(constraints_to_drop)

    # indices: everything downstream code needs to address the MIP by name or index
    indices = {
        'n_variables': vars.get_num(),
        'n_constraints': cons.get_num(),
        'names': vars.get_names(),
        'loss_names': loss_names,
        'rho_names': rho_names,
        'alpha_names': alpha_names,
        'loss': vars.get_indices(loss_names),
        'rho': vars.get_indices(rho_names),
        'alpha': vars.get_indices(alpha_names),
        'L0_reg_ind': L0_reg_ind,
        'C_0_rho': C_0_rho,
        'C_0_alpha': mip.objective.get_linear(alpha_names) if len(alpha_names) > 0 else [],
        }

    if include_auxillary_variable_for_objval:
        indices.update({
            'objval_name': objval_auxillary_name,
            'objval': vars.get_indices(objval_auxillary_name)[0],
            })

    if include_auxillary_variable_for_L0_norm:
        indices.update({
            'L0_norm_name': L0_norm_auxillary_name,
            'L0_norm': vars.get_indices(L0_norm_auxillary_name)[0],
            })

    # officially change the problem to LP if variables are relaxed
    if input['relax_integer_variables']:
        old_problem_type = mip.problem_type[mip.get_problem_type()]
        mip.set_problem_type(mip.problem_type.LP)
        new_problem_type = mip.problem_type[mip.get_problem_type()]
        print_from_function("changed problem type from %s to %s" % (old_problem_type, new_problem_type))

    if input['set_cplex_cutoffs'] and not input['relax_integer_variables']:
        mip.parameters.mip.tolerances.lowercutoff.set(objval_min)
        mip.parameters.mip.tolerances.uppercutoff.set(objval_max)

    return mip, indices
def set_cplex_mip_parameters(cpx, param, display_cplex_progress = False):
    """
    Helper function to set CPLEX parameters of CPLEX MIP object

    Parameters
    ----------
    cpx                     Cplex object to configure
    param                   dict of parameter values (randomseed, n_cores, mipemphasis, ...)
    display_cplex_progress  set to None or False to silence CPLEX console output

    Returns
    -------
    cpx with parameters set
    """
    p = cpx.parameters
    p.randomseed.set(param['randomseed'])
    p.threads.set(param['n_cores'])
    p.output.clonelog.set(0)
    p.parallel.set(1)

    # BUG FIX: the original test was `display_cplex_progress is (None or False)`,
    # which evaluates to `is False`, so passing None left CPLEX output enabled.
    # Both None and False are documented to suppress output.
    if not display_cplex_progress:
        cpx = set_cpx_display_options(cpx, display_mip = False, display_lp = False, display_parameters = False)

    problem_type = cpx.problem_type[cpx.get_problem_type()]
    if problem_type == 'MIP':
        # CPLEX Memory Parameters
        # MIP.Param.workdir.Cur = exp_workdir;
        # MIP.Param.workmem.Cur = cplex_workingmem;
        # MIP.Param.mip.strategy.file.Cur = 2; %nodefile uncompressed
        # MIP.Param.mip.limits.treememory.Cur = cplex_nodefilesize;

        # CPLEX MIP Parameters
        p.emphasis.mip.set(param['mipemphasis'])
        p.mip.tolerances.mipgap.set(param['mipgap'])
        p.mip.tolerances.absmipgap.set(param['absmipgap'])
        p.mip.tolerances.integrality.set(param['integrality_tolerance'])

        # CPLEX Solution Pool Parameters
        p.mip.limits.repairtries.set(param['repairtries'])
        p.mip.pool.capacity.set(param['poolsize'])
        p.mip.pool.replace.set(param['poolreplace'])
        # 0 = replace oldest /1: replace worst objective / #2 = replace least diverse solutions

    return cpx
def set_cpx_display_options(cpx, display_mip = True, display_parameters = False, display_lp = False):
    """Enable/disable CPLEX console output for MIP, LP, and parameter echoing; returns cpx."""
    cpx.parameters.mip.display.set(display_mip)
    cpx.parameters.simplex.display.set(display_lp)

    try:
        cpx.parameters.paramdisplay.set(display_parameters)
    except AttributeError:
        # older CPLEX Python APIs do not expose the paramdisplay parameter
        pass

    if not (display_mip or display_lp):
        # silence every output channel when nothing is being displayed
        for silence in (cpx.set_results_stream,
                        cpx.set_log_stream,
                        cpx.set_error_stream,
                        cpx.set_warning_stream):
            silence(None)

    return cpx
def add_mip_starts(mip, indices, pool, max_mip_starts = float('inf'), mip_start_effort_level = 4):
    """
    Add feasible solutions from a solution pool to a RiskSLIM MIP as MIP starts.

    Parameters
    ----------
    mip - RiskSLIM surrogate MIP
    indices - indices of RiskSLIM surrogate MIP
    pool - solution pool
    max_mip_starts - max number of mip starts to add (optional; default is add all)
    mip_start_effort_level - effort that CPLEX will spend trying to fix (optional; default is 4)

    Returns
    -------
    mip with MIP starts attached
    """
    # todo remove suboptimal using pool filter
    assert isinstance(mip, Cplex)

    # BUG FIX: the original used a bare `except:`, which also swallows
    # KeyboardInterrupt/SystemExit; catch Exception instead.
    try:
        obj_cutoff = mip.parameters.mip.tolerances.uppercutoff.get()
    except Exception:
        # no cutoff available: accept every pool solution
        obj_cutoff = float('inf')

    # only add distinct solutions, best objective first
    pool = pool.distinct().sort()
    n_added = 0
    for objval, rho in zip(pool.objvals, pool.solutions):
        if np.less_equal(objval, obj_cutoff):
            mip_start_name = "mip_start_" + str(n_added)
            mip_start_obj, _ = convert_to_risk_slim_cplex_solution(rho = rho, indices = indices, objval = objval)
            mip_start_obj = cast_mip_start(mip_start_obj, mip)
            mip.MIP_starts.add(mip_start_obj, mip_start_effort_level, mip_start_name)
            n_added += 1
        if n_added >= max_mip_starts:
            break

    return mip
def cast_mip_start(mip_start, cpx):
    """
    casts the solution values and indices in a Cplex SparsePair

    Parameters
    ----------
    mip_start   cplex SparsePair
    cpx         Cplex

    Returns
    -------
    Cplex SparsePair where the indices are integers and the values for each variable match the variable type specified in CPLEX Object
    """
    assert isinstance(cpx, Cplex)
    assert isinstance(mip_start, SparsePair)

    # normalize indices to plain Python ints
    indices = [int(i) for i in mip_start.ind]
    values = list(mip_start.val)

    # coerce each value to match its variable's declared type
    for position, var_type in enumerate(cpx.variables.get_types(indices)):
        if var_type in ('B', 'I'):
            values[position] = int(values[position])
        elif var_type == 'C':
            values[position] = float(values[position])

    return SparsePair(ind = indices, val = values)
def convert_to_risk_slim_cplex_solution(rho, indices, loss = None, objval = None):
    """
    Convert coefficient vector 'rho' into a solution for RiskSLIM CPLEX MIP

    Parameters
    ----------
    rho - coefficient vector (numpy array)
    indices - dict of variable indices produced by create_risk_slim
    loss - precomputed loss value (optional; computed from rho if missing)
    objval - precomputed objective value (optional; computed if missing)

    Returns
    -------
    (solution_cpx, objval) - SparsePair covering every MIP variable, and the objective value
    """
    # NOTE(review): relies on a module-level compute_loss being bound before this
    # is called (set elsewhere in the package) — verify against the caller
    global compute_loss
    n_variables = indices['n_variables']
    solution_idx = np.arange(n_variables)
    solution_val = np.zeros(n_variables)

    # rho
    solution_val[indices['rho']] = rho

    # alpha: indicator is 1 exactly where a regularized coefficient is nonzero
    alpha = np.zeros(len(indices['alpha']))
    alpha[np.flatnonzero(rho[indices['L0_reg_ind']])] = 1.0
    solution_val[indices['alpha']] = alpha
    L0_penalty = np.sum(indices['C_0_alpha'] * alpha)

    # add loss / objval
    need_loss = 'loss' in indices
    need_objective_val = 'objval' in indices
    need_L0_norm = 'L0_norm' in indices
    need_sigma = 'sigma_names' in indices

    # check that we have the right length
    # COMMENT THIS OUT FOR DEPLOYMENT
    # if need_sigma:
    #     pass
    # else:
    #     assert (indices['n_variables'] == (len(rho) + len(alpha) + need_loss + need_objective_val + need_L0_norm))

    if need_loss:
        if loss is None:
            if objval is None:
                loss = compute_loss(rho)
            else:
                # objval = loss + L0_penalty, so recover loss by subtraction
                loss = objval - L0_penalty
        solution_val[indices['loss']] = loss

    if need_objective_val:
        if objval is None:
            if loss is None:
                objval = compute_loss(rho) + L0_penalty
            else:
                objval = loss + L0_penalty
        solution_val[indices['objval']] = objval

    if need_L0_norm:
        solution_val[indices['L0_norm']] = np.sum(alpha)

    if need_sigma:
        # map each sigma variable back to its rho; str.strip removes the characters
        # {s,i,g,m,a,_} from both ends, leaving the numeric suffix intact
        rho_for_sigma = np.array([indices['rho'][int(s.strip('sigma_'))] for s in indices['sigma_names']])
        solution_val[indices['sigma']] = np.abs(solution_val[rho_for_sigma])

    solution_cpx = SparsePair(ind = solution_idx, val = solution_val.tolist())
    return solution_cpx, objval
| 17,079 | 32.754941 | 134 | py |
risk-slim | risk-slim-master/riskslim/tests/test_risk_slim.py | import os
import pprint
import numpy as np
import riskslim
# Dataset Strategy
#
# variables: binary, real,
# N+: 0, 1, >1
# N-: 0, 1, >1
# Testing Strategy
#
# loss_computation normal, fast, lookup
# max_coefficient 0, 1, >1
# max_L0_value 0, 1, >1
# max_offset 0, 1, Inf
# c0_value eps, 1e-8, 0.01, C0_max
# sample_weights no, yes
# w_pos 1.00, < 1.00, > 1.00
# initialization on, off
# chained_updates on, off
# polishing on, off
# seq_rd on, off
# data
data_name = "breastcancer" # name of the data
data_dir = os.getcwd() + '/examples/data/' # directory where datasets are stored
data_csv_file = data_dir + data_name + '_data.csv' # csv file for the dataset
sample_weights_csv_file = None # csv file of sample weights for the dataset (optional)
# default LCPA settings used by every test_risk_slim invocation below
default_settings = {
    #
    'c0_value': 1e-6,
    'w_pos': 1.00,
    #
    # LCPA Settings
    'max_runtime': 300.0,                               # max runtime for LCPA
    'max_tolerance': np.finfo('float').eps,             # tolerance to stop LCPA (set to 0 to return provably optimal solution)
    'display_cplex_progress': True,                     # set to True to print CPLEX progress
    'loss_computation': 'normal',                       # how to compute the loss function ('normal','fast','lookup')
    'tight_formulation': True,                          # use a tighter formulation of the surrogate MIP (slightly improved relaxation)
    #
    # Other LCPA Heuristics
    'chained_updates_flag': True,                       # use chained updates
    'add_cuts_at_heuristic_solutions': True,            # add cuts at integer feasible solutions found using polishing/rounding
    #
    # LCPA Rounding Heuristic
    'round_flag': True,                                 # round continuous solutions with SeqRd
    'polish_rounded_solutions': True,                   # polish solutions rounded with SeqRd using DCD
    'rounding_tolerance': float('inf'),                 # only solutions with objective value < (1 + tol) are rounded
    'rounding_start_cuts': 0,                           # cuts needed to start using rounding heuristic
    'rounding_start_gap': float('inf'),                 # optimality gap needed to start using rounding heuristic
    'rounding_stop_cuts': 20000,                        # cuts needed to stop using rounding heuristic
    'rounding_stop_gap': 0.2,                           # optimality gap needed to stop using rounding heuristic
    #
    # LCPA Polishing Heuristic
    'polish_flag': True,                                # polish integer feasible solutions with DCD
    'polishing_tolerance': 0.1,                         # only solutions with objective value (1 + tol) are polished.
    'polishing_max_runtime': 10.0,                      # max time to run polishing each time
    'polishing_max_solutions': 5.0,                     # max # of solutions to polish each time
    'polishing_start_cuts': 0,                          # cuts needed to start using polishing heuristic
    'polishing_start_gap': float('inf'),                # min optimality gap needed to start using polishing heuristic
    'polishing_stop_cuts': float('inf'),                # cuts needed to stop using polishing heuristic
    'polishing_stop_gap': 5.0,                          # max optimality gap required to stop using polishing heuristic
    #
    # Initialization Procedure
    'initialization_flag': False,                       # use initialization procedure
    'init_display_progress': True,                      # show progress of initialization procedure
    'init_display_cplex_progress': False,               # show progress of CPLEX during intialization procedure
    #
    'init_max_runtime': 300.0,                          # max time to run CPA in initialization procedure
    'init_max_iterations': 10000,                       # max # of cuts needed to stop CPA
    'init_max_tolerance': 0.0001,                       # tolerance of solution to stop CPA
    'init_max_runtime_per_iteration': 300.0,            # max time per iteration of CPA
    'init_max_cplex_time_per_iteration': 10.0,          # max time per iteration to solve surrogate problem in CPA
    #
    'init_use_sequential_rounding': True,               # use SeqRd in initialization procedure
    'init_sequential_rounding_max_runtime': 30.0,       # max runtime for SeqRd in initialization procedure
    'init_sequential_rounding_max_solutions': 5,        # max solutions to round using SeqRd
    'init_polishing_after': True,                       # polish after rounding
    'init_polishing_max_runtime': 30.0,                 # max runtime for polishing
    'init_polishing_max_solutions': 5,                  # max solutions to polish
    #
    # CPLEX Solver Parameters
    'cplex_randomseed': 0,                              # random seed
    'cplex_mipemphasis': 0,                             # cplex MIP strategy
    }
def test_risk_slim(data_csv_file, sample_weights_csv_file = None, max_coefficient = 5, max_L0_value = 5, max_offset = 50, c0_value = 1e-6, w_pos = 1.00, settings = None):
    """Fit a RiskSLIM model on the given dataset, print the results, and return True."""
    # load dataset
    data = riskslim.load_data_from_csv(dataset_csv_file = data_csv_file, sample_weights_csv_file = sample_weights_csv_file)
    n_samples, n_features = data['X'].shape

    # coefficient set with symmetric bounds and a data-driven intercept bound
    coef_set = riskslim.CoefficientSet(variable_names = data['variable_names'], lb = -max_coefficient, ub = max_coefficient, sign = 0)
    coef_set.update_intercept_bounds(X = data['X'], y = data['Y'], max_offset = max_offset, max_L0_value = max_L0_value)

    # model size cannot exceed the number of penalized coefficients
    trivial_L0_max = n_features - np.sum(coef_set.C_0j == 0)
    constraints = {
        'L0_min': 0,
        'L0_max': min(max_L0_value, trivial_L0_max),
        'coef_set': coef_set,
        }

    # train model using lattice_cpa
    model_info, mip_info, lcpa_info = riskslim.run_lattice_cpa(data, constraints, settings)

    # model_info contains key results; lcpa_info has detailed LCPA diagnostics
    pprint.pprint(model_info)
    pprint.pprint(lcpa_info)
    return True
test_risk_slim(data_csv_file = data_csv_file, max_coefficient = 5, max_L0_value = 5, max_offset = 50, settings = default_settings)
test_risk_slim(data_csv_file = data_csv_file, max_coefficient = 5, max_L0_value = 1, max_offset = 50, settings = default_settings)
test_risk_slim(data_csv_file = data_csv_file, max_coefficient = 5, max_L0_value = 0, max_offset = 50, settings = default_settings)
test_risk_slim(data_csv_file = data_csv_file, max_coefficient = 5, max_L0_value = 0, max_offset = 0, settings = default_settings)
| 6,690 | 50.076336 | 170 | py |
risk-slim | risk-slim-master/riskslim/tests/test_loss_functions.py | #noinspection
import numpy as np
import riskslim.loss_functions.fast_log_loss as fast
import riskslim.loss_functions.log_loss as normal
import riskslim.loss_functions.log_loss_weighted as weighted
import riskslim.loss_functions.lookup_log_loss as lookup
from riskslim.setup_functions import _setup_training_weights
np.random.seed(seed = 0)
#initialize data matrix X and label vector Y
n_rows = 1000000
n_cols = 20
rho_ub = 100
rho_lb = -100
#helper function s
def generate_binary_data(n_rows = 1000000, n_cols = 20):
    """Sample a random 0/1 feature matrix X (n_rows x n_cols) and a ±1 label column Y (n_rows x 1)."""
    X = np.random.randint(low=0, high=2, size=(n_rows, n_cols))
    Y = np.random.randint(low=0, high=2, size=(n_rows, 1))
    # relabel the zeros as -1 so Y takes values in {-1, +1}
    Y[Y == 0] = -1
    return X, Y
def generate_integer_model(n_cols = 20, rho_ub = 100, rho_lb = -100, sparse_pct = 0.5, dtype = None):
    """
    Sample a random integer coefficient vector with a fraction of entries forced to zero.

    Parameters
    ----------
    n_cols      number of coefficients
    rho_ub      exclusive upper bound of sampled integers
    rho_lb      inclusive lower bound of sampled integers
    sparse_pct  fraction (of floor(n_cols / 2)) of entries zeroed out
    dtype       dtype of the returned array; when None, falls back to the dtype
                of the module-level data matrix Z (the original behavior)

    Returns
    -------
    rho         F-contiguous numpy array of shape (n_cols,)
    """
    if dtype is None:
        # backward-compatible default: original code read the global Z directly
        dtype = Z.dtype
    rho = np.random.randint(low=rho_lb, high=rho_ub, size=n_cols)
    rho = np.require(rho, dtype=dtype, requirements=['F'])
    # number of entries forced to zero (the original name `nnz_count` was misleading)
    n_zeroed = int(sparse_pct * np.floor(n_cols / 2))
    set_to_zero = np.random.choice(range(0, n_cols), size=n_zeroed, replace=False)
    rho[set_to_zero] = 0.0
    return rho
def get_score_bounds(Z_min, Z_max, rho):
    """
    Bound the score s = Z.dot(rho) using per-column bounds on the entries of Z.
    Positive coefficients attain their extremes at (Z_min, Z_max); negative
    coefficients at (Z_max, Z_min); zero coefficients contribute nothing.
    """
    s_min, s_max = 0, 0
    for j, coef in enumerate(rho):
        if coef > 0.0:
            s_min += coef * Z_min[j]
            s_max += coef * Z_max[j]
        elif coef < 0.0:
            s_min += coef * Z_max[j]
            s_max += coef * Z_min[j]
    return s_min, s_max
def get_score_bounds_from_range(Z_min, Z_max, rho_lb, rho_ub, L0_max = None):
    "global variables: L0_reg_ind"
    # every column extreme is attained at a corner of [rho_lb, rho_ub] x [Z_min, Z_max]
    corners = np.vstack([Z_min * rho_lb,
                         Z_max * rho_lb,
                         Z_min * rho_ub,
                         Z_max * rho_ub])
    col_min = np.min(corners, axis = 0)
    col_max = np.max(corners, axis = 0)

    if L0_max is None or L0_max == Z_min.shape[0]:
        # no sparsity cap: every column can contribute its extreme
        return np.sum(col_min), np.sum(col_max)

    # at most L0_max regularized columns may be nonzero: keep the most extreme ones;
    # unregularized columns always contribute
    s_min = np.sum(np.sort(col_min[L0_reg_ind])[0:L0_max]) + np.sum(col_min[~L0_reg_ind])
    s_max = np.sum(-np.sort(-col_max[L0_reg_ind])[0:L0_max]) + np.sum(col_max[~L0_reg_ind])
    return s_min, s_max
#generate data
X, Y = generate_binary_data(n_rows, n_cols)
Z = X * Y
Z = np.require(Z, requirements=['F'], dtype=np.float64)
rho = generate_integer_model(n_cols, rho_ub, rho_lb)
L0_reg_ind = np.ones(n_cols, dtype='bool')
L0_reg_ind[0] = False
Z_min = np.min(Z, axis = 0)
Z_max = np.max(Z, axis = 0)
#setup weights
weights = _setup_training_weights(Y, w_pos = 1.0, w_neg = 1.0, w_total_target = 2.0)
#create lookup table
min_score, max_score = get_score_bounds_from_range(Z_min, Z_max, rho_lb, rho_ub, L0_max = n_cols)
loss_value_tbl, prob_value_tbl, loss_tbl_offset = lookup.get_loss_value_and_prob_tables(min_score, max_score)
loss_tbl_offset = int(loss_tbl_offset)
#assert correctnes of log_loss from scores function
for s in range(int(min_score), int(max_score)+1):
normal_value = normal.log_loss_value_from_scores(np.array(s, dtype = Z.dtype, ndmin = 1)) #loss_value_tbl[s+loss_tbl_offset]
cython_value = fast.log_loss_value_from_scores(np.array(s, dtype = Z.dtype, ndmin = 1))
table_value = loss_value_tbl[s+loss_tbl_offset]
lookup_value = lookup.log_loss_value_from_scores(np.array(s,dtype = Z.dtype, ndmin = 1), loss_value_tbl, loss_tbl_offset)
assert(np.isclose(normal_value, cython_value, rtol = 1e-06))
assert(np.isclose(table_value, cython_value, rtol = 1e-06))
assert(np.isclose(table_value, normal_value, rtol = 1e-06))
assert(np.equal(table_value, lookup_value))
#python implementations need to be 'C' aligned instead of D aligned
Z_py = np.require(Z, requirements = ['C'])
rho_py = np.require(rho, requirements = ['C'])
scores_py = Z_py.dot(rho_py)
#define tests: thin wrappers so each loss implementation is called with identical arguments
# (all read module-level globals: Z, rho, Z_py, rho_py, and the lookup tables)
def normal_value_test(): return normal.log_loss_value(Z_py, rho_py)
def fast_value_test(): return fast.log_loss_value(Z, rho)
def lookup_value_test(): return lookup.log_loss_value(Z, rho, loss_value_tbl, loss_tbl_offset)
def normal_cut_test(): return normal.log_loss_value_and_slope(Z_py, rho_py)
def fast_cut_test(): return fast.log_loss_value_and_slope(Z, rho)
def lookup_cut_test(): return lookup.log_loss_value_and_slope(Z, rho, loss_value_tbl, prob_value_tbl, loss_tbl_offset)
# def dynamic_lookup_value_test():
#     s_min_dynamic, s_max_dynamic = get_score_bounds(Z_min, Z_max, rho)
#     tbl, offset = lookup.get_loss_value_table(s_min_dynamic, s_max_dynamic)
#     return lookup.log_loss_value(Z, rho, tbl, offset)
#check values and cuts
normal_cut = normal_cut_test()
cython_cut = fast_cut_test()
lookup_cut = lookup_cut_test()
assert(np.isclose(fast_value_test(), lookup_value_test()))
assert(np.isclose(normal_cut[0], cython_cut[0]))
assert(np.isclose(lookup_cut[0], cython_cut[0]))
assert(all(np.isclose(normal_cut[1], cython_cut[1])))
assert(all(np.isclose(lookup_cut[1], cython_cut[1])))
print("passed cut tests")
#weighted tests: wrappers for the sample-weighted loss implementation (read globals Z_py, rho_py, scores_py)
def weighted_value_test(weights): return weighted.log_loss_value(Z_py, weights, np.sum(weights), rho_py)
def weighted_cut_test(weights): return weighted.log_loss_value_and_slope(Z_py, weights, np.sum(weights), rho_py)
def weighted_scores_test(weights): return weighted.log_loss_value_from_scores(weights, np.sum(weights), scores_py)
#w_pos = w_neg = 1.0
weights = _setup_training_weights(Y, w_pos = 1.0, w_neg = 1.0, w_total_target = 2.0)
weights_match_unit_weights = all(weights == 1.0)
if weights_match_unit_weights:
print("tests for match between normal and weighted loss function")
#value
assert(np.isclose(normal_value_test(), weighted_value_test(weights)))
assert(np.isclose(normal_value_test(), weighted_scores_test(weights)))
#cut
normal_cut = normal_cut_test()
weighted_cut = weighted_cut_test(weights)
assert(np.isclose(normal_cut[0], weighted_cut[0]))
assert(all(np.isclose(normal_cut[1], weighted_cut[1])))
print("passed all tests for weighted implementations when w_pos = w_neg = 1.0")
#w_pos = w_neg = 1.0
w_pos = 0.5 + np.random.rand()
w_neg = 1.0
weights = _setup_training_weights(Y, w_pos = 0.5 + np.random.rand(), w_neg = 1.0, w_total_target = 2.0)
weighted_value = weighted_value_test(weights)
weighted_cut = weighted_cut_test(weights)
weighted_value_from_scores = weighted_scores_test(weights)
assert(np.isclose(weighted_value, weighted_value_from_scores))
assert(np.isclose(weighted_value, weighted_cut[0]))
print("passed all tests for weighted loss functions when w_pos = %1.2f and w_neg = %1.2f" % (w_pos, w_neg))
# print 'timing for loss value computation \n'
# %timeit -n 20 normal_value = normal_value_test()
# %timeit -n 20 cython_value = fast_value_test()
# %timeit -n 20 lookup_value = lookup_value_test()
#
# print 'timing for loss cut computation \n'
# %timeit -n 20 normal_cut = normal_cut_test()
# %timeit -n 20 cython_cut = fast_cut_test()
# %timeit -n 20 lookup_cut = lookup_cut_test()
| 7,112 | 37.657609 | 128 | py |
risk-slim | risk-slim-master/riskslim/tests/__init__.py | 0 | 0 | 0 | py | |
risk-slim | risk-slim-master/riskslim/loss_functions/log_loss_weighted.py | import numpy as np
def log_loss_value(Z, weights, total_weights, rho):
    """
    Weighted logistic loss, computed without overflow.

    Splits on the sign of each score s = (Z.dot(rho))_i:
        s > 0:   log(1 + exp(-s))  via log1p(exp(-s))
        s <= 0:  log(1 + exp(s)) - s

    Parameters
    ----------
    Z               numpy.array of training data, shape (n_rows, n_cols)
    weights         numpy.array of non-negative sample weights, shape (n_rows,)
    total_weights   precomputed numpy.sum(weights)
    rho             numpy.array of coefficients, shape (n_cols,)

    Returns
    -------
    scalar weighted mean of log(1 + exp(-Z.dot(rho)))
    """
    scores = Z.dot(rho)
    nonneg = scores > 0
    losses = np.empty_like(scores)
    losses[nonneg] = np.log1p(np.exp(-scores[nonneg]))
    losses[~nonneg] = np.log1p(np.exp(scores[~nonneg])) - scores[~nonneg]
    return losses.dot(weights) / total_weights
def log_loss_value_and_slope(Z, weights, total_weights, rho):
    """
    Weighted logistic loss and its gradient, computed together (cheaper than
    two passes) for cut generation in cutting-plane algorithms.

    Parameters
    ----------
    Z               numpy.array of training data, shape (n_rows, n_cols)
    weights         numpy.array of non-negative sample weights, shape (n_rows,)
    total_weights   precomputed numpy.sum(weights)
    rho             numpy.array of coefficients, shape (n_cols,)

    Returns
    -------
    loss_value      scalar weighted mean of log(1 + exp(-Z.dot(rho)))
    loss_slope      (n_cols,) weighted gradient Z.T.dot((sigmoid(s) - 1) * w) / total
    """
    scores = Z.dot(rho)
    nonneg = scores > 0
    exp_neg = np.exp(-scores[nonneg])
    exp_pos = np.exp(scores[~nonneg])

    # loss value (overflow-safe split on score sign)
    losses = np.empty_like(scores)
    losses[nonneg] = np.log1p(exp_neg)
    losses[~nonneg] = np.log1p(exp_pos) - scores[~nonneg]
    loss_value = losses.dot(weights) / total_weights

    # slope: per-example sigmoid probabilities, shifted by -1 and weighted
    probs = np.empty_like(scores)
    probs[nonneg] = 1.0 / (1.0 + exp_neg)
    probs[~nonneg] = exp_pos / (1.0 + exp_pos)
    loss_slope = Z.T.dot((probs - 1.0) * weights) / total_weights

    return loss_value, loss_slope
def log_loss_value_from_scores(weights, total_weights, scores):
    """
    computes the logistic loss value from a vector of scores in a numerically stable way
    where scores = Z.dot(rho)
    see also: http://stackoverflow.com/questions/20085768/
    this function is used for heuristics (discrete_descent, sequential_rounding).
    to save computation when running the heuristics, we store the scores and
    call this function to compute the loss directly from the scores
    this reduces the need to recompute the dot product.

    Parameters
    ----------
    scores          numpy.array of scores = Z.dot(rho)
    total_weights   numpy.sum(weights) (only included to reduce computation)
    weights         numpy.array of sample weights with shape (n_rows,)

    Returns
    -------
    loss_value  scalar = 1/n_rows * sum(log( 1 .+ exp(-Z*rho))
    """
    # split on the sign of the score so exp() never overflows:
    #   s > 0:  log1p(exp(-s));   s <= 0:  -s + log1p(exp(s))
    pos_idx = scores > 0
    loss_value = np.empty_like(scores)
    loss_value[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
    loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))
    # weighted mean of per-example losses
    loss_value = loss_value.dot(weights) / total_weights
    return loss_value
| 3,880 | 37.425743 | 88 | py |
risk-slim | risk-slim-master/riskslim/loss_functions/build_cython_loss_functions.py | #!/usr/bin/env python
"""
This script builds loss functions using Cython on a local machine.
To run this script
1. Change to the directory
$REPO_DIR/riskslim/loss_functions
2. Run the following commands in Bash:
python2 build_cython_loss_functions.py build_ext --inplace
python3 build_cython_loss_functions.py build_ext --inplace
"""
import numpy
import scipy
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext

# fast log loss
# NOTE(review): scipy.get_include() is not a documented scipy API (numpy.get_include() is);
# confirm it exists in the scipy version used for builds
ext_modules = [Extension(name = "fast_log_loss",
                         sources=["fast_log_loss.pyx"],
                         include_dirs=[numpy.get_include(), scipy.get_include()],
                         libraries=["m"],
                         extra_compile_args = ["-ffast-math"])]

setup(
    cmdclass = {'build_ext': build_ext},
    include_dirs = [numpy.get_include(), scipy.get_include()],
    ext_modules = ext_modules,
)

# lookup log loss
ext_modules = [Extension(name = "lookup_log_loss",
                         sources=["lookup_log_loss.pyx"],
                         include_dirs=[numpy.get_include(), scipy.get_include()],
                         libraries=["m"],
                         extra_compile_args = ["-ffast-math"])]

setup(
    cmdclass = {'build_ext': build_ext},
    include_dirs = [numpy.get_include(), scipy.get_include()],
    ext_modules = ext_modules,
)
| 1,404 | 23.224138 | 81 | py |
risk-slim | risk-slim-master/riskslim/loss_functions/log_loss.py | import numpy as np
def log_loss_value(Z, rho):
    """
    Mean logistic loss, computed in a numerically stable way
    (see http://stackoverflow.com/questions/20085768/).

    Parameters
    ----------
    Z       numpy.array containing training data with shape = (n_rows, n_cols)
    rho     numpy.array of coefficients with shape = (n_cols,)

    Returns
    -------
    scalar = 1/n_rows * sum(log(1 + exp(-Z.dot(rho))))
    """
    scores = Z.dot(rho)
    is_pos = scores > 0
    is_neg = ~is_pos
    losses = np.empty_like(scores)
    # positive scores: evaluate log(1 + exp(-s)) directly
    losses[is_pos] = np.log1p(np.exp(-scores[is_pos]))
    # non-positive scores: use -s + log(1 + exp(s)) to avoid overflow in exp
    losses[is_neg] = np.log1p(np.exp(scores[is_neg])) - scores[is_neg]
    return losses.mean()
def log_loss_value_and_slope(Z, rho):
    """
    Mean logistic loss and its gradient, computed together in a numerically
    stable way (see http://stackoverflow.com/questions/20085768/).

    Intended for cut generation in cutting-plane algorithms, where computing
    value and slope at the same time is slightly cheaper than two passes.

    Parameters
    ----------
    Z       numpy.array containing training data with shape = (n_rows, n_cols)
    rho     numpy.array of coefficients with shape = (n_cols,)

    Returns
    -------
    loss_value  scalar = 1/n_rows * sum(log(1 + exp(-Z.dot(rho))))
    loss_slope  (n_cols,) gradient of the mean loss with respect to rho
    """
    scores = Z.dot(rho)
    is_pos = scores > 0
    is_neg = ~is_pos
    # exp terms are shared between the value and the slope computation
    exp_neg_of_pos = np.exp(-scores[is_pos])
    exp_of_neg = np.exp(scores[is_neg])

    losses = np.empty_like(scores)
    losses[is_pos] = np.log1p(exp_neg_of_pos)
    losses[is_neg] = np.log1p(exp_of_neg) - scores[is_neg]
    loss_value = losses.mean()

    # sigmoid(scores), evaluated stably on each branch
    probs = np.empty_like(scores)
    probs[is_pos] = 1.0 / (1.0 + exp_neg_of_pos)
    probs[is_neg] = exp_of_neg / (1.0 + exp_of_neg)
    loss_slope = Z.T.dot(probs - 1.0) / Z.shape[0]

    return loss_value, loss_slope
def log_loss_value_from_scores(scores):
    """
    Mean logistic loss computed directly from precomputed scores,
    where scores = Z.dot(rho), in a numerically stable way
    (see http://stackoverflow.com/questions/20085768/).

    Used by heuristics (discrete_descent, sequential_rounding) that cache the
    scores so the dot product does not have to be recomputed.

    Parameters
    ----------
    scores  numpy.array of scores = Z.dot(rho)

    Returns
    -------
    scalar = 1/n_rows * sum(log(1 + exp(-scores)))
    """
    is_pos = scores > 0
    is_neg = ~is_pos
    losses = np.empty_like(scores)
    losses[is_pos] = np.log1p(np.exp(-scores[is_pos]))
    losses[is_neg] = np.log1p(np.exp(scores[is_neg])) - scores[is_neg]
    return losses.mean()
def log_probs(Z, rho):
    """
    Predicted probabilities under the logit model, sigmoid(Z.dot(rho)),
    evaluated in a numerically stable way
    (see http://stackoverflow.com/questions/20085768/).

    Parameters
    ----------
    Z       numpy.array containing training data with shape = (n_rows, n_cols)
    rho     numpy.array of coefficients with shape = (n_cols,)

    Returns
    -------
    numpy.array of probabilities under the logit model
    """
    scores = Z.dot(rho)
    is_pos = scores > 0
    is_neg = ~is_pos
    # local renamed to `probs` so it no longer shadows the function name
    probs = np.empty_like(scores)
    probs[is_pos] = 1.0 / (1.0 + np.exp(-scores[is_pos]))
    probs[is_neg] = np.exp(scores[is_neg]) / (1.0 + np.exp(scores[is_neg]))
    return probs
| 3,795 | 32.298246 | 95 | py |
risk-slim | risk-slim-master/riskslim/loss_functions/__init__.py | from .log_loss import *
from .log_loss_weighted import *

# fast_log_loss / lookup_log_loss are optional compiled (Cython) extensions;
# fall back to the pure-Python log_loss module when they are unavailable.
try:
    from .fast_log_loss import *
except ImportError:
    print("warning: could not import fast log loss")
    print("warning: returning handle to standard loss functions")
    # todo replace with warning object
    # bug fix: the fallback must be a package-relative import. The previous
    # `import log_loss as fast_log_loss` is a Python 2 implicit relative
    # import; under Python 3 it raises ModuleNotFoundError here and breaks
    # the whole package import whenever the extension is missing.
    from . import log_loss as fast_log_loss

try:
    from .lookup_log_loss import *
except ImportError:
    print("warning: could not import lookup log loss")
    print("warning: returning handle to standard loss functions")
    # todo replace with warning object
    from . import log_loss as lookup_log_loss
| 572 | 27.65 | 65 | py |
risk-slim | risk-slim-master/batch/train_risk_slim.py | #!/usr/bin/python
"""
This file is to train a RiskSLIM model in a batch computing environment
It parses command line arguments, and can be called as:
python train_risk_slim.py --data="${data_file}" --results="${results_file}"
where:
data_file csv file containing the training data
results_file file name for the save file; needs to be unique and not already exist on disk
Use "python train_risk_slim.py --help" for a description of additional arguments.
Copyright (C) 2017 Berk Ustun
"""
import os
import sys
import time
import argparse
import logging
import pickle
import json
import numpy as np
# add the source directory to search path to avoid module import errors if riskslim has not been installed
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from riskslim.utils import load_data_from_csv, setup_logging
from riskslim.coefficient_set import CoefficientSet
from riskslim.lattice_cpa import run_lattice_cpa, DEFAULT_LCPA_SETTINGS
# uncomment for debugging
# TODO: run the following when building
# with open(settings_json, 'w') as outfile:
# json.dump(DEFAULT_LCPA_SETTINGS, outfile, sort_keys = False, indent=4)
def setup_parser():
    """
    Create an argparse Parser object for RiskSLIM command line arguments.
    This object determines all command line arguments, handles input
    validation and default values.
    See https://docs.python.org/3/library/argparse.html for configuration

    Returns
    -------
    argparse.ArgumentParser configured with all train_risk_slim options
    """

    #parser helper functions
    # Each helper is used as an argparse `type=` callable: it converts the raw
    # string and raises ArgumentTypeError (-> a clean CLI error) on bad input.
    def is_positive_integer(value):
        parsed_value = int(value)
        if parsed_value <= 0:
            raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
        return parsed_value

    def is_positive_float(value):
        parsed_value = float(value)
        if parsed_value <= 0.0:
            raise argparse.ArgumentTypeError("%s must be a positive value" % value)
        return parsed_value

    # -1 is a sentinel meaning "no limit" for timelimit/max_size/max_offset
    def is_negative_one_or_positive_integer(value):
        parsed_value = int(value)
        if not (parsed_value == -1 or parsed_value >= 1):
            raise argparse.ArgumentTypeError("%s is an invalid value (must be -1 or >=1)" % value)
        else:
            return parsed_value

    def is_file_on_disk(file_name):
        if not os.path.isfile(file_name):
            raise argparse.ArgumentTypeError("the file %s does not exist!" % file_name)
        else:
            return file_name

    # NOTE(review): defined but unused; --results is declared type=str and the
    # existence check is done manually in __main__ instead.
    def is_file_not_on_disk(file_name):
        if os.path.isfile(file_name):
            raise argparse.ArgumentTypeError("the file %s already exists on disk" % file_name)
        else:
            return file_name

    # fold 0 means "use all data for training" (see --fold help text)
    def is_valid_fold(value):
        parsed_value = int(value)
        if parsed_value < 0:
            raise argparse.ArgumentTypeError("%s must be a positive integer" % value)
        return parsed_value

    parser = argparse.ArgumentParser(
        prog='train_risk_slim',
        description='Train a RiskSLIM classifier from the command shell',
        epilog='Copyright (C) 2017 Berk Ustun',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument('--data',
                        type=str,
                        required=True,
                        help='csv file with training data')

    parser.add_argument('--results',
                        type=str,
                        required=True,
                        help='name of results file (must not already exist)')

    parser.add_argument('--cvindices',
                        type=is_file_on_disk,
                        help='csv file with indices for K-fold CV')

    parser.add_argument('--fold',
                        type=is_valid_fold,
                        default=0,
                        help='index of test fold; set as 0 to use all data for training')

    parser.add_argument('--weights',
                        type=is_file_on_disk,
                        help='csv file with non-negative weights for each point')

    parser.add_argument('--settings',
                        type=is_file_on_disk,
                        help='JSON file with additional settings for LCPA')

    parser.add_argument('--timelimit',
                        type=is_negative_one_or_positive_integer,
                        default=300,
                        help='time limit on training (in seconds); set as -1 for no time limit')

    parser.add_argument('--max_size',
                        type = is_negative_one_or_positive_integer,
                        default=-1,
                        help='maximum number of non-zero coefficients; set as -1 for no limit')

    parser.add_argument('--max_coef',
                        type=is_positive_integer,
                        default=5,
                        help='value of upper and lower bounds for any coefficient')

    parser.add_argument('--max_offset',
                        type=is_negative_one_or_positive_integer,
                        default=-1,
                        help='value of upper and lower bound on offset parameter; set as -1 to use a conservative value')

    parser.add_argument('--c0_value',
                        type=is_positive_float,
                        default=1e-6,
                        help='l0 regularization parameter; set as a positive number between 0.00 and log(2)')

    parser.add_argument('--w_pos',
                        type=is_positive_float,
                        default=1.00,
                        help='w_pos')

    parser.add_argument('--log',
                        type=str,
                        help='name of the log file')

    parser.add_argument('--silent',
                        action='store_true',
                        help='flag to suppress logging to stderr')

    return parser
if __name__ == '__main__':

    # parse command line arguments and render them for logging
    parser = setup_parser()
    parsed = parser.parse_args()
    parsed_dict = vars(parsed)
    parsed_string = [key + ' : ' + str(parsed_dict[key]) + '\n' for key in parsed_dict]
    parsed_string.sort()

    # setup logging
    logger = logging.getLogger()
    logger = setup_logging(logger, log_to_console =(not parsed.silent), log_file = parsed.log)
    logger.setLevel(logging.INFO)
    logger.info("running 'train_risk_slim.py'")
    logger.info("working directory: %r" % os.getcwd())
    logger.info("parsed the following variables:\n-%s" % '-'.join(parsed_string))

    # check results_file does not exist
    if os.path.isfile(parsed.results):
        logger.error("results file %s already exists)" % parsed.results)
        logger.error("either delete %s or choose a different name" % parsed.results)
        sys.exit(1)

    # check settings_json exists / or use default settings
    # only keys already present in DEFAULT_LCPA_SETTINGS are accepted
    settings = dict(DEFAULT_LCPA_SETTINGS)
    if parsed.settings is not None:
        with open(parsed.settings) as json_file:
            loaded_settings = json.load(json_file)
            loaded_settings = {str(key): loaded_settings[key] for key in loaded_settings if key in settings}
        settings.update(loaded_settings)

    #overwrite parameters specified by the user
    settings['max_runtime'] = float('inf') if parsed.timelimit == -1 else parsed.timelimit
    settings['c0_value'] = parsed.c0_value
    settings['w_pos'] = parsed.w_pos

    # check if sample weights file was specified, if not set as None
    logger.info("loading data and sample weights")
    data = load_data_from_csv(dataset_csv_file = parsed.data,
                              sample_weights_csv_file = parsed.weights,
                              fold_csv_file = parsed.cvindices,
                              fold_num = parsed.fold)
    N, P = data['X'].shape

    # initialize coefficient set and offset parameter
    logger.info("creating coefficient set and constraints")
    max_coefficient = parsed.max_coef
    max_model_size = parsed.max_size if parsed.max_size >= 0 else float('inf')
    max_offset = parsed.max_offset if parsed.max_offset >= 0 else float('inf')
    coef_set = CoefficientSet(variable_names = data['variable_names'],
                              lb = -max_coefficient,
                              ub = max_coefficient,
                              sign = 0)
    coef_set.update_intercept_bounds(X = data['X'], y = data['Y'], max_offset = max_offset, max_L0_value = max_model_size)

    #print coefficient set
    if not parsed.silent:
        print(coef_set)

    constraints = {
        'L0_min': 0,
        'L0_max': max_model_size,
        'coef_set': coef_set,
    }

    # fit RiskSLIM model using Lattice Cutting Plane Algorithm
    model_info, mip_info, lcpa_info = run_lattice_cpa(data, constraints, settings)

    # save output to disk
    results = {
        "date": time.strftime("%d/%m/%y", time.localtime()),
        "data_file": parsed.data,
        "fold_file": parsed.cvindices,
        # bug fix: this previously stored parsed.settings under "fold_num";
        # the fold index actually used for training is parsed.fold
        "fold_num": parsed.fold,
        "results_file": parsed.results,
    }
    results.update(model_info)

    # CoefficientSet is not reliably picklable; store its fields instead
    coef_set = results.pop('coef_set')
    results['coef_set_ub'] = coef_set.ub
    results['coef_set_lb'] = coef_set.lb
    results['coef_set_signs'] = coef_set.sign
    results['coef_set_c0'] = coef_set.c0

    logger.info("saving results...")
    with open(parsed.results, 'wb') as outfile:
        pickle.dump(results, outfile, protocol=pickle.HIGHEST_PROTOCOL)
    logger.info("saved results as pickle file: %r" % parsed.results)
    logger.info('''to access results, use this snippet:
    \t\t\t import pickle
    \t\t\t f = open(results_file, 'rb')
    \t\t\t results = pickle.load(f)
    '''
                )
    logger.info("finished training")
    logger.info("quitting\n\n")
    sys.exit(0)
| 9,707 | 36.338462 | 122 | py |
ShiftCNN | ShiftCNN-master/shiftcnn_quantization.py | import sys
import os
import numpy as np
import pickle
import matplotlib.pyplot as plt
#
# NOTE(review): this is a Python 2 script (uses the `print` statement below).
# N: number of power-of-two terms summed per weight; B: bit width per term.
N = 2
B = 4
#
#model = "squeezenet_v1.1"
model = "ResNet-50"
# Paths to the Caffe model definition / weights; the quantized copy is written
# next to the source model with the N/B settings encoded in its name.
SOURCE_PATH = os.environ["HOME"]+"/github/caffe/models/"+model+"/"
prototxt = SOURCE_PATH+"train_val.prototxt"
source = SOURCE_PATH+model+".caffemodel"
qtarget = SOURCE_PATH+model+"_N"+str(N)+"_B"+str(B)+".caffemodel"
caffe_root = os.environ["CAFFE_ROOT"]
os.chdir(caffe_root)
print caffe_root
sys.path.insert(0, caffe_root + 'python')
import caffe
caffe.set_mode_cpu()
net = caffe.Net(prototxt, source, caffe.TEST)
layers = net.params.keys()
linestyles = ['--', '-']
# Quantize every layer's weight blob: approximate the normalized weights by a
# sum of N signed powers of two, each representable with B bits.
for idx, layer in enumerate(layers):
    #if not('bn' in layer) and not('scale' in layer): # do not include batch normalization and scaling layers in ResNets
    wT= 0.0
    w = net.params[layer][0].data
    wMax = np.max(np.abs(w))
    r = w/wMax # normalize
    # Greedy residual quantization: at each step pick the nearest power of two
    # to the remaining residual r, accumulate it into wT, and subtract it.
    for n in range(0, N):
        qSgn = np.sign(r)
        qLog = np.log2(abs(r+1e-32))
        qIdx = np.floor(qLog)
        bLog = qIdx + np.log2(1.5)
        bIdx = qLog > bLog # border condition
        # round up to the next exponent when the residual is past the midpoint
        qIdx[bIdx] = qIdx[bIdx] + 1.0
        q = qSgn * 2**(qIdx)
        qIdxMem = qSgn * (-(n+1)-qIdx+2)
        sIdx = (2-(n+1)-qIdx) > (2**(B-1)-1) # saturation condition
        # exponents outside the B-bit range are clipped to zero contribution
        q[sIdx] = 0
        qIdxMem[sIdx] = 0
        zIdx = q!=0  # NOTE(review): computed but unused
        wT += q
        r -= q
    # write the de-normalized quantized weights back into the network
    np.copyto(net.params[layer][0].data, wT*wMax)
net.save(qtarget)
| 1,514 | 28.134615 | 120 | py |
agd | agd-main/main.py | import sys
import os
import math
import argparse
import pickle
import torch
import importlib
from tqdm import tqdm
from agd import AGD
from architecture.fcn import *
from architecture.vgg import *
from architecture.resnet import *
############################################################################################
######################################### Parse args #######################################
############################################################################################
# Command line interface; depth/width only affect the 'fcn' architecture.
parser = argparse.ArgumentParser()
parser.add_argument('--arch', type=str, default='fcn', choices=['fcn', 'vgg', 'resnet18', 'resnet50'] )
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100', 'mnist', 'imagenet'] )
parser.add_argument('--loss', type=str, default='mse', choices=['mse', 'xent'] )
parser.add_argument('--train_bs', type=int, default=128 )
parser.add_argument('--test_bs', type=int, default=128 )
parser.add_argument('--epochs', type=int, default=200 )
parser.add_argument('--depth', type=int, default=10 )
parser.add_argument('--width', type=int, default=256 )
parser.add_argument('--distribute', action='store_true' )
parser.add_argument('--cpu', action='store_true' )
parser.add_argument('--gain', type=float, default=1.0 )
args = parser.parse_args()

############################################################################################
#################################### Distributed setup #####################################
############################################################################################

# One process per GPU, launched under Open MPI (OMPI_* env vars give the
# global layout); local_rank selects the CUDA device within the node.
local_rank = 0
if args.distribute:
    world_size = int(os.getenv('OMPI_COMM_WORLD_SIZE'))
    global_rank = int(os.getenv('OMPI_COMM_WORLD_RANK'))
    local_rank = global_rank % torch.cuda.device_count()
    torch.distributed.init_process_group(backend='nccl', rank=global_rank, world_size=world_size)
    print(f'GPU {global_rank} reporting in. Local rank: {local_rank}. CPU threads: {torch.get_num_threads()}.')
    torch.distributed.barrier()

    # Non-zero ranks stay quiet: tqdm is replaced by a pass-through with the
    # same (iterable, total) call shape, and stdout is discarded.
    # NOTE(review): the devnull handle is intentionally left open for the
    # lifetime of the process.
    if global_rank > 0:
        tqdm = lambda x, total : x
        sys.stdout = open(os.devnull, 'w')

############################################################################################
####################################### Print args #########################################
############################################################################################

# Echo the parsed configuration as an aligned two-column table.
print("{: <39} {: <20}".format("\nArgument", "Value"))
print("{: <39} {: <20}".format(*["=============================="]*2))
for arg in vars(args):
    print("{: <39} {: <20}".format(arg, getattr(args, arg)))
print("\nNote: depth and width are only used for fully-connected networks.")
############################################################################################
######################################### Get data #########################################
############################################################################################
print("\nGetting data...")
print("==================================="*2)

# Dataset modules live under data/<name>.py and expose getData().
data_module = importlib.import_module("data."+args.dataset)
trainset, testset, input_dim, output_dim = data_module.getData()

if args.distribute:
    # Per-rank shards: the global batch size is split evenly across workers,
    # and shuffling is delegated to the DistributedSampler.
    train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    test_sampler = torch.utils.data.distributed.DistributedSampler(testset, shuffle=False, drop_last=True)

    train_loader = torch.utils.data.DataLoader( trainset,
                                                batch_size=int(args.train_bs/world_size),
                                                shuffle=False,
                                                num_workers=8,
                                                pin_memory=True,
                                                sampler=train_sampler )
    test_loader  = torch.utils.data.DataLoader( testset,
                                                batch_size=int(args.test_bs/world_size),
                                                shuffle=False,
                                                num_workers=8,
                                                pin_memory=True,
                                                sampler=test_sampler )
else:
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.train_bs, shuffle=True, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_bs, shuffle=False, pin_memory=True)

############################################################################################
##################################### Set architecture #####################################
############################################################################################

# Pick the network: fcn works everywhere; resnet50 on ImageNet uses the
# torchvision-style ResNet; vgg/resnet18/resnet50 otherwise require CIFAR.
if args.arch == 'fcn':
    net = FCN(args.depth, args.width, input_dim, output_dim)
elif args.dataset == 'imagenet' and args.arch == 'resnet50':
    net = resnet50(num_classes=1000)
elif 'cifar' not in args.dataset:
    raise Exception("That network only works with CIFAR.")
elif args.arch == 'vgg':
    net = VGG16(output_dim)
elif args.arch == 'resnet18':
    net = PreActResNet18(output_dim)
elif args.arch == 'resnet50':
    net = PreActResNet50(output_dim)

if not args.cpu:
    net = net.cuda(local_rank)

# AGD initializes the weights itself (orthogonal + singular-value scaling)
# before any DDP wrapping.
agd = AGD(net, args.gain)
agd.init_weights()

if args.distribute:
    net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[local_rank])

# Summarize the trainable parameter tensors.
print("{: <39} {: <20}".format("\nLayer", "Shape"))
print("{: <39} {: <20}".format(*["=============================="]*2))
for name, p in net.named_parameters():
    print("{: <39} {: <20}".format(name, str(list(p.shape))))
############################################################################################
######################################## Define loop #######################################
############################################################################################
def loop(net, dataloader, optim, train):
    """Run one epoch over `dataloader`.

    In training mode, backpropagates and steps the optimizer; in eval mode,
    only evaluates. Returns (mean loss, mean accuracy, mean log term), where
    the log term is the value returned by optim.step() (0 when not training).
    Relies on module-level globals: args, local_rank, world_size, tqdm.
    """
    if train:
        net.train()
    else:
        net.eval()

    n_batches = len(dataloader)
    total_loss = 0
    total_acc = 0
    total_log = 0

    for data, target in tqdm(dataloader, total=n_batches):
        if not args.cpu:
            data = data.cuda(local_rank)
            target = target.cuda(local_rank)

        output = net(data)

        if args.loss == 'mse':
            # one-hot targets scaled by sqrt(num_classes)
            onehot = torch.nn.functional.one_hot(target, num_classes=output.shape[1]).float()
            onehot *= math.sqrt(output.shape[1])
            loss = (output - onehot).square().mean()
        elif args.loss == 'xent':
            # cross entropy written out as logsumexp minus the target logit
            error = output.logsumexp(dim=1) - output[range(target.shape[0]), target]
            loss = error.mean()

        if train:
            loss.backward()

        acc = (output.argmax(dim=1) == target).float().mean()

        if args.distribute:
            # average the metrics across ranks for reporting
            torch.distributed.all_reduce(loss, torch.distributed.ReduceOp.SUM)
            torch.distributed.all_reduce(acc, torch.distributed.ReduceOp.SUM)
            loss /= world_size
            acc /= world_size

        if train:
            total_log += optim.step()
            net.zero_grad()

        total_acc += acc.item()
        total_loss += loss.item()

    return total_loss / n_batches, total_acc / n_batches, total_log / n_batches
############################################################################################
###################################### Train network #######################################
############################################################################################
# Per-epoch metric histories; the whole dict is pickled after every epoch.
results = {}
results['log_list' ] = []
results['train_loss_list'] = []
results['test_loss_list' ] = []
results['train_acc_list' ] = []
results['test_acc_list' ] = []

os.makedirs('logs', exist_ok=True)
# Save-file name encodes every CLI argument as "key:value-" pairs,
# e.g. logs/arch:fcn-dataset:cifar10-...-gain:1.0.pickle
filename = ""
for arg in vars(args):
    filename += arg + ':' + str(getattr(args,arg)) + '-'
filename = os.path.join('logs', filename[:-1] + '.pickle')
# Main training loop: one train pass and one eval pass per epoch, with the
# accumulated metrics checkpointed to disk after every epoch.
for epoch in range(args.epochs):
    print("\nEpoch", epoch)
    print("==================================="*2)

    # DistributedSampler needs the epoch number to reshuffle its shards
    if args.distribute: train_loader.sampler.set_epoch(epoch)

    train_loss, train_acc, log = loop(net, train_loader, agd, train=True )
    test_loss, test_acc, _ = loop(net, test_loader, None, train=False )

    print("Log term: \t", log )
    print("Train loss:\t", train_loss )
    print("Test loss: \t", test_loss )
    print("Train acc: \t", train_acc )
    print("Test acc: \t", test_acc )

    results['log_list' ].append( log )
    results['train_loss_list'].append( train_loss )
    results['test_loss_list' ].append( test_loss )
    results['train_acc_list' ].append( train_acc )
    results['test_acc_list' ].append( test_acc )

    # bug fix: the original `pickle.dump(results, open(filename, "wb"))`
    # never closed the file handle; a context manager guarantees the
    # checkpoint is flushed and closed each epoch.
    with open(filename, "wb") as checkpoint:
        pickle.dump(results, checkpoint)
| 8,893 | 41.966184 | 122 | py |
agd | agd-main/agd.py | import math
import torch
from torch.optim.optimizer import Optimizer
from torch.nn.init import orthogonal_
def singular_value(p):
    """Per-tensor scale factor: sqrt(rows/cols), additionally divided by the
    square root of the kernel area for 4-D (conv) weight tensors."""
    scale = math.sqrt(p.shape[0] / p.shape[1])
    if p.dim() == 4:
        kernel_area = p.shape[2] * p.shape[3]
        scale /= math.sqrt(kernel_area)
    return scale
class AGD(Optimizer):
    """Automatic gradient descent optimizer.

    Holds a reference to the whole network (not just its parameters) so that
    init_weights() and step() can iterate net.parameters() directly. Only
    2-D (matrix) and 4-D (conv) weight tensors are supported; `gain`
    rescales the update size.
    """

    def __init__(self, net, gain=1.0):
        self.net = net
        self.depth = len(list(net.parameters()))  # number of weight tensors
        self.gain = gain

        # 1-D tensors (biases, norm scales) would break the dim-(0,1) norms
        # used in step(), so they are rejected up front.
        for p in self.net.parameters():
            if p.dim() == 1: raise Exception("Biases are not supported.")

        super().__init__(net.parameters(), defaults=dict())

    @torch.no_grad()
    def init_weights(self):
        # Orthogonal initialization scaled by singular_value(p); 4-D conv
        # kernels are orthogonalized independently at each spatial offset.
        for p in self.net.parameters():
            if p.dim() == 2: orthogonal_(p)
            if p.dim() == 4:
                for kx in range(p.shape[2]):
                    for ky in range(p.shape[3]):
                        orthogonal_(p[:,:,kx,ky])
            p *= singular_value(p)

    @torch.no_grad()
    def step(self):
        """Apply one AGD update in place; returns the scalar log term."""
        # G: depth-averaged sum of scale-weighted gradient norms
        G = 0
        for p in self.net.parameters():
            G += singular_value(p) * p.grad.norm(dim=(0,1)).sum()
        G /= self.depth

        # automatic step size: grows only logarithmically with G
        log = math.log(0.5 * (1 + math.sqrt(1 + 4*G)))

        for p in self.net.parameters():
            # normalize each gradient slice by its norm and rescale by the
            # layer's singular-value factor; nan_to_num guards the 0/0 case
            # when a gradient slice is exactly zero.
            factor = singular_value(p) / p.grad.norm(dim=(0,1), keepdim=True)
            p -= self.gain * log / self.depth * torch.nan_to_num(factor) * p.grad

        return log
| 1,412 | 26.173077 | 81 | py |
agd | agd-main/architecture/fcn.py | import math
import torch.nn as nn
import torch.nn.functional as F
class FCN(nn.Module):
    """Fully-connected ReLU network.

    `depth` counts all linear layers (initial + depth-2 hidden + final);
    inputs are flattened, and every ReLU output is scaled by sqrt(2).
    """

    def __init__(self, depth, width, input_dim, output_dim, bias=False):
        super(FCN, self).__init__()
        self.initial = nn.Linear(input_dim, width, bias=bias)
        hidden = [nn.Linear(width, width, bias=bias) for _ in range(depth - 2)]
        self.layers = nn.ModuleList(hidden)
        self.final = nn.Linear(width, output_dim, bias=bias)

    def forward(self, x):
        gain = math.sqrt(2)
        h = x.view(x.shape[0], -1)
        h = gain * F.relu(self.initial(h))
        for hidden_layer in self.layers:
            h = gain * F.relu(hidden_layer(h))
        return self.final(h)
| 718 | 28.958333 | 97 | py |
agd | agd-main/architecture/resnet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import Any, Callable, List, Optional, Type, Union
import torch
import torch.nn as nn
from torch import Tensor
### For CIFAR-10
# Factory helpers for the standard pre-activation ResNet depths on CIFAR-sized
# inputs; the list gives the number of residual blocks in each of the 4 stages.
def PreActResNet18(output_dim): return PreActResNet(PreActBlock, [2,2,2,2], output_dim)
def PreActResNet34(output_dim): return PreActResNet(PreActBlock, [3,4,6,3], output_dim)
def PreActResNet50(output_dim): return PreActResNet(PreActBottleneck, [3,4,6,3], output_dim)
def PreActResNet101(output_dim): return PreActResNet(PreActBottleneck, [3,4,23,3], output_dim)
def PreActResNet152(output_dim): return PreActResNet(PreActBottleneck, [3,8,36,3], output_dim)
class PreActBlock(nn.Module):
    '''Pre-activation basic residual block: (BN -> ReLU -> 3x3 conv) twice,
    with a 1x1 projection shortcut only when the shape changes.'''
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes, affine=False)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, affine=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        # only create a shortcut module when the spatial size or channel
        # count changes; forward() checks for its existence via hasattr
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            projection = nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            self.shortcut = nn.Sequential(projection)

    def forward(self, x):
        preact = F.relu(self.bn1(x))
        # the projection (if any) consumes the pre-activated input
        if hasattr(self, 'shortcut'):
            residual = self.shortcut(preact)
        else:
            residual = x
        out = self.conv1(preact)
        out = self.conv2(F.relu(self.bn2(out)))
        out += residual
        return out
class PreActBottleneck(nn.Module):
    '''Pre-activation bottleneck block: BN-ReLU-1x1, BN-ReLU-3x3(stride),
    BN-ReLU-1x1 with 4x channel expansion; projection shortcut only when
    the shape changes.'''
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes, affine=False)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, affine=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, affine=False)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)

        # the shortcut module exists only when a projection is required;
        # forward() tests for it with hasattr
        if stride != 1 or in_planes != self.expansion * planes:
            projection = nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            self.shortcut = nn.Sequential(projection)

    def forward(self, x):
        preact = F.relu(self.bn1(x))
        residual = self.shortcut(preact) if hasattr(self, 'shortcut') else x
        out = self.conv1(preact)
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.conv3(F.relu(self.bn3(out)))
        return out + residual
class PreActResNet(nn.Module):
    """Pre-activation ResNet for 32x32 (CIFAR-style) inputs.

    `block` is the residual block class (must expose `expansion` and accept
    (in_planes, planes, stride)); `num_blocks` gives the block count for each
    of the four stages.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes, bias=False)

    def _make_layer(self, block, planes, num_blocks, stride):
        # only the first block of a stage may downsample; the rest use stride 1
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
### For ImageNet
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding"""
    # padding equals dilation so the spatial size is preserved at stride 1
    conv_kwargs = dict(kernel_size=3,
                       stride=stride,
                       padding=dilation,
                       groups=groups,
                       bias=False,
                       dilation=dilation)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """1x1 convolution"""
    conv = nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=stride, bias=False)
    return conv
class BasicBlock(nn.Module):
    """Standard (post-activation) residual block:
    conv3x3(stride) -> BN -> ReLU -> conv3x3 -> BN, plus the (optionally
    downsampled) identity, followed by a final ReLU."""

    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        affine=False
    ) -> None:
        super().__init__()
        self.affine = affine
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # BasicBlock does not support grouped / widened / dilated variants
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes, affine=affine)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes, affine=affine)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        affine=False
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # inner width scales with base_width and groups (ResNeXt-style)
        width = int(planes * (base_width / 64.0)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width, affine=affine)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width, affine=affine)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion, affine=affine)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
bias=False,
affine=False
) -> None:
self.bias=bias
self.affine=affine
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
f"or a 3-element tuple, got {replace_stride_with_dilation}"
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes, affine=self.affine)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes, bias=self.bias)
self.out_dim = num_classes
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
if self.affine:
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck) and m.bn3.weight is not None:
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock) and m.bn2.weight is not None:
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(
self,
block: Type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
dilate: bool = False,
) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion, affine=False),
)
layers = []
layers.append(
block(
self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
affine=self.affine
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
    def forward(self, x: Tensor) -> Tensor:
        # Thin delegate; the split mirrors torchvision's TorchScript-friendly
        # layout (see the note in _forward_impl).
        return self._forward_impl(x)
def _resnet(
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    weights: Optional,
    progress: bool,
    bias=False,
    affine=False,
    num_classes=10,
) -> ResNet:
    """Instantiate a ResNet from a block type and stage depths.

    NOTE(review): `weights` and `progress` are accepted for API parity with
    torchvision but are not used here — no pretrained weights are loaded.
    """
    return ResNet(block, layers, bias=bias, affine=affine, num_classes=num_classes)
def resnet18(num_classes, weights: Optional = None, progress: bool = True, bias=False, affine=False) -> ResNet:
    """ResNet-18: BasicBlock with stage depths [2, 2, 2, 2]."""
    return _resnet(
        BasicBlock, [2, 2, 2, 2], weights, progress,
        bias=bias, affine=affine, num_classes=num_classes,
    )
def resnet34(weights: Optional = None, progress: bool = True, bias=False, affine=False, num_classes=10) -> ResNet:
    """ResNet-34: BasicBlock with stage depths [3, 4, 6, 3].

    Fix: `num_classes` was not exposed here even though resnet18/resnet50
    accept it; it is now a trailing keyword with default 10 (the same default
    `_resnet` already used), so existing calls are unaffected.
    """
    return _resnet(BasicBlock, [3, 4, 6, 3], weights, progress, bias=bias, affine=affine, num_classes=num_classes)
def resnet50(num_classes, weights: Optional = None, progress: bool = True, bias=False, affine=False) -> ResNet:
    """ResNet-50: Bottleneck blocks with stage depths [3, 4, 6, 3]."""
    return _resnet(
        Bottleneck, [3, 4, 6, 3], weights, progress,
        bias=bias, affine=affine, num_classes=num_classes,
    )
def resnet101(weights: Optional = None, progress: bool = True, bias=False, affine=False, num_classes=10) -> ResNet:
    """ResNet-101: Bottleneck blocks with stage depths [3, 4, 23, 3].

    Fix: expose `num_classes` (trailing keyword, default 10 — the default
    `_resnet` already used) for consistency with resnet18/resnet50; existing
    positional calls keep working.
    """
    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, bias=bias, affine=affine, num_classes=num_classes)
def resnet152(weights: Optional = None, progress: bool = True, bias=False, affine=False, num_classes=10) -> ResNet:
    """ResNet-152: Bottleneck blocks with stage depths [3, 8, 36, 3].

    Fix: expose `num_classes` (trailing keyword, default 10 — the default
    `_resnet` already used) for consistency with resnet18/resnet50; existing
    positional calls keep working.
    """
    return _resnet(Bottleneck, [3, 8, 36, 3], weights, progress, bias=bias, affine=affine, num_classes=num_classes)
| 14,531 | 35.512563 | 118 | py |
agd | agd-main/architecture/vgg.py | import torch.nn as nn
# VGG-11 (config "A"): conv widths; 'M' entries mark 2x2 max-pool stages.
def VGG11(output_dim): return VGG_CIFAR([64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], output_dim)
# VGG-13 (config "B"): conv widths; 'M' entries mark 2x2 max-pool stages.
def VGG13(output_dim): return VGG_CIFAR([64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], output_dim)
# VGG-16 (config "D"): conv widths; 'M' entries mark 2x2 max-pool stages.
def VGG16(output_dim): return VGG_CIFAR([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], output_dim)
# VGG-19 (config "E"): conv widths; 'M' entries mark 2x2 max-pool stages.
def VGG19(output_dim): return VGG_CIFAR([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], output_dim)
class VGG_CIFAR(nn.Module):
    """VGG-style CNN for 32x32 (CIFAR) inputs.

    `vgg_cfg` is a list of conv widths with 'M' entries marking 2x2
    max-pools; the final feature map is flattened into a 512-wide linear
    classifier of `output_dim` outputs.
    """

    def __init__(self, vgg_cfg, output_dim=10, bias=False, affine=False):
        super(VGG_CIFAR, self).__init__()
        self.bias = bias        # whether conv/linear layers carry bias terms
        self.affine = affine    # whether BatchNorm layers learn scale/shift
        self.features = self._make_layers(vgg_cfg)
        self.classifier = nn.Linear(512, output_dim, bias=self.bias)

    def forward(self, x):
        """Feature extractor -> flatten -> linear head."""
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))

    def _make_layers(self, cfg):
        """Translate the config list into a sequential conv stack."""
        modules = []
        width_in = 3
        for entry in cfg:
            if entry == 'M':
                modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
                continue
            modules.append(nn.Conv2d(width_in, entry, kernel_size=3, padding=1, bias=self.bias))
            modules.append(nn.BatchNorm2d(entry, affine=self.affine))
            modules.append(nn.ReLU(inplace=True))
            width_in = entry
        # 1x1/stride-1 average pool is a no-op kept for parity with the
        # reference implementation (preserves state_dict layout).
        modules.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*modules)
| 1,587 | 44.371429 | 156 | py |
agd | agd-main/architecture/__init__.py | 0 | 0 | 0 | py | |
agd | agd-main/latex/algorithm/agd.py | import math
import torch
from torch.nn.init import orthogonal_
def singular_value(p):
    """Return the scale factor used by AGD for parameter tensor `p`.

    For a 2-D weight this is sqrt(shape[0] / shape[1]) (out over in); a
    4-D conv weight is additionally divided by sqrt(kh * kw) of its kernel.
    """
    out_dim, in_dim = p.shape[0], p.shape[1]
    scale = math.sqrt(out_dim / in_dim)
    if p.dim() == 4:
        scale = scale / math.sqrt(p.shape[2] * p.shape[3])
    return scale
class AGD:
    """Automatic Gradient Descent: optimiser with no learning-rate knob,
    only an optional `gain` multiplier. Updates parameters in place under
    ``torch.no_grad()``; exact arithmetic order is load-bearing here.
    """
    @torch.no_grad()
    def __init__(self, net, gain=1.0):
        # net  -- model whose parameters are initialised and updated in place
        # gain -- scalar multiplier on every update step (default 1.0)
        self.net = net
        self.depth = len(list(net.parameters()))
        self.gain = gain
        for p in net.parameters():
            # Only matrix (2-D) and conv (4-D) parameters are supported.
            if p.dim() == 1: raise Exception("Biases are not supported.")
            if p.dim() == 2: orthogonal_(p)
            if p.dim() == 4:
                # Orthogonalise each spatial slice of the conv kernel.
                for kx in range(p.shape[2]):
                    for ky in range(p.shape[3]):
                        orthogonal_(p[:,:,kx,ky])
            p *= singular_value(p)  # rescale to the target spectral norm
    @torch.no_grad()
    def step(self):
        # G: gradient summary — per-layer spectrally-scaled gradient norms,
        # averaged over the network depth.
        G = 0
        for p in self.net.parameters():
            G += singular_value(p) * p.grad.norm(dim=(0,1)).sum()
        G /= self.depth
        # Automatic step size derived from G (presumably the closed form
        # from the AGD paper — confirm against the reference write-up).
        log = math.log(0.5 * (1 + math.sqrt(1 + 4*G)))
        for p in self.net.parameters():
            # Normalise the gradient per (0,1)-slice, then rescale and apply.
            factor = singular_value(p) / p.grad.norm(dim=(0,1), keepdim=True)
            p -= self.gain * log / self.depth * factor * p.grad
| 1,174 | 26.97619 | 77 | py |
agd | agd-main/data/cifar100.py | from torchvision import datasets, transforms
def getData():
    """Return (trainset, testset, input_dim, output_dim) for CIFAR-100.

    Downloads into ./data on first use. The training split gets random
    crop/flip augmentation; both splits are per-channel normalised.
    """
    mean, std = (0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)
    augment = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
    ]
    to_normed_tensor = [transforms.ToTensor(), transforms.Normalize(mean, std)]
    trainset = datasets.CIFAR100(
        './data', train=True, download=True,
        transform=transforms.Compose(augment + to_normed_tensor),
    )
    testset = datasets.CIFAR100(
        './data', train=False, download=True,
        transform=transforms.Compose(to_normed_tensor),
    )
    return trainset, testset, 3*32*32, 100
| 764 | 27.333333 | 96 | py |
agd | agd-main/data/cifar10.py | from torchvision import datasets, transforms
def getData():
    """Return (trainset, testset, input_dim, output_dim) for CIFAR-10.

    Downloads into ./data on first use. The training split gets random
    crop/flip augmentation; both splits are per-channel normalised.
    """
    mean, std = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
    augment = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
    ]
    to_normed_tensor = [transforms.ToTensor(), transforms.Normalize(mean, std)]
    trainset = datasets.CIFAR10(
        './data', train=True, download=True,
        transform=transforms.Compose(augment + to_normed_tensor),
    )
    testset = datasets.CIFAR10(
        './data', train=False, download=True,
        transform=transforms.Compose(to_normed_tensor),
    )
    return trainset, testset, 3*32*32, 10
| 761 | 27.222222 | 95 | py |
agd | agd-main/data/__init__.py | 0 | 0 | 0 | py | |
agd | agd-main/data/imagenet.py | import os
from torchvision import datasets, transforms
def getData():
    """Return (trainset, testset, input_dim, output_dim) for ImageNet.

    Reads the dataset root from the IMAGENET_PATH environment variable,
    expecting 'train' and 'val' subfolders in ImageFolder layout.
    """
    mean, std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
    normalize = transforms.Normalize(mean, std)
    root = os.getenv('IMAGENET_PATH')
    trainset = datasets.ImageFolder(
        os.path.join(root, "train"),
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    testset = datasets.ImageFolder(
        os.path.join(root, "val"),
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    return trainset, testset, 3*224*224, 1000
| 887 | 25.117647 | 64 | py |
agd | agd-main/data/mnist.py | from torchvision import datasets, transforms
def getData():
    """Return (trainset, testset, input_dim, output_dim) for MNIST.

    Downloads into ./data on first use; the same normalising transform is
    applied to both splits (no augmentation).
    """
    tfm = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    trainset = datasets.MNIST('./data', train=True, download=True, transform=tfm)
    testset = datasets.MNIST('./data', train=False, download=True, transform=tfm)
    return trainset, testset, 1*28*28, 10
| 493 | 25 | 87 | py |
aldiplusplus | aldiplusplus-main/forecasting_results.py | import os
import argparse
import glob
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from functools import partial
from collections import defaultdict
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_log_error, mean_squared_error
from utils import (
save_variable,
rmsle,
load_data,
timer,
GeneralizedMeanBlender
)
if __name__ == "__main__":
    # Fix: `sys` and `yaml` are used below but were never imported at module
    # level, so this script crashed with a NameError. Import them locally to
    # keep the fix contained to the entry point.
    import sys
    import yaml

    # load config file from CLI (first positional argument = path to YAML)
    with open(str(sys.argv[1]), "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    algorithm = config["algorithm"]
    data_location = config["data_location"]
    output_location = config["output_location"]

    # ground truth: Kaggle meter readings joined with building metadata
    df_bdg = pd.read_csv("data/meters/kaggle/kaggle.csv")
    df_metadata = pd.read_csv("data/metadata/metadata.csv")
    df_metadata = df_metadata[["building_id_kaggle", "site_id_kaggle", "primaryspaceusage"]]
    df_metadata = df_metadata[df_metadata["building_id_kaggle"].notna()]
    df_metadata["building_id_kaggle"] = df_metadata["building_id_kaggle"].astype(int)
    df_bdg = pd.merge(df_bdg, df_metadata, left_on="building_id", right_on="building_id_kaggle")
    df_bdg = df_bdg.sort_values(by="row_id", ascending=True)

    MODEL_LIST = [
        f"output/{algorithm}/lgb-split_meter-no_normalization.npy",
    ]

    # load predictions (one column per model file)
    with timer("load predictions"):
        preds_matrix = [np.load(x) for x in MODEL_LIST if ".npy" in x]
        preds_matrix = np.vstack(preds_matrix).T

    # blend predictions with fixed generalized-mean blend coefficients
    with timer("blend predictions"):
        gmb = GeneralizedMeanBlender()
        gmb.p = 0.11375872112626925
        gmb.c = 0.99817730007820798
        gmb.weights = [1]
        test_preds = 0.99576627605010293*np.expm1(gmb.transform(np.log1p(preds_matrix)))

    # filter predictions
    with timer("calculate RMSLE"):
        # compare only test year (2017)
        test_preds = test_preds[0:len(df_bdg)]
        df_bdg["test_preds"] = test_preds
        # filter only electricity predictions (meter 0)
        df_bdg = df_bdg[df_bdg["meter"] == 0]
        # replace NaN ground truth with 0 before scoring
        df_bdg["meter_reading"] = df_bdg["meter_reading"].fillna(0)

    # breakdown of results: overall, per site, per primary space usage
    dict_results = {}
    # overall
    rmsle_all = rmsle(df_bdg["meter_reading"], df_bdg["test_preds"])
    dict_results['all'] = rmsle_all
    # site-specific
    for site_id in df_bdg["site_id_kaggle"].unique():
        df_bdg_site = df_bdg.copy()
        df_bdg_site = df_bdg_site[df_bdg_site["site_id_kaggle"] == site_id]
        rmsle_site = rmsle(df_bdg_site["meter_reading"], df_bdg_site["test_preds"])
        dict_results[site_id] = rmsle_site
    # PSU-specific
    for psu in df_bdg["primaryspaceusage"].unique():
        df_bdg_psu = df_bdg.copy()
        df_bdg_psu = df_bdg_psu[df_bdg_psu["primaryspaceusage"] == psu]
        rmsle_psu = rmsle(df_bdg_psu["meter_reading"], df_bdg_psu["test_preds"])
        dict_results[psu] = rmsle_psu
    save_variable(f"results/dict_results_forecasting_{algorithm}", dict_results)
    print(dict_results)
| 3,154 | 35.264368 | 96 | py |
aldiplusplus | aldiplusplus-main/train_lgb_meter.py | import os
import argparse
import yaml
from datetime import datetime
import lightgbm as lgb
import numpy as np
from utils import (
timer,
Logger,
make_dir,
rmsle,
load_data,
get_validation_months,
)
# Command-line interface for the per-meter LightGBM training script.
parser = argparse.ArgumentParser(description="")
parser.add_argument(
    "--overwrite", action="store_true", help="If True then overwrite existing files"
)
parser.add_argument(
    "--normalize_target",
    action="store_true",
    help="If True then normalize the meter_reading by dividing by log1p(square_feet).",
)
# LightGBM hyper-parameters
parser.add_argument(
    "--n_leaves", type=int, default=500, help="Number of leaves in each tree"
)
parser.add_argument("--lr", type=float, default=0.03, help="Learning rate.")
parser.add_argument(
    "--feature_fraction",
    type=float,
    default=0.7,
    help="Fraction of features to select for each trees.",
)
parser.add_argument(
    "--subsample",
    type=float,
    default=0.4,
    help="Fraction of rows to use when fitting trees.",
)
parser.add_argument("--file", help="Configuration file")
# Columns fed to LightGBM as model inputs.
FEATURES = [
    # building meta features
    "square_feet",
    "year_built",
    "floor_count",
    # cat cols
    "building_id",
    "site_id",
    "primary_use",
    "hour",
    "weekday",
    "weekday_hour",
    "building_weekday_hour",
    "building_weekday",
    "building_hour",
    # raw weather features
    "air_temperature",
    "cloud_coverage",
    "dew_temperature",
    "precip_depth_1_hr",
    "sea_level_pressure",
    "wind_direction",
    "wind_speed",
    # derivative weather features
    "air_temperature_mean_lag7",
    "air_temperature_std_lag7",
    "air_temperature_mean_lag73",
    "air_temperature_std_lag73",
    # time features
    "weekday_x",
    "weekday_y",
    "is_holiday",
    # target encoding features (disabled in this variant)
    # "gte_meter_building_id_hour",
    # "gte_meter_building_id_weekday",
]
# Columns cast to the pandas "category" dtype before training.
CAT_COLS = [
    "building_id",
    "site_id",
    "primary_use",
    "hour",
    "weekday",
    "weekday_hour",
    "building_weekday_hour",
    "building_weekday",
    "building_hour",
]
# Columns removed from the training frame up front.
DROP_COLS = [
    # time columns
    "year",
    "timestamp",
    "hour_x",
    "hour_y",
    # weather extremum
    "air_temperature_min_lag7",
    "air_temperature_max_lag7",
    "air_temperature_min_lag73",
    "air_temperature_max_lag73",
    # first-order gte
    # "gte_hour", "gte_weekday", "gte_month", "gte_building_id",
    # "gte_meter", "gte_meter_hour", "gte_primary_use", "gte_site_id",
    #
    # second-order gte
    # "gte_meter_weekday", "gte_meter_month", "gte_meter_building_id",
    # "gte_meter_primary_use", "gte_meter_site_id",
    # month columns
    "month_x",
    "month_y",
    "building_month",  # "month",
    # "gte_meter_building_id_month",
]
# Train one LightGBM regressor per (seed, validation-month split, meter),
# using month-blocked cross-validation; each fitted booster is saved to disk.
if __name__ == "__main__":
    args = parser.parse_args()
    # load config file from CLI
    with open(str(args.file), "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    algorithm = config["algorithm"]
    output_location = config["output_location"]
    with timer("Loading data"):
        train = load_data(
            "train_clean", algorithm=algorithm, output_location=output_location
        )
        train.drop(DROP_COLS, axis=1, inplace=True)
        # keep only rows flagged as good meter readings
        train = train.loc[train.is_bad_meter_reading == 0].reset_index(drop=True)
    with timer("Preprocesing"):
        for x in CAT_COLS:
            train[x] = train[x].astype("category")
        if args.normalize_target:
            # target_encode_cols = [x for x in train.columns if "gte" in x]
            # train[target_encode_cols] = train[target_encode_cols] / np.log1p(
            #     train[["square_feet"]].values
            # )
            # normalise the log target by log building size
            train["target"] = np.log1p(train["meter_reading"]) / np.log1p(
                train["square_feet"]
            )
        else:
            train["target"] = np.log1p(train["meter_reading"])
    # get base file name
    model_name = "lgb-split_meter"
    make_dir(f"models/{algorithm}/{model_name}")
    with timer("Training"):
        for seed in [0]:
            for n_months in [1, 2, 3, 4, 5, 6]:
                validation_months_list = get_validation_months(n_months)
                for fold_, validation_months in enumerate(validation_months_list):
                    for m in [0]: # range(4): # only using 1 meter
                        # create sub model path
                        if args.normalize_target:
                            sub_model_path = f"models/{algorithm}/{model_name}/target_normalization/meter_{m}"
                            make_dir(sub_model_path)
                        else:
                            sub_model_path = f"models/{algorithm}/{model_name}/no_normalization/meter_{m}"
                            make_dir(sub_model_path)
                        # create model version (hyper-params + split encoded in name)
                        model_version = "_".join(
                            [
                                str(args.n_leaves),
                                str(args.lr),
                                str(args.feature_fraction),
                                str(args.subsample),
                                str(seed),
                                str(n_months),
                                str(fold_),
                            ]
                        )
                        # check if we can skip this model (unless --overwrite)
                        full_sub_model_name = f"{sub_model_path}/{model_version}.txt"
                        if os.path.exists(full_sub_model_name):
                            if not args.overwrite:
                                print(
                                    f"{datetime.now()} - {full_sub_model_name} already exists! Skipping..."
                                )
                                continue
                        # get this months indices: hold out validation_months
                        trn_idx = np.where(
                            np.isin(train.month, validation_months, invert=True)
                        )[0]
                        val_idx = np.where(
                            np.isin(train.month, validation_months, invert=False)
                        )[0]
                        # print(f"split meter: train size {len(trn_idx)} val size {len(val_idx)}")
                        # remove indices not in this meter
                        trn_idx = np.intersect1d(trn_idx, np.where(train.meter == m)[0])
                        val_idx = np.intersect1d(val_idx, np.where(train.meter == m)[0])
                        # print(f"split meter: train size {len(trn_idx)} val size {len(val_idx)}")
                        # initialize model
                        # NOTE(review): `silent` and fit-time `early_stopping_rounds`/
                        # `verbose` were removed in newer lightgbm releases — confirm
                        # the pinned lightgbm version before upgrading.
                        model = lgb.LGBMRegressor(
                            random_state=seed + 9999 * args.normalize_target,
                            n_estimators=3333,#9999,
                            learning_rate=args.lr,
                            feature_fraction=args.feature_fraction,
                            subsample=args.subsample,
                            num_leaves=args.n_leaves,
                            metric="rmse",
                            silent=False,
                        )
                        # fit model with early stopping on the held-out months
                        msg = f"Training {full_sub_model_name} - train# {len(trn_idx)} val# {len(val_idx)}"
                        # print(f'{datetime.now()} - Training {full_sub_model_name} - train# {len(trn_idx)} val# {len(val_idx)}')
                        with timer(msg):
                            model.fit(
                                train.loc[trn_idx, FEATURES],
                                train.loc[trn_idx, "target"],
                                eval_set=[
                                    (
                                        train.loc[val_idx, FEATURES],
                                        train.loc[val_idx, "target"],
                                    )
                                ],
                                early_stopping_rounds=25,
                                verbose=25,
                            )
                        model.booster_.save_model(full_sub_model_name)
| 8,108 | 32.097959 | 129 | py |
aldiplusplus | aldiplusplus-main/predict_lgb_meter.py | import argparse
import glob
import yaml
import numpy as np
import pandas as pd
import lightgbm as lgb
from utils import (
Logger,
timer,
rmsle,
load_data,
make_dir,
)
# Command-line interface for the per-meter LightGBM prediction script.
parser = argparse.ArgumentParser(description="")
parser.add_argument(
    "--normalize_target",
    action="store_true",
    help="If True then normalize the meter_reading by dividing by log1p(square_feet).",
)
parser.add_argument("--file", help="Configuration file")
# Columns fed to LightGBM as model inputs (must match the training script).
FEATURES = [
    # building meta features
    "square_feet",
    "year_built",
    "floor_count",
    # cat cols
    "building_id",
    "site_id",
    "primary_use",
    "hour",
    "weekday",
    "weekday_hour",
    "building_weekday_hour",
    "building_weekday",
    "building_hour",
    # raw weather features
    "air_temperature",
    "cloud_coverage",
    "dew_temperature",
    "precip_depth_1_hr",
    "sea_level_pressure",
    "wind_direction",
    "wind_speed",
    # derivative weather features
    "air_temperature_mean_lag7",
    "air_temperature_std_lag7",
    "air_temperature_mean_lag73",
    "air_temperature_std_lag73",
    # time features
    "weekday_x",
    "weekday_y",
    "is_holiday",
    # target encoding features (disabled in this variant)
    # "gte_meter_building_id_hour", "gte_meter_building_id_weekday",
]
# Columns cast to the pandas "category" dtype before predicting.
CAT_COLS = [
    "building_id",
    "site_id",
    "primary_use",
    "hour",
    "weekday",
    "weekday_hour",
    "building_weekday_hour",
    "building_weekday",
    "building_hour",
]
# Columns removed from the test frame up front.
DROP_COLS = [
    # time columns
    "year",
    "timestamp",
    "hour_x",
    "hour_y",
    # weather extremum
    "air_temperature_min_lag7",
    "air_temperature_max_lag7",
    "air_temperature_min_lag73",
    "air_temperature_max_lag73",
    # first-order gte
    # "gte_hour",
    # "gte_weekday",
    # "gte_month",
    # "gte_building_id",
    # "gte_meter",
    # "gte_meter_hour",
    # "gte_primary_use",
    # "gte_site_id",
    ## second-order gte
    # "gte_meter_weekday",
    # "gte_meter_month",
    # "gte_meter_building_id",
    # "gte_meter_primary_use",
    # "gte_meter_site_id",
    # month columns
    "month_x",
    "month_y",
    "building_month",  # "month",
    # "gte_meter_building_id_month",
]
# Predict per meter by averaging every saved booster for that meter, then
# invert the target transform and save the result as a .npy file.
if __name__ == "__main__":
    args = parser.parse_args()
    # load config file from CLI
    with open(str(args.file), "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    algorithm = config["algorithm"]
    output_location = config["output_location"]
    with timer("Loading data"):
        test = load_data(
            "test_clean", algorithm=algorithm, output_location=output_location
        )
        test.drop(DROP_COLS, axis=1, inplace=True)
    with timer("Preprocesing"):
        for x in CAT_COLS:
            test[x] = test[x].astype("category")
        #if args.normalize_target:
        #    target_encode_cols = [x for x in test.columns if "gte" in x]
        #    test[target_encode_cols] = test[target_encode_cols] / np.log1p(
        #        test[["square_feet"]].values
        #    )
    with timer("Predicting"):
        # get base file name
        test_preds = np.zeros(len(test))
        for m in [0]: #range(4):
            # create sub model path
            if args.normalize_target:
                sub_model_path = (
                    f"models/{algorithm}/lgb-split_meter/target_normalization/meter_{m}"
                )
            else:
                sub_model_path = f"models/{algorithm}/lgb-split_meter/no_normalization/meter_{m}"
            # remove indices not in this meter
            X = test.loc[test.meter == m, FEATURES]
            print(f"split meter {m}: test size {len(X)}")
            # load models
            model_list = glob.glob(f"{sub_model_path}/*")
            assert len(model_list) != 0, "No models to load"
            # predict: uniform average over all saved boosters
            msg = (
                f"Predicting for meter {m} - models# {len(model_list)}, test# {len(X)}"
            )
            with timer(msg):
                preds = 0
                for model_name in model_list:
                    model = lgb.Booster(model_file=model_name)
                    with timer(f"  Model {model_name}"):
                        preds += model.predict(X) / len(model_list)
                test_preds[test.meter == m] = preds
    # invert target transformation (see training: log1p, optionally scaled)
    if args.normalize_target:
        test_preds *= np.log1p(test.square_feet)
    test_preds = np.expm1(test_preds)
    # correct site 0 (fixed electricity unit-correction factor)
    test_preds[(test.site_id == 0) & (test.meter == 0)] *= 3.4118
    test_preds[test_preds < 0] = 0
    # save data
    make_dir(f"output/{algorithm}")
    if args.normalize_target:
        np.save(f"output/{algorithm}/lgb-split_meter-target_normalization", test_preds)
    else:
        np.save(f"output/{algorithm}/lgb-split_meter-no_normalization", test_preds)
| 4,845 | 25.480874 | 97 | py |
aldiplusplus | aldiplusplus-main/aldi_gmm_dyn_none_both.py | from scipy import stats
import math
import torch
#import stumpy
import pyscamp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import calmap # not working with latest pandas
import calplot
import joypy
import sys
import time
import datetime as dt
from sklearn.mixture import GaussianMixture
class ALDI():
    def __init__(self,
                 df_meters,
                 df_metadata,
                 m=24,
                 col_id='building_id',
                 site_id='',
                 meter_id='',
                 verbose=False,
                 gpu=False,
                 hourly_processing=False,
                 aldi_name='aldi_gmm_dyn_none_both',
                 ):
        """
        Parameter
        ----------
        df_meters:
            sorted NxM dataframe with M buildings and N rows with hourly
            timestamp as indices
        df_metadata:
            dataframe with metadata regarding the buildings
        m:
            hourly window size, one day = 24
        col_id:
            string name of the column with building ids in df_meters and df_metadata
        site_id:
            id of the current portfolio being analyzed
        meter_id:
            id of the current sensor reading being analyzed
        verbose:
            boolean value to enable debugging printing
        gpu:
            if True and CUDA is available, flags GPU use (currently only
            reported via a print; the GPU matrix-profile branch in get_mp
            is commented out)
        hourly_processing:
            if True, run the analysis at hourly resolution; otherwise one
            matrix-profile sample per day (midnight) is used
        aldi_name:
            identifier for this ALDI variant/configuration
        """
        self.df_meters = df_meters.copy()
        self.df_metadata = df_metadata.copy()
        self.base_timestamps = df_meters.copy().index
        self.m = m
        self.col_id = col_id
        self.site_id = site_id
        self.meter_id = meter_id
        self.verbose = verbose
        self.aldi_name = aldi_name
        self.hourly = hourly_processing
        # equivalent to: gpu and torch.cuda.is_available()
        self.cuda = True and gpu if torch.cuda.is_available() else False
        if self.cuda:
            print('Using GPU')
        # set auxiliary variables
        self.list_all_bdg = df_meters.columns.values
        # placeholder for upcoming class variables
        self.mp_adj = None
        self.mp_ind = None
        self.df_result = None
        self.num_readings = None
        self.num_buildings = None
        self.df_result_meta = None
        self.df_test = None
        self.df_test_det = None # placeholder
        # start the engine
        self.pipeline()
    def pipeline(self):
        """Run the full ALDI sequence: matrix profile -> combined frame ->
        metadata join -> per-timestamp KS test. Results are stored on self;
        discord classification is triggered later by the caller."""
        if self.verbose:
            print(f'Start ALDI. hourly = {self.hourly}')
        ##### EXECUTE ALDI
        #### STEP 1: get mp-values and -indices
        self.mp_adj, self.mp_ind = self.get_mp()
        #### STEP 2: select midnight mp-values and base dataframe
        self.df_result, self.num_readings, self.num_buildings = self.data_reconstruction()
        self.df_result_meta = self.add_metadata()
        #### STEP 4: run one KS-tests
        self.df_ks_test = self.ks_test()
        #### STEP 5: Classification of the results of the stat test
        #### (Initiated by the user from the outside)
        # self.df_test_det = self.get_result_df()
def get_mp(self):
"""
Calculates matrix profile and matrix profile indices for a time-stamp
sorted dataframe where the columns are buildings from the same site
and rows are meter readings.
Returns:
mp_adj: dataframe with the matrix profile values
mp_ind: dataframe with the matrix profile indices
"""
mp_adj = pd.DataFrame(columns=self.list_all_bdg)
mp_ind = pd.DataFrame(columns=self.list_all_bdg)
for col in self.list_all_bdg:
bldg = self.df_meters[col]
mp_profile, mp_index = pyscamp.selfjoin(bldg, self.m)
#if self.cuda:
# mp = stumpy.gpu_stump(bldg, m=self.m)
#else:
# mp = stumpy.stump(bldg, m=self.m)
# append np.nan to matrix profile to allow plotting against raw data
#madj = np.append(mp[:,0], np.zeros(self.m-1) + np.nan)
#mind = np.append(mp[:,1], np.zeros(self.m-1) + np.nan)
madj = np.append(mp_profile, np.zeros(self.m-1) + np.nan)
mind = np.append(mp_index, np.zeros(self.m-1) + np.nan)
# save mp information
mp_adj[col] = madj
mp_ind[col] = mind
return mp_adj, mp_ind
    def data_reconstruction(self):
        """
        Puts together calculated values into one single dataframe
        Returns:
        ----------
        df_result: pandas.DataFrame
            long-format frame with one row per (timestamp, building):
            columns raw, mp, mp_ind, the building id column, the date and
            its breakdowns (month, daytype, day, hour)
        num_readings: int
            number of timestamps (rows of the wide input frame)
        num_buildings: int
            number of buildings (columns of the wide input frame)
        """
        df_result = pd.DataFrame(columns=['raw','mp','mp_ind'])
        # Previous get_midnight_values()
        df_e, df_mp, df_mpind = self.prepare_mp_values()
        num_readings = df_e.shape[0]
        num_buildings = df_e.shape[1]
        if self.verbose:
            print(f'num of readings: {num_readings}') # debug
        # combining the matrix profile and indices values
        # (row-major flatten: building varies fastest within each timestamp)
        df_result['raw'] = df_e.values.reshape(num_readings * num_buildings)
        df_result['mp'] = df_mp.values.reshape(num_readings * num_buildings)
        df_result['mp_ind'] = df_mpind.values.reshape(num_readings * num_buildings)
        if self.verbose:
            print(f'Combining raw and calculated values:\n{df_result}')
        # combining the building names and dates
        # NOTE(review): `steps` is computed in both branches but never used.
        if self.hourly:
            # HOURLY SOLUTION
            df_names = np.tile(self.list_all_bdg, num_readings)
            steps = np.repeat(list(range(num_readings)), len(self.list_all_bdg))
            df_interim_dates = (pd.date_range(start=self.base_timestamps[0],
                                              end=self.base_timestamps[-1],
                                              freq='H')
                                ).to_pydatetime().tolist()
            df_dates = np.repeat(df_interim_dates, len(self.list_all_bdg))
        else:
            # DAYS SOLUTION
            df_names = np.tile(self.list_all_bdg, num_readings)
            steps = np.repeat(list(range(num_readings)), len(self.list_all_bdg))
            df_interim_dates = (pd.date_range(start=self.base_timestamps[0],
                                              end=self.base_timestamps[-1],
                                              freq='d')
                                ).to_pydatetime().tolist()
            df_dates = np.repeat(df_interim_dates, len(self.list_all_bdg))
        df_result[self.col_id] = df_names
        df_result['date'] = df_dates
        if self.verbose:
            print(f'Updating the combined values with building names ' +
                  f'and full dates:\n{df_result}')
        # combining the breakdown of the dates
        df_result['month'] = df_result['date'].dt.strftime('%b')
        df_result['daytype'] = df_result['date'].dt.strftime('%a')
        df_result['day'] = df_result['date'].dt.strftime('%d')
        df_result['hour'] = (df_result['date'].dt.strftime('%H')).astype('int8')
        if self.verbose:
            print(f'Updating the combined values with broken down dates:\n{df_result}')
        return df_result, num_readings, num_buildings
def prepare_mp_values(self):
"""
Picks daily matrix profile at midnight
Returns:
----------
df_e: pandas.DataFrame
text
df_mp: pandas.DataFrame
text
df_mpind: pandas.DataFrame
text
"""
df_e = self.df_meters.copy()
df_mp = self.mp_adj.set_index(df_e.index)
df_mpind = self.mp_ind.set_index(df_e.index)
if not self.hourly:
df_e = df_e[df_e.index.hour==0]
df_mp = df_mp[df_mp.index.hour==0]
df_mpind = df_mpind[df_mpind.index.hour==0]
if self.verbose:
print(f'Prepared MP values:\n{df_mp}')
print(f'Shape midnight results:')
print(f'raw: {df_e.shape}')
print(f'mp: {df_mp.shape}')
print(f'mpi: {df_mpind.shape}\n')
return df_e, df_mp, df_mpind
def add_metadata(self):
"""
Combines the processed dataframe with matrix profile calculation
alongside the metadata file
Returns:
----------
df_result_meta: pandas.DataFrame
text
"""
df_result_meta = self.df_result.merge(self.df_metadata,
on=self.col_id)
if self.verbose:
print(f'Merging available metadata:\n{df_result_meta.head()}')
return df_result_meta
def ks_test(self):
"""
Computes an statistical test for each daily distribution
Returns:
----------
ks_test: pandas.DataFrame
text
"""
reference_dist = self.daytype_dist()
if self.hourly:
curr_freq = 'H'
else:
curr_freq = 'D'
ks_test = pd.DataFrame(columns=['D','p'],
index=pd.date_range(start=self.base_timestamps[0],
end=self.base_timestamps[-1],
freq=curr_freq)
)
if self.verbose:
print(f'CAUTION: curr_freq: {curr_freq}')
print(f'Starting to fill the ks_test df: \n{ks_test}')
for i in ks_test.index:
events = self.df_result.mp[self.df_result.date == i]
if self.hourly:
reference = reference_dist[i.strftime('%a')][int(i.strftime('%H'))]
else:
reference = reference_dist[i.strftime('%a')]
test = stats.ks_2samp(events, reference)
ks_test.D[i] = test.statistic
ks_test.p[i] = test.pvalue
if self.verbose:
print(f'KS test dataframe:\n{ks_test}')
return ks_test
def daytype_dist(self):
"""
Computes daytype distributions
Returns:
----------
daytype_dist: dictionary
text
"""
daytype_dist = {}
weekdays = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
if self.hourly:
times = list(range(24))
for curr_day in weekdays:
daytype_dist[curr_day] = {}
for curr_time in times:
daytype_dist[curr_day][curr_time] = self.df_result.mp[
(self.df_result.daytype == curr_day)
& (self.df_result.hour == curr_time) ]
else:
for curr_day in weekdays:
daytype_dist[curr_day] = self.df_result.mp[(
self.df_result.daytype == curr_day)]
return daytype_dist
####################################################################
# #
# ||| Methods that are called from the outside ||| #
# VVV VVV #
####################################################################
    def set_gmm_model(self, gmm_data='D', gmm_max_comp=10):
        """Configure and fit the Gaussian mixture model over KS results.

        gmm_data: which KS output to model — 'D' (statistic) or 'p'
            (p-value); read later by get_result_df.
        gmm_max_comp: presumably an upper bound on the number of mixture
            components used by _gmm_train — confirm against its definition.
        """
        self.gmm_data = gmm_data
        self.gmm_max_comp = gmm_max_comp
        self.gm_model, self.gmm_components = self._gmm_train()
    def get_result_df(self, forecast_out=False):
        """
        Classify each timestamp as discord (1) or non-discord (0) from the
        fitted GMM over the KS results, then broadcast that flag to every
        building.
        forecast_out: if True, return a long-format hourly frame with
            columns (timestamp, building_id, is_discord); otherwise a wide
            frame with one `is_discord_<bdg>` column per building.
        """
        # dynamic share calculation: the fraction of components treated as
        # "of interest" is derived from the largest component mean
        max_gauss_mean = self.gmm_components['gauss_mean'].max()
        share_comp_of_interest = 1 - max_gauss_mean
        abs_comp_of_interest = math.trunc( share_comp_of_interest
                                          * self.gm_model.n_components)
        sorted_gmm_components = self.gmm_components.sort_values('gauss_mean').copy()
        special_gmm_comp = sorted_gmm_components[:abs_comp_of_interest]
        if self.verbose:
            print(f'Share components of interest: {share_comp_of_interest}')
            print(f'Number components of interest: {abs_comp_of_interest}')
        # NOTE(review): this initial D-based computation is immediately
        # recomputed inside both branches below — it appears redundant.
        gmm_proba = self.gm_model.predict_proba(
            self.df_ks_test.D.values.reshape(-1,1))
        df_gmm_proba = pd.DataFrame(gmm_proba, index= self.df_ks_test.index)
        df_gmm_proba['max_comp'] = df_gmm_proba.idxmax(axis='columns')
        if self.gmm_data == 'D':
            gmm_proba = self.gm_model.predict_proba(
                self.df_ks_test.D.values.reshape(-1,1))
            df_gmm_proba = pd.DataFrame(gmm_proba, index= self.df_ks_test.index)
            df_gmm_proba['max_comp'] = df_gmm_proba.idxmax(axis='columns')
            # *Important comparison* - The max_comp must be inside the list
            # for the day to be classified as a "non-discord day"
            df_gmm_proba['is_discord'] = np.where(df_gmm_proba['max_comp'].isin(
                special_gmm_comp.component), 0, 1)
        else:
            gmm_proba = self.gm_model.predict_proba(
                self.df_ks_test.p.values.reshape(-1,1))
            df_gmm_proba = pd.DataFrame(gmm_proba, index= self.df_ks_test.index)
            df_gmm_proba['max_comp'] = df_gmm_proba.idxmax(axis='columns')
            # *Important comparison* - The max_comp must be inside the list
            # for the day to be classified as a "non-discord day"
            # (note the flipped 1/0 relative to the 'D' branch)
            df_gmm_proba['is_discord'] = np.where(df_gmm_proba['max_comp'].isin(
                special_gmm_comp.component), 1, 0)
        df_is_discord = pd.DataFrame(index=df_gmm_proba.index)
        df_is_discord['is_discord'] = df_gmm_proba['is_discord']
        # prepare index and column for resulting dataframes
        all_bdg = self.list_all_bdg.copy()
        if forecast_out:
            columns = all_bdg
        else:
            columns = [f'is_discord_{x}' for x in all_bdg]
        # hand waving specialization (caution) of discords for all bdgs:
        # the same site-level flag is copied to every building column
        for col in columns:
            df_is_discord[col] = df_is_discord['is_discord']
        df_is_discord = df_is_discord.drop(['is_discord'],
                                           axis=1)
        if forecast_out:
            if not self.hourly:
                # upsample the daily flags onto the hourly timestamp grid
                hourly_timestamps = self.base_timestamps
                df_hourly_is_discord = pd.DataFrame(index=hourly_timestamps)
                # copy daily dataframe to hourly dataframe
                df_hourly_is_discord['day'] = df_hourly_is_discord.index.date
                df_is_discord.index = df_is_discord.index.date
                df_hourly_is_discord = df_hourly_is_discord.join(df_is_discord,
                                                                 on='day',
                                                                 how='left')
                df_hourly_is_discord = df_hourly_is_discord.drop(['day'], axis=1)
                df_is_discord_hourly = df_hourly_is_discord.astype('int8')
            else:
                df_is_discord_hourly = df_is_discord
            # melt to long format: (timestamp, building_id, is_discord)
            df_is_discord_hourly['timestamp'] = df_is_discord_hourly.index
            df_is_discord_hourly = df_is_discord_hourly.melt(
                id_vars=['timestamp'],
                var_name='building_id',
                value_name='is_discord')
            # Exportable variable
            df_is_discord = df_is_discord_hourly
        return df_is_discord
    def get_result_using_threshold(self,
                                   ks_type='D',
                                   threshold=0.6,
                                   forecast_out=False):
        """
        Produce a predicted discord classification by thresholding the
        KS-test result directly (instead of using the GMM).

        Parameters:
        ----------
        ks_type: str , required
            Column of ``self.df_ks_test`` to threshold ('D' or 'p').
        threshold: float , required
            Cut-off separating discord from non-discord.
            NOTE(review): the original docstring said days *exceeding*
            the threshold become discords, but the code below flags
            values *below* it (``< threshold -> 1``) — confirm which
            direction is intended before relying on this method.
        forecast_out: bool , required
            Controls the formatting of the return value.
            If False, the resulting dataframe has one
            ``is_discord_<bldg>`` column per building and a timestamp
            index. If True, the result is melted to a long format with
            'timestamp', 'building_id' and 'is_discord' columns (and
            daily results are expanded to hourly resolution first).

        Returns:
        ----------
        df_result: pandas.DataFrame
            see description at parameter 'forecast_out'
        """
        if self.hourly:
            curr_freq = 'H'
        else:
            curr_freq = 'D'
        df_is_discord = pd.DataFrame(
            columns=['is_discord'],
            index=pd.date_range(start=self.base_timestamps[0],
                                end=self.base_timestamps[-1],
                                freq=curr_freq)
        )
        # NOTE(review): flags values BELOW the threshold as discord (1),
        # contradicting the "threshold is exceeded" wording — verify.
        df_is_discord['is_discord'] = np.where(
            self.df_ks_test[ks_type] < threshold, 1, 0)
        # prepare index and column for resulting dataframes
        all_bdg = self.list_all_bdg.copy()
        if forecast_out:
            columns = all_bdg
        else:
            columns = [f'is_discord_{x}' for x in all_bdg]
        # hand waving specialization (caution): the single site-wide flag
        # is copied verbatim to every building of the site
        for col in columns:
            df_is_discord[col] = df_is_discord['is_discord']
        df_is_discord = df_is_discord.drop(['is_discord'],
                                           axis=1)
        if (forecast_out & (not self.hourly)):
            # expand daily labels to hourly resolution via a date join
            df_hourly_is_discord = pd.DataFrame(
                index=pd.date_range(start=self.base_timestamps[0],
                                    end=self.base_timestamps[-1],
                                    freq='H')
            )
            # copy daily dataframe to hourly dataframe
            df_hourly_is_discord['day'] = df_hourly_is_discord.index.date
            df_is_discord.index = df_is_discord.index.date
            df_hourly_is_discord = df_hourly_is_discord.join(
                df_is_discord,
                on='day', how='left')
            df_hourly_is_discord = df_hourly_is_discord.drop(['day'], axis=1)
            df_result = df_hourly_is_discord.astype('int8')
        else:
            df_result = df_is_discord
        if forecast_out:
            # long format: one row per (timestamp, building)
            df_result['timestamp'] = df_result.index
            df_result = df_result.melt(id_vars=['timestamp'],
                                       var_name='building_id',
                                       value_name='is_discord')
        return df_result
    def plot_true_n_gmm(self, df_true_labels, df_ks_results=None, gmm=None):
        """
        Plot, per building, histograms of the KS D-values split by true
        discord / non-discord days, overlaid with the trained GMM pdf and
        its per-component pdfs; the figure is saved to
        img/D_visualization/<aldi_name>/site_<site_id>.png.

        Parameters:
        ----------
        df_true_labels: pandas.DataFrame , required
            Hourly 0/1 ground-truth labels, one column per building;
            aggregated here to daily resolution via a per-day max.
        df_ks_results: pandas.DataFrame , optional
            KS-test results with a 'D' column; defaults to self.df_ks_test.
        gmm: sklearn.mixture.GaussianMixture , optional
            Trained mixture model; defaults to self.gm_model.
        """
        if df_ks_results is None:
            df_ks_results = self.df_ks_test
        if gmm is None:
            gmm = self.gm_model
        # a day counts as discord if any hour of it is labelled discord
        df_true_labels_day = df_true_labels.groupby(df_true_labels.index.date).max()
        df_ks_results_D = df_ks_results[['D']]
        # broadcast the single site-wide D column to one column per building
        df_ks_results_D_spez = pd.DataFrame(index=df_ks_results_D.index,
                                            columns=df_true_labels_day.columns)
        for col in df_ks_results_D_spez.columns:
            df_ks_results_D_spez[col] = df_ks_results_D['D']
        assert (df_true_labels_day.shape == df_ks_results_D_spez.shape)
        # split D-values by true label; NaN where the label does not match
        df_D_discord = pd.DataFrame(index=df_ks_results_D.index,
                                    columns=df_true_labels_day.columns)
        df_D_non_discord = pd.DataFrame(index=df_ks_results_D.index,
                                        columns=df_true_labels_day.columns)
        for col in df_D_discord.columns:
            df_D_discord[col] = np.where(df_true_labels_day[col] == 1,
                                         df_ks_results_D_spez[col],
                                         math.nan)
            df_D_non_discord[col] = np.where(df_true_labels_day[col] == 0,
                                             df_ks_results_D_spez[col],
                                             math.nan)
        #### HERE THE PLOTTING BEGINNS ###
        # evaluate the mixture pdf and its weighted component pdfs on [0, 1]
        x_values = np.linspace(0, 1, 1000)
        logprob = gmm.score_samples(x_values.reshape(-1, 1))
        responsibilities = gmm.predict_proba(x_values.reshape(-1, 1))
        pdf = np.exp(logprob)
        pdf_individual = responsibilities * pdf[:, np.newaxis]
        # one subplot per building, laid out two per row
        number_plot_rows = math.ceil(df_D_discord.shape[1]/2)
        figure, axes = plt.subplots(nrows=number_plot_rows,
                                    ncols=2,
                                    figsize=(22, 4*number_plot_rows))
        figure.patch.set_facecolor('white')
        figure.subplots_adjust(top=0.97)
        figure.suptitle(f'D-values of discord and non-discord days from \
site {self.site_id}',
                        fontsize=20)
        # alternate between the left (0) and right (1) subplot column
        next_odd = True
        for num, df_col in enumerate(df_D_discord.columns):
            if next_odd:
                plot_col = 0
                next_odd = False
            else:
                plot_col = 1
                next_odd = True
            plot_row_num = math.floor(num/2)
            try:
                axes[plot_row_num, plot_col].hist([df_D_non_discord[df_col], df_D_discord[df_col]],
                                                  100,
                                                  density=True,
                                                  histtype='stepfilled',
                                                  alpha=0.7,
                                                  label=['non-discord','discord'])
            except AttributeError:
                print('ooouw that hurts')
            axes[plot_row_num,plot_col].plot(x_values, pdf, '-k')
            axes[plot_row_num,plot_col].plot(x_values, pdf_individual)
            axes[plot_row_num,plot_col].set_title(f'Information about {df_col}')
            axes[plot_row_num,plot_col].legend(loc='upper right')
        figure.savefig(f'img/D_visualization/{self.aldi_name}/site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
    def plot_true_one_gmm(self,
                          df_true_labels,
                          agg_type=None,
                          gmm_data='D',
                          df_ks_results=None,
                          gmm=None):
        """
        Save a series of site-level figures of the KS-test values:
        (1) stacked histogram of discord vs. non-discord values,
        (2) the same histogram (density) combined with the GMM pdfs,
        (3) the GMM pdfs alone, and (4) an unlabeled combined plot.

        Parameters:
        ----------
        df_true_labels: pandas.DataFrame , required
            Daily 0/1 ground-truth labels, one column per building; must
            have the same number of rows as ``df_ks_results``.
        agg_type: str , optional
            Sub-directory under img/D_visualization/<aldi_name>/ for the
            output files; None selects the base directory.
        gmm_data : str , required
            KS-result column to visualise ('D' or 'p').
        df_ks_results: pandas.DataFrame , optional
            KS-test results; defaults to self.df_ks_test.
        gmm: sklearn.mixture.GaussianMixture , optional
            Trained mixture model; defaults to self.gm_model.

        Returns:
        ----------
        Method saves several plots as a side effect; returns None.
        """
        if df_ks_results is None:
            df_ks_results = self.df_ks_test
        if gmm is None:
            gmm = self.gm_model
        if agg_type is None:
            path_prefix = f'img/D_visualization/{self.aldi_name}/'
        else:
            path_prefix = f'img/D_visualization/{self.aldi_name}/{agg_type}/'
        assert (df_true_labels.shape[0] == df_ks_results.shape[0]), 'same length please'
        df_ks_results_D = df_ks_results[[gmm_data]]
        # broadcast the single site-wide column to one column per building
        df_ks_results_D_spez = pd.DataFrame(index=df_ks_results_D.index,
                                            columns=df_true_labels.columns)
        for col in df_ks_results_D_spez.columns:
            df_ks_results_D_spez[col] = df_ks_results_D[gmm_data]
        assert (df_true_labels.shape == df_ks_results_D_spez.shape)
        # split values by true label; NaN where the label does not match
        df_D_discord = pd.DataFrame(index=df_ks_results_D.index,
                                    columns=df_true_labels.columns)
        df_D_non_discord = pd.DataFrame(index=df_ks_results_D.index,
                                        columns=df_true_labels.columns)
        for col in df_D_discord.columns:
            df_D_discord[col] = np.where(df_true_labels[col] == 1,
                                         df_ks_results_D_spez[col],
                                         math.nan)
            df_D_non_discord[col] = np.where(df_true_labels[col] == 0,
                                             df_ks_results_D_spez[col],
                                             math.nan)
        # flatten to plain lists and drop the NaN placeholders
        list_D_non_discord = df_D_non_discord.values.flatten()
        list_D_discord = df_D_discord.values.flatten()
        cleaned_list_D_non_discord = \
            [x for x in list_D_non_discord if str(x) != 'nan']
        cleaned_list_D_discord = \
            [x for x in list_D_discord if str(x) != 'nan']
        #### HERE THE PLOTTING BEGINNS ###
        fontsize=22
        # first # ONLY HISTOGRAMMS
        # evaluate the mixture pdf and its weighted components on [0, 1]
        x_values = np.linspace(0, 1, 1000)
        logprob = gmm.score_samples(x_values.reshape(-1, 1))
        responsibilities = gmm.predict_proba(x_values.reshape(-1, 1))
        pdf = np.exp(logprob)
        pdf_individual = responsibilities * pdf[:, np.newaxis]
        figure, axes = plt.subplots(nrows=1,
                                    ncols=1,
                                    figsize=(18, 6))
        figure.patch.set_facecolor('white')
        figure.suptitle(f'Histogram of the Distance Values from the KS Test'
                        f' (Site {self.site_id})',
                        fontsize=fontsize+4)
        try:
            axes.hist([cleaned_list_D_non_discord,
                       cleaned_list_D_discord],
                      100,
                      density=False,
                      histtype='stepfilled',
                      alpha=0.7,
                      range=(0,1),
                      stacked=True,
                      label=['non-discord','discord'])
        except AttributeError:
            print('ooouw that hurts')
        #axes.plot(x_values, pdf, '-k')
        #axes.plot(x_values, pdf_individual)
        axes.legend(loc='upper right', prop={'size': fontsize})
        axes.tick_params(labelsize=fontsize)
        plt.xlabel('Distance Value', fontsize=fontsize+2)
        plt.ylabel('Frequency', fontsize=fontsize+2)
        figure.savefig(f'{path_prefix}site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
        # second # COMBINED PLOT
        figure, axes = plt.subplots(nrows=1,
                                    ncols=1,
                                    figsize=(18, 6))
        figure.patch.set_facecolor('white')
        figure.suptitle(f'Histogram and Trained GMM of the Distance Values from the KS Test'
                        f' (site {self.site_id})',
                        fontsize=fontsize+4)
        try:
            axes.hist([cleaned_list_D_non_discord,
                       cleaned_list_D_discord],
                      100,
                      density=True,
                      histtype='stepfilled',
                      alpha=0.7,
                      range=(0,1),
                      stacked=True,
                      label=['non-discord','discord'])
        except AttributeError:
            print('ooouw that hurts')
        axes.plot(x_values, pdf, '-k')
        axes.plot(x_values, pdf_individual)
        axes.legend(loc='upper right', prop={'size': fontsize})
        axes.tick_params(labelsize=fontsize)
        plt.xlabel('Distance Value', fontsize=fontsize+2)
        plt.ylabel('Density', fontsize=fontsize+2)
        figure.savefig(f'{path_prefix}density_site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
        # third # ONLY GMM
        # NOTE(review): axes.legend() here has no labeled artists, so
        # matplotlib emits a warning / empty legend — probably unintended.
        figure, axes = plt.subplots(nrows=1,
                                    ncols=1,
                                    figsize=(18, 6))
        figure.patch.set_facecolor('white')
        figure.suptitle(f'Trained GMM of the Distance Values from the KS Test'
                        f' (site {self.site_id})',
                        fontsize=20)
        axes.plot(x_values, pdf, '-k')
        axes.plot(x_values, pdf_individual)
        axes.legend(loc='upper right', prop={'size': fontsize})
        axes.tick_params(labelsize=fontsize)
        plt.xlabel('Distance Value', fontsize=fontsize+2)
        plt.ylabel('Density', fontsize=fontsize+2)
        figure.savefig(f'{path_prefix}gmm_site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
        plt.close('all')
        # third # GMM + unlabeled Histo
        # NOTE(review): this figure contains no histogram despite the
        # comment, and it is saved to the SAME path as the previous one
        # (gmm_site_<id>.png), overwriting it — confirm intended filename.
        figure, axes = plt.subplots(nrows=1,
                                    ncols=1,
                                    figsize=(18, 6))
        figure.patch.set_facecolor('white')
        figure.suptitle(f'Trained GMM of the Distance Values from the KS Test'
                        f' (site {self.site_id})',
                        fontsize=fontsize+4)
        axes.plot(x_values, pdf, '-k')
        axes.plot(x_values, pdf_individual)
        axes.legend(loc='upper right', prop={'size': fontsize})
        axes.tick_params(labelsize=fontsize)
        plt.xlabel('Distance Value', fontsize=fontsize+2)
        plt.ylabel('Density', fontsize=fontsize+2)
        figure.savefig(f'{path_prefix}gmm_site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
        # forth # COMBINED PLOT (UNLABELED)
        try:
            pass
        finally:
            pass
        figure, axes = plt.subplots(nrows=1,
                                    ncols=1,
                                    figsize=(18, 6))
        figure.patch.set_facecolor('white')
        figure.suptitle(f'Histogram and Trained GMM of the Distance Values from the KS Test'
                        f' (site {self.site_id})',
                        fontsize=fontsize+4)
        try:
            axes.hist((cleaned_list_D_non_discord + cleaned_list_D_discord),
                      100,
                      density=True,
                      histtype='stepfilled',
                      alpha=0.7,
                      range=(0,1),
                      stacked=True)
        except AttributeError:
            print('ooouw that hurts')
        axes.plot(x_values, pdf, '-k')
        axes.plot(x_values, pdf_individual)
        axes.legend(loc='upper right', prop={'size': fontsize})
        axes.tick_params(labelsize=fontsize)
        plt.xlabel('Distance Value', fontsize=fontsize+2)
        plt.ylabel('Density', fontsize=fontsize+2)
        figure.savefig(f'{path_prefix}unlabeled_site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
        plt.close('all')
def plot_common_pD( self,
df_true_labels,
agg_type='',
df_ks_results=None):
"""
method does something
Parameters:
----------
df_true_labels: pandas.DataFrame , required
text
agg_type: string , optional
text
df_ks_results: pandas.DataFrame , optional
text
gmm: sklearn.mixture.GaussianMixture , optional
text
Returns:
----------
Method saves a plot.
"""
if df_ks_results is None:
df_ks_results = self.df_ks_test
# columns = ['D','p']
# index = timestamps, DatetimeIndex, either hourly or daily
if agg_type is None:
path_prefix = f'img/D_visualization/{self.aldi_name}/'
else:
path_prefix = f'img/D_visualization/{self.aldi_name}/{agg_type}/'
assert (df_true_labels.shape[0] == df_ks_results.shape[0]), 'same length please'
df_all_dat = pd.DataFrame(columns=['date', 'D', 'p', 'label'])
# build one dataframe with following structure:
# columsn = ['date', 'D', 'p', 'label']
# index = range(N) (N = number of ks-results
# * number of buildings within the site)
# dataframe units all labesl & KS results within single columns
for label_col in df_true_labels.columns:
# Prepare true label df
df_label_tmp = df_true_labels[[label_col]].copy()
df_label_tmp['date'] = df_label_tmp.index
df_label_tmp = df_label_tmp.reset_index(drop=True)
# Prepare KS test result
df_ks_tmp = df_ks_results.copy()
df_ks_tmp['date'] = df_ks_tmp.index
df_ks_tmp['date'] = df_ks_tmp['date'].dt.date
df_ks_tmp = df_ks_tmp.reset_index(drop=True)
df_both_tmp = df_ks_tmp.merge(df_label_tmp, how='inner', on='date')
df_both_tmp = df_both_tmp.rename(columns={label_col: 'label'})
df_all_dat = df_all_dat.append(df_both_tmp, ignore_index=True)
# Create 2D plots
self._creat_single_pD_2D(df_all_dat, path_prefix)
self._creat_common_pD_2D(df_all_dat, path_prefix)
# Create 3D plots
self._creat_pD_3D(df_all_dat, path_prefix)
plt.close('all')
####################################################################
# ||| Support methods for access from outside ||| #
# VVV VVV #
####################################################################
def _creat_single_pD_2D(self, df_all_dat, path_prefix):
# Data preparation
df_ks_true_discord = df_all_dat.query('label == 1')
df_ks_true_non_discord = df_all_dat.query('label == 0')
# set plotting parameters
colors = ['red', 'blue']
markers = ['o', '^']
labels = ['true_discord', 'true_non_discord']
#### FIRST PLOT: DISCORD SCATTER
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111,
title=(f'Joint visualisation (2D) of D and p values; '
f'only discords (site {self.site_id})'))
ax.set_xlabel('D-value')
ax.set_ylabel('p-value')
ax.set_xlim(0, 1) #D
ax.set_ylim(0, 1) #p
scatter_dis = ax.scatter(x=df_ks_true_discord['D'],
y=df_ks_true_discord['p'],
color=colors[0],
alpha=0.3,
marker=markers[0])
ax.legend( [scatter_dis],
[labels[0]],
numpoints = 1)
fig.savefig(f'{path_prefix}pD_2D_discord_site_{self.site_id}.png',
format='PNG')
plt.clf()
#### SECOND PLOT: NON DISCORD SCATTER
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111,
title=(f'Joint visualisation (2D) of D and p values; '
f'only non-discords (site {self.site_id})'))
ax.set_xlabel('D-value')
ax.set_ylabel('p-value')
ax.set_xlim(0, 1) #D
ax.set_ylim(0, 1) #p
scatter_non_dis = ax.scatter(x=df_ks_true_non_discord['D'],
y=df_ks_true_non_discord['p'],
color=colors[1],
alpha=0.3,
marker=markers[1])
ax.legend( [scatter_non_dis],
[labels[1]],
numpoints = 1)
fig.savefig(f'{path_prefix}pD_2D_non_discord_site_{self.site_id}.png',
format='PNG')
plt.clf()
def _creat_common_pD_2D(self, df_all_dat, path_prefix):
# Data preparation
df_ks_true_discord = df_all_dat.query('label == 1')
df_ks_true_non_discord = df_all_dat.query('label == 0')
# set plotting parameters
colors = ['red', 'blue']
markers = ['o', '^']
labels = ['true_discord', 'true_non_discord']
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111,
title=(f'Joint visualisation (2D) of D and p values '
f'(site {self.site_id})'))
ax.set_xlabel('D-value')
ax.set_ylabel('p-value')
ax.set_xlim(0, 1) #D
ax.set_ylim(0, 1) #p
scatter_dis = ax.scatter(x=df_ks_true_discord['D'],
y=df_ks_true_discord['p'],
color=colors[0],
alpha=0.3,
marker=markers[0])
scatter_non_dis = ax.scatter(x=df_ks_true_non_discord['D'],
y=df_ks_true_non_discord['p'],
color=colors[1],
alpha=0.3,
marker=markers[1])
ax.legend( [scatter_dis, scatter_non_dis],
[labels[0], labels[1]],
numpoints = 1)
fig.savefig(f'{path_prefix}pD_2D_site_{self.site_id}.png',
format='PNG')
plt.clf()
    def _creat_pD_3D(self, df_all_dat, path_prefix):
        """Save a 3D scatter (label x D x p) of both classes."""
        # Data preparation
        df_ks_true_discord = df_all_dat.query('label == 1')
        df_ks_true_non_discord = df_all_dat.query('label == 0')
        # set plotting parameters
        # NOTE(review): the scale_* values are only consumed by the
        # commented-out get_proj hack below; they are otherwise unused.
        scale_x = 1 # true discord labels
        scale_y = 3 # D value
        scale_z = 3 # p value
        colors = ['red', 'blue']
        markers = ['o', '^']
        labels = ['true_discord', 'true_non_discord']
        fig = plt.figure(figsize=(12,10))
        ax = fig.add_subplot(111,
                             title=(f'Joint visualisation of D and p '
                                    f'values (site {self.site_id})'),
                             projection='3d')
        ax.set_xlim(0, 1)
        ax.set_xticks([0,1]) # true discord labels
        ax.set_ylim(0, 1) # D value
        ax.set_zlim(0, 1) # p value
        ax.set_xlabel('Discord Label')
        ax.set_ylabel('D-value')
        ax.set_zlabel('p-value')
        # scale the plot if wanted - did not look quite good
        #ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), np.diag([scale_x, scale_y, scale_z, 2]))
        # create discord scatter
        ax.scatter(ys=df_ks_true_discord['D'],
                   zs=df_ks_true_discord['p'],
                   xs=df_ks_true_discord['label'],
                   color=colors[0],
                   alpha=0.3,
                   marker=markers[0])
        # create non discord scatter
        ax.scatter(ys=df_ks_true_non_discord['D'],
                   zs=df_ks_true_non_discord['p'],
                   xs=df_ks_true_non_discord['label'],
                   color=colors[1],
                   alpha=0.3,
                   marker=markers[1])
        # Add legend - 3D scatters don't feed the legend directly, so use
        # invisible 2D proxy artists with matching color/marker.
        scatter1_proxy = matplotlib.lines.Line2D([0],[0], linestyle="none", c=colors[0], marker = markers[0])
        scatter2_proxy = matplotlib.lines.Line2D([0],[0], linestyle="none", c=colors[1], marker = markers[1])
        ax.legend([scatter1_proxy, scatter2_proxy], [labels[0], labels[1]], numpoints = 1)
        fig.savefig(f'{path_prefix}pD_3D_site_{self.site_id}.png',
                    format='PNG')
        plt.clf()
def _gmm_train(self):
"""
trains several GM models based on the given data (train_data)
and returns the best one (evaluated by AIC) (best_gmm)
Also returns a dataframe with a summary of the different
GMM components (gmm_components)
"""
train_data = self._data_for_gmm_training()
y_values = np.array([[val] for val in train_data])
N = np.arange(1, (self.gmm_max_comp + 1))
models = [None for i in range(len(N))]
for i in range(len(N)):
models[i] = GaussianMixture(N[i]).fit(y_values)
AIC = [m.aic(y_values) for m in models]
#BIC = [m.bic(y_values) for m in models]
best_gmm = models[np.argmin(AIC)]
gmm_components = pd.DataFrame(columns=['component',
'gauss_mean',
'gauss_covariance'])
gmm_components['component'] = list(range(0, best_gmm.n_components))
gmm_components['gauss_mean'] = best_gmm.means_
gmm_components['gauss_covariance'] = best_gmm.covariances_.reshape(-1,1)
if self.verbose:
print(f'calculated GMM')
print(f'components:\n {gmm_components}')
return best_gmm, gmm_components
def _plot_gmm_results(self, gmm):
"""
Method prepares a plot. On it you can see the PDF (Probability
density function) trained by the given GMM (black line). In
addition, the profiles of the individual components of the GMM
are displayed (colored lines).
If the original data on which the GMM was trained are also
given, a histogram is shown in the background.
"""
x_values = np.linspace(0, 1, 1000)
y_values = self._data_for_gmm_training()
logprob = gmm.score_samples(x_values.reshape(-1, 1))
responsibilities = gmm.predict_proba(x_values.reshape(-1, 1))
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
figure, axes = plt.subplots(1, 1, figsize=(20, 10))
figure.patch.set_facecolor('white')
axes.set_title(f'Trained GMM on {self.gmm_data}-values from site {self.site_id}',
fontsize=18)
axes.hist(y_values, 100, density=True,
histtype='stepfilled', alpha=0.4)
axes.plot(x_values, pdf, '-k')
axes.plot(x_values, pdf_individual)
figure.savefig(f'img/pD_evaluation/{self.gmm_data}-value_aialdi_gmm_s{self.site_id}_m{self.meter_id}_data-{self.gmm_data}.png',
format='PNG')
plt.clf()
def _data_for_gmm_training(self):
if self.gmm_data == 'D':
y_values = self.df_ks_test.D
else:
y_values = self.df_ks_test.p
return y_values
| 43,203 | 36.865031 | 135 | py |
aldiplusplus | aldiplusplus-main/utils.py | import os
import time
import pickle
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib
import datetime
from datetime import datetime
from contextlib import contextmanager, redirect_stdout
from functools import partial
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from datetime import timedelta
import matplotlib.pyplot as plt
def save_variable(file_name, variable):
    """Pickle *variable* to ``<file_name>.pickle``.

    Fix: the original passed an anonymous ``open(...)`` straight to
    ``pickle.dump``, leaking the file handle; a context manager ensures
    the file is flushed and closed even if pickling raises.
    """
    with open(file_name + ".pickle", "wb") as f:
        pickle.dump(variable, f)
def load_variable(filename):
    """Unpickle and return the object stored at ``<filename>.pickle``."""
    path = filename + ".pickle"
    with open(path, "rb") as handle:
        return pickle.load(handle)
def zero_is_discord(df_site):
    """Flag all 0 readings as outliers. Only for electricity meter.

    A reading of exactly 0 — or a missing reading, which is treated as
    0 — is marked as a discord (1); everything else is 0.

    Fix: the original called ``df_site.replace(0, np.nan)`` (arguments
    reversed w.r.t. its own "replace NaN with 0" comment), which removed
    every 0 *before* the ``== 0`` test, so nothing was ever flagged.

    Parameters
    ----------
    df_site: pandas.DataFrame
        Meter readings, one column per building, timestamp index.

    Returns
    -------
    pandas.DataFrame
        Same index; one ``is_discord_<bldg>`` 0/1 column per building.
    """
    df_hourly_is_discord = pd.DataFrame(index=df_site.index)
    all_bdg = df_site.columns
    columns = [f"is_discord_{x}" for x in all_bdg]
    # treat missing readings as 0 so they are flagged as well
    df_site = df_site.replace(np.nan, 0)
    for col, bdg in zip(columns, all_bdg):
        df_hourly_is_discord[col] = np.where(df_site[bdg] == 0, 1, 0)
    return df_hourly_is_discord
def get_data_chunks(dataframe, folds=12):
    """
    Split the (one-year) dataframe into "k" month-based folds where one
    chunk of months is the train set and the remaining months the test
    set. The feature columns are min-max scaled first.

    Keyword arguments:
    dataframe -- DataFrame with 24 hourly-reading features plus a
                 trailing "building_id" column and a DateIndex
    folds -- number of folds; 12, 6, 4, 3 and 2 are supported

    Returns:
    train_k_folds -- list of per-fold train dataframes
    test_k_folds -- list of per-fold test dataframes
    scaler -- the fitted MinMaxScaler
    """
    features = dataframe.iloc[:, 0:-1].copy()
    scaler = MinMaxScaler()
    scaler.fit(features)
    df_scaled = pd.DataFrame(scaler.transform(features), index=features.index)
    df_scaled["building_id"] = dataframe["building_id"]
    if folds != 12 and folds not in (6, 4, 3, 2):
        print("Sorry, that number hasn't been implemented yet")
        return
    train_k_folds = []
    test_k_folds = []
    for k in range(1, folds + 1):
        if folds == 12:
            in_fold = df_scaled.index.month == k
        else:
            span = int(12 / folds)
            months = list(range(1 + (k - 1) * span, k * span + 1))
            in_fold = df_scaled.index.month.isin(months)
        train_k = df_scaled[in_fold]
        test_k = df_scaled[~in_fold]
        # months should not overlap, intersection has to be an empty set
        assert set.intersection(set(train_k.index.month), set(test_k.index.month)) == set()
        train_k_folds.append(train_k)
        test_k_folds.append(test_k)
    return train_k_folds, test_k_folds, scaler
def create_daily_plots_for_single_bldg(self,
                                       bldg_id,
                                       df_bldg_meter,
                                       df_ks_test_results,
                                       list_days):
    '''
    Save, for every day in *list_days*, a PNG of the building's raw and
    standard-scaled daily load profile under
    img/daily_plots/bldg_<id>/<date>_D-<d>_p-<p>.png, and write each
    image path back into ``df_ks_test_results`` ('plot_path' column,
    mutated in place).

    NOTE(review): although defined at module level, the function takes a
    ``self`` parameter that is never used in the body — presumably copied
    out of a class; confirm before changing the signature.

    Parameters
    ----------
    bldg_id : building identifier matched against
        df_ks_test_results['building_id'].
    df_bldg_meter : pandas.DataFrame with 'timestamp' and 'meter_reading'
        columns for this building.
    df_ks_test_results : pandas.DataFrame with 'timestamp',
        'building_id', 'D' and 'p' columns; gains/updates 'plot_path'.
    list_days : iterable of dates to plot.
    '''
    matplotlib.use('TkAgg')
    print(f'Started plotting for building {bldg_id}')
    # define standard scaler
    std_scaler = StandardScaler()
    df_curr_building = df_bldg_meter.copy()
    #list_all_days = pd.to_datetime(df_ks_test_results['timestamp']).dt.date
    save_path = f'img/daily_plots/bldg_{str(bldg_id).zfill(4)}'
    os.makedirs(save_path, exist_ok=True)
    for single_day in list_days:
        # locate this (day, building) row in the KS results to annotate
        # the filename with its D and p values
        curr_row_ks_test_df = df_ks_test_results[ (df_ks_test_results['timestamp'] == single_day.strftime('%Y-%m-%d'))
                                                 & (df_ks_test_results['building_id'] == bldg_id)].index[0]
        current_D_val = df_ks_test_results.loc[curr_row_ks_test_df, 'D']
        current_D_val = round(current_D_val, 6)
        current_p_val = df_ks_test_results.loc[curr_row_ks_test_df, 'p']
        current_p_val = round(current_p_val, 6)
        ### PREPARE PLOTTING DATA
        # slice out the 24h window [day, day + 1)
        df_curr_building_curr_day = df_curr_building[
            (df_curr_building['timestamp'] >= single_day.strftime('%Y-%m-%d'))
            & (df_curr_building['timestamp'] < (single_day + timedelta(days=1)).strftime('%Y-%m-%d'))].copy()
        if df_curr_building_curr_day.empty:
            continue
        df_curr_building_curr_day['norm_meter_reading'] = \
            std_scaler.fit_transform(df_curr_building_curr_day[['meter_reading']])
        ### START PLOTTING
        fig,ax=plt.subplots()
        fig.patch.set_facecolor('white')
        df_curr_building_curr_day.plot(y='meter_reading',
                                       x='timestamp',
                                       ax=ax,
                                       kind='line',
                                       figsize=(20,5),
                                       legend=False,
                                       title=('Raw / Normalised load profile '
                                              f'of bldg {bldg_id} '
                                              f'on day {single_day}'),
                                       color="red")
        ax.set_ylabel('Raw load profile', fontsize=12, color="red")
        # second y-axis for the normalised curve
        ax2=ax.twinx()
        df_curr_building_curr_day.plot(y='norm_meter_reading',
                                       x='timestamp',
                                       ax=ax2,
                                       kind='line',
                                       figsize=(18,5),
                                       legend=False,
                                       color='blue')
        ax2.set_ylabel('Normalised load profile', fontsize=12, color='blue')
        complete_path = f'{save_path}/{single_day.strftime("%Y-%m-%d")}_D-{current_D_val}_p-{current_p_val}.png'
        fig.savefig(complete_path,
                    format='png',
                    dpi=50)
        plt.close('all')
        # side effect: record where this day's plot was written
        df_ks_test_results.loc[curr_row_ks_test_df, 'plot_path'] = complete_path
def forecasting_barplot(
    dict_algo,
    metric='rmsle',
    plot_name='forecasting',
    x_labels=('ALDI', 'ALDI++'),
    figsize=(16,32),
    ylim=(2,3),
    fontsize=40
):
    """Plot the chosen forecasting RMSE based on different discord detectors.

    Fix: the mutable list default for ``x_labels`` was replaced by a
    tuple with the same values (shared mutable defaults are a classic
    Python pitfall); callers passing lists are unaffected.

    Parameters
    ----------
    dict_algo : dict
        Maps detector name -> score; bars follow insertion order.
    metric : str
        Currently unused; kept for interface compatibility.
    plot_name : str
        Suffix of the output file img/barplot_comparison-<plot_name>.png.
    x_labels, figsize, ylim, fontsize
        Axis labels and figure styling.
    """
    fig, ax = plt.subplots(figsize=figsize)
    sns.barplot(
        x=list(dict_algo.keys()),
        y=list(dict_algo.values()),
        orient='v', ax=ax
    )
    ax.set_ylabel("RMSLE", fontsize=fontsize)
    ax.set_xlabel("Discord detectors", fontsize=fontsize)
    ax.set_xticklabels(x_labels)
    ax.tick_params(length=20, direction="inout", labelsize=fontsize)
    plt.ylim(ylim)
    # horizontal red reference line at the first detector's score
    plt.hlines(
        xmin=0 - 0.5,
        xmax=len(list(dict_algo.keys()))-0.5,
        y=list(dict_algo.values())[0],
        colors='r',
        linewidth=3 # vertical line at position 0
    )
    plt.tight_layout()
    fig.savefig(f'img/barplot_comparison-{plot_name}.png', format='PNG')
def forecasting_bubble(
    dict_algo,
    plot_name='forecasting',
    y_labels=('ALDI', 'ALDI++'),
    figsize=(16,32),
    xlim=(2,3),
    fontsize=40
):
    """Plot the chosen forecasting RMSE based on different discord detectors.

    Bubble size encodes computation time. Fix: the mutable list default
    for ``y_labels`` was replaced by a tuple with the same values
    (shared mutable defaults are a classic Python pitfall).

    Parameters
    ----------
    dict_algo : dict
        Maps detector name -> {'rmsle': ..., 'time': ...}.
    plot_name : str
        Suffix of img/bubbleplot_comparison-<plot_name>.png.
    y_labels, figsize, xlim, fontsize
        Axis labels and figure styling.
    """
    # prepare dataframe: one row per detector
    df_discord_detectors = pd.DataFrame.from_dict(dict_algo, orient='index')
    fig, _ = plt.subplots(1, 1, figsize=figsize)
    ax = sns.scatterplot(
        data=df_discord_detectors,
        x="rmsle",
        y=df_discord_detectors.index,
        size="time",
        alpha=1,
        sizes={
            1: 20,
            8: 80,
            32: 320,
            40: 400,
            480: 4800,
        },
        size_order=[8, 40, 480],
        clip_on=False
    )
    ax.set_xlabel("RMSLE", fontsize=fontsize)
    ax.set_ylabel("Discords labeled by", fontsize=fontsize)
    ax.set_yticklabels(y_labels)
    ax.tick_params(length=20, direction="inout", labelsize=fontsize)
    ax.legend(
        title="Computation \n time (min)",
        title_fontsize=fontsize-3,
        fontsize=fontsize-3,
        frameon=False,
        bbox_to_anchor=(1, 0.85),
        ncol=1,
    )
    ax.margins(y=0.1)
    plt.grid()
    # ax.set(frame_on=False)
    plt.xlim(xlim)
    plt.tight_layout()
    fig.savefig(f'img/bubbleplot_comparison-{plot_name}.png', format='PNG')
###############################################################################
# functions from here on are taken from:
# https://github.com/buds-lab/ashrae-great-energy-predictor-3-solution-analysis/blob/master/solutions/rank-1/ashrae/utils.py
@contextmanager
def timer(name):
    """Context manager logging the wall-clock duration of its block."""
    print(f"{datetime.now()} - [{name}] ...")
    start = time.time()
    yield
    print(f"{datetime.now()} - [{name}] done in {time.time() - start:.0f} s\n")
def reduce_mem_usage(df, skip_cols=[], verbose=False):
    """Reduce memory usage in a pandas dataframe by downcasting dtypes.

    Non-object columns are inspected; columns whose values are all
    (near-)integral are cast to the smallest fitting (u)int type,
    everything else to float32. Columns with missing values are filled
    with ``min - 1`` first (integers cannot hold NaN) and reported in
    ``NAlist``. The dataframe is modified in place and also returned.

    Based on this great kernel:
    https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65

    Parameters
    ----------
    df : pandas.DataFrame to shrink (mutated in place).
    skip_cols : columns to leave untouched (read-only here, so the
        mutable [] default is harmless).
    verbose : print per-column before/after dtypes.

    Returns
    -------
    (df, NAlist) : the shrunk dataframe and the list of columns whose
        NaNs were filled with ``min - 1``.
    """
    start_mem_usg = df.memory_usage().sum() / 1024 ** 2
    print("Memory usage of properties dataframe is :", start_mem_usg, " MB")
    NAlist = []  # Keeps track of columns that have missing values filled in.
    for col in np.setdiff1d(df.columns, skip_cols):
        if df[col].dtype != object:  # Exclude strings
            # print column type
            if verbose:
                print("******************************")
                print("Column: ", col)
                print("dtype before: ", df[col].dtype)
            # make variables for Int, max and min
            IsInt = False
            mx = df[col].max()
            mn = df[col].min()
            if verbose:
                print("min for this col: ", mn)
                print("max for this col: ", mx)
            # Integer does not support NA, therefore, NA needs to be filled
            if not np.isfinite(df[col]).all():
                NAlist.append(col)
                df[col].fillna(mn - 1, inplace=True)
            # test if column can be converted to an integer:
            # the rounding error after an int64 cast must be ~0
            asint = df[col].fillna(0).astype(np.int64)
            result = df[col] - asint
            result = result.sum()
            if result > -0.01 and result < 0.01:
                IsInt = True
            # Make Integer/unsigned Integer datatypes
            # NOTE(review): the unsigned bounds are exclusive (mx < 255
            # instead of <= 255), so boundary values land one dtype
            # larger than necessary — conservative, not incorrect.
            if IsInt:
                if mn >= 0:
                    if mx < 255:
                        df[col] = df[col].astype(np.uint8)
                    elif mx < 65535:
                        df[col] = df[col].astype(np.uint16)
                    elif mx < 4294967295:
                        df[col] = df[col].astype(np.uint32)
                    else:
                        df[col] = df[col].astype(np.uint64)
                else:
                    if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max:
                        df[col] = df[col].astype(np.int8)
                    elif mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max:
                        df[col] = df[col].astype(np.int16)
                    elif mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max:
                        df[col] = df[col].astype(np.int32)
                    elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max:
                        df[col] = df[col].astype(np.int64)
            # Make float datatypes 32 bit
            else:
                df[col] = df[col].astype(np.float32)
            if verbose:
                print("dtype after: ", df[col].dtype)
                print("******************************")
    # Print final result
    if verbose:
        print("___MEMORY USAGE AFTER COMPLETION:___")
        mem_usg = df.memory_usage().sum() / 1024 ** 2
        print("Memory usage is: ", mem_usg, " MB")
        print("This is ", 100 * mem_usg / start_mem_usg, "% of the initial size")
    return df, NAlist
def load_data(
    data_name,
    algorithm="baseline",
    data_location="data",
    discord_location="data/outliers",
    discord_file="bad_meter_readings.csv",
    output_location="data/preprocessed",
):
    """Load and format one of the project's datasets, selected by name.

    Known names: raw ("train", "test", "input"), cleaned ("train_clean",
    "test_clean", "clean"), weather ("train_weather", "test_weather",
    "weather"), plus "discord", "meta" and "sample_submission".
    The paired names return (train, test) tuples; an unknown name falls
    through and returns None.
    """
    # --- raw meter readings ---
    if data_name == "train":
        return pd.read_csv(f"{data_location}/train.csv")
    if data_name == "test":
        return pd.read_csv(f"{data_location}/test.csv")
    if data_name == "input":
        train = load_data("train", data_location=data_location)
        test = load_data("test", data_location=data_location)
        return train, test
    # --- cleaned readings, pickled per discord-detection algorithm ---
    if data_name == "train_clean":
        return pd.read_pickle(f"{output_location}/train_clean_{algorithm}.pkl")
    if data_name == "test_clean":
        return pd.read_pickle(f"{output_location}/test_clean_{algorithm}.pkl")
    if data_name == "clean":
        train = load_data(
            "train_clean", output_location=output_location, algorithm=algorithm
        )
        test = load_data(
            "test_clean", output_location=output_location, algorithm=algorithm
        )
        return train, test
    # --- raw weather ---
    if data_name == "train_weather":
        return pd.read_csv(f"{data_location}/weather_train.csv")
    if data_name == "test_weather":
        return pd.read_csv(f"{data_location}/weather_test.csv")
    if data_name == "weather":
        train = load_data("train_weather", data_location=data_location)
        test = load_data("test_weather", data_location=data_location)
        return train, test
    # --- discord/outliers (rows to drop) ---
    if data_name == "discord":
        return pd.read_csv(f"{discord_location}/{discord_file}")
    # --- building metadata ---
    if data_name == "meta":
        return pd.read_csv(f"{data_location}/building_metadata.csv")
    # --- kaggle sample submission ---
    if data_name == "sample_submission":
        return pd.read_csv(f"{data_location}/sample_submission.csv")
class Logger(object):
    """Append text lines to a log file, optionally echoing them to stdout.

    Fix: ``__init__`` previously called ``open(file_path, mode=mode)``
    without closing the handle (a resource leak); the file is now
    touched/truncated inside a context manager. ``append`` writes via
    ``print(line, file=f)``, which produces exactly the same bytes as
    the previous redirect_stdout trick.
    """

    def __init__(self, file_path, mode="w", verbose=False):
        # file_path: destination log file; mode "w" truncates, "a" keeps
        self.file_path = file_path
        self.verbose = verbose
        with open(file_path, mode=mode):
            pass

    def append(self, line, print_line=None):
        """Append *line* (plus a newline) to the log; echo if requested."""
        if print_line or self.verbose:
            print(line)
        with open(self.file_path, "a") as f:
            print(line, file=f)
def make_dir(dir_name):
    """Create a directory (and parents) if it doesn't already exist.

    Fix: the check-then-create pair was racy (TOCTOU) — another process
    could create the directory between the check and the call;
    ``exist_ok=True`` makes the operation atomic and idempotent.
    """
    os.makedirs(dir_name, exist_ok=True)
def get_validation_months(n_months):
    """Return sliding windows of ``n_months`` consecutive calendar months.

    For every shift in ``range(n_months)`` and every window start, an
    ``np.arange`` of month numbers is produced and wrapped into 1..12.
    """
    windows = []
    for shift in range(n_months):
        for start in range(shift, 12 + shift, n_months):
            windows.append(np.arange(start + 1, start + 1 + n_months))
    return [(window - 1) % 12 + 1 for window in windows]
def get_daily_resolution(df_hourly_data, agg_method="logic_or"):
    """
    Collapse hourly is_discord flags into daily flags.

    Parameters
    -----------
    df_hourly_data: pandas.DataFrame, required
        Hourly 0/1 discord flags, one column per building
        ('is_discord_{bldg_id}'), indexed by hourly timestamps.
    agg_method: str, optional
        Rule used to aggregate the 24 hourly flags of a day:
        'logic_or' (default, any flagged hour marks the day),
        'logic_and' (all 24 hours flagged), 'majority' (>= 12),
        or 'majority_plus' (>= 15).

    Returns
    -----------
    pandas.DataFrame
        Daily 0/1 discord flags (int8), indexed by date, same columns
        as the input.
    """
    # number of flagged hours per calendar day, per building (0..24)
    daily_sums = df_hourly_data.groupby(df_hourly_data.index.date).sum()

    aggregation_rules = {
        "logic_or": daily_sums >= 1,
        "logic_and": daily_sums == 24,
        "majority": daily_sums >= 12,
        "majority_plus": daily_sums >= 15,
    }
    assert agg_method in aggregation_rules
    discord_mask = aggregation_rules[agg_method]

    # days matching the rule become 1 ...
    flagged = daily_sums.mask(discord_mask, 1)
    # ... and every remaining value that is not exactly 1 becomes 0
    daily_flags = flagged.mask(flagged != 1, 0)
    return daily_flags.astype("int8")
def rmsle(x, y):
    """Root mean squared logarithmic error between two arrays.

    Equivalent to sqrt(mean((log1p(x) - log1p(y))**2)); computed with
    numpy only instead of round-tripping through sklearn's
    mean_squared_error for a one-line calculation.
    """
    lx = np.log1p(np.asarray(x, dtype=float))
    ly = np.log1p(np.asarray(y, dtype=float))
    return float(np.sqrt(np.mean((lx - ly) ** 2)))
class GeneralizedMeanBlender:
    """Combines multiple predictions using a generalized (power) mean.

    The power `p`, a scale factor `c`, and per-model weights are tuned
    with Optuna to minimize the RMSE against the target. p == 0 falls
    back to the weighted geometric mean (via log1p/expm1).
    """
    def __init__(self, p_range=(0, 1), random_state=42):
        """
        Args:
            p_range: Range of the power in the generalized mean. Default is (0, 1).
            random_state: Seed for the random number generator.
        Returns: GeneralizedMeanBlender object
        """
        self.p_range = p_range
        self.random_state = random_state
        # all three stay None until fit() is called (or are set manually)
        self.p = None
        self.c = None
        self.weights = None
    def _objective(self, trial, X, y):
        # create hyperparameters
        p = trial.suggest_uniform(f"p", *self.p_range)
        c = trial.suggest_uniform(f"c", 0.95, 1.05)
        weights = [trial.suggest_uniform(f"w{i}", 0, 1) for i in range(X.shape[1])]
        # blend predictions: X columns are the individual model predictions
        blend_preds, total_weight = 0, 0
        if p == 0:
            # p == 0 limit: weighted geometric mean in log1p space
            for j, w in enumerate(weights):
                blend_preds += w * np.log1p(X[:, j])
                total_weight += w
            blend_preds = c * np.expm1(blend_preds / total_weight)
        else:
            # generalized power mean with exponent p
            for j, w in enumerate(weights):
                blend_preds += w * X[:, j] ** p
                total_weight += w
            blend_preds = c * (blend_preds / total_weight) ** (1 / p)
        # calculate root mean squared error (RMSE) of the blend
        return np.sqrt(mean_squared_error(y, blend_preds))
    def fit(self, X, y, n_trials=10):
        # optimize objective with a seeded TPE sampler for reproducibility
        obj = partial(self._objective, X=X, y=y)
        sampler = optuna.samplers.TPESampler(seed=self.random_state)
        study = optuna.create_study(sampler=sampler)
        study.optimize(obj, n_trials=n_trials)
        # extract best weights
        # NOTE: `p` is only taken from the study when not pre-set, so a
        # manually assigned self.p survives fit(); c and weights are
        # always overwritten from the best trial
        if self.p is None:
            self.p = [v for k, v in study.best_params.items() if "p" in k][0]
        self.c = [v for k, v in study.best_params.items() if "c" in k][0]
        self.weights = np.array([v for k, v in study.best_params.items() if "w" in k])
        self.weights /= self.weights.sum()  # normalize weights to sum to 1
    def transform(self, X):
        # requires p/weights either from fit() or assigned manually
        assert (
            self.weights is not None and self.p is not None
        ), "Must call fit method before transform"
        if self.p == 0:
            # geometric-mean branch, mirrors _objective's p == 0 case
            return self.c * np.expm1(np.dot(np.log1p(X), self.weights))
        else:
            return self.c * np.dot(X ** self.p, self.weights) ** (1 / self.p)
    def fit_transform(self, X, y, **kwargs):
        self.fit(X, y, **kwargs)
        return self.transform(X)
| 20,883 | 34.336717 | 124 | py |
aldiplusplus | aldiplusplus-main/vae.py | import torch
from torch import nn
from torch.utils.data import DataLoader
class VAE(nn.Module):
    """Fully-connected variational autoencoder with Tanh activations.

    Encoder: num_input -> hidden_size[0] -> hidden_size[1] -> hidden_size[2]
    -> latent_dim, followed by two linear heads (`mu`, `log_var`) that
    parameterize the Gaussian posterior. The decoder mirrors the encoder
    back to `num_input`; its final Tanh puts reconstructions in [-1, 1].
    """

    def __init__(self, num_input, latent_dim, hidden_size=(300, 200, 100)):
        """
        Args:
            num_input: Dimensionality of one input sample.
            latent_dim: Size of the latent space.
            hidden_size: Three hidden-layer widths, outermost first.
                Default changed from a mutable list to a tuple to avoid
                the shared-mutable-default pitfall; it is only indexed,
                so behavior is identical.
        """
        super().__init__()
        self.latent_dim = latent_dim
        self.num_input = num_input
        self.encoder = nn.Sequential(
            nn.Linear(num_input, hidden_size[0]),
            nn.Tanh(),
            nn.Linear(hidden_size[0], hidden_size[1]),
            nn.Tanh(),
            nn.Linear(hidden_size[1], hidden_size[2]),
            nn.Tanh(),
            nn.Linear(hidden_size[2], latent_dim),
            nn.Tanh(),
        )
        self.mu = nn.Linear(latent_dim, latent_dim)
        self.log_var = nn.Linear(latent_dim, latent_dim)
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, hidden_size[2]),
            nn.Tanh(),
            nn.Linear(hidden_size[2], hidden_size[1]),
            nn.Tanh(),
            nn.Linear(hidden_size[1], hidden_size[0]),
            nn.Tanh(),
            nn.Linear(hidden_size[0], num_input),
            nn.Tanh(),
        )

    def reparameterize(self, mu, log_var):
        """Sample z ~ N(mu, sigma^2) via the reparameterization trick.

        In eval mode this returns `mu` deterministically.
        """
        if self.training:
            std = torch.exp(0.5 * log_var)
            eps = torch.randn_like(std)
            return eps * std + mu
        return mu

    def encode(self, x):
        """Map an input batch to posterior parameters (mu, log_var)."""
        hidden = self.encoder(x)
        mu = self.mu(hidden)
        log_var = self.log_var(hidden)
        return mu, log_var

    def forward(self, x):
        """Return (reconstruction, mu, log_var) for a batch."""
        mu, log_var = self.encode(x)
        z = self.reparameterize(mu, log_var)
        return self.decoder(z), mu, log_var

    def loss_function(self, x_hat, x, mu, log_var, beta=1):
        """beta-VAE loss: MSE reconstruction + beta * KL(N(mu, sigma) || N(0, I))."""
        # closed-form KL divergence of the diagonal Gaussian posterior
        kl_loss = 0.5 * torch.sum(torch.exp(log_var) - log_var - 1 + mu**2)
        mse = nn.MSELoss()  # reconstruction loss
        recon_loss = mse(x_hat, x)
        return recon_loss + beta * kl_loss
| 1,984 | 30.015625 | 75 | py |
aldiplusplus | aldiplusplus-main/aldi_evaluation_metrics.py | from functools import reduce
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
class AldiEvaluationMetrics():
    """
    Provides various metrics for the evaluation of discord detectors
    """
    def get_roc_auc(self, df_true, df_pred):
        """
        Calculates the ROC-AUC over all buildings of a site at once.
        Both dataframes must have the same shape and column names.

        Keyword arguments:
        df_true -- dataframe with the true values/labels
        df_pred -- dataframe with the predicted values/labels

        Returns:
        roc_auc -- site-level ROC-AUC score, or 0 when the score is
                   undefined (e.g. only one class present in y_true)
        """
        assert df_true.shape == df_pred.shape, (
            "the dataframes must have the same shape")
        # in order to avoid buildings where all `y_true` are either 0 or 1,
        # the entire site is evaluated as a whole
        try:
            roc_auc = roc_auc_score(
                df_true.values.ravel(), df_pred.values.ravel())
        except ValueError as v_error:
            # bug fix: `v_error.message` does not exist in Python 3 and
            # raised an AttributeError instead of reporting the error
            print(f'ValueError w/ msg {v_error}')
            roc_auc = 0
        return roc_auc
    def get_accuracy(self, df_true, df_pred):
        """
        Calculates column-wise the accuracy of two dataframes
        (same shape) with the same column names.

        Keyword arguments:
        df_true -- dataframe with the true values/labels
        df_pred -- dataframe with the predicted values/labels

        Returns:
        df_accuracies -- dataframe with the column-wise accuracies
                            - index are the column names of the
                              input dataframe
                            - column is 'accuracy'
        """
        assert df_true.shape == df_pred.shape, (
            "the dataframes must have the same shape")
        df_accuracies = pd.DataFrame(index=df_true.columns,
                                     columns=['accuracy'])
        for entry in df_true.columns:
            single_accuracy = accuracy_score(df_true[entry], df_pred[entry])
            df_accuracies.at[entry, 'accuracy'] = single_accuracy
        return df_accuracies
    def get_heatmap(
        self,
        list_metric,
        list_sites,
        aldi_impl,
        metric='roc_auc',
        meter_type=0,
        p_value=0.01
    ):
        """
        Calculates a site-level accuracy heatmap, saves the per-site
        values as CSV and the heatmap as PNG, and shows the plot.

        Keyword arguments:
        list_metric -- per-site performance values; assumed to be
                       indexable/keyed by site id (TODO confirm)
        list_sites  -- list with all sites
        aldi_impl   -- string with the algorithm name
        metric      -- string of chosen metric (e.g., 'roc_auc', 'accuracy')
        meter_type  -- int of chosen meter
        p_value     -- float of chosen p-value used for K-S test
        """
        df_all_metrics = pd.DataFrame(
            {'site_id': list_sites}).set_index('site_id')
        # `roc_auc` doesn't analyze each building individually, it stores the
        # value for the entire site
        if metric == 'roc_auc':
            df_all_metrics[aldi_impl] = [list_metric[site_id]
                                         for site_id in list_sites]
        else:
            df_all_metrics[aldi_impl] = [list_metric[site_id]
                                         [metric].mean() for site_id in list_sites]
        df_all_metrics.to_csv(
            f'data/results/{metric}_ai-{aldi_impl}_p{p_value}_m{meter_type}.csv')
        plt.title(f'{metric} of the different discord detectors', fontsize=18)
        fig = sns.heatmap(df_all_metrics, vmin=0, vmax=1,
                          cmap='YlGnBu').get_figure()
        fig.savefig(
            f'img/{metric}_heatmap_ai-{aldi_impl}_p{p_value}_m{meter_type}.png', format='PNG')
        plt.show()
    def get_heatmap_comparison(
        self,
        list_aldi_impl,
        list_sites,
        dict_meter_type,
        dict_p_value,
        metric='roc_auc',
        plot_name='baselines',
        fontsize=20
    ):
        """
        Compares the accuracy of different ALDI implementations in a heatmap.
        Reads the per-detector CSVs written by get_heatmap.
        Dictionary arguments have their respective 'aldi_impl' as key.

        Keyword arguments:
        list_aldi_impl -- list with strings of algorithms names
        list_sites -- list with all sites common for
        dict_meter_type -- dict with int of chosen meter (values)
        dict_p_value -- dict with float of chosen p-value used for K-S test (values)
        """
        list_metric = []
        for aldi_impl in list_aldi_impl:
            p_value = dict_p_value[aldi_impl]
            meter_type = dict_meter_type[aldi_impl]
            list_metric.append(pd.read_csv(f'data/results/{metric}_ai-{aldi_impl}_p{p_value}_m{meter_type}.csv',
                                           index_col=0))
        df_metric = pd.concat(list_metric, axis=1)
        fig, ax = plt.subplots(figsize=(16, 16))
        # bug fix: the original referenced an undefined name `df_acc` here
        sns.heatmap(df_metric[list_aldi_impl],
                    cmap='YlGnBu', vmin=0, vmax=1, ax=ax)
        if metric == 'roc_auc':
            metric_str = 'ROC-AUC'
        else:
            metric_str = metric
        ax.set_title(f"{metric_str} on Electricity meters",
                     fontsize=fontsize * 2)
        ax.set_xlabel("Discord detectors", fontsize=fontsize * 2)
        ax.set_ylabel("Site ID", fontsize=fontsize * 2)
        ax.tick_params(labelsize=fontsize)
        cax = plt.gcf().axes[-1]
        cax.tick_params(labelsize=fontsize * 2)
        plt.xticks(rotation=90)
        plt.tight_layout()
        fig.savefig(f'img/{metric}_heatmap_{plot_name}.png', format='PNG')
    def get_class_report(
        self,
        df_true,
        df_pred,
        aldi_impl,
        level_name,
        meter_type=0,
        figsize=(10, 10),
        fontsize=40,
        path=''
    ):
        """
        Calculates the classification report and confusion matrix based
        on two dataframes, saving the matrix heatmap as PNG.

        Keyword arguments:
        df_true -- dataframe with the true values/labels
        df_pred -- dataframe with the predicted values/labels
        aldi_impl -- string with the algorithm name
        level_name -- string with the level of comparison (e.g., all, site_id, building_id)
        meter_type -- int of chosen meter
        path -- string with relative path

        Returns:
        cf_report -- classification report generated through scikit-learn
        """
        vector_true = df_true.values.ravel()
        vector_pred = df_pred.values.ravel()
        cm = confusion_matrix(vector_true, vector_pred,
                              labels=np.unique(vector_true))
        cf_report = classification_report(vector_true, vector_pred)
        # row-normalized percentages for the annotated heatmap
        cm_sum = np.sum(cm, axis=1, keepdims=True)
        cm_perc = cm / cm_sum.astype(float) * 100
        annot = np.empty_like(cm).astype(str)
        nrows, ncols = cm.shape
        for i in range(nrows):
            for j in range(ncols):
                count = cm[i, j]
                pct = cm_perc[i, j]
                # off-diagonal zero cells stay blank; everything else
                # shows its row percentage (dead local `s` removed)
                if count == 0 and i != j:
                    annot[i, j] = ''
                else:
                    annot[i, j] = '%.1f%%' % pct
        cm_perc = pd.DataFrame(cm_perc, index=np.unique(
            vector_true), columns=np.unique(vector_true))
        cm_perc.index.name = 'Actual'
        cm_perc.columns.name = 'Predicted'
        fig, ax = plt.subplots(figsize=figsize)
        sns.heatmap(cm_perc,
                    cmap="YlGnBu",
                    annot=annot,
                    vmin=0,
                    vmax=100,
                    fmt='',
                    ax=ax,
                    annot_kws={"fontsize": fontsize})
        ax.set_xlabel("Predicted", fontsize=fontsize)
        ax.set_ylabel("Actual", fontsize=fontsize)
        ax.tick_params(labelsize=fontsize)
        cax = plt.gcf().axes[-1]
        cax.tick_params(labelsize=fontsize)
        if path == '':
            fig.savefig(f'img/classification_report_ai-{aldi_impl}_{level_name}_m{meter_type}.png',
                        format='PNG')
        else:
            fig.savefig(f'{path}/confusion_matrix_{aldi_impl}_{level_name}.png',
                        format='PNG')
        plt.clf()
        return cf_report
    def accuracy_barplot(  # TODO: finish
        self,
        list_aldi_impl,
        list_sites,
        dict_meter_type,
        dict_p_value,
        plot_name='baselines',
        fontsize=20
    ):
        """Plot accuracies of different models (not implemented yet)."""
| 9,212 | 33.897727 | 112 | py |
aldiplusplus | aldiplusplus-main/prepare_predictions.py | import os
import argparse
import glob
import numpy as np
import pandas as pd
from functools import partial
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from utils import (
load_data,
rmsle,
timer,
GeneralizedMeanBlender
)
parser = argparse.ArgumentParser(description="")
parser.add_argument("--file", help="Configuration file")

if __name__ == "__main__":
    # bug fix: `yaml` was used below but never imported in this file;
    # local import keeps the dependency script-only
    import yaml

    args = parser.parse_args()

    # load config file from CLI
    # NOTE: FullLoader is fine for trusted config files; do not use on
    # untrusted input
    with open(str(args.file), "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    algorithm = config["algorithm"]
    output_location = config["output_location"]
    # bug fix: `data_location` was referenced below but never defined
    # (NameError); read it from the config — TODO confirm default path
    data_location = config.get("data_location", "data")

    MODEL_LIST = [
        f"output/{algorithm}/lgb-split_meter-no_normalization.npy",
    ]

    # load test data
    with timer("load test data"):
        test = load_data("test_clean", algorithm=algorithm, output_location=output_location)

    # load predictions (one column per model in MODEL_LIST)
    with timer("load predictions"):
        preds_matrix = [np.load(x) for x in MODEL_LIST if ".npy" in x]
        preds_matrix = np.vstack(preds_matrix).T
        # preds_matrix[preds_matrix < 0] = 0

    # blend predictions with fixed, pre-tuned blender parameters
    with timer("blend predictions"):
        gmb = GeneralizedMeanBlender()
        gmb.p = 0.11375872112626925
        gmb.c = 0.99817730007820798
        gmb.weights = [1]
        test_preds = 0.99576627605010293*np.expm1(gmb.transform(np.log1p(preds_matrix)))

    # create submission; only electricity (meter == 0) rows are overwritten
    with timer("create submission"):
        subm = load_data("sample_submission", data_location=data_location)
        subm.loc[test.meter == 0, "meter_reading"] = test_preds
        subm.loc[subm.meter_reading < 0, "meter_reading"] = 0  # clamp negatives

    # save data
    with timer("save data"):
        subm.to_csv(f"output/{algorithm}/final_submission.csv", index=False)
| 1,835 | 28.142857 | 92 | py |
aldiplusplus | aldiplusplus-main/data_import_ashrae.py | import numpy as np
import pandas as pd
class DataImportAshrae():
    """
    class provides different methods to import BDG2 data
    for experiments with Discord Detectors
    """
    def __init__(self):
        """
        method initializes df_all_data (raw data is loaded lazily)
        """
        self.df_all_data = None
    def get_meter_data( self, energy_type_list: list, site_list: list,
                        verbose=False):
        """
        method returns a sorted NxM dataframe with M buildings and N
        rows with hourly timestamp as indices

        Keyword arguments:
        energy_type_list -- List with the requested meter types
        site_list -- List with the requested site ids
        verbose -- enable debugging printing (default False)

        Returns:
        df_result -- dataframe (NxM) (N = #timestamps, M = #buildings)
        with readings at corresponding times at corresponding buildings
        """
        df_site_meter = self._prepare_and_filter_raw_data( energy_type_list,
                                                           site_list)
        filter_col = ['meter_reading']
        df_result = self._build_desired_df( df_site_meter,
                                            filter_col,
                                            addBuildingID= True,
                                            addDiscordID= False)
        return df_result
    def get_daily_profiles( self, meter_type=0, site_list=None):
        """
        method returns a sorted Nx24 dataframe (N = #buildings x days)
        of daily load profiles plus a 'building_id' column; days with
        any NaN reading are dropped

        Keyword arguments:
        meter_type -- Integer with the requested meter type
        site_list -- List with the requested site ids
                     (default changed from a mutable [] to None to avoid
                     the mutable-default pitfall; behavior is unchanged)

        Returns:
        df_all -- dataframe with 24 hourly readings per row, indexed by
                  the timestamp of each day's first hour
        """
        site_list = [] if site_list is None else site_list
        # bug fix: the original created a *new* DataImportAshrae() here,
        # re-reading all CSVs instead of reusing this instance's cache
        df_site_meter = self.get_meter_data([meter_type], site_list)
        building_list = df_site_meter.columns
        og_idx = df_site_meter.reset_index()["timestamp"].copy()
        df_site_meter = df_site_meter.reset_index(drop=True)
        day_list = []
        bdg_list = []
        idx_list = []
        for bid in building_list:
            # slice consecutive 24-hour windows per building
            for i in range(0, df_site_meter.shape[0], 24):
                day = df_site_meter.loc[i:i+23, bid]
                day_list.append(day)
                idx_list.append(og_idx[i])
                bdg_list.append(bid)
        df_all = pd.DataFrame(np.stack(day_list, axis=0), index=idx_list)
        df_all['building_id'] = bdg_list
        # remove days with nan values
        df_all = df_all.dropna(axis=0, how='any')
        assert df_all.isnull().values.any() == False
        return df_all
    def get_labeled_meter_data( self, energy_type_list: list, site_list: list,
                                verbose=False):
        """
        method returns a sorted Nx(2*M) dataframe with M buildings,
        M corresponding labels (is_discord?) and N rows with
        hourly timestamp as indices

        Keyword arguments:
        energy_type_list -- List with the requested meter types
        site_list -- List with the requested site ids
        verbose -- enable debugging printing (default False)

        Returns:
        df_result -- dataframe (Nx(2*M)) (N = #timestamps, M = #buildings)
        with readings at corresponding times at corresponding buildings
        and a label that describes if the reading is a discord
        """
        df_site_meter = self._prepare_and_filter_raw_data( energy_type_list,
                                                           site_list)
        filter_col = ['meter_reading', 'is_discord']
        df_result = self._build_desired_df( df_site_meter,
                                            filter_col,
                                            addBuildingID= True,
                                            addDiscordID= True)
        return df_result
    def get_label_data(self, energy_type_list, site_list, verbose=False):
        """
        method returns a sorted NxM dataframe with M buildings and
        N rows with hourly timestamp as indices

        Keyword arguments:
        energy_type_list -- List with the requested meter types
        site_list -- List with the requested site ids
        verbose -- enable debugging printing (default False)

        Returns:
        df_result -- dataframe (NxM) (N = #timestamps, M = #buildings)
        with labels (is_discord?) at corresponding times
        at corresponding buildings
        """
        df_site_meter = self._prepare_and_filter_raw_data( energy_type_list,
                                                           site_list)
        filter_col = ['is_discord']
        df_result = self._build_desired_df( df_site_meter,
                                            filter_col,
                                            addBuildingID= False,
                                            addDiscordID= True)
        return df_result
    def get_meta_data(self, verbose= False):
        """
        method returns a dataframe with metadata from the BDG2 dataset

        Keyword arguments:
        verbose -- enable debugging printing (default False)

        Returns:
        df_result -- metadata from BDG2 (one row per site/building pair)
        """
        if self.df_all_data is None:
            self.df_all_data = self._get_raw_data().copy()
        df_result = self.df_all_data.copy()
        df_result = df_result.filter([ 'site_id',
                                       'building_id',
                                       'primary_use',
                                       'square_feet',
                                       'year_built',
                                       'floor_count',],
                                     axis=1)
        df_result = df_result.drop_duplicates(subset=[ 'site_id',
                                                       'building_id',])
        return df_result
    def get_timestamps(self, verbose= False):
        """
        Method returns a dataframe with all reading times

        Keyword arguments:
        verbose -- enable debugging printing (default False)

        Returns:
        df_result -- dataframe with all unique reading timestamps
        """
        if self.df_all_data is None:
            self.df_all_data = self._get_raw_data().copy()
        df_result = self.df_all_data.copy()
        df_result = df_result.filter(['timestamp'], axis=1)
        df_result = df_result.drop_duplicates()
        return df_result
    def get_timestamps_buildings(self, resolution='H'):
        """
        Method returns the unique (timestamp, building_id, meter)
        combinations present in the raw data

        Keyword arguments:
        resolution -- 'H' for hourly timestamps, 'D' to collapse
                      timestamps to calendar days (default 'H')

        Returns:
        df_result -- dataframe with columns timestamp, building_id, meter
        """
        assert (resolution in ['H', 'D']), ('Make sure that the '
                                            'resolution is either "H" '
                                            '(hourly) or "D" (daily)')
        if self.df_all_data is None:
            self.df_all_data = self._get_raw_data().copy()
        df_result = self.df_all_data.copy()
        df_result = df_result.filter(['timestamp',
                                      'building_id',
                                      'meter',
                                      ], axis=1)
        if resolution == 'D':
            # collapse hourly stamps to dates, then de-duplicate
            df_result['date'] = df_result['timestamp'].dt.date
            df_result = df_result.drop(['timestamp'], axis=1)
            df_result = df_result.drop_duplicates()
            df_result = df_result.rename(columns={'date': 'timestamp'})
            df_result['timestamp'] = pd.to_datetime(df_result['timestamp'])
        assert (True not in df_result.duplicated().unique()), (
            'Something went wrong. At this point, duplicates must'
            'no longer appear in the dataframe df_result!')
        return df_result
    def get_vacation_data(self, site_id:int, verbose=False):
        """
        Method returns a 2016 daily discord calendar derived from the
        collected vacation/schedule spreadsheets (only available for
        sites 1, 2, 4, 14 and 15)

        Keyword arguments:
        site_id -- id of the requested site
        verbose -- enable debugging printing (default False)

        Returns:
        df_vacations -- dataframe indexed by date with an 'is_discord'
                        column (1 on non-'Regular' days, 0 otherwise)
        """
        excel_vacations = pd.ExcelFile(r'data/holidays/Great Energy Predictor III Schedule Data Collection.xlsx')
        dict_site_sheet = {
            1:{'id':1,
               'name':'site1',
               'sheet':'University College London',
              },
            2:{'id':2,
               'name':'site2',
               'sheet':'Arizona State',
              },
            4:{'id':4,
               'name':'site4',
               'sheet':'University of California Berkel',
              },
            14:{'id':14,
                'name':'site14',
                'sheet':'Princeton University',
               },
            15:{'id':15,
                'name':'site15',
                'sheet':'Cornell',
               },
        }
        list_available_sites = list(dict_site_sheet.keys())
        # check if the request is legal
        assert site_id in list_available_sites, "Only vacation data for sites 1, 2, 4, 14 and 15 can be exported"
        # select necessary data
        df_vacations = pd.read_excel(excel_vacations, dict_site_sheet[site_id]['sheet'])
        list_columns = list(df_vacations.columns)
        list_columns.remove('Date')
        # calculate is_normal and is_discord column
        df_vacations['Label 1'] = df_vacations['Label 1'].astype('category')
        df_vacations['is_normal'] = pd.get_dummies(df_vacations,
                                                   columns=['Label 1']
                                                  )['Label 1_Regular']
        assert (df_vacations['is_normal'].sum()
                == df_vacations[df_vacations['Label 1'] == 'Regular'].shape[0])
        df_vacations['is_discord'] = df_vacations['is_normal'].replace({0:1, 1:0})
        # clean up dataframe
        df_vacations = df_vacations.drop(list_columns, axis=1)
        df_vacations = df_vacations.drop('is_normal', axis=1)
        # filtering the necessary data
        df_vacations = df_vacations.set_index('Date')
        df_vacations = df_vacations.loc['2016-01-01':'2016-12-31']
        return df_vacations
    def _get_raw_data(self, verbose= False):
        """Read and merge the BDG2 CSVs (meters, weather, metadata, labels)."""
        assert self.df_all_data is None, "The data has already been loaded"
        # prepare base data
        #(ashrae energy predictor + winning solution s data)
        str_path_prefix = 'data/ashrae-energy-prediction/'
        df_meters = pd.read_csv(str_path_prefix + 'train.csv',
                                parse_dates= True)
        df_weather = pd.read_csv( str_path_prefix + 'weather_train.csv',
                                  parse_dates= True)
        df_metadata = pd.read_csv( str_path_prefix + 'building_metadata.csv',
                                   parse_dates= True)
        df_discord_labels = pd.read_csv('data/outliers/bad_meter_readings.csv',
                                        dtype=
                                        {'is_bad_meter_reading': np.int64})
        df_discord_labels = df_discord_labels.rename(
            columns={'is_bad_meter_reading': 'is_discord'})
        # merging the files
        df_all_data = df_meters.merge( df_metadata,
                                       on= 'building_id', how= 'left')
        df_all_data = df_all_data.merge(df_weather,
                                        on= ['site_id', 'timestamp'],
                                        how= 'left')
        # labels are row-aligned with the meter file, hence the index merge
        df_all_data = df_all_data.merge(df_discord_labels, left_index=True,
                                        right_index=True, how='outer')
        df_all_data.timestamp = pd.to_datetime(df_all_data.timestamp)
        return df_all_data
    def _filter_sites(self, df_base, site_list, verbose=False):
        """Keep only the rows whose site_id is in site_list."""
        df_reduced = df_base[(df_base['site_id'].isin(site_list))]
        return df_reduced
    def _filter_energy_type(self, df_base, energy_type_list, verbose=False):
        """Keep only the rows whose meter type is in energy_type_list."""
        df_reduced = df_base[(df_base['meter'].isin(energy_type_list))]
        return df_reduced
    def _correction_power(self, df_base, verbose=False):
        """Convert site 0 electricity (meter 0) readings with factor 0.2931.

        Mirrors the rank-1 solution's kBTU -> kWh correction:
        https://github.com/buds-lab/ashrae-great-energy-predictor-3-solution-analysis/blob/master/solutions/rank-1/scripts/02_preprocess_data.py
        """
        correction_mask = (df_base['meter'] == 0) & (df_base['site_id'] == 0)
        assert not df_base[correction_mask].empty,(
            "Nothing needs to be corrected in this section of the data")
        # bug fix: the original called `.mul(0.2931)` and discarded the
        # result, so the unit conversion never took effect; apply in place
        df_base.loc[correction_mask, 'meter_reading'] = (
            df_base.loc[correction_mask, 'meter_reading'] * 0.2931)
        return df_base
    def _prepare_and_filter_raw_data( self, energy_type_list: list,
                                      site_list: list):
        """Load raw data once, then filter by sites/meters and fix units."""
        if self.df_all_data is None:
            self.df_all_data = self._get_raw_data()
        df_site_meter = self._filter_sites(self.df_all_data.copy(), site_list)
        df_site_meter = self._filter_energy_type(df_site_meter, energy_type_list)
        if (0 in energy_type_list) and (0 in site_list):
            df_site_meter = self._correction_power(df_site_meter)
        return df_site_meter
    def _build_desired_df( self, df_site_meter, filter_col: list,
                           addBuildingID=False, addDiscordID=False):
        """Pivot the long per-building frame into one wide, time-indexed frame."""
        df_timestamps = pd.DataFrame(
            {'timestamp': self.df_all_data.timestamp.unique()})
        collector_list = [df_timestamps.set_index('timestamp')]
        # restructure
        for building in df_site_meter.building_id.unique():
            # filter all data for one building
            curr_building = df_site_meter[(df_site_meter['building_id']
                                           == building)]
            # ensure that all dates are taken into account
            curr_building = curr_building.merge(df_timestamps, how='outer',
                                                on='timestamp')
            # selection of the wanted data and assignment of suitable names
            curr_building = curr_building.filter( ['timestamp'] + filter_col,
                                                  axis=1)
            if addBuildingID:
                curr_building = curr_building.rename(
                    columns={'meter_reading': building})
            if addDiscordID:
                # hours missing from the raw data count as discords
                curr_building['is_discord'] = curr_building['is_discord'].fillna(1)
                curr_building['is_discord'] = curr_building['is_discord'].astype('int64')
                curr_building = curr_building.rename(
                    columns={'is_discord':
                             ('is_discord_' + str(building))})
            curr_building = curr_building.set_index('timestamp')
            # appending all buildings
            collector_list.append(curr_building)
        df_result = pd.concat(collector_list, axis=1)
        df_result = df_result.sort_index()
        return df_result
| 14,877 | 39.210811 | 146 | py |
aldiplusplus | aldiplusplus-main/anomaly_detection.py | import warnings
import os
import sys
import logging
import yaml
import wandb
import torch
import pandas as pd
from sklearn.cluster import SpectralClustering, KMeans
from sklearn.metrics import silhouette_score
from datetime import timedelta
from collections import Counter
from matplotlib import pyplot as plt
from utils import load_variable, save_variable, get_data_chunks
from vae import VAE
from data_import_ashrae import DataImportAshrae
def warn(*args, **kwargs):
    """No-op sink that swallows every warning call."""

# globally silence warnings by replacing warnings.warn with the no-op
warnings.warn = warn
# load config file from CLI (first positional argument is the YAML path)
with open(str(sys.argv[1]), "r") as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
# extract parameters from config file
name = config["name"]
seed = config["seed"]
meter_type = config["meter_type"]
data_folds = config["data_folds"]
num_input = config["num_input"]
latent_dim = config["latent_dim"]
batch_size = config["batch_size"]
hidden_size = config["hidden_size"]
learning_rate = config["learning_rate"]
epochs = config["epochs"]
# global variables
k_range = range(2, 11)  # candidate cluster counts for the silhouette sweep
# starting wandb (experiment tracking; config values mirrored for the run)
wandb.init(project=name, entity="matiasqr")
config = wandb.config
config.data_folds = data_folds
config.latent_dim = latent_dim
config.batch_size = batch_size
config.hidden_size = hidden_size
config.learning_rate = learning_rate
config.epochs = epochs
# seed both CPU and CUDA RNGs for reproducibility
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# load data
print("Loading data ...")
df_metadata = DataImportAshrae().get_meta_data()
site_list = df_metadata["site_id"].unique()
site_list = [1] # DEBUG
for site in site_list:
    print(f"Site {site} ...")
    # one row per (building, day): 24 hourly readings + building_id column
    df_all = DataImportAshrae().get_daily_profiles(meter_type, [site])
    # prepare site data
    train_folds, test_folds, scaler = get_data_chunks(df_all, folds=data_folds)
    df_exportable = {}
    for fold in range(0, data_folds):
        train_loader = torch.utils.data.DataLoader(
            train_folds[fold].to_numpy(),
            batch_size=batch_size,
            shuffle=True,
            worker_init_fn=seed,
            drop_last=True,
        )
        test_loader = torch.utils.data.DataLoader(
            test_folds[fold].to_numpy(),
            batch_size=batch_size,
            shuffle=True,
            worker_init_fn=seed,
            drop_last=True,
        )
        # model
        model = VAE(num_input, latent_dim, hidden_size).to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        wandb.watch(model)
        # training
        print("Training model ...")
        codes = dict(mu=list(), log_sigma2=list())
        for epoch in range(0, epochs + 1):
            # Training (epoch 0 is evaluation-only to log the untrained loss)
            if epoch > 0:
                model.train()
                train_loss = 0
                for _, x in enumerate(train_loader):
                    # last column is building_id -> dropped before the model
                    x = x[:,0:-1].to(device)
                    # forward
                    x_hat, mu, logvar = model(x.float())
                    loss = model.loss_function(x_hat.float(), x.float(), mu, logvar)
                    train_loss += loss.item()
                    # backward
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                # log
                wandb.log({f"train_loss_site{site}_fold{fold}": train_loss / len(train_loader.dataset)})
            # Testing
            means, logvars, labels = list(), list(), list()
            with torch.no_grad():
                model.eval()
                test_loss = 0
                for _, x in enumerate(test_loader):
                    x = x[:,0:-1].to(device)
                    # forward
                    x_hat, mu, logvar = model(x.float())
                    test_loss += model.loss_function(
                        x_hat.float(), x.float(), mu, logvar
                    ).item()
                    # log
                    means.append(mu.detach())
                    logvars.append(logvar.detach())
                # log
                codes["mu"].append(torch.cat(means))
                codes["log_sigma2"].append(torch.cat(logvars))
                test_loss /= len(test_loader.dataset)
                wandb.log({f"test_loss_site{site}_fold{fold}": test_loss})
        # end of training loop
        # latent space clustering with different k (silhouette sweep, logged only)
        print("Latent space clustering ...")
        mu, _ = model.encode(torch.from_numpy(test_folds[fold].iloc[:,0:-1].to_numpy()).float())
        ssi_list = []
        for k in k_range:
            clust_algo = KMeans(n_clusters=k, random_state=seed).fit(mu.detach())
            labels = clust_algo.predict(mu.detach())
            ssi = silhouette_score(mu.detach(), labels)
            ssi_list.append(ssi)
            wandb.log({f"ssi_site{site}_fold{fold}": ssi, 'k': k})
        # latent space clustering with unique k
        # NOTE(review): k is hard-coded; presumably chosen from the sweep above
        k = 2 # NOTE: replace accordingly
        clust_algo = KMeans(n_clusters=k, random_state=seed).fit(mu.detach())
        labels = clust_algo.predict(mu.detach())
        # find the cluster with the least number of members (assumed anomalous)
        print("Finding anomalies ...")
        dict_label_members = Counter(labels)
        min_cluster = min(dict_label_members, key=dict_label_members.get)
        # get indices of days that are members of this cluster
        test_data_label = test_folds[fold].copy()
        test_data_label['label'] = labels # append cluster label to data
        test_data_label = test_data_label[test_data_label['label'] == min_cluster]
        df_pred_labels = test_data_label.copy().reset_index(drop=False).rename(columns={"index":"timestamp"})[["timestamp","building_id"]]
        df_pred_labels["is_discord"] = 1
        df_pred_labels["meter"] = meter_type
        # use proper format following original train data
        # NOTE(review): get_train_data is not defined on DataImportAshrae in
        # this codebase snapshot — confirm the intended method
        df_left_keys = DataImportAshrae().get_train_data()
        df_left_keys["timestamp"] = df_left_keys["timestamp"].astype("datetime64[ns]")
        df_exportable[fold] = pd.merge(df_left_keys, df_pred_labels, how="left", on=["building_id", "meter", "timestamp"])
        df_exportable[fold]["is_discord"] = df_exportable[fold]["is_discord"].fillna(0) # NaNs are padded with 0
        df_exportable[fold]["is_discord"] = df_exportable[fold]["is_discord"].astype("int8")
        print(f"Transforming {(df_exportable[fold][df_exportable[fold]['is_discord'] == 1]).shape[0]} daily discords to hourly ...")
        # fill out remaining hours of a discord day as discords
        for idx, row in df_exportable[fold][df_exportable[fold]["is_discord"] == 1].iterrows():
            for h in range(1, 24):
                new_time = row["timestamp"] + timedelta(hours=h)
                base_idx = df_exportable[fold].index[(df_exportable[fold]["timestamp"] == new_time) & (df_exportable[fold]["meter"] == row["meter"]) & (df_exportable[fold]["building_id"] == row["building_id"])]
                df_exportable[fold].loc[base_idx, "is_discord"] = 1
    # end of data_folds loop
    print("Merging all folds discords ...")
    # element-wise OR: a row is a discord if any fold flagged it
    df_final_exportable = df_exportable[0].copy()
    for fold in range(0, data_folds):
        df_final_exportable["is_discord"] = df_final_exportable["is_discord"] | df_exportable[fold]["is_discord"]
    # export here now the 'is_discord'
    df_final_exportable["is_discord"].to_csv(f'data/pred_discord/discords_{name}.csv', index=False)
    print(df_final_exportable[df_final_exportable["is_discord"] == 1])
    # TODO: only export site for each building_site
# end of site loop
aldiplusplus | aldiplusplus-main/preprocess_modeling.py | import gc
import sys
import logging
import yaml
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from utils import timer, load_data, reduce_mem_usage
from encoders import GaussianTargetEncoder
# define groupings and corresponding priors
groups_and_priors = {
# single encodings
("hour",): None,
("weekday",): None,
("month",): None,
("building_id",): None,
("primary_use",): None,
("site_id",): None,
# ("meter",): None,
# # second-order interactions
# ("meter", "hour"): ["gte_meter", "gte_hour"],
# ("meter", "weekday"): ["gte_meter", "gte_weekday"],
# ("meter", "month"): ["gte_meter", "gte_month"],
# ("meter", "building_id"): ["gte_meter", "gte_building_id"],
# ("meter", "primary_use"): ["gte_meter", "gte_primary_use"],
# ("meter", "site_id"): ["gte_meter", "gte_site_id"],
# # higher-order interactions with building_id
# ("meter", "building_id", "hour"): ["gte_meter_building_id", "gte_meter_hour"],
# ("meter", "building_id", "weekday"): ["gte_meter_building_id", "gte_meter_weekday"],
# ("meter", "building_id", "month"): ["gte_meter_building_id", "gte_meter_month"],
}
def process_timestamp(df):
    """Replace ``df.timestamp`` in place with the whole number of hours
    elapsed since 2016-01-01 00:00 (floor division, as a float)."""
    parsed = pd.to_datetime(df.timestamp)
    origin = pd.to_datetime("2016-01-01")
    df.timestamp = (parsed - origin).dt.total_seconds() // 3600
def process_weather(
    df, dataset, fix_timestamps=True, interpolate_na=True, add_na_indicators=True
):
    """Clean a site-level weather dataframe.

    Steps (each optional):
      1. fix_timestamps: shift ``timestamp`` (hours since 2016-01-01) by a
         hard-coded per-site GMT offset so readings line up with local time.
      2. interpolate_na: reindex each site to the full hourly range of the
         split (train: 0..8783, i.e. leap-year 2016; test: 8784..26303) and
         spline-interpolate gaps column by column.
      3. add_na_indicators: add boolean ``had_<col>`` columns marking where a
         value was originally present.

    IMPORTANT: step 1 mutates ``df`` in place, but steps 2-3 build and return
    a NEW dataframe — callers must use the return value, e.g.
    ``weather = process_weather(weather, "train")``.

    Args:
        df: weather dataframe with ``site_id`` and integer-hour ``timestamp``.
        dataset: "train" or "test"; selects the reindex range.

    Returns:
        Cleaned dataframe with remaining NaNs filled with -1.

    Raises:
        ValueError: if ``dataset`` is neither "train" nor "test".
    """
    if fix_timestamps:
        # NOTE(review): offsets assume exactly 16 sites indexed 0..15 — a site
        # id outside this range would map to NaN; confirm against the data.
        site_GMT_offsets = [-5, 0, -7, -5, -8, 0, -5, -5, -5, -6, -7, -5, 0, -6, -5, -5]
        GMT_offset_map = {site: offset for site, offset in enumerate(site_GMT_offsets)}
        df.timestamp = df.timestamp + df.site_id.map(GMT_offset_map)
    if interpolate_na:
        site_dfs = []
        for site_id in df.site_id.unique():
            # Make sure that we include all possible hours so that we can interpolate evenly
            if dataset == "train":
                site_df = (
                    df[df.site_id == site_id]
                    .set_index("timestamp")
                    .reindex(range(8784))
                )
            elif dataset == "test":
                site_df = (
                    df[df.site_id == site_id]
                    .set_index("timestamp")
                    .reindex(range(8784, 26304))
                )
            else:
                raise ValueError(f"dataset={dataset} not recognized")
            # reindexing leaves site_id NaN on inserted rows; restore it
            site_df.site_id = site_id
            for col in [c for c in site_df.columns if c != "site_id"]:
                if add_na_indicators:
                    site_df[f"had_{col}"] = ~site_df[col].isna()
                # cubic spline, extrapolating at both ends of the range
                site_df[col] = site_df[col].interpolate(
                    limit_direction="both",
                    method="spline",
                    order=3,
                )
                # Some sites are completely missing some columns, so use this fallback
                site_df[col] = site_df[col].fillna(df[col].median())
            site_dfs.append(site_df)
        df = pd.concat(
            site_dfs
        ).reset_index()  # make timestamp back into a regular column
    if add_na_indicators:
        # second pass: flag any columns still containing NaNs after step 2
        for col in df.columns:
            if df[col].isna().any():
                df[f"had_{col}"] = ~df[col].isna()
    return df.fillna(-1)  # .set_index(["site_id", "timestamp"])
def add_lag_feature(df, window=3, group_cols="site_id", lag_cols=["air_temperature"]):
    """Append centered rolling statistics (mean, 95th pct as "max", 5th pct as
    "min", std) per group for each column in ``lag_cols``, as float16 columns
    named ``<col>_<stat>_lag<window>``.

    NOTE(review): the reset_index frames are assigned back by positional
    (RangeIndex) alignment — this assumes ``df`` has a default RangeIndex and
    is already sorted by ``group_cols``; confirm at the call site.
    """
    windows = df.groupby(group_cols)[lag_cols].rolling(
        window=window, min_periods=0, center=True
    )
    stats_by_name = {
        "mean": windows.mean().reset_index().astype(np.float16),
        "max": windows.quantile(0.95).reset_index().astype(np.float16),
        "min": windows.quantile(0.05).reset_index().astype(np.float16),
        "std": windows.std().reset_index().astype(np.float16),
    }
    for col in lag_cols:
        for stat_name, frame in stats_by_name.items():
            df[f"{col}_{stat_name}_lag{window}"] = frame[col]
def add_features(df):
    """Add time, cyclic, building-interaction and holiday features in place.

    Expects columns ``ts`` (datetime64), ``timestamp`` (hours since
    2016-01-01), ``year_built`` and ``building_id``.  Mutates ``df`` and
    returns None.
    """
    # time features
    df["hour"] = df.ts.dt.hour
    df["weekday"] = df.ts.dt.weekday
    df["month"] = df.ts.dt.month
    df["year"] = df.ts.dt.year
    # time interactions
    df["weekday_hour"] = df.weekday.astype(str) + "-" + df.hour.astype(str)
    # apply cyclic encoding of periodic features (periods in hours; 30.4 * 24
    # approximates the mean month length)
    df["hour_x"] = np.cos(2 * np.pi * df.timestamp / 24)
    df["hour_y"] = np.sin(2 * np.pi * df.timestamp / 24)
    df["month_x"] = np.cos(2 * np.pi * df.timestamp / (30.4 * 24))
    df["month_y"] = np.sin(2 * np.pi * df.timestamp / (30.4 * 24))
    df["weekday_x"] = np.cos(2 * np.pi * df.timestamp / (7 * 24))
    df["weekday_y"] = np.sin(2 * np.pi * df.timestamp / (7 * 24))
    # meta data features: building age baseline of 1900
    df["year_built"] = df["year_built"] - 1900
    # bulding_id interactions
    # bm_ = df.building_id.astype(str) + "-" + df.meter.astype(str) + "-"
    bm_ = df.building_id.astype(str) + "-"
    df["building_weekday_hour"] = bm_ + df.weekday_hour
    df["building_weekday"] = bm_ + df.weekday.astype(str)
    df["building_month"] = bm_ + df.month.astype(str)
    df["building_hour"] = bm_ + df.hour.astype(str)
    # df["building_meter"] = bm_
    # get holidays
    dates_range = pd.date_range(start="2015-12-31", end="2019-01-01")
    us_holidays = calendar().holidays(start=dates_range.min(), end=dates_range.max())
    # FIX: astype("datetime64") without a unit is rejected by pandas >= 2.0;
    # "datetime64[ns]" is what the unit-less spelling used to mean.
    df["is_holiday"] = (df.ts.dt.date.astype("datetime64[ns]").isin(us_holidays)).astype(
        np.int8
    )
if __name__ == "__main__":
    # load config file from CLI
    with open(str(sys.argv[1]), "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    algorithm = config["algorithm"]
    discord_file = config["discord_file"]
    data_location = config["data_location"]
    discord_location = config["discord_location"]
    output_location = config["output_location"]
    # logging file
    logging.basicConfig(
        filename=algorithm + ".log",
        level=logging.INFO,
        format="%(asctime)s:%(levelname)s:%(message)s",
    )
    logging.info(f"Experiment: {algorithm}")
    with timer("Loading data"):
        logging.info("Loading data")
        train, test = load_data("input", data_location=data_location)
        building_meta = load_data("meta", data_location=data_location)
        train_weather, test_weather = load_data("weather", data_location=data_location)
    with timer("Process timestamp"):
        logging.info("Process timestamp")
        train["ts"] = pd.to_datetime(train.timestamp)
        test["ts"] = pd.to_datetime(test.timestamp)
        process_timestamp(train)
        process_timestamp(test)
        process_timestamp(train_weather)
        process_timestamp(test_weather)
    with timer("Process weather"):
        logging.info("Process weather")
        # BUG FIX: process_weather's reindex/interpolate step builds a NEW
        # frame and returns it — only the timestamp shift happens in place.
        # The return value was previously discarded, silently losing all the
        # interpolated weather data and the had_* indicator columns.
        train_weather = process_weather(train_weather, "train")
        test_weather = process_weather(test_weather, "test")
        for window_size in [7, 73]:
            add_lag_feature(train_weather, window=window_size)
            add_lag_feature(test_weather, window=window_size)
    with timer("Combine data"):
        logging.info("Combine data")
        train = pd.merge(train, building_meta, "left", "building_id")
        train = pd.merge(train, train_weather, "left", ["site_id", "timestamp"])
        test = pd.merge(test, building_meta, "left", "building_id")
        test = pd.merge(test, test_weather, "left", ["site_id", "timestamp"])
    with timer("Flag bad meter readings"):
        logging.info("Flag bad meter readings")
        # loading discords/outliers
        is_bad_meter_reading = load_data(
            "discord", discord_location=discord_location, discord_file=discord_file
        ).values
        train["is_bad_meter_reading"] = is_bad_meter_reading
    with timer("Correct site 0 meter reading"):
        logging.info("Correct site 0 meter reading")
        # 0.2931 is the kBTU->kWh factor — presumably site 0 electricity is
        # reported in kBTU; TODO confirm against the dataset description.
        train.loc[(train.site_id == 0) & (train.meter == 0), "meter_reading"] *= 0.2931
    with timer("Add base features to train"):
        logging.info("Add base features to train")
        add_features(train)
    with timer("Add base features to test"):
        logging.info("Add base features to test")
        add_features(test)
    with timer("Free up memory"):
        logging.info("Free up memory")
        del train_weather, test_weather, building_meta
        gc.collect()
    with timer("Reduce memory usage"):
        logging.info("Reduce memory usage")
        train, _ = reduce_mem_usage(train, skip_cols=["ts", "timestamp"], verbose=False)
        test, _ = reduce_mem_usage(test, skip_cols=["ts", "timestamp"], verbose=False)
    with timer("Add target encoding features - train"):
        logging.info("Add target encoding features - train")
        train["target"] = np.log1p(train.meter_reading)
        # test has no labels; use the global mean so the encoders' prior works
        test["target"] = np.mean(train["target"])
        features = []
        # fit encoders only on readings not flagged as discords
        good_train = train[train.is_bad_meter_reading.values == 0].copy()
        good_train_ = good_train.copy()
        for group_cols, prior_cols in groups_and_priors.items():
            print(group_cols)
            features.append(f"gte_{'_'.join(group_cols)}")
            gte = GaussianTargetEncoder(list(group_cols), "target", prior_cols)
            good_train[features[-1]] = gte.fit_transform(good_train)
            train[features[-1]] = gte.transform(train)
    with timer("Save as pickle - train"):
        logging.info("Save as pickle - train")
        # FIX: pass axis as a keyword; the bare positional form was removed
        # in pandas 2.0.
        train.drop(["ts", "target"], axis=1, inplace=True)
        train, _ = reduce_mem_usage(train, skip_cols=["ts", "timestamp"], verbose=False)
        train.to_pickle(f"{output_location}/train_clean_{algorithm}.pkl")
    with timer("Free up memory"):
        logging.info("Free up memory")
        del train, good_train, gte
        gc.collect()
    with timer("Add target encoding features - test"):
        logging.info("Add target encoding features - test")
        # re-fit the encoders (they were deleted above to save memory)
        features = []
        good_train = good_train_
        for group_cols, prior_cols in groups_and_priors.items():
            print(group_cols)
            features.append(f"gte_{'_'.join(group_cols)}")
            gte = GaussianTargetEncoder(list(group_cols), "target", prior_cols)
            good_train[features[-1]] = gte.fit_transform(good_train)
            test[features[-1]] = gte.transform(test)
    with timer("Free up memory"):
        logging.info("Free up memory")
        del good_train, good_train_, gte
        gc.collect()
    with timer("Save as pickle - test"):
        logging.info("Save as pickle - test")
        test.drop(["ts", "target"], axis=1, inplace=True)
        test, _ = reduce_mem_usage(test, skip_cols=["ts", "timestamp"], verbose=False)
        gc.collect()
        test.to_pickle(f"{output_location}/test_clean_{algorithm}.pkl")
| 10,908 | 38.241007 | 94 | py |
aldiplusplus | aldiplusplus-main/encoders.py | import numpy as np
class FastLabelEncoder():
    """Map a categorical variable into {0, 1, ..., n_categories - 1}.

    Codes follow the sorted order of the unique values seen in ``fit``.
    Note: https://stackoverflow.com/questions/45321999/how-can-i-optimize-label-encoding-for-large-data-sets-sci-kit-learn?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
    """
    def __init__(self):
        # value -> integer code; populated by fit()
        self.lookup = None

    def fit(self, x):
        """Learn the value -> code mapping from array ``x``."""
        categories = np.unique(x)
        self.lookup = {category: code for code, category in enumerate(categories)}

    def transform(self, x):
        """Return an array of codes for ``x`` (unseen values map to None)."""
        return np.vectorize(self.lookup.get)(x)

    def fit_transform(self, x):
        """Fit on ``x`` and return its codes."""
        self.fit(x)
        return self.transform(x)
class GaussianTargetEncoder():
    """Target encoder with Gaussian (empirical-Bayes) shrinkage.

    For each group defined by ``group_cols``, the group's MLE mean of
    ``target_col`` is shrunk toward a prior mean, weighted by the group's
    precision (count / variance) against ``prior_precision``.
    """
    def __init__(self, group_cols, target_col="target", prior_cols=None):
        # group_cols: str or list of column names defining the grouping
        self.group_cols = group_cols
        # target_col: column whose mean is encoded
        self.target_col = target_col
        # prior_cols: columns averaged row-wise as the prior mean;
        # None -> global target mean
        self.prior_cols = prior_cols
    def _get_prior(self, df):
        """Return the per-row prior mean for ``df``."""
        if self.prior_cols is None:
            # constant prior: the global mean of the target
            prior = np.full(len(df), df[self.target_col].mean())
        else:
            # row-wise mean across the given prior columns
            prior = df[self.prior_cols].mean(1)
        return prior
    def fit(self, df):
        """Compute per-group count, mean, variance and prior-mean stats."""
        self.stats = df.assign(mu_prior=self._get_prior(df), y=df[self.target_col])
        self.stats = self.stats.groupby(self.group_cols).agg(
            n = ("y", "count"),
            mu_mle = ("y", np.mean),
            sig2_mle = ("y", np.var),
            mu_prior = ("mu_prior", np.mean),
        )
    def transform(self, df, prior_precision=1e-6, stat_type="mean"):
        """Map each row of ``df`` to its group's shrunk statistic.

        stat_type: "mean" (precision-weighted posterior mean), "var"
        (posterior variance, 1/precision) or "precision".
        Rows whose group was unseen in fit (or with non-finite values,
        e.g. zero-variance groups) fall back to the prior.
        """
        # posterior precision = prior precision + n / sigma^2
        precision = prior_precision + self.stats.n/self.stats.sig2_mle
        if stat_type == "mean":
            numer = prior_precision*self.stats.mu_prior\
                + self.stats.n/self.stats.sig2_mle*self.stats.mu_mle
            denom = precision
        elif stat_type == "var":
            numer = 1.0
            denom = precision
        elif stat_type == "precision":
            numer = precision
            denom = 1.0
        else:
            raise ValueError(f"stat_type={stat_type} not recognized.")
        # group key -> encoded value
        mapper = dict(zip(self.stats.index, numer / denom))
        if isinstance(self.group_cols, str):
            keys = df[self.group_cols].values.tolist()
        elif len(self.group_cols) == 1:
            keys = df[self.group_cols[0]].values.tolist()
        else:
            # multi-column groups are keyed by tuples, matching the MultiIndex
            keys = zip(*[df[x] for x in self.group_cols])
        # unseen keys become None -> NaN after astype(float)
        values = np.array([mapper.get(k) for k in keys]).astype(float)
        prior = self._get_prior(df)
        # replace NaN/inf encodings with the prior for those rows
        values[~np.isfinite(values)] = prior[~np.isfinite(values)]
        return values
    def fit_transform(self, df, *args, **kwargs):
        """Fit on ``df`` and return its encoding."""
        self.fit(df)
        return self.transform(df, *args, **kwargs)
| 2,816 | 33.353659 | 199 | py |
aldiplusplus | aldiplusplus-main/GMM_training.py | import pandas as pd
import numpy as np
from sklearn.mixture import GaussianMixture
class GMMTraining():
    """Fit Gaussian mixtures with 1..10 components to 1-D data, keep the
    AIC-best model, and evaluate its density on a 1000-point grid over [0, 1].
    """
    def __init__(self, values):
        # reshape to the (n_samples, 1) layout sklearn expects
        self.values = np.array([[v] for v in values])
        self.x_values = np.linspace(0, 1, 1000)
        # candidate component counts
        component_counts = np.arange(1, 11)
        self.models = [
            GaussianMixture(n_components).fit(self.values)
            for n_components in component_counts
        ]
        # information criteria for model selection (AIC drives the choice)
        self.AIC = [model.aic(self.values) for model in self.models]
        self.BIC = [model.bic(self.values) for model in self.models]
        self.best_GMM = self.models[np.argmin(self.AIC)]
        grid = self.x_values.reshape(-1, 1)
        self.logprob = self.best_GMM.score_samples(grid)
        self.responsibilities = self.best_GMM.predict_proba(grid)
        self.pdf = np.exp(self.logprob)
        # per-component densities: responsibilities scaled by the mixture pdf
        self.pdf_individual = self.responsibilities * self.pdf[:, np.newaxis]

    def get_pdf(self):
        """Mixture density evaluated on the [0, 1] grid."""
        return self.pdf

    def get_individual_pdf(self):
        """Per-component densities on the [0, 1] grid."""
        return self.pdf_individual

    def get_x_values(self):
        """The evaluation grid (1000 points in [0, 1])."""
        return self.x_values
| 1,803 | 33.037736 | 89 | py |
aldiplusplus | aldiplusplus-main/aldi.py | from scipy import stats
import stumpy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import calmap # not working with latest pandas
import calplot
import joypy
import sys
import time
import datetime as dt
class ALDI():
    """ALDI — Automated Load profile Discord Identification.

    Computes daily matrix profiles (via stumpy) for a portfolio of buildings
    and runs per-day two-sample Kolmogorov-Smirnov tests against the pooled
    distribution of the same weekday to flag discord (anomalous) days.
    """
    def __init__(self, df_meters, df_metadata, m=24, col_id='building_id', site_id='', meter_id='', verbose=False):
        """
        Args:
            df_meters: sorted NxM dataframe with M buildings and N rows with hourly
                timestamp as indices
            df_metadata: dataframe with metadata regarding the buildings
            m: hourly window size, one day = 24
            col_id: string name of the column with building ids in df_meters and df_metadata
            site_id: id of the current portfolio being analyzed
            meter_id: id of the current sensor reading being analyzed
            verbose: boolean value to enable debugging printing
        """
        self.df_meters = df_meters.copy()
        self.df_metadata = df_metadata.copy()
        self.base_timestamps = df_meters.copy().index
        self.m = m
        self.col_id = col_id
        self.site_id = site_id
        self.meter_id = meter_id
        self.verbose = verbose
        # auxiliary variables needed
        self.name_list = df_meters.columns
        ##### execute ALDI
        self.mp_adj, self.mp_ind = self.get_mp()  # get matrix profile and indices
        # merge information to one single dataframe
        self.df_result, self.num_days, self.num_buildings = self.data_reconstruction()
        self.df_result_meta = self.add_metadata()
        # calculate k-test
        self.df_ks_test = self.k_test()
        self.df_ks_test_det = None  # filled by get_discords()

    def zero_coun(self):  # TODO: implement (name keeps the original typo for compatibility)
        pass

    def get_mp(self):
        """
        Calculates matrix profile and matrix profile indices for a time-stamp
        sorted dataframe where the columns are buildings from the same site
        and rows are meter readings.
        Returns:
            mp_adj: dataframe with the matrix profile values
            mp_ind: dataframe with the matrix profile indices
        """
        mp_adj = pd.DataFrame(columns=self.name_list)
        mp_ind = pd.DataFrame(columns=self.name_list)
        for col in self.name_list:
            bldg = self.df_meters[col]
            mp = stumpy.stump(bldg, m=self.m)
            # append np.nan to matrix profile to allow plotting against raw data
            madj = np.append(mp[:, 0], np.zeros(self.m - 1) + np.nan)
            mind = np.append(mp[:, 1], np.zeros(self.m - 1) + np.nan)
            # save mp information
            mp_adj[col] = madj
            mp_ind[col] = mind
        return mp_adj, mp_ind

    def midnight_mp(self):
        """
        Picks daily matrix profile at midnight
        """
        # use only available timestamps
        df_e = self.df_meters.copy()
        df_mp = self.mp_adj.set_index(df_e.index)
        df_mpind = self.mp_ind.set_index(df_e.index)
        df_e_0 = df_e[df_e.index.hour == 0]
        df_mp_0 = df_mp[df_mp.index.hour == 0]
        df_mpind_0 = df_mpind[df_mpind.index.hour == 0]
        if self.verbose:
            print(f'Midnight MP values:\n{df_e_0}')
        return df_e_0, df_mp_0, df_mpind_0

    def data_reconstruction(self):
        """
        Puts together calculated values into one single dataframe
        """
        df_result = pd.DataFrame(columns=['raw', 'mp', 'mp_ind'])
        df_e_0, df_mp_0, df_mpind_0 = self.midnight_mp()
        num_days = df_e_0.shape[0]
        num_buildings = df_e_0.shape[1]
        print(f'num of days: {num_days}')  # debug
        # combining the matrix profile and indices values (row-major flatten:
        # one row per day per building)
        df_result['raw'] = df_e_0.values.reshape(num_days * num_buildings)
        df_result['mp'] = df_mp_0.values.reshape(num_days * num_buildings)
        df_result['mp_ind'] = df_mpind_0.values.reshape(num_days * num_buildings)
        if self.verbose:
            print(f'Combining raw and calculated values:\n{df_result}')
        df_names = []
        df_dates = []
        days = []
        # first calendar day of the analyzed period
        self.year = df_e_0.index[0].year
        self.month = df_e_0.index[0].month
        self.day = df_e_0.index[0].day
        # combining the building names and dates
        for i in range(num_days):
            df_names = np.append(df_names, np.array(self.name_list))
            days = np.append(days, np.ones(len(self.name_list)) * i)
        for i in range(len(days)):
            df_dates = df_dates + \
                [dt.datetime(year=self.year, month=self.month, day=self.day) +
                 dt.timedelta(days=days[i])]
        df_result[self.col_id] = df_names
        df_result['date'] = df_dates
        if self.verbose:
            print(f'Updating the combined values with building names and full dates:\n{df_result}')
        # combining the breakdown of the dates
        df_month = []
        df_daytype = []
        df_day = []
        for i in range(len(df_result)):
            df_month = np.append(df_month, df_result.date[i].strftime('%b'))
            df_daytype = np.append(df_daytype, df_result.date[i].strftime('%a'))
            df_day = np.append(df_day, df_result.date[i].strftime('%d'))
        df_result['month'] = df_month
        df_result['daytype'] = df_daytype
        df_result['day'] = df_day
        if self.verbose:
            print(f'Updating the combined values with broken down dates:\n{df_result}')
        return df_result, num_days, num_buildings

    def add_metadata(self):
        """
        Combines the processed dataframe with matrix profile calculation
        alongside the metadata file
        """
        df_result_meta = self.df_result.merge(self.df_metadata, on=self.col_id)
        if self.verbose:
            print(f'Merging available metadata:\n{df_result_meta.head()}')
        return df_result_meta

    def daytype_dist(self):
        """Computes daytype distributions (pooled MP values per weekday)"""
        day_labels = {'mon': 'Mon', 'tue': 'Tue', 'wed': 'Wed', 'thu': 'Thu',
                      'fri': 'Fri', 'sat': 'Sat', 'sun': 'Sun'}
        return {key: self.df_result.mp[self.df_result.daytype == label]
                for key, label in day_labels.items()}

    def k_test(self):
        """Computes a two-sample K-S test of each day's MP values against the
        pooled distribution of its weekday."""
        daytype_dist = self.daytype_dist()  # compute daily distributions
        # FIX: ``pd.datetime`` was removed from pandas; use the stdlib
        # datetime class (already imported as ``dt``).
        start = dt.datetime(year=self.year, month=self.month, day=self.day)
        ks_test = pd.DataFrame(columns=['D', 'p'],
                               index=pd.date_range(start, periods=self.num_days))
        # weekday() -> key into daytype_dist (0 = Monday), replacing the
        # former 7-branch if-chain
        weekday_keys = ('mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun')
        for i in pd.date_range(start, periods=self.num_days):
            events = self.df_result.mp[self.df_result.date == i]
            test = stats.ks_2samp(events, daytype_dist[weekday_keys[i.weekday()]])
            ks_test.D[i] = test.statistic
            ks_test.p[i] = test.pvalue
        if self.verbose:
            print(f'K-S test dataframe:\n{ks_test}')
        return ks_test

    def get_rejected_days(self):
        """
        Calculates the rejected days at commonly used p-values
        Returns:
            p_nr: dataframe with the total number of rejected days at
                the given p-value(s)
        """
        ks_test = self.df_ks_test.copy()
        p_nr = pd.DataFrame(columns=['p', 'nr'])
        # by default compute commonly used p-values
        p_nr.p = [0.01, 0.05, 0.1, 0.15, 0.2]
        p_nr.nr = np.zeros(len(p_nr.p))
        for i in range(len(p_nr)):
            # a day is rejected when its K-S p-value falls below the threshold
            p_nr.nr[i] = np.sum(np.where(ks_test['p'] < p_nr.p[i], 1, 0))
        return p_nr

    def get_discords(self, pvalue=0.01):
        """Calculates (and plots) the discords at a given p-value"""
        # filter based on pvalue
        ks_test = self.df_ks_test.copy()
        ks_test['det'] = np.where(ks_test['p'] < pvalue, 1, 0)
        discord = ks_test[ks_test['det'] == 1]
        # plot the distribution of D statistics among discord days
        sns.set(context='notebook', style='whitegrid', palette='deep', font='sans-serif', font_scale=1.8)
        plt.figure(figsize=[3, 5])
        sns.boxplot(data=discord['D'], orient='vertical')
        plt.ylim(0, 1)
        plt.xlabel(f'Site {self.col_id}')
        plt.ylabel('D')
        plt.savefig(f'img/discords_{pvalue}-{self.site_id}-{self.meter_id}.png', bbox_inches='tight', format='PNG')
        plt.close()
        # sort the dataframe and calculate quantiles
        discord_sort = discord.sort_values(by='D')
        discord_q = self.get_discords_quantiles(discord_sort)
        self.df_ks_test_det = ks_test
        return discord_sort, discord_q

    def get_result_df(self, p_value=0.01):
        """Returns an hourly 0/1 discord flag dataframe, one column per building"""
        # prepare index and column for resulting dataframes
        hourly_timestamps = self.base_timestamps.copy()
        all_bdg = self.name_list.copy()
        columns = [f'is_discord_{x}' for x in all_bdg]
        # filter based on p_value
        df_daily_is_discord = self.df_ks_test.copy()
        df_daily_is_discord['is_discord'] = np.where(
            df_daily_is_discord['p'] < p_value, 1, 0)
        # hand waving specialization (caution) of discords for all bdgs:
        # the same site-level flag is copied to every building column
        for col in columns:
            df_daily_is_discord[col] = df_daily_is_discord['is_discord']
        df_daily_is_discord = df_daily_is_discord.drop(['p', 'D', 'is_discord'], axis=1)
        df_hourly_is_discord = pd.DataFrame(index=hourly_timestamps)
        # copy daily dataframe to hourly dataframe
        df_hourly_is_discord['day'] = df_hourly_is_discord.index.date
        df_daily_is_discord.index = df_daily_is_discord.index.date
        df_hourly_is_discord = df_hourly_is_discord.join(df_daily_is_discord,
                                                         on='day', how='left')
        df_hourly_is_discord = df_hourly_is_discord.drop(['day'], axis=1)
        df_hourly_is_discord = df_hourly_is_discord.astype('int8')
        return df_hourly_is_discord

    def get_discords_quantiles(self, discord_sorted):
        """Calculates and plots the IQR of the top discords (z-scored)"""
        df_e = self.df_meters.copy()
        df_e_z = pd.DataFrame(stats.zscore(df_e, axis=0, nan_policy='omit'), index=df_e.index)
        for i in discord_sorted.index[-3:]:  # top 3 discords by D
            discord_temp = df_e_z[i:i + dt.timedelta(hours=self.m - 1)]  # 23 for daily
            # print(i, self.df_ks_test.D[i], self.df_ks_test.p[i])
            discord_q = pd.DataFrame(columns=['q1', 'q2', 'q3'], index=discord_temp.index)
            for j in range(len(discord_temp)):
                # replaced np.percentile with nanpercentile
                discord_q['q1'][j] = np.nanpercentile(discord_temp.iloc[j, :], 25)
                discord_q['q2'][j] = np.nanpercentile(discord_temp.iloc[j, :], 50)
                discord_q['q3'][j] = np.nanpercentile(discord_temp.iloc[j, :], 75)
            sns.set(style='white', font_scale=1.5)
            plt.figure(figsize=(5, 2))
            plt.plot(discord_q.q1, '--', color='tomato')
            plt.plot(discord_q.q2, color='red')
            plt.plot(discord_q.q3, '--', color='tomato')
            plt.yticks([-10, 0, 10, 20, 30])
            plt.xticks([])
            plt.ylim(-18, 35)
            plt.savefig(f'img/discord_quantiles-{self.site_id}-{self.meter_id}.png', bbox_inches='tight', format="PNG")
            plt.close()
        return discord_q

    def plot_mp_dist(self, variable):
        """
        Plots the matrix profile values according to the selected variable
        """
        sns.set(context='notebook',
                style='white',
                palette='deep',
                font='sans-serif',
                font_scale=1.5,
                color_codes=True,
                rc=None)
        if variable == 'day-month':
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            available_months = list(self.df_result.month.unique())
            fig, axes = plt.subplots(len(months), 1, figsize=[20, 5 * len(months)])
            for i, idx in zip(months, range(0, len(months))):
                if i not in available_months:
                    print(f'Month {i} not available on this site')
                    continue
                events = self.df_result[self.df_result.month == i]
                sns.boxplot(x='day', y='mp', data=events, color='lightgray', ax=axes[idx])
                axes[idx].set_title(i)
                # plt.ylim(-0.5,5.5)
                axes[idx].set_xlim(-1, 31)
                axes[idx].set_xlabel('Days of month')
                axes[idx].set_ylabel('Matrix profile')
            fig.tight_layout()
        elif variable == 'daily':
            plt.figure(figsize=[5, 5])
            sns.boxplot(data=self.df_result_meta.mp, color='lightgray', orient='vertical')
            plt.xlabel(variable)
            plt.ylabel('Matrix profile')
        else:
            plt.figure(figsize=[10, 5])
            if variable == 'daytype':
                sns.boxplot(x=variable, y='mp', data=self.df_result_meta, color='lightgray',
                            order=['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'])
            else:
                sns.boxplot(x=variable, y='mp', data=self.df_result_meta, color='lightgray')
            plt.xlabel(variable)
            plt.ylabel('Matrix profile')
        # plt.ylim(-0.5,10)
        plt.savefig(f'img/mp_dist_{variable}-{self.site_id}-{self.meter_id}.png', bbox_inches='tight', format='PNG')
        plt.close()

    def all_plots_mp(self):
        """Plots all mp distribution variants"""
        # mp distribiution
        self.plot_mp_dist('month')
        self.plot_mp_dist('daytype')
        self.plot_mp_dist('day-month')
        self.plot_mp_dist('primary_use')
        self.plot_mp_dist('daily')

    def plot_ks_test_result(self, value='d'):
        """Visualize k-s test as a calendar heatmap"""
        if value == 'd':
            events = pd.Series(self.df_ks_test.D)
            cmap = "YlGnBu_r"
        elif value == 'p':
            events = pd.Series(self.df_ks_test.p)
            cmap = "Greys_r"
        else:
            # NOTE(review): requires get_discords() to have been called first,
            # otherwise df_ks_test_det is still None
            events = pd.Series(self.df_ks_test_det.det)
            cmap = "Greys"
        fig, ax = calplot.calplot(events,
                                  cmap=cmap,
                                  figsize=[20, 4],
                                  daylabels='MTWTFSS',
                                  linewidth=1,
                                  linecolor='grey',
                                  fillcolor='grey')
        plt.savefig(f'img/ks_test_{value}-{self.site_id}-{self.meter_id}.png', bbox_inches='tight', format='PNG')
        plt.close()

    def all_plots_ks(self):
        """Plots all ks-test visualisations"""
        self.plot_ks_test_result('d')
        self.plot_ks_test_result('p')
        self.plot_ks_test_result('det')

    def get_motifs(self, n):
        """Plots top n motifs (low-D days among the below-median-p days)"""
        ks_test = self.df_ks_test.copy()
        median_pvalue = ks_test['p'].median()
        motifs = ks_test[ks_test['p'] <= median_pvalue]
        motifs_sorted = motifs.sort_values(by='D', ascending=False)
        # plot distribution
        sns.set(context='notebook', style='whitegrid', palette='deep', font='sans-serif', font_scale=1.8)
        plt.figure(figsize=[3, 5])
        sns.boxplot(data=motifs_sorted['D'], orient='vertical')
        plt.ylim(0, 1)
        plt.xlabel(f'Site {self.col_id}')
        plt.ylabel('D')
        plt.savefig(f'img/motifs_median-{self.site_id}-{self.meter_id}.png', bbox_inches='tight', format='PNG')
        plt.close()
        # plot motifs
        df_e = self.df_meters.copy()
        # FIX: pass nan_policy='omit' for consistency with
        # get_discords_quantiles — meter data can contain NaNs (the loops
        # below already use nanpercentile for that reason), and the default
        # 'propagate' would turn whole columns into NaN.
        df_e_z = pd.DataFrame(stats.zscore(df_e, axis=0, nan_policy='omit'), index=df_e.index)
        for i in motifs_sorted.index[:n]:
            motif_temp = df_e_z[i:i + dt.timedelta(hours=self.m - 1)]
            print(i, ks_test.D[i], ks_test.p[i])
            motif_q = pd.DataFrame(columns=['q1', 'q2', 'q3'], index=motif_temp.index)
            for j in range(len(motif_temp)):
                # replaced np.percentile with nanpercentile
                motif_q['q1'][j] = np.nanpercentile(motif_temp.iloc[j, :], 25)
                motif_q['q2'][j] = np.nanpercentile(motif_temp.iloc[j, :], 50)
                motif_q['q3'][j] = np.nanpercentile(motif_temp.iloc[j, :], 75)
            sns.set(style='white', font_scale=1.5)
            plt.figure(figsize=(5, 2))
            plt.plot(motif_q.q1, '--', color='grey')
            plt.plot(motif_q.q2, color='k')
            plt.plot(motif_q.q3, '--', color='grey')
            plt.xticks([])
            plt.xlim(i, i + dt.timedelta(hours=23))
            #plt.savefig("Graph" + str(i) +".png", bbox_inches='tight', format="PNG")
            plt.show()
        # plot raw data at motif dates
        for i in motifs_sorted.index[:n]:
            sns.set(style='white', font_scale=1.5)
            # print(i,ks_test.D[i],ks_test.p[i])
            plt.figure(figsize=(5, 2))
            plt.plot(df_e_z[i:i + dt.timedelta(hours=self.m - 1)])
            #plt.yticks([])
            plt.xticks([])
            #plt.xlim(i,i + dt.timedelta(hours=23))
            #plt.savefig("Graph" + str(i) +".png", bbox_inches='tight', format="PNG")
            plt.show()
pytorch_RVAE | pytorch_RVAE-master/sample.py | import argparse
import os
import numpy as np
import torch as t
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from model.rvae import RVAE
if __name__ == '__main__':
    assert os.path.exists('trained_RVAE'), \
        'trained model not found'

    parser = argparse.ArgumentParser(description='Sampler')
    # FIX: argparse's ``type=bool`` treats any non-empty string as True
    # (bool("False") is True), so ``--use-cuda False`` still enabled CUDA.
    # Parse the flag text explicitly; the flag name and default are unchanged.
    parser.add_argument('--use-cuda',
                        type=lambda v: str(v).lower() in ('true', '1', 'yes'),
                        default=True, metavar='CUDA',
                        help='use cuda (default: True)')
    parser.add_argument('--num-sample', type=int, default=10, metavar='NS',
                        help='num samplings (default: 10)')
    args = parser.parse_args()

    batch_loader = BatchLoader('')
    parameters = Parameters(batch_loader.max_word_len,
                            batch_loader.max_seq_len,
                            batch_loader.words_vocab_size,
                            batch_loader.chars_vocab_size)

    rvae = RVAE(parameters)
    rvae.load_state_dict(t.load('trained_RVAE'))
    if args.use_cuda:
        rvae = rvae.cuda()

    # draw num_sample latent vectors and decode each to a 50-token sample
    for iteration in range(args.num_sample):
        seed = np.random.normal(size=[1, parameters.latent_variable_size])
        result = rvae.sample(batch_loader, 50, seed, args.use_cuda)
        print(result)
        print()
pytorch_RVAE | pytorch_RVAE-master/__init__.py | from . import nn_layers
from . import utility
| 46 | 14.666667 | 23 | py |
pytorch_RVAE | pytorch_RVAE-master/train_word_embeddings.py | import argparse
import numpy as np
import torch as t
from torch.autograd import Variable
from torch.optim import SGD
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from selfModules.neg import NEG_loss
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='word2vec')
    parser.add_argument('--num-iterations', type=int, default=1000000, metavar='NI',
                        help='num iterations (default: 1000000)')
    parser.add_argument('--batch-size', type=int, default=10, metavar='BS',
                        help='batch size (default: 10)')
    parser.add_argument('--num-sample', type=int, default=5, metavar='NS',
                        help='num sample (default: 5)')
    # FIX: argparse's ``type=bool`` treats any non-empty string as True
    # (bool("False") is True).  Parse the flag text explicitly; the flag name
    # and default are unchanged.
    parser.add_argument('--use-cuda',
                        type=lambda v: str(v).lower() in ('true', '1', 'yes'),
                        default=True, metavar='CUDA',
                        help='use cuda (default: True)')
    args = parser.parse_args()

    batch_loader = BatchLoader('')
    params = Parameters(batch_loader.max_word_len,
                        batch_loader.max_seq_len,
                        batch_loader.words_vocab_size,
                        batch_loader.chars_vocab_size)

    neg_loss = NEG_loss(params.word_vocab_size, params.word_embed_size)
    if args.use_cuda:
        neg_loss = neg_loss.cuda()

    # NEG_loss is defined over two embedding matrixes with shape of [params.word_vocab_size, params.word_embed_size]
    optimizer = SGD(neg_loss.parameters(), 0.1)

    for iteration in range(args.num_iterations):
        input_idx, target_idx = batch_loader.next_embedding_seq(args.batch_size)
        input = Variable(t.from_numpy(input_idx).long())
        target = Variable(t.from_numpy(target_idx).long())
        if args.use_cuda:
            input, target = input.cuda(), target.cuda()

        out = neg_loss(input, target, args.num_sample).mean()

        optimizer.zero_grad()
        out.backward()
        optimizer.step()

        if iteration % 500 == 0:
            # NOTE(review): ``[0]`` on a 0-dim array is the legacy pre-0.4
            # torch idiom (Variable-era); on modern torch use ``out.item()``.
            out = out.cpu().data.numpy()[0]
            print('iteration = {}, loss = {}'.format(iteration, out))

    word_embeddings = neg_loss.input_embeddings()
    np.save('data/word_embeddings.npy', word_embeddings)
| 2,183 | 36.016949 | 116 | py |
pytorch_RVAE | pytorch_RVAE-master/train.py | import argparse
import os
import numpy as np
import torch as t
from torch.optim import Adam
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from model.rvae import RVAE
if __name__ == "__main__":
    if not os.path.exists('data/word_embeddings.npy'):
        raise FileNotFoundError("word embeddings file was't found")

    parser = argparse.ArgumentParser(description='RVAE')
    parser.add_argument('--num-iterations', type=int, default=120000, metavar='NI',
                        help='num iterations (default: 120000)')
    parser.add_argument('--batch-size', type=int, default=32, metavar='BS',
                        help='batch size (default: 32)')
    # FIX: argparse's ``type=bool`` treats any non-empty string as True
    # (bool("False") is True), so ``--use-cuda False`` / ``--use-trained
    # False`` did not work.  Parse the flag text explicitly; flag names and
    # defaults are unchanged.
    _str2bool = lambda v: str(v).lower() in ('true', '1', 'yes')
    parser.add_argument('--use-cuda', type=_str2bool, default=True, metavar='CUDA',
                        help='use cuda (default: True)')
    parser.add_argument('--learning-rate', type=float, default=0.00005, metavar='LR',
                        help='learning rate (default: 0.00005)')
    parser.add_argument('--dropout', type=float, default=0.3, metavar='DR',
                        help='dropout (default: 0.3)')
    parser.add_argument('--use-trained', type=_str2bool, default=False, metavar='UT',
                        help='load pretrained model (default: False)')
    # FIX: the original help strings used '...(default: '')' which, via
    # implicit string concatenation, rendered as "(default: )"; the kld flag
    # also had a copy-pasted "ce result path" description.
    parser.add_argument('--ce-result', default='', metavar='CE',
                        help="ce result path (default: '')")
    parser.add_argument('--kld-result', default='', metavar='KLD',
                        help="kld result path (default: '')")

    args = parser.parse_args()

    batch_loader = BatchLoader('')
    parameters = Parameters(batch_loader.max_word_len,
                            batch_loader.max_seq_len,
                            batch_loader.words_vocab_size,
                            batch_loader.chars_vocab_size)

    rvae = RVAE(parameters)
    if args.use_trained:
        rvae.load_state_dict(t.load('trained_RVAE'))
    if args.use_cuda:
        rvae = rvae.cuda()

    optimizer = Adam(rvae.learnable_parameters(), args.learning_rate)

    train_step = rvae.trainer(optimizer, batch_loader)
    validate = rvae.validater(batch_loader)

    ce_result = []
    kld_result = []

    for iteration in range(args.num_iterations):

        cross_entropy, kld, coef = train_step(iteration, args.batch_size, args.use_cuda, args.dropout)

        if iteration % 5 == 0:
            print('\n')
            print('------------TRAIN-------------')
            print('----------ITERATION-----------')
            print(iteration)
            print('--------CROSS-ENTROPY---------')
            print(cross_entropy.data.cpu().numpy()[0])
            print('-------------KLD--------------')
            print(kld.data.cpu().numpy()[0])
            print('-----------KLD-coef-----------')
            print(coef)
            print('------------------------------')

        if iteration % 10 == 0:
            cross_entropy, kld = validate(args.batch_size, args.use_cuda)

            cross_entropy = cross_entropy.data.cpu().numpy()[0]
            kld = kld.data.cpu().numpy()[0]

            print('\n')
            print('------------VALID-------------')
            print('--------CROSS-ENTROPY---------')
            print(cross_entropy)
            print('-------------KLD--------------')
            print(kld)
            print('------------------------------')

            ce_result += [cross_entropy]
            kld_result += [kld]

        if iteration % 20 == 0:
            seed = np.random.normal(size=[1, parameters.latent_variable_size])

            sample = rvae.sample(batch_loader, 50, seed, args.use_cuda)

            print('\n')
            print('------------SAMPLE------------')
            print('------------------------------')
            print(sample)
            print('------------------------------')

    t.save(rvae.state_dict(), 'trained_RVAE')

    np.save('ce_result_{}.npy'.format(args.ce_result), np.array(ce_result))
    # FIX: filename made consistent with the ce save above; the previous
    # 'kld_result_npy_{}' pattern misplaced "npy" inside the stem (np.save
    # then appended a real ".npy" extension anyway).
    np.save('kld_result_{}.npy'.format(args.kld_result), np.array(kld_result))
| 4,032 | 37.04717 | 102 | py |
pytorch_RVAE | pytorch_RVAE-master/selfModules/embedding.py | import numpy as np
import torch as t
import torch.nn as nn
from torch.nn import Parameter
from .tdnn import TDNN
class Embedding(nn.Module):
    """Combined word + character-aware input embedding.

    Word vectors are loaded from a pre-trained numpy matrix and frozen;
    character vectors are learned and summarised per word by a TDNN, and the
    two representations are concatenated along the feature axis.
    """

    def __init__(self, params, path='../../../'):
        super(Embedding, self).__init__()
        self.params = params
        pretrained = np.load(path + 'data/word_embeddings.npy')
        self.word_embed = nn.Embedding(self.params.word_vocab_size, self.params.word_embed_size)
        self.char_embed = nn.Embedding(self.params.char_vocab_size, self.params.char_embed_size)
        # pre-trained word vectors are frozen (requires_grad=False)
        self.word_embed.weight = Parameter(t.from_numpy(pretrained).float(), requires_grad=False)
        # learnable character vectors, uniformly initialised in [-1, 1]
        self.char_embed.weight = Parameter(
            t.Tensor(self.params.char_vocab_size, self.params.char_embed_size).uniform_(-1, 1))
        self.TDNN = TDNN(self.params)

    def forward(self, word_input, character_input):
        """
        :param word_input: [batch_size, seq_len] tensor of Long type
        :param character_input: [batch_size, seq_len, max_word_len] tensor of Long type
        :return: input embedding with shape of [batch_size, seq_len, word_embed_size + sum_depth]
        """
        assert word_input.size()[:2] == character_input.size()[:2], \
            'Word input and character input must have the same sizes, but {} and {} found'.format(
                word_input.size(), character_input.size())
        batch_size, seq_len = word_input.size()
        word_vectors = self.word_embed(word_input)
        # flatten (batch, seq) so every word's characters are embedded at once
        flat_chars = character_input.view(-1, self.params.max_word_len)
        char_vectors = self.char_embed(flat_chars)
        char_vectors = char_vectors.view(batch_size,
                                         seq_len,
                                         self.params.max_word_len,
                                         self.params.char_embed_size)
        char_summary = self.TDNN(char_vectors)
        return t.cat([word_vectors, char_summary], 2)
| 2,001 | 37.5 | 98 | py |
pytorch_RVAE | pytorch_RVAE-master/selfModules/highway.py | import torch.nn as nn
import torch.nn.functional as F
class Highway(nn.Module):
    """Multi-layer highway network.

    Each layer computes sigma(T(x)) * f(G(x)) + (1 - sigma(T(x))) * Q(x),
    where T, G and Q are affine maps and f is the configured non-linearity.
    The per-layer Linear modules live in plain Python lists, so their
    parameters are registered by hand under stable, index-based names.
    """

    def __init__(self, size, num_layers, f):
        super(Highway, self).__init__()
        self.num_layers = num_layers
        self.nonlinear = [nn.Linear(size, size) for _ in range(num_layers)]
        self.linear = [nn.Linear(size, size) for _ in range(num_layers)]
        self.gate = [nn.Linear(size, size) for _ in range(num_layers)]
        for prefix, modules in [('nonlinear_module', self.nonlinear),
                                ('linear_module', self.linear),
                                ('gate_module', self.gate)]:
            for i, module in enumerate(modules):
                self._add_to_parameters(module.parameters(), '{}_{}'.format(prefix, i))
        self.f = f

    def forward(self, x):
        """Apply the highway stack to `x` ([batch_size, size] -> same shape)."""
        for layer in range(self.num_layers):
            carry = F.sigmoid(self.gate[layer](x))
            x = carry * self.f(self.nonlinear[layer](x)) + (1 - carry) * self.linear[layer](x)
        return x

    def _add_to_parameters(self, parameters, name):
        # expose list-held parameters to .parameters() / .cuda() / state_dict
        for i, parameter in enumerate(parameters):
            self.register_parameter(name='{}-{}'.format(name, i), param=parameter)
| 1,743 | 33.88 | 105 | py |
pytorch_RVAE | pytorch_RVAE-master/selfModules/neg.py | import torch as t
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import Parameter
from utils.functional import *
class NEG_loss(nn.Module):
    """Negative-sampling (word2vec style) loss with separate in/out embeddings.

    Loss as defined in Mikolov et al., "Distributed Representations of Words
    and Phrases and their Compositionality".
    """

    def __init__(self, num_classes, embed_size):
        """
        :param num_classes: An int. The number of possible classes.
        :param embed_size: An int. Embedding size
        """
        super(NEG_loss, self).__init__()
        self.num_classes = num_classes
        self.embed_size = embed_size
        self.out_embed = nn.Embedding(self.num_classes, self.embed_size)
        self.out_embed.weight = Parameter(t.FloatTensor(self.num_classes, self.embed_size).uniform_(-1, 1))
        self.in_embed = nn.Embedding(self.num_classes, self.embed_size)
        self.in_embed.weight = Parameter(t.FloatTensor(self.num_classes, self.embed_size).uniform_(-1, 1))

    def forward(self, input_labes, out_labels, num_sampled):
        """
        :param input_labes: Tensor with shape of [batch_size] of Long type
        :param out_labels: Tensor with shape of [batch_size] of Long type
        :param num_sampled: An int. The number of sampled from noise examples
        :return: Loss estimation with shape of [batch_size]
        """
        assert parameters_allocation_check(self), \
            """
            Invalid CUDA options. out_embed and in_embed parameters both should be stored in the same memory
            got out_embed.is_cuda = {}, in_embed.is_cuda = {}
            """.format(self.out_embed.weight.is_cuda, self.in_embed.weight.is_cuda)
        use_cuda = self.out_embed.weight.is_cuda
        [batch_size] = input_labes.size()
        center = self.in_embed(input_labes)
        context = self.out_embed(out_labels)
        # draw num_sampled noise word ids per batch element, uniformly
        noise_ids = Variable(t.Tensor(batch_size, num_sampled).uniform_(0, self.num_classes - 1).long())
        if use_cuda:
            noise_ids = noise_ids.cuda()
        noise = self.out_embed(noise_ids).neg()
        # log sigma(center . context) for the true pair
        log_target = (center * context).sum(1).squeeze().sigmoid().log()
        # sum over noise samples of log sigma(-noise . center)
        sum_log_sampled = t.bmm(noise, center.unsqueeze(2)).sigmoid().log().sum(1).squeeze()
        return -(log_target + sum_log_sampled)

    def input_embeddings(self):
        """Return the learned input embedding matrix as a numpy array."""
        return self.in_embed.weight.data.cpu().numpy()
| 2,619 | 37.529412 | 118 | py |
pytorch_RVAE | pytorch_RVAE-master/selfModules/tdnn.py | import torch as t
from torch.nn import Parameter
import torch.nn as nn
import torch.nn.functional as F
class TDNN(nn.Module):
    """Character-level time-delay (1d-conv) network with max-over-time pooling.

    Each (kW, out_dim) entry in params.kernels defines one conv kernel bank;
    the pooled outputs are concatenated into a fixed-size word representation
    of length sum_depth = sum of the out_dim values.
    """

    def __init__(self, params):
        super(TDNN, self).__init__()
        self.params = params
        # one weight bank per kernel width, uniformly initialised in [-1, 1]
        self.kernels = [Parameter(t.Tensor(out_dim, self.params.char_embed_size, kW).uniform_(-1, 1))
                        for kW, out_dim in params.kernels]
        self._add_to_parameters(self.kernels, 'TDNN_kernel')

    def forward(self, x):
        """
        :param x: tensor with shape [batch_size, max_seq_len, max_word_len, char_embed_size]
        :return: tensor with shape [batch_size, max_seq_len, depth_sum]

        applies multikernel 1d-conv layer along every word in input with max-over-time pooling
        to emit fixed-size output
        """
        input_size = x.size()
        input_size_len = len(input_size)
        assert input_size_len == 4, \
            'Wrong input rang, must be equal to 4, but {} found'.format(input_size_len)
        [batch_size, seq_len, _, embed_size] = input_size
        assert embed_size == self.params.char_embed_size, \
            'Wrong embedding size, must be equal to {}, but {} found'.format(self.params.char_embed_size, embed_size)
        # fold batch and sequence dims together and put channels first for conv1d:
        # [batch_size * seq_len, char_embed_size, max_word_len]
        x = x.view(-1, self.params.max_word_len, self.params.char_embed_size).transpose(1, 2).contiguous()
        # Fix: on PyTorch >= 0.4, max(dim)[0] already drops the reduced dim,
        # so the old trailing `.squeeze(2)` raised "dimension out of range".
        # (F.tanh is deprecated; use t.tanh.)
        xs = [t.tanh(F.conv1d(x, kernel)).max(2)[0] for kernel in self.kernels]
        x = t.cat(xs, 1)
        return x.view(batch_size, seq_len, -1)

    def _add_to_parameters(self, parameters, name):
        # parameters live in a plain list, so register them explicitly
        for i, parameter in enumerate(parameters):
            self.register_parameter(name='{}-{}'.format(name, i), param=parameter)
| 1,769 | 33.038462 | 117 | py |
pytorch_RVAE | pytorch_RVAE-master/selfModules/__init__.py | 0 | 0 | 0 | py | |
pytorch_RVAE | pytorch_RVAE-master/utils/visualize_word_embeddings.py | import os
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from utils.batch_loader import BatchLoader
if __name__ == "__main__":
    # Project the trained word embeddings to 2-D with PCA and save one huge
    # labelled scatter plot of the whole vocabulary.
    if not os.path.exists('../../data/word_embeddings.npy'):
        raise FileNotFoundError("word embeddings file was't found")
    pca = PCA(n_components=2)
    word_embeddings = np.load('../../data/word_embeddings.npy')
    word_embeddings_pca = pca.fit_transform(word_embeddings)
    # BatchLoader supplies idx_to_word; labels are assumed to align row-for-row
    # with the embedding matrix — TODO confirm against the training pipeline.
    batch_loader = BatchLoader()
    words = batch_loader.idx_to_word
    fig, ax = plt.subplots()
    # very large canvas so annotations stay readable at dpi=100
    fig.set_size_inches(150, 150)
    x = word_embeddings_pca[:, 0]
    y = word_embeddings_pca[:, 1]
    ax.scatter(x, y)
    for i, word in enumerate(words):
        ax.annotate(word, (x[i], y[i]))
    fig.savefig('word_embedding.png', dpi=100)
| 807 | 25.933333 | 67 | py |
def fold(f, l, a):
    """Left fold: combine accumulator `a` with each element of `l` via `f`."""
    acc = a
    for item in l:
        acc = f(acc, item)
    return acc
def f_and(x, y):
    """Combiner for fold mirroring the semantics of `x and y`."""
    return y if x else x
def f_or(x, y):
    """Combiner for fold mirroring the semantics of `x or y`."""
    return x if x else y
def parameters_allocation_check(module):
    """Return True iff all of `module`'s parameters share the same device class.

    Intended check (per its call sites): every parameter is on CUDA, or none
    is.  The previous implementation folded the raw Parameter tensors through
    `and`/`or`; taking the truth value of a multi-element tensor raises a
    RuntimeError, so the check crashed instead of testing placement.  Testing
    the `.is_cuda` flags implements the intended semantics.
    """
    flags = [parameter.is_cuda for parameter in module.parameters()]
    # all on GPU, or (vacuously/entirely) on CPU
    return all(flags) or not any(flags)
def handle_inputs(inputs, use_cuda):
    """Wrap each numpy array in `inputs` as a Variable, on GPU when requested."""
    import torch as t
    from torch.autograd import Variable
    wrapped = [Variable(t.from_numpy(array)) for array in inputs]
    if use_cuda:
        wrapped = [variable.cuda() for variable in wrapped]
    return wrapped
def kld_coef(i):
    """KL annealing weight: tanh schedule rising towards 1 around step 3500."""
    import math
    shifted = (i - 3500) / 1000
    return 0.5 * (math.tanh(shifted) + 1)
| 648 | 19.28125 | 77 | py |
pytorch_RVAE | pytorch_RVAE-master/utils/batch_loader.py | import collections
import os
import re
import numpy as np
from six.moves import cPickle
from .functional import *
class BatchLoader:
    def __init__(self, path='../../'):
        '''
        :properties
            data_files - array containing paths to data sources
            idx_files - array of paths to vocabulury files
            tensor_files - matrix with shape of [2, target_num] containing paths to files
                with data represented as tensors
                where first index in shape corresponds to types of representation of data,
                i.e. word representation and character-aware representation
            blind_symbol - special symbol to fill spaces in every word in character-aware representation
                to make all words be the same lenght
            pad_token - the same special symbol as blind_symbol, but in case of lines of words
            go_token - start of sequence symbol
            end_token - end of sequence symbol
            chars_vocab_size - number of unique characters
            idx_to_char - array of shape [chars_vocab_size] containing ordered list of inique characters
            char_to_idx - dictionary of shape [chars_vocab_size]
                such that idx_to_char[char_to_idx[some_char]] = some_char
                where some_char is such that idx_to_char contains it
            words_vocab_size, idx_to_word, word_to_idx - same as for characters
            max_word_len - maximum word length
            max_seq_len - maximum sequence length
            num_lines - num of lines in data with shape [target_num]
            word_tensor - tensor of shape [target_num, num_lines, line_lenght] c
                ontains word's indexes instead of words itself
            character_tensor - tensor of shape [target_num, num_lines, line_lenght, max_word_len].
                Rows contain character indexes for every word in data
        :methods
            build_character_vocab(self, data) -> chars_vocab_size, idx_to_char, char_to_idx
                chars_vocab_size - size of unique characters in corpus
                idx_to_char - array of shape [chars_vocab_size] containing ordered list of inique characters
                char_to_idx - dictionary of shape [chars_vocab_size]
                    such that idx_to_char[char_to_idx[some_char]] = some_char
                    where some_char is such that idx_to_char contains it
            build_word_vocab(self, sentences) -> words_vocab_size, idx_to_word, word_to_idx
                same as for characters
            preprocess(self, data_files, idx_files, tensor_files) -> Void
                preprocessed and initialized properties and then save them
            load_preprocessed(self, data_files, idx_files, tensor_files) -> Void
                load and and initialized properties
            next_batch(self, batch_size, target_str) -> encoder_word_input, encoder_character_input, input_seq_len,
                    decoder_input, decoder_output
                randomly sampled batch_size num of sequences for target from target_str.
                fills sequences with pad tokens to made them the same lenght.
                encoder_word_input and encoder_character_input have reversed order of the words
                    in case of performance
        '''
        # [train, test] source files plus cached vocabulary/tensor artifacts
        self.data_files = [path + 'data/train.txt',
                           path + 'data/test.txt']
        self.idx_files = [path + 'data/words_vocab.pkl',
                          path + 'data/characters_vocab.pkl']
        self.tensor_files = [[path + 'data/train_word_tensor.npy',
                              path + 'data/valid_word_tensor.npy'],
                             [path + 'data/train_character_tensor.npy',
                              path + 'data/valid_character_tensor.npy']]
        # special symbols (see class docstring)
        self.blind_symbol = ''
        self.pad_token = '_'
        self.go_token = '>'
        self.end_token = '|'
        # reuse cached artifacts only when every vocab and tensor file exists
        idx_exists = fold(f_and,
                          [os.path.exists(file) for file in self.idx_files],
                          True)
        tensors_exists = fold(f_and,
                              [os.path.exists(file) for target in self.tensor_files
                               for file in target],
                              True)
        if idx_exists and tensors_exists:
            self.load_preprocessed(self.data_files,
                                   self.idx_files,
                                   self.tensor_files)
            print('preprocessed data was found and loaded')
        else:
            self.preprocess(self.data_files,
                            self.idx_files,
                            self.tensor_files)
            print('data have preprocessed')
        # cursor into just_words used by next_embedding_seq
        self.word_embedding_index = 0
    def clean_whole_data(self, string):
        # drop leading digit/colon prefixes, join lines continued with 11
        # spaces of indent, collapse double newlines, and lower-case the text
        string = re.sub('^[\d\:]+ ', '', string, 0, re.M)
        string = re.sub('\n\s{11}', ' ', string, 0, re.M)
        string = re.sub('\n{2}', '\n', string, 0, re.M)
        return string.lower()
    def clean_str(self, string):
        '''
        Tokenization/string cleaning for all datasets except for SST.
        Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data
        '''
        # NOTE: the first pattern also keeps Hangul syllables (가-힣)
        string = re.sub(r"[^가-힣A-Za-z0-9(),!?:;.\'\`]", " ", string)
        string = re.sub(r"\'s", " \'s", string)
        string = re.sub(r"\'ve", " \'ve", string)
        string = re.sub(r"n\'t", " n\'t", string)
        string = re.sub(r"\'re", " \'re", string)
        string = re.sub(r"\'d", " \'d", string)
        string = re.sub(r"\'ll", " \'ll", string)
        string = re.sub(r"\.", " . ", string)
        string = re.sub(r",", " , ", string)
        string = re.sub(r":", " : ", string)
        string = re.sub(r";", " ; ", string)
        string = re.sub(r"!", " ! ", string)
        string = re.sub(r"\(", " ( ", string)
        string = re.sub(r"\)", " ) ", string)
        string = re.sub(r"\?", " ? ", string)
        string = re.sub(r"\s{2,}", " ", string)
        return string.strip()
    def build_character_vocab(self, data):
        # unique characters with blind symbol
        chars = list(set(data)) + [self.blind_symbol, self.pad_token, self.go_token, self.end_token]
        chars_vocab_size = len(chars)
        # mappings itself
        idx_to_char = chars
        char_to_idx = {x: i for i, x in enumerate(idx_to_char)}
        return chars_vocab_size, idx_to_char, char_to_idx
    def build_word_vocab(self, sentences):
        # Build vocabulary
        word_counts = collections.Counter(sentences)
        # Mapping from index to word
        idx_to_word = [x[0] for x in word_counts.most_common()]
        idx_to_word = list(sorted(idx_to_word)) + [self.pad_token, self.go_token, self.end_token]
        words_vocab_size = len(idx_to_word)
        # Mapping from word to index
        word_to_idx = {x: i for i, x in enumerate(idx_to_word)}
        return words_vocab_size, idx_to_word, word_to_idx
    def preprocess(self, data_files, idx_files, tensor_files):
        # Read both splits, build shared vocabularies over the merged corpus,
        # convert every line to index tensors, and cache everything to disk.
        data = [open(file, "r").read() for file in data_files]
        merged_data = data[0] + '\n' + data[1]
        self.chars_vocab_size, self.idx_to_char, self.char_to_idx = self.build_character_vocab(merged_data)
        with open(idx_files[1], 'wb') as f:
            cPickle.dump(self.idx_to_char, f)
        data_words = [[line.split() for line in target.split('\n')] for target in data]
        merged_data_words = merged_data.split()
        self.words_vocab_size, self.idx_to_word, self.word_to_idx = self.build_word_vocab(merged_data_words)
        self.max_word_len = np.amax([len(word) for word in self.idx_to_word])
        self.max_seq_len = np.amax([len(line) for target in data_words for line in target])
        self.num_lines = [len(target) for target in data_words]
        with open(idx_files[0], 'wb') as f:
            cPickle.dump(self.idx_to_word, f)
        self.word_tensor = np.array(
            [[list(map(self.word_to_idx.get, line)) for line in target] for target in data_words])
        print(self.word_tensor.shape)
        for i, path in enumerate(tensor_files[0]):
            np.save(path, self.word_tensor[i])
        self.character_tensor = np.array(
            [[list(map(self.encode_characters, line)) for line in target] for target in data_words])
        for i, path in enumerate(tensor_files[1]):
            np.save(path, self.character_tensor[i])
        # flat stream of train-split word indexes for embedding pre-training
        self.just_words = [word for line in self.word_tensor[0] for word in line]
    def load_preprocessed(self, data_files, idx_files, tensor_files):
        # Rebuild size metadata from the raw text, then load the cached
        # vocabularies and index tensors produced by preprocess().
        data = [open(file, "r").read() for file in data_files]
        data_words = [[line.split() for line in target.split('\n')] for target in data]
        self.max_seq_len = np.amax([len(line) for target in data_words for line in target])
        self.num_lines = [len(target) for target in data_words]
        [self.idx_to_word, self.idx_to_char] = [cPickle.load(open(file, "rb")) for file in idx_files]
        [self.words_vocab_size, self.chars_vocab_size] = [len(idx) for idx in [self.idx_to_word, self.idx_to_char]]
        [self.word_to_idx, self.char_to_idx] = [dict(zip(idx, range(len(idx)))) for idx in
                                                [self.idx_to_word, self.idx_to_char]]
        self.max_word_len = np.amax([len(word) for word in self.idx_to_word])
        [self.word_tensor, self.character_tensor] = [np.array([np.load(target) for target in input_type])
                                                     for input_type in tensor_files]
        self.just_words = [word for line in self.word_tensor[0] for word in line]
    def next_batch(self, batch_size, target_str):
        # Sample batch_size random lines from the chosen split and build
        # padded encoder/decoder inputs plus the shifted decoder target.
        target = 0 if target_str == 'train' else 1
        indexes = np.array(np.random.randint(self.num_lines[target], size=batch_size))
        encoder_word_input = [self.word_tensor[target][index] for index in indexes]
        encoder_character_input = [self.character_tensor[target][index] for index in indexes]
        input_seq_len = [len(line) for line in encoder_word_input]
        max_input_seq_len = np.amax(input_seq_len)
        encoded_words = [[idx for idx in line] for line in encoder_word_input]
        # decoder input starts with the go token; target ends with end token
        decoder_word_input = [[self.word_to_idx[self.go_token]] + line for line in encoder_word_input]
        decoder_character_input = [[self.encode_characters(self.go_token)] + line for line in encoder_character_input]
        decoder_output = [line + [self.word_to_idx[self.end_token]] for line in encoded_words]
        # pad decoder sequences on the right; encoder sequences are reversed
        # and padded on the left so real tokens sit nearest the final state
        for i, line in enumerate(decoder_word_input):
            line_len = input_seq_len[i]
            to_add = max_input_seq_len - line_len
            decoder_word_input[i] = line + [self.word_to_idx[self.pad_token]] * to_add
        for i, line in enumerate(decoder_character_input):
            line_len = input_seq_len[i]
            to_add = max_input_seq_len - line_len
            decoder_character_input[i] = line + [self.encode_characters(self.pad_token)] * to_add
        for i, line in enumerate(decoder_output):
            line_len = input_seq_len[i]
            to_add = max_input_seq_len - line_len
            decoder_output[i] = line + [self.word_to_idx[self.pad_token]] * to_add
        for i, line in enumerate(encoder_word_input):
            line_len = input_seq_len[i]
            to_add = max_input_seq_len - line_len
            encoder_word_input[i] = [self.word_to_idx[self.pad_token]] * to_add + line[::-1]
        for i, line in enumerate(encoder_character_input):
            line_len = input_seq_len[i]
            to_add = max_input_seq_len - line_len
            encoder_character_input[i] = [self.encode_characters(self.pad_token)] * to_add + line[::-1]
        return np.array(encoder_word_input), np.array(encoder_character_input), \
               np.array(decoder_word_input), np.array(decoder_character_input), np.array(decoder_output)
    def next_embedding_seq(self, seq_len):
        """
        :return:
            tuple of input and output for word embedding learning,
            where input = [b, b, c, c, d, d, e, e]
            and output = [a, c, b, d, d, e, d, g]
            for line [a, b, c, d, e, g] at index i
        """
        words_len = len(self.just_words)
        # wrap around the word stream so any window of seq_len words is valid
        seq = [self.just_words[i % words_len]
               for i in np.arange(self.word_embedding_index, self.word_embedding_index + seq_len)]
        result = []
        for i in range(seq_len - 2):
            result.append([seq[i + 1], seq[i]])
            result.append([seq[i + 1], seq[i + 2]])
        self.word_embedding_index = (self.word_embedding_index + seq_len) % words_len - 2
        # input and target
        result = np.array(result)
        return result[:, 0], result[:, 1]
    def go_input(self, batch_size):
        # single go-token step for every batch element, used to seed sampling
        go_word_input = [[self.word_to_idx[self.go_token]] for _ in range(batch_size)]
        go_character_input = [[self.encode_characters(self.go_token)] for _ in range(batch_size)]
        return np.array(go_word_input), np.array(go_character_input)
    def encode_word(self, idx):
        # one-hot encode a word index
        result = np.zeros(self.words_vocab_size)
        result[idx] = 1
        return result
    def decode_word(self, word_idx):
        word = self.idx_to_word[word_idx]
        return word
    def sample_word_from_distribution(self, distribution):
        # sample a word index from a probability distribution over the vocab
        ix = np.random.choice(range(self.words_vocab_size), p=distribution.ravel())
        x = np.zeros((self.words_vocab_size, 1))
        x[ix] = 1
        return self.idx_to_word[np.argmax(x)]
    def encode_characters(self, characters):
        # map a word to character indexes, right-padded with the blind symbol
        word_len = len(characters)
        to_add = self.max_word_len - word_len
        characters_idx = [self.char_to_idx[i] for i in characters] + to_add * [self.char_to_idx['']]
        return characters_idx
    def decode_characters(self, characters_idx):
        characters = [self.idx_to_char[i] for i in characters_idx]
        return ''.join(characters)
| 14,202 | 42.434251 | 119 | py |
pytorch_RVAE | pytorch_RVAE-master/utils/parameters.py | from .functional import *
class Parameters:
    """Hyper-parameter bundle shared by the embedding, encoder and decoder."""

    def __init__(self, max_word_len, max_seq_len, word_vocab_size, char_vocab_size):
        self.max_word_len = int(max_word_len)
        # +1 leaves room for the go/eos token added around each sequence
        self.max_seq_len = int(max_seq_len) + 1
        self.word_vocab_size = int(word_vocab_size)
        self.char_vocab_size = int(char_vocab_size)
        self.word_embed_size = 300
        self.char_embed_size = 15
        # (kernel width, output channels) pairs for the character TDNN
        self.kernels = [(1, 25), (2, 50), (3, 75), (4, 100), (5, 125), (6, 150)]
        self.sum_depth = sum(depth for _, depth in self.kernels)
        self.encoder_rnn_size = 600
        self.encoder_num_layers = 1
        self.latent_variable_size = 1100
        self.decoder_rnn_size = 800
        self.decoder_num_layers = 2
| 780 | 30.24 | 90 | py |
pytorch_RVAE | pytorch_RVAE-master/utils/__init__.py | 0 | 0 | 0 | py | |
pytorch_RVAE | pytorch_RVAE-master/model/rvae.py | import numpy as np
import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .decoder import Decoder
from .encoder import Encoder
from selfModules.embedding import Embedding
from utils.functional import kld_coef, parameters_allocation_check, fold
class RVAE(nn.Module):
    # Recurrent variational autoencoder: encoder context -> (mu, logvar) ->
    # reparameterised latent z -> conditional sequence decoder.
    def __init__(self, params):
        super(RVAE, self).__init__()
        self.params = params
        self.embedding = Embedding(self.params, '')
        self.encoder = Encoder(self.params)
        # project the encoder context onto the posterior's mean / log-variance
        self.context_to_mu = nn.Linear(self.params.encoder_rnn_size * 2, self.params.latent_variable_size)
        self.context_to_logvar = nn.Linear(self.params.encoder_rnn_size * 2, self.params.latent_variable_size)
        self.decoder = Decoder(self.params)
    def forward(self, drop_prob,
                encoder_word_input=None, encoder_character_input=None,
                decoder_word_input=None, decoder_character_input=None,
                z=None, initial_state=None):
        """
        :param encoder_word_input: An tensor with shape of [batch_size, seq_len] of Long type
        :param encoder_character_input: An tensor with shape of [batch_size, seq_len, max_word_len] of Long type
        :param decoder_word_input: An tensor with shape of [batch_size, max_seq_len + 1] of Long type
        :param initial_state: initial state of decoder rnn in order to perform sampling
        :param drop_prob: probability of an element of decoder input to be zeroed in sense of dropout
        :param z: context if sampling is performing
        :return: unnormalized logits of sentence words distribution probabilities
                    with shape of [batch_size, seq_len, word_vocab_size]
                 final rnn state with shape of [num_layers, batch_size, decoder_rnn_size]
        """
        assert parameters_allocation_check(self), \
            'Invalid CUDA options. Parameters should be allocated in the same memory'
        use_cuda = self.embedding.word_embed.weight.is_cuda
        # either the full encoder+decoder inputs (training) or z + decoder input (sampling)
        assert z is None and fold(lambda acc, parameter: acc and parameter is not None,
                                  [encoder_word_input, encoder_character_input, decoder_word_input],
                                  True) \
               or (z is not None and decoder_word_input is not None), \
            "Invalid input. If z is None then encoder and decoder inputs should be passed as arguments"
        if z is None:
            ''' Get context from encoder and sample z ~ N(mu, std)
            '''
            [batch_size, _] = encoder_word_input.size()
            encoder_input = self.embedding(encoder_word_input, encoder_character_input)
            context = self.encoder(encoder_input)
            mu = self.context_to_mu(context)
            logvar = self.context_to_logvar(context)
            std = t.exp(0.5 * logvar)
            # reparameterisation trick: z = eps * std + mu, eps ~ N(0, I)
            z = Variable(t.randn([batch_size, self.params.latent_variable_size]))
            if use_cuda:
                z = z.cuda()
            z = z * std + mu
            # analytic KL divergence between N(mu, std) and N(0, I)
            kld = (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean().squeeze()
        else:
            kld = None
        decoder_input = self.embedding.word_embed(decoder_word_input)
        out, final_state = self.decoder(decoder_input, z, drop_prob, initial_state)
        return out, final_state, kld
    def learnable_parameters(self):
        # word_embedding is constant parameter thus it must be dropped from list of parameters for optimizer
        return [p for p in self.parameters() if p.requires_grad]
    def trainer(self, optimizer, batch_loader):
        # Build a closure that performs one optimisation step on a random
        # training batch and returns (cross_entropy, kld, kld_coef(i)).
        def train(i, batch_size, use_cuda, dropout):
            input = batch_loader.next_batch(batch_size, 'train')
            input = [Variable(t.from_numpy(var)) for var in input]
            input = [var.long() for var in input]
            input = [var.cuda() if use_cuda else var for var in input]
            [encoder_word_input, encoder_character_input, decoder_word_input, decoder_character_input, target] = input
            logits, _, kld = self(dropout,
                                  encoder_word_input, encoder_character_input,
                                  decoder_word_input, decoder_character_input,
                                  z=None)
            logits = logits.view(-1, self.params.word_vocab_size)
            target = target.view(-1)
            cross_entropy = F.cross_entropy(logits, target)
            # NOTE(review): 79 is a fixed reconstruction-vs-KL weight on the
            # mean cross-entropy; its origin is undocumented — confirm.
            loss = 79 * cross_entropy + kld_coef(i) * kld
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            return cross_entropy, kld, kld_coef(i)
        return train
    def validater(self, batch_loader):
        # Build a closure computing loss terms on a random validation batch
        # (dropout disabled, no optimisation step).
        def validate(batch_size, use_cuda):
            input = batch_loader.next_batch(batch_size, 'valid')
            input = [Variable(t.from_numpy(var)) for var in input]
            input = [var.long() for var in input]
            input = [var.cuda() if use_cuda else var for var in input]
            [encoder_word_input, encoder_character_input, decoder_word_input, decoder_character_input, target] = input
            logits, _, kld = self(0.,
                                  encoder_word_input, encoder_character_input,
                                  decoder_word_input, decoder_character_input,
                                  z=None)
            logits = logits.view(-1, self.params.word_vocab_size)
            target = target.view(-1)
            cross_entropy = F.cross_entropy(logits, target)
            return cross_entropy, kld
        return validate
    def sample(self, batch_loader, seq_len, seed, use_cuda):
        # Decode word by word from latent vector `seed`, feeding each sampled
        # word back in, until the end token or seq_len words.
        seed = Variable(t.from_numpy(seed).float())
        if use_cuda:
            seed = seed.cuda()
        decoder_word_input_np, decoder_character_input_np = batch_loader.go_input(1)
        decoder_word_input = Variable(t.from_numpy(decoder_word_input_np).long())
        decoder_character_input = Variable(t.from_numpy(decoder_character_input_np).long())
        if use_cuda:
            decoder_word_input, decoder_character_input = decoder_word_input.cuda(), decoder_character_input.cuda()
        result = ''
        initial_state = None
        for i in range(seq_len):
            logits, initial_state, _ = self(0., None, None,
                                            decoder_word_input, decoder_character_input,
                                            seed, initial_state)
            logits = logits.view(-1, self.params.word_vocab_size)
            prediction = F.softmax(logits)
            # sample the next word from the distribution of the last step
            word = batch_loader.sample_word_from_distribution(prediction.data.cpu().numpy()[-1])
            if word == batch_loader.end_token:
                break
            result += ' ' + word
            decoder_word_input_np = np.array([[batch_loader.word_to_idx[word]]])
            decoder_character_input_np = np.array([[batch_loader.encode_characters(word)]])
            decoder_word_input = Variable(t.from_numpy(decoder_word_input_np).long())
            decoder_character_input = Variable(t.from_numpy(decoder_character_input_np).long())
            if use_cuda:
                decoder_word_input, decoder_character_input = decoder_word_input.cuda(), decoder_character_input.cuda()
        return result
| 7,319 | 38.567568 | 119 | py |
pytorch_RVAE | pytorch_RVAE-master/model/encoder.py | import torch as t
import torch.nn as nn
import torch.nn.functional as F
from selfModules.highway import Highway
from utils.functional import parameters_allocation_check
class Encoder(nn.Module):
    """Bidirectional LSTM sentence encoder preceded by a highway network.

    Produces one fixed-size context vector per sentence by concatenating the
    last layer's forward and backward final LSTM states.
    """

    def __init__(self, params):
        super(Encoder, self).__init__()
        self.params = params
        self.hw1 = Highway(self.params.sum_depth + self.params.word_embed_size, 2, F.relu)
        self.rnn = nn.LSTM(input_size=self.params.word_embed_size + self.params.sum_depth,
                           hidden_size=self.params.encoder_rnn_size,
                           num_layers=self.params.encoder_num_layers,
                           batch_first=True,
                           bidirectional=True)

    def forward(self, input):
        """
        :param input: [batch_size, seq_len, embed_size] tensor
        :return: context of input sentenses with shape of [batch_size, latent_variable_size]
        """
        [batch_size, seq_len, embed_size] = input.size()
        # run the highway network token-wise by flattening batch and time dims
        flattened = self.hw1(input.view(-1, embed_size))
        rnn_input = flattened.view(batch_size, seq_len, embed_size)
        assert parameters_allocation_check(self), \
            'Invalid CUDA options. Parameters should be allocated in the same memory'
        # unfold the rnn from a zero initial state; keep the second state
        # tensor of the (h, c) pair, as the original implementation does
        _, (_, last_state) = self.rnn(rnn_input)
        last_state = last_state.view(self.params.encoder_num_layers, 2, batch_size, self.params.encoder_rnn_size)
        forward_state, backward_state = last_state[-1][0], last_state[-1][1]
        return t.cat([forward_state, backward_state], 1)
| 1,685 | 34.125 | 115 | py |
pytorch_RVAE | pytorch_RVAE-master/model/decoder.py | import torch as t
import torch.nn as nn
import torch.nn.functional as F
from utils.functional import parameters_allocation_check
class Decoder(nn.Module):
    """Conditional LSTM language-model decoder.

    The latent vector z is tiled across time and concatenated to every input
    embedding, so each step is conditioned on the sentence context.
    """

    def __init__(self, params):
        super(Decoder, self).__init__()
        self.params = params
        self.rnn = nn.LSTM(input_size=self.params.latent_variable_size + self.params.word_embed_size,
                           hidden_size=self.params.decoder_rnn_size,
                           num_layers=self.params.decoder_num_layers,
                           batch_first=True)
        self.fc = nn.Linear(self.params.decoder_rnn_size, self.params.word_vocab_size)

    def forward(self, decoder_input, z, drop_prob, initial_state=None):
        """
        :param decoder_input: tensor with shape of [batch_size, seq_len, embed_size]
        :param z: sequence context with shape of [batch_size, latent_variable_size]
        :param drop_prob: probability of an element of decoder input to be zeroed in sense of dropout
        :param initial_state: initial state of decoder rnn
        :return: unnormalized logits of sentense words distribution probabilities
                    with shape of [batch_size, seq_len, word_vocab_size]
                 final rnn state with shape of [num_layers, batch_size, decoder_rnn_size]
        """
        assert parameters_allocation_check(self), \
            'Invalid CUDA options. Parameters should be allocated in the same memory'
        [batch_size, seq_len, _] = decoder_input.size()
        dropped = F.dropout(decoder_input, drop_prob)
        # tile z along the time axis and append it to every token embedding,
        # conditioning the rnn on the context via an extra per-step input
        tiled_z = t.cat([z] * seq_len, 1).view(batch_size, seq_len, self.params.latent_variable_size)
        conditioned = t.cat([dropped, tiled_z], 2)
        rnn_out, final_state = self.rnn(conditioned, initial_state)
        flat = rnn_out.contiguous().view(-1, self.params.decoder_rnn_size)
        logits = self.fc(flat).view(batch_size, seq_len, self.params.word_vocab_size)
        return logits, final_state
| 2,142 | 39.433962 | 103 | py |
pytorch_RVAE | pytorch_RVAE-master/model/__init__.py | 0 | 0 | 0 | py | |
semantic-abstraction | semantic-abstraction-main/plot_utils.py | import numpy as np
from matplotlib.patches import Patch
import matplotlib.pyplot as plt
import io
from PIL import Image
import open3d as o3d
from skimage.measure import block_reduce
import matplotlib.cm as cm
import matplotlib as mpl
def plot_to_png(fig):
    """Render the current matplotlib figure to an RGBA uint8 image array."""
    buffer = io.BytesIO()
    plt.savefig(buffer, format="png")
    buffer.seek(0)
    return np.array(Image.open(buffer)).astype(np.uint8)
def set_view_and_save_img(fig, ax, views):
    """Yield one PNG snapshot of `fig` per (elevation, azimuth) camera view."""
    for elevation, azimuth in views:
        ax.view_init(elev=elevation, azim=azimuth)
        yield plot_to_png(fig)
def plot_pointcloud(
    xyz,
    features,
    object_labels=None,
    background_color=(0.1, 0.1, 0.1, 0.99),
    num_points=50000,
    views=[(45, 135)],
    pts_size=3,
    alpha=0.5,
    plot_empty=False,
    visualize_ghost_points=False,
    object_colors=None,
    delete_fig=True,
    show_plot=False,
    bounds=[[-1, -1, -0.1], [1, 1, 1.9]],
):
    # NOTE(review): `views` and `bounds` are mutable default arguments. They are
    # never mutated here, so this is benign, but consider tuples instead.
    """Scatter-plot a (possibly labelled) point cloud and render it from each view.

    Args:
        xyz: (N, 3) float array of point coordinates.
        features: (N,) int label ids (semantic mode, when 1-D) or (N, C) per-point
            colors in [0, 1] or [0, 255] (color mode).
        object_labels: array of label names indexed by label id; enables the legend.
        views: list of (elev, azim) camera angles; one image is rendered per view.
        alpha: scalar or (N,) per-point opacity.

    Returns:
        List of RGBA uint8 images, one per entry of `views`.
    """
    # Semantic mode = one integer label per point; color mode = per-point RGB(A).
    is_semantic = len(features.shape) == 1
    if type(alpha) is float:
        alpha = np.ones(xyz.shape[0]).astype(np.float32) * alpha
    # Drop points belonging to "background" labels unless explicitly requested.
    if not plot_empty and is_semantic and object_labels is not None:
        mask = np.ones_like(alpha).astype(bool)
        for remove_label in ["empty", "unlabelled", "out of bounds"]:
            if remove_label in object_labels.tolist():
                remove_idx = object_labels.tolist().index(remove_label)
                mask = np.logical_and(mask, features != remove_idx)
        xyz = xyz[mask, :]
        features = features[mask, ...]
        alpha = alpha[mask]
        # pts_size may be a per-point array; keep it aligned with the mask.
        if type(pts_size) != int and type(pts_size) != float:
            pts_size = pts_size[mask]
    # subsample (uniformly at random, without replacement) to cap render cost
    if xyz.shape[0] > num_points:
        indices = np.random.choice(xyz.shape[0], size=num_points, replace=False)
        xyz = xyz[indices, :]
        features = features[indices, ...]
        alpha = alpha[indices]
        if type(pts_size) != int and type(pts_size) != float:
            pts_size = pts_size[indices]
    fig = plt.figure(figsize=(6, 6), dpi=160)
    ax = fig.add_subplot(111, projection="3d")
    x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]
    ax.set_facecolor(background_color)
    ax.w_xaxis.set_pane_color(background_color)
    ax.w_yaxis.set_pane_color(background_color)
    ax.w_zaxis.set_pane_color(background_color)
    # ax._axis3don = False
    if is_semantic and object_labels is not None:
        object_ids = list(np.unique(features))
        object_labels = object_labels[object_ids].tolist()
        if object_colors is not None:
            object_colors = object_colors[object_ids]
        # NOTE(review): np.int is removed in NumPy >= 1.24; this line needs
        # `int` (or np.int64) to run on modern NumPy.
        features = features.astype(np.int)
        # repack object ids so they are contiguous 0..K-1 for color lookup
        repacked_obj_ids = np.zeros(features.shape).astype(np.uint32)
        for i, j in enumerate(object_ids):
            repacked_obj_ids[features == j] = i
        features = repacked_obj_ids
        object_ids = list(np.unique(features))
        colors = np.zeros((len(features), 4)).astype(np.uint8)
        if object_colors is None:
            # tab20 has 20 entries; labels beyond 20 reuse colors via modulo.
            cmap = plt.get_cmap("tab20")
            object_colors = (255 * cmap(np.array(object_ids) % 20)).astype(np.uint8)
        for obj_id in np.unique(features):
            colors[features == obj_id, :] = object_colors[obj_id]
        colors = colors.astype(float) / 255.0
        object_colors = object_colors.astype(float) / 255
        handles = [
            Patch(facecolor=c, edgecolor="grey", label=label)
            for label, c in zip(object_labels, object_colors)
        ]
        l = ax.legend(
            handles=handles,
            labels=object_labels,
            loc="lower center",
            bbox_to_anchor=(0.5, 0),
            ncol=4,
            facecolor=(0, 0, 0, 0.1),
            fontsize=8,
            framealpha=0,
        )
        plt.setp(l.get_texts(), color=(0.8, 0.8, 0.8))
    else:
        # Color mode: features are already per-point colors; normalize to [0, 1].
        colors = features.astype(float)
        if colors.max() > 1.0:
            colors /= 255.0
        assert colors.max() <= 1.0
        # ensure alpha has same dims as colors
        if colors.shape[-1] == 4:
            colors[:, -1] = alpha
    ax.scatter(x, y, z, c=colors, s=pts_size)
    if visualize_ghost_points:
        # Overlay faint white markers at every unique point location.
        x, y, z = np.array(np.unique(xyz, axis=0)).T
        ax.scatter(x, y, z, color=[1.0, 1.0, 1.0, 0.02], s=pts_size)
    # Hide axes ticks
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_zticks([])
    ax.axes.set_xlim3d(left=bounds[0][0], right=bounds[1][0])
    ax.axes.set_ylim3d(bottom=bounds[0][1], top=bounds[1][1])
    ax.axes.set_zlim3d(bottom=bounds[0][2], top=bounds[1][2])
    plt.tight_layout(pad=0)
    imgs = list(set_view_and_save_img(fig, ax, views))
    if show_plot:
        plt.show()
    if delete_fig:
        plt.close(fig)
    return imgs
# meshes = []
# for class_id in np.unique(features):
# mask = features == class_id
# pcd = o3d.geometry.PointCloud()
# pcd.points = o3d.utility.Vector3dVector(xyz[mask, :])
# pcd.estimate_normals(
# search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
# radii = [0.005, 0.01, 0.02, 0.04]
# rec_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
# pcd, o3d.utility.DoubleVector(radii))
# rec_mesh.paint_uniform_color(object_colors[class_id][:3])
# meshes.append(rec_mesh)
# o3d.visualization.draw_geometries(meshes)
def view_tsdf(tsdf, simplify=True):
    """Interactively visualize a TSDF volume as a 3D scatter of occupied voxels.

    Args:
        tsdf: 3D float array of signed distance values (negative = inside surface).
        simplify: if True, downsample the volume by 8x per axis (mean pooling)
            before plotting to keep the scatter tractable.

    Side effects: mutates global matplotlib rcParams and calls plt.show().
    """
    main_color = "#00000055"
    # Globally restyle matplotlib text/axes to a translucent dark theme.
    mpl.rcParams["text.color"] = main_color
    mpl.rcParams["axes.labelcolor"] = main_color
    mpl.rcParams["xtick.color"] = main_color
    mpl.rcParams["ytick.color"] = main_color
    mpl.rc("axes", edgecolor=main_color)
    mpl.rcParams["grid.color"] = "#00000033"
    if simplify:
        tsdf = block_reduce(tsdf, block_size=(8, 8, 8), func=np.mean)
        print("block reduced", tsdf.shape)
    # Voxel-grid coordinates broadcast to the full volume shape.
    x = np.arange(tsdf.shape[0])[:, None, None]
    y = np.arange(tsdf.shape[1])[None, :, None]
    z = np.arange(tsdf.shape[2])[None, None, :]
    x, y, z = np.broadcast_arrays(x, y, z)
    # Map TSDF value (in [-1, 1], shifted to [0, 2]) through the plasma colormap;
    # only voxels with tsdf < 0 (inside the surface) are made visible via alpha.
    c = cm.plasma((tsdf.ravel() + 1))
    alphas = (tsdf.ravel() < 0).astype(float)
    c[..., -1] = alphas
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    ax.scatter(x.ravel(), y.ravel(), z.ravel(), c=c, s=1)
    ax.w_xaxis.set_pane_color((0.0, 0.0, 0.0, 0.0))
    ax.w_yaxis.set_pane_color((0.0, 0.0, 0.0, 0.0))
    ax.w_zaxis.set_pane_color((0.0, 0.0, 0.0, 0.0))
    # Hide axes ticks
    ax.tick_params(axis="x", colors=(0.0, 0.0, 0.0, 0.0))
    ax.tick_params(axis="y", colors=(0.0, 0.0, 0.0, 0.0))
    ax.tick_params(axis="z", colors=(0.0, 0.0, 0.0, 0.0))
    ax.view_init(20, -110)
    plt.show()
| 6,575 | 33.429319 | 84 | py |
semantic-abstraction | semantic-abstraction-main/generate_relevancy.py | from typing import List
from pathlib import Path
import h5py
import torch
from tqdm import tqdm
import ray
from utils import write_to_hdf5
from filelock import FileLock
import numpy as np
from CLIP.clip import ClipWrapper, saliency_configs, imagenet_templates
from dataset import synonyms, deref_h5py
import typer
import imageio
from matplotlib import pyplot as plt
import cv2
from time import time
app = typer.Typer()
def resize_and_add_data(dataset, data):
    """Append `data` along axis 0 of a resizable HDF5 dataset.

    Args:
        dataset: h5py dataset created with maxshape=(None, ...).
        data: array whose trailing dimensions match the dataset's.

    Returns:
        One h5py region reference per appended row, in order.
    """
    old_rows = dataset.shape[0]
    new_rows = data.shape[0]
    # Trailing dimensions must already agree with the dataset layout.
    assert tuple(dataset.shape[1:]) == tuple(data.shape[1:])
    dataset.resize(old_rows + new_rows, axis=0)
    dataset[-new_rows:, ...] = data
    return [dataset.regionref[old_rows + i, ...] for i in range(new_rows)]
def get_datastructure(image_shape, relevancy_shape, tsdf_dim, num_output_pts, **kwargs):
    """Describe the HDF5 datasets (dtype + per-item shape) used to store one scene.

    Args:
        image_shape: (H, W) of the RGB/depth/segmentation images.
        relevancy_shape: per-item shape of stored saliency maps.
        tsdf_dim: voxel grid dimensions; flattened for the TSDF point datasets.
        num_output_pts: number of labelled output points per scene.
        **kwargs: ignored; allows passing a superset config dict.

    Returns:
        dict mapping dataset name -> {"dtype": ..., "item_shape": [...]}.
    """
    image_shape = list(image_shape)
    relevancy_shape = list(relevancy_shape)
    num_tsdf_pts = np.prod(tsdf_dim)
    spec = {}
    spec["rgb"] = {"dtype": "uint8", "item_shape": image_shape + [3]}
    spec["depth"] = {"dtype": "f", "item_shape": image_shape}
    spec["seg"] = {"dtype": "i", "item_shape": image_shape}
    spec["saliencies"] = {"dtype": "f", "item_shape": relevancy_shape}
    spec["tsdf_value_pts"] = {"dtype": "f", "item_shape": [num_tsdf_pts]}
    spec["tsdf_xyz_pts"] = {"dtype": "f", "item_shape": [num_tsdf_pts, 3]}
    spec["full_xyz_pts"] = {"dtype": "f", "item_shape": [num_output_pts, 3]}
    spec["full_objid_pts"] = {"dtype": "i", "item_shape": [num_output_pts]}
    return spec
def init_dataset(file_path, data_structure):
    """Create an empty, resizable, gzip-compressed HDF5 dataset per spec entry.

    Args:
        file_path: destination .hdf5 path (truncated if it exists).
        data_structure: dict as produced by get_datastructure().
    """
    with h5py.File(file_path, mode="w") as file:
        # Each dataset starts with 0 rows and can grow unboundedly along axis 0.
        for name, info in data_structure.items():
            item_shape = info["item_shape"]
            file.create_dataset(
                name=name,
                shape=tuple([0] + item_shape),
                dtype=info["dtype"],
                chunks=tuple([1] + item_shape),
                compression="gzip",
                compression_opts=9,
                maxshape=tuple([None] + item_shape),
            )
@ray.remote
def generate_saliency_helper(
    clip_wrapper, rgb_inputs, prompts, text_labels, scene_path, replace
):
    """Compute CLIP saliency maps for every RGB input and persist them to the scene HDF5.

    Args:
        clip_wrapper: ray actor handle exposing get_clip_saliency.remote(...).
        rgb_inputs: dict of name -> HxWx3 image (e.g. "rgb", "domain_randomized_rgb").
        prompts: prompt templates; may be overridden per config (see below).
        text_labels: list of label strings to compute saliency for.
        scene_path: path to the scene .hdf5 file (guarded by a sibling .lock file).
        replace: forwarded to write_to_hdf5 to overwrite existing keys.

    Returns:
        The actor handle, so the caller can return it to the worker pool.
    """
    # One saliency result per (rgb input, saliency config) pair. Configs that set
    # "imagenet_prompt_ensemble" use the ImageNet prompt templates instead of `prompts`.
    saliencies = {
        rgb_name: {
            saliency_config_name: ray.get(
                clip_wrapper.get_clip_saliency.remote(
                    img=rgb,
                    text_labels=text_labels,
                    prompts=prompts
                    if "imagenet_prompt_ensemble"
                    not in saliency_config(img_dim=min(rgb.shape[:2]))
                    or not saliency_config(img_dim=min(rgb.shape[:2]))[
                        "imagenet_prompt_ensemble"
                    ]
                    else imagenet_templates,
                    **saliency_config(img_dim=min(rgb.shape[:2])),
                )
            )
            for saliency_config_name, saliency_config in saliency_configs.items()
        }
        for rgb_name, rgb in rgb_inputs.items()
    }
    # File lock serializes writers: multiple ray workers may target the same scene.
    with FileLock(scene_path + ".lock"):
        with h5py.File(scene_path, mode="a") as f:
            saliency_group = f["data"].create_group("saliencies")
            for rgb_name, rgb_saliencies in saliencies.items():
                for (
                    saliency_config_name,
                    (config_saliency, text_label_features),
                ) in rgb_saliencies.items():
                    # Resample each saliency map to the fixed storage resolution.
                    storage_dims = np.array(f["saliencies"].shape)[1:]
                    config_saliency = torch.nn.functional.interpolate(
                        config_saliency[:, None, :, :],
                        size=tuple(storage_dims),
                        mode="nearest-exact"
                        # mode='bilinear',
                        # align_corners=False
                    )[:, 0]
                    # Append a "mean" channel: average saliency / feature over labels.
                    config_saliency = torch.cat(
                        [config_saliency, config_saliency.mean(dim=0, keepdim=True)],
                        dim=0,
                    )
                    text_label_features = torch.cat(
                        [
                            text_label_features,
                            text_label_features.mean(dim=0, keepdim=True),
                        ],
                        dim=0,
                    )
                    # Re-normalize features (the appended mean is not unit-norm).
                    text_label_features /= text_label_features.norm(
                        dim=-1, keepdim=True
                    )
                    write_to_hdf5(
                        saliency_group,
                        key=rgb_name
                        + "|"
                        + saliency_config_name
                        + "|saliency_text_labels",
                        value=np.array(text_labels + ["mean"]).astype("S"),
                        replace=replace,
                    )
                    write_to_hdf5(
                        saliency_group,
                        key=rgb_name
                        + "|"
                        + saliency_config_name
                        + "|saliency_text_label_features",
                        value=text_label_features,
                        replace=replace,
                    )
                    # Bulk data goes into the shared resizable dataset; the group
                    # stores only region references into it.
                    region_references = resize_and_add_data(
                        dataset=f["saliencies"], data=config_saliency
                    )
                    write_to_hdf5(
                        saliency_group,
                        key=rgb_name + "|" + saliency_config_name,
                        dtype=h5py.regionref_dtype,
                        value=region_references,
                        replace=replace,
                    )
    return clip_wrapper
@app.command()
def dataset(
    file_path: str,
    num_processes: int,
    local: bool,
    prompts: List[str] = ["a render of a {} in a game engine."],
    replace=False,
):
    """Generate CLIP relevancy maps for every .hdf5 scene under `file_path`.

    Spins up `num_processes` GPU-backed CLIP actor workers via ray and farms one
    scene out to each; results are written back into the scene files by
    generate_saliency_helper. `local` runs ray in local (single-process) mode.
    With `replace=True`, existing relevancies are deleted first (after a prompt).
    """
    # Real-photo datasets get a photo-style prompt instead of the render prompt.
    if "matterport" in file_path or "nyu" in file_path:
        prompts = ["a photograph of a {} in a home."]
    print(prompts)
    tasks = []
    ray.init(log_to_driver=True, local_mode=local)
    num_cuda_devices = torch.cuda.device_count()
    assert num_cuda_devices > 0
    print(f"[INFO] FOUND {num_cuda_devices} CUDA DEVICE")
    wrapper_actor_cls = ray.remote(ClipWrapper)
    # Pool of CLIP actors; GPUs are split fractionally across the workers.
    available_clip_wrappers = [
        wrapper_actor_cls.options(num_gpus=num_cuda_devices / num_processes).remote(
            clip_model_type="ViT-B/32", device="cuda"
        )
        for _ in range(num_processes)
    ]
    scene_paths = list(reversed(sorted(map(str, Path(file_path).rglob("*.hdf5")))))
    if replace:
        if input("Replace = True. Delete existing relevancies? [y/n]") != "y":
            exit()
        # First pass: wipe old saliency groups and reset the shared dataset.
        for scene_path in tqdm(
            scene_paths, dynamic_ncols=True, desc="deleting existing relevancies"
        ):
            try:
                with h5py.File(scene_path, mode="a") as f:
                    for k in f["data"]:
                        if "salienc" in k:
                            del f[f"data/{k}"]
                    if "saliencies" in f:
                        data_shape = list(f["saliencies"].shape[1:])
                        del f["saliencies"]
                        f.create_dataset(
                            name="saliencies",
                            shape=tuple([0] + data_shape),
                            dtype="f",
                            chunks=tuple([1] + data_shape),
                            compression="gzip",
                            compression_opts=9,
                            maxshape=tuple([None] + data_shape),
                        )
            except Exception as e:
                print(e, scene_path)
                exit()
    for scene_path in tqdm(
        scene_paths, dynamic_ncols=True, desc="generating relevancies", smoothing=0.001
    ):
        assert len(available_clip_wrappers) > 0
        try:
            with h5py.File(scene_path, mode="a") as f:
                scene_already_done = "saliencies" in f["data"]
                if not scene_already_done or replace:
                    if scene_already_done:
                        for k in f["data"]:
                            if "salienc" in k:
                                del f[f"data/{k}"]
                        # NOTE(review): here data_shape is a tuple (no list()),
                        # so `[0] + data_shape` below would raise TypeError if
                        # this branch runs — verify against the replace pass above.
                        data_shape = f["saliencies"].shape[1:]
                        if "saliencies" in f:
                            del f["saliencies"]
                        f.create_dataset(
                            name="saliencies",
                            shape=tuple([0] + data_shape),
                            dtype="f",
                            chunks=tuple([1] + data_shape),
                            compression="gzip",
                            compression_opts=9,
                            maxshape=tuple([None] + data_shape),
                        )
                    if "data/visible_scene_obj_labels" in f:
                        del f["data/visible_scene_obj_labels"]
                    objid_to_class = np.array(f[f"data/objid_to_class"]).astype(str)
                    text_labels = objid_to_class.copy()
                    scene_has_groundtruth = (
                        "seg" in f["data"] and "full_objid_pts" in f["data"]
                    )
                    visible_scene_obj_labels = text_labels.copy()
                    if scene_has_groundtruth:
                        # Object ids actually present in the scene point labels
                        # (-1 marks empty space and is discarded).
                        objids_in_scene = list(
                            set(
                                deref_h5py(
                                    dataset=f["full_objid_pts"],
                                    refs=f["data/full_objid_pts"],
                                )
                                .astype(int)
                                .reshape(-1)
                            )
                            - {-1}
                        )  # remove empty
                        scene_object_labels = text_labels.copy()[objids_in_scene]
                        # remove objects which are not in view
                        gt_seg = deref_h5py(dataset=f["seg"], refs=f["data"]["seg"])[0]
                        visible_obj_ids = list(map(int, set(np.unique(gt_seg)) - {-1}))
                        visible_obj_labels = text_labels[visible_obj_ids]
                        visible_scene_obj_labels = list(
                            set(visible_obj_labels).intersection(
                                set(scene_object_labels)
                            )
                        )
                        # Normalize labels: strip any "[...]" suffix and whitespace.
                        visible_scene_obj_labels = list(
                            sorted(
                                set(
                                    map(
                                        lambda c: c.split("[")[0].lstrip().rstrip(),
                                        visible_scene_obj_labels,
                                    )
                                )
                            )
                        )
                        # visible_scene_obj_labels used to filter
                        # objects both visible and in scene
                        text_labels = visible_obj_labels.copy()
                    text_labels = set(text_labels)
                    # create saliency maps necessary for descriptions
                    if (
                        "descriptions" in f["data"]
                        and len(np.array(f["data/descriptions/spatial_relation_name"]))
                        > 0
                    ):
                        target_obj_names = np.array(
                            f["data/descriptions/target_obj_name"]
                        ).astype(str)
                        reference_obj_names = np.array(
                            f["data/descriptions/reference_obj_name"]
                        ).astype(str)
                        spatial_relation_names = np.array(
                            f["data/descriptions/spatial_relation_name"]
                        ).astype(str)
                        text_labels = text_labels.union(
                            target_obj_names.tolist() + reference_obj_names.tolist()
                        )
                        # gradcam for clip spatial
                        # Build "target <relation> a reference" description strings.
                        descriptions = ""
                        for desc_part in [
                            target_obj_names,
                            " ",
                            spatial_relation_names,
                            " a ",
                            reference_obj_names,
                        ]:
                            descriptions = np.char.add(descriptions, desc_part)
                        text_labels = text_labels.union(descriptions)
                        # descriptions with synonyms
                        descriptions = ""
                        for desc_part in [
                            np.array(
                                list(
                                    map(
                                        lambda x: x
                                        if x not in synonyms.keys()
                                        else synonyms[x],
                                        target_obj_names,
                                    )
                                )
                            ),
                            " ",
                            spatial_relation_names,
                            " a ",
                            np.array(
                                list(
                                    map(
                                        lambda x: x
                                        if x not in synonyms.keys()
                                        else synonyms[x],
                                        reference_obj_names,
                                    )
                                )
                            ),
                        ]:
                            descriptions = np.char.add(descriptions, desc_part)
                        text_labels = text_labels.union(descriptions)
                    text_labels = set(
                        map(lambda c: c.split("[")[0].lstrip().rstrip(), text_labels)
                    )
                    # do synonyms
                    text_labels = text_labels.union(
                        map(
                            lambda text_label: synonyms[text_label],
                            filter(
                                lambda text_label: text_label in synonyms, text_labels
                            ),
                        )
                    )
                    for remove_label in {"unlabelled", "empty", "out of bounds"}:
                        if remove_label in text_labels:
                            text_labels.remove(remove_label)
                    text_labels = list(sorted(text_labels))
                    rgb_inputs = {"rgb": np.array(f["rgb"][f["data"]["rgb"][0]][0])}
                    if (
                        "domain_randomized_rgb" in f["data"]
                        and len(np.array(f["data/domain_randomized_rgb"])[0].shape) > 1
                    ):
                        rgb_inputs["domain_randomized_rgb"] = np.array(
                            f["data/domain_randomized_rgb"]
                        )[0]
                    write_to_hdf5(
                        f["data"],
                        key="visible_scene_obj_labels",
                        value=np.array(visible_scene_obj_labels).astype("S"),
                        replace=replace,
                    )
                    # Hand the scene to a free worker; the actor handle comes back
                    # as the task's return value (see the wait/get block below).
                    clip_wrapper = available_clip_wrappers.pop()
                    tasks.append(
                        generate_saliency_helper.remote(
                            clip_wrapper=clip_wrapper,
                            scene_path=scene_path,
                            rgb_inputs=rgb_inputs,
                            text_labels=text_labels,
                            prompts=prompts,
                            replace=replace,
                        )
                    )
        except Exception as e:
            print(e)
            print(scene_path, "invalid hdf5 file")
        if len(available_clip_wrappers) == 0:
            # All workers busy: block until one task completes and reclaim its actor.
            readies, tasks = ray.wait(tasks, num_returns=1)
            num_readies = len(readies)
            try:
                available_clip_wrappers.extend(ray.get(readies))
            except Exception as e:
                # Worker died; replace the lost actor(s) with fresh ones.
                print(e)
                available_clip_wrappers.extend(
                    [
                        wrapper_actor_cls.options(
                            num_gpus=num_cuda_devices / num_processes
                        ).remote(clip_model_type="ViT-B/32", device="cuda")
                        for _ in range(num_readies)
                    ]
                )
    ray.get(tasks)
@app.command()
def image(
    file_path: str = typer.Argument(
        default="matterport.png", help="path of image file"
    ),
    labels: List[str] = typer.Option(
        default=[
            "basketball jersey",
            "nintendo switch",
            "television",
            "ping pong table",
            "vase",
            "fireplace",
            "abstract painting of a vespa",
            "carpet",
            "wall",
        ],
        help='list of object categories (e.g.: "nintendo switch")',
    ),
    prompts: List[str] = typer.Option(
        default=["a photograph of a {} in a home."],
        help="prompt template to use with CLIP.",
    ),
):
    """
    Generates a multi-scale relevancy for image at `file_path`.

    Saves a 3x3 grid of per-label relevancy overlays to grads.png and shows it.
    """
    img = np.array(imageio.imread(file_path))
    assert img.dtype == np.uint8
    h, w, c = img.shape
    start = time()
    # get_clip_saliency returns (saliency maps, text features); only maps are needed.
    grads = ClipWrapper.get_clip_saliency(
        img=img,
        text_labels=np.array(labels),
        prompts=prompts,
        **saliency_configs["ours"](h),
    )[0]
    print(f"get gradcam took {float(time() - start)} seconds", grads.shape)
    # Subtract the across-label mean so each map highlights label-specific regions.
    grads -= grads.mean(axis=0)
    grads = grads.cpu().numpy()
    fig, axes = plt.subplots(3, 3)
    axes = axes.flatten()
    # vmin/vmax: empirically chosen relevancy range for the jet color overlay.
    vmin = 0.002
    cmap = plt.get_cmap("jet")
    vmax = 0.008
    for ax, label_grad, label in zip(axes, grads, labels):
        ax.axis("off")
        ax.imshow(img)
        ax.set_title(label, fontsize=12)
        grad = np.clip((label_grad - vmin) / (vmax - vmin), a_min=0.0, a_max=1.0)
        colored_grad = cmap(grad)
        # Low-relevancy pixels become more transparent (inverted alpha).
        grad = 1 - grad
        colored_grad[..., -1] = grad * 0.7
        ax.imshow(colored_grad)
    plt.tight_layout(pad=0)
    plt.savefig("grads.png")
    print("dumped relevancy to grads.png")
    plt.show()
if __name__ == "__main__":
    # Typer CLI entry point: exposes the `dataset` and `image` subcommands.
    app()
| 18,591 | 39.77193 | 88 | py |
semantic-abstraction | semantic-abstraction-main/fusion.py | # Copyright (c) 2018 Andy Zeng
# Source: https://github.com/andyzeng/tsdf-fusion-python/blob/master/fusion.py
# BSD 2-Clause License
# Copyright (c) 2019, Princeton University
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from numba import njit, prange
from skimage import measure
class TSDFVolume:
    """Volumetric TSDF Fusion of RGB-D Images.

    Maintains a truncated signed distance field plus a packed per-voxel color
    volume (b * 256^2 + g * 256 + r stored as a single float), and integrates
    RGB-D frames into it with a running weighted average.
    """

    def __init__(self, vol_bnds, voxel_size):
        """Constructor.

        Args:
            vol_bnds (ndarray): An ndarray of shape (3, 2). Specifies the
                xyz bounds (min/max) in meters.
            voxel_size (float): The volume discretization in meters.
        """
        vol_bnds = np.asarray(vol_bnds)
        assert vol_bnds.shape == (3, 2), "[!] `vol_bnds` should be of shape (3, 2)."
        assert (vol_bnds[:, 0] < vol_bnds[:, 1]).all()

        # Define voxel volume parameters
        self._vol_bnds = vol_bnds
        self._voxel_size = float(voxel_size)
        self._trunc_margin = 5 * self._voxel_size  # truncation on SDF
        self._color_const = 256 * 256

        # Adjust volume bounds and ensure C-order contiguous
        self._vol_dim = (
            np.ceil((self._vol_bnds[:, 1] - self._vol_bnds[:, 0]) / self._voxel_size)
            .copy(order="C")
            .astype(int)
        )
        self._vol_bnds[:, 1] = self._vol_bnds[:, 0] + self._vol_dim * self._voxel_size
        self._vol_origin = self._vol_bnds[:, 0].copy(order="C").astype(np.float32)

        # Initialize pointers to voxel volume in CPU memory
        # Assume all unobserved regions are occupied (TSDF = -1)
        self._tsdf_vol_cpu = -np.ones(self._vol_dim).astype(np.float32)
        # for computing the cumulative moving average of observations per voxel
        self._weight_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
        # packed per-voxel color: b * 256^2 + g * 256 + r
        self._color_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)

        # Get voxel grid coordinates
        xv, yv, zv = np.meshgrid(
            range(self._vol_dim[0]),
            range(self._vol_dim[1]),
            range(self._vol_dim[2]),
            indexing="ij",
        )
        self.vox_coords = (
            np.concatenate(
                [xv.reshape(1, -1), yv.reshape(1, -1), zv.reshape(1, -1)], axis=0
            )
            .astype(int)
            .T
        )

    @staticmethod
    @njit(parallel=True)
    def vox2world(vol_origin, vox_coords, vox_size):
        """Convert voxel grid coordinates to world coordinates."""
        vol_origin = vol_origin.astype(np.float32)
        vox_coords = vox_coords.astype(np.float32)
        cam_pts = np.empty_like(vox_coords, dtype=np.float32)
        for i in prange(vox_coords.shape[0]):
            for j in range(3):
                cam_pts[i, j] = vol_origin[j] + (vox_size * vox_coords[i, j])
        return cam_pts

    @staticmethod
    @njit(parallel=True)
    def cam2pix(cam_pts, intr):
        """Convert camera coordinates to pixel coordinates."""
        intr = intr.astype(np.float32)
        fx, fy = intr[0, 0], intr[1, 1]
        cx, cy = intr[0, 2], intr[1, 2]
        pix = np.empty((cam_pts.shape[0], 2), dtype=np.int64)
        for i in prange(cam_pts.shape[0]):
            pix[i, 0] = int(np.round((cam_pts[i, 0] * fx / cam_pts[i, 2]) + cx))
            pix[i, 1] = int(np.round((cam_pts[i, 1] * fy / cam_pts[i, 2]) + cy))
        return pix

    @staticmethod
    @njit(parallel=True)
    def integrate_tsdf(tsdf_vol, dist, w_old, obs_weight):
        """Integrate the TSDF volume (cumulative weighted average per voxel)."""
        tsdf_vol_int = np.empty_like(tsdf_vol, dtype=np.float32)
        w_new = np.empty_like(w_old, dtype=np.float32)
        for i in prange(len(tsdf_vol)):
            w_new[i] = w_old[i] + obs_weight
            tsdf_vol_int[i] = (w_old[i] * tsdf_vol[i] + obs_weight * dist[i]) / w_new[i]
        return tsdf_vol_int, w_new

    def integrate(self, color_im, depth_im, cam_intr, cam_pose, obs_weight=1.0):
        """Integrate an RGB-D frame into the TSDF volume.

        Args:
            color_im (ndarray): An RGB image of shape (H, W, 3).
            depth_im (ndarray): A depth image of shape (H, W).
            cam_intr (ndarray): The camera intrinsics matrix of shape (3, 3).
            cam_pose (ndarray): The camera pose (i.e. extrinsics) of shape (4, 4).
            obs_weight (float): The weight to assign for the current observation. A higher
                value
        """
        im_h, im_w = depth_im.shape

        # Fold RGB color image into a single channel image (b*256^2 + g*256 + r)
        color_im = color_im.astype(np.float32)
        color_im = np.floor(
            color_im[..., 2] * self._color_const
            + color_im[..., 1] * 256
            + color_im[..., 0]
        )

        # Convert voxel grid coordinates to pixel coordinates
        cam_pts = self.vox2world(self._vol_origin, self.vox_coords, self._voxel_size)
        cam_pts = rigid_transform(cam_pts, np.linalg.inv(cam_pose))
        pix_z = cam_pts[:, 2]
        pix = self.cam2pix(cam_pts, cam_intr)
        pix_x, pix_y = pix[:, 0], pix[:, 1]

        # Eliminate pixels outside view frustum
        valid_pix = np.logical_and(
            pix_x >= 0,
            np.logical_and(
                pix_x < im_w,
                np.logical_and(pix_y >= 0, np.logical_and(pix_y < im_h, pix_z > 0)),
            ),
        )
        depth_val = np.zeros(pix_x.shape)
        depth_val[valid_pix] = depth_im[pix_y[valid_pix], pix_x[valid_pix]]

        # Integrate TSDF: only voxels within the truncation margin of a measured
        # surface (and with a valid depth reading) are updated.
        depth_diff = depth_val - pix_z
        valid_pts = np.logical_and(depth_val > 0, depth_diff >= -self._trunc_margin)
        dist = np.maximum(-1, np.minimum(1, depth_diff / self._trunc_margin))
        valid_vox_x = self.vox_coords[valid_pts, 0]
        valid_vox_y = self.vox_coords[valid_pts, 1]
        valid_vox_z = self.vox_coords[valid_pts, 2]
        w_old = self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
        tsdf_vals = self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
        valid_dist = dist[valid_pts]
        tsdf_vol_new, w_new = self.integrate_tsdf(
            tsdf_vals, valid_dist, w_old, obs_weight
        )
        self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = w_new
        self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = tsdf_vol_new

        # Integrate color: unpack old and new colors, average per channel, repack.
        old_color = self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
        old_b = np.floor(old_color / self._color_const)
        old_g = np.floor((old_color - old_b * self._color_const) / 256)
        old_r = old_color - old_b * self._color_const - old_g * 256
        new_color = color_im[pix_y[valid_pts], pix_x[valid_pts]]
        new_b = np.floor(new_color / self._color_const)
        new_g = np.floor((new_color - new_b * self._color_const) / 256)
        new_r = new_color - new_b * self._color_const - new_g * 256
        new_b = np.minimum(
            255.0, np.round((w_old * old_b + obs_weight * new_b) / w_new)
        )
        new_g = np.minimum(
            255.0, np.round((w_old * old_g + obs_weight * new_g) / w_new)
        )
        new_r = np.minimum(
            255.0, np.round((w_old * old_r + obs_weight * new_r) / w_new)
        )
        self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = (
            new_b * self._color_const + new_g * 256 + new_r
        )

    def get_volume(self):
        """Return (tsdf volume, unpacked color volume of shape (3, X, Y, Z) uint8)."""
        # Unpack the single-channel packed color into per-channel uint8 planes.
        color_vol = np.zeros([3] + list(self._color_vol_cpu.shape)).astype(np.uint8)
        color_vol[2, ...] = np.floor(self._color_vol_cpu / self._color_const)
        color_vol[1, ...] = np.floor(
            (self._color_vol_cpu - color_vol[2, ...] * self._color_const) / 256
        )
        color_vol[0, ...] = (
            self._color_vol_cpu
            - color_vol[2, ...] * self._color_const
            - color_vol[1, ...] * 256
        )
        return self._tsdf_vol_cpu, color_vol

    def get_point_cloud(self):
        """Extract a colored point cloud from the voxel volume.

        Returns:
            pc (ndarray): (N, 6) array of [x, y, z, r, g, b] rows.
        """
        # Marching cubes on the raw TSDF.
        # NOTE: marching_cubes_lewiner was renamed to marching_cubes in skimage >= 0.19.
        verts = measure.marching_cubes_lewiner(self._tsdf_vol_cpu, level=0)[0]
        verts_ind = np.round(verts).astype(int)
        verts = verts * self._voxel_size + self._vol_origin

        # Decode vertex colors from the PACKED color volume. (The previous
        # implementation indexed get_volume()'s unpacked (3, X, Y, Z) array with
        # voxel coordinates on axis 0, which raises IndexError for x >= 3.)
        rgb_vals = self._color_vol_cpu[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]
        colors_b = np.floor(rgb_vals / self._color_const)
        colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)
        colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256
        colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T
        colors = colors.astype(np.uint8)

        pc = np.hstack([verts, colors])
        return pc

    def get_mesh(self):
        """Compute a mesh from the voxel volume using marching cubes.

        Returns:
            (verts, faces, norms, colors): mesh vertices in world coordinates,
            triangle indices, vertex normals, and uint8 RGB vertex colors.
        """
        # NOTE: marching_cubes_lewiner was renamed to marching_cubes in skimage >= 0.19.
        verts, faces, norms, vals = measure.marching_cubes_lewiner(
            self._tsdf_vol_cpu, level=0
        )
        verts_ind = np.round(verts).astype(int)
        # voxel grid coordinates to world coordinates
        verts = verts * self._voxel_size + self._vol_origin

        # Decode vertex colors from the PACKED color volume (see get_point_cloud).
        rgb_vals = self._color_vol_cpu[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]
        colors_b = np.floor(rgb_vals / self._color_const)
        colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)
        colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256
        colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T
        colors = colors.astype(np.uint8)
        return verts, faces, norms, colors
def rigid_transform(xyz, transform):
    """Apply a 4x4 (or 3x4) rigid transform to an (N, 3) pointcloud."""
    ones = np.ones((xyz.shape[0], 1), dtype=np.float32)
    homogeneous = np.hstack([xyz, ones])
    # (transform @ pts_h.T).T == pts_h @ transform.T
    return (homogeneous @ transform.T)[:, :3]
def get_view_frustum(depth_im, cam_intr, cam_pose):
    """Get the 5 corners (camera center + 4 far-plane corners) of the view frustum.

    Returns a (3, 5) array of world-frame points.
    """
    im_h, im_w = depth_im.shape[0], depth_im.shape[1]
    far = np.max(depth_im)
    # Pixel coordinates of the optical center and the four image corners,
    # back-projected through the pinhole model at the maximum observed depth.
    depths = np.array([0, far, far, far, far])
    px = np.array([0, 0, 0, im_w, im_w])
    py = np.array([0, 0, im_h, 0, im_h])
    view_frust_pts = np.array(
        [
            (px - cam_intr[0, 2]) * depths / cam_intr[0, 0],
            (py - cam_intr[1, 2]) * depths / cam_intr[1, 1],
            depths,
        ]
    )
    # Camera frame -> world frame.
    view_frust_pts = rigid_transform(view_frust_pts.T, cam_pose).T
    return view_frust_pts
def meshwrite(filename, verts, faces, norms, colors):
    """Save a 3D triangle mesh to an ASCII polygon .ply file.

    Args:
        filename: destination path (suffix should be .ply).
        verts: (N, 3) vertex positions.
        faces: (M, >=3) triangle vertex indices (only the first 3 are written).
        norms: (N, 3) per-vertex normals.
        colors: (N, 3) per-vertex uint8 RGB.
    """
    header = [
        "ply",
        "format ascii 1.0",
        "element vertex %d" % verts.shape[0],
        "property float x",
        "property float y",
        "property float z",
        "property float nx",
        "property float ny",
        "property float nz",
        "property uchar red",
        "property uchar green",
        "property uchar blue",
        "element face %d" % faces.shape[0],
        "property list uchar int vertex_index",
        "end_header",
    ]
    with open(filename, "w") as ply_file:
        ply_file.write("\n".join(header) + "\n")
        # Vertex list: position, normal, then color per line.
        for v, vn, c in zip(verts, norms, colors):
            ply_file.write(
                "%f %f %f %f %f %f %d %d %d\n"
                % (v[0], v[1], v[2], vn[0], vn[1], vn[2], c[0], c[1], c[2])
            )
        # Face list: each triangle prefixed with its vertex count (3).
        for face in faces:
            ply_file.write("3 %d %d %d\n" % (face[0], face[1], face[2]))
def pcwrite(filename, xyzrgb):
    """Save a colored point cloud to an ASCII polygon .ply file.

    Args:
        filename: destination path (suffix should be .ply).
        xyzrgb: (N, 6) array; columns are x, y, z, r, g, b.
    """
    xyz = xyzrgb[:, :3]
    rgb = xyzrgb[:, 3:].astype(np.uint8)

    header = [
        "ply",
        "format ascii 1.0",
        "element vertex %d" % xyz.shape[0],
        "property float x",
        "property float y",
        "property float z",
        "property uchar red",
        "property uchar green",
        "property uchar blue",
        "end_header",
    ]
    with open(filename, "w") as ply_file:
        ply_file.write("\n".join(header) + "\n")
        for pt, c in zip(xyz, rgb):
            ply_file.write(
                "%f %f %f %d %d %d\n" % (pt[0], pt[1], pt[2], c[0], c[1], c[2])
            )
| 14,231 | 39.31728 | 88 | py |
semantic-abstraction | semantic-abstraction-main/point_cloud.py | import pybullet_data
import numpy as np
from numba import njit, prange
import pybullet as p
import matplotlib.pyplot as plt
def transform_pointcloud(xyz_pts, rigid_transform):
    """Apply rigid transformation to 3D pointcloud.

    Args:
        xyz_pts: Nx3 float array of 3D points
        rigid_transform: 3x4 or 4x4 float array defining a rigid transformation (rotation and translation)

    Returns:
        xyz_pts: Nx3 float array of transformed 3D points
    """
    rotation = rigid_transform[:3, :3]
    translation = rigid_transform[:3, 3]
    # (R @ pts.T).T == pts @ R.T; translation broadcasts over rows.
    return xyz_pts @ rotation.T + translation[None, :]
def filter_pts_bounds(xyz, bounds):
    """Boolean mask of which points lie inside the axis-aligned box `bounds`.

    Args:
        xyz: (N, >=3) point array; only the first three columns are tested.
        bounds: (2, 3) array of [[min_x, min_y, min_z], [max_x, max_y, max_z]].
    """
    lower, upper = np.asarray(bounds)[0], np.asarray(bounds)[1]
    inside = (xyz[:, :3] >= lower) & (xyz[:, :3] <= upper)
    return inside.all(axis=1)
def get_pointcloud(depth_img, color_img, cam_intr, cam_pose=None):
    """Get 3D pointcloud from depth image.

    Args:
        depth_img: HxW float array of depth values in meters aligned with color_img
        color_img: HxWx3 uint8 array of color image
        cam_intr: 3x3 float array of camera intrinsic parameters
        cam_pose: (optional) 3x4 float array of camera pose matrix

    Returns:
        cam_pts: Nx3 float array of 3D points in camera/world coordinates
        color_pts: Nx3 uint8 array of color points
    """
    img_h, img_w = depth_img.shape[0], depth_img.shape[1]

    # Back-project every pixel through the pinhole model.
    pixel_x, pixel_y = np.meshgrid(
        np.linspace(0, img_w - 1, img_w), np.linspace(0, img_h - 1, img_h)
    )
    x = (pixel_x - cam_intr[0, 2]) * (depth_img / cam_intr[0, 0])
    y = (pixel_y - cam_intr[1, 2]) * (depth_img / cam_intr[1, 1])
    cam_pts = np.stack([x, y, depth_img], axis=-1).reshape(-1, 3)

    # Optionally lift camera-frame points into the world frame.
    if cam_pose is not None:
        cam_pts = transform_pointcloud(cam_pts, cam_pose)

    color_pts = None if color_img is None else color_img.reshape(-1, 3)
    return cam_pts, color_pts
def project_pts_to_2d(pts, camera_view_matrix, camera_intrisic):
    """Project points to 2D.

    Args:
        pts: Nx3 float array of 3D points in world coordinates.
        camera_view_matrix: 4x4 float array. A wrd2cam transformation defining camera's rotation and translation.
        camera_intrisic: 3x3 float array. [ [f,0,0],[0,f,0],[0,0,1] ]. f is focal length.

    Returns:
        coord_2d: Nx3 float array of 2D pixel. (h, w, d) the last one is depth
    """
    # World frame -> camera frame.
    cam_pts = transform_pointcloud(pts, camera_view_matrix[0:3, :])
    # Flip the y and z axes to match the image-plane convention.
    flip_yz = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0]])
    cam_pts = transform_pointcloud(cam_pts, flip_yz)  # Nx3
    projected = np.dot(camera_intrisic, cam_pts.T)  # 3xN
    # Perspective divide, then keep the camera-frame depth in the last row.
    projected[0:2, :] = projected[0:2, :] / projected[2, :]
    projected[2, :] = cam_pts[:, 2]
    # Reorder from (w, h, depth) to (h, w, depth) and return rows per point.
    return np.stack([projected[1], projected[0], projected[2]], axis=1)
def check_pts_in_frustum(xyz_pts, depth, cam_pose, cam_intr):
    """Boolean mask of which world-frame points project inside the depth image.

    Args:
        xyz_pts: (N, 3) world-frame points.
        depth: (H, W) depth image defining the image extent.
        cam_pose: 4x4 cam2wrd pose (inverted here to go world -> camera).
        cam_intr: 3x3 pinhole intrinsics.
    """
    # World frame -> camera frame.
    cam_pts = transform_pointcloud(
        xyz_pts=xyz_pts, rigid_transform=np.linalg.inv(cam_pose)
    )
    x = cam_pts[..., 0]
    y = cam_pts[..., 1]
    z = cam_pts[..., 2]
    # Camera frame -> pixel coordinates.
    u = (cam_intr[0, 0] / z) * x + cam_intr[0, 2]
    v = (cam_intr[1, 1] / z) * y + cam_intr[1, 2]
    h, w = depth.shape
    # Inside the image rectangle and strictly in front of the camera.
    in_frustum = (u >= 0) & (u < w) & (v >= 0) & (v < h) & (z > 0)
    return in_frustum.reshape(-1)
def meshwrite(filename, verts, colors, faces=None):
    """Save a 3D mesh to an ASCII polygon .ply file.

    Args:
        filename: path to the output mesh file (suffix should be .ply).
        verts: (N, 3) vertex coordinates.
        colors: (N, 3) per-vertex RGB values (uint8 range).
        faces: optional (M, 4) quad face indices.
    """
    # FIX: use a context manager so the file handle is released even when
    # a write raises (the previous open()/close() pair leaked on error).
    with open(filename, "w") as ply_file:
        # Write header
        ply_file.write("ply\n")
        ply_file.write("format ascii 1.0\n")
        ply_file.write("element vertex %d\n" % (verts.shape[0]))
        ply_file.write("property float x\n")
        ply_file.write("property float y\n")
        ply_file.write("property float z\n")
        ply_file.write("property uchar red\n")
        ply_file.write("property uchar green\n")
        ply_file.write("property uchar blue\n")
        if faces is not None:
            ply_file.write("element face %d\n" % (faces.shape[0]))
        ply_file.write("end_header\n")
        # Write vertex list: position followed by color.
        for i in range(verts.shape[0]):
            ply_file.write(
                "%f %f %f %d %d %d\n"
                % (
                    verts[i, 0],
                    verts[i, 1],
                    verts[i, 2],
                    colors[i, 0],
                    colors[i, 1],
                    colors[i, 2],
                )
            )
        # Write face list (quads).
        if faces is not None:
            for i in range(faces.shape[0]):
                ply_file.write(
                    "4 %d %d %d %d\n"
                    % (faces[i, 0], faces[i, 1], faces[i, 2], faces[i, 3])
                )
@njit(parallel=True)
def cam2pix(cam_pts, intr):
    """Convert camera coordinates to pixel coordinates.

    Args:
        cam_pts: (N, 3) float array of camera-frame points.
        intr: (3, 3) camera intrinsics matrix.

    Returns:
        (N, 2) int64 array of (x, y) pixel coordinates, rounded to nearest.
    """
    # NOTE: numba-jitted with a parallel prange loop; keep the explicit
    # element-wise form so it stays nopython-compatible.
    intr = intr.astype(np.float32)
    fx, fy = intr[0, 0], intr[1, 1]
    cx, cy = intr[0, 2], intr[1, 2]
    pix = np.empty((cam_pts.shape[0], 2), dtype=np.int64)
    for i in prange(cam_pts.shape[0]):
        # Pinhole projection with perspective divide by z.
        pix[i, 0] = int(np.round((cam_pts[i, 0] * fx / cam_pts[i, 2]) + cx))
        pix[i, 1] = int(np.round((cam_pts[i, 1] * fy / cam_pts[i, 2]) + cy))
    return pix
def compute_empty_mask(
    scene_bounds, depth_img, intrinsic_matrix, extrinsic_matrix, voxel_resolution=20
):
    """Mask out voxels that the depth camera observed as empty space.

    A voxel grid spanning `scene_bounds` starts fully set (1); every voxel
    that projects into the depth image *in front of* the measured depth
    (i.e. the camera saw through it) is cleared to 0.

    Args:
        scene_bounds: (2, 3) array of [lower, upper] world-frame corners.
        depth_img: (H, W) depth image.
        intrinsic_matrix: (3, 3) camera intrinsics.
        extrinsic_matrix: (4, 4) camera-to-world transform (inverted below).
        voxel_resolution: grid resolution per axis.

    Returns:
        (voxel_resolution,)*3 boolean array; True = not observed empty.

    NOTE: contains debug prints and a blocking matplotlib visualization;
    intended for interactive inspection (see the __main__ demo).
    """
    # parts taken from
    # https://github.com/andyzeng/tsdf-fusion-python/blob/3f22a940d90f684145b1f29b1feaa92e09eb1db6/fusion.py#L170
    # start off all empty
    grid_shape = [voxel_resolution] * 3
    mask = np.ones(grid_shape).astype(int)
    # get volume points
    lc = scene_bounds[0]
    uc = scene_bounds[1]
    # get voxel indices
    grid_idxs = np.stack(
        np.meshgrid(*[np.arange(0, dim) for dim in grid_shape]), axis=-1
    )
    # voxel indices to world pts
    idx_scale = np.array(grid_shape) - 1
    scales = (uc - lc) / idx_scale
    offsets = lc
    grid_points = grid_idxs.astype(float) * scales + offsets
    flattened_grid_points = grid_points.reshape(-1, 3)
    print(flattened_grid_points.min(axis=0), flattened_grid_points.max(axis=0))
    # world pts to camera centric frame pts
    xyz_h = np.hstack(
        [
            flattened_grid_points,
            np.ones((len(flattened_grid_points), 1), dtype=np.float32),
        ]
    )
    xyz_t_h = np.dot(np.linalg.inv(extrinsic_matrix), xyz_h.T).T
    cam_pts = xyz_t_h[:, :3]
    pix_z = cam_pts[:, 2]
    pix = cam2pix(cam_pts, intrinsic_matrix)
    pix_x, pix_y = pix[:, 0], pix[:, 1]
    # FIX: depth images are indexed (row, col) == (h, w); the previous
    # `im_w, im_h = depth_img.shape` swapped the bounds checks for
    # non-square images (pix_x was compared against the image height).
    im_h, im_w = depth_img.shape
    valid_pix = np.logical_and(
        pix_x >= 0,
        np.logical_and(
            pix_x < im_w,
            np.logical_and(pix_y >= 0, np.logical_and(pix_y < im_h, pix_z > 0)),
        ),
    )
    inframe_indices = grid_idxs.reshape(-1, 3)[valid_pix, :]
    # Voxels whose measured depth is farther than the voxel itself were
    # seen through by the camera, i.e. observed as empty.
    observed_indices = inframe_indices[
        (depth_img[pix_y[valid_pix], pix_x[valid_pix]] > pix_z[valid_pix])
    ]
    print("before:", mask.mean(), mask.shape, observed_indices.shape)
    for idx in observed_indices:
        mask[tuple(idx)] = 0
    print(mask.mean())
    print(observed_indices.shape, mask.shape)
    print("after:", mask.mean())
    # Debug visualization (blocks until the window is closed).
    ax = plt.figure().add_subplot(projection="3d")
    ax.voxels(mask)
    plt.show()
    return mask.astype(bool)
def subsample(seg_pts, num_pts, random_state, balanced=True):
    """Sample point indices, optionally class-balanced over semantic ids.

    Args:
        seg_pts: (N,) array of per-point semantic ids.
        num_pts: number of indices to draw (without replacement).
        random_state: np.random.RandomState used for sampling.
        balanced: if True, weight each semantic class equally; otherwise
            sample uniformly.

    Returns:
        (num_pts,) array of sampled indices into seg_pts.
    """
    weights = np.ones(seg_pts.shape).astype(np.float64)
    if balanced:
        class_ids = np.unique(seg_pts)
        n_classes = len(class_ids)
        # Each class gets total mass 1 / n_classes, split evenly among its
        # points, so every class is sampled equally often in expectation.
        for class_id in class_ids:
            members = seg_pts == class_id
            weights[members] = 1.0 / (int((members).sum().item()) * n_classes)
    else:
        weights /= weights.sum()
    return random_state.choice(
        seg_pts.shape[0], size=num_pts, replace=False, p=weights
    )
if __name__ == "__main__":
# TODO change this to filter input sampled points out based on
# view point
from datagen.simulation.asset import make_object, occluder_objects, partnet_objs
from datagen.simulation import Camera
object_keys = [k for k in occluder_objects]
object_def = occluder_objects[object_keys[10]]
p.connect(p.GUI)
p.resetDebugVisualizerCamera(
cameraDistance=4.0,
cameraYaw=270,
cameraPitch=-20,
cameraTargetPosition=(0, 0, 0.4),
)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setRealTimeSimulation(False)
p.resetSimulation()
p.setGravity(0, 0, -9.8)
planeUid = p.loadURDF(fileName="plane.urdf", useFixedBase=True)
occluder_obj = make_object(**object_def)
camera = Camera(position=[-1, 1, 1], lookat=[0, 0, 0.5])
view = camera.get_image(return_pose=True, segmentation_mask=True)
mask = compute_empty_mask(
scene_bounds=np.array([[-1.0, -1.0, -0.1], [1.0, 1.0, 1.9]]),
depth_img=view[1],
intrinsic_matrix=view[-2],
extrinsic_matrix=view[-1],
)
| 9,990 | 33.451724 | 113 | py |
semantic-abstraction | semantic-abstraction-main/summarize.py | import pandas as pd
import rich
import pickle
from dataset import synonyms
import numpy as np
from rich.console import Console
from rich.table import Table
# Semantic classes held out for the "novel class" evaluation split:
# one class name per line in test_semantic_classes.txt (whitespace-trimmed).
test_objs = set(
    map(lambda l: l.rstrip().lstrip(), open("test_semantic_classes.txt", "r"))
)
def summarize_ovssc(metric="voxel32x32x32_iou"):
ssc_approaches = {
"Semantic Aware": pickle.load(
open("models/semaware/ovssc/ovssc_eval_stats.pkl", "rb")
),
"SemAbs + [Chefer et al]": pickle.load(
open("models/chefer_et_al/ovssc/ovssc_eval_stats.pkl", "rb")
),
"Ours": pickle.load(
open(
"models/ours/ovssc/ovssc_eval_stats.pkl",
"rb",
)
),
}
ovssc_stats = {
"approach": [],
"novel rooms": [],
"novel visual": [],
"novel vocab": [],
"novel class": [],
}
pd.options.display.float_format = "{:,.3f}".format
for approach, approach_stats in ssc_approaches.items():
# approach_stats = approach_stats[approach_stats.label!='']
approach_stats["room_id"] = approach_stats["scene_id"].apply(
lambda s: int(s.split("_")[0].split("FloorPlan")[1])
)
approach_stats[metric] = approach_stats[metric] * 100
cutoff_analysis = approach_stats.groupby("cutoff")[[metric]].mean()
best_cutoff = cutoff_analysis[metric].idxmax()
df = approach_stats[approach_stats.cutoff == best_cutoff]
novel_class_mask = df.label.isin(test_objs)
novel_vocab_mask = df.label.isin(synonyms.values())
ovssc_stats["approach"].append(approach)
novel_rooms_df = df[(df.split == "unseen_instances") & (~novel_class_mask)]
mean_per_room = np.array(novel_rooms_df.groupby("room_id")[metric].mean())
ovssc_stats["novel rooms"].append(mean_per_room.mean())
novel_rooms_dr_df = df[
(df.split == "unseen_instances_dr") & (~novel_class_mask)
]
mean_per_room = np.array(novel_rooms_dr_df.groupby("room_id")[metric].mean())
ovssc_stats["novel visual"].append(mean_per_room.mean())
unseen_class_df = df[novel_class_mask]
mean_per_label = unseen_class_df.groupby("label")[metric].mean()
ovssc_stats["novel class"].append(np.array(mean_per_label).mean())
unseen_vocab_df = df[
(df.split == "unseen_instances_synonyms") & novel_vocab_mask
]
mean_per_label = unseen_vocab_df.groupby("label")[metric].mean()
ovssc_stats["novel vocab"].append(np.array(mean_per_label).mean())
ovssc_stats = pd.DataFrame.from_dict(ovssc_stats)
table = Table(title="OVSSC THOR", box=rich.box.MINIMAL_DOUBLE_HEAD)
table.add_column("Approach", justify="left")
table.add_column("Novel Room", justify="right")
table.add_column("Novel Visual", justify="right")
table.add_column("Novel Vocab", justify="right")
table.add_column("Novel Class", justify="right")
for row in ovssc_stats.to_csv().split("\n")[1:-1]:
approach, novel_room, novel_visual, novel_vocab, novel_class = row.split(",")[
1:
]
table.add_row(
approach,
f"{float(novel_room):.01f}",
f"{float(novel_visual):.01f}",
f"{float(novel_vocab):.01f}",
f"{float(novel_class):.01f}",
end_section=approach == "SemAbs + [Chefer et al]",
style="green" if approach == "Ours" else "white",
)
console = Console()
console.print(table)
def summarize_vool(metric="voxel32x32x32_iou"):
vool_approaches = {
"Semantic Aware": pickle.load(
open("models/semaware/vool/vool_eval_stats.pkl", "rb")
),
"ClipSpatial": pickle.load(
open("models/clipspatial/vool/vool_eval_stats.pkl", "rb")
),
"SemAbs + [Chefer et al]": pickle.load(
open("models/chefer_et_al/vool/vool_eval_stats.pkl", "rb")
),
"Ours": pickle.load(open("models/ours/vool/vool_eval_stats.pkl", "rb")),
}
vool_stats = {
"approach": [],
"relation": [],
"novel rooms": [],
"novel visual": [],
"novel vocab": [],
"novel class": [],
}
relations = vool_approaches["Ours"].spatial_relation_name.unique()
for approach in vool_approaches.keys():
approach_stats = vool_approaches[approach]
approach_stats["room_id"] = approach_stats["scene_id"].apply(
lambda s: int(s.split("_")[0].split("FloorPlan")[1])
)
cutoff_analysis = approach_stats.groupby("cutoff")[[metric]].mean()
best_cutoff = cutoff_analysis[metric].idxmax()
approach_stats[metric] = approach_stats[metric] * 100
for relation in relations:
if relation == "[pad]":
continue
df = approach_stats[approach_stats.cutoff == best_cutoff]
df = df[df.spatial_relation_name == relation]
novel_vocab_mask = df.target_obj_name.isin(
synonyms.values()
) | df.reference_obj_name.isin(synonyms.values())
novel_class_mask = df.target_obj_name.isin(
test_objs
) | df.reference_obj_name.isin(test_objs)
vool_stats["approach"].append(approach)
vool_stats["relation"].append(relation)
novel_rooms_df = df[(df.split == "unseen_instances") & (~novel_class_mask)]
mean_per_room = np.array(novel_rooms_df.groupby("room_id")[metric].mean())
vool_stats["novel rooms"].append(np.nanmean(mean_per_room))
novel_rooms_dr_df = df[
(df.split == "unseen_instances_dr") & (~novel_class_mask)
]
mean_per_room = np.array(
novel_rooms_dr_df.groupby("room_id")[metric].mean()
)
vool_stats["novel visual"].append(np.nanmean(mean_per_room))
unseen_class_df = df[novel_class_mask]
vool_stats["novel class"].append(np.nanmean(unseen_class_df[metric]))
unseen_vocab_df = df[
(df.split == "unseen_instances_synonyms") & novel_vocab_mask
]
vool_stats["novel vocab"].append(np.nanmean(unseen_vocab_df[metric]))
vool_stats = pd.DataFrame.from_dict(vool_stats)
for approach_i, approach in enumerate(vool_approaches.keys()):
mean_df = pd.DataFrame.from_dict(
{
"approach": [approach],
"relation": ["mean"],
**{
split: [
np.array(
vool_stats[(vool_stats.approach == approach)][[split]]
).mean()
]
for split in [
"novel rooms",
"novel visual",
"novel vocab",
"novel class",
]
},
}
)
vool_stats = pd.concat(
[
vool_stats.iloc[0 : (approach_i + 1) * 6 + approach_i],
mean_df,
vool_stats.iloc[(approach_i + 1) * 6 + approach_i :],
]
)
table = Table(title="FULL VOOL THOR", box=rich.box.MINIMAL_DOUBLE_HEAD)
table.add_column("Approach", justify="left")
table.add_column("Spatial Relation", justify="left")
table.add_column("Novel Room", justify="right")
table.add_column("Novel Visual", justify="right")
table.add_column("Novel Vocab", justify="right")
table.add_column("Novel Class", justify="right")
last_approach = ""
for row in vool_stats.to_csv().split("\n")[1:-1]:
(
approach,
spatial_relation,
novel_room,
novel_visual,
novel_vocab,
novel_class,
) = row.split(",")[1:]
table.add_row(
approach if approach != last_approach else "",
spatial_relation,
f"{float(novel_room):.01f}",
f"{float(novel_visual):.01f}",
f"{float(novel_vocab):.01f}",
f"{float(novel_class):.01f}",
end_section=spatial_relation == "mean",
style=("green" if approach == "Ours" else "white"),
)
last_approach = approach
console = Console()
console.print(table)
def summarize_nyuv2(metric="voxel60x60x60_iou"):
ssc_approaches = {
"Ours (Supervised)": pickle.load(
open(
"models/ours/ovssc/ovssc_eval_stats_supervised_nyu_merged.pkl",
"rb",
)
),
"Ours (Zeroshot)": pickle.load(
open(
"models/ours/ovssc/ovssc_eval_stats_zs_nyu_merged.pkl",
"rb",
)
),
}
classes = [
"ceiling",
"floor",
"wall",
"window",
"chair",
"bed",
"sofa",
"table",
"tvs",
"furn",
"objs",
"mean",
]
table = Table(title="OVSSC NYU", box=rich.box.MINIMAL_DOUBLE_HEAD)
table.add_column("Approach", justify="left")
for c in classes:
table.add_column(c.title(), justify="right")
for approach, approach_stats in ssc_approaches.items():
approach_stats[metric] = approach_stats[metric] * 100
cutoff_analysis = approach_stats.groupby("cutoff")[[metric]].mean()
best_cutoff = cutoff_analysis[metric].idxmax()
df = approach_stats[approach_stats.cutoff == best_cutoff]
row = [approach]
for c in classes:
if c != "mean":
row.append(f"{df[df.label == c][metric].mean():.01f}")
else:
row.append(
f'{np.array(df.groupby("label")[metric].mean()).mean():.01f}'
)
table.add_row(
*row,
end_section=approach == "Ours (Supervised)",
style="green" if approach == "Ours (Zeroshot)" else "white",
)
console = Console()
console.print(table)
if __name__ == "__main__":
summarize_ovssc()
summarize_vool()
summarize_nyuv2()
| 10,203 | 36.105455 | 87 | py |
semantic-abstraction | semantic-abstraction-main/train_vool.py | from typing import Dict, Tuple, Union
import numpy as np
from dataset import ObjectLocalizationDataset
from net import (
SemAbsVOOL,
ClipSpatialVOOL,
SemanticAwareVOOL,
)
import utils
from torch.nn.functional import binary_cross_entropy_with_logits
import torch
import pandas as pd
def get_detailed_stats(
    prediction,
    gt_label,
    xyz_pts,
    scene_ids,
    target_obj_names,
    reference_obj_names,
    spatial_relation_names,
    scene_bounds,
    ignore_pts,
    detailed_analysis=False,
    eval_device="cuda",
    **kwargs,
):
    """Build a per-(scene, description) dataframe of VOOL eval metrics.

    Computes point-level precision/recall/IoU plus the same metrics on a
    32^3 voxelization (and additionally 64^3 when `detailed_analysis`).
    Rows whose spatial relation is the "[pad]" placeholder get NaN metrics.

    Args:
        prediction: bool tensor, shape (num_scenes, num_descs, num_points).
        gt_label: bool tensor of the same shape.
        xyz_pts: per-point coordinates, indexed the same way along the first
            two dims (trailing dims consumed by utils.voxelize_points).
        scene_ids: per-scene identifiers, repeated per description.
        target_obj_names / reference_obj_names / spatial_relation_names:
            per-description metadata, transposed from (num_descs, num_scenes).
        scene_bounds: world-space bounds used for voxelization.
        ignore_pts: bool tensor marking points excluded from all metrics.
        detailed_analysis: also compute 64^3 voxel metrics.
        eval_device: device on which metrics are computed.

    Returns:
        pd.DataFrame with one row per (scene, description).
    """
    num_scenes, num_descs = gt_label.shape[:2]
    retvals = {
        "scene_id": np.array([[scene_id] * num_descs for scene_id in scene_ids])
        .reshape(-1)
        .tolist(),
        "target_obj_name": np.array(target_obj_names).T.reshape(-1).tolist(),
        "reference_obj_name": np.array(reference_obj_names).T.reshape(-1).tolist(),
        "spatial_relation_name": np.array(spatial_relation_names)
        .T.reshape(-1)
        .tolist(),
    }
    # Point-level metrics.
    retvals.update(
        {
            f"point_{k}": v
            for k, v in utils.prediction_analysis(
                prediction=prediction.to(eval_device),
                label=gt_label.to(eval_device),
                ignore=ignore_pts.to(eval_device),
            ).items()
        }
    )
    # Voxelize in chunks of 10 descriptions to bound peak memory; empty
    # trailing chunks are skipped via the shape product guard.
    num_desc_b = 10
    outputs = []
    for i in np.arange(0, num_descs + num_desc_b + 1, num_desc_b):
        if np.prod(prediction[:, i : i + num_desc_b].shape) == 0:
            continue
        outputs.append(
            utils.voxelize_points(
                prediction=prediction[:, i : i + num_desc_b],
                label=gt_label[:, i : i + num_desc_b],
                xyz_pts=xyz_pts[:, i : i + num_desc_b],
                voxel_shape=(32, 32, 32),
                scene_bounds=scene_bounds,
                ignore_pts=ignore_pts[:, i : i + num_desc_b],
                device=eval_device,
            )
        )
    # Re-assemble the chunked voxelizations along the description dim.
    voxelized_pts = {
        k: torch.cat([output[k] for output in outputs], dim=1)
        for k in outputs[0].keys()
    }
    retvals.update(
        {
            "voxel32x32x32_" + k: v
            for k, v in utils.prediction_analysis(
                **{k: v.to(eval_device) for k, v in voxelized_pts.items()}
            ).items()
        }
    )
    if detailed_analysis:
        # Same pipeline at a finer 64^3 resolution.
        outputs = []
        for i in np.arange(0, num_descs + num_desc_b + 1, num_desc_b):
            if np.prod(prediction[:, i : i + num_desc_b].shape) == 0:
                continue
            outputs.append(
                utils.voxelize_points(
                    prediction=prediction[:, i : i + num_desc_b],
                    label=gt_label[:, i : i + num_desc_b],
                    xyz_pts=xyz_pts[:, i : i + num_desc_b],
                    voxel_shape=(64, 64, 64),
                    scene_bounds=scene_bounds,
                    ignore_pts=ignore_pts[:, i : i + num_desc_b],
                    device=eval_device,
                )
            )
        voxelized_pts = {
            k: torch.cat([output[k] for output in outputs], dim=1)
            for k in outputs[0].keys()
        }
        retvals.update(
            {
                "voxel64x64x64_" + k: v
                for k, v in utils.prediction_analysis(
                    **{k: v.to(eval_device) for k, v in voxelized_pts.items()}
                ).items()
            }
        )
    # Padding descriptions carry no real labels; blank out their metrics.
    # NOTE(review): np.NAN was removed in numpy 2.0; use np.nan if upgrading.
    for i, spatial_relation in enumerate(
        np.array(spatial_relation_names).T.reshape(-1)
    ):
        if spatial_relation == "[pad]":  # skip padding classes
            for k in retvals.keys():
                if "voxel" in k or "point" in k:
                    retvals[k][i] = np.NAN
    return pd.DataFrame.from_dict(retvals)
def get_losses(
    net, batch: dict, cutoffs=[-2.0], balance_positive_negative: bool = False, **kwargs
) -> Tuple[Dict[str, Union[float, torch.Tensor]], pd.DataFrame]:
    """Run the network on a batch and compute the BCE loss plus eval stats.

    NOTE(review): the mutable default `cutoffs=[-2.0]` is never mutated in
    this function, so it is safe — treat it as read-only.

    Returns:
        (stats, detailed_stats): scalar stats dict (loss, accuracy, mean
        ious) and the per-sample dataframe from get_detailed_stats, with
        one copy per logit cutoff.
    """
    stats = {}
    batch_size, total_num_descs, num_pts = batch["output_label_pts"].shape
    if num_pts <= 500000:
        outputs = net(**batch)
    else:
        # Large point clouds: run one description at a time to avoid
        # exhausting GPU memory, then concatenate along the desc dim.
        num_descs = 1
        # probably CUDA OOM
        outputs = torch.cat(
            [
                net(
                    **{
                        **batch,
                        "input_target_saliency_pts": batch["input_target_saliency_pts"][
                            :, desc_i * num_descs : (desc_i + 1) * num_descs, ...
                        ],
                        "input_reference_saliency_pts": batch[
                            "input_reference_saliency_pts"
                        ][:, desc_i * num_descs : (desc_i + 1) * num_descs, ...],
                        "input_description_saliency_pts": batch[
                            "input_description_saliency_pts"
                        ][:, desc_i * num_descs : (desc_i + 1) * num_descs, ...],
                        "output_xyz_pts": batch["output_xyz_pts"][
                            :, desc_i * num_descs : (desc_i + 1) * num_descs, ...
                        ],
                        "spatial_relation_name": (
                            np.array(batch["spatial_relation_name"])
                            .T[:, desc_i * num_descs : (desc_i + 1) * num_descs]
                            .T
                        ),
                    }
                )
                for desc_i in range(total_num_descs // num_descs + 1)
                if np.prod(
                    batch["output_xyz_pts"][
                        :, desc_i * num_descs : (desc_i + 1) * num_descs, ...
                    ].shape
                )
                > 0
            ],
            dim=1,
        )
    padding_mask = torch.from_numpy(
        np.array(batch["spatial_relation_name"]).T == "[pad]"
    ).bool()
    ignore_pts_mask = torch.zeros_like(outputs).bool()
    # ignore all padding labels
    ignore_pts_mask[padding_mask] = True
    # ignore all points out of bounds
    ignore_pts_mask = torch.logical_or(ignore_pts_mask, batch["out_of_bounds_pts"])
    stats["loss"] = binary_cross_entropy_with_logits(
        outputs,
        batch["output_label_pts"],
        weight=utils.get_bce_weight(
            output_label_pts=batch["output_label_pts"],
            balance_positive_negative=balance_positive_negative,
        ),
    )
    with torch.no_grad():
        # Accuracy at logit threshold 0, excluding padded/out-of-bounds pts.
        accuracy = ((outputs > 0.0).long() == batch["output_label_pts"]).float()[
            ~ignore_pts_mask
        ]
        stats["accuracy"] = accuracy.mean()
    # One detailed-stats dataframe per candidate logit cutoff.
    detailed_stats = [
        get_detailed_stats(
            prediction=outputs > cutoff,
            gt_label=batch["output_label_pts"].bool(),
            xyz_pts=batch["output_xyz_pts"],
            ignore_pts=ignore_pts_mask,
            target_obj_names=batch["target_obj_name"],
            reference_obj_names=batch["reference_obj_name"],
            spatial_relation_names=batch["spatial_relation_name"],
            scene_ids=batch["scene_id"],
            eval_device=net.device,
            **kwargs,
        )
        for cutoff in cutoffs
    ]
    for detailed_stat, cutoff in zip(detailed_stats, cutoffs):
        detailed_stat["cutoff"] = [cutoff] * len(detailed_stat)
    detailed_stats = pd.concat(detailed_stats)
    # Surface mean IoU columns as scalar stats for logging.
    for k in detailed_stats.columns:
        if "iou" in k:
            stats[k] = detailed_stats[k].mean()
    return stats, detailed_stats
# Maps the --approach CLI choice to the corresponding VOOL network class.
approach = {
    "semantic_abstraction": SemAbsVOOL,
    "semantic_aware": SemanticAwareVOOL,
    "clip_spatial": ClipSpatialVOOL,
}
if __name__ == "__main__":
parser = utils.config_parser()
parser.add_argument("--log", type=str, required=True)
parser.add_argument(
"--approach", choices=approach.keys(), default="semantic_abstraction"
)
args = parser.parse_args()
if args.approach == "semantic_aware":
args.network_inputs = ["rgb"]
utils.train(
get_losses_fn=get_losses,
**utils.setup_experiment(
args=args,
net_class=approach[args.approach],
dataset_class=ObjectLocalizationDataset,
split_file_path=args.file_path + "/vool_split.pkl",
),
**vars(args),
)
| 8,243 | 34.230769 | 88 | py |
semantic-abstraction | semantic-abstraction-main/utils.py | from __future__ import annotations
import os
import pickle
import signal
from typing import Optional, Tuple, Type
import numpy as np
import pandas as pd
import torch
from torch.backends import cudnn
from tqdm import tqdm
from transformers import get_scheduler
from argparse import ArgumentParser
import random
from CLIP.clip import saliency_configs
from net import VirtualGrid
from tensorboardX import SummaryWriter
from torch.nn.parallel import DistributedDataParallel
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from torchtyping import TensorType, patch_typeguard
from arm.optim.lamb import Lamb
from typeguard import typechecked
import logging
from dataset import SceneUnderstandDataset
from rich.logging import RichHandler
# Route all log records through rich for readable console output.
logging.basicConfig(
    level=logging.INFO, format="%(message)s", datefmt="[%X]", handlers=[RichHandler()]
)
patch_typeguard()  # use before @typechecked
def config_parser():
    """Build the shared ArgumentParser with all training/eval options.

    Callers (e.g. the train scripts) extend the returned parser with their
    own flags (such as --log and --approach) before calling parse_args().

    Returns:
        argparse.ArgumentParser with every common option registered.
    """
    parser = ArgumentParser()
    parser.add_argument("--file_path", type=str, required=True)
    # FIX: nargs=3 so the voxel grid shape can actually be set from the CLI
    # (previously `--voxel_shape 64 64 64` was rejected, and a single int
    # would break the downstream tuple(...) conversion). Default unchanged.
    parser.add_argument("--voxel_shape", type=int, nargs=3, default=[128, 128, 128])
    parser.add_argument("--load", type=str)
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_warmup_steps", type=int, default=1024)
    parser.add_argument("--save_freq", type=int, default=1)
    parser.add_argument("--eval_freq", type=int, default=5)
    # FIX: default as a list so the no-argument case matches the parsed
    # `--gpus 0` form (the old string "0" only worked by coincidence of
    # len()/indexing on a one-character string).
    parser.add_argument("--gpus", type=str, nargs="+", default=["0"])
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--epochs", type=int, default=200)
    parser.add_argument("--num_descs", type=int, default=4)
    parser.add_argument("--saliency_vmin", type=float, default=None)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--weight_decay", type=float, default=0.00001)
    parser.add_argument("--grad_max_norm", type=float, default=2.0)
    parser.add_argument("--xyz_pts_noise", type=float, default=0.0)
    parser.add_argument("--num_input_pts", type=int, default=80000)
    parser.add_argument("--num_output_pts", type=int, default=400000)
    parser.add_argument("--pointing_dim", type=int, default=64)
    parser.add_argument("--unet_f_maps", type=int, default=16)
    parser.add_argument("--unet_num_channels", type=int, default=16)
    parser.add_argument("--unet_num_groups", type=int, default=8)
    parser.add_argument("--unet_num_levels", type=int, default=6)
    parser.add_argument("--num_patches", type=int, default=4)
    parser.add_argument("--patch_mask_cutoff", type=float, default=0.004)
    parser.add_argument("--domain_randomization", action="store_true", default=True)
    parser.add_argument("--use_pts_feat_extractor", action="store_true", default=True)
    parser.add_argument("--pts_feat_extractor_hidden_dim", type=int, default=128)
    parser.add_argument("--subtract_mean_relevancy", action="store_true", default=True)
    parser.add_argument("--offset_patch_mask", action="store_true", default=False)
    parser.add_argument(
        "--balance_positive_negative", action="store_true", default=False
    )
    parser.add_argument(
        "--balance_spatial_relations", action="store_true", default=True
    )
    parser.add_argument(
        "--always_replace_subsample_pts", action="store_true", default=False
    )
    parser.add_argument("--balance_spatial_sampling", action="store_true", default=True)
    parser.add_argument("--decoder_concat_xyz_pts", action="store_true", default=True)
    parser.add_argument("--use_amp", action="store_true", default=False)
    parser.add_argument("--num_workers", type=int, default=8)
    parser.add_argument("--dr_pos", type=float, default=0.1)
    parser.add_argument("--dr_orn", type=float, default=0.3)
    parser.add_argument("--dr_scale", type=float, default=0.1)
    # NOTE(review): type=list would split a CLI string into single
    # characters, so this option is effectively default-only; pass custom
    # bounds programmatically rather than on the command line.
    parser.add_argument(
        "--scene_bounds", type=list, default=[[-1.0, -1.0, -0.1], [1.0, 1.0, 1.9]]
    )
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument(
        "--pointing_method",
        choices=["cosine_sim", "dot_product", "additive"],
        default="cosine_sim",
    )
    parser.add_argument(
        "--saliency_config", choices=saliency_configs.keys(), default="ours"
    )
    parser.add_argument(
        "--network_inputs",
        nargs="+",
        choices=["patch_masks", "saliency", "rgb", "tsdf"],
        default=["saliency"],
    )
    parser.add_argument(
        "--lr_scheduler_type",
        choices=[
            "constant",
            "linear",
            "cosine",
            "cosine_with_restarts",
            "constant_with_warmup",
        ],
        default="cosine_with_restarts",
    )
    parser.add_argument("--reduce_method", choices=["max", "mean"], default="max")
    return parser
def is_main_process():
    """True when torch.distributed is not initialized, or this is rank 0."""
    return (not dist.is_initialized()) or dist.get_rank() == 0
def setup_experiment(
    args,
    split_file_path: str,
    net_class: Type[torch.nn.Module],
    dataset_class,
    tsdf_shape: Optional[Tuple[int, int, int]] = None,
    return_vis: bool = False,
    **kwargs,
):
    """Prepare everything train() needs: distributed setup, datasets per
    split, tensorboard loggers, the network/optimizer/scheduler, and any
    previously saved detailed stats.

    Args:
        args: parsed namespace from config_parser() (mutated: scene_bounds
            is converted to a tensor).
        split_file_path: pickle containing {split_name: [scene paths]}.
        net_class: network class instantiated by get_net.
        dataset_class: per-split dataset class.
        tsdf_shape: TSDF grid shape; defaults to args.voxel_shape.
        return_vis: force datasets to return visualization extras.

    Returns:
        dict with splits, loggers, datasets, net, scaler, optimizer,
        lr_scheduler, start_epoch, and training_detailed_stats.
    """
    if len(args.gpus) > 1:
        # Multi-GPU: one process per rank via torch.distributed (NCCL).
        os.environ["NCCL_P2P_DISABLE"] = "1"
        dist.init_process_group(backend="nccl", init_method="env://")
        signal.signal(signal.SIGINT, lambda sig, frame: dist.destroy_process_group())
        if args.device == "cuda":
            torch.cuda.set_device(int(args.gpus[dist.get_rank() % len(args.gpus)]))
    elif args.device == "cuda":
        torch.cuda.set_device(int(args.gpus[0]))
    # Only rank 0 logs at INFO; other ranks stay quiet.
    if not is_main_process():
        logging.getLogger().setLevel(logging.ERROR)
    else:
        logging.getLogger().setLevel(logging.INFO)
    if tsdf_shape is None:
        tsdf_shape = args.voxel_shape
    splits = pickle.load(open(split_file_path, "rb"))
    logging.info("DATASET AT" + args.file_path)
    logging.info(
        " | ".join(
            [
                f"{split_name}: {len(scene_paths)}"
                for split_name, scene_paths in splits.items()
            ]
        )
    )
    # One tensorboard writer per split (rank 0 only).
    loggers = {
        k: SummaryWriter(args.log + f"/{k}") if is_main_process() else None
        for k in splits.keys()
    }
    if is_main_process():
        if os.path.exists(args.log + "/args.pkl"):
            # check if it's very different
            prev_args = pickle.load(open(args.log + "/args.pkl", "rb"))
            logging.warning(
                args.log + "/args.pkl" + " already exists. Differences are;"
            )
            # Symmetric difference of the stringified arg items.
            for arg in set(map(str, vars(prev_args).items())) ^ set(
                map(str, vars(args).items())
            ):
                logging.warning(arg)
        else:
            pickle.dump(args, open(args.log + "/args.pkl", "wb"))
    args.scene_bounds = torch.tensor(args.scene_bounds)
    # Build one dataset per non-empty split; special splits toggle
    # domain-randomized RGB / synonym prompts, and only "train" applies
    # training-time domain randomization.
    datasets = {
        k: dataset_class(
            scene_paths=splits[k],
            tsdf_shape=tsdf_shape,
            domain_randomized_rgb=(k == "unseen_instances_dr"),
            use_synonyms=(k == "unseen_instances_synonyms"),
            **{
                **vars(args),
                **kwargs,
                **{
                    "domain_randomization": False
                    if k != "train"
                    else args.domain_randomization,
                    "return_vis": k != "train" or return_vis,
                },
            },
        )
        for k in splits.keys()
        if len(splits[k]) > 0
    }
    # Resume accumulated eval stats if a previous run left them behind.
    training_detailed_stats = None
    if os.path.exists(args.log + "/detailed_stats.pkl"):
        training_detailed_stats = pickle.load(
            open(args.log + "/detailed_stats.pkl", "rb")
        )
    net, optimizer, lr_scheduler, start_epoch, scaler = get_net(
        train_dataset=datasets.get("train", None), net_class=net_class, **vars(args)
    )
    return {
        "splits": splits,
        "loggers": loggers,
        "datasets": datasets,
        "net": net,
        "scaler": scaler,
        "optimizer": optimizer,
        "lr_scheduler": lr_scheduler,
        "start_epoch": start_epoch,
        "training_detailed_stats": training_detailed_stats,
    }
def seed_all(seed=0):
    """Seed python, numpy and torch RNGs and make cuDNN deterministic."""
    logging.debug(f"SEEDING WITH {seed}")
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    # Trade cuDNN autotuning for reproducible kernels.
    cudnn.deterministic = True
    cudnn.benchmark = False
def get_n_params(model):
    """Return the total number of scalar parameters in *model*."""
    # numel() equals the product of each parameter's dimensions (1 for a
    # zero-dim tensor), matching the original manual product.
    return sum(param.numel() for param in model.parameters())
def get_net(
    load: str,
    lr: float,
    weight_decay: float,
    lr_scheduler_type: str,
    num_warmup_steps: int,
    epochs: int,
    seed: int,
    net_class: Type[torch.nn.Module],
    use_amp: bool,
    train_dataset: Optional[SceneUnderstandDataset] = None,
    **kwargs,
):
    """Build the network, optimizer, LR scheduler and (optional) AMP scaler.

    Args:
        load: checkpoint path to resume from, or None to start fresh.
        lr / weight_decay: Lamb optimizer hyperparameters.
        lr_scheduler_type / num_warmup_steps / epochs: LR schedule config.
        seed: RNG seed applied before the net is constructed.
        net_class: network class; instantiated with **kwargs.
        use_amp: whether to create a CUDA GradScaler for mixed precision.
        train_dataset: used only to size the LR schedule; may be None.
        **kwargs: must contain "device", "batch_size" and "voxel_shape";
            forwarded to net_class.

    Returns:
        (net, optimizer, lr_scheduler, start_epoch, scaler)
    """
    seed_all(seed)
    device = kwargs["device"]
    batch_size = kwargs["batch_size"]
    kwargs["voxel_shape"] = tuple(kwargs["voxel_shape"])
    net = net_class(**kwargs).to(device)
    if dist.is_initialized():
        net = DistributedDataParallel(
            module=net, device_ids=[device], find_unused_parameters=True
        )
    logging.info(f"NUM PARAMS: {get_n_params(net)}")
    optimizer = Lamb(
        net.parameters(),
        lr=lr,
        betas=(0.9, 0.999),
        weight_decay=weight_decay,
        adam=False,
    )
    lr_scheduler = get_scheduler(
        lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=epochs * (len(train_dataset) // batch_size)
        if train_dataset is not None
        else 1,
    )
    start_epoch = 0
    if load is not None:
        logging.info(f"loading from {load}")
        ckpt = torch.load(load, map_location=device)
        if dist.is_initialized():
            net.load_state_dict(ckpt["net"])
        else:
            # Checkpoints saved from a DDP-wrapped model prefix every key
            # with "module."; strip that prefix when loading into a bare
            # model. FIX: the previous
            # `"module.".join(k.split("module.")[1:])` mapped keys WITHOUT
            # the prefix to the empty string, breaking loads of checkpoints
            # that were saved without DDP.
            net.load_state_dict(
                {
                    (k[len("module."):] if k.startswith("module.") else k): v
                    for k, v in ckpt["net"].items()
                }
            )
        optimizer.load_state_dict(ckpt["optimizer"])
        start_epoch = ckpt["epochs"]
    scaler = None
    if use_amp:
        scaler = torch.cuda.amp.grad_scaler.GradScaler()
    return net, optimizer, lr_scheduler, start_epoch, scaler
def write_to_hdf5(group, key, value, dtype=None, replace=False):
    """Write *value* under *key* in an hdf5 *group*.

    str/int/float values become group attributes, dicts become nested
    groups (written recursively), and everything else becomes a
    gzip-compressed dataset. None values are silently skipped.

    Raises:
        Exception: if *key* already exists and *replace* is False.
    """
    if value is None:
        return
    if key in group:
        if not replace:
            raise Exception(f"{key} already present")
        del group[key]
    # Exact type checks (not isinstance) on purpose: e.g. bool must NOT be
    # stored as an int attribute; it falls through to create_dataset.
    if type(value) in (str, int, float):
        group.attrs[key] = value
    elif type(value) == dict:
        subgroup = group[key] if key in group else group.create_group(key)
        for child_key, child_value in value.items():
            write_to_hdf5(subgroup, child_key, child_value)
    else:
        group.create_dataset(
            name=key, data=value, dtype=dtype, compression="gzip", compression_opts=9
        )
def compute_grad_norm(net):
    """Return the global L2 norm over all existing parameter gradients."""
    squared_sum = sum(
        p.grad.detach().data.norm(2).item() ** 2
        for p in net.parameters()
        if p.grad is not None
    )
    return squared_sum**0.5
@typechecked
def iou(
    prediction: TensorType[..., "num_points"], label: TensorType[..., "num_points"]
):
    """Intersection-over-union reduced along the last (points) dimension."""
    overlap = torch.logical_and(prediction, label).sum(dim=-1).float()
    combined = torch.logical_or(prediction, label).sum(dim=-1).float()
    return overlap / combined
@typechecked
def prediction_analysis(
    prediction: TensorType["batch", "num_patches", "num_points"],
    label: TensorType["batch", "num_patches", "num_points"],
    ignore: TensorType["batch", "num_patches", "num_points"],
):
    """Compute per-(batch, patch) precision/recall/FN/FP/IoU over points.

    Points flagged in `ignore` are excluded entirely. Precision and recall
    are NaN when undefined (no positive predictions / no positive labels).

    Returns:
        dict of metric name -> flat python list with one entry per
        (batch, patch) pair, in row-major order.
    """
    stats = {
        "precision": [],
        "recall": [],
        "false_negative": [],
        "false_positive": [],
        "iou": [],
    }
    for b_i in range(ignore.shape[0]):
        for p_i in range(ignore.shape[1]):
            # Keep only the points that are not ignored.
            mask = ~ignore.bool()[b_i, p_i]
            curr_label = label.bool()[b_i, p_i][mask]
            positive_labels = curr_label.bool().float().sum(dim=-1)
            curr_pred = prediction.bool()[b_i, p_i][mask]
            positive_preds = curr_pred.bool().float().sum(dim=-1)
            true_positives = (
                torch.logical_and(curr_label.bool(), curr_pred.bool())
                .float()
                .sum(dim=-1)
            )
            stats["iou"].append(iou(prediction=curr_pred, label=curr_label).item())
            # NaN when there are no positive predictions (undefined precision).
            stats["precision"].append(
                true_positives.item() / positive_preds.item()
                if positive_preds.item() != 0
                else np.NAN
            )
            # NaN when there are no positive labels (undefined recall).
            stats["recall"].append(
                true_positives.item() / positive_labels.item()
                if positive_labels.item() != 0
                else np.NAN
            )
            # FN/FP as fractions of all considered points.
            stats["false_negative"].append(
                torch.logical_and(curr_label, ~curr_pred).float().mean(dim=-1).item()
            )
            stats["false_positive"].append(
                torch.logical_and(~curr_label, curr_pred).float().mean(dim=-1).item()
            )
    return stats
def loop(
    net,
    loader,
    pbar,
    get_losses_fn,
    logger: Optional[SummaryWriter] = None,
    optimizer: Optional[torch.optim.Optimizer] = None,
    lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
    scaler=None,
    grad_max_norm: float = 1e5,
    device: torch.device = torch.device("cuda"),
    **kwargs,
):
    """Run one epoch over *loader*; train when *optimizer* is given,
    otherwise evaluate under no_grad.

    Handles AMP (via *scaler*), gradient clipping, per-step tensorboard
    logging, and cross-process reduction of stats under torch.distributed.

    Returns:
        pd.DataFrame of detailed per-sample stats accumulated (and, when
        distributed, gathered) over the whole epoch.
    """
    epoch_stats = {}
    detailed_stat_df = pd.DataFrame()
    for batch in loader:
        # Move tensor entries to the target device; leave metadata as-is.
        batch = {
            k: (v.to(device) if type(v) == torch.Tensor else v)
            for k, v in batch.items()
        }
        if optimizer:
            stats, detailed_stat = get_losses_fn(net=net, batch=batch, **kwargs)
            optimizer.zero_grad()
            if scaler:
                # AMP path: scale, unscale before clipping, then step.
                scaler.scale(stats["loss"]).backward()
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(net.parameters(), grad_max_norm)
                scaler.step(optimizer)
                scaler.update()
            else:
                stats["loss"].backward()
                torch.nn.utils.clip_grad_norm_(net.parameters(), grad_max_norm)
                optimizer.step()
            lr_scheduler.step()
            # Global step counter lives on the (possibly wrapped) module.
            if dist.is_initialized():
                net.module.steps += 1
            else:
                net.steps += 1
            stats["gradnorm"] = compute_grad_norm(net)
        else:
            with torch.no_grad():
                stats, detailed_stat = get_losses_fn(net=net, batch=batch, **kwargs)
        # sync stats and detailed_stat_df between different processes
        if dist.is_initialized():
            # Average scalar stats across ranks (sorted keys keep the
            # reduce order consistent on every process).
            stats_vector = torch.tensor([stats[k] for k in sorted(stats.keys())]).cuda()
            dist.all_reduce(stats_vector)
            for k, v in zip(sorted(stats.keys()), stats_vector / dist.get_world_size()):
                stats[k] = v.item()
            detailed_stats = [None for _ in range(dist.get_world_size())]
            dist.all_gather_object(object_list=detailed_stats, obj=detailed_stat)
            detailed_stat_df = pd.concat([detailed_stat_df] + detailed_stats)
        else:
            detailed_stat_df = pd.concat([detailed_stat_df, detailed_stat])
        for k, v in stats.items():
            v = v.item() if type(v) != float else v
            if k not in epoch_stats:
                epoch_stats[k] = []
            epoch_stats[k].append(v)
            # Per-step logging only during training.
            if logger is not None and optimizer is not None:
                logger.add_scalar(
                    k, v, net.module.steps if dist.is_initialized() else net.steps
                )
        if pbar is not None:
            # Ratio-like metrics are shown as percentages; others in sci
            # notation.
            pbar.set_description(
                "|".join(
                    f" {k}: {v*100:.02f} "
                    if any(
                        _k in k
                        for _k in {
                            "iou",
                            "precision",
                            "recall",
                        }
                    )
                    else f" {k}: {v:.04e} "
                    for k, v in stats.items()
                )
            )
            pbar.update()
    # NaN-safe epoch means (precision/recall rows can be NaN).
    epoch_stats = {k: np.nanmean(v) for k, v in epoch_stats.items()}
    if logger is not None and is_main_process():
        for k, v in epoch_stats.items():
            logger.add_scalar(
                f"{k}_mean", v, net.module.steps if dist.is_initialized() else net.steps
            )
    return detailed_stat_df
def train(
    log: str,
    net: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
    lr_scheduler: torch.optim.lr_scheduler._LRScheduler,
    training_detailed_stats: pd.DataFrame,
    start_epoch: int,
    epochs: int,
    datasets: dict,
    loggers: dict,
    splits: dict,
    save_freq: int,
    eval_freq: int,
    num_workers: int,
    batch_size: int,
    get_losses_fn,
    use_amp: bool,
    **kwargs,
):
    """Run the train/eval loop over ``datasets`` from ``start_epoch`` to ``epochs``.

    For every epoch this iterates each split in ``datasets`` (non-train splits
    only every ``eval_freq`` epochs, and of those only "unseen_instances"),
    runs ``loop`` under autocast when ``use_amp`` is set, checkpoints to
    ``{log}/latest.pth`` after each split, appends per-sample stats to
    ``{log}/detailed_stats.pkl``, logs each metric at its best cutoff per
    split, and writes a numbered checkpoint every ``save_freq`` epochs and on
    the final epoch.  Remaining ``kwargs`` are forwarded to ``loop``.
    """
    for curr_epoch in range(start_epoch, epochs):
        if is_main_process():
            logging.info(f'{"="*10} EPOCH {curr_epoch} {"="*10}')
        for split, dataset in datasets.items():
            # eval splits run only every `eval_freq` epochs
            if split != "train" and curr_epoch % eval_freq != 0:
                continue
            if split == "train":
                net.train()
            else:
                net.eval()
            # of the eval splits, only "unseen_instances" is actually looped
            if split != "train" and split != "unseen_instances":
                continue
            sampler = None
            if dist.is_initialized():
                sampler = DistributedSampler(
                    dataset=dataset,
                    shuffle=split == "train",
                    drop_last=split == "train",
                )
                sampler.set_epoch(curr_epoch)
            loader = DataLoader(
                dataset=dataset,
                sampler=sampler,
                num_workers=num_workers,
                shuffle=sampler is None and split == "train",
                batch_size=batch_size if split == "train" else 1,
                persistent_workers=num_workers > 0,
            )
            try:
                with torch.cuda.amp.autocast(enabled=use_amp):
                    detailed_stats = loop(
                        net=net,
                        loader=loader,
                        get_losses_fn=get_losses_fn,
                        **{
                            **kwargs,
                            "logger": loggers[split],
                            "optimizer": optimizer if split == "train" else None,
                            "lr_scheduler": lr_scheduler,
                            "pbar": tqdm(
                                total=len(loader),
                                dynamic_ncols=True,
                                unit="batch",
                                smoothing=0.01,
                                postfix=f"| {split.upper()} ",
                            )
                            if is_main_process()
                            else None,
                            "detailed_analysis": False,
                            # training uses one fixed cutoff; eval sweeps a
                            # range so the best cutoff can be reported below
                            "cutoffs": [-1.0]
                            if split == "train"
                            else np.arange(-2.7, 0, 0.3),
                        },
                    )
                if is_main_process():
                    ckpt_path = f"{log}/latest.pth"
                    torch.save(
                        {
                            "net": net.state_dict(),
                            "optimizer": optimizer.state_dict(),
                            "epochs": curr_epoch + 1,
                        },
                        ckpt_path,
                    )
                    detailed_stats["epoch"] = [curr_epoch] * len(detailed_stats)
                    detailed_stats["split"] = [split] * len(detailed_stats)
                    training_detailed_stats = pd.concat(
                        [training_detailed_stats, detailed_stats]
                    )
                    training_detailed_stats.to_pickle(log + "/detailed_stats.pkl")
            except Exception as e:
                # NOTE(review): broad catch keeps training alive when one
                # split fails, but only prints the message (no traceback) —
                # consider logging.exception here; confirm this is intended
                print(e)
                continue
        epoch_stats = training_detailed_stats[
            training_detailed_stats.epoch == curr_epoch
        ]
        if not is_main_process():
            continue
        for split in splits.keys():
            split_stats = epoch_stats[epoch_stats.split == split]
            if len(split_stats) == 0:
                continue
            logging.info(split.upper())
            for key in filter(
                lambda k: any(
                    metric in k
                    for metric in {
                        "iou",
                        "precision",
                        "recall",
                        "false_negative",
                        "false_positive",
                    }
                ),
                epoch_stats.columns,
            ):
                if len(split_stats) == 0:
                    continue
                # report each metric at the cutoff that maximizes its mean
                best_cutoff = split_stats.groupby("cutoff").mean()[key].idxmax()
                score = split_stats[split_stats.cutoff == best_cutoff][key].mean() * 100
                if pd.isna(score):
                    continue
                logging.info(
                    " " * 4
                    + f"[{key.upper():<30}]:"
                    + f"{score:>6.02f}"
                    + str(best_cutoff).rjust(10)
                )
            logging.info("\n")
        # numbered checkpoints only every `save_freq` epochs or on last epoch
        if curr_epoch % save_freq != 0 and curr_epoch != epochs - 1:
            continue
        ckpt_path = f"{log}/ckpt_{curr_epoch}.pth"
        torch.save(
            {
                "net": net.state_dict(),
                "optimizer": optimizer.state_dict(),
                "epochs": curr_epoch + 1,
            },
            ckpt_path,
        )
        logging.info(f"Saved checkpoint to {ckpt_path}.\n")
def voxelize_points(
    prediction: TensorType["batch", "num_patches", "num_points"],
    label: TensorType["batch", "num_patches", "num_points"],
    xyz_pts: TensorType["batch", "num_patches", "num_points", 3],
    voxel_shape: Tuple[int, int, int],
    scene_bounds: TensorType[2, 3],
    ignore_pts: TensorType["batch", "num_patches", "num_points"],
    device="cuda",
):
    """Max-scatter per-point predictions and labels into a voxel grid.

    Labels are remapped from {0, 1} to {-1, 1} before scattering so that
    voxels left at exactly 0 (no point landed there) can be detected and
    added to the ignore mask.  Returns flattened "prediction" (bool),
    "label" (float) and "ignore" (bool) volumes of shape
    (batch, num_patches, prod(voxel_shape)).
    """
    batch_size, num_patches, num_points = prediction.shape
    flat = batch_size * num_patches
    pred_pts = prediction.to(device).float()
    # {0, 1} -> {-1, 1}: keeps "untouched voxel" distinguishable as 0
    signed_label_pts = (label.to(device).float() - 0.5) * 2
    pts = xyz_pts.to(device)[:, None, ...].view(flat, num_points, 3)
    grid = VirtualGrid(
        scene_bounds=scene_bounds,
        grid_shape=voxel_shape,
        batch_size=flat,
        device=torch.device(device),
        reduce_method="max",
    )

    def _scatter(feature_pts):
        # scatter one scalar feature per point, then restore batch/patch dims
        vol = grid.scatter_points(
            xyz_pts=pts, feature_pts=feature_pts.view(flat, -1, 1)
        )
        return vol.view(batch_size, num_patches, *voxel_shape)

    pred_vol = _scatter(pred_pts)
    label_vol = _scatter(signed_label_pts)
    no_label = label_vol == 0.0
    label_vol = (label_vol > 0).float()
    ignore_vol = _scatter(ignore_pts.to(device).float()).bool()
    ignore_vol = torch.logical_or(ignore_vol, no_label)
    num_voxels = np.prod(voxel_shape)
    return {
        "prediction": (pred_vol > 0).view(batch_size, num_patches, num_voxels),
        "label": label_vol.view(batch_size, num_patches, num_voxels),
        "ignore": ignore_vol.view(batch_size, num_patches, num_voxels),
    }
@typechecked
def voxel_score(
    prediction: TensorType["batch", "num_patches", "num_points"],
    label: TensorType["batch", "num_patches", "num_points"],
    xyz_pts: TensorType["batch", "num_patches", "num_points", 3],
    voxel_shape: Tuple[int, int, int],
    scene_bounds: TensorType[2, 3],
    ignore_pts: TensorType["batch", "num_patches", "num_points"],
    out_of_frustum_pts_mask: TensorType["batch", "num_patches", "num_points"],
    score_fn=iou,
    device="cuda",
):
    """Voxelize predictions/labels and score each (batch, patch) pair.

    Same voxelization scheme as ``voxelize_points`` (labels mapped to
    {-1, 1} so empty voxels are detectable); ignored points, out-of-frustum
    points, and voxels with no label are excluded, then ``score_fn``
    (default IoU) is applied per (batch, patch).  Returns a
    (batch, num_patches) float tensor of scores.
    """
    batch_size, num_patches, num_points = prediction.shape
    flat = batch_size * num_patches
    pred_pts = prediction.to(device).float()
    # {0, 1} -> {-1, 1}: keeps "untouched voxel" distinguishable as 0
    signed_label_pts = (label.to(device).float() - 0.5) * 2
    pts = xyz_pts.to(device)[:, None, ...].view(flat, num_points, 3)
    grid = VirtualGrid(
        scene_bounds=scene_bounds,
        grid_shape=voxel_shape,
        batch_size=flat,
        device=torch.device(device),
        reduce_method="max",
    )

    def _scatter(feature_pts):
        # scatter one scalar feature per point, then restore batch/patch dims
        vol = grid.scatter_points(
            xyz_pts=pts, feature_pts=feature_pts.view(flat, -1, 1)
        )
        return vol.view(batch_size, num_patches, *voxel_shape)

    pred_vol = _scatter(pred_pts)
    label_vol = _scatter(signed_label_pts)
    no_label = label_vol == 0.0
    label_vol = (label_vol > 0).float()
    skip_pts = torch.logical_or(ignore_pts.bool(), out_of_frustum_pts_mask.bool())
    ignore_vol = _scatter(skip_pts.to(device).float()).bool()
    ignore_vol = torch.logical_or(ignore_vol, no_label)
    scores = torch.zeros((batch_size, num_patches)).float()
    for b_idx in range(batch_size):
        for p_idx in range(num_patches):
            keep = ~ignore_vol[b_idx, p_idx]
            scores[b_idx, p_idx] = score_fn(
                (pred_vol[b_idx, p_idx] > 0)[keep].bool(),
                (label_vol[b_idx, p_idx] > 0)[keep].bool(),
            )
    return scores
@typechecked
def get_bce_weight(
    output_label_pts: TensorType["batch", "num_patches", "num_points"],
    balance_positive_negative: bool,
):
    """Per-point BCE weights, optionally balancing positives vs. negatives.

    When ``balance_positive_negative`` is set, within each (batch, patch)
    instance positive points get weight ``1 / p`` and negative points
    ``1 / (1 - p)``, where ``p`` is that instance's positive fraction; the
    map is then rescaled so its total mass equals the unweighted total.
    Otherwise a tensor of ones is returned.

    Returns a float tensor shaped like ``output_label_pts``.
    """
    weight = torch.ones_like(output_label_pts).float()
    if balance_positive_negative:
        weight_total = weight.sum()
        # per-instance positive fraction, flattened over (batch * num_patches)
        positive_mask = output_label_pts.bool()
        batch_size, num_patches, num_pts = positive_mask.shape
        percent_positive = positive_mask.float().mean(dim=2).view(-1)
        percent_negative = 1 - percent_positive
        positive_mask = positive_mask.view(-1, num_pts)
        # vectorized inverse-frequency weighting (replaces the former
        # per-instance Python loop); epsilon guards all-positive /
        # all-negative instances against division by zero
        weight = torch.where(
            positive_mask,
            (1.0 / (percent_positive + 1e-10))[:, None],
            (1.0 / (percent_negative + 1e-10))[:, None],
        ).view(output_label_pts.shape)
        # renormalize so the total weight mass is unchanged
        weight *= weight_total / weight.sum()
    return weight
| 27,394 | 35.526667 | 88 | py |
semantic-abstraction | semantic-abstraction-main/dataset.py | import numpy as np
import torch
from torch.utils.data import Dataset
from fusion import TSDFVolume
from point_cloud import (
check_pts_in_frustum,
filter_pts_bounds,
get_pointcloud,
)
from typing import List, Optional, Tuple
import h5py
from transforms3d import affines, euler
from torchtyping import TensorType, patch_typeguard
from typeguard import typechecked
patch_typeguard() # use before @typechecked
def deref_h5py(dataset, refs):
    """Dereference each ref into `dataset`, take its first element, and
    stack the results into a float32 array."""
    rows = [dataset[ref][0] for ref in refs]
    return np.asarray(rows, dtype=np.float32)
# Maps dataset class names to alternative natural-language phrasings used to
# probe open-vocabulary generalization; applied when `use_synonyms` is enabled
# in the dataset classes below (keys are replaced by their values).
synonyms = {
    "television": "tv",
    "sofa": "couch",
    "house plant": "plant in a pot",
    "bookcase": "bookshelf",
    "baseball bat": "rawlings big stick maple bat",
    "pillow": "cushion",
    "arm chair": "recliner",
    "bread": "loaf of sourdough",
    "cell phone": "mobile phone",
    "desktop": "computer",
    "dresser": "wardrobe",
    "dumbbell": "gym weights",
    "fridge": "refridgerator",
    "garbage can": "trash can",
    "laptop": "computer",
    "outlet": "eletric plug",
    "stairs": "staircase",
}
class SceneUnderstandDataset(Dataset):
    """Base dataset over per-scene HDF5 files for 3D scene understanding.

    Stores configuration (scene bounds, subsampling sizes, augmentation and
    saliency options) and provides the shared transform/filter/subsample
    point-cloud utility used by the subclasses below.
    """

    def __init__(
        self,
        file_path: str,
        scene_bounds: TensorType[2, 3],
        network_inputs: List[str],
        domain_randomization: bool,
        num_input_pts: int,
        num_output_pts: int,
        return_vis: bool,
        scene_paths: List[str],
        tsdf_shape: Tuple[int, int, int],
        domain_randomized_rgb: bool,
        offset_patch_mask: bool,
        balance_spatial_relations: bool,
        saliency_config: str,
        use_synonyms: bool,
        subtract_mean_relevancy: bool,
        balance_spatial_sampling: bool,
        saliency_vmin: float,
        dr_pos: float,
        dr_orn: float,
        dr_scale: float,
        xyz_pts_noise: float,
        always_replace_subsample_pts: bool,
        patch_mask_cutoff: float = 0.004,
        load_gt: bool = True,
        **kwargs,
    ):
        # setup
        self.file_path = file_path
        # sorted for deterministic indexing across runs/processes
        self.keys = list(sorted(scene_paths))
        self.num_input_pts = num_input_pts
        self.num_output_pts = num_output_pts
        self.network_inputs = network_inputs
        # 3D scene
        self.scene_bounds = np.array(scene_bounds)
        self.tsdf_shape = tsdf_shape
        # retval customization
        self.domain_randomized_rgb = domain_randomized_rgb
        self.return_vis = return_vis
        self.domain_randomization = domain_randomization
        self.subtract_mean_relevancy = subtract_mean_relevancy
        self.use_synonyms = use_synonyms
        self.offset_patch_mask = offset_patch_mask
        self.patch_mask_cutoff = patch_mask_cutoff
        self.saliency_config = saliency_config
        self.saliency_vmin = saliency_vmin
        self.xyz_pts_noise = xyz_pts_noise
        self.balance_spatial_relations = balance_spatial_relations
        self.balance_spatial_sampling = balance_spatial_sampling
        # domain-randomization magnitudes (position / orientation / scale)
        self.dr_pos = dr_pos
        self.dr_orn = dr_orn
        self.dr_scale = dr_scale
        self.load_gt = load_gt
        self.always_replace_subsample_pts = always_replace_subsample_pts

    def __len__(self):
        # one item per scene file
        return len(self.keys)

    @staticmethod
    @typechecked
    def transform_filter_subsample(
        xyz_pts,
        scene_bounds,
        num_subsample_pts,
        subsample_probabilities,
        alway_replace_pts: bool,
        transform_matrix=None,
        **kwargs,
    ):
        """Optionally transform points, crop to scene bounds, then subsample.

        Any extra per-point arrays passed via **kwargs are filtered and
        subsampled alongside ``xyz_pts`` (matched on whichever of their
        first two dims equals the point count).  ``num_subsample_pts == -1``
        skips subsampling.  Returns a dict keyed "xyz_pts" plus the kwargs.
        """
        num_pts = len(xyz_pts)
        retval = {"xyz_pts": xyz_pts}
        retval.update(kwargs)
        if transform_matrix is not None:
            # turn into homogeneous coords
            xyz_pts = torch.cat((xyz_pts, torch.ones(num_pts)[:, None]), dim=-1)
            xyz_pts = torch.matmul(transform_matrix, xyz_pts.permute(1, 0)).permute(
                1, 0
            )[..., :3]
        # filter out of bounds points
        in_bounds_mask = filter_pts_bounds(xyz_pts, scene_bounds).bool()
        retval["xyz_pts"] = xyz_pts[in_bounds_mask, :]
        subsample_probabilities = subsample_probabilities[in_bounds_mask]
        # renormalize so the remaining probabilities sum to 1
        subsample_probabilities /= subsample_probabilities.sum()
        for k, v in kwargs.items():
            # filter each auxiliary array along whichever dim holds points;
            # NOTE(review): ambiguous if both dims equal the point count
            if v is None:
                retval[k] = None
            elif v.shape[0] == len(in_bounds_mask):
                retval[k] = v[in_bounds_mask, ...]
            elif v.shape[1] == len(in_bounds_mask):
                retval[k] = v[:, in_bounds_mask, ...]
            else:
                raise Exception(k, v.shape, in_bounds_mask.shape)
        if num_subsample_pts == -1:
            return retval
        try:
            # bias based on description
            indices = np.random.choice(
                a=len(retval["xyz_pts"]),
                size=num_subsample_pts,
                p=subsample_probabilities,
                replace=alway_replace_pts,
            )
        except Exception as e:
            # fallback: sampling without replacement can fail when fewer
            # in-bounds points remain than requested — retry with replacement
            indices = np.random.choice(
                a=len(retval["xyz_pts"]),
                size=num_subsample_pts,
                p=subsample_probabilities,
                replace=True,
            )
        return {
            k: (
                v[indices, ...]
                if len(v) == len(retval["xyz_pts"])
                else v[:, indices, ...]
            )
            if v is not None
            else None
            for k, v in retval.items()
        }
class ObjectLocalizationDataset(SceneUnderstandDataset):
    """Dataset for language-grounded object localization: each item pairs a
    scene with spatial-relation descriptions ("<target> <relation> a
    <reference>") plus their saliency maps and per-point target labels."""

    def __init__(self, num_descs: int, **kwargs):
        # num_descs: max descriptions subsampled per scene (-1 keeps all)
        super().__init__(**kwargs)
        self.num_descs = num_descs
    @staticmethod
    def get_descriptions(
        scene_group,
        num_subsample_descs: int,
        saliency_config: str,
        rgb_key: str,
        use_synonyms: bool,
        balance_spatial_relations: bool = False,
        only_return_num_descs: bool = False,
    ):
        """Load (and optionally subsample) a scene's spatial descriptions.

        Reads target/reference object names, spatial relations, and the
        h5py object references to their precomputed saliency maps from
        ``scene_group``.  With ``use_synonyms``, only descriptions containing
        a synonym key are kept and names are swapped for their synonyms.
        Missing slots are padded with "[pad]" entries (target_obj_id -2) so
        every scene yields exactly ``num_subsample_descs`` descriptions.
        """
        saliency_prefix = f"saliencies/{rgb_key}|{saliency_config}"
        descriptions = dict()
        desc_group = scene_group["descriptions"]
        num_descs = len(desc_group["spatial_relation_name"])
        descriptions["target_obj_name"] = np.array(
            desc_group["target_obj_name"]
        ).astype(str)
        descriptions["target_obj_id"] = np.array(desc_group["target_obj_id"])
        descriptions["reference_obj_name"] = np.array(
            desc_group["reference_obj_name"]
        ).astype(str)
        descriptions["spatial_relation_name"] = np.array(
            desc_group["spatial_relation_name"]
        ).astype(str)
        # build "<target> <relation> a <reference>" sentences elementwise
        description_sentences = ""
        for desc_part in [
            descriptions["target_obj_name"],
            " ",
            descriptions["spatial_relation_name"],
            " a ",
            descriptions["reference_obj_name"],
        ]:
            description_sentences = np.char.add(description_sentences, desc_part)
        if use_synonyms:
            # keep only descriptions that mention a word we have a synonym for
            has_synonym = list(
                map(
                    lambda sentence: any(x in sentence for x in synonyms.keys()),
                    description_sentences,
                )
            )
            descriptions["target_obj_name"] = descriptions["target_obj_name"][
                has_synonym
            ]
            descriptions["target_obj_id"] = descriptions["target_obj_id"][has_synonym]
            descriptions["reference_obj_name"] = descriptions["reference_obj_name"][
                has_synonym
            ]
            descriptions["spatial_relation_name"] = descriptions[
                "spatial_relation_name"
            ][has_synonym]
            description_sentences = np.array(description_sentences)[has_synonym]
            num_descs = sum(has_synonym)
        if only_return_num_descs:
            return num_descs
        desc_indices = np.arange(0, num_descs)
        if num_subsample_descs != -1 and num_subsample_descs < num_descs:
            p = np.ones(num_descs).astype(np.float64)
            if balance_spatial_relations:
                # weight each description inversely to its relation's frequency
                spatial_relations = np.array(
                    desc_group["spatial_relation_name"]
                ).tolist()
                unique_relations = list(set(spatial_relations))
                spatial_relations_ids = np.array(
                    list(map(lambda r: unique_relations.index(r), spatial_relations))
                )
                for spatial_relations_id in range(len(unique_relations)):
                    mask = spatial_relations_ids == spatial_relations_id
                    p[mask] = 1 / mask.sum()
            p /= p.sum()
            desc_indices = np.random.choice(
                num_descs, num_subsample_descs, replace=False, p=p
            )
            desc_indices.sort()  # hdf5 indexing must be in order
        descriptions["target_obj_name"] = descriptions["target_obj_name"][desc_indices]
        descriptions["target_obj_id"] = descriptions["target_obj_id"][desc_indices]
        descriptions["reference_obj_name"] = descriptions["reference_obj_name"][
            desc_indices
        ]
        descriptions["spatial_relation_name"] = descriptions["spatial_relation_name"][
            desc_indices
        ]
        description_sentences = description_sentences[desc_indices]
        if use_synonyms:
            # swap object names for their synonyms (sentences keep originals,
            # matching how the saliency maps were generated)
            descriptions["target_obj_name"] = np.array(
                list(
                    map(
                        lambda x: x if x not in synonyms.keys() else synonyms[x],
                        descriptions["target_obj_name"],
                    )
                )
            )
            descriptions["reference_obj_name"] = np.array(
                list(
                    map(
                        lambda x: x if x not in synonyms.keys() else synonyms[x],
                        descriptions["reference_obj_name"],
                    )
                )
            )
        # resolve each name/sentence to its saliency-map h5py reference
        saliency_text_labels = (
            np.array(scene_group[f"{saliency_prefix}|saliency_text_labels"])
            .astype(str)
            .tolist()
        )
        descriptions["target_obj_saliency_refs"] = [
            scene_group[f"{saliency_prefix}"][idx]
            for idx in map(
                lambda obj_name: saliency_text_labels.index(obj_name),
                descriptions["target_obj_name"],
            )
        ]
        descriptions["reference_obj_saliency_refs"] = [
            scene_group[f"{saliency_prefix}"][idx]
            for idx in map(
                lambda obj_name: saliency_text_labels.index(obj_name),
                descriptions["reference_obj_name"],
            )
        ]
        descriptions["description_saliency_refs"] = [
            scene_group[f"{saliency_prefix}"][idx]
            for idx in map(
                lambda desc: saliency_text_labels.index(desc), description_sentences
            )
        ]
        # pad to a fixed number of descriptions for batchability
        num_missing_descs = num_subsample_descs - len(
            descriptions["spatial_relation_name"]
        )
        if num_missing_descs > 0 and num_subsample_descs != -1:
            descriptions["target_obj_id"] = np.array(
                descriptions["target_obj_id"].tolist() + [-2] * num_missing_descs
            )
            descriptions["spatial_relation_name"] = np.array(
                descriptions["spatial_relation_name"].tolist()
                + ["[pad]"] * num_missing_descs
            )
            descriptions["target_obj_name"] = np.array(
                descriptions["target_obj_name"].tolist() + ["[pad]"] * num_missing_descs
            )
            descriptions["reference_obj_name"] = np.array(
                descriptions["reference_obj_name"].tolist()
                + ["[pad]"] * num_missing_descs
            )
        descriptions["num_descs"] = len(descriptions["spatial_relation_name"])
        return descriptions
    def __getitem__(self, idx):
        """Load one scene: RGB-D input cloud with per-description saliencies,
        plus (when ground truth exists) the full output cloud with per-point
        labels, after optional domain randomization and subsampling."""
        retvals = dict()
        scene_path = self.file_path + "/" + self.keys[idx]
        with h5py.File(scene_path, "r") as f:
            group = f["data"]
            depth = deref_h5py(dataset=f["depth"], refs=group["depth"])[0]
            cam_intr = np.array(group["cam_intr"])
            cam_pose = np.array(group["cam_pose"])
            if self.domain_randomized_rgb:
                retvals["rgb"] = np.array(group["domain_randomized_rgb"]).astype(
                    np.float32
                )[0]
            else:
                retvals["rgb"] = deref_h5py(dataset=f["rgb"], refs=group["rgb"])[0]
            image_shape = retvals["rgb"].shape[:2]
            retvals["rgb"] = torch.from_numpy(retvals["rgb"]) / 255.0
            # input cloud: back-project the depth image into world frame
            retvals["input_xyz_pts"] = torch.from_numpy(
                get_pointcloud(depth, None, cam_intr, cam_pose)[0].astype(np.float32)
            )
            retvals["full_objid_pts"] = None
            if "full_objid_pts" in group:
                # ground-truth full scene cloud with per-point object ids
                retvals["output_xyz_pts"] = torch.from_numpy(
                    deref_h5py(dataset=f["full_xyz_pts"], refs=group["full_xyz_pts"])[0]
                )
                retvals["full_objid_pts"] = torch.from_numpy(
                    deref_h5py(
                        dataset=f["full_objid_pts"], refs=group["full_objid_pts"]
                    )[0]
                )
                retvals["out_of_bounds_pts"] = torch.zeros(
                    len(retvals["full_objid_pts"])
                ).float()
            descriptions = self.get_descriptions(
                scene_group=group,
                num_subsample_descs=self.num_descs if not self.return_vis else -1,
                saliency_config=self.saliency_config,
                rgb_key="domain_randomized_rgb"
                if self.domain_randomized_rgb
                else "rgb",
                use_synonyms=self.use_synonyms,
                balance_spatial_relations=self.balance_spatial_relations,
            )
            retvals["spatial_relation_name"] = descriptions[
                "spatial_relation_name"
            ].tolist()
            # gradcam values typically between -0.02 and 0.02
            # so multiply by 50
            retvals["input_target_saliency_pts"] = torch.from_numpy(
                deref_h5py(
                    dataset=f["saliencies"],
                    refs=descriptions["target_obj_saliency_refs"],
                )
            )
            retvals["input_reference_saliency_pts"] = torch.from_numpy(
                deref_h5py(
                    dataset=f["saliencies"],
                    refs=descriptions["reference_obj_saliency_refs"],
                )
            )
            retvals["input_description_saliency_pts"] = torch.from_numpy(
                deref_h5py(
                    dataset=f["saliencies"],
                    refs=descriptions["description_saliency_refs"],
                )
            )
            saliency_prefix = f'data/saliencies/{"domain_randomized_rgb" if self.domain_randomized_rgb else "rgb"}|{self.saliency_config}'
            mean_idx = (
                np.array(f[f"{saliency_prefix}|saliency_text_labels"])
                .astype(str)
                .tolist()
                .index("mean")
            )
            mean_relevancy_map = (
                torch.from_numpy(f["saliencies"][mean_idx]).float().squeeze()
            )
            # normalize each saliency map: optional mean subtraction and
            # vmin clipping, resize to image resolution, rescale (see above)
            for k in {
                "input_target_saliency_pts",
                "input_reference_saliency_pts",
                "input_description_saliency_pts",
            }:
                if self.subtract_mean_relevancy:
                    retvals[k] -= mean_relevancy_map
                if self.saliency_vmin is not None:
                    retvals[k] -= self.saliency_vmin
                    retvals[k][retvals[k] < 0] = 0
                retvals[k] = (
                    torch.nn.functional.interpolate(
                        retvals[k][:, None, :, :],
                        size=tuple(image_shape),
                        mode="bilinear",
                        align_corners=False,
                    )
                    .squeeze()
                    .view(len(retvals[k]), -1, 1)
                )
                retvals[k] *= 50
            if "patch_masks" in self.network_inputs:
                # binarize saliencies into hard patch masks
                assert "saliency" not in self.network_inputs
                retvals["input_target_saliency_pts"] = (
                    retvals["input_target_saliency_pts"] > self.patch_mask_cutoff
                ).float()
                retvals["input_reference_saliency_pts"] = (
                    retvals["input_reference_saliency_pts"] > self.patch_mask_cutoff
                ).float()
                retvals["input_description_saliency_pts"] = (
                    retvals["input_description_saliency_pts"] > self.patch_mask_cutoff
                ).float()
            retvals["input_rgb_pts"] = (
                retvals["rgb"]
                .view(-1, 3)[None, ...]
                .repeat(len(descriptions["spatial_relation_name"]), 1, 1)
            )
            # zero-pad saliencies for the "[pad]" descriptions added above
            if len(retvals["input_target_saliency_pts"]) < len(
                descriptions["spatial_relation_name"]
            ):
                retvals["input_target_saliency_pts"] = torch.cat(
                    (
                        retvals["input_target_saliency_pts"],
                        torch.zeros(
                            len(descriptions["spatial_relation_name"])
                            - len(retvals["input_target_saliency_pts"]),
                            *list(retvals["input_target_saliency_pts"].shape)[1:],
                        ),
                    ),
                    dim=0,
                )
                retvals["input_reference_saliency_pts"] = torch.cat(
                    (
                        retvals["input_reference_saliency_pts"],
                        torch.zeros(
                            len(descriptions["spatial_relation_name"])
                            - len(retvals["input_reference_saliency_pts"]),
                            *list(retvals["input_reference_saliency_pts"].shape)[1:],
                        ),
                    ),
                    dim=0,
                )
                retvals["input_description_saliency_pts"] = torch.cat(
                    (
                        retvals["input_description_saliency_pts"],
                        torch.zeros(
                            len(descriptions["spatial_relation_name"])
                            - len(retvals["input_description_saliency_pts"]),
                            *list(retvals["input_description_saliency_pts"].shape)[1:],
                        ),
                    ),
                    dim=0,
                )
            retvals["output_label_pts"] = None
            if "full_objid_pts" in retvals and retvals["full_objid_pts"] is not None:
                # per-description binary label: 1 where the point belongs to
                # the description's target object
                output_label_pts = torch.zeros(
                    len(descriptions["target_obj_id"]),
                    len(retvals["full_objid_pts"]),
                    dtype=torch.float32,
                )
                for desc_i, target_obj_id in enumerate(descriptions["target_obj_id"]):
                    obj_mask = retvals["full_objid_pts"] == target_obj_id
                    output_label_pts[desc_i, :] = obj_mask
                retvals["output_label_pts"] = output_label_pts
            retvals["scene_id"] = self.keys[idx].split("/")[-1].split(".")[0]
            retvals["target_obj_name"] = descriptions["target_obj_name"].tolist()
            retvals["reference_obj_name"] = descriptions["reference_obj_name"].tolist()
            if self.return_vis:
                # extra untransformed data for visualization only
                retvals["depth"] = depth
                retvals["cam_intr"] = cam_intr
                retvals["cam_pose"] = cam_pose
                retvals["vis_gt_object_labels"] = (
                    np.array(group["objid_to_class"]).astype(str).tolist()
                    if "objid_to_class" in group
                    else []
                )
                if "matterport" in self.file_path or "arkit" in self.file_path:
                    vis_xyz_pts, vis_rgb_pts = get_pointcloud(
                        depth, retvals["rgb"].numpy(), cam_intr, cam_pose
                    )
                    retvals["vis_gt_objid_pts"] = torch.from_numpy(vis_rgb_pts)
                    retvals["vis_gt_xyz_pts"] = torch.from_numpy(vis_xyz_pts)
                else:
                    retvals["vis_gt_objid_pts"] = retvals["full_objid_pts"]
                    retvals["vis_gt_xyz_pts"] = torch.from_numpy(
                        deref_h5py(
                            dataset=f["full_xyz_pts"], refs=group["full_xyz_pts"]
                        )[0]
                    )
        # random rigid+scale augmentation applied to input and output clouds
        transform_matrix = None
        if self.domain_randomization:
            scene_dims = self.scene_bounds[1, :] - self.scene_bounds[0, :]
            assert (scene_dims >= 0).all()
            translation = torch.randn(3) * scene_dims * self.dr_pos
            rotation = euler.euler2mat(
                (torch.rand(1)[0] - 0.5) * self.dr_orn,
                (torch.rand(1)[0] - 0.5) * self.dr_orn,
                (torch.rand(1)[0] - 0.5) * self.dr_orn
                # full rotation around z axis
            )
            scale = torch.rand(3) * self.dr_scale + 1.0
            transform_matrix = torch.from_numpy(
                affines.compose(T=translation, R=rotation, Z=scale).astype(np.float32)
            )
        # PROCESS INPUTS
        kwargs = {
            "transform_matrix": transform_matrix,
            "scene_bounds": self.scene_bounds,
            "num_subsample_pts": self.num_input_pts,
            "subsample_probabilities": np.ones(len(retvals["input_xyz_pts"])).astype(
                np.float64
            )
            / len(retvals["input_xyz_pts"]),
            "alway_replace_pts": self.always_replace_subsample_pts,
        }
        try:
            processed_pts = SceneUnderstandDataset.transform_filter_subsample(
                xyz_pts=retvals["input_xyz_pts"],
                input_target_saliency_pts=retvals["input_target_saliency_pts"],
                input_reference_saliency_pts=retvals["input_reference_saliency_pts"],
                input_description_saliency_pts=retvals[
                    "input_description_saliency_pts"
                ],
                input_rgb_pts=retvals["input_rgb_pts"],
                **kwargs,
            )
        except Exception as e:
            # NOTE(review): falls back to no augmentation if the transformed
            # cloud fails (e.g. everything lands out of bounds)
            kwargs["transform_matrix"] = None
            processed_pts = SceneUnderstandDataset.transform_filter_subsample(
                xyz_pts=retvals["input_xyz_pts"],
                input_target_saliency_pts=retvals["input_target_saliency_pts"],
                input_reference_saliency_pts=retvals["input_reference_saliency_pts"],
                input_description_saliency_pts=retvals[
                    "input_description_saliency_pts"
                ],
                input_rgb_pts=retvals["input_rgb_pts"],
                **kwargs,
            )
        retvals["input_xyz_pts"] = processed_pts["xyz_pts"]
        retvals["input_target_saliency_pts"] = processed_pts[
            "input_target_saliency_pts"
        ]
        retvals["input_reference_saliency_pts"] = processed_pts[
            "input_reference_saliency_pts"
        ]
        retvals["input_description_saliency_pts"] = processed_pts[
            "input_description_saliency_pts"
        ]
        retvals["input_rgb_pts"] = processed_pts["input_rgb_pts"]
        if "tsdf" in self.network_inputs:
            # integrate the depth frame into a TSDF volume (camera pose gets
            # the same augmentation transform as the point clouds)
            voxel_size = (
                (self.scene_bounds[1] - self.scene_bounds[0]) / self.tsdf_shape
            ).min()
            tsdf_vol = TSDFVolume(vol_bnds=self.scene_bounds.T, voxel_size=voxel_size)
            final_transform = cam_pose
            if kwargs["transform_matrix"] is not None:
                final_transform = kwargs["transform_matrix"] @ cam_pose
            tsdf_vol.integrate(
                color_im=retvals["rgb"].numpy(),
                depth_im=depth,
                cam_intr=cam_intr,
                cam_pose=final_transform,
            )
            retvals["tsdf_vol"] = torch.from_numpy(tsdf_vol.get_volume()[0])
        else:
            retvals["tsdf_vol"] = torch.ones(1)
        # PROCESS OUTPUTS
        if "output_label_pts" in retvals and retvals["output_label_pts"] != None:
            kwargs["num_subsample_pts"] = (
                self.num_output_pts if not self.return_vis else -1
            )
            if self.balance_spatial_sampling:
                # subsample separately per description, oversampling the
                # rarer of positive/negative points
                desc_output_xyz_pts = []
                desc_output_label_pts = []
                desc_ignore_pts = []
                for desc_i in range(len(retvals["output_label_pts"])):
                    subsample_probabilities = np.ones(
                        len(retvals["output_xyz_pts"])
                    ).astype(np.float64)
                    positive_mask = retvals["output_label_pts"][desc_i].bool()
                    if positive_mask.any() and (not positive_mask.all()):
                        subsample_probabilities[positive_mask] = (
                            len(retvals["output_xyz_pts"]) / positive_mask.sum()
                        )
                        subsample_probabilities[~positive_mask] = (
                            len(retvals["output_xyz_pts"]) / (~positive_mask).sum()
                        )
                    subsample_probabilities /= subsample_probabilities.sum()
                    kwargs["subsample_probabilities"] = subsample_probabilities
                    output_pts = SceneUnderstandDataset.transform_filter_subsample(
                        xyz_pts=retvals["output_xyz_pts"],
                        output_label_pts=retvals["output_label_pts"][desc_i][None, :],
                        out_of_bounds_pts=retvals["out_of_bounds_pts"],
                        **kwargs,
                    )
                    desc_output_xyz_pts.append(output_pts["xyz_pts"])
                    desc_output_label_pts.append(output_pts["output_label_pts"])
                    desc_ignore_pts.append(output_pts["out_of_bounds_pts"])
                retvals["output_xyz_pts"] = torch.stack(desc_output_xyz_pts)
                retvals["output_label_pts"] = torch.stack(
                    desc_output_label_pts
                ).squeeze(dim=-2)
                retvals["out_of_bounds_pts"] = torch.stack(desc_ignore_pts)
            else:
                # one shared uniform subsample across all descriptions
                kwargs["subsample_probabilities"] = np.ones(
                    len(retvals["output_xyz_pts"])
                ).astype(np.float64)
                kwargs["subsample_probabilities"] /= kwargs[
                    "subsample_probabilities"
                ].sum()
                processed_pts = SceneUnderstandDataset.transform_filter_subsample(
                    xyz_pts=retvals["output_xyz_pts"],
                    output_label_pts=retvals["output_label_pts"],
                    out_of_bounds_pts=retvals["out_of_bounds_pts"],
                    **kwargs,
                )
                retvals["output_xyz_pts"] = processed_pts["xyz_pts"]
                retvals["out_of_bounds_pts"] = processed_pts["out_of_bounds_pts"]
                retvals["output_xyz_pts"] = retvals["output_xyz_pts"][None].repeat(
                    len(processed_pts["output_label_pts"]), 1, 1
                )
                retvals["output_label_pts"] = processed_pts["output_label_pts"]
            if self.xyz_pts_noise > 0.0:
                # gaussian jitter on both input and output coordinates
                retvals["output_xyz_pts"] += (
                    torch.randn_like(retvals["output_xyz_pts"]) * self.xyz_pts_noise
                )
                retvals["input_xyz_pts"] += (
                    torch.randn_like(retvals["input_xyz_pts"]) * self.xyz_pts_noise
                )
            # mark output points that the camera frustum never observed
            retvals["out_of_frustum_pts_mask"] = torch.from_numpy(
                np.stack(
                    [
                        ~check_pts_in_frustum(
                            xyz_pts=desc_xyz_pts,
                            depth=depth,
                            cam_pose=cam_pose,
                            cam_intr=cam_intr,
                        )
                        for desc_xyz_pts in retvals["output_xyz_pts"]
                    ],
                    axis=0,
                )
            ).bool()
        return retvals
class SceneCompletionDataset(SceneUnderstandDataset):
    def __init__(self, num_patches: int, **kwargs):
        # num_patches: max semantic-class patches subsampled per scene
        # (-1 keeps all)
        super().__init__(**kwargs)
        self.num_patches = num_patches
    @staticmethod
    def get_scene_patches(
        file,
        num_subsample_patches: int,
        rgb_key: str,
        saliency_config: str,
        use_synonyms: bool,
        subtract_mean_relevancy: bool,
        full_objid_pts: Optional[torch.Tensor] = None,
        out_of_frustum_pts_mask: Optional[torch.Tensor] = None,
        only_return_num_patches: bool = False,
        use_gt_seg: bool = False,
    ):
        """Load per-class "patches" for a scene: class labels, their 2D
        saliency maps (or GT segmentation masks when ``use_gt_seg``), label
        text features, and — when ground truth is available — the object ids
        matching each class label."""
        assert only_return_num_patches or saliency_config is not None
        saliency_prefix = f"data/saliencies/{rgb_key}|{saliency_config}"
        has_groundtruth = full_objid_pts is not None
        scene_patches = dict()
        scene_object_labels = np.array(file[f"data/objid_to_class"]).astype(str)
        # labels are stored as "class[objid]"; strip the objid suffix
        scene_patches["patch_labels"] = np.array(
            list(map(lambda s: s.split("[")[0], scene_object_labels))
        )
        if has_groundtruth:
            # keep only classes of objects that are both in-frustum and
            # visible in the 2D segmentation
            if out_of_frustum_pts_mask is not None:
                scene_obj_ids = set(
                    full_objid_pts[~out_of_frustum_pts_mask].view(-1).long().tolist()
                )
            else:
                scene_obj_ids = set(full_objid_pts.view(-1).long().tolist())
            visible_obj_ids = set(
                np.unique(
                    deref_h5py(dataset=file["seg"], refs=file["data/seg"])[0]
                ).astype(int)
            ) - {-1}
            scene_obj_ids = scene_obj_ids.intersection(visible_obj_ids)
            scene_patches["patch_labels"] = list(
                set(scene_patches["patch_labels"][list(scene_obj_ids)])
                - {"empty", "out of bounds", "unlabelled"}
            )
            if use_synonyms:
                scene_patches["patch_labels"] = list(
                    map(
                        lambda x: x if x not in synonyms.keys() else synonyms[x],
                        scene_patches["patch_labels"],
                    )
                )
        if use_gt_seg:
            # oracle variant: build patch masks from GT segmentation instead
            # of relevancy maps
            assert has_groundtruth
            assert not subtract_mean_relevancy
            assert not use_synonyms
            patch_objids = dict()
            for patch_label in scene_patches["patch_labels"]:
                patch_objids[patch_label] = set(
                    map(
                        lambda objid: int(objid.split("[")[1].split("]")[0]),
                        filter(
                            lambda objid: objid.split("[")[0] == patch_label,
                            scene_object_labels.tolist(),
                        ),
                    )
                )
            patch_labels = np.array(list(patch_objids.keys()))
            num_patches = len(patch_objids)
            if num_subsample_patches != -1 and num_patches > num_subsample_patches:
                indices = np.random.choice(
                    num_patches, num_subsample_patches, replace=False
                )
                patch_labels = patch_labels[indices]
                patch_objids = {k: patch_objids[k] for k in patch_labels}
                num_patches = len(patch_objids)
            seg = deref_h5py(dataset=file["seg"], refs=file["data/seg"])[0]
            scene_patches["patch_saliencies"] = []
            for patch_label, objids in patch_objids.items():
                # take or of all object segs
                mask = np.zeros_like(seg)
                for objid in objids:
                    mask = np.logical_or(mask, (seg == objid))
                scene_patches["patch_saliencies"].append(mask)
            scene_patches["patch_saliencies"] = (
                torch.from_numpy(np.stack(scene_patches["patch_saliencies"])).float()
                - 0.5
            ) / 50  # because it will be multiplied by 50 later
            scene_patches["patch_labels"] = patch_labels
            scene_patches["patch_objmatches"] = np.array(
                [
                    "|".join(map(str, patch_objids[patch_label]))
                    for patch_label in scene_patches["patch_labels"]
                ]
            )
            scene_patches["num_patches"] = num_patches
            # NOTE HARDCODED, only meant for testing ours, not semaware
            scene_patches["patch_label_features"] = torch.zeros(
                size=(num_patches, 512)
            ).float()
            return scene_patches
        saliency_text_labels = np.array(
            file[f"{saliency_prefix}|saliency_text_labels"]
        ).astype(str)
        # make sure saliencies for scene object labels have been generated
        assert set(scene_patches["patch_labels"]).issubset(saliency_text_labels)
        saliency_indices = np.array(
            list(
                map(
                    lambda l: l[0],
                    # only get index, not actual saliency label
                    filter(
                        lambda l: l[1] in scene_patches["patch_labels"],
                        # make sure saliency text label is in
                        # set of valid patch mask labels
                        enumerate(saliency_text_labels),
                    ),
                )
            )
        )
        num_patches = len(saliency_indices)
        if only_return_num_patches:
            return num_patches
        if num_subsample_patches != -1 and num_patches > num_subsample_patches:
            saliency_indices = np.random.choice(
                saliency_indices, num_subsample_patches, replace=False
            )
            num_patches = num_subsample_patches
        # hdf5 indexing must be in order
        saliency_indices.sort()
        scene_patches["patch_labels"] = np.array(
            file[f"{saliency_prefix}|saliency_text_labels"]
        ).astype(str)[saliency_indices]
        scene_patches["patch_saliencies"] = torch.from_numpy(
            deref_h5py(
                dataset=file[f"saliencies"],
                refs=file[saliency_prefix][saliency_indices],
            )
        ).float()
        if subtract_mean_relevancy:
            # subtract the scene's mean relevancy map as a bias correction
            mean_idx = (
                np.array(file[f"{saliency_prefix}|saliency_text_labels"])
                .astype(str)
                .tolist()
                .index("mean")
            )
            mean_relevancy = (
                torch.from_numpy(file[f"saliencies"][mean_idx]).float().squeeze()
            )
            scene_patches["patch_saliencies"] -= mean_relevancy
        scene_patches["patch_label_features"] = torch.from_numpy(
            np.array(file[f"{saliency_prefix}|saliency_text_label_features"])
        ).float()[saliency_indices]
        scene_patches["num_patches"] = num_patches
        if has_groundtruth:
            # map each (possibly synonym-swapped) label back to its object ids
            original_patch_labels = scene_patches["patch_labels"]
            if use_synonyms:
                inv_synonyms = {v: k for k, v in synonyms.items()}
                original_patch_labels = map(
                    lambda l: l if l not in synonyms.values() else inv_synonyms[l],
                    original_patch_labels,
                )
            scene_patches["patch_objmatches"] = np.array(
                [
                    "|".join(
                        [
                            str(objid)
                            for objid, obj_label in enumerate(scene_object_labels)
                            if obj_label.split("[")[0] == patch_label
                        ]
                    )
                    for patch_label in original_patch_labels
                ]
            )
        else:
            # matterport
            scene_patches["patch_objmatches"] = np.array([""] * num_patches)
        # upsample saliency maps to the RGB image resolution
        image_shape = file["rgb"].shape[1:-1]
        scene_patches["patch_saliencies"] = torch.nn.functional.interpolate(
            scene_patches["patch_saliencies"][:, None, :, :],
            size=tuple(image_shape),
            mode="bilinear",
            align_corners=False,
        )[:, 0]
        return scene_patches
    @classmethod
    def transform_retvals(
        cls,
        retvals: dict,
        num_output_pts: int,
        balance_spatial_sampling: bool,
        scene_bounds: np.ndarray,
        tsdf_shape,
        rgb,
        depth,
        cam_intr,
        cam_pose,
        network_inputs,
        **kwargs,
    ):
        """Filter/subsample input and output point sets and build the TSDF volume.

        Mutates ``retvals`` in place (returns None):
        - replaces ``input_xyz_pts`` / ``input_feature_pts`` with a uniformly
          subsampled version,
        - if ground truth is present (``output_label_pts``), subsamples the
          output points (optionally class-balanced per patch),
        - adds ``tsdf_vol`` (a real volume when "tsdf" is a network input,
          otherwise a 1-element placeholder).

        ``kwargs`` is forwarded to ``transform_filter_subsample`` and is also
        mutated (``num_subsample_pts``) between the input and output passes.
        """
        # Uniformly subsample the input point cloud (equal probability per point).
        input_pts = SceneUnderstandDataset.transform_filter_subsample(
            xyz_pts=retvals["input_xyz_pts"],
            input_feature_pts=retvals["input_feature_pts"],
            subsample_probabilities=np.ones(len(retvals["input_xyz_pts"])).astype(
                np.float64
            )
            / len(retvals["input_xyz_pts"]),
            scene_bounds=scene_bounds,
            **kwargs,
        )
        # -1 disables subsampling unless ground truth labels exist below.
        kwargs["num_subsample_pts"] = -1
        # PROCESS OUTPUTS
        if "output_label_pts" in retvals:
            kwargs["num_subsample_pts"] = num_output_pts
            if balance_spatial_sampling:
                # Subsample per patch, upweighting the rarer of the
                # positive/negative classes so both are equally represented.
                patch_output_xyz_pts = []
                patch_output_label_pts = []
                patch_ignore_pts = []
                for patch_i in range(len(retvals["output_label_pts"])):
                    subsample_probabilities = np.ones(
                        len(retvals["output_xyz_pts"])
                    ).astype(np.float64)
                    positive_mask = retvals["output_label_pts"][patch_i].bool()
                    if positive_mask.any() and (not positive_mask.all()):
                        # Inverse-frequency weighting for each class.
                        subsample_probabilities[positive_mask] = (
                            len(retvals["output_xyz_pts"]) / positive_mask.sum()
                        )
                        subsample_probabilities[~positive_mask] = (
                            len(retvals["output_xyz_pts"]) / (~positive_mask).sum()
                        )
                    subsample_probabilities /= subsample_probabilities.sum()
                    output_pts = SceneUnderstandDataset.transform_filter_subsample(
                        xyz_pts=retvals["output_xyz_pts"],
                        out_of_bounds_pts=retvals["out_of_bounds_pts"],
                        output_label_pts=retvals["output_label_pts"][patch_i][None, :],
                        subsample_probabilities=subsample_probabilities,
                        scene_bounds=scene_bounds,
                        **kwargs,
                    )
                    patch_output_xyz_pts.append(output_pts["xyz_pts"])
                    patch_output_label_pts.append(output_pts["output_label_pts"])
                    patch_ignore_pts.append(output_pts["out_of_bounds_pts"])
                # Stack the per-patch results into [num_patches, ...] tensors.
                retvals["output_xyz_pts"] = torch.stack(patch_output_xyz_pts)
                retvals["out_of_bounds_pts"] = torch.stack(patch_ignore_pts)
                retvals["output_label_pts"] = torch.stack(
                    patch_output_label_pts
                ).squeeze(dim=-2)
            else:
                # Uniform subsampling shared across all patches; the same
                # xyz/out-of-bounds tensors are repeated per patch.
                output_pts = SceneUnderstandDataset.transform_filter_subsample(
                    xyz_pts=retvals["output_xyz_pts"],
                    output_label_pts=retvals["output_label_pts"],
                    out_of_bounds_pts=retvals["out_of_bounds_pts"],
                    subsample_probabilities=np.ones(
                        len(retvals["output_xyz_pts"])
                    ).astype(np.float64)
                    / len(retvals["output_xyz_pts"]),
                    scene_bounds=scene_bounds,
                    **kwargs,
                )
                retvals["output_xyz_pts"] = output_pts["xyz_pts"][None, ...].repeat(
                    len(output_pts["output_label_pts"]), 1, 1
                )
                retvals["out_of_bounds_pts"] = output_pts["out_of_bounds_pts"][
                    None, ...
                ].repeat(len(output_pts["output_label_pts"]), 1, 1)
                retvals["output_label_pts"] = output_pts["output_label_pts"]
        retvals["input_xyz_pts"] = input_pts["xyz_pts"]
        retvals["input_feature_pts"] = input_pts["input_feature_pts"]
        # construct the tsdf vol
        if "tsdf" in network_inputs:
            # Use the smallest axis resolution so the volume fits tsdf_shape.
            voxel_size = ((scene_bounds[1] - scene_bounds[0]) / tsdf_shape).min()
            tsdf_vol = TSDFVolume(vol_bnds=scene_bounds.T, voxel_size=voxel_size)
            final_transform = cam_pose
            if kwargs["transform_matrix"] is not None:
                # Apply the domain-randomization transform to the camera pose.
                final_transform = kwargs["transform_matrix"] @ cam_pose
            tsdf_vol.integrate(
                color_im=rgb.numpy(),
                depth_im=depth,
                cam_intr=cam_intr,
                cam_pose=final_transform,
            )
            retvals["tsdf_vol"] = torch.from_numpy(tsdf_vol.get_volume()[0])
        else:
            # Placeholder so downstream code can always index "tsdf_vol".
            retvals["tsdf_vol"] = torch.ones(1)
    def __getitem__(self, idx):
        """Load one scene and assemble network inputs and (optional) targets.

        Reads RGB-D, saliency patches and optional ground truth from the
        scene's HDF5 file, builds per-patch input features, pads patches up to
        ``self.num_patches``, applies optional domain randomization and
        input/output subsampling, and returns a dict of float tensors
        (entries whose value is None are dropped).
        """
        retvals = dict()
        scene_path = self.file_path + "/" + self.keys[idx]
        with h5py.File(scene_path, "r") as f:
            group = f["data"]
            # Camera observation: depth map, intrinsics, extrinsics.
            depth = deref_h5py(dataset=f["depth"], refs=group["depth"])[0]
            cam_intr = np.array(group["cam_intr"])
            cam_pose = np.array(group["cam_pose"])
            if self.domain_randomized_rgb:
                retvals["rgb"] = np.array(group["domain_randomized_rgb"][0])
            else:
                retvals["rgb"] = np.array(f["rgb"][group["rgb"][0]][0])
            retvals["rgb"] = torch.from_numpy(retvals["rgb"]).float()
            # Unproject the depth map into a world-space point cloud.
            retvals["input_xyz_pts"] = torch.from_numpy(
                get_pointcloud(depth, None, cam_intr, cam_pose)[0]
            ).float()
            retvals["full_objid_pts"] = None
            # Ground truth (per-point object ids) is only present for some datasets.
            if "full_objid_pts" in group:
                retvals["output_xyz_pts"] = torch.from_numpy(
                    deref_h5py(dataset=f["full_xyz_pts"], refs=group["full_xyz_pts"])[0]
                ).float()
                retvals["full_objid_pts"] = torch.from_numpy(
                    deref_h5py(
                        dataset=f["full_objid_pts"], refs=group["full_objid_pts"]
                    )[0]
                ).long()
                # Mark ground-truth points outside the camera frustum.
                retvals["out_of_frustum_pts_mask"] = ~check_pts_in_frustum(
                    xyz_pts=retvals["output_xyz_pts"],
                    depth=depth,
                    cam_pose=cam_pose,
                    cam_intr=cam_intr,
                )
            # Per-text-label saliency patches (all patches when visualizing).
            scene_patches = self.get_scene_patches(
                file=f,
                num_subsample_patches=self.num_patches if not self.return_vis else -1,
                full_objid_pts=retvals["full_objid_pts"],
                out_of_frustum_pts_mask=retvals["out_of_frustum_pts_mask"]
                if "out_of_frustum_pts_mask" in retvals
                else None,
                saliency_config=self.saliency_config,
                subtract_mean_relevancy=self.subtract_mean_relevancy,
                use_synonyms=self.use_synonyms,
                rgb_key="domain_randomized_rgb"
                if self.domain_randomized_rgb
                else "rgb",
            )
            # Assemble per-point input features depending on network_inputs.
            feature_pts = []
            feature_dim = 0
            if "rgb" in self.network_inputs:
                # if rgb is in network inputs, then approach must be semantic aware
                # therefore, no other inputs
                feature_pts.append(retvals["rgb"][None, ...] / 255.0)
                feature_dim += 3
            else:
                if "patch_masks" in self.network_inputs:
                    # Binarized saliency; optionally remapped to {-1, +1}.
                    if self.offset_patch_mask:
                        feature_pts.append(
                            (
                                scene_patches["patch_saliencies"][..., None]
                                > self.patch_mask_cutoff
                            )
                            * 2
                            - 1
                        )
                    else:
                        feature_pts.append(
                            (
                                scene_patches["patch_saliencies"][..., None]
                                > self.patch_mask_cutoff
                            )
                        )
                    feature_dim += 1
                if "saliency" in self.network_inputs:
                    patch_saliencies = scene_patches["patch_saliencies"][..., None]
                    if self.saliency_vmin is not None:
                        # Shift then clip negatives to zero.
                        patch_saliencies -= self.saliency_vmin
                        patch_saliencies[patch_saliencies < 0] = 0
                    feature_pts.append(patch_saliencies * 50)
                    # gradcam values typically between -0.02 and 0.02
                    feature_dim += 1
            retvals["input_feature_pts"] = torch.cat(feature_pts, dim=-1)
            # Flatten image dims: [num_patches, H*W, feature_dim].
            retvals["input_feature_pts"] = retvals["input_feature_pts"].view(
                len(retvals["input_feature_pts"]), -1, feature_dim
            )
            # Zero-pad patch features up to num_patches (training only).
            if (
                self.num_patches > len(retvals["input_feature_pts"])
                and not self.return_vis
                and "rgb" not in self.network_inputs
            ):
                retvals["input_feature_pts"] = torch.cat(
                    (
                        retvals["input_feature_pts"],
                        torch.zeros(
                            self.num_patches - len(retvals["input_feature_pts"]),
                            *list(retvals["input_feature_pts"].shape[1:]),
                        ),
                    ),
                    dim=0,
                )
            retvals["semantic_class_features"] = scene_patches["patch_label_features"]
            # Pad class (text) features with random vectors for unused patch slots.
            if (
                self.num_patches > len(scene_patches["patch_label_features"])
                and not self.return_vis
            ):
                retvals["semantic_class_features"] = torch.cat(
                    (
                        retvals["semantic_class_features"],
                        torch.randn(
                            [self.num_patches - len(retvals["semantic_class_features"])]
                            + list(retvals["semantic_class_features"].shape[1:]),
                        ),
                    ),
                    dim=0,
                )
            # Build per-patch binary labels over the ground-truth points.
            if (
                self.load_gt
                and "full_objid_pts" in retvals
                and retvals["full_objid_pts"] is not None
            ):
                gt_seg = deref_h5py(dataset=f["seg"], refs=group["seg"])[0]
                retvals["seg"] = gt_seg
                output_label_pts = torch.zeros(
                    len(retvals["semantic_class_features"]),
                    len(retvals["full_objid_pts"]),
                    dtype=float,
                )
                # patch_objmatches is a "|"-joined list of object ids per patch.
                for patch_i, patch_matches in enumerate(
                    scene_patches["patch_objmatches"]
                ):
                    for objid in patch_matches.split("|"):
                        if objid == "":
                            continue
                        output_label_pts[
                            patch_i, retvals["full_objid_pts"] == int(objid)
                        ] = 1.0
                retvals["output_label_pts"] = output_label_pts
                retvals["out_of_bounds_pts"] = torch.zeros(
                    len(retvals["full_objid_pts"])
                ).float()
                object_labels = np.array(group["objid_to_class"]).astype(str).tolist()
                if "out of bounds" in object_labels:
                    oob_idx = object_labels.index("out of bounds")
                    retvals["out_of_bounds_pts"] = (
                        retvals["full_objid_pts"] == oob_idx
                    ).float()
            retvals["patch_labels"] = scene_patches["patch_labels"].tolist()
            assert all(map(lambda l: l != "", retvals["patch_labels"]))
            # Pad label list with "" for the padded patch slots (training only).
            retvals["patch_labels"] += (
                [""]
                * max(self.num_patches - len(retvals["patch_labels"]), 0)
                * int(not self.return_vis)
            )
            retvals["scene_id"] = self.keys[idx].split("/")[-1].split(".")[0]
            # Extra (untransformed) data for visualization.
            if self.return_vis:
                retvals["depth"] = depth
                retvals["cam_intr"] = cam_intr
                retvals["cam_pose"] = cam_pose
                retvals["patch_objmatches"] = scene_patches["patch_objmatches"].tolist()
                retvals["vis_gt_object_labels"] = (
                    np.array(group["objid_to_class"]).astype(str).tolist()
                    if "objid_to_class" in group
                    else []
                )
                if "matterport" in self.file_path or "arkit" in self.file_path:
                    # Real scans: use the colored depth point cloud for vis.
                    vis_xyz_pts, vis_rgb_pts = get_pointcloud(
                        depth, retvals["rgb"].numpy(), cam_intr, cam_pose
                    )
                    retvals["vis_gt_objid_pts"] = torch.from_numpy(vis_rgb_pts).float()
                    retvals["vis_gt_xyz_pts"] = torch.from_numpy(vis_xyz_pts).float()
                else:
                    retvals["vis_gt_objid_pts"] = retvals["full_objid_pts"]
                    retvals["vis_gt_xyz_pts"] = torch.from_numpy(
                        deref_h5py(
                            dataset=f["full_xyz_pts"], refs=group["full_xyz_pts"]
                        )[0]
                    ).float()
                    # NOTE(review): assumes the max object id marks empty space
                    # — confirm against the data generation pipeline.
                    empty_mask = (
                        retvals["vis_gt_objid_pts"] == retvals["vis_gt_objid_pts"].max()
                    )
                    retvals["vis_gt_objid_pts"] = retvals["vis_gt_objid_pts"][
                        ~empty_mask
                    ]
                    retvals["vis_gt_xyz_pts"] = retvals["vis_gt_xyz_pts"][~empty_mask]
                retvals["patch_saliencies"] = scene_patches["patch_saliencies"]
        # Random rigid transform + scale for domain randomization (training).
        transform_matrix = None
        if self.domain_randomization:
            scene_dims = self.scene_bounds[1, :] - self.scene_bounds[0, :]
            assert (scene_dims >= 0).all()
            translation = torch.randn(3) * scene_dims * 0.05
            rotation = euler.euler2mat(
                (torch.rand(1)[0] - 0.5) * 0.3,
                (torch.rand(1)[0] - 0.5) * 0.3,
                (torch.rand(1)[0] - 0.5) * 0.3
                # full rotation around z axis
            )
            scale = torch.rand(3) * 0.1 + 1.0
            transform_matrix = torch.tensor(
                affines.compose(T=translation, R=rotation, Z=scale)
            ).float()
        # filter out points with invalid depth
        if (depth == 0.0).any():
            invalid_depth_mask = (depth == 0.0).reshape(-1)
            for k in retvals.keys():
                if "input" in k:
                    # Input tensors are per-pixel along dim 0 or dim 1.
                    if retvals[k].shape[0] == len(invalid_depth_mask):
                        retvals[k] = retvals[k][~invalid_depth_mask]
                    elif retvals[k].shape[1] == len(invalid_depth_mask):
                        retvals[k] = retvals[k][:, ~invalid_depth_mask]
                    else:
                        raise Exception()
        # PROCESS INPUTS
        kwargs = {
            "transform_matrix": transform_matrix,
            "scene_bounds": self.scene_bounds,
            "num_subsample_pts": self.num_input_pts,
            "alway_replace_pts": self.always_replace_subsample_pts,
            "depth": depth,
            "cam_intr": cam_intr,
            "cam_pose": cam_pose,
            "balance_spatial_sampling": self.balance_spatial_sampling,
            "tsdf_shape": self.tsdf_shape,
            "retvals": retvals,
            "num_output_pts": self.num_output_pts if not self.return_vis else -1,
            "rgb": retvals["rgb"],
            "network_inputs": self.network_inputs,
        }
        try:
            self.transform_retvals(**kwargs)
        except Exception as e:
            # NOTE(review): the exception is swallowed and the sample is
            # retried without the random transform; failures are silent.
            kwargs["transform_matrix"] = None
            self.transform_retvals(**kwargs)
        # Recompute the frustum mask on the (subsampled/transformed) outputs.
        if "output_xyz_pts" in retvals:
            retvals["out_of_frustum_pts_mask"] = ~torch.from_numpy(
                np.stack(
                    [
                        check_pts_in_frustum(
                            xyz_pts=xyz_pts,
                            depth=depth,
                            cam_pose=cam_pose,
                            cam_intr=cam_intr,
                        )
                        for xyz_pts in retvals["output_xyz_pts"].cpu().numpy()
                    ]
                )
            )
        # Optional gaussian jitter on point coordinates (augmentation).
        if self.xyz_pts_noise > 0.0:
            retvals["output_xyz_pts"] += (
                torch.randn_like(retvals["output_xyz_pts"]) * self.xyz_pts_noise
            )
            retvals["input_xyz_pts"] += (
                torch.randn_like(retvals["input_xyz_pts"]) * self.xyz_pts_noise
            )
        return {
            k: v.float() if type(v) == torch.Tensor else v
            for k, v in retvals.items()
            if v is not None
        }
| 52,891 | 41.689266 | 138 | py |
semantic-abstraction | semantic-abstraction-main/net.py | from typing import List, Tuple
import torch
from torch.nn import (
Sequential,
LeakyReLU,
Linear,
Module,
Dropout,
ParameterDict,
)
from torch.nn.parameter import Parameter
from torch.nn.functional import grid_sample
from torch_scatter import scatter
import numpy as np
from unet3d import ResidualUNet3D
from CLIP.clip import ClipWrapper
from torchtyping import TensorType, patch_typeguard
from typeguard import typechecked
patch_typeguard() # use before @typechecked
@typechecked
class VirtualGrid:
    """Axis-aligned voxel grid spanning a scene bounding box.

    Provides conversions between world-space points, (optionally batched)
    integer voxel indices, and flattened 1-D indices, plus scattering of
    per-point features into a dense ``[B, C, W, H, L]`` feature volume.

    Index convention: index ``0`` maps to the lower corner and index
    ``grid_shape[i] - 1`` maps exactly onto the upper corner along axis ``i``.
    """

    def __init__(
        self,
        scene_bounds,
        grid_shape: Tuple[int, int, int] = (32, 32, 32),
        batch_size: int = 8,
        device: torch.device = torch.device("cpu"),
        int_dtype: torch.dtype = torch.int64,
        float_dtype: torch.dtype = torch.float32,
        reduce_method: str = "mean",
    ):
        # scene_bounds is a (2, 3) array-like: row 0 = lower corner,
        # row 1 = upper corner, in world coordinates.
        self.lower_corner = tuple(scene_bounds[0])
        self.upper_corner = tuple(scene_bounds[1])
        self.grid_shape = tuple(grid_shape)
        self.batch_size = int(batch_size)
        self.device = device
        self.int_dtype = int_dtype
        self.float_dtype = float_dtype
        # Default reduction used by scatter_points when none is given.
        self.reduce_method = reduce_method

    @property
    def num_grids(self):
        """Total number of voxels across the whole batch."""
        return int(np.prod((self.batch_size,) + self.grid_shape))

    def get_grid_idxs(self, include_batch=True):
        """Return integer index coordinates for every voxel.

        Shape is ``(B, W, H, L, 4)`` (leading batch coordinate) when
        ``include_batch`` is True, else ``(W, H, L, 3)``.
        """
        dims = self.grid_shape
        if include_batch:
            dims = (self.batch_size,) + self.grid_shape
        axis_coords = [
            torch.arange(0, extent, device=self.device, dtype=self.int_dtype)
            for extent in dims
        ]
        coords_per_axis = torch.meshgrid(*axis_coords, indexing="ij")
        return torch.stack(coords_per_axis, dim=-1)

    def get_grid_points(self, include_batch=True):
        """Return world-space coordinates of every voxel (last dim = 3)."""
        grid_idxs = self.get_grid_idxs(include_batch=include_batch)
        lc = torch.tensor(self.lower_corner, dtype=self.float_dtype, device=self.device)
        uc = torch.tensor(self.upper_corner, dtype=self.float_dtype, device=self.device)
        # Linear map: index 0 -> lower corner, index (shape-1) -> upper corner.
        idx_scale = (
            torch.tensor(self.grid_shape, dtype=self.float_dtype, device=self.device) - 1
        )
        scales = (uc - lc) / idx_scale
        grid_idxs_no_batch = grid_idxs
        if include_batch:
            # Drop the batch coordinate before converting to world space.
            grid_idxs_no_batch = grid_idxs[:, :, :, :, 1:]
        return grid_idxs_no_batch.to(self.float_dtype) * scales + lc

    def get_points_grid_idxs(self, points, cast_to_int=True, batch_idx=None):
        """Map world-space points to voxel index coordinates, clamped in range.

        When ``batch_idx`` is given it is prepended so the result is
        ``(..., 4)``; otherwise the result is ``(..., 3)``.
        """
        lc = torch.tensor(self.lower_corner, dtype=self.float_dtype, device=self.device)
        uc = torch.tensor(self.upper_corner, dtype=self.float_dtype, device=self.device)
        idx_scale = (
            torch.tensor(self.grid_shape, dtype=self.float_dtype, device=self.device) - 1
        )
        points_idxs_i = (points - lc) * (idx_scale / (uc - lc))
        if cast_to_int:
            points_idxs_i = points_idxs_i.to(dtype=self.int_dtype)
        # Clamp each axis into the valid index range.
        points_idxs = torch.empty_like(points_idxs_i)
        for i in range(3):
            points_idxs[..., i] = torch.clamp(
                points_idxs_i[..., i], min=0, max=self.grid_shape[i] - 1
            )
        final_points_idxs = points_idxs
        if batch_idx is not None:
            final_points_idxs = torch.cat(
                [
                    batch_idx.view(*points.shape[:-1], 1).to(dtype=points_idxs.dtype),
                    points_idxs,
                ],
                dim=-1,
            )
        return final_points_idxs

    def flatten_idxs(self, idxs, keepdim=False):
        """Flatten ``(batch,) x, y, z`` index coordinates into 1-D indices.

        Accepts a trailing coordinate size of 4 (batched) or 3 (unbatched);
        raises RuntimeError otherwise.
        """
        coord_size = idxs.shape[-1]
        if coord_size == 4:
            target_shape = (self.batch_size,) + self.grid_shape
        elif coord_size == 3:
            target_shape = self.grid_shape
        else:
            raise RuntimeError("Invalid shape {}".format(str(idxs.shape)))
        # Row-major strides of the target shape.
        target_stride = tuple(np.cumprod(np.array(target_shape)[::-1])[::-1])[1:] + (1,)
        flat_idxs = (
            idxs * torch.tensor(target_stride, dtype=idxs.dtype, device=idxs.device)
        ).sum(dim=-1, keepdim=keepdim, dtype=idxs.dtype)
        return flat_idxs

    def unflatten_idxs(self, flat_idxs, include_batch=True):
        """Inverse of :meth:`flatten_idxs`: 1-D indices back to coordinates."""
        target_shape = self.grid_shape
        if include_batch:
            target_shape = (self.batch_size,) + self.grid_shape
        target_stride = tuple(np.cumprod(np.array(target_shape)[::-1])[::-1])[1:] + (1,)
        source_shape = tuple(flat_idxs.shape)
        if source_shape[-1] == 1:
            # Accept a trailing singleton dim (as produced by keepdim=True).
            source_shape = source_shape[:-1]
            flat_idxs = flat_idxs[..., 0]
        source_shape += (4,) if include_batch else (3,)
        idxs = torch.empty(
            size=source_shape, dtype=flat_idxs.dtype, device=flat_idxs.device
        )
        mod = flat_idxs
        for i in range(source_shape[-1]):
            # BUGFIX: was `mod / target_stride[i]` (true division), which
            # produces a float tensor that is truncation-cast on assignment
            # into the integer `idxs`. Use explicit floor division instead.
            idxs[..., i] = torch.div(mod, target_stride[i], rounding_mode="floor")
            mod = mod % target_stride[i]
        return idxs

    def idxs_to_points(self, idxs):
        """Convert ``(batch,) x, y, z`` voxel indices to world coordinates.

        A leading batch coordinate (trailing size 4) is stripped.
        """
        source_shape = idxs.shape
        if source_shape[-1] == 4:
            point_idxs = idxs[..., 1:]
        elif source_shape[-1] == 3:
            point_idxs = idxs
        else:
            raise RuntimeError("Invalid shape {}".format(tuple(source_shape)))
        device = idxs.device
        lc = torch.tensor(self.lower_corner, dtype=self.float_dtype, device=device)
        uc = torch.tensor(self.upper_corner, dtype=self.float_dtype, device=device)
        idx_scale = (
            torch.tensor(self.grid_shape, dtype=self.float_dtype, device=device) - 1
        )
        scales = (uc - lc) / idx_scale
        return point_idxs * scales + lc

    def scatter_points(self, xyz_pts, feature_pts, reduce_method=None, **kwargs):
        """Scatter per-point features into a dense ``[B, C, W, H, L]`` volume.

        ``reduce_method`` defaults to the grid's configured reduction.
        """
        if reduce_method is None:
            reduce_method = self.reduce_method
        batch_size = feature_pts.shape[0]
        idxs = self.get_points_grid_idxs(xyz_pts)
        # idxs.shape = [B, num_pts, 3]
        flat_idxs = self.flatten_idxs(idxs, keepdim=False)
        # flat_idxs.shape = [B, num_pts]
        vol_features = scatter(
            src=feature_pts,
            index=flat_idxs,
            dim=-2,
            dim_size=np.prod(self.grid_shape),
            # BUGFIX: honor the `reduce_method` argument; previously
            # `self.reduce_method` was always used, silently ignoring the
            # caller's explicit choice (e.g. SemAbs3D requests "max").
            reduce=reduce_method,
            **kwargs
        ).view(batch_size, *self.grid_shape, -1)
        # [B, W, H, L, C] -> [B, C, W, H, L]
        return vol_features.permute(0, 4, 1, 2, 3).contiguous()
class ImplicitVolumetricDecoder(Module):
    """Decode features at continuous query points from a dense voxel volume.

    Trilinearly samples ``features_vol`` at each query point (via
    ``grid_sample``), then maps the sampled feature through a small MLP to
    ``output_dim`` channels.
    """

    def __init__(self, hidden_size: int, output_dim: int, concat_xyz_pts: bool = False):
        super().__init__()
        self.concat_xyz_pts = concat_xyz_pts
        # Optionally append the normalized query coordinates to the feature.
        mlp_in_dim = hidden_size + (3 if concat_xyz_pts else 0)
        self.mlp = Sequential(
            Linear(mlp_in_dim, hidden_size),
            LeakyReLU(),
            Linear(hidden_size, output_dim),
        )
        self.output_dim = output_dim

    def forward(
        self,
        features_vol: TensorType["batch", "channel", "width", "height", "length"],
        virtual_grid: VirtualGrid,
        query_points: TensorType["batch", "num_points", 3],
    ) -> TensorType["batch", "num_points", "channel"]:
        # World space -> fractional grid index -> [0, 1] per axis.
        grid_coords = virtual_grid.get_points_grid_idxs(
            query_points, cast_to_int=False
        ).float()
        for axis in range(len(virtual_grid.grid_shape)):
            grid_coords[..., axis] = grid_coords[..., axis] / virtual_grid.grid_shape[axis]
        # grid_sample expects coordinates normalized to (-1, 1).
        normalized = 2.0 * grid_coords - 1.0
        sample_grid = normalized.view(*(normalized.shape[:2] + (1, 1, 3)))
        sampled = grid_sample(
            input=features_vol,
            grid=sample_grid,
            mode="bilinear",
            padding_mode="border",
            align_corners=True,
        )
        # [B, C, N, 1, 1] -> [B, N, C]
        sampled = sampled.view(sampled.shape[:3]).permute(0, 2, 1).contiguous()
        batch, num_pts, channels = sampled.shape
        flat_features = sampled.view(batch * num_pts, channels).contiguous()
        if self.concat_xyz_pts:
            flat_features = torch.cat(
                (flat_features, sample_grid.view(batch * num_pts, 3)), dim=-1
            )
        return (
            self.mlp(flat_features).view(batch, num_pts, self.output_dim).contiguous()
        )
class PointingAttention(Module):
    """Compute attention scores between key and query feature tensors.

    The scoring method is picked at construction time and bound directly to
    ``forward``: scaled dot product, temperature-scaled cosine similarity,
    or additive (Bahdanau-style) attention.
    """

    def __init__(self, pointing_dim, method="dot_product", pointing_temperature=0.07):
        super().__init__()
        self.method = method
        self.pointing_dim = pointing_dim
        if method == "dot_product":
            self.forward = self.dot_product
        elif method == "cosine_sim":
            self.cosine_sim_temp = pointing_temperature
            self.forward = self.cosine_sim
        elif method == "additive":
            # Learned projection from the combined feature to a scalar score.
            self.pointer_v = Linear(pointing_dim, 1, bias=False)
            self.forward = self.additive
        else:
            raise Exception()

    @staticmethod
    def prep_input(key, query):
        """
        key.shape = BxKx[ABC]xD
        query.shape = BxQx[XYZ]xD
        output attention should be: Bx[ABC]x[XYZ]xD
        """
        if key.shape == query.shape:
            # Identical shapes score elementwise; no broadcasting needed.
            return key, query
        # Give `query` singleton dims for each of key's extra inner axes.
        for _ in range(key.dim() - 3):
            query = query.unsqueeze(2)
        # Give `key` trailing singleton dims until ranks match.
        while key.dim() < query.dim():
            key = key.unsqueeze(-2)
        # Finally separate the key axis (dim 1) from the query axis (dim 2).
        return key.unsqueeze(dim=2), query.unsqueeze(dim=1)

    def dot_product(self, key, query):
        """Scaled dot-product score: (q . k) / sqrt(d)."""
        key, query = self.prep_input(key, query)
        return (query * key).sum(dim=-1) / np.sqrt(self.pointing_dim)

    def cosine_sim(self, key, query):
        """
        key.shape = BxDxKx...
        query.shape = BxDxQx...
        """
        key, query = self.prep_input(key, query)
        return torch.cosine_similarity(key, query, dim=-1) / self.cosine_sim_temp

    def additive(self, key, query):
        """Additive attention: v^T tanh(q + k)."""
        key, query = self.prep_input(key, query)
        return self.pointer_v(torch.tanh(query + key)).squeeze(dim=-1)
class SemAbs3D(Module):
    """Semantic abstraction 3D completion network.

    Scatters per-point input features into a voxel grid, runs a 3D residual
    UNet over the volume, and decodes per-query-point outputs with an
    implicit volumetric decoder. Each saliency patch is processed as an
    independent item in the (batch * num_patches) dimension.
    """

    def __init__(
        self,
        voxel_shape: Tuple[int, int, int],
        scene_bounds: Tuple[Tuple[float, float, float], Tuple[float, float, float]],
        unet_num_channels: int,
        unet_f_maps: int,
        unet_num_groups: int,
        unet_num_levels: int,
        network_inputs: List[str],
        use_pts_feat_extractor: bool,
        pts_feat_extractor_hidden_dim: int,
        reduce_method: str,
        output_dim=1,
        device: str = "cuda",
        decoder_concat_xyz_pts: bool = False,
        **kwargs
    ):
        super().__init__()
        self.device = device
        # NOTE(review): the VirtualGrid is built with its default
        # reduce_method; scatter_points below is called with
        # reduce_method=self.reduce_method — confirm the reduction
        # actually applied matches the "max" assertion further down.
        self.vg = VirtualGrid(
            scene_bounds=np.array(scene_bounds),
            batch_size=kwargs["batch_size"],
            grid_shape=voxel_shape,
            device=torch.device(device),
        )
        # Persistent step counter (saved with the state dict).
        self.register_buffer("steps", torch.zeros(1))
        self.network_inputs = network_inputs
        self.use_pts_feat_extractor = use_pts_feat_extractor
        self.reduce_method = reduce_method
        # Per-point feature width implied by the selected network inputs:
        # saliency -> 1, rgb -> 3, patch_masks -> 1 channel(s).
        self.pts_feature_dim = (
            ("saliency" in self.network_inputs)
            + ("rgb" in self.network_inputs) * 3
            + ("patch_masks" in self.network_inputs)
        )
        vol_feature_extractor_input_channels = self.pts_feature_dim + (
            "tsdf" in self.network_inputs
        )
        if self.use_pts_feat_extractor:
            # Pointwise MLP over (xyz, features); its output fills the UNet
            # input channels (minus one reserved for the TSDF channel).
            self.pts_feat_extractor = Sequential(
                Linear(self.pts_feature_dim + 3, pts_feat_extractor_hidden_dim),
                LeakyReLU(),
                Linear(pts_feat_extractor_hidden_dim, pts_feat_extractor_hidden_dim),
                LeakyReLU(),
                Linear(
                    pts_feat_extractor_hidden_dim,
                    unet_num_channels - int("tsdf" in self.network_inputs),
                ),
            )
            vol_feature_extractor_input_channels = unet_num_channels
            assert self.reduce_method == "max"
        self.vol_feature_extractor = ResidualUNet3D(
            in_channels=vol_feature_extractor_input_channels,
            out_channels=unet_num_channels,
            f_maps=unet_f_maps,
            num_groups=unet_num_groups,
            num_levels=unet_num_levels,
        )
        self.visual_sampler = ImplicitVolumetricDecoder(
            hidden_size=unet_num_channels,
            output_dim=output_dim,
            concat_xyz_pts=decoder_concat_xyz_pts,
        )

    def forward(
        self, input_xyz_pts, input_feature_pts, tsdf_vol, output_xyz_pts, **kwargs
    ):
        """Return per-output-point features, shape [B, num_patches, N(, C)].

        Also caches the UNet output in ``self.visual_volumetric_features``
        for reuse by wrapper modules.
        """
        batch_size, num_patches, input_num_pts = input_feature_pts.shape[:3]
        # Fold patches into the batch dim; the same xyz cloud is shared
        # across patches, so it is repeated per patch.
        input_xyz_pts = (
            input_xyz_pts.unsqueeze(dim=1)
            .repeat(1, num_patches, 1, 1)
            .view(batch_size * num_patches, input_num_pts, 3)
        )
        input_feature_pts = input_feature_pts.view(
            batch_size * num_patches, input_num_pts, self.pts_feature_dim
        )
        if self.use_pts_feat_extractor:
            input_feature_pts = self.pts_feat_extractor(
                torch.cat(
                    (
                        input_xyz_pts,
                        input_feature_pts,
                    ),
                    dim=-1,
                )
            )
        # Per-point features -> dense voxel volume.
        visual_volumetric_features = self.vg.scatter_points(
            xyz_pts=input_xyz_pts,
            feature_pts=input_feature_pts,
            reduce_method=self.reduce_method,
        )
        batch_size, num_patches, num_output_pts = output_xyz_pts.shape[:3]
        # If inputs were not per-patch, repeat the volume for each patch.
        if visual_volumetric_features.shape[0] < batch_size * num_patches:
            visual_volumetric_features = (
                visual_volumetric_features[:, None, ...]
                .repeat(1, num_patches, 1, 1, 1, 1)
                .view(batch_size * num_patches, *visual_volumetric_features.shape[1:])
            )
        if "tsdf" in self.network_inputs:
            # Prepend the TSDF as an extra channel.
            visual_volumetric_features = torch.cat(
                (
                    tsdf_vol.unsqueeze(dim=1).repeat(num_patches, 1, 1, 1, 1),
                    visual_volumetric_features,
                ),
                dim=1,
            )
        self.visual_volumetric_features = self.vol_feature_extractor(
            visual_volumetric_features
        )
        output_xyz_pts = output_xyz_pts.view(
            batch_size * num_patches, num_output_pts, 3
        )
        return (
            self.visual_sampler(
                features_vol=self.visual_volumetric_features,
                virtual_grid=self.vg,
                query_points=output_xyz_pts,
            )
            .view(batch_size, num_patches, num_output_pts, -1)
            .squeeze(dim=-1)
        )
class SemanticAwareOVSSC(SemAbs3D):
    """Semantic-aware OVSSC baseline.

    Decodes CLIP-sized features per point with the SemAbs3D backbone, then
    scores each patch's points against that patch's class text feature with
    a pointing-attention head.
    """

    def __init__(self, pointing_method: str, clip_hidden_dim: int = 512, **kwargs):
        super().__init__(output_dim=clip_hidden_dim, **kwargs)
        self.semantic_class_pointer = PointingAttention(
            pointing_dim=clip_hidden_dim, method=pointing_method
        )

    def forward(self, semantic_class_features, **kwargs):
        sampled_features = super().forward(**kwargs)
        assert sampled_features.shape[1] == semantic_class_features.shape[1]
        # Score each patch independently against its own class feature.
        per_patch_scores = [
            self.semantic_class_pointer(
                key=semantic_class_features[:, patch_i, ...][:, None, ...],
                query=sampled_features[:, patch_i, ...][:, None, ...],
            )
            for patch_i in range(semantic_class_features.shape[1])
        ]
        return torch.stack(per_patch_scores, dim=1).squeeze(dim=2).squeeze(dim=2)
class SemAbsVOOL(Module):
    """Semantic abstraction model for visual object localization (VOOL).

    Runs the shared completion network once on the target saliency and once
    on the reference saliency, concatenates the two cached feature volumes,
    decodes per-point locator features, and scores them against a learned
    embedding of the spatial relation.
    """

    def __init__(
        self,
        pointing_method: str,
        pointing_dim: int,
        device: str,
        decoder_concat_xyz_pts: bool,
        **kwargs
    ):
        super().__init__()
        # Persistent step counter (saved with the state dict).
        self.register_buffer("steps", torch.zeros(1))
        self.device = device
        self.completion_net = SemAbs3D(device=device, **kwargs).to(device)
        # Decoder input is the target+reference volumes concatenated (2x channels).
        self.spatial_sampler = ImplicitVolumetricDecoder(
            hidden_size=2 * kwargs["unet_num_channels"],
            output_dim=pointing_dim,
            concat_xyz_pts=decoder_concat_xyz_pts,
        )
        self.pointer = PointingAttention(
            method=pointing_method, pointing_dim=pointing_dim
        )
        # One learned embedding per supported spatial relation.
        self.relation_embeddings = ParameterDict(
            {
                k: Parameter(torch.randn(pointing_dim))
                for k in [
                    "in",
                    "behind",
                    "in front of",
                    "on the left of",
                    "on the right of",
                    "on",
                    "[pad]",
                ]
            }
        )

    def get_region_pointing_features(self, spatial_relation_name, **kwargs):
        """Look up relation embeddings; returns [batch, num_descs, dim]."""
        # spatial_relation_name.shape NUMDESCxBATCHxWORD
        region_pointing_features = (
            torch.stack(
                [
                    torch.stack(
                        [
                            self.relation_embeddings[
                                spatial_relation_name[desc_i][batch_i]
                            ]
                            for batch_i in range(len(spatial_relation_name[desc_i]))
                        ],
                        dim=0,
                    )
                    for desc_i in range(len(spatial_relation_name))
                ],
                dim=0,
            )
            .permute(1, 0, 2)
            .contiguous()
        )
        return region_pointing_features

    def get_feature_vol(
        self,
        input_xyz_pts,
        input_target_saliency_pts,
        input_reference_saliency_pts,
        tsdf_vol,
        num_descs,
        **kwargs
    ):
        """Concatenate target and reference completion feature volumes.

        The completion net is invoked only to populate its cached
        ``visual_volumetric_features``; output_xyz_pts is a 1-point dummy.
        """
        place_holder_output_xyz_pts = torch.zeros_like(input_xyz_pts)[
            ..., None, 0:1, :
        ].repeat(1, num_descs, 1, 1)
        self.completion_net(
            input_xyz_pts=input_xyz_pts,
            input_feature_pts=input_target_saliency_pts,
            tsdf_vol=tsdf_vol,
            # placeholder
            output_xyz_pts=place_holder_output_xyz_pts,
        )
        target_feature_vol = self.completion_net.visual_volumetric_features
        self.completion_net(
            input_xyz_pts=input_xyz_pts,
            input_feature_pts=input_reference_saliency_pts,
            tsdf_vol=tsdf_vol,
            # placeholder
            output_xyz_pts=place_holder_output_xyz_pts,
        )
        reference_feature_vol = self.completion_net.visual_volumetric_features
        # Channel-wise concat: [B*num_descs, 2C, W, H, L].
        feature_vol = torch.cat((target_feature_vol, reference_feature_vol), dim=1)
        return feature_vol

    def forward(self, output_xyz_pts, spatial_relation_name, **kwargs):
        """Return localization scores, shape [batch, num_descs, num_output_pts]."""
        batch_size, num_descs = np.array(spatial_relation_name).T.shape
        feature_vol = self.get_feature_vol(num_descs=num_descs, **kwargs)
        num_output_pts = output_xyz_pts.shape[-2]
        sampled_locator_feature_pts = self.spatial_sampler(
            features_vol=feature_vol,
            virtual_grid=self.completion_net.vg,
            query_points=output_xyz_pts.view(batch_size * num_descs, num_output_pts, 3),
        )
        # region_pointing_features.shape BATCH x NUMDESC x WORD
        region_pointing_features = self.get_region_pointing_features(
            spatial_relation_name=spatial_relation_name
        )
        return self.pointer(
            key=sampled_locator_feature_pts,
            query=region_pointing_features.contiguous().view(
                batch_size * num_descs, 1, -1
            ),
        ).view(batch_size, num_descs, num_output_pts)
class SemanticAwareVOOL(SemAbsVOOL):
    """Semantic-aware VOOL baseline.

    Unlike SemAbsVOOL, the description is represented by frozen CLIP text
    features of the target and reference object names, fused with the
    learned relation embedding through a linear layer.
    """

    def __init__(self, pointing_dim: int, clip_hidden_dim=512, **kwargs):
        super().__init__(output_dim=pointing_dim, pointing_dim=pointing_dim, **kwargs)
        # Fuse (target CLIP, reference CLIP, relation embedding) -> pointing_dim.
        self.mlp = Linear(clip_hidden_dim * 2 + pointing_dim, pointing_dim)

    def get_region_pointing_features(
        self, target_obj_name, reference_obj_name, **kwargs
    ):
        """Build per-description pointing features from object names.

        CLIP text features are computed under no_grad (frozen); only the
        fusion MLP and relation embeddings receive gradients.
        """
        with torch.no_grad():
            target_obj_name = np.array(target_obj_name).T
            reference_obj_name = np.array(reference_obj_name).T
            batch_size, num_descs = target_obj_name.shape
            target_obj_feature_names = torch.from_numpy(
                ClipWrapper.get_clip_text_feature(target_obj_name.reshape(-1))
            ).to(self.device)
            target_obj_feature_names = target_obj_feature_names.view(
                batch_size, num_descs, -1
            )
            reference_obj_feature_names = torch.from_numpy(
                ClipWrapper.get_clip_text_feature(reference_obj_name.reshape(-1))
            ).to(self.device)
            reference_obj_feature_names = reference_obj_feature_names.view(
                batch_size, num_descs, -1
            )
        # Relation embeddings come from the parent (learned ParameterDict).
        region_pointing_features = super().get_region_pointing_features(**kwargs)
        return self.mlp(
            torch.cat(
                (
                    target_obj_feature_names,
                    reference_obj_feature_names,
                    region_pointing_features,
                ),
                dim=-1,
            )
        )

    def forward(self, input_rgb_pts, spatial_relation_name, **kwargs):
        # prepare inputs
        batch_size, num_desc, _, _ = input_rgb_pts.shape
        num_output_pts = kwargs["output_xyz_pts"].shape[-2]
        # RGB features go straight through the completion net (no saliency).
        sampled_locator_feature_pts = self.completion_net(
            input_feature_pts=input_rgb_pts, **kwargs
        )
        region_pointing_features = self.get_region_pointing_features(
            spatial_relation_name=spatial_relation_name, **kwargs
        )
        return self.pointer(
            key=sampled_locator_feature_pts.view(
                batch_size * num_desc, num_output_pts, -1
            ),
            query=region_pointing_features.contiguous().view(
                batch_size * num_desc, 1, -1
            ),
        ).view(batch_size, num_desc, -1)
class ClipSpatialVOOL(Module):
    """VOOL variant driven by a single description-level saliency map.

    The whole description's saliency is completed into one feature volume,
    and a 1-channel implicit decoder directly predicts per-point scores —
    no separate relation/pointing head.
    """

    def __init__(self, device: str, decoder_concat_xyz_pts: bool, **kwargs):
        super().__init__()
        # Persistent step counter (saved with the state dict).
        self.register_buffer("steps", torch.zeros(1))
        self.device = device
        self.completion_net = SemAbs3D(device=device, **kwargs).to(device)
        self.spatial_sampler = ImplicitVolumetricDecoder(
            hidden_size=kwargs["unet_num_channels"],
            output_dim=1,
            concat_xyz_pts=decoder_concat_xyz_pts,
        )

    def get_feature_vol(
        self,
        input_xyz_pts,
        input_description_saliency_pts,
        tsdf_vol,
        num_descs,
        **kwargs
    ):
        """Run the completion net and return its cached feature volume."""
        # output_xyz_pts is a 1-point dummy: only the volume is needed.
        placeholder_output = torch.zeros_like(input_xyz_pts)[..., None, 0:1, :]
        placeholder_output = placeholder_output.repeat(1, num_descs, 1, 1)
        self.completion_net(
            input_xyz_pts=input_xyz_pts,
            input_feature_pts=input_description_saliency_pts,
            tsdf_vol=tsdf_vol,
            output_xyz_pts=placeholder_output,
        )
        return self.completion_net.visual_volumetric_features

    def forward(self, output_xyz_pts, spatial_relation_name, **kwargs):
        """Return localization scores, shape [batch, num_descs, num_output_pts]."""
        batch_size, num_descs = np.array(spatial_relation_name).T.shape
        feature_vol = self.get_feature_vol(num_descs=num_descs, **kwargs)
        num_output_pts = output_xyz_pts.shape[-2]
        flat_queries = output_xyz_pts.view(batch_size * num_descs, num_output_pts, 3)
        scores = self.spatial_sampler(
            features_vol=feature_vol,
            virtual_grid=self.completion_net.vg,
            query_points=flat_queries,
        )
        return scores.view(batch_size, num_descs, num_output_pts)
| 25,154 | 36.047128 | 88 | py |
semantic-abstraction | semantic-abstraction-main/eval.py | import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
import os
import pickle
from dataset import ObjectLocalizationDataset, SceneCompletionDataset
from train_vool import get_losses as vool_get_losses, approach as vool_approaches
from train_ovssc import get_losses as ovssc_get_losses, approach as ovssc_approaches
import utils
from torch.utils.data import DataLoader
import pandas as pd
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
if __name__ == "__main__":
    # Evaluate a trained OVSSC or VOOL checkpoint on the val/test splits.
    parser = utils.config_parser()
    parser.add_argument("--task", choices=["ovssc", "vool"], required=True)
    args = parser.parse_args()
    # Restore the training-time arguments saved next to the checkpoint,
    # keeping only the machine-specific flags from the command line.
    with open(os.path.dirname(args.load) + "/args.pkl", "rb") as file:
        exp_args = pickle.load(file)
    for arg in vars(exp_args):
        if any(arg == s for s in ["device", "file_path", "load", "gpus", "task"]):
            continue
        setattr(args, arg, getattr(exp_args, arg))
    # Evaluation overrides: no augmentation, per-sample batches, full analysis.
    args.domain_randomization = False
    args.scene_bounds = torch.tensor(args.scene_bounds)
    args.batch_size = 1
    args.num_workers = 8
    args.balance_spatial_sampling = False
    args.detailed_analysis = True
    # Use DistributedDataParallel when more than one GPU is requested.
    ddp = len(args.gpus) > 1
    approaches = ovssc_approaches if args.task == "ovssc" else vool_approaches
    dataset_class = (
        SceneCompletionDataset if args.task == "ovssc" else ObjectLocalizationDataset
    )
    exp_dict = utils.setup_experiment(
        args=args,
        net_class=approaches[args.approach],
        dataset_class=dataset_class,
        split_file_path=args.file_path
        + ("/vool_split.pkl" if args.task == "vool" else "/ssc_split.pkl"),
        return_vis=True,
        ddp=ddp,
    )
    net = exp_dict["net"]
    net.eval()
    net.requires_grad = False
    epoch = exp_dict["start_epoch"]
    eval_detailed_stats = pd.DataFrame()
    with torch.no_grad():
        # Evaluate every non-train split and accumulate per-sample stats.
        for split, dataset in exp_dict["datasets"].items():
            if split == "train":
                continue
            sampler = None
            if ddp:
                sampler = DistributedSampler(
                    dataset=dataset, shuffle=False, drop_last=False
                )
                sampler.set_epoch(0)
            loader = DataLoader(
                dataset=dataset,
                num_workers=args.num_workers,
                batch_size=1,
                sampler=sampler,
            )
            detailed_stats = utils.loop(
                net=net,
                loader=loader,
                get_losses_fn=ovssc_get_losses
                if args.task == "ovssc"
                else vool_get_losses,
                **{
                    **vars(args),
                    "optimizer": None,  # inference only — no updates
                    "lr_scheduler": None,
                    "cutoffs": np.arange(-2.5, -0.0, 0.1),
                    "pbar": tqdm(
                        total=len(loader),
                        dynamic_ncols=True,
                        unit="batch",
                        postfix=f"| {split.upper()} ",
                    ),
                    "detailed_analysis": True,
                },
            )
            detailed_stats["epoch"] = [epoch] * len(detailed_stats)
            detailed_stats["split"] = [split] * len(detailed_stats)
            eval_detailed_stats = pd.concat([eval_detailed_stats, detailed_stats])
    # Only rank 0 writes the aggregated stats when running distributed.
    if (ddp and dist.get_rank() == 0) or not ddp:
        stats_path = os.path.splitext(args.load)[0] + f"_eval_stats.pkl"
        eval_detailed_stats.to_pickle(stats_path)
        print("dumped stats to ", stats_path)
| 3,625 | 37.574468 | 86 | py |
semantic-abstraction | semantic-abstraction-main/unet3d.py | """
Code from the 3D UNet implementation:
https://github.com/wolny/pytorch-3dunet/
"""
import importlib
import torch
import torch.nn as nn
from torch.nn import functional as F
from functools import partial
def number_of_features_per_level(init_channel_number, num_levels):
    """Return the encoder's per-level feature-map counts.

    Starts at ``init_channel_number`` and doubles once per level,
    e.g. ``64 -> [64, 128, 256, 512]`` for ``num_levels=4``.
    """
    feature_counts = []
    channels = init_channel_number
    for _ in range(num_levels):
        feature_counts.append(channels)
        channels *= 2
    return feature_counts
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
    """Build a plain ``nn.Conv3d`` with the given padding and optional bias."""
    return nn.Conv3d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        padding=padding,
        bias=bias,
    )
def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1):
    """Translate an ``order`` string into a list of ``(name, module)`` pairs.

    Each character selects one sub-module:
        'c' -> Conv3d, 'r' -> ReLU, 'l' -> LeakyReLU, 'e' -> ELU,
        'g' -> GroupNorm, 'b' -> BatchNorm3d
    The conv's learnable bias is dropped automatically when any normalization
    layer is present; a normalization placed before the conv is sized for
    ``in_channels``, after it for ``out_channels``.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): size of the convolving kernel
        order (string): layer order string; must contain 'c' and must not
            start with a non-linearity
        num_groups (int): number of groups for the GroupNorm
        padding (int): zero-padding added to the input

    Return:
        list of (name, nn.Module) tuples ready for ``add_module``
    """
    assert "c" in order, "Conv layer MUST be present"
    assert (
        order[0] not in "rle"
    ), "Non-linearity cannot be the first operation in the layer"

    layers = []
    conv_position = order.index("c")
    for position, token in enumerate(order):
        if token == "r":
            layers.append(("ReLU", nn.ReLU(inplace=True)))
        elif token == "l":
            layers.append(
                ("LeakyReLU", nn.LeakyReLU(negative_slope=0.1, inplace=True))
            )
        elif token == "e":
            layers.append(("ELU", nn.ELU(inplace=True)))
        elif token == "c":
            # a following/preceding norm layer would cancel the bias out
            use_bias = "g" not in order and "b" not in order
            layers.append(
                (
                    "conv",
                    nn.Conv3d(
                        in_channels,
                        out_channels,
                        kernel_size,
                        padding=padding,
                        bias=use_bias,
                    ),
                )
            )
        elif token == "g":
            num_channels = in_channels if position < conv_position else out_channels
            # fall back to a single group when there are too few channels
            if num_channels < num_groups:
                num_groups = 1
            assert (
                num_channels % num_groups == 0
            ), f"Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}"
            layers.append(
                (
                    "groupnorm",
                    nn.GroupNorm(num_groups=num_groups, num_channels=num_channels),
                )
            )
        elif token == "b":
            num_channels = in_channels if position < conv_position else out_channels
            layers.append(("batchnorm", nn.BatchNorm3d(num_channels)))
        else:
            raise ValueError(
                f"Unsupported layer type '{token}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
            )
    return layers
class SingleConv(nn.Sequential):
    """One conv "layer": Conv3d plus optional normalization / non-linearity.

    The ``order`` string (interpreted by ``create_conv``) decides which
    sub-modules are present and in which sequence, e.g. ``'crg'`` is
    conv -> ReLU -> groupnorm.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): size of the convolving kernel
        order (string): layer order string, e.g. 'cr', 'crg', 'cl', 'ce'
        num_groups (int): number of groups for the GroupNorm
        padding (int): zero-padding added to the input
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        order="crg",
        num_groups=8,
        padding=1,
    ):
        super(SingleConv, self).__init__()
        layer_specs = create_conv(
            in_channels, out_channels, kernel_size, order, num_groups, padding=padding
        )
        for layer_name, layer in layer_specs:
            self.add_module(layer_name, layer)
class DoubleConv(nn.Sequential):
    """Two stacked ``SingleConv`` blocks (e.g. Conv3d+ReLU+GroupNorm twice).

    On the encoder path the first conv widens to
    ``max(out_channels // 2, in_channels)`` before the second conv reaches
    ``out_channels``; on the decoder path the first conv maps straight to
    ``out_channels``. Padded convolutions keep the spatial size unchanged,
    so decoder skip connections never need cropping.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        encoder (bool): True on the encoder path, False on the decoder path
        kernel_size (int): size of the convolving kernel
        order (string): layer order string understood by ``SingleConv``
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        encoder,
        kernel_size=3,
        order="crg",
        num_groups=8,
    ):
        super(DoubleConv, self).__init__()
        if encoder:
            # widen gradually, but never shrink below the input width
            middle_channels = max(out_channels // 2, in_channels)
            channel_plan = [
                (in_channels, middle_channels),
                (middle_channels, out_channels),
            ]
        else:
            channel_plan = [
                (in_channels, out_channels),
                (out_channels, out_channels),
            ]
        # module names kept as SingleConv1/SingleConv2 for checkpoint compatibility
        for conv_idx, (c_in, c_out) in enumerate(channel_plan, start=1):
            self.add_module(
                f"SingleConv{conv_idx}",
                SingleConv(c_in, c_out, kernel_size, order, num_groups),
            )
class ExtResNetBlock(nn.Module):
    """Residual block built from three ``SingleConv`` modules.

    ``conv1`` adapts the channel count and its output doubles as the skip
    connection; ``conv2``/``conv3`` form the residual branch. ``conv3`` has
    its non-linearity stripped so the activation is applied only after the
    skip-addition. Motivated by https://arxiv.org/pdf/1706.00120.pdf; the
    default order 'cge' uses ELU with the norm after the conv.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        order="cge",
        num_groups=8,
        **kwargs,
    ):
        super(ExtResNetBlock, self).__init__()
        self.conv1 = SingleConv(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            order=order,
            num_groups=num_groups,
        )
        self.conv2 = SingleConv(
            out_channels,
            out_channels,
            kernel_size=kernel_size,
            order=order,
            num_groups=num_groups,
        )
        # the residual sum is activated afterwards, so conv3 must end
        # without a non-linearity of its own
        residual_order = "".join(ch for ch in order if ch not in "rel")
        self.conv3 = SingleConv(
            out_channels,
            out_channels,
            kernel_size=kernel_size,
            order=residual_order,
            num_groups=num_groups,
        )
        # pick the post-addition activation to match the order string
        if "l" in order:
            self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        elif "e" in order:
            self.non_linearity = nn.ELU(inplace=True)
        else:
            self.non_linearity = nn.ReLU(inplace=True)

    def forward(self, x):
        # channel-adapting conv; its output doubles as the skip connection
        residual = self.conv1(x)
        out = self.conv3(self.conv2(residual))
        out += residual
        return self.non_linearity(out)
class Encoder(nn.Module):
    """Encoder stage: optional pooling followed by a ``basic_module``.

    Pooling can be disabled (first stage) or configured with a non-cubic
    kernel for anisotropic volumes; use a complementary ``scale_factor`` in
    the corresponding ``Decoder``.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        conv_kernel_size (int): size of the convolving kernel
        apply_pooling (bool): whether to pool before the conv block
        pool_kernel_size (tuple): pooling window size
        pool_type (str): 'max' or 'avg'
        basic_module (nn.Module): DoubleConv or ExtResNetBlock
        conv_layer_order (string): layer order string for the conv block
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        conv_kernel_size=3,
        apply_pooling=True,
        pool_kernel_size=(2, 2, 2),
        pool_type="max",
        basic_module=DoubleConv,
        conv_layer_order="crg",
        num_groups=8,
    ):
        super(Encoder, self).__init__()
        assert pool_type in ["max", "avg"]
        if not apply_pooling:
            self.pooling = None
        else:
            pool_cls = nn.MaxPool3d if pool_type == "max" else nn.AvgPool3d
            self.pooling = pool_cls(kernel_size=pool_kernel_size)
        self.basic_module = basic_module(
            in_channels=in_channels,
            out_channels=out_channels,
            encoder=True,
            kernel_size=conv_kernel_size,
            order=conv_layer_order,
            num_groups=num_groups,
        )

    def forward(self, x):
        if self.pooling is not None:
            x = self.pooling(x)
        return self.basic_module(x)
class Decoder(nn.Module):
    """Decoder stage: upsampling, skip-connection joining, then a conv block.

    With ``DoubleConv`` the features are upsampled by interpolation and
    joined to the encoder features by concatenation; with any other basic
    module (e.g. ``ExtResNetBlock``) a transposed convolution is used and the
    skip connection is added instead.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): size of the convolving kernel
        scale_factor (tuple): interpolation multiplier / transposed-conv
            stride; must reverse the pooling of the matching encoder
        basic_module (nn.Module): DoubleConv or ExtResNetBlock
        conv_layer_order (string): layer order string for the conv block
        num_groups (int): number of groups for the GroupNorm
        mode (str): interpolation mode when not using a transposed conv
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        scale_factor=(2, 2, 2),
        basic_module=DoubleConv,
        conv_layer_order="crg",
        num_groups=8,
        mode="nearest",
    ):
        super(Decoder, self).__init__()
        use_transposed_conv = basic_module != DoubleConv
        self.upsampling = Upsampling(
            transposed_conv=use_transposed_conv,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            scale_factor=scale_factor,
            mode=mode,
        )
        # DoubleConv -> concatenation joining; anything else -> summation joining
        self.joining = partial(self._joining, concat=not use_transposed_conv)
        if use_transposed_conv:
            # the transposed conv already projects down to out_channels, and
            # summation does not change the channel count
            in_channels = out_channels
        self.basic_module = basic_module(
            in_channels=in_channels,
            out_channels=out_channels,
            encoder=False,
            kernel_size=kernel_size,
            order=conv_layer_order,
            num_groups=num_groups,
        )

    def forward(self, encoder_features, x):
        upsampled = self.upsampling(encoder_features=encoder_features, x=x)
        joined = self.joining(encoder_features, upsampled)
        return self.basic_module(joined)

    @staticmethod
    def _joining(encoder_features, x, concat):
        # channel-wise concat vs element-wise sum of the skip connection
        return torch.cat((encoder_features, x), dim=1) if concat else encoder_features + x
class Upsampling(nn.Module):
    """Upsample 3D feature maps by interpolation or transposed convolution.

    The target spatial size is always taken from the encoder features passed
    to ``forward``, so the decoder output lines up exactly with its skip
    connection.

    Args:
        transposed_conv (bool): if True use a learned ConvTranspose3d,
            otherwise interpolate
        in_channels (int): input channels (transposed conv only)
        out_channels (int): output channels (transposed conv only)
        kernel_size (int or tuple): transposed-conv kernel size
        scale_factor (int or tuple): transposed-conv stride; must reverse the
            matching encoder's pooling
        mode (str): interpolation algorithm: 'nearest' | 'linear' |
            'bilinear' | 'trilinear' | 'area'
    """

    def __init__(
        self,
        transposed_conv,
        in_channels=None,
        out_channels=None,
        kernel_size=3,
        scale_factor=(2, 2, 2),
        mode="nearest",
    ):
        super(Upsampling, self).__init__()
        if not transposed_conv:
            self.upsample = partial(self._interpolate, mode=mode)
        else:
            # stride equals the pooling kernel so
            # D_out = (D_in - 1) * stride - 2 * padding + kernel_size reverses the encoder
            self.upsample = nn.ConvTranspose3d(
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=scale_factor,
                padding=1,
            )

    def forward(self, encoder_features, x):
        target_size = encoder_features.size()[2:]
        return self.upsample(x, target_size)

    @staticmethod
    def _interpolate(x, size, mode):
        return F.interpolate(x, size=size, mode=mode)
class FinalConv(nn.Sequential):
    """A ``SingleConv`` followed by a 1x1x1 projection to ``out_channels``.

    The first conv keeps ``in_channels`` feature maps (with the usual
    normalization / non-linearity from ``order``); the final 1x1x1 conv then
    reduces them to the requested number of output channels.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): size of the convolving kernel
        order (string): layer order string, e.g. 'cr', 'crg'
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(
        self, in_channels, out_channels, kernel_size=3, order="crg", num_groups=8
    ):
        super(FinalConv, self).__init__()
        feature_conv = SingleConv(
            in_channels, in_channels, kernel_size, order, num_groups
        )
        self.add_module("SingleConv", feature_conv)
        # 1x1x1 conv projects the feature maps down to the label count
        self.add_module("final_conv", nn.Conv3d(in_channels, out_channels, 1))
class Abstract3DUNet(nn.Module):
    """
    Base class for standard and residual UNet.
    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output segmentation masks;
            Note that that the of out_channels might correspond to either
            different semantic classes or to different binary segmentation mask.
            It's up to the user of the class to interpret the out_channels and
            use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class)
            or BCEWithLogitsLoss (two-class) respectively)
        f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number
            of feature maps is given by the geometric progression: f_maps ^ k, k=1,2,3,4
        final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the
            final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used
            to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model.
        basic_module: basic model for the encoder/decoder (DoubleConv, ExtResNetBlock, ....)
        layer_order (string): determines the order of layers
            in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d.
            See `SingleConv` for more info
        f_maps (int, tuple): if int: number of feature maps in the first conv layer of the encoder (default: 64);
            if tuple: number of feature maps at each level
        num_groups (int): number of groups for the GroupNorm
        num_levels (int): number of levels in the encoder/decoder path (applied only if f_maps is an int)
        is_segmentation (bool): if True (semantic segmentation problem) Sigmoid/Softmax normalization is applied
            after the final convolution; if False (regression problem) the normalization layer is skipped at the end
        testing (bool): if True (testing mode) the `final_activation` (if present, i.e. `is_segmentation=true`)
            will be applied as the last operation during the forward pass; if False the model is in training mode
            and the `final_activation` (even if present) won't be applied; default: False
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        final_sigmoid,
        basic_module,
        f_maps=64,
        layer_order="gcr",
        num_groups=8,
        num_levels=4,
        is_segmentation=False,
        testing=False,
        **kwargs,
    ):
        super(Abstract3DUNet, self).__init__()
        # when True (and is_segmentation), forward() ends with final_activation
        self.testing = testing
        # expand an int spec into per-level channel counts: [f, 2f, 4f, ...]
        if isinstance(f_maps, int):
            f_maps = number_of_features_per_level(f_maps, num_levels=num_levels)
        # create encoder path consisting of Encoder modules. Depth of the encoder is equal to `len(f_maps)`
        encoders = []
        for i, out_feature_num in enumerate(f_maps):
            if i == 0:
                # first stage never pools: it works at full input resolution
                encoder = Encoder(
                    in_channels,
                    out_feature_num,
                    apply_pooling=False,
                    basic_module=basic_module,
                    conv_layer_order=layer_order,
                    num_groups=num_groups,
                )
            else:
                # TODO: adapt for anisotropy in the data, i.e. use proper pooling kernel to make the data isotropic after 1-2 pooling operations
                # currently pools with a constant kernel: (2, 2, 2)
                encoder = Encoder(
                    f_maps[i - 1],
                    out_feature_num,
                    basic_module=basic_module,
                    conv_layer_order=layer_order,
                    num_groups=num_groups,
                )
            encoders.append(encoder)
        self.encoders = nn.ModuleList(encoders)
        # create decoder path consisting of the Decoder modules. The length of the decoder is equal to `len(f_maps) - 1`
        decoders = []
        reversed_f_maps = list(reversed(f_maps))
        for i in range(len(reversed_f_maps) - 1):
            if basic_module == DoubleConv:
                # DoubleConv decoders concatenate the skip connection, so the
                # input channel counts add up
                in_feature_num = reversed_f_maps[i] + reversed_f_maps[i + 1]
            else:
                in_feature_num = reversed_f_maps[i]
            out_feature_num = reversed_f_maps[i + 1]
            # TODO: if non-standard pooling was used, make sure to use correct striding for transpose conv
            # currently strides with a constant stride: (2, 2, 2)
            decoder = Decoder(
                in_feature_num,
                out_feature_num,
                basic_module=basic_module,
                conv_layer_order=layer_order,
                num_groups=num_groups,
            )
            decoders.append(decoder)
        self.decoders = nn.ModuleList(decoders)
        # in the last layer a 1×1 convolution reduces the number of output
        # channels to the number of labels
        self.final_conv = nn.Conv3d(f_maps[0], out_channels, 1)
        if is_segmentation:
            # semantic segmentation problem
            if final_sigmoid:
                self.final_activation = nn.Sigmoid()
            else:
                self.final_activation = nn.Softmax(dim=1)
        else:
            # regression problem
            self.final_activation = None

    def forward(self, x):
        # encoder part
        encoders_features = []
        for encoder in self.encoders:
            x = encoder(x)
            # reverse the encoder outputs to be aligned with the decoder
            encoders_features.insert(0, x)
        # remove the last encoder's output from the list
        # !!remember: it's the 1st in the list
        encoders_features = encoders_features[1:]
        # decoder part
        for decoder, encoder_features in zip(self.decoders, encoders_features):
            # pass the output from the corresponding encoder and the output
            # of the previous decoder
            x = decoder(encoder_features, x)
        x = self.final_conv(x)
        # apply final_activation (i.e. Sigmoid or Softmax) only during prediction. During training the network outputs
        # logits and it's up to the user to normalize it before visualising with tensorboard or computing validation metric
        if self.testing and self.final_activation is not None:
            x = self.final_activation(x)
        return x
class UNet3D(Abstract3DUNet):
    """Vanilla 3D U-Net (Çiçek et al., https://arxiv.org/pdf/1606.06650.pdf).

    A thin configuration wrapper around :class:`Abstract3DUNet` that fixes
    the basic building block to ``DoubleConv``, which implies nearest
    neighbor upsampling plus concatenation joining in the decoder.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        final_sigmoid=True,
        f_maps=64,
        layer_order="gcr",
        num_groups=8,
        num_levels=4,
        is_segmentation=True,
        **kwargs,
    ):
        super(UNet3D, self).__init__(
            basic_module=DoubleConv,
            in_channels=in_channels,
            out_channels=out_channels,
            f_maps=f_maps,
            num_levels=num_levels,
            layer_order=layer_order,
            num_groups=num_groups,
            final_sigmoid=final_sigmoid,
            is_segmentation=is_segmentation,
            **kwargs,
        )
class ResidualUNet3D(Abstract3DUNet):
    """Residual 3D U-Net (https://arxiv.org/pdf/1706.00120.pdf).

    Uses ``ExtResNetBlock`` as the basic building block, which switches the
    decoder to summation joining and transposed-convolution upsampling (watch
    out for block artifacts). Being effectively a residual net, it tolerates
    deeper configurations — hence the larger ``num_levels`` default.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        f_maps=64,
        num_groups=8,
        num_levels=5,
        final_sigmoid=False,
        layer_order="gcr",
        is_segmentation=False,
        **kwargs,
    ):
        super(ResidualUNet3D, self).__init__(
            basic_module=ExtResNetBlock,
            in_channels=in_channels,
            out_channels=out_channels,
            f_maps=f_maps,
            num_levels=num_levels,
            layer_order=layer_order,
            num_groups=num_groups,
            final_sigmoid=final_sigmoid,
            is_segmentation=is_segmentation,
            **kwargs,
        )
| 25,729 | 36.289855 | 144 | py |
semantic-abstraction | semantic-abstraction-main/visualize.py | import io
import logging
from pathlib import Path
import textwrap
from typing import Any, Dict, List, Tuple
from skimage.measure import marching_cubes
import numpy as np
import torch
import os
import pickle
from net import SemAbs3D, SemAbsVOOL
from point_cloud import (
check_pts_in_frustum,
filter_pts_bounds,
get_pointcloud,
meshwrite,
)
import utils
import os
from utils import config_parser
from CLIP.clip import ClipWrapper, saliency_configs
from fusion import TSDFVolume
import typer
from matplotlib import pyplot as plt
from rich.progress import Progress
import open3d as o3d
from transforms3d import affines, euler
import imageio
from PIL import Image
import cv2
# Typer CLI app; `ovssc_inference` and `vool_inference` are registered below.
app = typer.Typer(pretty_exceptions_enable=False)
# An (x, y, z) world-space coordinate; scene bounds are a (lower, upper) pair of these.
Point3D = Tuple[float, float, float]
def visualize_relevancies(
    rgb: np.ndarray,
    relevancies: np.ndarray,
    obj_classes: List[str],
    dump_path: str,
):
    """Save a grid of relevancy heatmaps overlaid on the RGB image.

    One subplot per object class: the relevancy map is mapped through the
    'jet' colormap and alpha-blended on top of the image (high relevancy is
    opaque, low relevancy is transparent), then the figure is written to
    ``dump_path``.
    """
    n_cols = int(np.ceil(len(obj_classes) / 4))
    fig, axes = plt.subplots(4, n_cols, figsize=(15, 15))
    axes = axes.flatten()
    for ax in axes:
        ax.axis("off")
    colormap = plt.get_cmap("jet")
    vmin, vmax = 0.000, 0.01
    for ax, saliency, class_name in zip(axes, relevancies, obj_classes):
        ax.imshow(rgb)
        ax.set_title(class_name, fontsize=12)
        normed = np.clip((saliency - vmin) / (vmax - vmin), a_min=0.0, a_max=1.0)
        overlay = colormap(normed)
        # high relevancy -> opaque, low relevancy -> transparent
        overlay[..., -1] = (1 - normed) * 0.7
        ax.imshow(overlay)
    plt.tight_layout(pad=0)
    plt.savefig(dump_path)
    plt.close(fig)
def prep_data(
    data_pickle_path: str,
    scene_bounds: Tuple[Point3D, Point3D],
    subtract_mean: bool,
    dump_path: str,
):
    """Load one scene pickle and assemble the inference batch.

    Computes CLIP saliency maps for every object class mentioned in the
    scene's descriptions, back-projects the depth image into a world-space
    point cloud cropped to ``scene_bounds``, and packs per-point saliency
    features for both the OVSSC and VOOL tasks.

    Args:
        data_pickle_path: path to a ``<scene_id>.pkl`` scene file.
        scene_bounds: (lower, upper) corners of the world-space crop.
        subtract_mean: if True, mean-center the relevancy maps across classes.
        dump_path: directory under which ``<scene_id>/relevancies.png`` is
            written for visual inspection.

    Returns:
        dict of point-cloud tensors, saliency features, camera parameters and
        description strings, keyed as expected by the SemAbs networks.
    """
    scene_id = data_pickle_path.split("/")[-1].split(".pkl")[0]
    # NOTE: pickle fully trusts the file; only load data you generated yourself.
    with open(data_pickle_path, "rb") as data_file:
        data = pickle.load(data_file)
    rgb = data["rgb"]
    assert rgb.dtype == np.uint8
    depth = data["depth"]
    assert depth.dtype == np.float32
    cam_intr = data["cam_intr"]
    # the two checks below previously re-asserted `depth.dtype` (copy-paste
    # bug); validate the camera matrices instead. A loose floating check is
    # used since intrinsics/extrinsics may be float32 or float64.
    assert np.issubdtype(cam_intr.dtype, np.floating)
    cam_extr = data["cam_extr"]
    assert np.issubdtype(cam_extr.dtype, np.floating)
    scene_dump_path = f"{dump_path}/{scene_id}"
    if not os.path.exists(scene_dump_path):
        Path(scene_dump_path).mkdir(parents=True, exist_ok=True)
    if "img_shape" in data:
        rgb = cv2.resize(rgb, data["img_shape"])
        depth = cv2.resize(depth, data["img_shape"])
    descriptions = data["descriptions"]
    # each description is a (target, spatial relation, reference) triple
    target_obj_classes = [d[0] for d in descriptions]
    spatial_relation_names = [d[1] for d in descriptions]
    reference_obj_classes = [d[2] for d in descriptions]
    ovssc_obj_classes = data["ovssc_obj_classes"]
    # compute each unique class's saliency map only once
    relevancy_keys = list(
        set(ovssc_obj_classes).union(target_obj_classes).union(reference_obj_classes)
    )
    h, w, c = rgb.shape
    relevancies = (
        ClipWrapper.get_clip_saliency(
            img=rgb,
            text_labels=np.array(relevancy_keys),
            prompts=["a photograph of a {} in a home."],
            **saliency_configs["ours"](h),
        )[0]
        * 50
    )
    assert len(relevancy_keys) == len(relevancies)
    # back-project depth into world-space points, then crop to the scene bounds
    input_xyz_pts = torch.from_numpy(
        get_pointcloud(depth, None, cam_intr, cam_extr)[0].astype(np.float32)
    )
    in_bounds_mask = filter_pts_bounds(input_xyz_pts, np.array(scene_bounds)).bool()
    input_xyz_pts = input_xyz_pts[in_bounds_mask]
    input_rgb_pts = rgb.reshape(-1, 3)[in_bounds_mask.cpu().numpy()]
    if subtract_mean:
        relevancies -= relevancies.mean(dim=0, keepdim=True)
    # the x50 scaling above is undone for visualization
    visualize_relevancies(
        rgb=rgb,
        relevancies=relevancies.cpu().numpy() / 50,
        obj_classes=relevancy_keys,
        dump_path=scene_dump_path + "/relevancies.png",
    )
    # gather per-point saliency channels in the orders the two tasks expect
    ovssc_input_feature_pts = torch.stack(
        [
            relevancies[relevancy_keys.index(obj_class)].view(-1)[in_bounds_mask]
            for obj_class in ovssc_obj_classes
        ]
    )
    input_target_saliency_pts = torch.stack(
        [
            relevancies[relevancy_keys.index(obj_class)].view(-1)[in_bounds_mask]
            for obj_class in target_obj_classes
        ]
    )
    input_reference_saliency_pts = torch.stack(
        [
            relevancies[relevancy_keys.index(obj_class)].view(-1)[in_bounds_mask]
            for obj_class in reference_obj_classes
        ]
    )
    batch = {
        "input_xyz_pts": input_xyz_pts,
        "input_rgb_pts": input_rgb_pts,
        "relevancies": relevancies,
        "input_feature_pts": ovssc_input_feature_pts,
        "ovssc_obj_classes": ovssc_obj_classes,
        "rgb": rgb,
        "depth": depth,
        "cam_intr": cam_intr,
        "cam_extr": cam_extr,
        "scene_id": scene_id,
        "input_target_saliency_pts": input_target_saliency_pts,
        "input_reference_saliency_pts": input_reference_saliency_pts,
        "spatial_relation_name": spatial_relation_names,
        "tsdf_vol": None,
        "descriptions": [f"the {d[0]} {d[1]} the {d[2]}" for d in data["descriptions"]],
    }
    return batch
def process_batch_ovssc(
    net: SemAbs3D,
    batch: Dict[str, Any],
    scene_bounds: Tuple[Point3D, Point3D],
    device: str,
    num_input_pts: int,
    sampling_shape: Tuple[int, int, int] = (240, 240, 240),
    num_pts_per_pass: int = int(2**20),
    cutoff: float = -3.0,
) -> Dict[str, np.ndarray]:
    """Densely query the OVSSC network and build per-class occupancy volumes.

    The scene is sampled on a ``sampling_shape`` grid; query points are pushed
    through the network in chunks of ``num_pts_per_pass`` to bound memory.
    Voxels are zeroed out when (a) every class logit falls below ``cutoff``
    (predicted empty), (b) they lie outside the camera frustum, or (c) the
    TSDF fused from the observed depth marks them as free space.

    Returns:
        dict mapping each class label to a {0, 1} float volume of shape
        ``sampling_shape`` (numpy, on CPU).
    """
    grid_points = get_sample_points(
        sampling_shape=sampling_shape, scene_bounds=scene_bounds, device=device
    )
    assert filter_pts_bounds(
        grid_points.cpu().numpy(), bounds=np.array(scene_bounds)
    ).all()
    label_outputs = {}
    with Progress() as progress:
        inference_task = progress.add_task(
            "Running completion", total=len(batch["ovssc_obj_classes"])
        )
        for class_idx, obj_class in enumerate(batch["ovssc_obj_classes"]):
            label_outputs[obj_class] = []
            # chunked inference over the dense grid
            for j in np.arange(
                0,
                ((len(grid_points) // num_pts_per_pass) + 1) * num_pts_per_pass,
                num_pts_per_pass,
            ):
                if len(grid_points[j : j + num_pts_per_pass, :]) == 0:
                    break
                output_xyz_pts = grid_points[j : j + num_pts_per_pass, :][
                    None, None, ...
                ]
                input_xyz_pts = batch["input_xyz_pts"]
                # randomly subsample the input cloud to num_input_pts per pass
                indices = np.random.choice(input_xyz_pts.shape[-2], size=num_input_pts)
                label_outputs[obj_class].append(
                    net(
                        **{
                            **batch,
                            **{
                                "output_xyz_pts": output_xyz_pts.float().to(device),
                                "input_feature_pts": batch["input_feature_pts"][
                                    None, None, [class_idx], indices, None
                                ].to(device),
                                "input_xyz_pts": input_xyz_pts[..., indices, :]
                                .float()
                                .to(device),
                            },
                        }
                    )
                    .detach()
                    .cpu()
                )
            progress.update(inference_task, advance=1)
    # stitch the chunked outputs back into one dense volume per class
    label_outputs = {
        class_idx: torch.cat(patch_output, dim=-1).squeeze().view(*sampling_shape)
        for class_idx, patch_output in label_outputs.items()
    }
    # fuse the single observed depth frame into a TSDF for free-space carving
    tsdf_vol = TSDFVolume(
        vol_bnds=np.array(scene_bounds).T,
        voxel_size=(scene_bounds[1][0] - scene_bounds[0][0]) / sampling_shape[0],
    )
    tsdf_vol.integrate(
        color_im=batch["rgb"],
        depth_im=batch["depth"],
        cam_intr=batch["cam_intr"],
        cam_pose=batch["cam_extr"],
    )
    tsdf_vol = tsdf_vol.get_volume()[0]
    # per-voxel argmax over classes; a voxel is "empty" when all logits < cutoff
    logprobs = torch.stack(
        [label_outputs[label] for label in batch["ovssc_obj_classes"]], dim=-1
    )
    prediction = logprobs.argmax(dim=-1)
    empty_mask = (logprobs < cutoff).all(dim=-1)
    empty_mask = empty_mask.view(*sampling_shape)
    in_frustum_mask = check_pts_in_frustum(
        xyz_pts=grid_points.cpu().numpy(),
        depth=batch["depth"],
        cam_pose=batch["cam_extr"],
        cam_intr=batch["cam_intr"],
    )
    in_frustum_mask = torch.from_numpy(in_frustum_mask).view(*sampling_shape)
    prediction_volumes = {}
    for class_idx, class_label in enumerate(batch["ovssc_obj_classes"]):
        patch_prediction = (prediction == class_idx).float().view(*sampling_shape)
        patch_prediction[empty_mask] = 0.0
        patch_prediction[~in_frustum_mask] = 0.0
        # tsdf > 0 means observed free space in front of the depth surface
        patch_prediction[tsdf_vol > 0.0] = 0.0
        prediction_volumes[class_label] = patch_prediction.cpu().numpy()
    return prediction_volumes
def export_obj(vol, filename, level=0.5):
    """Run marching cubes on ``vol`` and write the mesh as a Wavefront OBJ.

    Vertices are centered and normalized into ``[-0.5, 0.5]^3``. When the
    whole volume lies below ``level`` nothing is extracted and no file is
    written.

    NOTE: mutates ``vol`` in place — its boundary faces are set to -inf so
    the extracted surface is closed at the volume borders.
    """
    # close the volume at its borders so marching cubes produces a closed mesh
    vol[:, :, -1] = -np.inf
    vol[:, :, 0] = -np.inf
    vol[:, -1, :] = -np.inf
    vol[:, 0, :] = -np.inf
    vol[-1, :, :] = -np.inf
    vol[0, :, :] = -np.inf
    if (vol < level).all():
        # nothing crosses the isosurface; skip mesh extraction entirely
        return
    verts, faces, norms, _ = marching_cubes(vol, level=level)
    # center and rescale vertices to [-0.5, 0.5]^3
    vol_shape = np.array(vol.shape)
    verts -= vol_shape / 2
    verts = verts / vol_shape
    # write vertices, normals, then faces (OBJ indices are 1-based);
    # the context manager guarantees the file is closed even on error
    with open(filename, "w") as obj_file:
        for i in range(verts.shape[0]):
            obj_file.write("v %f %f %f\n" % (verts[i, 0], verts[i, 1], verts[i, 2]))
        for i in range(norms.shape[0]):
            obj_file.write("vn %f %f %f\n" % (norms[i, 0], norms[i, 1], norms[i, 2]))
        faces = faces + 1
        for i in range(faces.shape[0]):
            obj_file.write("f %d %d %d\n" % (faces[i, 0], faces[i, 1], faces[i, 2]))
def get_sample_points(
    sampling_shape: Tuple[int, int, int],
    scene_bounds: Tuple[Point3D, Point3D],
    device: str,
):
    """Return a dense ``(N, 3)`` grid of world-space sample points.

    Grid indices along each axis are mapped linearly so that index 0 lands
    exactly on the lower scene bound and the last index on the upper bound;
    the last axis varies fastest in the flattened output.
    """
    per_axis = [torch.arange(0, dim, device=device) for dim in sampling_shape]
    idx_grid = torch.stack(torch.meshgrid(*per_axis, indexing="ij"), dim=-1).to(device)
    lower = torch.tensor(scene_bounds[0], device=device, dtype=torch.float32)
    upper = torch.tensor(scene_bounds[1], device=device, dtype=torch.float32)
    steps = torch.tensor(sampling_shape, device=device, dtype=torch.float32) - 1
    voxel_size = (upper - lower) / steps
    world_pts = idx_grid.to(torch.float32) * voxel_size + lower
    return world_pts.view(-1, 3)
@app.command()
def ovssc_inference(
    data_pickle_path: str,
    model_ckpt_path: str,
    dump_path: str = "visualization/",
):
    """Run open-vocabulary semantic scene completion on one scene pickle and
    export one OBJ mesh per object class under ``dump_path/<scene_id>/``."""
    # parse a minimal arg set, then overwrite with the checkpoint's saved args
    args = config_parser().parse_args(
        args=["--load", model_ckpt_path, "--file_path", data_pickle_path]
    )
    with open(os.path.dirname(args.load) + "/args.pkl", "rb") as file:
        exp_args = pickle.load(file)
    for arg in vars(exp_args):
        # keep the locally supplied device/paths, restore everything else
        if any(arg == s for s in ["device", "file_path", "load"]):
            continue
        setattr(args, arg, getattr(exp_args, arg))
    args.domain_randomization = False
    scene_bounds = tuple(args.scene_bounds)
    logging.info("Preparing batch")
    batch = prep_data(
        data_pickle_path=data_pickle_path,
        scene_bounds=scene_bounds,
        subtract_mean=args.subtract_mean_relevancy,
        dump_path=dump_path,
    )
    logging.info(
        f"Fetched {len(batch['ovssc_obj_classes'])} classes: "
        + ", ".join(batch["ovssc_obj_classes"])
    )
    # NOTE(review): this round-trip caches the batch to "new-input.pkl" and
    # leaks both file handles; presumably a debugging convenience — confirm
    # before removing.
    pickle.dump(batch, open("new-input.pkl", "wb"))
    batch = pickle.load(open("new-input.pkl", "rb"))
    if not os.path.exists(f"{dump_path}/{batch['scene_id']}"):
        Path(f"{dump_path}/{batch['scene_id']}").mkdir(parents=True, exist_ok=True)
    net = utils.get_net(net_class=SemAbs3D, **vars(args))[0]
    net.eval()
    prediction_volumes = process_batch_ovssc(
        net=net,
        batch=batch,
        scene_bounds=scene_bounds,
        device=args.device,
        num_input_pts=args.num_input_pts,
    )
    logging.info(f"Dumping meshes to {dump_path}/{batch['scene_id']}")
    for obj_class, vol in prediction_volumes.items():
        try:
            export_obj(
                vol=vol,
                filename=f"{dump_path}/{batch['scene_id']}/{obj_class}.obj",
                level=0.5,
            )
        except RuntimeError as e:
            # marching cubes raises when the volume never crosses the level set
            print(f"{obj_class} probably empty: {e}")
def process_batch_vool(
    net: SemAbsVOOL,
    batch: Dict[str, Any],
    scene_bounds: Tuple[Point3D, Point3D],
    device: str,
    num_input_pts: int,
    sampling_shape: Tuple[int, int, int] = (240, 240, 240),
    num_pts_per_pass: int = int(2**20),
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
    """Densely query the VOOL network for every language description.

    The scene is sampled on a ``sampling_shape`` grid; query points are
    pushed through the network in chunks of ``num_pts_per_pass`` to bound
    memory. For each description the matching target/reference saliency
    channels and spatial-relation name are sliced out of ``batch``.

    Returns:
        (desc_predictions, grid_points): a dict mapping each description
        string to a logit volume of shape ``sampling_shape``, plus the
        (N, 3) world-space grid those logits correspond to.
    """
    grid_points = get_sample_points(
        sampling_shape=sampling_shape, scene_bounds=scene_bounds, device=device
    )
    assert filter_pts_bounds(
        grid_points.cpu().numpy(), bounds=np.array(scene_bounds)
    ).all()
    desc_predictions = {}
    with Progress() as progress:
        inference_task = progress.add_task(
            "Running localization", total=len(batch["descriptions"])
        )
        for desc_idx, desc in enumerate(batch["descriptions"]):
            desc_predictions[desc] = []
            # chunked inference over the dense grid
            for j in np.arange(
                0,
                ((len(grid_points) // num_pts_per_pass) + 1) * num_pts_per_pass,
                num_pts_per_pass,
            ):
                if len(grid_points[j : j + num_pts_per_pass, :]) == 0:
                    break
                output_xyz_pts = grid_points[j : j + num_pts_per_pass, :][
                    None, None, ...
                ]
                input_xyz_pts = batch["input_xyz_pts"]
                # randomly subsample the input cloud to num_input_pts per pass
                indices = np.random.choice(input_xyz_pts.shape[-2], size=num_input_pts)
                desc_predictions[desc].append(
                    net(
                        **{
                            **batch,
                            **{
                                "output_xyz_pts": output_xyz_pts.float().to(device),
                                "input_target_saliency_pts": batch[
                                    "input_target_saliency_pts"
                                ][None, None, [desc_idx], indices, None].to(device),
                                "input_reference_saliency_pts": batch[
                                    "input_reference_saliency_pts"
                                ][None, None, [desc_idx], indices, None].to(device),
                                "spatial_relation_name": [
                                    [batch["spatial_relation_name"][desc_idx]]
                                ],
                                "input_xyz_pts": input_xyz_pts[..., indices, :]
                                .float()
                                .to(device),
                            },
                        }
                    )
                    .detach()
                    .cpu()
                )
            progress.update(inference_task, advance=1)
    # stitch the chunked outputs back into one dense volume per description
    desc_predictions = {
        desc: torch.cat(patch_output, dim=-1).squeeze().view(*sampling_shape)
        for desc, patch_output in desc_predictions.items()
    }
    return desc_predictions, grid_points
@app.command()
def vool_inference(
    data_pickle_path: str,
    model_ckpt_path: str,
    dump_path: str = "visualization/",
):
    """Run VOOL localization on one exported scene and dump colored pointclouds."""
    args = config_parser().parse_args(
        args=["--load", model_ckpt_path, "--file_path", data_pickle_path]
    )
    # Restore the training-time configuration saved next to the checkpoint,
    # keeping only the runtime-specific arguments from this invocation.
    with open(os.path.dirname(args.load) + "/args.pkl", "rb") as file:
        exp_args = pickle.load(file)
        for arg in vars(exp_args):
            if any(arg == s for s in ["device", "file_path", "load"]):
                continue
            setattr(args, arg, getattr(exp_args, arg))
    args.domain_randomization = False
    scene_bounds = tuple(args.scene_bounds)
    logging.info("Preparing batch")
    batch = prep_data(
        data_pickle_path=data_pickle_path,
        scene_bounds=scene_bounds,
        subtract_mean=args.subtract_mean_relevancy,
        dump_path=dump_path,
    )
    logging.info(
        f"Fetched {len(batch['descriptions'])} descriptions: "
        + ", ".join(batch["descriptions"])
    )
    # NOTE(review): this dump/load round-trip looks like a debugging leftover;
    # it also leaves a stray new-input.pkl in the working directory and the
    # file handles are never closed.
    pickle.dump(batch, open("new-input.pkl", "wb"))
    batch = pickle.load(open("new-input.pkl", "rb"))
    net = utils.get_net(net_class=SemAbsVOOL, **vars(args))[0]
    net.eval()
    desc_predictions, grid_points = process_batch_vool(
        net=net,
        batch=batch,
        scene_bounds=scene_bounds,
        device=args.device,
        num_input_pts=args.num_input_pts,
    )
    logging.info(f"Dumping pointclouds to {dump_path}/{batch['scene_id']}")
    cmap = plt.get_cmap("jet")
    for desc, prediction in desc_predictions.items():
        prediction = prediction.squeeze().view(-1)
        # Keep only points whose score is within 0.15 of the per-description max.
        keep_mask = prediction > prediction.max() - 0.15
        desc_points = grid_points[keep_mask]
        logprobs = prediction[keep_mask]
        logprobs = logprobs.exp().numpy()
        # Min-max normalize so the colormap spans exactly the kept score range.
        vmin = logprobs.min()
        vmax = logprobs.max()
        logprobs = (logprobs - vmin) / (vmax - vmin)
        colors = cmap(logprobs)[..., :3]
        meshwrite(
            filename=f"{dump_path}/{batch['scene_id']}/{desc}.ply",
            verts=desc_points.cpu().numpy(),
            colors=(colors * 255).astype(np.uint8),
        )
    # Subsample the raw scene cloud to cap the visualization file size.
    indices = np.arange(len(batch["input_xyz_pts"]))
    if len(batch["input_xyz_pts"]) > 100000:
        indices = np.random.choice(
            len(batch["input_xyz_pts"]), size=100000, replace=False
        )
    meshwrite(
        filename=f"{dump_path}/{batch['scene_id']}/scene_rgb.ply",
        verts=batch["input_xyz_pts"].cpu().numpy()[indices],
        colors=batch["input_rgb_pts"][indices],
    )
# color palette from https://sashamaps.net/docs/resources/20-colors/
# NOTE: despite the name, there are 22 entries — the 20 distinct colors plus
# white and black at the end. Values are normalized to [0, 1] RGB.
twenty_color_palette = (
    np.array(
        [
            [230, 25, 75],
            [60, 180, 75],
            [255, 225, 25],
            [0, 130, 200],
            [245, 130, 48],
            [145, 30, 180],
            [70, 240, 240],
            [240, 50, 230],
            [210, 245, 60],
            [250, 190, 212],
            [0, 128, 128],
            [220, 190, 255],
            [170, 110, 40],
            [255, 250, 200],
            [128, 0, 0],
            [170, 255, 195],
            [128, 128, 0],
            [255, 215, 180],
            [0, 0, 128],
            [128, 128, 128],
            [255, 255, 255],
            [0, 0, 0],
        ]
    )
    / 255
)
def render_animation(geometries, n_frames=220, point_size=6, **kwargs):
    """Render a turntable animation of the given Open3D geometries.

    Rotates the camera by a fixed step each frame and captures the screen,
    returning the frames as a list of uint8 HxWx3 arrays.
    """
    visualizer = o3d.visualization.Visualizer()
    visualizer.create_window(width=1024, height=1024)
    visualizer.get_render_option().point_size = point_size
    for geometry in geometries:
        visualizer.add_geometry(geometry)
    frames = []
    with Progress() as progress:
        task = progress.add_task("Rendering", total=n_frames)
        for _frame_idx in range(n_frames):
            view_control = visualizer.get_view_control()
            view_control.rotate(10.0, 0.0)
            visualizer.update_renderer()
            float_buffer = visualizer.capture_screen_float_buffer(do_render=True)
            frame = (np.asarray(float_buffer) * 255).astype(np.uint8)
            frames.append(frame)
            progress.update(task, advance=1)
    visualizer.destroy_window()
    return frames
def generate_legend(legend):
    """Render a {label: color} mapping into a legend image.

    Draws a matplotlib legend with square markers, crops the figure to the
    legend's extent, and returns the rasterized result as a uint8 RGBA array.
    """
    def _marker_handle(marker, color):
        # Invisible plot whose handle supplies the legend marker.
        return plt.plot([], [], marker=marker, color=color, ls="none")[0]

    handles = [_marker_handle("s", color) for color in legend.values()]
    legend_artist = plt.legend(
        handles, list(legend.keys()), loc=3, framealpha=0, frameon=False
    )
    figure = legend_artist.figure
    figure.canvas.draw()
    extent = legend_artist.get_window_extent()
    bbox = extent.transformed(figure.dpi_scale_trans.inverted())
    buffer = io.BytesIO()
    plt.savefig(buffer, format="png", dpi=200, bbox_inches=bbox)
    buffer.seek(0)
    return np.array(Image.open(buffer)).astype(np.uint8)
@app.command()
def ovssc_visualize(output_path: str):
    """Render the exported OVSSC completion meshes into a turntable video."""
    geometries = []
    # Rotate -90 degrees about x to go from the export frame into the
    # Open3D viewer frame.
    rotate = affines.compose(
        T=[0, 0, 0], R=euler.euler2mat(-np.pi / 2, 0, 0), Z=[1, 1, 1]
    )
    legend = {}
    for idx, path in enumerate(Path(output_path).rglob("*.obj")):
        path = str(path)
        mesh = o3d.io.read_triangle_mesh(path)
        mesh = mesh.transform(rotate)
        # Class name comes from the file stem; wrap long names for the legend.
        class_name = "\n".join(textwrap.wrap(path.split("/")[-1].split(".obj")[0], 30))
        # color mesh
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(mesh.vertices)
        pcd.paint_uniform_color(twenty_color_palette[idx % 20])
        legend[class_name] = twenty_color_palette[idx % 20]
        geometries.append(pcd)
    output_path = f"{output_path}/completion.mp4"
    # Burn the legend into the top-left corner of every rendered frame;
    # the mask keeps the legend's white background transparent.
    legend_img = generate_legend(legend)[:, :, :3]
    h, w, _ = legend_img.shape
    mask = (legend_img != 255).any(axis=2)
    with imageio.get_writer(output_path, fps=24) as writer:
        for img in render_animation(geometries=geometries, point_size=4):
            img[:h, :w, :][mask] = legend_img[mask]
            writer.append_data(img)
    print(output_path)
@app.command()
def vool_visualize(output_path: str):
    """Render each dumped localization pointcloud over the scene as a video."""
    # Load every dumped .ply, keyed by its file stem (description text or
    # "scene_rgb" for the scene itself).
    pointclouds = {
        str(path).split("/")[-1].split(".ply")[0]: o3d.io.read_point_cloud(str(path))
        for path in Path(output_path).rglob("*.ply")
    }
    # Rotate -90 degrees about x to go from the export frame into the
    # Open3D viewer frame.
    rotate = affines.compose(
        T=[0, 0, 0], R=euler.euler2mat(-np.pi / 2, 0, 0), Z=[1, 1, 1]
    )
    scene = pointclouds["scene_rgb"].voxel_down_sample(voxel_size=0.03)
    scene = scene.transform(rotate)
    for desc, localization in pointclouds.items():
        if desc == "scene_rgb":
            continue
        localization = localization.transform(rotate)
        with imageio.get_writer(f"{output_path}/{desc}.mp4", fps=24) as writer:
            for image in render_animation(geometries=[scene, localization]):
                writer.append_data(image)
        print(f"{output_path}/{desc}.mp4")
if __name__ == "__main__":
    # Typer CLI entry point (ovssc/vool inference + visualization commands).
    app()
"""
### scene_4_living-room-1.pkl (NO, VOOL messed up for some reason..., should look into this)
python visualize.py ovssc-inference matterport/scene_4_living-room-1.pkl models/ours/ovssc/ovssc.pth
python visualize.py ovssc-visualize visualization/scene_4_living-room-1
python visualize.py vool-inference matterport/scene_4_living-room-1.pkl models/ours/vool/vool.pth
python visualize.py vool-visualize visualization/scene_4_living-room-1
### scene_1_kitchen-5.pkl (YES)
python visualize.py ovssc-inference matterport/scene_1_kitchen-5.pkl models/ours/ovssc/ovssc.pth
python visualize.py ovssc-visualize visualization/scene_1_kitchen-5
python visualize.py vool-inference matterport/scene_1_kitchen-5.pkl models/ours/vool/vool.pth
python visualize.py vool-visualize visualization/scene_1_kitchen-5
### 00754-EqZacbtdApE_living-room-1 (YES)
python visualize.py ovssc-inference matterport/00754-EqZacbtdApE_living-room-1.pkl models/ours/ovssc/ovssc.pth
python visualize.py ovssc-visualize visualization/00754-EqZacbtdApE_living-room-1
python visualize.py vool-inference matterport/00754-EqZacbtdApE_living-room-1.pkl models/ours/vool/vool.pth
python visualize.py vool-visualize visualization/00754-EqZacbtdApE_living-room-1
scene_2_hallway-2 (YES)
310_kitchen-6 (BAD OVSSC)
scene_2_bedroom-8 (COMPLETION AND LOCALIZATION MESSED UP)
vn_poster (Good completion)
"""
| 23,057 | 35.084507 | 110 | py |
semantic-abstraction | semantic-abstraction-main/train_ovssc.py | import numpy as np
import torch
from torch.nn.functional import binary_cross_entropy_with_logits
from net import SemAbs3D, SemanticAwareOVSSC
import utils
import pandas as pd
from dataset import SceneCompletionDataset
from typing import Dict, Tuple, Union
def get_detailed_stats(
    prediction,
    gt_label,
    xyz_pts,
    patch_labels,
    scene_ids,
    scene_bounds,
    ignore_pts,
    detailed_analysis=False,
    eval_device="cuda",
    **kwargs,
):
    """Compute per-(scene, class) prediction statistics as a DataFrame.

    Computes point-level statistics plus 32x32x32 voxelized statistics (and
    additionally 64x64x64 when ``detailed_analysis`` is set), then blanks out
    the numeric columns of rows belonging to padding classes (empty labels).

    Args:
        prediction: per-point predictions (tensor with ``.to``).
        gt_label: per-point ground-truth labels.
        xyz_pts: point coordinates used for voxelization.
        patch_labels: (num_scenes, num_patches) array of class names; ""
            marks padding entries.
        scene_ids: one scene id per scene, repeated per patch in the output.
        scene_bounds: bounds used to voxelize the points.
        ignore_pts: per-point mask of points excluded from evaluation.
        detailed_analysis: also compute the 64^3 voxel statistics.
        eval_device: device on which the (potentially large) reductions run.

    Returns:
        A pandas DataFrame with one row per (scene, patch) pair.
    """
    _, num_patches = patch_labels.shape
    retvals = {
        "scene_id": np.array([[scene_id] * num_patches for scene_id in scene_ids])
        .reshape(-1)
        .tolist(),
        "label": patch_labels.reshape(-1).tolist(),
    }
    retvals.update(
        {
            f"point_{k}": v
            for k, v in utils.prediction_analysis(
                prediction=prediction.to(eval_device),
                label=gt_label.to(eval_device),
                ignore=ignore_pts.to(eval_device),
            ).items()
        }
    )
    voxelized_pts = utils.voxelize_points(
        prediction=prediction,
        label=gt_label,
        xyz_pts=xyz_pts,
        voxel_shape=(32, 32, 32),
        scene_bounds=scene_bounds,
        ignore_pts=ignore_pts,
    )
    retvals.update(
        {
            "voxel32x32x32_" + k: v
            for k, v in utils.prediction_analysis(
                **{k: v.to(eval_device) for k, v in voxelized_pts.items()}
            ).items()
        }
    )
    if detailed_analysis:
        voxelized_pts = utils.voxelize_points(
            prediction=prediction,
            label=gt_label,
            xyz_pts=xyz_pts,
            voxel_shape=(64, 64, 64),
            scene_bounds=scene_bounds,
            ignore_pts=ignore_pts,
        )
        retvals.update(
            {
                "voxel64x64x64_" + k: v
                for k, v in utils.prediction_analysis(
                    **{k: v.to(eval_device) for k, v in voxelized_pts.items()}
                ).items()
            }
        )
    # Padding classes carry no data: blank their numeric columns so they are
    # excluded from downstream aggregation (DataFrame.mean skips NaN).
    for i, label in enumerate(patch_labels.reshape(-1).tolist()):
        if label == "":  # skip padding classes
            for k in retvals.keys():
                if "voxel" in k or "point" in k:
                    # np.nan rather than np.NAN: the NAN alias was removed in
                    # NumPy 2.0.
                    retvals[k][i] = np.nan
    return pd.DataFrame.from_dict(retvals)
def get_losses(
    net,
    batch: dict,
    cutoffs=[0],
    balance_positive_negative: bool = False,
    **kwargs,
) -> Tuple[Dict[str, Union[float, torch.Tensor]], pd.DataFrame]:
    """Forward a batch through ``net`` and compute loss + evaluation stats.

    Returns a dict with "loss", "accuracy" and mean-IoU entries, plus the
    detailed per-(scene, class) DataFrame from ``get_detailed_stats`` (one
    copy per entry of ``cutoffs``).

    NOTE: mutates ``batch`` in place (``patch_labels`` is transposed and
    ``out_of_bounds_pts`` is reshaped).
    NOTE(review): ``cutoffs=[0]`` is a mutable default argument; it is only
    read here, so this is benign, but a tuple would be safer.
    """
    stats = {}
    num_pts = batch["output_xyz_pts"].shape[2]
    if num_pts <= 500000:
        outputs = net(**batch)
    else:
        # Too many query points for a single forward pass: run one semantic
        # patch at a time and concatenate along the patch dimension.
        num_patches = 1
        # probably CUDA OOM
        outputs = torch.cat(
            [
                net(
                    **{
                        **batch,
                        "input_feature_pts": batch["input_feature_pts"][
                            :, patch_i * num_patches : (patch_i + 1) * num_patches, ...
                        ]
                        if batch["input_feature_pts"].shape[1]
                        == batch["output_xyz_pts"].shape[1]
                        else batch["input_feature_pts"],
                        "output_xyz_pts": batch["output_xyz_pts"][
                            :, patch_i * num_patches : (patch_i + 1) * num_patches, ...
                        ],
                        "semantic_class_features": batch["semantic_class_features"][
                            :, patch_i * num_patches : (patch_i + 1) * num_patches, ...
                        ],
                    }
                )
                for patch_i in range(len(batch["patch_labels"]) // num_patches + 1)
                if np.prod(
                    batch["output_xyz_pts"][
                        :, patch_i * num_patches : (patch_i + 1) * num_patches, ...
                    ].shape
                )
                > 0
            ],
            dim=1,
        )
    batch["patch_labels"] = np.array(batch["patch_labels"]).T
    padding_mask = torch.from_numpy(batch["patch_labels"] == "").bool()
    batch["out_of_bounds_pts"] = batch["out_of_bounds_pts"].view(outputs.shape)
    ignore_pts_mask = torch.zeros_like(outputs).bool()
    # ignore all padding labels
    ignore_pts_mask[padding_mask] = True
    # ignore all points out of bounds
    ignore_pts_mask = torch.logical_or(ignore_pts_mask, batch["out_of_bounds_pts"])
    # don't eval on points outside of frustum
    ignore_pts_mask = torch.logical_or(
        ignore_pts_mask, batch["out_of_frustum_pts_mask"]
    )
    stats["loss"] = binary_cross_entropy_with_logits(
        outputs[~ignore_pts_mask],
        batch["output_label_pts"][~ignore_pts_mask],
        weight=utils.get_bce_weight(
            output_label_pts=batch["output_label_pts"],
            balance_positive_negative=balance_positive_negative,
        )[~ignore_pts_mask],
    )
    with torch.no_grad():
        # Threshold logits at 0 (i.e. probability 0.5) for accuracy.
        vision_accuracy_mask = (
            (outputs > 0.0).long() == batch["output_label_pts"]
        ).float()
        stats["accuracy"] = vision_accuracy_mask[~ignore_pts_mask].mean()
        detailed_stats = [
            get_detailed_stats(
                prediction=outputs > cutoff,
                gt_label=batch["output_label_pts"].bool(),
                xyz_pts=batch["output_xyz_pts"],
                ignore_pts=ignore_pts_mask,
                patch_labels=batch["patch_labels"],
                scene_ids=batch["scene_id"],
                eval_device=net.device,
                **kwargs,
            )
            for cutoff in cutoffs
        ]
        for detailed_stat, cutoff in zip(detailed_stats, cutoffs):
            detailed_stat["cutoff"] = [cutoff] * len(detailed_stat)
        detailed_stats = pd.concat(detailed_stats)
        # Surface every IoU column's mean as a scalar stat.
        for k in detailed_stats.columns:
            if "iou" in k:
                stats[k] = detailed_stats[k].mean()
    return stats, detailed_stats
# Registry mapping the --approach CLI choice to its network class.
approach = {
    "semantic_abstraction": SemAbs3D,
    "semantic_aware": SemanticAwareOVSSC,
}
if __name__ == "__main__":
    # OVSSC training entry point.
    parser = utils.config_parser()
    parser.add_argument("--log", type=str, required=True)
    parser.add_argument(
        "--approach", choices=approach.keys(), default="semantic_abstraction"
    )
    args = parser.parse_args()
    # The semantic-aware baseline consumes RGB directly instead of relevancy.
    if args.approach == "semantic_aware":
        args.network_inputs = ["rgb"]
    utils.train(
        get_losses_fn=get_losses,
        **utils.setup_experiment(
            args=args,
            # Use DistributedDataParallel only when more than one GPU is given.
            ddp=len(args.gpus) > 1,
            net_class=approach[args.approach],
            dataset_class=SceneCompletionDataset,
            split_file_path=args.file_path + "/ssc_split.pkl",
        ),
        **vars(args),
    )
| 6,693 | 32.808081 | 87 | py |
semantic-abstraction | semantic-abstraction-main/generate_thor_data.py | import logging
import re
from copy import deepcopy
import shutil
from argparse import ArgumentParser
from typing import List
import ray
from ai2thor.controller import Controller
from ai2thor.platform import CloudRendering
from matplotlib import pyplot as plt
import numpy as np
import torch
from transforms3d import affines, euler
from fusion import TSDFVolume, rigid_transform
from generate_relevancy import get_datastructure, init_dataset, resize_and_add_data
from net import VirtualGrid
import pickle
import os
from tqdm import tqdm
from point_cloud import filter_pts_bounds, get_pointcloud
from utils import write_to_hdf5
import h5py
from numba import njit, prange
# Camera / capture configuration for AI2-THOR data generation.
fov_w = 80.0  # horizontal field of view, degrees
width = 224 * 4
height = 224 * 4
num_output_pts = 1000000
# Axis-aligned crop around the agent, meters: [[min x,y,z], [max x,y,z]].
scene_bounds = np.array([[-1, -1, -0.1], [1, 1, 1.9]])
# Pinhole focal length derived from the horizontal FOV.
focal_length = (width / 2) / np.tan((np.pi * fov_w / 180) / 2)
# NOTE(review): the principal point uses height/2 for cx and width/2 for cy;
# harmless here because width == height, but verify if they ever differ.
cam_intr = np.array(
    [[focal_length, 0, height / 2], [0, focal_length, width / 2], [0, 0, 1]]
)
# AI2-THOR floorplan ids grouped by room type; the last 5 of each group are
# held out as the test split.
kitchens = [f"FloorPlan{i}_physics" for i in range(1, 31)]
living_rooms = [f"FloorPlan{200 + i}_physics" for i in range(1, 31)]
bedrooms = [f"FloorPlan{300 + i}_physics" for i in range(1, 31)]
bathrooms = [f"FloorPlan{400 + i}_physics" for i in range(1, 31)]
test_scenes = kitchens[-5:] + living_rooms[-5:] + bedrooms[-5:] + bathrooms[-5:]
def parse_gt(scene_name: str, path_to_exported_scenes: str):
    """Load (and cache) the ground-truth point labels for an exported scene.

    Reads the exported point/object-id text files, reduces raw object ids to
    canonical class labels, and precomputes per-receptacle containment masks.
    The result is cached as ``<scene_name>.pkl`` next to the exports so later
    calls are cheap.

    Args:
        scene_name: exported scene directory/file stem.
        path_to_exported_scenes: root directory of the Unity exports.

    Returns:
        Dict with keys ``full_xyz_pts``, ``full_objid_pts``, ``semantic``,
        ``labels``, ``unique_labels`` and ``receptacle_masks``.
    """
    pickle_path = f"{path_to_exported_scenes}/{scene_name}.pkl"
    scene_gt = None
    if os.path.exists(pickle_path):
        try:
            with open(pickle_path, "rb") as f:
                scene_gt = pickle.load(f)
        except Exception as e:
            # Corrupt or partially-written cache: log it and fall through to
            # rebuild from the raw exports below.
            logging.error(e)
            logging.error(pickle_path)
    # cache this pre-processing
    if scene_gt is None:
        labels = []
        semantic = []
        # Each line is a "|"-separated xyz triple. `with` blocks close the
        # file handles (the originals were leaked via open().readlines()).
        with open(f"{path_to_exported_scenes}/{scene_name}/full_xyz_pts.txt") as f:
            full_xyz_pts = np.array(
                [list(map(float, line.rstrip().split("|"))) for line in f]
            )
        with open(f"{path_to_exported_scenes}/{scene_name}/full_objid_pts.txt") as f:
            full_objid_pts = [line.rstrip() for line in f]
        with open(f"{path_to_exported_scenes}/{scene_name}_receptacles.txt") as f:
            receptacle_infos = [process_receptacle_line(line) for line in f]
        receptacle_masks = {
            receptacle_info["receptacle_name"]: check_inside_receptacle(
                xyz_pts=full_xyz_pts, receptacle_info=receptacle_info
            )
            for receptacle_info in receptacle_infos
        }
        unique_obj_ids = list(set(full_objid_pts))
        unique_labels = list(set(map(class_reduction_rule, unique_obj_ids)))
        # Reduce every raw object id to its class label and integer index.
        for objid in full_objid_pts:
            label = class_reduction_rule(objid)
            labels.append(label)
            semantic.append(unique_labels.index(label))
        semantic = np.array(semantic).astype(int)
        scene_gt = {
            "full_xyz_pts": full_xyz_pts,
            "full_objid_pts": full_objid_pts,
            "semantic": semantic,
            "labels": labels,
            "unique_labels": unique_labels,
            "receptacle_masks": receptacle_masks,
        }
        with open(pickle_path, "wb") as f:
            pickle.dump(scene_gt, f)
    return scene_gt
def check_inside_receptacle(xyz_pts, receptacle_info):
    """Boolean mask of which world-frame points fall inside a receptacle's box.

    Points are mapped into the receptacle's local frame via the inverse of its
    transform, then tested (inclusively) against the centered bounding box.
    """
    world_to_local = np.linalg.inv(receptacle_info["transform_matrix"])
    homogeneous = np.concatenate((xyz_pts, np.ones((len(xyz_pts), 1))), axis=1)
    local_pts = (world_to_local @ homogeneous.T).T[:, :3]
    half_extent = receptacle_info["bbox_size"] / 2
    above_min = (local_pts >= -half_extent).all(axis=-1)
    below_max = (local_pts <= half_extent).all(axis=-1)
    return np.logical_and(above_min, below_max)
def process_receptacle_line(line):
    """Parse one "|"-separated receptacle export record.

    The line has the form ``name|(r)(r)(r)(r)|(sx,sy,sz)|(cx,cy,cz)`` where
    each ``(r)`` is a comma-separated row of the 4x4 transform. Returns a dict
    with the name, 4x4 transform matrix, bbox size and bbox center.
    """
    name, matrix_str, size_str, center_str = line.strip().split("|")
    flat = matrix_str.replace(")(", ",").replace("(", "").replace(")", "")
    transform_matrix = np.array(flat.split(",")).astype(float).reshape(4, 4)
    bbox_size = np.array(size_str[1:-1].split(",")).astype(float)
    bbox_center = np.array(center_str[1:-1].split(",")).astype(float)
    return {
        "receptacle_name": name,
        "transform_matrix": transform_matrix,
        "bbox_size": bbox_size,
        "bbox_center": bbox_center,
    }
@njit(parallel=True)
def cam2pix(cam_pts, intr):
    # from https://github.com/andyzeng/tsdf-fusion-python/blob/master/fusion.py#L181-L193
    """Convert camera coordinates to pixel coordinates."""
    intr = intr.astype(np.float32)
    fx, fy = intr[0, 0], intr[1, 1]
    cx, cy = intr[0, 2], intr[1, 2]
    pix = np.empty((cam_pts.shape[0], 2), dtype=np.int64)
    # Parallel pinhole projection: u = x*fx/z + cx, v = y*fy/z + cy, rounded
    # to the nearest integer pixel. prange lets numba parallelize the loop.
    for i in prange(cam_pts.shape[0]):
        pix[i, 0] = int(np.round((cam_pts[i, 0] * fx / cam_pts[i, 2]) + cx))
        pix[i, 1] = int(np.round((cam_pts[i, 1] * fy / cam_pts[i, 2]) + cy))
    return pix
def xyz_pts_to_cam_pix(xyz_pts, cam_pose, cam_intr):
    """Project world-frame points to pixel coordinates.

    Transforms the points into the camera frame using the inverse of the
    camera pose, then applies the pinhole intrinsics. Returns the pixel x
    coordinates, pixel y coordinates, and camera-frame depths.
    """
    world_to_cam = np.linalg.inv(cam_pose)
    cam_pts = rigid_transform(xyz_pts, world_to_cam)
    pix = cam2pix(cam_pts, cam_intr)
    return pix[:, 0], pix[:, 1], cam_pts[:, 2]
def get_all_relations(
    scene_data,
    receptacle_masks,
    objects_info,
    remapped_visible_obj_ids,
    all_remapped_obj_ids,
    visibility_pts_mask,
    container_obj_classes={
        "cabinet",
        "fridge",
        "drawer",
        "bathtub basin",
        "bowl",
        "box",
        "cup",
        "desk",
        "garbage can",
        "laundry hamper",
        "microwave",
        "mug",
        "pot",
        "safe",
        "sink basin",
        "toaster",
    },
    no_localization_obj_classes={
        "wall",
        "ceiling",
        "floor",
        "empty",
        "countertop",
        "drawer",
        "counter",
        "banana",
    },
    direction_dot_threshold=0.6,
):
    """Extract spatial-relation descriptions from an AI2-THOR scene.

    Builds (target, relation, reference) triplets from AI2-THOR receptacle
    metadata ("in"/"on"), synthesizes hidden "banana in a <container>"
    relations for visible containers, and derives directional relations
    (behind / left of / right of / in front of) from mean point displacements
    thresholded by ``direction_dot_threshold``.

    NOTE: mutates ``scene_data`` in place — occluded target object ids and the
    hidden banana id are painted into ``scene_data["full_objid_pts"]``, and the
    hidden banana class is appended to ``scene_data["objid_to_class"]``.

    NOTE(review): ``container_obj_classes`` and ``no_localization_obj_classes``
    are mutable (set) default arguments; they are only read here, so this is
    benign, but frozensets would be safer.

    Returns:
        Dict of parallel lists (target/reference names, materials, target id,
        relation name), one entry per kept relation.
    """
    objects_in_scene = set(np.unique(scene_data["full_objid_pts"]))
    descriptions = set()
    unfiltered_descriptions = list()
    def should_add_relation(target_obj_name, spatial_relation, reference_obj_name):
        # Filter out self-relations, unhelpful reference classes, duplicates,
        # and directional relations that contradict an in/on relation.
        if target_obj_name == reference_obj_name:
            # unhelpful
            return False
        if (
            "ceiling" in reference_obj_name
            or reference_obj_name
            in {"floor", "rug", "baseboard", "light fixture", "decal"}
            or target_obj_name
            in {"floor", "rug", "baseboard", "light fixture", "decal"}
        ):
            # people don't localize objects in reference to these objects
            return False
        if (
            f"{target_obj_name} {spatial_relation} a {reference_obj_name}"
            in descriptions
        ):
            # duplicate
            return False
        if spatial_relation not in {"in", "on"} and (
            (f"{target_obj_name} in a {reference_obj_name}" in descriptions)
            or (f"{target_obj_name} on a {reference_obj_name}" in descriptions)
            or (f"{reference_obj_name} on a {target_obj_name}" in descriptions)
            or (f"{reference_obj_name} in a {target_obj_name}" in descriptions)
        ):
            # if target obj is on or in reference obj, then it shouldn't also be
            # left of, right of, behind, or in front of
            return False
        return True
    retval = {
        "target_obj_name": [],
        "target_obj_material": [],
        "target_obj_id": [],
        "reference_obj_name": [],
        "reference_obj_material": [],
        "spatial_relation_name": [],
    }
    # map from object id to obj class name
    for target_obj_id, obj_info in objects_info.items():
        target_obj_name = " ".join(
            map(lambda c: c.lower(), camel_case_split(obj_info["objectType"]))
        )
        # "in"/"on" relations from AI2-THOR's parent-receptacle metadata.
        if obj_info["parentReceptacles"] is not None:
            for reference_obj_id in obj_info["parentReceptacles"]:
                if reference_obj_id not in remapped_visible_obj_ids.keys():
                    # parent obj not visible
                    continue
                if target_obj_id not in all_remapped_obj_ids:
                    logging.warning(
                        target_obj_id + " not in mapped objids " + reference_obj_id
                    )
                    continue
                if (
                    all_remapped_obj_ids[target_obj_id] not in objects_in_scene
                    or all_remapped_obj_ids[reference_obj_id] not in objects_in_scene
                ):
                    # target or reference object doesn't even appear in scene bounds
                    continue
                parent_obj_info = objects_info[reference_obj_id]
                if parent_obj_info["objectType"] == "Floor":
                    continue
                reference_obj_name = " ".join(
                    map(
                        lambda c: c.lower(),
                        camel_case_split(parent_obj_info["objectType"]),
                    )
                )
                spatial_relation_name = (
                    "in" if reference_obj_name in container_obj_classes else "on"
                )
                unfiltered_descriptions.append(
                    f"{target_obj_name} {spatial_relation_name} a {reference_obj_name}"
                )
                if should_add_relation(
                    target_obj_name=target_obj_name,
                    spatial_relation=spatial_relation_name,
                    reference_obj_name=reference_obj_name,
                ):
                    descriptions.add(
                        f"{target_obj_name} {spatial_relation_name} a {reference_obj_name}"
                    )
                    retval["target_obj_name"].append(target_obj_name)
                    retval["target_obj_id"].append(all_remapped_obj_ids[target_obj_id])
                    retval["target_obj_material"].append(
                        "|".join(obj_info["salientMaterials"])
                        if obj_info["salientMaterials"] is not None
                        else ""
                    )
                    retval["reference_obj_name"].append(reference_obj_name)
                    retval["reference_obj_material"].append(
                        "|".join(parent_obj_info["salientMaterials"])
                        if parent_obj_info["salientMaterials"] is not None
                        else ""
                    )
                    retval["spatial_relation_name"].append(spatial_relation_name)
                    target_obj_is_visible = (
                        target_obj_id in remapped_visible_obj_ids.keys()
                    )
                    if not target_obj_is_visible:
                        # if target obj not visible then should
                        # supervise entire region
                        matching_receptacle_masks = {
                            rk: rv
                            for rk, rv in receptacle_masks.items()
                            if " ".join(
                                map(
                                    lambda c: c.lower(),
                                    camel_case_split(rk.split("_")[0]),
                                )
                            )
                            == retval["reference_obj_name"][-1]
                        }
                        if len(matching_receptacle_masks) == 0:
                            continue
                        receptacle_mask = np.logical_or.reduce(
                            tuple(
                                receptacle_mask["mask"]
                                for receptacle_mask in matching_receptacle_masks.values()
                            )
                        )
                        # Paint the occluded interior of the receptacle with
                        # the hidden target's id.
                        scene_data["full_objid_pts"][
                            :, np.logical_and(receptacle_mask, ~visibility_pts_mask)
                        ] = all_remapped_obj_ids[target_obj_id]
        # augment with inside relation
        if target_obj_name in container_obj_classes:
            container_name = target_obj_name
            container_obj_id = target_obj_id
            if container_obj_id not in remapped_visible_obj_ids.keys():
                continue
            matching_receptacle_masks = {
                rk: rv
                for rk, rv in receptacle_masks.items()
                if " ".join(
                    map(lambda c: c.lower(), camel_case_split(rk.split("_")[0]))
                )
                == container_name
            }
            if len(matching_receptacle_masks) == 0:
                continue
            description = f"banana in a {container_name}"
            unfiltered_descriptions.append(description)
            if should_add_relation(
                target_obj_name="banana",
                spatial_relation="in",
                reference_obj_name=container_name,
            ):
                descriptions.add(description)
                receptacle_mask = np.logical_or.reduce(
                    tuple(
                        receptacle_mask["mask"]
                        for receptacle_mask in matching_receptacle_masks.values()
                    )
                )
                # Mint a brand-new object id for the synthetic hidden banana.
                hidden_obj_id = len(scene_data["objid_to_class"])
                retval["reference_obj_name"].append(container_name)
                retval["reference_obj_material"].append(
                    "|".join(obj_info["salientMaterials"])
                    if obj_info["salientMaterials"] is not None
                    else ""
                )
                hidden_obj_name = "banana"
                retval["target_obj_name"].append(hidden_obj_name)
                retval["target_obj_id"].append(hidden_obj_id)
                retval["target_obj_material"].append("")
                retval["spatial_relation_name"].append("in")
                scene_data["objid_to_class"] = np.array(
                    scene_data["objid_to_class"].astype(str).tolist()
                    + [f"banana[{hidden_obj_id}]"]
                ).astype("S")
                scene_data["full_objid_pts"][
                    :, np.logical_and(receptacle_mask, ~visibility_pts_mask)
                ] = hidden_obj_id
    # FIND ALL SPATIAL RELATIONS IN SCENE
    for reference_obj_key, reference_obj_id in remapped_visible_obj_ids.items():
        for target_obj_id in set(scene_data["full_objid_pts"][0]):
            target_obj_name = (
                scene_data["objid_to_class"][target_obj_id]
                .decode("utf-8")
                .split("[")[0]
            )
            reference_obj_name = (
                scene_data["objid_to_class"][reference_obj_id]
                .decode("utf-8")
                .split("[")[0]
            )
            if reference_obj_id == target_obj_id:
                continue
            if (
                target_obj_name in no_localization_obj_classes
                or reference_obj_name in no_localization_obj_classes
            ):
                continue
            target_obj_mask = scene_data["full_objid_pts"][0] == target_obj_id
            target_obj_xyz_pts = scene_data["full_xyz_pts"][0][target_obj_mask, :]
            reference_obj_mask = scene_data["full_objid_pts"][0] == reference_obj_id
            if not reference_obj_mask.any() or not target_obj_mask.any():
                continue
            reference_obj_xyz_pts = scene_data["full_xyz_pts"][0][reference_obj_mask, :]
            # Unit direction from target centroid to reference centroid.
            displacement = reference_obj_xyz_pts.mean(axis=0) - target_obj_xyz_pts.mean(
                axis=0
            )
            distance = np.linalg.norm(displacement)
            direction = displacement / distance
            reference_obj_bounds = reference_obj_xyz_pts.max(
                axis=0
            ) - reference_obj_xyz_pts.min(axis=0)
            # Distance cutoff scales with the reference's footprint, clamped
            # to [0.1, 1.0] meters.
            distance_threshold = min(
                max(max(reference_obj_bounds[0], reference_obj_bounds[1]) * 2.0, 0.1),
                1.0,
            )
            if distance > distance_threshold:
                # too far away, probably not an actual spatial relation
                continue
            reference_material = (
                "|".join(objects_info[reference_obj_key]["salientMaterials"])
                if reference_obj_key in objects_info
                and objects_info[reference_obj_key]["salientMaterials"] is not None
                else ""
            )
            target_obj_is_visible = target_obj_id in scene_data["seg"]
            unfiltered_descriptions.append(
                f"{target_obj_name} behind a {reference_obj_name}"
            )
            if np.dot(
                direction, [-1, 0, 0]
            ) > direction_dot_threshold and should_add_relation(
                target_obj_name=target_obj_name,
                spatial_relation="behind",
                reference_obj_name=reference_obj_name,
            ):
                descriptions.add(f"{target_obj_name} behind a {reference_obj_name}")
                retval["target_obj_name"].append(target_obj_name)
                retval["target_obj_material"].append("")
                retval["target_obj_id"].append(target_obj_id)
                retval["reference_obj_name"].append(reference_obj_name)
                retval["reference_obj_material"].append(reference_material)
                retval["spatial_relation_name"].append("behind")
                if not target_obj_is_visible:
                    # Hidden-behind case: supervise the unseen "empty" points
                    # that project behind the reference object's back surface.
                    empty_id = list(
                        map(
                            lambda c: c.split("[")[0],
                            scene_data["objid_to_class"].astype(str),
                        )
                    ).index("empty")
                    empty_mask = scene_data["full_objid_pts"][0] == empty_id
                    reference_class_mask_pts = np.logical_or.reduce(
                        tuple(
                            scene_data["full_objid_pts"][0] == objid
                            for objid, objclass in enumerate(
                                scene_data["objid_to_class"].astype(str)
                            )
                            if objclass.split("[")[0] == reference_obj_name
                        )
                    )
                    im_h, im_w = scene_data["depth"][0].shape
                    resize_scale = 10
                    pix_x, pix_y, pix_z = xyz_pts_to_cam_pix(
                        xyz_pts=scene_data["full_xyz_pts"][0],
                        cam_pose=scene_data["cam_pose"],
                        cam_intr=scene_data["cam_intr"],
                    )
                    # effectively resize
                    ref_pix_x, ref_pix_y, ref_pix_z = xyz_pts_to_cam_pix(
                        xyz_pts=scene_data["full_xyz_pts"][0][
                            reference_class_mask_pts, :
                        ],
                        cam_pose=scene_data["cam_pose"],
                        cam_intr=scene_data["cam_intr"],
                    )
                    full_pix_xy = np.stack((pix_x, pix_y), axis=1)
                    corner = full_pix_xy.min(axis=0)
                    full_pix_xy -= corner
                    ref_pix_xy = np.stack((ref_pix_x, ref_pix_y), axis=1)
                    ref_pix_xy -= corner
                    # Bucket pixel coordinates into resize_scale-sized bins.
                    full_pix_xy[:, 0] = np.digitize(
                        full_pix_xy[:, 0], bins=np.arange(0, im_w, resize_scale)
                    )
                    full_pix_xy[:, 1] = np.digitize(
                        full_pix_xy[:, 1], bins=np.arange(0, im_h, resize_scale)
                    )
                    ref_pix_xy[:, 0] = np.digitize(
                        ref_pix_xy[:, 0], bins=np.arange(0, im_w, resize_scale)
                    )
                    ref_pix_xy[:, 1] = np.digitize(
                        ref_pix_xy[:, 1], bins=np.arange(0, im_h, resize_scale)
                    )
                    # -1 marks bins the reference object never touches.
                    ref_backsize = -np.ones(
                        (full_pix_xy[:, 0].max() + 1, full_pix_xy[:, 1].max() + 1)
                    ).astype(float)
                    # get back side of object in each pixel
                    for pix_xy in np.unique(ref_pix_xy, axis=0):
                        mask = (ref_pix_xy == pix_xy).all(axis=1)
                        ref_backsize[pix_xy[0], pix_xy[1]] = ref_pix_z[mask].max()
                    accessed_depth = ref_backsize[full_pix_xy[:, 0], full_pix_xy[:, 1]]
                    behind_mask = np.logical_and(
                        accessed_depth < pix_z, accessed_depth != -1
                    )
                    target_obj_mask = np.logical_and.reduce(
                        (behind_mask, ~visibility_pts_mask, empty_mask)
                    )
                    scene_data["full_objid_pts"][:, target_obj_mask] = target_obj_id
            # some objects shouldn't allow behind
            if reference_obj_name in {"cabinet"}:
                continue
            # if in front of, left of, or right of, then target object
            # should be visible
            if target_obj_id not in remapped_visible_obj_ids.values():
                continue
            if np.dot(direction, [0, 1, 0]) > direction_dot_threshold:
                unfiltered_descriptions.append(
                    f"{target_obj_name} on the right of a {reference_obj_name}"
                )
            elif np.dot(direction, [0, -1, 0]) > direction_dot_threshold:
                unfiltered_descriptions.append(
                    f"{target_obj_name} on the left of a {reference_obj_name}"
                )
            elif np.dot(direction, [1, 0, 0]) > direction_dot_threshold:
                unfiltered_descriptions.append(
                    f"{target_obj_name} in front of a {reference_obj_name}"
                )
            if np.dot(
                direction, [0, 1, 0]
            ) > direction_dot_threshold and should_add_relation(
                target_obj_name=target_obj_name,
                spatial_relation="on the right of",
                reference_obj_name=reference_obj_name,
            ):
                descriptions.add(
                    f"{target_obj_name} on the right of a {reference_obj_name}"
                )
                retval["target_obj_name"].append(target_obj_name)
                retval["target_obj_material"].append("")
                retval["target_obj_id"].append(target_obj_id)
                retval["reference_obj_name"].append(reference_obj_name)
                retval["reference_obj_material"].append(reference_material)
                retval["spatial_relation_name"].append("on the right of")
            elif np.dot(
                direction, [0, -1, 0]
            ) > direction_dot_threshold and should_add_relation(
                target_obj_name=target_obj_name,
                spatial_relation="on the left of",
                reference_obj_name=reference_obj_name,
            ):
                descriptions.add(
                    f"{target_obj_name} on the left of a {reference_obj_name}"
                )
                retval["target_obj_name"].append(target_obj_name)
                retval["target_obj_material"].append("")
                retval["target_obj_id"].append(target_obj_id)
                retval["reference_obj_name"].append(reference_obj_name)
                retval["reference_obj_material"].append(reference_material)
                retval["spatial_relation_name"].append("on the left of")
            elif np.dot(
                direction, [1, 0, 0]
            ) > direction_dot_threshold and should_add_relation(
                target_obj_name=target_obj_name,
                spatial_relation="in front of",
                reference_obj_name=reference_obj_name,
            ):
                descriptions.add(
                    f"{target_obj_name} in front of a {reference_obj_name}"
                )
                retval["target_obj_name"].append(target_obj_name)
                retval["target_obj_material"].append("")
                retval["target_obj_id"].append(target_obj_id)
                retval["reference_obj_name"].append(reference_obj_name)
                retval["reference_obj_material"].append(reference_material)
                retval["spatial_relation_name"].append("in front of")
    return retval
def camel_case_split(str):
    """Split a CamelCase identifier into its capitalized words.

    Runs of uppercase letters are kept together as one token (e.g. acronyms);
    a string with no uppercase letter yields an empty list.
    """
    word_pattern = r"[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))"
    return re.findall(word_pattern, str)
def class_reduction_rule(raw_class_name):
    """Map a raw exported object/asset name to a canonical lowercase class label.

    Strips known asset-name affixes, splits CamelCase into lowercase words,
    normalizes spellings and plural/variant forms, and collapses families of
    assets (cabinets, walls, shelves, ...) into a single class each.

    Args:
        raw_class_name: raw object id / asset name string from the export.

    Returns:
        The reduced, lowercase class label.
    """
    if "FP326:PS_326_" in raw_class_name:
        raw_class_name = raw_class_name.split("FP326:PS_326_")[1]
    # Strip common asset-name suffixes and pipe-delimited metadata.
    class_name = (
        raw_class_name.split("_")[0]
        .split("Height")[0]
        .split("Standard")[-1]
        .split("|")[0]
        .split("Size")[0]
        .split("Done")[0]
    )
    # All-caps names (acronyms) are returned unchanged.
    if class_name.upper() == class_name:
        return class_name
    if len(camel_case_split(class_name)):
        class_name = " ".join(c.lower() for c in camel_case_split(class_name))
    class_name = "".join(class_name.split("mesh")).rstrip().lstrip()
    if "f " == class_name[:2]:
        class_name = class_name[2:]
    if "ladel" in class_name or "ladle" in class_name:
        return "ladle"
    if class_name == "towl":
        return "towel"
    if class_name == "plate stack":
        return "plate"
    if (
        "deco" in class_name
        and "decor" not in class_name
        and "decorative" not in class_name
        and "decoration" not in class_name
    ):
        class_name = class_name.replace("deco", "decoration")
    elif (
        "decor" in class_name
        and "decorative" not in class_name
        and "decoration" not in class_name
    ):
        class_name = class_name.replace("decor", "decoration")
    # Normalize two-word spellings. Each replace is applied once; the
    # original repeated "dish washer" three times and "base board" twice,
    # which was redundant (the later calls were no-ops).
    class_name = class_name.replace("counter top", "countertop")
    class_name = class_name.replace("fire place", "fireplace")
    class_name = class_name.replace("base board", "baseboard")
    class_name = class_name.replace("dish washer", "dishwasher")
    class_name = class_name.replace("bath tub", "bathtub")
    if "book" == class_name or "book stack" == class_name:
        return "book"
    if "rug" == class_name[-3:]:
        return "rug"
    # Singularize a fixed list of plural suffixes.
    if (
        class_name[-len("bottles") :] == "bottles"
        or class_name[-len("wires") :] == "wires"
        or class_name[-len("windows") :] == "windows"
        or class_name[-len("pans") :] == "pans"
        or class_name[-len("decals") :] == "decals"
        or class_name[-len("cups") :] == "cups"
        or class_name[-len("walls") :] == "walls"
        or class_name[-len("rods") :] == "rods"
        or class_name[-len("cans") :] == "cans"
        or class_name[-len("lights") :] == "lights"
    ):
        return class_name[:-1]
    if class_name[-len("glasses") :] == "glasses":
        return class_name[:-2]
    # Collapse asset families into a single class label.
    if "cloth" in class_name:
        return "cloth"
    if "island" in class_name:
        return "kitchen island"
    if "ceiling" in class_name:
        return class_name
    if "cabinet" in class_name:
        return "cabinet"
    if "fridge" in class_name:
        return "fridge"
    if "shelf" in class_name or "shelving" in class_name or "shelves" in class_name:
        return "shelf"
    if "knife" in class_name:
        return "knife"
    if "stove" in class_name:
        return "stove"
    if "wall" in class_name:
        return "wall"
    if "window" in class_name:
        return "window"
    if "door" in class_name:
        return "door"
    return class_name
def process_class_name(c):
    """Return the base class token: the text before the first '|' and first space."""
    base = c.split("|", 1)[0]
    return base.split(" ", 1)[0]
def run_simulator(
    scene_id: str,
    domain_randomization: bool,
    np_rand: np.random.RandomState,
    num_attempts: int = 10,
    dist: float = 3.0,
    debug: bool = False,
):
    """Launch AI2-THOR for `scene_id` and sample one acceptable view.

    The agent is teleported to random reachable poses (up to `num_attempts`)
    until a view passes the heuristics below; the winning view's frames and
    metadata are returned as a dict. Returns None when the controller fails
    to start or no view is accepted.

    NOTE(review): reads `width`, `height` and `fov_w` from module scope —
    assumed to be defined at import time; confirm against the full file.
    """
    controller = None
    try:
        controller = Controller(
            agentMode="default",
            visibilityDistance=1.5,
            scene=scene_id,
            # step sizes
            gridSize=0.05,
            snapToGrid=False,
            rotateStepDegrees=5,
            # image modalities
            renderDepthImage=True,
            renderInstanceSegmentation=True,
            # camera properties
            width=width,
            height=height,
            fieldOfView=fov_w,
            # render headless
            platform=CloudRendering,
        )
    except Exception as e:
        # controller failed to launch; give up on this datapoint
        logging.error(e)
        if controller is not None:
            controller.stop()
        return
    datapoint = None
    reachable_positions = controller.step(action="GetReachablePositions").metadata[
        "actionReturn"
    ]
    for _ in range(num_attempts):
        sampled_position = np_rand.choice(reachable_positions)
        sampled_rotation = dict(x=0, y=np_rand.uniform(0, 360), z=0)
        try:
            event = controller.step(
                action="Teleport",
                position=sampled_position,
                rotation=sampled_rotation,
                horizon=0,
                standing=True,
            )
        except Exception as e:
            logging.error(e)
            controller.stop()
            return
        # dense per-pixel class ids, indexing into the per-view `classes` list
        classes = list(set(map(process_class_name, event.color_to_object_id.values())))
        semantic_img = np.zeros(event.instance_segmentation_frame.shape[:2]).astype(int)
        for color, objname in event.color_to_object_id.items():
            objname = process_class_name(objname)
            obj_mask = (event.instance_segmentation_frame == color).all(axis=-1)
            semantic_img[obj_mask] = classes.index(objname)
        # reflective surfaces in Unity shows depth of reflection probe
        # NOTE(review): in-painting below interpolates over *flattened* pixel
        # indices (np.flatnonzero), not spatial neighbors — confirm intended.
        reflective_surface_mask = event.depth_frame > 10.0
        depth = deepcopy(event.depth_frame)
        depth[reflective_surface_mask] = np.interp(
            np.flatnonzero(reflective_surface_mask),
            np.flatnonzero(~reflective_surface_mask),
            depth[~reflective_surface_mask],
        )
        # reject views dominated (>80%) by wall pixels
        if "Wall" in classes and (semantic_img == classes.index("Wall")).mean() > 0.8:
            continue
        # ideally most objects are between 1.5 and 3.5 meters away
        pixel_in_good_range = np.logical_and(
            depth < dist + 1.0,
            depth > dist - 1.0,
        )
        # require at least 4 distinct classes in view
        if len(np.unique(semantic_img)) < 4:
            if debug:
                plt.imshow(semantic_img)
                plt.show()
            logging.debug("not enough interesting objects")
            continue
        # require at least 20% of pixels in the preferred depth band
        if pixel_in_good_range.mean() < 0.2:
            if debug:
                logging.debug("not enough pixels in good range")
                fig, axes = plt.subplots(1, 3)
                axes[0].axis("off")
                axes[1].axis("off")
                axes[2].axis("off")
                axes[0].imshow(depth)
                axes[1].imshow(pixel_in_good_range.astype(int))
                axes[2].imshow(event.frame)
                plt.show()
            continue
        # optional second RGB with randomized materials (placeholder otherwise)
        domain_randomized_rgb = np.zeros(1)
        if domain_randomization:
            controller.step(action="RandomizeMaterials")
            domain_randomized_rgb = controller.step(action="RandomizeMaterials").frame
        controller.stop()
        datapoint = {
            "scene_id": scene_id,
            "rgb": deepcopy(event.frame),
            "depth": depth,
            "instance": deepcopy(event.instance_segmentation_frame),
            "color_to_object_id": deepcopy(event.color_to_object_id),
            "semantic": semantic_img,
            "classes": classes,
            "position": list(event.metadata["agent"]["position"].values()),
            "camera_horizon": event.metadata["agent"]["cameraHorizon"],
            "rotation": list(event.metadata["agent"]["rotation"].values()),
            "objects_info": event.metadata["objects"],
            "sampled_position": sampled_position,
            "sampled_rotation": sampled_rotation,
            "domain_randomized_rgb": domain_randomized_rgb,
        }
        break
    if datapoint is None:
        controller.stop()
        logging.debug("attempts ran out")
        return
    return datapoint
def scene_data_from_thor_datapoint(
    np_rand,
    datapoint: dict,
    dist: float,
    path_to_exported_scenes: str,
    debug: bool = False,
):
    """Fuse a `run_simulator` datapoint with exported ground-truth geometry.

    Aligns the exported full-scene point cloud to the sampled view, remaps
    per-pixel instance ids onto ground-truth object ids, integrates a TSDF
    from the observed depth, and packs everything into a `scene_data` dict.
    Returns None when too few ground-truth points survive the scene-bounds
    crop to sample `num_output_pts` without replacement.

    NOTE(review): relies on module-level `cam_intr`, `scene_bounds` and
    `num_output_pts`, plus helpers `get_pointcloud`, `parse_gt`,
    `filter_pts_bounds`, `TSDFVolume`, `VirtualGrid`, `get_all_relations`
    defined elsewhere — confirm against the full file.
    """
    # camera pose from agent position + zyx euler rotation (degrees -> radians)
    cam_pose = affines.compose(
        T=datapoint["position"],
        R=euler.euler2mat(
            datapoint["rotation"][2] * np.pi / 180,
            datapoint["rotation"][1] * np.pi / 180,
            datapoint["rotation"][0] * np.pi / 180,
        ),
        Z=np.ones(3),
    )
    xyz_pts, rgb_pts = get_pointcloud(
        depth_img=datapoint["depth"],
        color_img=datapoint["rgb"],
        cam_intr=cam_intr,
        cam_pose=cam_pose,
    )
    # compute transform to align ground truth with view
    transform = (
        affines.compose(T=[0, 0, 2], R=euler.euler2mat(0, 0, 0), Z=np.array([1, 1, 1]))
        @ affines.compose(
            T=[0, 0, 0], R=euler.euler2mat(0, 0, 0), Z=np.array([1, 1, -1])
        )
        @ affines.compose(
            T=[0, 0, 0], R=euler.euler2mat(np.pi / 2, 0, 0), Z=np.ones(3) * 0.6
        )
        @ affines.compose(T=[0, 0, 0], R=euler.euler2mat(0, np.pi, 0), Z=np.ones(3))
        @ affines.compose(
            T=[dist - 0.5, 2.0, 0], R=euler.euler2mat(0, np.pi / 2, 0), Z=np.ones(3)
        )
        @ affines.compose(
            T=[0, 0, 0], R=euler.euler2mat(0, -np.pi, -np.pi), Z=np.ones(3)
        )
        @ np.linalg.inv(cam_pose)
    )
    scene_gt = parse_gt(
        scene_name=datapoint["scene_id"],
        path_to_exported_scenes=path_to_exported_scenes,
    )
    full_xyz_pts = scene_gt["full_xyz_pts"]
    remapped_full_objid_pts = scene_gt["full_objid_pts"]
    full_objid_unique = scene_gt["objids"]
    objid_to_class = scene_gt["objid_to_class"]
    receptacle_masks = scene_gt["receptacle_masks"]
    # keep untransformed copy: receptacle xyz below is reported in gt frame
    original_xyz_pts = full_xyz_pts.copy()
    full_xyz_pts = (
        transform
        @ np.concatenate(
            (original_xyz_pts, np.ones(len(original_xyz_pts))[:, None]), axis=1
        ).T
    ).T[:, :3]
    if debug:
        from plot_utils import plot_pointcloud
        mask = filter_pts_bounds(xyz=full_xyz_pts, bounds=scene_bounds)
        fig, ax = plt.subplots(1)
        ax.imshow(datapoint["rgb"])
        plot_pointcloud(
            xyz=full_xyz_pts[mask],
            features=remapped_full_objid_pts[mask],
            object_labels=np.array(objid_to_class),
            show_plot=False,
            delete_fig=False,
        )
        xyz_pts, rgb_pts = get_pointcloud(
            depth_img=datapoint["depth"],
            color_img=datapoint["rgb"],
            cam_intr=cam_intr,
            cam_pose=transform @ cam_pose,
        )
        plot_pointcloud(
            xyz=xyz_pts,
            features=rgb_pts,
            show_plot=True,
        )
        plt.show()
    # process instance
    # -1 marks pixels whose instance could not be matched to a gt object
    remapped_seg = -np.ones(datapoint["instance"].shape[:2]).astype(int)
    objects_in_view = {
        color: instance_key
        for color, instance_key in datapoint["color_to_object_id"].items()
        if (datapoint["instance"] == color).all(axis=-1).any()
    }
    remapped_visible_obj_ids = dict()
    for obj_color, instance_key in objects_in_view.items():
        obj_mask = (datapoint["instance"] == obj_color).all(axis=-1)
        if instance_key in full_objid_unique:
            remapped_objid = full_objid_unique.index(instance_key)
        else:
            # project out to 3D, then find class in gt which is spatially closest
            # to projected mask
            xyz_pts, _ = get_pointcloud(
                depth_img=datapoint["depth"],
                color_img=None,
                cam_intr=cam_intr,
                cam_pose=transform @ cam_pose,
            )
            partial_obj_xyz_pts = xyz_pts[obj_mask.reshape(-1), :]
            partial_to_full_distances = dict()
            for int_obj_id, gt_obj_id in enumerate(full_objid_unique):
                if gt_obj_id == "empty":
                    continue
                gt_obj_mask = remapped_full_objid_pts == int_obj_id
                full_obj_xyz_pts = full_xyz_pts[gt_obj_mask, :]
                if len(full_obj_xyz_pts) == 0:
                    continue
                elif len(full_obj_xyz_pts) > 100:
                    # subsample for tractable pairwise distance computation
                    full_obj_xyz_pts = full_obj_xyz_pts[
                        np_rand.choice(len(full_obj_xyz_pts), 100, replace=False), :
                    ]
                distances = (
                    (full_obj_xyz_pts[None, ...] - partial_obj_xyz_pts[:, None, ...])
                    ** 2
                ).sum(axis=2)
                # sum over view pixels of the nearest gt-point distance
                all_distances = distances.min(axis=1).sum(axis=0)
                partial_to_full_distances[gt_obj_id] = all_distances
            gt_obj_id = min(partial_to_full_distances.items(), key=lambda v: v[1])[0]
            remapped_objid = full_objid_unique.index(gt_obj_id)
        remapped_visible_obj_ids[instance_key] = remapped_objid
        remapped_seg[obj_mask] = remapped_objid
    # crop ground truth to the scene bounds, then sample a fixed-size cloud
    mask = filter_pts_bounds(xyz=full_xyz_pts, bounds=scene_bounds)
    full_xyz_pts = full_xyz_pts[mask, :]
    remapped_full_objid_pts = remapped_full_objid_pts[mask]
    logging.debug(f"NUM PTS: { len(full_xyz_pts)}")
    try:
        indices = np_rand.choice(len(full_xyz_pts), size=num_output_pts, replace=False)
    except Exception as e:
        logging.error("Not enough points")
        logging.error(e)
        return
    # extend visible-object mapping with ids for every remaining gt object
    remapped_obj_ids = deepcopy(remapped_visible_obj_ids)
    for remapped_id, objid in enumerate(full_objid_unique):
        if objid not in remapped_obj_ids:
            remapped_obj_ids[objid] = remapped_id
    vox_size = 64
    tsdf_vol = TSDFVolume(vol_bnds=scene_bounds.T, voxel_size=2.0 / vox_size)
    tsdf_vol.integrate(
        color_im=datapoint["rgb"],
        depth_im=datapoint["depth"],
        cam_intr=cam_intr,
        cam_pose=transform @ cam_pose,
    )
    tsdf_xyz_pts = tsdf_vol.vox2world(
        tsdf_vol._vol_origin, tsdf_vol.vox_coords, tsdf_vol._voxel_size
    )
    tsdf_value_pts = tsdf_vol.get_volume()[0].reshape(-1)
    # make class labels unique by appending the integer object id
    for objid in range(len(objid_to_class)):
        objid_to_class[objid] = objid_to_class[objid] + f"[{objid}]"
    scene_data = {
        "rgb": datapoint["rgb"][None, ...],
        "domain_randomized_rgb": datapoint["domain_randomized_rgb"][None, ...],
        "depth": datapoint["depth"][None, ...],
        "seg": remapped_seg[None, ...],
        "cam_intr": cam_intr,
        "cam_pose": transform @ cam_pose,
        "scene_bounds": scene_bounds,
        "tsdf_value_pts": tsdf_value_pts[None, ...],
        "tsdf_xyz_pts": tsdf_xyz_pts[None, ...],
        "full_xyz_pts": full_xyz_pts[indices, :][None, ...],
        "full_objid_pts": remapped_full_objid_pts[indices][None, ...],
        "objid_to_class": np.array(objid_to_class).astype("S"),
    }
    # a sampled gt point counts as visible when its TSDF cell is positive
    vg = VirtualGrid(
        scene_bounds=scene_bounds, grid_shape=tuple([vox_size] * 3), batch_size=1
    )
    query_points = torch.from_numpy(scene_data["full_xyz_pts"])
    grid_indices = (
        vg.get_points_grid_idxs(query_points, cast_to_int=True)[0].cpu().numpy()
    )
    tsdf_vol = tsdf_vol.get_volume()[0]
    visibility_pts_mask = (
        tsdf_vol[grid_indices[:, 0], grid_indices[:, 1], grid_indices[:, 2]] > 0.0
    )
    scene_data["descriptions"] = get_all_relations(
        scene_data=scene_data,
        receptacle_masks={
            receptacle_name: {
                "mask": receptacle_mask[mask][indices],
                "xyz_pts": original_xyz_pts[receptacle_mask],
            }
            for receptacle_name, receptacle_mask in receptacle_masks.items()
        },
        objects_info={
            obj_info["objectId"]: obj_info for obj_info in datapoint["objects_info"]
        },
        remapped_visible_obj_ids=remapped_visible_obj_ids,
        all_remapped_obj_ids=remapped_obj_ids,
        visibility_pts_mask=visibility_pts_mask,
    )
    return scene_data
@ray.remote(num_cpus=1, num_gpus=0.05)
def generate_datapoint(
    scene_ids,
    dataset_dir_path: str,
    seed: int,
    path_to_exported_scenes: str,
    dist: float = 3.0,
    **kwargs,
):
    """Ray task: sample one scene datapoint and write it to an HDF5 file.

    The seed fully determines scene choice and sampling, so reruns with the
    same seed are no-ops once the output file exists. Any stage that returns
    None (simulator failure, no acceptable view, too few gt points) aborts
    silently without writing output.
    """
    np_rand = np.random.RandomState(seed=seed)
    scene_id = np_rand.choice(scene_ids)
    output_path = f"{dataset_dir_path}/{seed:05d}|{scene_id}.hdf5"
    if os.path.exists(output_path):
        # already generated for this seed; skip
        return
    # scenes listed in module-level `test_scenes` get material randomization
    domain_randomization = scene_id in test_scenes
    datapoint = run_simulator(
        scene_id=scene_id,
        dist=dist,
        np_rand=np_rand,
        domain_randomization=domain_randomization,
        **kwargs,
    )
    if datapoint is None:
        return
    scene_data = scene_data_from_thor_datapoint(
        datapoint=datapoint,
        dist=dist,
        np_rand=np_rand,
        path_to_exported_scenes=path_to_exported_scenes,
    )
    if scene_data is None:
        return
    init_dataset(output_path, data_structure=data_structure)
    with h5py.File(output_path, "a") as file:
        group = file.create_group(f"data")
        for key, value in scene_data.items():
            # keys declared in `data_structure` go into resizable datasets,
            # with region references stored in the group; the rest are
            # written directly
            if key in data_structure.keys():
                region_references = resize_and_add_data(dataset=file[key], data=value)
                write_to_hdf5(group, key, region_references, dtype=h5py.regionref_dtype)
            else:
                write_to_hdf5(group, key, value)
def generate_gt_scenes(
    scene_ids: List[str], path_to_exported_scenes: str, path_to_custom_unity: str
):
    """Load scenes in a custom Unity build to export ground-truth geometry.

    NOTE(review): the `finally` block stops the controller and calls
    `exit()` unconditionally, so at most ONE missing scene is processed per
    invocation and the whole process then terminates. Presumably the custom
    Unity executable exports `full_xyz_pts.txt`/`full_objid_pts.txt` as a
    side effect of loading the scene — confirm both the export mechanism and
    whether the early exit is intentional.
    """
    np.random.shuffle(scene_ids)
    for scene_id in scene_ids:
        # skip scenes whose ground-truth files already exist
        if os.path.exists(
            f"{path_to_exported_scenes}/{scene_id}/full_xyz_pts.txt"
        ) and os.path.exists(
            f"{path_to_exported_scenes}/{scene_id}/full_objid_pts.txt"
        ):
            continue
        controller = None
        try:
            controller = Controller(
                local_executable_path=path_to_custom_unity,
                agentMode="default",
                visibilityDistance=1.5,
                scene=scene_id,
                # step sizes
                gridSize=0.25,
                snapToGrid=True,
                rotateStepDegrees=90,
                # image modalities
                renderDepthImage=True,
                renderInstanceSegmentation=True,
                # camera properties
                width=width,
                height=height,
                fieldOfView=fov_w,
                # render headless
                platform=CloudRendering,
            )
        except Exception as e:
            logging.error(e)
        finally:
            if controller is not None:
                controller.stop()
            exit()
def generate_datapoints(
    dataset_dir_path: str,
    path_to_custom_unity: str,
    path_to_exported_scenes: str,
    num_processes: int,
    num_pts: int,
    start_seed: int,
    local: bool,
):
    """Drive a pool of `generate_datapoint` ray tasks over seeds.

    First offers to export ground truth for scenes missing it, then keeps
    `num_processes` tasks in flight, launching one new task per completed
    task, until seeds `start_seed .. start_seed + num_pts` are consumed.
    Task failures are logged and otherwise ignored.
    """
    ray.init(
        log_to_driver=True,
        local_mode=local,
    )
    # NOTE(review): this initial `tasks = []` is overwritten below
    tasks = []
    scene_ids = sorted(kitchens + living_rooms + bathrooms + bedrooms)
    # scenes lacking exported ground-truth point clouds
    not_gt_scene_ids = list(
        filter(
            lambda scene_id: not (
                os.path.exists(f"{path_to_exported_scenes}/{scene_id}/full_xyz_pts.txt")
                and os.path.exists(
                    f"{path_to_exported_scenes}/{scene_id}/full_objid_pts.txt"
                )
            ),
            scene_ids,
        )
    )
    logging.info("scenes without gts: " + ", ".join(not_gt_scene_ids))
    if (
        len(not_gt_scene_ids) > 0
        and input(f"There are {len(not_gt_scene_ids)} scenes without gt. Generate?")
        == "y"
    ):
        generate_gt_scenes(
            not_gt_scene_ids, path_to_exported_scenes, path_to_custom_unity
        )
    # only generate datapoints for scenes with ground truth available
    scene_ids = list(
        filter(
            lambda scene_id: (
                os.path.exists(f"{path_to_exported_scenes}/{scene_id}/full_xyz_pts.txt")
                and os.path.exists(
                    f"{path_to_exported_scenes}/{scene_id}/full_objid_pts.txt"
                )
            ),
            scene_ids,
        )
    )
    seed = start_seed
    tasks = [
        generate_datapoint.remote(
            scene_ids=scene_ids,
            dataset_dir_path=dataset_dir_path,
            path_to_exported_scenes=path_to_exported_scenes,
            seed=seed + i,
        )
        for i in range(num_processes)
    ]
    seed += num_processes
    pbar = tqdm(total=num_pts, smoothing=0.001)
    # NOTE(review): `offset` is accumulated but never read — dead state
    offset = 0
    while seed < start_seed + num_pts:
        # wait for one task, then refill the pool with the next seed(s)
        readies, tasks = ray.wait(tasks, num_returns=1)
        pbar.update((seed - start_seed) - pbar.n)
        offset += len(readies)
        tasks.extend(
            [
                generate_datapoint.remote(
                    scene_ids=scene_ids,
                    dataset_dir_path=dataset_dir_path,
                    path_to_exported_scenes=path_to_exported_scenes,
                    seed=seed + i,
                )
                for i in range(len(readies))
            ]
        )
        seed += len(readies)
        pbar.set_description(f"CURR SEED: {seed:06d}")
        try:
            # surface worker exceptions in the driver log
            ray.get(readies)
        except Exception as e:
            logging.error(e)
            pass
# HDF5 dataset layout used by `generate_datapoint` when writing shards;
# defined at module level so ray workers share the same schema
data_structure = get_datastructure(
    image_shape=(width, height),
    relevancy_shape=(128, 128),
    clip_hidden_dim=512,
    tsdf_dim=(64, 64, 64),
    num_output_pts=num_output_pts,
)
if __name__ == "__main__":
    # CLI entry point: parse flags, prepare the output directory, generate.
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--dataset_dir_path", type=str, required=True)
    arg_parser.add_argument("--num_processes", type=int, default=1)
    arg_parser.add_argument("--num_pts", type=int, default=50000)
    arg_parser.add_argument("--start_seed", type=int, default=0)
    arg_parser.add_argument("--local", action="store_true", default=False)
    arg_parser.add_argument("--path_to_custom_unity", type=str)
    arg_parser.add_argument("--path_to_exported_scenes", type=str)
    parsed = arg_parser.parse_args()
    dataset_exists = os.path.exists(parsed.dataset_dir_path)
    if dataset_exists and input(f"{parsed.dataset_dir_path} exists. replace?") == "y":
        # wipe and recreate an existing dataset directory on confirmation
        shutil.rmtree(parsed.dataset_dir_path)
        os.mkdir(parsed.dataset_dir_path)
    elif not dataset_exists:
        os.mkdir(parsed.dataset_dir_path)
    data = generate_datapoints(**vars(parsed))
| 46,662 | 37.405761 | 91 | py |
semantic-abstraction | semantic-abstraction-main/arm/utils.py | # Adapted from: https://github.com/stepjam/ARM/blob/main/arm/utils.py
import torch
import numpy as np
from scipy.spatial.transform import Rotation
import pyrender
import trimesh
from pyrender.trackball import Trackball
def normalize_quaternion(quat):
    """Scale quaternion(s) to unit norm along the last axis."""
    q = np.array(quat)
    return q / np.linalg.norm(q, axis=-1, keepdims=True)
def quaternion_to_discrete_euler(quaternion, resolution):
    """Quantize a quaternion into integer euler-angle bins of `resolution` degrees."""
    shifted_euler = Rotation.from_quat(quaternion).as_euler("xyz", degrees=True) + 180
    assert np.min(shifted_euler) >= 0 and np.max(shifted_euler) <= 360
    bins = np.around(shifted_euler / resolution).astype(int)
    # 360 degrees wraps around onto bin 0
    bins[bins == int(360 / resolution)] = 0
    return bins
def discrete_euler_to_quaternion(discrete_euler, resolution):
    """Invert `quaternion_to_discrete_euler`: euler bins -> quaternion (xyzw)."""
    euler_deg = (discrete_euler * resolution) - 180
    return Rotation.from_euler("xyz", euler_deg, degrees=True).as_quat()
def point_to_voxel_index(
    point: np.ndarray, voxel_size: np.ndarray, coord_bounds: np.ndarray
):
    """Map a 3D point to integer voxel indices, clamped to the grid extent.

    `coord_bounds` is [x_min, y_min, z_min, x_max, y_max, z_max].
    """
    lo = np.array(coord_bounds[0:3])
    hi = np.array(coord_bounds[3:])
    grid_dims = np.array([voxel_size] * 3)
    max_index = grid_dims - 1
    # per-axis voxel edge length (epsilon guards against division by zero)
    res = (hi - lo) / (grid_dims + 1e-12)
    raw_index = np.floor((point - lo) / (res + 1e-12)).astype(np.int32)
    return np.minimum(raw_index, max_index)
def stack_on_channel(x):
    """Fold the time axis of a (B, T, C, ...) tensor into channels -> (B, T*C, ...)."""
    batch, time, channels = x.shape[:3]
    return x.reshape(batch, time * channels, *x.shape[3:])
def _compute_initial_camera_pose(scene):
# Adapted from:
# https://github.com/mmatl/pyrender/blob/master/pyrender/viewer.py#L1032
centroid = scene.centroid
scale = scene.scale
# if scale == 0.0:
# scale = DEFAULT_SCENE_SCALE
scale = 4.0
s2 = 1.0 / np.sqrt(2.0)
cp = np.eye(4)
cp[:3, :3] = np.array([[0.0, -s2, s2], [1.0, 0.0, 0.0], [0.0, s2, s2]])
hfov = np.pi / 6.0
dist = scale / (2.0 * np.tan(hfov))
cp[:3, 3] = dist * np.array([1.0, 0.0, 1.0]) + centroid
return cp
def _from_trimesh_scene(trimesh_scene, bg_color=None, ambient_light=None):
    """Convert a trimesh.Scene into a pyrender.Scene, preserving node poses."""
    # one pyrender mesh per named trimesh geometry
    meshes = {}
    for name, geom in trimesh_scene.geometry.items():
        meshes[name] = pyrender.Mesh.from_trimesh(geom, smooth=False)
    out_scene = pyrender.Scene(bg_color=bg_color, ambient_light=ambient_light)
    # re-add every geometry-bearing node at its original pose
    for node in trimesh_scene.graph.nodes_geometry:
        pose, geom_name = trimesh_scene.graph[node]
        out_scene.add(meshes[geom_name], pose=pose)
    return out_scene
def create_voxel_scene(
    voxel_grid: np.ndarray,
    q_attention: np.ndarray = None,
    highlight_coordinate: np.ndarray = None,
    highlight_gt_coordinate: np.ndarray = None,
    highlight_alpha: float = 1.0,
    voxel_size: float = 0.1,
    show_bb: bool = False,
    alpha: float = 0.5,
):
    """Build a trimesh scene of colored boxes from a (C, D, H, W) voxel grid.

    The last feature channel marks occupancy; channels 3:6 are RGB in
    [-1, 1]. Optional q-attention overlays red voxels, and the predicted /
    ground-truth coordinates are highlighted in red / blue respectively.
    """
    _, d, h, w = voxel_grid.shape
    # to (D, H, W, C) so features are the trailing axis
    v = voxel_grid.transpose((1, 2, 3, 0))
    occupancy = v[:, :, :, -1] != 0
    alpha = np.expand_dims(np.full_like(occupancy, alpha, dtype=np.float32), -1)
    # RGB rescaled from [-1, 1] to [0, 1], with alpha appended -> RGBA
    rgb = np.concatenate([(v[:, :, :, 3:6] + 1) / 2.0, alpha], axis=-1)
    if q_attention is not None:
        # collapse attention heads, normalize, and show only strong cells
        q = np.max(q_attention, 0)
        q = q / np.max(q)
        show_q = q > 0.75
        occupancy = (show_q + occupancy).astype(bool)
        q = np.expand_dims(q - 0.5, -1)  # Max q can be is 0.9
        q_rgb = np.concatenate(
            [q, np.zeros_like(q), np.zeros_like(q), np.clip(q, 0, 1)], axis=-1
        )
        rgb = np.where(np.expand_dims(show_q, -1), q_rgb, rgb)
    if highlight_coordinate is not None:
        # predicted coordinate rendered in red
        x, y, z = highlight_coordinate
        occupancy[x, y, z] = True
        rgb[x, y, z] = [1.0, 0.0, 0.0, highlight_alpha]
    if highlight_gt_coordinate is not None:
        # ground-truth coordinate rendered in blue
        x, y, z = highlight_gt_coordinate
        occupancy[x, y, z] = True
        rgb[x, y, z] = [0.0, 0.0, 1.0, highlight_alpha]
    transform = trimesh.transformations.scale_and_translate(
        scale=voxel_size, translate=(0.0, 0.0, 0.0)
    )
    trimesh_voxel_grid = trimesh.voxel.VoxelGrid(
        encoding=occupancy, transform=transform
    )
    geometry = trimesh_voxel_grid.as_boxes(colors=rgb)
    scene = trimesh.Scene()
    scene.add_geometry(geometry)
    if show_bb:
        # bounding box helper only supports cubic grids
        assert d == h == w
        _create_bounding_box(scene, voxel_size, d)
    return scene
def visualise_voxel(
    voxel_grid: np.ndarray,
    q_attention: np.ndarray = None,
    highlight_coordinate: np.ndarray = None,
    highlight_gt_coordinate: np.ndarray = None,
    highlight_alpha: float = 1.0,
    rotation_amount: float = 0.0,
    show: bool = False,
    voxel_size: float = 0.1,
    offscreen_renderer: pyrender.OffscreenRenderer = None,
    show_bb: bool = False,
    alpha: float = 0.5,
    render_gripper=False,
    gripper_pose=None,
    gripper_mesh_scale=1.0,
):
    """Render a voxel grid, either interactively (`show=True`, returns None)
    or offscreen via pyrender (returns an RGB image array).

    NOTE(review): when no `offscreen_renderer` is supplied, a new
    OffscreenRenderer is created per call and never released — confirm
    callers pass a shared renderer in render loops.
    """
    scene = create_voxel_scene(
        voxel_grid,
        q_attention,
        highlight_coordinate,
        highlight_gt_coordinate,
        highlight_alpha,
        voxel_size,
        show_bb,
        alpha,
    )
    if show:
        scene.show()
    else:
        r = offscreen_renderer or pyrender.OffscreenRenderer(
            viewport_width=1920, viewport_height=1080, point_size=1.0
        )
        s = _from_trimesh_scene(
            scene, ambient_light=[0.8, 0.8, 0.8], bg_color=[1.0, 1.0, 1.0]
        )
        cam = pyrender.PerspectiveCamera(
            yfov=np.pi / 4.0, aspectRatio=r.viewport_width / r.viewport_height
        )
        # frame the scene, then spin the camera about the vertical axis
        p = _compute_initial_camera_pose(s)
        t = Trackball(p, (r.viewport_width, r.viewport_height), s.scale, s.centroid)
        t.rotate(rotation_amount, np.array([0.0, 0.0, 1.0]))
        s.add(cam, pose=t.pose)
        if render_gripper:
            # load the gripper mesh and color vertices by distance from the
            # center of mass for visual depth cues
            gripper_trimesh = trimesh.load("peract_colab/meshes/hand.dae", force="mesh")
            gripper_trimesh.vertices *= gripper_mesh_scale
            radii = np.linalg.norm(
                gripper_trimesh.vertices - gripper_trimesh.center_mass, axis=1
            )
            gripper_trimesh.visual.vertex_colors = trimesh.visual.interpolate(
                radii * gripper_mesh_scale, color_map="winter"
            )
            gripper_mesh = pyrender.Mesh.from_trimesh(
                gripper_trimesh, poses=np.array([gripper_pose]), smooth=False
            )
            s.add(gripper_mesh)
        color, depth = r.render(s)
        return color.copy()
def get_gripper_render_pose(
    voxel_scale, scene_bound_origin, continuous_trans, continuous_quat
):
    """Build a 4x4 pose for rendering the gripper mesh at a target location.

    Translation is shifted by the scene-bound origin and scaled to voxel
    units, then offset so the finger tip (not the mesh origin) lands on the
    target point.
    """
    # finger tip to gripper offset (0.1 along local z, in voxel units)
    tip_offset = np.eye(4)
    tip_offset[2, 3] = 0.1 * voxel_scale
    # scale and translate by origin
    translation = (continuous_trans - np.array(scene_bound_origin[:3])) * voxel_scale
    pose = np.eye(4, 4)
    quat_xyzw = [
        continuous_quat[0],
        continuous_quat[1],
        continuous_quat[2],
        continuous_quat[3],
    ]
    pose[:3, :3] = Rotation.from_quat(quat_xyzw).as_matrix()
    rotated_offset = np.matmul(pose, tip_offset)
    pose[:3, 3] = translation - rotated_offset[:3, 3]
    return pose
| 7,072 | 32.842105 | 88 | py |
semantic-abstraction | semantic-abstraction-main/arm/network_utils.py | # Adapted from https://github.com/stepjam/ARM/blob/main/arm/network_utils.py
import copy
from typing import List, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Negative slope shared by every LeakyReLU activation/init in this module.
LRELU_SLOPE = 0.02
def act_layer(act):
    """Instantiate the activation module named by `act`.

    Raises ValueError for unrecognized names.
    """
    factories = {
        "relu": nn.ReLU,
        "lrelu": lambda: nn.LeakyReLU(LRELU_SLOPE),
        "elu": nn.ELU,
        "tanh": nn.Tanh,
        "prelu": nn.PReLU,
    }
    if act not in factories:
        raise ValueError("%s not recognized." % act)
    return factories[act]()
def norm_layer2d(norm, channels):
    """Instantiate a 2D normalization layer by name.

    "layer" / "group" are implemented as GroupNorm with 1 / 4 groups.
    Raises ValueError for unrecognized names.
    """
    builders = {
        "batch": lambda: nn.BatchNorm2d(channels),
        "instance": lambda: nn.InstanceNorm2d(channels, affine=True),
        "layer": lambda: nn.GroupNorm(1, channels, affine=True),
        "group": lambda: nn.GroupNorm(4, channels, affine=True),
    }
    if norm not in builders:
        raise ValueError("%s not recognized." % norm)
    return builders[norm]()
def norm_layer1d(norm, num_channels):
    """Instantiate a 1D normalization layer by name.

    Raises ValueError for unrecognized names.
    """
    builders = {
        "batch": lambda: nn.BatchNorm1d(num_channels),
        "instance": lambda: nn.InstanceNorm1d(num_channels, affine=True),
        "layer": lambda: nn.LayerNorm(num_channels),
    }
    if norm not in builders:
        raise ValueError("%s not recognized." % norm)
    return builders[norm]()
class FiLMBlock(nn.Module):
    """Feature-wise linear modulation: out = gamma * x + beta, per channel."""

    def __init__(self):
        super(FiLMBlock, self).__init__()

    def forward(self, x, gamma, beta):
        # broadcast (B, C) conditioning over the spatial dimensions of x
        cond_shape = (x.size(0), x.size(1), 1, 1)
        return gamma.view(cond_shape) * x + beta.view(cond_shape)
class Conv2DBlock(nn.Module):
    """Conv2d followed by optional normalization and activation.

    Weights are initialized according to the activation that follows
    (xavier for linear/tanh, kaiming for relu/lrelu); biases start at zero.
    Raises ValueError for unrecognized activation names.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_sizes,
        strides,
        norm=None,
        activation=None,
        padding_mode="replicate",
    ):
        super(Conv2DBlock, self).__init__()
        # "same"-style padding for odd kernels (int or (kh, kw) tuple)
        if isinstance(kernel_sizes, int):
            padding = kernel_sizes // 2
        else:
            padding = (kernel_sizes[0] // 2, kernel_sizes[1] // 2)
        self.conv2d = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_sizes,
            strides,
            padding=padding,
            padding_mode=padding_mode,
        )
        self._init_weights(self.conv2d, activation)
        self.norm = None if norm is None else norm_layer2d(norm, out_channels)
        self.activation = None if activation is None else act_layer(activation)

    @staticmethod
    def _init_weights(conv, activation):
        """Activation-aware init: xavier for linear/tanh, kaiming for (l)relu."""
        if activation is None:
            nn.init.xavier_uniform_(
                conv.weight, gain=nn.init.calculate_gain("linear")
            )
        elif activation == "tanh":
            nn.init.xavier_uniform_(conv.weight, gain=nn.init.calculate_gain("tanh"))
        elif activation == "lrelu":
            nn.init.kaiming_uniform_(
                conv.weight, a=LRELU_SLOPE, nonlinearity="leaky_relu"
            )
        elif activation == "relu":
            nn.init.kaiming_uniform_(conv.weight, nonlinearity="relu")
        else:
            raise ValueError()
        nn.init.zeros_(conv.bias)

    def forward(self, x):
        out = self.conv2d(x)
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
class Conv2DFiLMBlock(Conv2DBlock):
    """Conv2DBlock with FiLM conditioning applied between norm and activation."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_sizes,
        strides,
        norm=None,
        activation=None,
        padding_mode="replicate",
    ):
        super(Conv2DFiLMBlock, self).__init__(
            in_channels,
            out_channels,
            kernel_sizes,
            strides,
            norm,
            activation,
            padding_mode,
        )
        self.film = FiLMBlock()

    def forward(self, x, gamma, beta):
        """conv -> (norm) -> FiLM(gamma, beta) -> (activation)."""
        out = self.conv2d(x)
        if self.norm is not None:
            out = self.norm(out)
        out = self.film(out, gamma, beta)
        if self.activation is not None:
            out = self.activation(out)
        return out
class Conv3DBlock(nn.Module):
    """Conv3d with optional activation; normalization is not implemented.

    Weights are initialized according to the activation that follows
    (xavier for linear/tanh, kaiming for relu/lrelu); biases start at zero.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_sizes: Union[int, list] = 3,
        strides=1,
        norm=None,
        activation=None,
        padding_mode="replicate",
        padding=None,
    ):
        super(Conv3DBlock, self).__init__()
        # default to "same"-style padding for odd kernels
        if padding is None:
            padding = kernel_sizes // 2
        self.conv3d = nn.Conv3d(
            in_channels,
            out_channels,
            kernel_sizes,
            strides,
            padding=padding,
            padding_mode=padding_mode,
        )
        self._init_weights(self.conv3d, activation)
        if norm is not None:
            raise NotImplementedError("Norm not implemented.")
        self.norm = None
        self.activation = None if activation is None else act_layer(activation)
        self.out_channels = out_channels

    @staticmethod
    def _init_weights(conv, activation):
        """Activation-aware init: xavier for linear/tanh, kaiming for (l)relu."""
        if activation is None:
            nn.init.xavier_uniform_(
                conv.weight, gain=nn.init.calculate_gain("linear")
            )
        elif activation == "tanh":
            nn.init.xavier_uniform_(conv.weight, gain=nn.init.calculate_gain("tanh"))
        elif activation == "lrelu":
            nn.init.kaiming_uniform_(
                conv.weight, a=LRELU_SLOPE, nonlinearity="leaky_relu"
            )
        elif activation == "relu":
            nn.init.kaiming_uniform_(conv.weight, nonlinearity="relu")
        else:
            raise ValueError()
        nn.init.zeros_(conv.bias)

    def forward(self, x):
        out = self.conv3d(x)
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
class ConvTranspose3DBlock(nn.Module):
    """ConvTranspose3d with optional activation.

    Normalization is not implemented: the previous code called an undefined
    `norm_layer3d`, which raised NameError whenever `norm` was passed; this
    now raises NotImplementedError explicitly, consistent with Conv3DBlock.

    Weights are initialized according to the activation that follows
    (xavier for linear/tanh, kaiming for relu/lrelu); biases start at zero.
    Note: nn.ConvTranspose3d only supports padding_mode="zeros".
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_sizes: Union[int, list],
        strides,
        norm=None,
        activation=None,
        padding_mode="zeros",
        padding=None,
    ):
        super(ConvTranspose3DBlock, self).__init__()
        # default to "same"-style padding for odd kernels
        padding = kernel_sizes // 2 if padding is None else padding
        self.conv3d = nn.ConvTranspose3d(
            in_channels,
            out_channels,
            kernel_sizes,
            strides,
            padding=padding,
            padding_mode=padding_mode,
        )
        # activation-aware weight init; biases start at zero
        if activation is None:
            nn.init.xavier_uniform_(
                self.conv3d.weight, gain=nn.init.calculate_gain("linear")
            )
            nn.init.zeros_(self.conv3d.bias)
        elif activation == "tanh":
            nn.init.xavier_uniform_(
                self.conv3d.weight, gain=nn.init.calculate_gain("tanh")
            )
            nn.init.zeros_(self.conv3d.bias)
        elif activation == "lrelu":
            nn.init.kaiming_uniform_(
                self.conv3d.weight, a=LRELU_SLOPE, nonlinearity="leaky_relu"
            )
            nn.init.zeros_(self.conv3d.bias)
        elif activation == "relu":
            nn.init.kaiming_uniform_(self.conv3d.weight, nonlinearity="relu")
            nn.init.zeros_(self.conv3d.bias)
        else:
            raise ValueError()
        self.activation = None
        self.norm = None
        if norm is not None:
            # FIX: was `self.norm = norm_layer3d(norm, out_channels)` — that
            # helper does not exist anywhere in this module (NameError).
            raise NotImplementedError("Norm not implemented.")
        if activation is not None:
            self.activation = act_layer(activation)

    def forward(self, x):
        """Apply transposed conv, then optional norm and activation."""
        x = self.conv3d(x)
        x = self.norm(x) if self.norm is not None else x
        x = self.activation(x) if self.activation is not None else x
        return x
class Conv2DUpsampleBlock(nn.Module):
    """conv -> (bilinear upsample when strides > 1) -> conv."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_sizes,
        strides,
        norm=None,
        activation=None,
    ):
        super(Conv2DUpsampleBlock, self).__init__()
        stages = [
            Conv2DBlock(in_channels, out_channels, kernel_sizes, 1, norm, activation)
        ]
        if strides > 1:
            stages.append(
                nn.Upsample(scale_factor=strides, mode="bilinear", align_corners=False)
            )
        stages.append(
            Conv2DBlock(out_channels, out_channels, kernel_sizes, 1, norm, activation)
        )
        self.conv_up = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv_up(x)
class Conv3DUpsampleBlock(nn.Module):
    """conv -> (trilinear upsample when strides > 1) -> conv."""

    def __init__(
        self,
        in_channels,
        out_channels,
        strides,
        kernel_sizes=3,
        norm=None,
        activation=None,
    ):
        super(Conv3DUpsampleBlock, self).__init__()
        stages = [
            Conv3DBlock(in_channels, out_channels, kernel_sizes, 1, norm, activation)
        ]
        if strides > 1:
            stages.append(
                nn.Upsample(scale_factor=strides, mode="trilinear", align_corners=False)
            )
        stages.append(
            Conv3DBlock(out_channels, out_channels, kernel_sizes, 1, norm, activation)
        )
        self.conv_up = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv_up(x)
class DenseBlock(nn.Module):
    """Linear layer with optional normalization and activation.

    Weight init follows the activation (xavier for linear/tanh, kaiming for
    relu/lrelu); bias starts at zero. Raises ValueError for unrecognized
    activation names.
    """

    def __init__(self, in_features, out_features, norm=None, activation=None):
        super(DenseBlock, self).__init__()
        self.linear = nn.Linear(in_features, out_features)
        self._init_weights(self.linear, activation)
        self.norm = None if norm is None else norm_layer1d(norm, out_features)
        self.activation = None if activation is None else act_layer(activation)

    @staticmethod
    def _init_weights(linear, activation):
        """Activation-aware init: xavier for linear/tanh, kaiming for (l)relu."""
        if activation is None:
            nn.init.xavier_uniform_(
                linear.weight, gain=nn.init.calculate_gain("linear")
            )
        elif activation == "tanh":
            nn.init.xavier_uniform_(
                linear.weight, gain=nn.init.calculate_gain("tanh")
            )
        elif activation == "lrelu":
            nn.init.kaiming_uniform_(
                linear.weight, a=LRELU_SLOPE, nonlinearity="leaky_relu"
            )
        elif activation == "relu":
            nn.init.kaiming_uniform_(linear.weight, nonlinearity="relu")
        else:
            raise ValueError()
        nn.init.zeros_(linear.bias)

    def forward(self, x):
        out = self.linear(x)
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
class SiameseNet(nn.Module):
    """Per-input CNN streams with shared hyperparameters, fused by a 1x1 conv.

    Construction only records the configuration; call `build()` before
    `forward()`. Despite the name, the streams do NOT share weights — each
    input channel count gets its own conv stack.
    """

    def __init__(
        self,
        input_channels: List[int],
        filters: List[int],
        kernel_sizes: List[int],
        strides: List[int],
        norm: str = None,
        activation: str = "relu",
    ):
        super(SiameseNet, self).__init__()
        self._input_channels = input_channels
        self._filters = filters
        self._kernel_sizes = kernel_sizes
        self._strides = strides
        self._norm = norm
        self._activation = activation
        self.output_channels = filters[-1]  # * len(input_channels)

    def build(self):
        """Create one conv stack per input stream plus the fusing 1x1 conv."""
        self._siamese_blocks = nn.ModuleList()
        for i, ch in enumerate(self._input_channels):
            blocks = []
            # NOTE(review): inner loop reuses index name `i` from the outer
            # loop; harmless here since neither index is used
            for i, (filt, ksize, stride) in enumerate(
                zip(self._filters, self._kernel_sizes, self._strides)
            ):
                conv_block = Conv2DBlock(
                    ch, filt, ksize, stride, self._norm, self._activation
                )
                blocks.append(conv_block)
            self._siamese_blocks.append(nn.Sequential(*blocks))
        self._fuse = Conv2DBlock(
            self._filters[-1] * len(self._siamese_blocks),
            self._filters[-1],
            1,
            1,
            self._norm,
            self._activation,
        )

    def forward(self, x):
        # x: list of tensors, one per configured input stream
        if len(x) != len(self._siamese_blocks):
            raise ValueError(
                "Expected a list of tensors of size %d." % len(self._siamese_blocks)
            )
        # streams kept on self so callers can inspect intermediate features
        self.streams = [stream(y) for y, stream in zip(x, self._siamese_blocks)]
        y = self._fuse(torch.cat(self.streams, 1))
        return y
class CNNAndFcsNet(nn.Module):
    """SiameseNet features + tiled low-dim state -> CNN -> global max-pool -> MLP.

    Construction only records the configuration; call `build()` before
    `forward()`. The provided `siamese_net` is deep-copied, so the caller's
    instance is never mutated.
    """

    def __init__(
        self,
        siamese_net: SiameseNet,
        low_dim_state_len: int,
        input_resolution: List[int],
        filters: List[int],
        kernel_sizes: List[int],
        strides: List[int],
        norm: str = None,
        fc_layers: List[int] = None,
        activation: str = "relu",
    ):
        super(CNNAndFcsNet, self).__init__()
        self._siamese_net = copy.deepcopy(siamese_net)
        # CNN input = fused siamese features concatenated with tiled state
        self._input_channels = self._siamese_net.output_channels + low_dim_state_len
        self._filters = filters
        self._kernel_sizes = kernel_sizes
        self._strides = strides
        self._norm = norm
        self._activation = activation
        self._fc_layers = [] if fc_layers is None else fc_layers
        self._input_resolution = input_resolution

    def build(self):
        self._siamese_net.build()
        layers = []
        channels = self._input_channels
        # all conv layers but the last use the configured norm/activation
        for i, (filt, ksize, stride) in enumerate(
            list(zip(self._filters, self._kernel_sizes, self._strides))[:-1]
        ):
            layers.append(
                Conv2DBlock(channels, filt, ksize, stride, self._norm, self._activation)
            )
            channels = filt
        # final conv is linear (no norm / activation)
        layers.append(
            Conv2DBlock(
                channels, self._filters[-1], self._kernel_sizes[-1], self._strides[-1]
            )
        )
        self._cnn = nn.Sequential(*layers)
        self._maxp = nn.AdaptiveMaxPool2d(1)
        channels = self._filters[-1]
        dense_layers = []
        for n in self._fc_layers[:-1]:
            dense_layers.append(DenseBlock(channels, n, activation=self._activation))
            channels = n
        # final dense layer is linear (no activation)
        dense_layers.append(DenseBlock(channels, self._fc_layers[-1]))
        self._fcs = nn.Sequential(*dense_layers)

    def forward(self, observations, low_dim_ins):
        x = self._siamese_net(observations)
        _, _, h, w = x.shape
        # tile the low-dim state over the spatial grid and concat on channels
        low_dim_latents = low_dim_ins.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, h, w)
        combined = torch.cat([x, low_dim_latents], dim=1)
        x = self._cnn(combined)
        x = self._maxp(x).squeeze(-1).squeeze(-1)
        return self._fcs(x)
class CNNLangAndFcsNet(nn.Module):
    """Like CNNAndFcsNet, but with three FiLM-conditioned conv layers.

    Language goal features (dim 1024) are projected to per-layer gamma/beta
    pairs that modulate each Conv2DFiLMBlock. Construction only records the
    configuration; call `build()` before `forward()`.
    """

    def __init__(
        self,
        siamese_net: SiameseNet,
        low_dim_state_len: int,
        input_resolution: List[int],
        filters: List[int],
        kernel_sizes: List[int],
        strides: List[int],
        norm: str = None,
        fc_layers: List[int] = None,
        activation: str = "relu",
    ):
        super(CNNLangAndFcsNet, self).__init__()
        self._siamese_net = copy.deepcopy(siamese_net)
        # CNN input = fused siamese features concatenated with tiled state
        self._input_channels = self._siamese_net.output_channels + low_dim_state_len
        self._filters = filters
        self._kernel_sizes = kernel_sizes
        self._strides = strides
        self._norm = norm
        self._activation = activation
        self._fc_layers = [] if fc_layers is None else fc_layers
        self._input_resolution = input_resolution
        # expected dimensionality of `lang_goal_feats`
        self._lang_feat_dim = 1024

    def build(self):
        self._siamese_net.build()
        # NOTE(review): `layers` is unused in this build(); `channels` is
        # reassigned before use below
        layers = []
        channels = self._input_channels
        # three fixed FiLM conv stages, each with its own gamma/beta heads
        self.conv1 = Conv2DFiLMBlock(
            channels, self._filters[0], self._kernel_sizes[0], self._strides[0]
        )
        self.gamma1 = nn.Linear(self._lang_feat_dim, self._filters[0])
        self.beta1 = nn.Linear(self._lang_feat_dim, self._filters[0])
        self.conv2 = Conv2DFiLMBlock(
            self._filters[0], self._filters[1], self._kernel_sizes[1], self._strides[1]
        )
        self.gamma2 = nn.Linear(self._lang_feat_dim, self._filters[1])
        self.beta2 = nn.Linear(self._lang_feat_dim, self._filters[1])
        self.conv3 = Conv2DFiLMBlock(
            self._filters[1], self._filters[2], self._kernel_sizes[2], self._strides[2]
        )
        self.gamma3 = nn.Linear(self._lang_feat_dim, self._filters[2])
        self.beta3 = nn.Linear(self._lang_feat_dim, self._filters[2])
        self._maxp = nn.AdaptiveMaxPool2d(1)
        channels = self._filters[-1]
        dense_layers = []
        for n in self._fc_layers[:-1]:
            dense_layers.append(DenseBlock(channels, n, activation=self._activation))
            channels = n
        # final dense layer is linear (no activation)
        dense_layers.append(DenseBlock(channels, self._fc_layers[-1]))
        self._fcs = nn.Sequential(*dense_layers)

    def forward(self, observations, low_dim_ins, lang_goal_feats):
        x = self._siamese_net(observations)
        _, _, h, w = x.shape
        # tile the low-dim state over the spatial grid and concat on channels
        low_dim_latents = low_dim_ins.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, h, w)
        combined = torch.cat([x, low_dim_latents], dim=1)
        g1 = self.gamma1(lang_goal_feats)
        b1 = self.beta1(lang_goal_feats)
        x = self.conv1(combined, g1, b1)
        g2 = self.gamma2(lang_goal_feats)
        b2 = self.beta2(lang_goal_feats)
        x = self.conv2(x, g2, b2)
        g3 = self.gamma3(lang_goal_feats)
        b3 = self.beta3(lang_goal_feats)
        x = self.conv3(x, g3, b3)
        x = self._maxp(x).squeeze(-1).squeeze(-1)
        return self._fcs(x)
class Conv3DInceptionBlockUpsampleBlock(nn.Module):
    """Inception block, optional trilinear upsample, then a second inception block."""

    def __init__(
        self,
        in_channels,
        out_channels,
        scale_factor,
        norm=None,
        activation=None,
        residual=False,
    ):
        super(Conv3DInceptionBlockUpsampleBlock, self).__init__()
        # Build the pipeline in order: inception -> (upsample if scaling) -> inception.
        stages = [Conv3DInceptionBlock(in_channels, out_channels, norm, activation)]
        if scale_factor > 1:
            stages.append(
                nn.Upsample(
                    scale_factor=scale_factor, mode="trilinear", align_corners=False
                )
            )
        stages.append(Conv3DInceptionBlock(out_channels, out_channels, norm, activation))
        self.conv_up = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the inception/upsample stack to a 5-D volume tensor."""
        return self.conv_up(x)
class Conv3DInceptionBlock(nn.Module):
    """Inception-style 3D conv block with three parallel branches.

    Branches (concatenated channel-wise):
      * 1x1 conv producing half the output channels,
      * 1x1 bottleneck -> 3x3 conv (quarter of output channels),
      * 1x1 bottleneck -> two stacked 3x3 convs, i.e. a 5x5 receptive field
        (quarter of output channels).
    With residual=True the input itself is prepended to the concatenation,
    so out_channels grows by in_channels.
    """

    def __init__(
        self, in_channels, out_channels, norm=None, activation=None, residual=False
    ):
        super(Conv3DInceptionBlock, self).__init__()
        self._residual = residual
        # Each of the two 3x3 branches contributes cs channels; the 1x1 branch 2*cs.
        cs = out_channels // 4
        assert out_channels % 4 == 0
        # Fixed bottleneck width for the 3x3/5x5 branches.
        latent = 32
        self._1x1conv = Conv3DBlock(
            in_channels,
            cs * 2,
            kernel_sizes=1,
            strides=1,
            norm=norm,
            activation=activation,
        )
        self._1x1conv_a = Conv3DBlock(
            in_channels,
            latent,
            kernel_sizes=1,
            strides=1,
            norm=norm,
            activation=activation,
        )
        self._3x3conv = Conv3DBlock(
            latent, cs, kernel_sizes=3, strides=1, norm=norm, activation=activation
        )
        self._1x1conv_b = Conv3DBlock(
            in_channels,
            latent,
            kernel_sizes=1,
            strides=1,
            norm=norm,
            activation=activation,
        )
        self._5x5_via_3x3conv_a = Conv3DBlock(
            latent, latent, kernel_sizes=3, strides=1, norm=norm, activation=activation
        )
        self._5x5_via_3x3conv_b = Conv3DBlock(
            latent, cs, kernel_sizes=3, strides=1, norm=norm, activation=activation
        )
        self.out_channels = out_channels + (in_channels if residual else 0)

    def forward(self, x):
        """Concatenate (optional residual input and) the three branch outputs."""
        yy = []
        if self._residual:
            yy = [x]
        return torch.cat(
            yy
            + [
                self._1x1conv(x),
                self._3x3conv(self._1x1conv_a(x)),
                self._5x5_via_3x3conv_b(self._5x5_via_3x3conv_a(self._1x1conv_b(x))),
            ],
            1,
        )
class ConvTransposeUp3DBlock(nn.Module):
    """Upsample a 3D volume: 1x1 conv -> 2x transposed conv -> 1x1 conv.

    The final 1x1 conv is created with norm only (no activation), so the
    block's output is un-activated.  NOTE(review): `residual` is stored but
    never used in forward — confirm whether it was meant to add a skip path.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        strides=2,
        padding=0,
        norm=None,
        activation=None,
        residual=False,
    ):
        super(ConvTransposeUp3DBlock, self).__init__()
        self._residual = residual
        # Channel projection before upsampling.
        self._1x1conv = Conv3DBlock(
            in_channels,
            out_channels,
            kernel_sizes=1,
            strides=1,
            norm=norm,
            activation=activation,
        )
        # Learned upsampling (kernel 2, default stride 2 doubles resolution).
        self._3x3conv = ConvTranspose3DBlock(
            out_channels,
            out_channels,
            kernel_sizes=2,
            strides=strides,
            norm=norm,
            activation=activation,
            padding=padding,
        )
        # Output projection — no activation argument passed.
        self._1x1conv_a = Conv3DBlock(
            out_channels,
            out_channels,
            kernel_sizes=1,
            strides=1,
            norm=norm,
        )
        self.out_channels = out_channels

    def forward(self, x):
        """Apply projection, transposed-conv upsample, and output projection."""
        x = self._1x1conv(x)
        x = self._3x3conv(x)
        x = self._1x1conv_a(x)
        return x
class SpatialSoftmax3D(torch.nn.Module):
    """Spatial soft-argmax over a 3D feature volume.

    Produces, per channel, the softmax-weighted expected (x, y, z) coordinate
    in [-1, 1], i.e. a 3*channel keypoint vector per batch element.
    """

    def __init__(self, depth, height, width, channel):
        super(SpatialSoftmax3D, self).__init__()
        self.depth = depth
        self.height = height
        self.width = width
        self.channel = channel
        # Low temperature sharpens the softmax toward a hard argmax.
        self.temperature = 0.01
        # NOTE(review): np.meshgrid defaults to 'xy' indexing, which swaps the
        # first two axes; verify pos_x/pos_y/pos_z really align with the
        # (depth, height, width) volume layout as intended.
        pos_x, pos_y, pos_z = np.meshgrid(
            np.linspace(-1.0, 1.0, self.depth),
            np.linspace(-1.0, 1.0, self.height),
            np.linspace(-1.0, 1.0, self.width),
        )
        pos_x = torch.from_numpy(
            pos_x.reshape(self.depth * self.height * self.width)
        ).float()
        pos_y = torch.from_numpy(
            pos_y.reshape(self.depth * self.height * self.width)
        ).float()
        pos_z = torch.from_numpy(
            pos_z.reshape(self.depth * self.height * self.width)
        ).float()
        # Buffers (not parameters): move with the module but are not trained.
        self.register_buffer("pos_x", pos_x)
        self.register_buffer("pos_y", pos_y)
        self.register_buffer("pos_z", pos_z)

    def forward(self, feature):
        """Return per-channel expected coordinates, shape (B, channel * 3)."""
        # Folds the channel dim into the batch dim: (B, C, D, H, W) -> (B*C, D*H*W).
        feature = feature.view(
            -1, self.height * self.width * self.depth
        )  # (B, c*d*h*w)
        softmax_attention = F.softmax(feature / self.temperature, dim=-1)
        expected_x = torch.sum(self.pos_x * softmax_attention, dim=1, keepdim=True)
        expected_y = torch.sum(self.pos_y * softmax_attention, dim=1, keepdim=True)
        expected_z = torch.sum(self.pos_z * softmax_attention, dim=1, keepdim=True)
        # Despite the name, this holds (x, y, z) triples.
        expected_xy = torch.cat([expected_x, expected_y, expected_z], 1)
        feature_keypoints = expected_xy.view(-1, self.channel * 3)
        return feature_keypoints
| 23,208 | 30.363514 | 88 | py |
semantic-abstraction | semantic-abstraction-main/arm/__init__.py | 0 | 0 | 0 | py | |
semantic-abstraction | semantic-abstraction-main/arm/optim/__init__.py | 0 | 0 | 0 | py | |
semantic-abstraction | semantic-abstraction-main/arm/optim/lamb.py | # From https://github.com/cybertronai/pytorch-lamb/blob/master/pytorch_lamb/lamb.py
"""Lamb optimizer."""
import collections
import math
import torch
from torch.optim import Optimizer
# def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int):
# """Log a histogram of trust ratio scalars in across layers."""
# results = collections.defaultdict(list)
# for group in optimizer.param_groups:
# for p in group['params']:
# state = optimizer.state[p]
# for i in ('weight_norm', 'adam_norm', 'trust_ratio'):
# if i in state:
# results[i].append(state[i])
#
# for k, v in results.items():
# event_writer.add_histogram(f'lamb/{k}', torch.tensor(v), token_count)
class Lamb(Optimizer):
    r"""Implements Lamb algorithm.
    It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this into
            Adam. Useful for comparison purposes.
    .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """

    def __init__(
        self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0, adam=False
    ):
        # Validate hyperparameters before handing them to the base Optimizer.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # When True, trust_ratio is forced to 1 and the update degenerates to Adam.
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        "Lamb does not support sparse gradients, consider SparseAdam instad."
                    )

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]

                state["step"] += 1

                # Decay the first and second moment running average coefficient
                # m_t
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                # Paper v3 does not use debiasing.
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']
                # Apply bias to lr to avoid broadcast.
                step_size = group[
                    "lr"
                ]  # * math.sqrt(bias_correction2) / bias_correction1

                # Layer-wise weight norm, clamped to [0, 10] as in the paper.
                weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)

                # Adam-style update direction (plus decoupled weight decay).
                adam_step = exp_avg / exp_avg_sq.sqrt().add(group["eps"])
                if group["weight_decay"] != 0:
                    adam_step.add_(p.data, alpha=group["weight_decay"])

                adam_norm = adam_step.pow(2).sum().sqrt()
                # Trust ratio rescales the step per-layer; fall back to 1 when
                # either norm is zero (fresh weights or zero update).
                if weight_norm == 0 or adam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / adam_norm
                # Stashed for external logging/inspection.
                state["weight_norm"] = weight_norm
                state["adam_norm"] = adam_norm
                state["trust_ratio"] = trust_ratio
                if self.adam:
                    trust_ratio = 1

                p.data.add_(adam_step, alpha=-step_size * trust_ratio)

        return loss
| 5,163 | 39.34375 | 103 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/setup.py | import os
import pkg_resources
from setuptools import setup, find_packages
# Build/install configuration for the bundled `clip` module.
# install_requires is parsed at build time from the requirements.txt that
# sits next to this setup.py.
setup(
    name="clip",
    py_modules=["clip"],
    version="1.0",
    description="",
    author="OpenAI",
    packages=find_packages(exclude=["tests*"]),
    install_requires=[
        str(r)
        for r in pkg_resources.parse_requirements(
            open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
        )
    ],
    include_package_data=True,
    extras_require={"dev": ["pytest"]},
)
| 491 | 21.363636 | 77 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/clip_explainability.py | # modified from: https://github.com/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP/clip/clip.py
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model_explainability import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
"ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
}
def _download(url: str, root: str):
    """Download *url* into directory *root*, verifying its SHA256 checksum.

    The expected checksum is embedded in the URL as the second-to-last path
    component.  An existing file with a matching checksum is reused; a
    mismatching file is re-downloaded with a warning.

    Returns:
        The path of the verified local file.

    Raises:
        RuntimeError: if the target path exists but is not a regular file, or
            if the downloaded data fails checksum verification.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        if (
            hashlib.sha256(open(download_target, "rb").read()).hexdigest()
            == expected_sha256
        ):
            return download_target
        else:
            warnings.warn(
                f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
            )

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        # Stream the download in 8 KiB chunks while updating a progress bar.
        with tqdm(
            total=int(source.info().get("Content-Length")),
            ncols=80,
            unit="iB",
            unit_scale=True,
            unit_divisor=1024,
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    if (
        hashlib.sha256(open(download_target, "rb").read()).hexdigest()
        != expected_sha256
    ):
        # BUG FIX: message previously read "does not not match"; also dropped
        # the stray f-prefix from a string with no placeholders.
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match"
        )

    return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px, overload_resolution=False):
    """Build CLIP's image preprocessing pipeline.

    Always converts to RGB, tensorizes, and applies CLIP's normalization.
    Unless overload_resolution is set, a Resize + CenterCrop(n_px) is
    prepended.  NOTE(review): the Resize target is hard-coded to 224
    regardless of n_px — confirm this is intentional.
    """
    core = [
        _convert_image_to_rgb,
        ToTensor(),
        Normalize(
            (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)
        ),
    ]
    if overload_resolution:
        return Compose(core)
    return Compose([Resize(224, interpolation=BICUBIC), CenterCrop(n_px)] + core)
def available_models() -> List[str]:
    """Names of the CLIP checkpoints this module knows how to download."""
    return [*_MODELS]
def load(
    name: str,
    device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
    jit: bool = False,
    download_root: str = None,
    overload_resolution=False,
):
    """Load a CLIP model
    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model or more hackable non-JIT model (default).
    download_root: str
        path to download the model files; by default, it uses "~/.cache/clip"
    overload_resolution : bool
        passed through to the preprocessing transform; when True, the
        resize/center-crop stage is skipped
    Returns
    -------
    model : torch.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    # Resolve `name` to a local checkpoint path (downloading if it is a known model).
    if name in _MODELS:
        model_path = _download(
            _MODELS[name], download_root or os.path.expanduser("~/.cache/clip")
        )
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(
            f"Model {name} not found; available models = {available_models()}"
        )

    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(
                f"File {model_path} is not a JIT archive. Loading as a state dict instead"
            )
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")

    if not jit:
        # Non-JIT path: rebuild the model from the state dict and return early.
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            model.float()
        return model, _transform(model.visual.input_resolution, overload_resolution)

    # patch the device names
    # Trace a trivial graph on the target device to obtain a Device constant
    # node, then copy its attributes over any hard-coded "cuda" constants
    # baked into the archived JIT graphs.
    device_holder = torch.jit.trace(
        lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]
    )
    device_node = [
        n
        for n in device_holder.graph.findAllNodes("prim::Constant")
        if "Device" in repr(n)
    ][-1]

    def patch_device(module):
        # Rewrite "cuda" device constants in this module's graph(s).
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []

        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)

        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith(
                    "cuda"
                ):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)

    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(
            lambda: torch.ones([]).float(), example_inputs=[]
        )
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            # Rewrite half-precision dtype constants (value 5 == torch.float16)
            # in aten::to calls so the graph runs in float32 on CPU.
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []

            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)

            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [
                        1,
                        2,
                    ]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)

        model.float()

    return model, _transform(model.input_resolution.item(), overload_resolution)
def tokenize(
    texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False
) -> torch.LongTensor:
    """Tokenize one string or a list of strings for CLIP.

    Each text is wrapped in start/end-of-text tokens and right-padded with
    zeros to `context_length` (77 for all CLIP models).  With truncate=True,
    over-long texts are cut and terminated with the end-of-text token;
    otherwise a RuntimeError is raised.

    Returns a (num_texts, context_length) LongTensor of token ids.
    """
    if isinstance(texts, str):
        texts = [texts]

    sot = _tokenizer.encoder["<|startoftext|>"]
    eot = _tokenizer.encoder["<|endoftext|>"]
    encoded = [[sot] + _tokenizer.encode(text) + [eot] for text in texts]
    result = torch.zeros(len(encoded), context_length, dtype=torch.long)

    for i, tokens in enumerate(encoded):
        if len(tokens) > context_length:
            if not truncate:
                raise RuntimeError(
                    f"Input {texts[i]} is too long for context length {context_length}"
                )
            tokens = tokens[:context_length]
            tokens[-1] = eot
        result[i, : len(tokens)] = torch.tensor(tokens)

    return result
| 9,663 | 34.270073 | 154 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/simple_tokenizer.py | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
    """Absolute path of the BPE vocabulary archive bundled next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
    """Bijective map from every byte value (0-255) to a printable unicode char.

    Printable latin-1 bytes map to themselves; the remaining bytes are
    assigned characters starting at code point 256.  This lets BPE operate on
    unicode strings without unknown tokens or whitespace/control characters.
    """
    base = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    codes = list(base)
    offset = 0
    for byte in range(2**8):
        if byte not in base:
            base.append(byte)
            codes.append(2**8 + offset)
            offset += 1
    return {b: chr(c) for b, c in zip(base, codes)}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    Word is represented as a tuple of symbols (symbols being variable-length
    strings).  Robustness fix: words with fewer than two symbols (including
    the empty tuple, which previously raised IndexError) yield an empty set.
    """
    return set(zip(word, word[1:]))
def basic_clean(text):
    """Repair mojibake with ftfy, undo (possibly double) HTML escaping, and strip."""
    fixed = ftfy.fix_text(text)
    # unescape twice: some corpora are HTML-escaped two levels deep.
    return html.unescape(html.unescape(fixed)).strip()
def whitespace_clean(text):
    """Collapse every run of whitespace to a single space and trim the ends."""
    return re.sub(r"\s+", " ", text).strip()
class SimpleTokenizer(object):
    """CLIP's byte-pair-encoding tokenizer.

    Text is lower-cased and cleaned, split by a regex into word-ish chunks,
    mapped byte-by-byte to printable unicode, and then merged greedily using
    the ranked BPE merge table loaded from the gzip vocabulary file.
    """

    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # Skip the version header line and keep the standard 48894 merges.
        merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
        merges = merges[1 : 49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        # Vocabulary: 256 byte symbols, their end-of-word variants, all merge
        # products, and the two special tokens.
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + "</w>" for v in vocab]
        for merge in merges:
            vocab.append("".join(merge))
        vocab.extend(["<|startoftext|>", "<|endoftext|>"])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        # Lower rank == higher merge priority.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Memoizes bpe() results; pre-seeded so special tokens pass through.
        self.cache = {
            "<|startoftext|>": "<|startoftext|>",
            "<|endoftext|>": "<|endoftext|>",
        }
        # Splits text into special tokens, contractions, letter runs, digits,
        # and other non-space symbols.
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE,
        )

    def bpe(self, token):
        """Greedily merge the characters of *token* into BPE sub-words.

        Returns the space-joined merged symbols; the last character carries
        the "</w>" end-of-word marker.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            # Merge the best-ranked adjacent pair; stop when none remain in the table.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Clean, chunk, byte-map, and BPE-merge *text* into a list of token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(
                self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
            )
        return bpe_tokens

    def decode(self, tokens):
        """Invert encode(): map ids back to text, restoring bytes and word spacing."""
        text = "".join([self.decoder[token] for token in tokens])
        text = (
            bytearray([self.byte_decoder[c] for c in text])
            .decode("utf-8", errors="replace")
            .replace("</w>", " ")
        )
        return text
| 4,851 | 31.13245 | 111 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/auxiliary.py | # adding hooks, copied from: https://github.com/hila-chefer/Transformer-MM-Explainability/blob/e63b4ab0d0722faa11ff2f7549c4f88074e7edd7/CLIP/clip/auxilary.py
import torch
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from math import ceil, floor
# We define this function as _pad because it takes an argument
# named pad, which clobbers the recursive reference to the pad
# function needed for __torch_function__ support
pad = F.pad
# This class exists solely for Transformer; it has an annotation stating
# that bias is never None, which appeases TorchScript
def interpolate_positional_emb(positional_embedding, target_seq_len, source_seq_len=50):
    """Linearly resample a learned positional embedding to a new sequence length.

    Args:
        positional_embedding: (num_positions, dim) tensor of embeddings.
        target_seq_len: desired number of output positions.
        source_seq_len: nominal source length used to compute the sampling
            stride.  Defaults to 50 (the original hard-coded value, CLIP's
            visual sequence length for this model), so existing callers are
            unaffected; pass len(positional_embedding) to generalize.

    Returns:
        (target_seq_len, dim) tensor on the same device/dtype as the input.
        Positions that would sample past the end of the table are clamped to
        the last embedding, matching the original behavior.
    """
    out = positional_embedding.new_zeros(
        target_seq_len, positional_embedding.shape[1]
    )
    # Each output position i samples source coordinate i / (target / source).
    scale = target_seq_len / source_seq_len
    for i in range(target_seq_len):
        pos = float(i) / scale
        lo = floor(pos)
        hi = ceil(pos)
        if hi < len(positional_embedding):
            out[i] = torch.lerp(
                positional_embedding[lo], positional_embedding[hi], pos - lo
            )
        else:
            # Past the end of the source table: clamp to the final embedding.
            out[i] = positional_embedding[-1]
    return out
class _LinearWithBias(torch.nn.Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
def multi_head_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight: Tensor,
    in_proj_bias: Tensor,
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Tensor,
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
    attention_probs_forward_hook=None,
    attention_probs_backwards_hook=None,
) -> Tuple[Tensor, Optional[Tensor]]:
    """Multi-head attention forward pass with explainability hooks.

    Copy of torch's functional multi-head attention extended with
    ``attention_probs_forward_hook`` / ``attention_probs_backwards_hook``,
    which receive the post-softmax attention probabilities (shape
    (bsz * num_heads, tgt_len, src_len)) so relevance maps can be extracted.
    Inputs are sequence-first: query is (tgt_len, bsz, embed_dim).  Returns
    (attn_output, head-averaged attention weights or None).
    """
    if not torch.jit.is_scripting():
        tens_ops = (
            query,
            key,
            value,
            in_proj_weight,
            in_proj_bias,
            bias_k,
            bias_v,
            out_proj_weight,
            out_proj_bias,
        )
        # NOTE: the hook arguments are NOT forwarded through this
        # __torch_function__ dispatch path.
        if any([type(t) is not Tensor for t in tens_ops]) and F.has_torch_function(
            tens_ops
        ):
            return F.handle_torch_function(
                multi_head_attention_forward,
                tens_ops,
                query,
                key,
                value,
                embed_dim_to_check,
                num_heads,
                in_proj_weight,
                in_proj_bias,
                bias_k,
                bias_v,
                add_zero_attn,
                dropout_p,
                out_proj_weight,
                out_proj_bias,
                training=training,
                key_padding_mask=key_padding_mask,
                need_weights=need_weights,
                attn_mask=attn_mask,
                use_separate_proj_weight=use_separate_proj_weight,
                q_proj_weight=q_proj_weight,
                k_proj_weight=k_proj_weight,
                v_proj_weight=v_proj_weight,
                static_k=static_k,
                static_v=static_v,
            )
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    # allow MHA to have different sizes for the feature dimension
    assert key.size(0) == value.size(0) and key.size(1) == value.size(1)

    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    scaling = float(head_dim) ** -0.5

    # --- Input projections: q, k, v -----------------------------------------
    if not use_separate_proj_weight:
        if torch.equal(query, key) and torch.equal(key, value):
            # self-attention
            q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)

        elif torch.equal(key, value):
            # encoder-decoder attention
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            if key is None:
                assert value is None
                k = None
                v = None
            else:
                # This is inline in_proj function with in_proj_weight and in_proj_bias
                _b = in_proj_bias
                _start = embed_dim
                _end = None
                _w = in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                k, v = F.linear(key, _w, _b).chunk(2, dim=-1)

        else:
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim
            _end = embed_dim * 2
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            k = F.linear(key, _w, _b)

            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim * 2
            _end = None
            _w = in_proj_weight[_start:, :]
            if _b is not None:
                _b = _b[_start:]
            v = F.linear(value, _w, _b)
    else:
        # Separate per-projection weight matrices.
        q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
        len1, len2 = q_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == query.size(-1)

        k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
        len1, len2 = k_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == key.size(-1)

        v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
        len1, len2 = v_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == value.size(-1)

        if in_proj_bias is not None:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
            k = F.linear(
                key, k_proj_weight_non_opt, in_proj_bias[embed_dim : (embed_dim * 2)]
            )
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2) :])
        else:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias)
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias)
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias)
    # Scale queries by 1/sqrt(head_dim).
    q = q * scaling

    # --- attn_mask validation and reshaping ---------------------------------
    if attn_mask is not None:
        assert (
            attn_mask.dtype == torch.float32
            or attn_mask.dtype == torch.float64
            or attn_mask.dtype == torch.float16
            or attn_mask.dtype == torch.uint8
            or attn_mask.dtype == torch.bool
        ), "Only float, byte, and bool types are supported for attn_mask, not {}".format(
            attn_mask.dtype
        )
        if attn_mask.dtype == torch.uint8:
            warnings.warn(
                "Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
            )
            attn_mask = attn_mask.to(torch.bool)

        if attn_mask.dim() == 2:
            attn_mask = attn_mask.unsqueeze(0)
            if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                raise RuntimeError("The size of the 2D attn_mask is not correct.")
        elif attn_mask.dim() == 3:
            if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
                raise RuntimeError("The size of the 3D attn_mask is not correct.")
        else:
            raise RuntimeError(
                "attn_mask's dimension {} is not supported".format(attn_mask.dim())
            )
        # attn_mask's dim is 3 now.

    # convert ByteTensor key_padding_mask to bool
    if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
        warnings.warn(
            "Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
        )
        key_padding_mask = key_padding_mask.to(torch.bool)

    # --- Optional learned bias tokens appended to k/v -----------------------
    if bias_k is not None and bias_v is not None:
        if static_k is None and static_v is None:
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = pad(key_padding_mask, (0, 1))
        else:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
    else:
        assert bias_k is None
        assert bias_v is None

    # Reshape to (bsz * num_heads, seq_len, head_dim) for batched matmul.
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

    # Pre-computed (cached) key/value override the projections above.
    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k

    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v

    src_len = k.size(1)

    if key_padding_mask is not None:
        assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len

    # Optionally append an all-zero attention slot.
    if add_zero_attn:
        src_len += 1
        k = torch.cat(
            [
                k,
                torch.zeros(
                    (k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device
                ),
            ],
            dim=1,
        )
        v = torch.cat(
            [
                v,
                torch.zeros(
                    (v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device
                ),
            ],
            dim=1,
        )
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))

    # --- Scaled dot-product attention ---------------------------------------
    attn_output_weights = torch.bmm(q, k.transpose(1, 2))
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]

    if attn_mask is not None:
        if attn_mask.dtype == torch.bool:
            attn_output_weights.masked_fill_(attn_mask, float("-inf"))
        else:
            attn_output_weights += attn_mask

    if key_padding_mask is not None:
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.masked_fill(
            key_padding_mask.unsqueeze(1).unsqueeze(2),
            float("-inf"),
        )
        attn_output_weights = attn_output_weights.view(
            bsz * num_heads, tgt_len, src_len
        )

    attn_output_weights = F.softmax(attn_output_weights, dim=-1)
    attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training)

    # use hooks for the attention weights if necessary
    # Both hooks must be supplied for the forward hook to fire; the backward
    # hook registration is currently commented out.
    if (
        attention_probs_forward_hook is not None
        and attention_probs_backwards_hook is not None
    ):
        attention_probs_forward_hook(attn_output_weights)
        # attn_output_weights.register_hook(attention_probs_backwards_hook)

    attn_output = torch.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)

    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
class MultiheadAttention(torch.nn.Module):
    r"""Allows the model to jointly attend to information
    from different representation subspaces.
    See reference: Attention Is All You Need
    .. math::
        \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
        \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
    Args:
        embed_dim: total dimension of the model.
        num_heads: parallel attention heads.
        dropout: a Dropout layer on attn_output_weights. Default: 0.0.
        bias: add bias as module parameter. Default: True.
        add_bias_kv: add bias to the key and value sequences at dim=0.
        add_zero_attn: add a new batch of zeros to the key and
            value sequences at dim=1.
        kdim: total number of features in key. Default: None.
        vdim: total number of features in value. Default: None.
        Note: if kdim and vdim are None, they will be set to embed_dim such that
        query, key, and value have the same number of features.
    This is a fork of ``torch.nn.MultiheadAttention`` whose only functional
    difference is that ``forward`` threads two optional attention-probability
    hooks through to the module-level ``multi_head_attention_forward``.
    Examples::
        >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
        >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
    """
    # Declared Optional so TorchScript/type checkers accept the None default.
    bias_k: Optional[torch.Tensor]
    bias_v: Optional[torch.Tensor]
    def __init__(
        self,
        embed_dim,
        num_heads,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        kdim=None,
        vdim=None,
    ):
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        # Key/value feature dims default to embed_dim (plain self-attention).
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        if self._qkv_same_embed_dim is False:
            # Separate Q/K/V projections when key/value dims differ.
            # register_parameter(name, None) keeps the unused attribute present
            # so the state_dict layout matches stock nn.MultiheadAttention.
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.register_parameter("in_proj_weight", None)
        else:
            # Single packed QKV projection: 3 * embed_dim output rows.
            self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
            self.register_parameter("q_proj_weight", None)
            self.register_parameter("k_proj_weight", None)
            self.register_parameter("v_proj_weight", None)
        if bias:
            self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
        else:
            self.register_parameter("in_proj_bias", None)
        self.out_proj = _LinearWithBias(embed_dim, embed_dim)
        if add_bias_kv:
            self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
            self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self._reset_parameters()
    def _reset_parameters(self):
        # Xavier-uniform projections, zero biases, Xavier-normal k/v biases
        # (same scheme as stock nn.MultiheadAttention).
        if self._qkv_same_embed_dim:
            xavier_uniform_(self.in_proj_weight)
        else:
            xavier_uniform_(self.q_proj_weight)
            xavier_uniform_(self.k_proj_weight)
            xavier_uniform_(self.v_proj_weight)
        if self.in_proj_bias is not None:
            constant_(self.in_proj_bias, 0.0)
            constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            xavier_normal_(self.bias_v)
    def __setstate__(self, state):
        # Support loading old MultiheadAttention checkpoints generated by v1.1.0,
        # which predate the _qkv_same_embed_dim attribute.
        if "_qkv_same_embed_dim" not in state:
            state["_qkv_same_embed_dim"] = True
        super(MultiheadAttention, self).__setstate__(state)
    def forward(
        self,
        query,
        key,
        value,
        key_padding_mask=None,
        need_weights=True,
        attn_mask=None,
        attention_probs_forward_hook=None,
        attention_probs_backwards_hook=None,
    ):
        r"""
    Args:
        query, key, value: map a query and a set of key-value pairs to an output.
            See "Attention Is All You Need" for more details.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. When given a binary mask and a value is True,
            the corresponding value on the attention layer will be ignored. When given
            a byte mask and a value is non-zero, the corresponding value on the attention
            layer will be ignored
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
            the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        attention_probs_forward_hook / attention_probs_backwards_hook: optional
            callables passed through to the functional attention; the forward
            hook receives the post-softmax attention weights.
    Shape:
        - Inputs:
        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
          If a ByteTensor is provided, the non-zero positions will be ignored while the position
          with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
          3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
          S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked
          positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
          while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
          is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
          is provided, it will be added to the attention weight.
        - Outputs:
        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
          E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
          L is the target sequence length, S is the source sequence length.
        """
        # Dispatch to the module-level functional implementation; the separate
        # q/k/v weights branch is taken when kdim/vdim differ from embed_dim.
        if not self._qkv_same_embed_dim:
            return multi_head_attention_forward(
                query,
                key,
                value,
                self.embed_dim,
                self.num_heads,
                self.in_proj_weight,
                self.in_proj_bias,
                self.bias_k,
                self.bias_v,
                self.add_zero_attn,
                self.dropout,
                self.out_proj.weight,
                self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask,
                need_weights=need_weights,
                attn_mask=attn_mask,
                use_separate_proj_weight=True,
                q_proj_weight=self.q_proj_weight,
                k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight,
                attention_probs_forward_hook=attention_probs_forward_hook,
                attention_probs_backwards_hook=attention_probs_backwards_hook,
            )
        else:
            return multi_head_attention_forward(
                query,
                key,
                value,
                self.embed_dim,
                self.num_heads,
                self.in_proj_weight,
                self.in_proj_bias,
                self.bias_k,
                self.bias_v,
                self.add_zero_attn,
                self.dropout,
                self.out_proj.weight,
                self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask,
                need_weights=need_weights,
                attn_mask=attn_mask,
                attention_probs_forward_hook=attention_probs_forward_hook,
                attention_probs_backwards_hook=attention_probs_backwards_hook,
            )
| 21,829 | 38.981685 | 157 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/clip.py | import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
# torchvision >= 0.9 exposes interpolation modes via InterpolationMode;
# fall back to the PIL constant on older versions.
try:
    from torchvision.transforms import InterpolationMode
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    BICUBIC = Image.BICUBIC
# if [int(i) for i in torch.__version__.split(".")] < [1, 7, 1]:
#     warnings.warn("PyTorch version 1.7.1 or higher is recommended")
# Public API of this module.
__all__ = ["available_models", "load", "tokenize", "tokenizer"]
# Shared BPE tokenizer instance used by tokenize().
tokenizer = _Tokenizer()
# Model name -> download URL. The expected SHA256 checksum is embedded as the
# second-to-last path component of each URL (verified by _download).
_MODELS = {
    "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
    "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
    "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
    "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
    "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
    "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
    "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
    "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
}
def _download(url: str, root: str):
    """Download `url` into directory `root`, verifying its SHA256 checksum.

    The expected checksum is taken from the second-to-last path component of
    the URL (the convention used by the OpenAI CLIP release URLs).

    Returns
    -------
    str
        Path to the downloaded (or already-cached and verified) file.

    Raises
    ------
    RuntimeError
        If the target path exists but is not a regular file, or if the
        downloaded bytes do not match the expected checksum.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        # Cache hit: reuse the file only if its checksum still matches.
        if (
            hashlib.sha256(open(download_target, "rb").read()).hexdigest()
            == expected_sha256
        ):
            return download_target
        else:
            warnings.warn(
                f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
            )

    # Stream the download in 8 KiB chunks with a progress bar.
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")),
            ncols=80,
            unit="iB",
            unit_scale=True,
            unit_divisor=1024,
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    if (
        hashlib.sha256(open(download_target, "rb").read()).hexdigest()
        != expected_sha256
    ):
        # Fix: the original message read "does not not match" (double negative);
        # the stray f-prefix (no placeholders) is also dropped.
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match"
        )

    return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px, overload_resolution=False):
    """Build the CLIP image preprocessing pipeline.

    n_px: the model's input resolution (used for the center crop).
    overload_resolution: when True, skip the resize/crop steps entirely so
        the caller can feed images at an arbitrary resolution.
    """
    transforms = [
        _convert_image_to_rgb,
        ToTensor(),
        # CLIP's dataset channel means / standard deviations.
        Normalize(
            (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)
        ),
    ]
    if not overload_resolution:
        # NOTE(review): Resize is hard-coded to 224 while the crop uses n_px;
        # upstream CLIP resizes to n_px. Presumably deliberate in this fork,
        # but for models whose n_px != 224 this crops from a 224-px resize —
        # confirm intended behavior.
        transforms = [Resize(224, interpolation=BICUBIC), CenterCrop(n_px)] + transforms
    return Compose(transforms)
def available_models() -> List[str]:
    """Return the names of the CLIP models that can be loaded."""
    return [model_name for model_name in _MODELS]
def load(
    name: str,
    device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
    jit: bool = False,
    download_root: str = None,
    overload_resolution=False,
):
    """Load a CLIP model
    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model or more hackable non-JIT model (default).
    download_root: str
        path to download the model files; by default, it uses "~/.cache/clip"
    overload_resolution : bool
        Passed through to the preprocessing transform; when True the
        resize/crop steps are skipped.
    Returns
    -------
    model : torch.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    # Resolve `name` to a local checkpoint path (downloading if necessary).
    if name in _MODELS:
        model_path = _download(
            _MODELS[name], download_root or os.path.expanduser("~/.cache/clip")
        )
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(
            f"Model {name} not found; available models = {available_models()}"
        )
    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(
                f"File {model_path} is not a JIT archive. Loading as a state dict instead"
            )
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")
    if not jit:
        # Eager path: rebuild a CLIP module from the (possibly JIT-extracted)
        # state dict.
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            # fp16 weights are not usable on CPU; upcast to fp32.
            model.float()
        return model, _transform(model.visual.input_resolution, overload_resolution)
    # JIT path: the traced graph has device (and dtype) constants baked in;
    # rewrite them to match the requested device below.
    # patch the device names
    device_holder = torch.jit.trace(
        lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]
    )
    device_node = [
        n
        for n in device_holder.graph.findAllNodes("prim::Constant")
        if "Device" in repr(n)
    ][-1]
    def patch_device(module):
        # Collect every graph owned by this module (forward + optional forward1).
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)
        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                # Replace any constant "cuda[:N]" device with the target device.
                if "value" in node.attributeNames() and str(node["value"]).startswith(
                    "cuda"
                ):
                    node.copyAttributes(device_node)
    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)
    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(
            lambda: torch.ones([]).float(), example_inputs=[]
        )
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()
        def patch_float(module):
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)
            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [
                        1,
                        2,
                    ]:  # dtype can be the second or third argument to aten::to()
                        # 5 is the TorchScript scalar-type enum value for half.
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)
        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)
        model.float()
    return model, _transform(model.input_resolution.item(), overload_resolution)
def tokenize(
    texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False
) -> torch.LongTensor:
    """Tokenize one string or a list of strings for CLIP's text encoder.

    Each sequence is wrapped in <|startoftext|>/<|endoftext|> markers and
    right-padded with zeros to `context_length` (77 for all CLIP models).
    Sequences longer than `context_length` are truncated (keeping the
    end-of-text token last) when `truncate` is True; otherwise a
    RuntimeError is raised.

    Returns a LongTensor of shape [number of input strings, context_length].
    """
    if isinstance(texts, str):
        texts = [texts]

    sot = tokenizer.encoder["<|startoftext|>"]
    eot = tokenizer.encoder["<|endoftext|>"]

    result = torch.zeros(len(texts), context_length, dtype=torch.long)
    for row, text in enumerate(texts):
        tokens = [sot] + tokenizer.encode(text) + [eot]
        if len(tokens) > context_length:
            if not truncate:
                raise RuntimeError(
                    f"Input {texts[row]} is too long for context length {context_length}"
                )
            tokens = tokens[:context_length]
            tokens[-1] = eot
        result[row, : len(tokens)] = torch.tensor(tokens)
    return result
| 9,497 | 33.791209 | 154 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/model.py | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .auxiliary import interpolate_positional_emb
class Bottleneck(nn.Module):
    """ResNet bottleneck block with CLIP's anti-aliasing modifications.

    Differences from torchvision's Bottleneck: all convolutions use stride 1,
    and spatial downsampling (when stride > 1) is done by an AvgPool2d after
    conv2 — and prepended to the shortcut's 1x1 conv.
    """
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            # (keys "-1"/"0"/"1" preserve the original checkpoint layout)
            self.downsample = nn.Sequential(
                OrderedDict(
                    [
                        ("-1", nn.AvgPool2d(stride)),
                        (
                            "0",
                            nn.Conv2d(
                                inplanes,
                                planes * self.expansion,
                                1,
                                stride=1,
                                bias=False,
                            ),
                        ),
                        ("1", nn.BatchNorm2d(planes * self.expansion)),
                    ]
                )
            )
    def forward(self, x: torch.Tensor):
        """conv1 -> conv2 -> avgpool -> conv3 plus the (optionally
        downsampled) identity shortcut, followed by a final ReLU."""
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class AttentionPool2d(nn.Module):
    """Pool a NCHW feature map to one vector per sample via QKV attention.

    A mean-pooled token is prepended as position 0; only that position's
    attention output (projected by c_proj) is returned.
    """
    def __init__(
        self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
    ):
        super().__init__()
        # spacial_dim**2 spatial tokens + 1 pooled token.
        self.positional_embedding = nn.Parameter(
            torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
        )
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads
    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
            2, 0, 1
        )  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        # NOTE(review): the hard-coded 50-token threshold matches 7x7 spatial
        # tokens + 1 pooled token (spacial_dim == 7); confirm for other
        # backbones before reusing.
        assert len(x) >= 50
        if len(x) > 50:
            # Input larger than the pretrained grid: interpolate the
            # positional embeddings to the actual sequence length.
            target_seq_len = len(x)
            pe = interpolate_positional_emb(self.positional_embedding, target_seq_len)
            x = x + pe[:, None, :]  # (HW+1)NC
        else:
            x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        x, _ = F.multi_head_attention_forward(
            query=x,
            key=x,
            value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat(
                [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
            ),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False,
        )
        # Return only the pooled (position-0) token.
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """
    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution
        # the 3-layer stem
        self.conv1 = nn.Conv2d(
            3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(
            width // 2, width // 2, kernel_size=3, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)
        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
        embed_dim = width * 32  # the ResNet feature dimension
        # Total downsampling factor is 32, hence the // 32 spatial dim.
        self.attnpool = AttentionPool2d(
            input_resolution // 32, embed_dim, heads, output_dim
        )
    def _make_layer(self, planes, blocks, stride=1):
        # First block may downsample (stride > 1); the remaining blocks in the
        # stage keep stride 1 and operate on the expanded channel count.
        layers = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        def stem(x):
            # Three conv-bn-relu blocks followed by a 2x2 average pool.
            for conv, bn in [
                (self.conv1, self.bn1),
                (self.conv2, self.bn2),
                (self.conv3, self.bn3),
            ]:
                x = self.relu(bn(conv(x)))
            x = self.avgpool(x)
            return x
        # Match the input dtype to the (possibly fp16) weights.
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)
        return x
class LayerNorm(nn.LayerNorm):
    """LayerNorm that tolerates fp16 inputs by normalizing in fp32.

    The input is upcast to float32 for the normalization (numerically fragile
    in half precision) and the result is cast back to the input's dtype.
    """

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(input_dtype)
class QuickGELU(nn.Module):
    """Fast sigmoid-based approximation of the GELU activation."""

    def forward(self, x: torch.Tensor):
        # x * sigmoid(1.702 * x) closely tracks the exact GELU curve.
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block: x + attn(ln_1(x)), then x + mlp(ln_2(x))."""
    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # 4x expansion MLP; names c_fc/gelu/c_proj match CLIP checkpoints.
        self.mlp = nn.Sequential(
            OrderedDict(
                [
                    ("c_fc", nn.Linear(d_model, d_model * 4)),
                    ("gelu", QuickGELU()),
                    ("c_proj", nn.Linear(d_model * 4, d_model)),
                ]
            )
        )
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
    def attention(self, x: torch.Tensor):
        # Convert the cached mask to the input's dtype/device on every call;
        # the converted mask is stored back on the module.
        self.attn_mask = (
            self.attn_mask.to(dtype=x.dtype, device=x.device)
            if self.attn_mask is not None
            else None
        )
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """Stack of ResidualAttentionBlocks sharing one (optional) attention mask."""
    def __init__(
        self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.resblocks = nn.Sequential(
            *[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
        )
    def forward(self, x: torch.Tensor, tile_attn_mask: torch.Tensor = None):
        """Run the block stack on x.

        If tile_attn_mask is given, it temporarily replaces every block's
        attn_mask for this call only: the original masks are saved first and
        restored after the forward pass.
        """
        prev_attn_masks = []
        if tile_attn_mask is not None:
            # Save each block's current mask, then install the override.
            for resblock in filter(
                lambda module: isinstance(module, ResidualAttentionBlock),
                self.resblocks.modules(),
            ):
                prev_attn_masks.append(
                    resblock.attn_mask.clone()
                    if resblock.attn_mask is not None
                    else None
                )
                resblock.attn_mask = tile_attn_mask
        x = self.resblocks(x)
        if tile_attn_mask is not None:
            # Restore the saved masks (modules() yields the same order).
            for resblock, prev_attn_mask in zip(
                filter(
                    lambda module: isinstance(module, ResidualAttentionBlock),
                    self.resblocks.modules(),
                ),
                prev_attn_masks,
            ):
                resblock.attn_mask = prev_attn_mask
        return x
class VisionTransformer(nn.Module):
    """ViT image encoder: patchify -> prepend CLS -> transformer -> project.

    The returned feature is the (layer-normed, projected) CLS token.
    """
    def __init__(
        self,
        input_resolution: int,
        patch_size: int,
        width: int,
        layers: int,
        heads: int,
        output_dim: int,
    ):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding (stride == kernel == patch_size).
        self.conv1 = nn.Conv2d(
            in_channels=3,
            out_channels=width,
            kernel_size=patch_size,
            stride=patch_size,
            bias=False,
        )
        scale = width**-0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        # One embedding per patch plus one for the CLS token.
        self.positional_embedding = nn.Parameter(
            scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)
        )
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
    def forward(self, x: torch.Tensor, **kwargs):
        """Encode images; extra kwargs are forwarded to the transformer
        (e.g. tile_attn_mask)."""
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        # shape = [*, width, grid ** 2]
        x = x.reshape(x.shape[0], x.shape[1], -1)
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Prepend the (broadcast) class token to every sample in the batch.
        x = torch.cat(
            [
                self.class_embedding.to(x.dtype)
                + torch.zeros(
                    x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
                ),
                x,
            ],
            dim=1,
        )  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, **kwargs)
        x = x.permute(1, 0, 2)  # LND -> NLD
        # Keep only the CLS token's final representation.
        x = self.ln_post(x[:, 0, :])
        if self.proj is not None:
            x = x @ self.proj
        return x
class CLIP(nn.Module):
    """Full CLIP model: an image encoder (ResNet or ViT) and a text
    transformer, both projecting into a shared embed_dim space."""
    def __init__(
        self,
        embed_dim: int,
        # vision
        image_resolution: int,
        vision_layers: Union[Tuple[int, int, int, int], int],
        vision_width: int,
        vision_patch_size: int,
        # text
        context_length: int,
        vocab_size: int,
        transformer_width: int,
        transformer_heads: int,
        transformer_layers: int,
    ):
        super().__init__()
        self.context_length = context_length
        # A tuple/list of stage sizes selects the ResNet backbone; a single
        # int selects the ViT backbone.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width,
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim,
            )
        # Text transformer with a causal mask over the token positions.
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask(),
        )
        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(
            torch.empty(self.context_length, transformer_width)
        )
        self.ln_final = LayerNorm(transformer_width)
        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # Temperature learned in log space; init corresponds to 1 / 0.07.
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        self.initialize_parameters()
    def initialize_parameters(self):
        """Initialize text-side weights and (for ResNet) the attention pool."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features**-0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
            # Zero the final BN scale of each bottleneck so residual branches
            # start near identity.
            for resnet_block in [
                self.visual.layer1,
                self.visual.layer2,
                self.visual.layer3,
                self.visual.layer4,
            ]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)
        # Depth-scaled init for the text transformer's projections.
        proj_std = (self.transformer.width**-0.5) * (
            (2 * self.transformer.layers) ** -0.5
        )
        attn_std = self.transformer.width**-0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5)
    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask
    @property
    def dtype(self):
        # Weight dtype of the visual stem conv (fp16 after convert_weights).
        return self.visual.conv1.weight.dtype
    def encode_image(self, image, **kwargs):
        """Encode images into the shared space (not normalized); extra kwargs
        are forwarded to the visual backbone."""
        return self.visual(image.type(self.dtype), **kwargs)
    def encode_text(self, text, return_transformer_outputs=False):
        """Encode token ids [batch, n_ctx] into the shared space.

        When return_transformer_outputs is True, also return the per-token
        final-layer features (after ln_final, before the projection).
        """
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding.type(self.dtype)[: x.shape[1], :]
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        transformer_output = self.ln_final(x).type(self.dtype)
        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = (
            transformer_output[
                torch.arange(transformer_output.shape[0]), text.argmax(dim=-1)
            ]
            @ self.text_projection
        )
        if return_transformer_outputs:
            return x, transformer_output
        return x
    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text) similarity logits."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)
        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()
        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16, in place."""

    def _to_half(module):
        # Conv / linear layers: halve weight and (optional) bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()

        # MultiheadAttention stores its projections as plain tensor attributes.
        if isinstance(module, nn.MultiheadAttention):
            attr_names = [f"{prefix}_proj_weight" for prefix in ["in", "q", "k", "v"]]
            attr_names += ["in_proj_bias", "bias_k", "bias_v"]
            for attr_name in attr_names:
                tensor = getattr(module, attr_name)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        # CLIP-specific projection parameters on the top-level modules.
        for name in ["text_projection", "proj"]:
            if hasattr(module, name):
                param = getattr(module, name)
                if param is not None:
                    param.data = param.data.half()

    model.apply(_to_half)
def build_model(state_dict: dict):
    """Construct a CLIP model from a checkpoint state_dict.

    All hyperparameters (backbone type, widths, layer counts, resolution,
    vocab/context sizes) are inferred from tensor shapes and key names; the
    model weights are then converted to fp16 and loaded.
    """
    # ViT checkpoints contain a visual projection matrix; ResNets do not.
    vit = "visual.proj" in state_dict
    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len(
            [
                k
                for k in state_dict.keys()
                if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
            ]
        )
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # Positional embedding has grid**2 + 1 entries (+1 for the CLS token).
        grid_size = round(
            (state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5
        )
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet: count bottleneck blocks in each of the four stages.
        counts: list = [
            len(
                set(
                    k.split(".")[2]
                    for k in state_dict
                    if k.startswith(f"visual.layer{b}")
                )
            )
            for b in [1, 2, 3, 4]
        ]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round(
            (state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5
        )
        vision_patch_size = None
        assert (
            output_width**2 + 1
            == state_dict["visual.attnpool.positional_embedding"].shape[0]
        )
        # The ResNet backbone downsamples by a factor of 32 overall.
        image_resolution = output_width * 32
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(
        set(
            k.split(".")[2]
            for k in state_dict
            if k.startswith(f"transformer.resblocks")
        )
    )
    model = CLIP(
        embed_dim,
        image_resolution,
        vision_layers,
        vision_width,
        vision_patch_size,
        context_length,
        vocab_size,
        transformer_width,
        transformer_heads,
        transformer_layers,
    )
    # These scalar entries (present in some exports) are not module parameters.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]
    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| 20,260 | 33.457483 | 112 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/clip_gradcam.py | from typing import List
import torch
import torch.nn as nn
from .clip_explainability import load
from .clip import tokenize
from torch import device
import numpy as np
import torch.nn.functional as nnf
import itertools
def zeroshot_classifier(clip_model, classnames, templates, device):
    """Build zero-shot classifier weights from class names and prompt templates.

    Each class name is formatted into every template, all prompts are encoded
    with the CLIP text encoder, L2-normalized, and averaged over templates.
    Returns a tensor of shape [dim, n classes].
    """
    with torch.no_grad():
        # One prompt per (classname, template) pair, classnames outermost —
        # matches the per-class grouping used for the view() below.
        prompts = [
            template.format(classname)
            for classname in classnames
            for template in templates
        ]
        token_ids = tokenize(prompts).to(device)
        embeddings = clip_model.encode_text(token_ids)
        embeddings = embeddings.view(len(classnames), len(templates), -1)
        embeddings /= embeddings.norm(dim=-1, keepdim=True)
        class_weights = embeddings.mean(dim=1)
        return class_weights.T  # shape: [dim, n classes]
class ClipGradcam(nn.Module):
    def __init__(
        self,
        clip_model_name: str,
        classes: List[str],
        templates: List[str],
        device: device,
        num_layers=10,
        positive_attn_only=False,
        **kwargs
    ):
        """Wrap a CLIP ViT model for grad-CAM style relevance extraction.

        clip_model_name: model key (must be one of the ViT entries in
            num_res_attn_blocks below).
        classes: class names used to build the text prompts.
        templates: prompt templates, each .format()-ed with a class name.
        device: device for the model and text features.
        num_layers: attention blocks with index <= num_layers are excluded
            from the relevance rollout (see interpret()).
        positive_attn_only: clamp negative grad*attention contributions to 0.
        **kwargs: forwarded to clip_explainability.load().
        """
        super(ClipGradcam, self).__init__()
        self.clip_model_name = clip_model_name
        self.model, self.preprocess = load(clip_model_name, device=device, **kwargs)
        self.templates = templates
        self.device = device
        self.target_classes = None
        # Pre-computes the per-class text features (class_to_language_feature).
        self.set_classes(classes)
        self.num_layers = num_layers
        self.positive_attn_only = positive_attn_only
        # Residual attention block count per supported ViT backbone; raises
        # KeyError for unsupported (e.g. ResNet) model names.
        self.num_res_attn_blocks = {
            "ViT-B/32": 12,
            "ViT-B/16": 12,
            "ViT-L/14": 16,
            "ViT-L/14@336px": 16,
        }[clip_model_name]
    def forward(self, x: torch.Tensor, o: List[str]):
        """
        non-standard hack around an nn, really should be more principled here

        Computes relevance maps for a batch of images `x` against the prompts
        named in `o` (keys into self.class_to_language_feature, populated by
        set_classes). Returns a tensor of shape
        [len(o), batch, grid, grid] (see interpret()).
        """
        image_features = self.model.encode_image(x.to(self.device))
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        # Stack the pre-computed text features for the requested prompts.
        zeroshot_weights = torch.cat(
            [self.class_to_language_feature[prompt] for prompt in o], dim=1
        )
        # 100.0 is CLIP's standard logit scaling for zero-shot evaluation.
        logits_per_image = 100.0 * image_features @ zeroshot_weights
        return self.interpret(logits_per_image, self.model, self.device)
def interpret(self, logits_per_image, model, device):
# modified from: https://colab.research.google.com/github/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP_explainability.ipynb#scrollTo=fWKGyu2YAeSV
batch_size = logits_per_image.shape[0]
num_prompts = logits_per_image.shape[1]
one_hot = [logit for logit in logits_per_image.sum(dim=0)]
model.zero_grad()
image_attn_blocks = list(
dict(model.visual.transformer.resblocks.named_children()).values()
)
num_tokens = image_attn_blocks[0].attn_probs.shape[-1]
R = torch.eye(
num_tokens, num_tokens, dtype=image_attn_blocks[0].attn_probs.dtype
).to(device)
R = R[None, None, :, :].repeat(num_prompts, batch_size, 1, 1)
for i, block in enumerate(image_attn_blocks):
if i <= self.num_layers:
continue
# TODO try scaling block.attn_probs by value magnitude
# TODO actual parallelized prompt gradients
grad = torch.stack(
[
torch.autograd.grad(logit, [block.attn_probs], retain_graph=True)[
0
].detach()
for logit in one_hot
]
)
grad = grad.view(
num_prompts,
batch_size,
self.num_res_attn_blocks,
num_tokens,
num_tokens,
)
cam = (
block.attn_probs.view(
1, batch_size, self.num_res_attn_blocks, num_tokens, num_tokens
)
.detach()
.repeat(num_prompts, 1, 1, 1, 1)
)
cam = cam.reshape(num_prompts, batch_size, -1, cam.shape[-1], cam.shape[-1])
grad = grad.reshape(
num_prompts, batch_size, -1, grad.shape[-1], grad.shape[-1]
)
cam = grad * cam
cam = cam.reshape(
num_prompts * batch_size, -1, cam.shape[-1], cam.shape[-1]
)
if self.positive_attn_only:
cam = cam.clamp(min=0)
# average of all heads
cam = cam.mean(dim=-3)
R = R + torch.bmm(
cam, R.view(num_prompts * batch_size, num_tokens, num_tokens)
).view(num_prompts, batch_size, num_tokens, num_tokens)
image_relevance = R[:, :, 0, 1:]
img_dim = int(np.sqrt(num_tokens - 1))
image_relevance = image_relevance.reshape(
num_prompts, batch_size, img_dim, img_dim
)
return image_relevance
def set_classes(self, classes):
self.target_classes = classes
language_features = zeroshot_classifier(
self.model, self.target_classes, self.templates, self.device
)
self.class_to_language_feature = {}
for i, c in enumerate(self.target_classes):
self.class_to_language_feature[c] = language_features[:, [i]]
| 5,420 | 36.909091 | 165 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/__init__.py | from .clip import *
from .clip_gradcam import ClipGradcam
import torch
import numpy as np
from PIL import Image
import torchvision
from functools import reduce
def factors(n):
    """Return the set of all positive divisors of ``n``.

    Scans candidates up to sqrt(n); every divisor found below the square root
    contributes its complementary divisor ``n // i`` as well.
    """
    divisors = set()
    for i in range(1, int(n**0.5) + 1):
        if n % i == 0:
            divisors.add(i)
            divisors.add(n // i)
    return divisors
# Saliency-extraction presets, keyed by method name. Each value is a factory
# taking the image dimension and returning the kwargs consumed by
# ClipWrapper.get_clip_saliency* (multi-scale crop sizes/strides, jitter
# augmentation count, flipping, etc.).
saliency_configs = {
    # "ours": multi-scale tiling + jitter + horizontal-flip averaging.
    "ours": lambda img_dim: {
        "distractor_labels": {},
        "horizontal_flipping": True,
        "augmentations": 5,
        "imagenet_prompt_ensemble": False,
        "positive_attn_only": True,
        # Four pyramid levels; each stride is a quarter of its tile size.
        "cropping_augmentations": [
            {"tile_size": img_dim, "stride": img_dim // 4},
            {"tile_size": int(img_dim * 2 / 3), "stride": int(img_dim * 2 / 3) // 4},
            {"tile_size": img_dim // 2, "stride": (img_dim // 2) // 4},
            {"tile_size": img_dim // 4, "stride": (img_dim // 4) // 4},
        ],
    },
    # "chefer_et_al": single full-image crop, no augmentation (baseline).
    "chefer_et_al": lambda img_dim: {
        "distractor_labels": {},
        "horizontal_flipping": False,
        "augmentations": 0,
        "imagenet_prompt_ensemble": False,
        "positive_attn_only": True,
        "cropping_augmentations": [{"tile_size": img_dim, "stride": img_dim // 4}],
    },
}
class ClipWrapper:
    # SINGLETON WRAPPER
    # All state lives on the class itself; __init__ only (re)populates the
    # class attributes, and check_initialized() lazily constructs the singleton.
    clip_model = None
    clip_preprocess = None
    clip_gradcam = None
    lavt = None  # optional LAVT localizer; only used if set externally
    device = None
    jittering_transforms = None

    def __init__(self, clip_model_type, device, **kwargs):
        ClipWrapper.device = device
        # Color jitter used to generate augmented copies of the input image.
        ClipWrapper.jittering_transforms = torchvision.transforms.ColorJitter(
            brightness=0.6, contrast=0.6, saturation=0.6, hue=0.1
        )
        ClipWrapper.clip_model, ClipWrapper.clip_preprocess = load(
            clip_model_type, ClipWrapper.device, **kwargs
        )
        # Gradcam model starts with a placeholder class/template; callers set
        # real prompts via clip_gradcam.set_classes().
        ClipWrapper.clip_gradcam = ClipGradcam(
            clip_model_name=clip_model_type,
            classes=[""],
            templates=["{}"],
            device=ClipWrapper.device,
            **kwargs
        )

    @classmethod
    def check_initialized(cls, clip_model_type="ViT-B/32", **kwargs):
        # Lazily build the singleton on first use.
        if cls.clip_gradcam is None:
            ClipWrapper(
                clip_model_type=clip_model_type,
                device="cuda" if torch.cuda.is_available() else "cpu",
                **kwargs
            )

    @classmethod
    def get_clip_text_feature(cls, string):
        """Encode a single string with CLIP's text encoder; returns a numpy vector."""
        ClipWrapper.check_initialized()
        with torch.no_grad():
            return (
                cls.clip_model.encode_text(
                    tokenize(string, context_length=77).to(cls.device)
                )
                .squeeze()
                .cpu()
                .numpy()
            )

    @classmethod
    def get_visual_feature(cls, rgb, tile_attn_mask, device=None):
        """Encode an RGB numpy image with CLIP's visual encoder.

        `tile_attn_mask` is forwarded to the visual transformer to restrict
        attention; the returned feature is moved to `device` (defaults to the
        wrapper's device).
        """
        if device is None:
            device = ClipWrapper.device
        ClipWrapper.check_initialized()
        rgb = ClipWrapper.clip_preprocess(Image.fromarray(rgb)).unsqueeze(0)
        with torch.no_grad():
            clip_feature = ClipWrapper.clip_model.encode_image(
                rgb.to(ClipWrapper.device), tile_attn_mask=tile_attn_mask
            ).squeeze()
        return clip_feature.to(device)

    @classmethod
    def get_clip_saliency(
        cls,
        img,
        text_labels,
        prompts,
        distractor_labels=set(),
        use_lavt=False,
        **kwargs
    ):
        """Compute per-label saliency maps for `img`.

        Returns (text_maps, text_label_features). If `distractor_labels` are
        given, their mean saliency is subtracted from each label's map.
        NOTE(review): `distractor_labels=set()` is a shared default; it is only
        read here, so this is safe, but worth keeping in mind.
        """
        cls.check_initialized()
        if use_lavt:
            # Delegates to an externally attached LAVT model (cls.lavt must be
            # set by the caller; it is None by default).
            return cls.lavt.localize(img=img, prompts=text_labels)
        cls.clip_gradcam.templates = prompts
        cls.clip_gradcam.set_classes(text_labels)
        text_label_features = torch.stack(
            list(cls.clip_gradcam.class_to_language_feature.values()), dim=0
        )
        text_label_features = text_label_features.squeeze(dim=-1).cpu()
        text_maps = cls.get_clip_saliency_convolve(
            img=img, text_labels=text_labels, **kwargs
        )
        if len(distractor_labels) > 0:
            distractor_labels = set(distractor_labels) - set(text_labels)
            cls.clip_gradcam.set_classes(list(distractor_labels))
            distractor_maps = cls.get_clip_saliency_convolve(
                img=img, text_labels=list(distractor_labels), **kwargs
            )
            # Suppress regions that are salient for distractors.
            text_maps -= distractor_maps.mean(dim=0)
        text_maps = text_maps.cpu()
        return text_maps, text_label_features.squeeze(dim=-1)

    @classmethod
    def get_clip_saliency_convolve(
        cls,
        text_labels,
        horizontal_flipping=False,
        positive_attn_only: bool = False,
        tile_batch_size=32,
        prompt_batch_size=32,
        tile_interpolate_batch_size=32,
        **kwargs
    ):
        """Sliding-window gradcam: run ClipGradcam over multi-scale tiles and
        average the upsampled relevance maps back into full-image saliency.

        Returns a float tensor of shape [n_labels, H, W] on the CPU.
        """
        cls.clip_gradcam.positive_attn_only = positive_attn_only
        tiles, tile_imgs, counts, tile_sizes = cls.create_tiles(**kwargs)
        # One accumulator per tile size, shaped [n_labels, H, W].
        outputs = {
            k: torch.zeros(
                [len(text_labels)] + list(count.shape), device=cls.device
            ).half()
            for k, count in counts.items()
        }
        # Gradcam over all tiles, batched along both tiles and prompts.
        # Result shape: [n_labels, n_tiles, grid, grid].
        tile_gradcams = torch.cat(
            [
                torch.cat(
                    [
                        cls.clip_gradcam(
                            x=tile_imgs[tile_idx : tile_idx + tile_batch_size],
                            o=text_labels[prompt_idx : prompt_idx + prompt_batch_size],
                        )
                        for tile_idx in np.arange(0, len(tile_imgs), tile_batch_size)
                    ],
                    dim=1,
                )
                for prompt_idx in np.arange(0, len(text_labels), prompt_batch_size)
            ],
            dim=0,
        )
        if horizontal_flipping:
            # Run the mirrored tiles, un-flip the maps, and average with the
            # originals for left/right-symmetric saliency.
            flipped_tile_imgs = tile_imgs[
                ..., torch.flip(torch.arange(0, tile_imgs.shape[-1]), dims=[0])
            ]
            flipped_tile_gradcams = torch.cat(
                [
                    torch.cat(
                        [
                            cls.clip_gradcam(
                                x=flipped_tile_imgs[
                                    tile_idx : tile_idx + tile_batch_size
                                ],
                                o=text_labels[
                                    prompt_idx : prompt_idx + prompt_batch_size
                                ],
                            )
                            for tile_idx in np.arange(
                                0, len(tile_imgs), tile_batch_size
                            )
                        ],
                        dim=1,
                    )
                    for prompt_idx in np.arange(0, len(text_labels), prompt_batch_size)
                ],
                dim=0,
            )
            with torch.no_grad():
                flipped_tile_gradcams = flipped_tile_gradcams[
                    ...,
                    torch.flip(
                        torch.arange(0, flipped_tile_gradcams.shape[-1]), dims=[0]
                    ),
                ]
                tile_gradcams = (tile_gradcams + flipped_tile_gradcams) / 2
                del flipped_tile_gradcams
        with torch.no_grad():
            torch.cuda.empty_cache()
            # Upsample each tile's relevance map to its tile size and add it
            # into the accumulator at the tile's image location.
            for tile_size in np.unique(tile_sizes):
                tile_size_mask = tile_sizes == tile_size
                curr_size_grads = tile_gradcams[:, tile_size_mask]
                curr_size_tiles = tiles[tile_size_mask]
                # NOTE(review): the inner `enumerate` below reuses the name
                # `tile_idx`, shadowing this loop variable; the slice is
                # evaluated before the shadowing, so behavior is correct.
                for tile_idx in np.arange(
                    0, curr_size_grads.shape[1], tile_interpolate_batch_size
                ):
                    resized_tiles = torch.nn.functional.interpolate(
                        curr_size_grads[
                            :, tile_idx : tile_idx + tile_interpolate_batch_size
                        ],
                        size=tile_size,
                        mode="bilinear",
                        align_corners=False,
                    )
                    for tile_idx, tile_slice in enumerate(
                        curr_size_tiles[
                            tile_idx : tile_idx + tile_interpolate_batch_size
                        ]
                    ):
                        outputs[tile_size][tile_slice] += resized_tiles[
                            :, tile_idx, ...
                        ]
            # Normalize each scale by its per-pixel tile count, then average
            # across scales.
            output = sum(
                output.float() / count
                for output, count in zip(outputs.values(), counts.values())
            ) / len(counts)
            del outputs, counts, tile_gradcams
            output = output.cpu()
        return output

    @classmethod
    def create_tiles(cls, img, augmentations, cropping_augmentations, **kwargs):
        """Cut `img` (and jittered copies) into multi-scale tiles.

        Returns (tiles, tile_imgs, counts, tile_sizes): slice tuples into the
        image, preprocessed tile tensors, per-scale coverage counts, and each
        tile's size.
        """
        assert type(img) == np.ndarray
        images = []
        cls.check_initialized()
        # compute image crops
        img_pil = Image.fromarray(img)
        images.append(np.array(img_pil))
        for _ in range(augmentations):
            images.append(np.array(cls.jittering_transforms(img_pil)))
        # for taking average
        # epsilon avoids divide-by-zero for pixels covered by no tile.
        counts = {
            crop_aug["tile_size"]: torch.zeros(img.shape[:2], device=cls.device).float()
            + 1e-5
            for crop_aug in cropping_augmentations
        }
        tiles = []
        tile_imgs = []
        tile_sizes = []
        # NOTE(review): this loop variable shadows the `img` parameter.
        for img in images:
            for crop_aug in cropping_augmentations:
                tile_size = crop_aug["tile_size"]
                stride = crop_aug["stride"]
                for y in np.arange(0, img.shape[1] - tile_size + 1, stride):
                    if y >= img.shape[0]:
                        continue
                    for x in np.arange(0, img.shape[0] - tile_size + 1, stride):
                        if x >= img.shape[1]:
                            continue
                        tile = (
                            slice(None, None),
                            slice(x, x + tile_size),
                            slice(y, y + tile_size),
                        )
                        tiles.append(tile)
                        counts[tile_size][tile[1:]] += 1
                        tile_sizes.append(tile_size)
                        # this is currently biggest bottle neck
                        tile_imgs.append(
                            cls.clip_gradcam.preprocess(
                                Image.fromarray(img[tiles[-1][1:]])
                            )
                        )
        tile_imgs = torch.stack(tile_imgs).to(cls.device)
        return np.array(tiles), tile_imgs, counts, np.array(tile_sizes)
# Prompt-ensembling templates from OpenAI CLIP's ImageNet zero-shot
# evaluation. Each template is formatted with a class name; the resulting
# text embeddings are averaged by the caller.
imagenet_templates = [
    "a bad photo of a {}.",
    "a photo of many {}.",
    "a sculpture of a {}.",
    "a photo of the hard to see {}.",
    "a low resolution photo of the {}.",
    "a rendering of a {}.",
    "graffiti of a {}.",
    "a bad photo of the {}.",
    "a cropped photo of the {}.",
    "a tattoo of a {}.",
    "the embroidered {}.",
    "a photo of a hard to see {}.",
    "a bright photo of a {}.",
    "a photo of a clean {}.",
    "a photo of a dirty {}.",
    "a dark photo of the {}.",
    "a drawing of a {}.",
    "a photo of my {}.",
    "the plastic {}.",
    "a photo of the cool {}.",
    "a close-up photo of a {}.",
    "a black and white photo of the {}.",
    "a painting of the {}.",
    "a painting of a {}.",
    "a pixelated photo of the {}.",
    "a sculpture of the {}.",
    "a bright photo of the {}.",
    "a cropped photo of a {}.",
    "a plastic {}.",
    "a photo of the dirty {}.",
    "a jpeg corrupted photo of a {}.",
    "a blurry photo of the {}.",
    "a photo of the {}.",
    "a good photo of the {}.",
    "a rendering of the {}.",
    "a {} in a video game.",
    "a photo of one {}.",
    "a doodle of a {}.",
    "a close-up photo of the {}.",
    "a photo of a {}.",
    "the origami {}.",
    "the {} in a video game.",
    "a sketch of a {}.",
    "a doodle of the {}.",
    "a origami {}.",
    "a low resolution photo of a {}.",
    "the toy {}.",
    "a rendition of the {}.",
    "a photo of the clean {}.",
    "a photo of a large {}.",
    "a rendition of a {}.",
    "a photo of a nice {}.",
    "a photo of a weird {}.",
    "a blurry photo of a {}.",
    "a cartoon {}.",
    "art of a {}.",
    "a sketch of the {}.",
    "a embroidered {}.",
    "a pixelated photo of a {}.",
    "itap of the {}.",
    "a jpeg corrupted photo of the {}.",
    "a good photo of a {}.",
    "a plushie {}.",
    "a photo of the nice {}.",
    "a photo of the small {}.",
    "a photo of the weird {}.",
    "the cartoon {}.",
    "art of the {}.",
    "a drawing of the {}.",
    "a photo of the large {}.",
    "a black and white photo of a {}.",
    "the plushie {}.",
    "a dark photo of a {}.",
    "itap of a {}.",
    "graffiti of the {}.",
    "a toy {}.",
    "itap of my {}.",
    "a photo of a cool {}.",
    "a photo of a small {}.",
    "a tattoo of the {}.",
]
# Public API of this module (on top of the names re-exported from .clip).
__all__ = ["ClipWrapper", "imagenet_templates"]
| 12,890 | 33.934959 | 88 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/model_explainability.py | # modified from: https://github.com/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP/clip/model.py
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .auxiliary import (
multi_head_attention_forward,
MultiheadAttention,
interpolate_positional_emb,
)
import sys
class Bottleneck(nn.Module):
    """CLIP's anti-aliased ResNet bottleneck block.

    Differs from torchvision's bottleneck: strided convolutions are replaced
    by stride-1 convolutions preceded/followed by average pooling, and the
    downsample path starts with an avgpool.
    """

    # Channel expansion factor of the final 1x1 convolution.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            # Keys "-1"/"0"/"1" match the pretrained CLIP state_dict layout.
            self.downsample_modules = OrderedDict(
                [
                    ("-1", nn.AvgPool2d(stride)),
                    (
                        "0",
                        nn.Conv2d(
                            inplanes, planes * self.expansion, 1, stride=1, bias=False
                        ),
                    ),
                    ("1", nn.BatchNorm2d(planes * self.expansion)),
                ]
            )
            self.downsample = nn.Sequential(self.downsample_modules)

    def forward(self, x: torch.Tensor):
        """conv1 -> conv2 -> avgpool -> conv3, plus (optionally downsampled) skip."""
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class AttentionPool2d(nn.Module):
    """QKV attention pooling over a spatial feature map.

    Flattens the NCHW map to tokens, prepends a mean token, adds learned
    positional embeddings, and returns the attended mean-token feature.
    """

    def __init__(
        self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
    ):
        super().__init__()
        # One position per spatial location plus one for the mean token.
        self.positional_embedding = nn.Parameter(
            torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
        )
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        """Pool an NCHW feature map to an [N, output_dim] embedding."""
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
            2, 0, 1
        )  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        x, _ = multi_head_attention_forward(
            query=x,
            key=x,
            value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat(
                [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
            ),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False,
        )
        # Return only the attended mean token (position 0).
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution
        # the 3-layer stem
        self.conv1 = nn.Conv2d(
            3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(
            width // 2, width // 2, kernel_size=3, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)
        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
        embed_dim = width * 32  # the ResNet feature dimension
        # 32 = total spatial downsampling factor of stem + 4 stages.
        self.attnpool = AttentionPool2d(
            input_resolution // 32, embed_dim, heads, output_dim
        )

    def _make_layer(self, planes, blocks, stride=1):
        """Build one residual stage: a strided Bottleneck followed by stride-1 blocks."""
        layers = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem -> 4 residual stages -> attention pooling; returns [N, output_dim]."""
        def stem(x):
            for conv, bn in [
                (self.conv1, self.bn1),
                (self.conv2, self.bn2),
                (self.conv3, self.bn3),
            ]:
                x = self.relu(bn(conv(x)))
            x = self.avgpool(x)
            return x

        # Match the input dtype to the (possibly fp16) weights.
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)
        return x
class LayerNorm(nn.LayerNorm):
    """LayerNorm that tolerates fp16 inputs.

    Upcasts the input to float32 for a numerically stable normalization,
    then casts the result back to the input's original dtype.
    """

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normalized = super().forward(x.to(torch.float32))
        return normalized.to(input_dtype)
class QuickGELU(nn.Module):
    """Fast sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(x * 1.702)
        return gate * x
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block (attention + MLP, both residual).

    Visual blocks additionally cache their attention probabilities and
    gradients via hooks so relevance maps can be computed afterwards.
    """

    def __init__(
        self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, is_visual=True
    ):
        super().__init__()
        # Custom MultiheadAttention (from .auxiliary) that supports the
        # forward/backward attention hooks used below.
        self.attn = MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp_modules = OrderedDict(
            [
                ("c_fc", nn.Linear(d_model, d_model * 4)),
                ("gelu", QuickGELU()),
                ("c_proj", nn.Linear(d_model * 4, d_model)),
            ]
        )
        self.mlp = nn.Sequential(self.mlp_modules)
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
        # Populated by the hooks on each forward/backward pass.
        self.attn_probs = None
        self.attn_grad = None
        self.is_visual = is_visual

    def set_attn_probs(self, attn_probs):
        # Forward hook: cache attention probabilities for explainability.
        self.attn_probs = attn_probs

    def set_attn_grad(self, attn_grad):
        # Backward hook: cache attention gradients for explainability.
        self.attn_grad = attn_grad

    def attention(self, x: torch.Tensor):
        # Keep the mask on the input's dtype/device (inputs may be fp16/CUDA).
        self.attn_mask = (
            self.attn_mask.to(dtype=x.dtype, device=x.device)
            if self.attn_mask is not None
            else None
        )
        if self.is_visual:
            # Visual blocks register the explainability hooks.
            return self.attn(
                x,
                x,
                x,
                need_weights=False,
                attn_mask=self.attn_mask,
                attention_probs_forward_hook=self.set_attn_probs,
                attention_probs_backwards_hook=self.set_attn_grad,
            )[0]
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        """Pre-LN residual attention followed by pre-LN residual MLP."""
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """Stack of ResidualAttentionBlocks shared by the text and visual towers.

    ``forward`` can temporarily replace every block's additive attention mask
    with ``tile_attn_mask`` for the duration of the call; the original masks
    are restored before returning.
    """

    def __init__(
        self,
        width: int,
        layers: int,
        heads: int,
        attn_mask: torch.Tensor = None,
        is_visual=False,
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.resblocks_modules = [
            ResidualAttentionBlock(width, heads, attn_mask, is_visual=is_visual)
            for _ in range(layers)
        ]
        self.resblocks = nn.Sequential(*self.resblocks_modules)

    def forward(self, x: torch.Tensor, tile_attn_mask: torch.Tensor = None):
        """Run the block stack over x (LND layout), optionally with a tile mask."""
        prev_attn_masks = []
        if tile_attn_mask is not None:
            # FIX: iterate the Sequential's children directly. The previous
            # code iterated self.resblocks.modules(), which yields the
            # Sequential itself and every nested submodule (LayerNorm,
            # attention, ...), raising AttributeError on modules without an
            # `attn_mask` attribute and polluting those that have none.
            # The old masks are stored by reference rather than cloned: the
            # attribute is swapped, never mutated in place, and this also
            # tolerates a None mask (visual blocks are built with
            # attn_mask=None, on which .clone() would crash).
            for resblock in self.resblocks:
                prev_attn_masks.append(resblock.attn_mask)
                resblock.attn_mask = tile_attn_mask
        x = self.resblocks(x)
        if tile_attn_mask is not None:
            # Restore the original masks so subsequent calls are unaffected.
            for resblock, prev_attn_mask in zip(self.resblocks, prev_attn_masks):
                resblock.attn_mask = prev_attn_mask
        return x
class VisionTransformer(nn.Module):
    """CLIP's ViT image encoder: patchify, prepend CLS, add positions, run the
    transformer, and project the CLS token to the joint embedding space."""

    def __init__(
        self,
        input_resolution: int,
        patch_size: int,
        width: int,
        layers: int,
        heads: int,
        output_dim: int,
    ):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding (stride == kernel == patch_size).
        self.conv1 = nn.Conv2d(
            in_channels=3,
            out_channels=width,
            kernel_size=patch_size,
            stride=patch_size,
            bias=False,
        )
        scale = width**-0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        # One position per patch plus one for the CLS token.
        self.positional_embedding = nn.Parameter(
            scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)
        )
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads, is_visual=True)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(self, x: torch.Tensor, **kwargs):
        """Encode an image batch; extra kwargs (e.g. tile_attn_mask) are
        forwarded to the transformer."""
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        # shape = [*, width, grid ** 2]
        x = x.reshape(x.shape[0], x.shape[1], -1)
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        x = torch.cat(
            [
                self.class_embedding.to(x.dtype)
                + torch.zeros(
                    x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
                ),
                x,
            ],
            dim=1,
        )  # shape = [*, grid ** 2 + 1, width]
        # 50 = 7*7 patches + CLS, i.e. the trained token count — presumably
        # ViT-B/32 at 224px; other token counts get an interpolated positional
        # embedding. TODO(review): confirm this constant for other backbones.
        if len(x[0]) != 50:
            pe = interpolate_positional_emb(self.positional_embedding, len(x[0]))
            x += pe.to(x.dtype)
        else:
            x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, **kwargs)
        x = x.permute(1, 0, 2)  # LND -> NLD
        # Project only the CLS token into the joint embedding space.
        x = self.ln_post(x[:, 0, :])
        if self.proj is not None:
            x = x @ self.proj
        return x
class CLIP(nn.Module):
    """Full CLIP model: an image tower (ResNet or ViT) and a causal text
    transformer mapped into a shared embedding space, compared with a learned
    logit scale."""

    def __init__(
        self,
        embed_dim: int,
        # vision
        image_resolution: int,
        vision_layers: Union[Tuple[int, int, int, int], int],
        vision_width: int,
        vision_patch_size: int,
        # text
        context_length: int,
        vocab_size: int,
        transformer_width: int,
        transformer_heads: int,
        transformer_layers: int,
    ):
        super().__init__()
        self.context_length = context_length
        # A tuple of layer counts means a ResNet tower; an int means a ViT.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width,
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim,
            )
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask(),
            is_visual=False,
        )
        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(
            torch.empty(self.context_length, transformer_width)
        )
        self.ln_final = LayerNorm(transformer_width)
        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # Temperature parameter, stored as log(1/0.07) as in the paper.
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        self.initialize_parameters()

    def initialize_parameters(self):
        """Initialize embeddings, attention-pool and transformer weights with
        the scaled-normal scheme from the original CLIP implementation."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features**-0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
            # Zero the last BN gain of each bottleneck so blocks start as
            # identity-like residuals.
            for resnet_block in [
                self.visual.layer1,
                self.visual.layer2,
                self.visual.layer3,
                self.visual.layer4,
            ]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)
        proj_std = (self.transformer.width**-0.5) * (
            (2 * self.transformer.layers) ** -0.5
        )
        attn_std = self.transformer.width**-0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        # Parameter dtype of the visual stem (fp16 after convert_weights).
        return self.visual.conv1.weight.dtype

    def encode_image(self, image, **kwargs):
        """Encode images; kwargs (e.g. tile_attn_mask) go to the visual tower."""
        return self.visual(image.type(self.dtype), **kwargs)

    def encode_text(self, text):
        """Encode tokenized text [batch, n_ctx] into the joint embedding space."""
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)
        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return x

    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text) cosine-similarity logits."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)
        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()
        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Cast the applicable parameters of `model` to fp16, in place."""

    def _to_fp16(module):
        # Convolution / linear layers: cast weight and (optional) bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()

        # Attention projection tensors are plain attributes, not submodules.
        if isinstance(module, MultiheadAttention):
            attn_attrs = [
                "in_proj_weight",
                "q_proj_weight",
                "k_proj_weight",
                "v_proj_weight",
                "in_proj_bias",
                "bias_k",
                "bias_v",
            ]
            for attr_name in attn_attrs:
                tensor = getattr(module, attr_name)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        # CLIP-specific projection parameters.
        for proj_name in ("text_projection", "proj"):
            proj = getattr(module, proj_name, None)
            if proj is not None:
                proj.data = proj.data.half()

    model.apply(_to_fp16)
def build_model(state_dict: dict):
    """Instantiate a CLIP model whose architecture is inferred from a
    pretrained state_dict, load the weights (as fp16), and return it in
    eval mode."""
    # ViT checkpoints carry a "visual.proj" parameter; ResNets do not.
    vit = "visual.proj" in state_dict
    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        # One attention projection per transformer block.
        vision_layers = len(
            [
                k
                for k in state_dict.keys()
                if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
            ]
        )
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # Positional embedding has grid**2 + 1 entries (patches + CLS).
        grid_size = round(
            (state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5
        )
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet: count distinct block indices per stage.
        counts: list = [
            len(
                set(
                    k.split(".")[2]
                    for k in state_dict
                    if k.startswith(f"visual.layer{b}")
                )
            )
            for b in [1, 2, 3, 4]
        ]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round(
            (state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5
        )
        vision_patch_size = None
        assert (
            output_width**2 + 1
            == state_dict["visual.attnpool.positional_embedding"].shape[0]
        )
        # 32 = ResNet's total spatial downsampling factor.
        image_resolution = output_width * 32
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    # CLIP uses a fixed head dimension of 64.
    transformer_heads = transformer_width // 64
    transformer_layers = len(
        set(
            k.split(".")[2]
            for k in state_dict
            if k.startswith(f"transformer.resblocks")
        )
    )
    model = CLIP(
        embed_dim,
        image_resolution,
        vision_layers,
        vision_width,
        vision_patch_size,
        context_length,
        vocab_size,
        transformer_width,
        transformer_heads,
        transformer_layers,
    )
    # Drop checkpoint metadata entries that are not model parameters.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]
    # Pretrained CLIP weights are fp16; convert the model to match.
    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| 20,409 | 32.84743 | 112 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/tests/test_consistency.py | import numpy as np
import pytest
import torch
from PIL import Image
import clip
@pytest.mark.parametrize("model_name", clip.available_models())
def test_consistency(model_name):
    """Check that the TorchScript (jit) and pure-Python CLIP models produce
    matching softmax probabilities on the same image/text inputs."""
    device = "cpu"
    jit_model, transform = clip.load(model_name, device=device, jit=True)
    py_model, _ = clip.load(model_name, device=device, jit=False)
    # Relies on CLIP.png being present in the working directory.
    image = transform(Image.open("CLIP.png")).unsqueeze(0).to(device)
    text = clip.tokenize(["a diagram", "a dog", "a cat"]).to(device)
    with torch.no_grad():
        logits_per_image, _ = jit_model(image, text)
        jit_probs = logits_per_image.softmax(dim=-1).cpu().numpy()
        logits_per_image, _ = py_model(image, text)
        py_probs = logits_per_image.softmax(dim=-1).cpu().numpy()
    # Loose tolerances: fp differences between jit and eager are expected.
    assert np.allclose(jit_probs, py_probs, atol=0.01, rtol=0.1)
| 812 | 30.269231 | 73 | py |
UniVL | UniVL-main/main_task_retrieval.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import torch
from torch.utils.data import (SequentialSampler)
import numpy as np
import random
import os
from metrics import compute_metrics
import time
import argparse
from modules.tokenization import BertTokenizer
from modules.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modules.modeling import UniVL
from modules.optimization import BertAdam
from torch.utils.data import DataLoader
from util import parallel_apply, get_logger
from dataloaders.dataloader_youcook_retrieval import Youcook_DataLoader
from dataloaders.dataloader_msrvtt_retrieval import MSRVTT_DataLoader
from dataloaders.dataloader_msrvtt_retrieval import MSRVTT_TrainDataLoader
# Module import side effect: join the distributed process group (NCCL backend
# requires a torch.distributed launch, one process per GPU).
torch.distributed.init_process_group(backend="nccl")
# No-op at module scope; `logger` is actually assigned in set_seed_logger().
global logger
def get_args(description='UniVL on Retrieval Task'):
    """Parse, validate, and return command-line arguments for retrieval
    finetuning. Rescales batch_size by gradient_accumulation_steps and
    requires at least one of --do_train/--do_eval."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--do_pretrain", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    parser.add_argument('--train_csv', type=str, default='data/youcookii_singlef_train.csv', help='')
    parser.add_argument('--val_csv', type=str, default='data/youcookii_singlef_val.csv', help='')
    parser.add_argument('--data_path', type=str, default='data/youcookii_caption.pickle', help='data pickle file path')
    parser.add_argument('--features_path', type=str, default='data/youcookii_videos_feature.pickle', help='feature path')
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=20, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='')
    parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
    parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
    parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
    parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--bert_model", default="bert-base-uncased", type=str, required=True,
                        help="Bert pre-trained model")
    parser.add_argument("--visual_model", default="visual-base", type=str, required=False, help="Visual module")
    parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
    parser.add_argument("--decoder_model", default="decoder-base", type=str, required=False, help="Decoder module")
    parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--task_type", default="retrieval", type=str, help="Point the task `retrieval` to finetune.")
    parser.add_argument("--datatype", default="youcook", type=str, help="Point the dataset `youcook` to finetune.")
    parser.add_argument("--world_size", default=0, type=int, help="distribted training")
    parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
    parser.add_argument('--coef_lr', type=float, default=0.1, help='coefficient for bert branch.')
    parser.add_argument('--use_mil', action='store_true', help="Whether use MIL as Miech et. al. (2020).")
    parser.add_argument('--sampled_use_mil', action='store_true', help="Whether MIL, has a high priority than use_mil.")
    parser.add_argument('--text_num_hidden_layers', type=int, default=12, help="Layer NO. of text.")
    parser.add_argument('--visual_num_hidden_layers', type=int, default=6, help="Layer NO. of visual.")
    parser.add_argument('--cross_num_hidden_layers', type=int, default=2, help="Layer NO. of cross.")
    parser.add_argument('--decoder_num_hidden_layers', type=int, default=3, help="Layer NO. of decoder.")
    parser.add_argument('--train_sim_after_cross', action='store_true', help="Test retrieval after cross encoder.")
    parser.add_argument('--expand_msrvtt_sentences', action='store_true', help="")
    args = parser.parse_args()
    # Check paramenters
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    # The per-step batch is the effective batch divided by accumulation steps.
    args.batch_size = int(args.batch_size / args.gradient_accumulation_steps)
    return args
def set_seed_logger(args):
    """Seed every RNG for reproducibility, bind this process to its GPU,
    create the output dir, and install the global file logger.

    Returns the (mutated) args namespace with ``world_size`` filled in.
    Must be called after ``torch.distributed.init_process_group``.
    """
    global logger
    # Seed all random sources (Python, hash, numpy, torch CPU/GPU).
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)  # if you are using multi-GPU.
    # Trade cuDNN autotuning for deterministic kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    world_size = torch.distributed.get_world_size()
    torch.cuda.set_device(args.local_rank)
    args.world_size = world_size
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, "log.txt"))
    # Only rank 0 dumps the effective configuration.
    if args.local_rank == 0:
        logger.info("Effective parameters:")
        for key in sorted(args.__dict__):
            logger.info(" <<< {}: {}".format(key, args.__dict__[key]))
    return args
def init_device(args, local_rank):
    """Pick the CUDA device for this rank and record the GPU count.

    Raises ValueError when either batch size is not divisible by the
    number of GPUs (each GPU must get an equal shard).
    """
    global logger
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info("device: {} n_gpu: {}".format(device, n_gpu))
    args.n_gpu = n_gpu
    if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0:
        raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format(
            args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return device, n_gpu
def init_model(args, device, n_gpu, local_rank):
    """Instantiate UniVL (optionally from an --init_model state dict) and move it to device."""
    if args.init_model:
        # Load to CPU first; from_pretrained consumes the state dict.
        model_state_dict = torch.load(args.init_model, map_location='cpu')
    else:
        model_state_dict = None
    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                  cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.):
    """Build the BertAdam optimizer with per-group LR/weight-decay and wrap
    the model in DistributedDataParallel.

    Grouping: bias/LayerNorm parameters are exempt from weight decay; BERT
    branch parameters get their LR scaled by ``coef_lr``.
    Returns (optimizer, scheduler, ddp_model); scheduler is always None
    (BertAdam schedules the warmup internally).
    """
    if hasattr(model, 'module'):
        model = model.module
    named_params = list(model.named_parameters())
    decay_exempt_keys = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    def _is_exempt(name):
        # bias/LayerNorm parameters should not be weight-decayed.
        return any(key in name for key in decay_exempt_keys)

    decayed = [(n, p) for n, p in named_params if not _is_exempt(n)]
    exempt = [(n, p) for n, p in named_params if _is_exempt(n)]
    bert_decayed = [(n, p) for n, p in decayed if "bert." in n]
    other_decayed = [(n, p) for n, p in decayed if "bert." not in n]
    bert_exempt = [(n, p) for n, p in exempt if "bert." in n]
    other_exempt = [(n, p) for n, p in exempt if "bert." not in n]
    optimizer_grouped_parameters = [
        {'params': [p for _, p in bert_decayed], 'weight_decay': 0.01, 'lr': args.lr * coef_lr},
        {'params': [p for _, p in other_decayed], 'weight_decay': 0.01},
        {'params': [p for _, p in bert_exempt], 'weight_decay': 0.0, 'lr': args.lr * coef_lr},
        {'params': [p for _, p in other_exempt], 'weight_decay': 0.0}
    ]
    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
                         schedule='warmup_linear', t_total=num_train_optimization_steps, weight_decay=0.01,
                         max_grad_norm=1.0)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
                                                      output_device=local_rank, find_unused_parameters=True)
    return optimizer, scheduler, model
def dataloader_youcook_train(args, tokenizer):
    """Build the YouCookII training dataloader with a DistributedSampler.

    Returns (dataloader, dataset_length, sampler); the sampler is returned
    so the training loop can call ``set_epoch`` each epoch.
    """
    youcook_dataset = Youcook_DataLoader(
        csv=args.train_csv,
        data_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
    )
    train_sampler = torch.utils.data.distributed.DistributedSampler(youcook_dataset)
    dataloader = DataLoader(
        youcook_dataset,
        # Global batch size is split evenly across the GPUs.
        batch_size=args.batch_size // args.n_gpu,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=(train_sampler is None),  # sampler is always set here, so no shuffle
        sampler=train_sampler,
        drop_last=True,
    )
    return dataloader, len(youcook_dataset), train_sampler
def dataloader_youcook_test(args, tokenizer):
    """Build the sequential YouCookII validation dataloader.

    Returns (dataloader, dataset_length).
    """
    youcook_testset = Youcook_DataLoader(
        csv=args.val_csv,
        data_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
    )
    # Deterministic order so the similarity matrix rows/cols line up.
    test_sampler = SequentialSampler(youcook_testset)
    dataloader_youcook = DataLoader(
        youcook_testset,
        sampler=test_sampler,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        pin_memory=False,
    )
    logger.info('YoucookII validation pairs: {}'.format(len(youcook_testset)))
    return dataloader_youcook, len(youcook_testset)
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSR-VTT training dataloader with a DistributedSampler.

    Returns (dataloader, dataset_length, sampler).
    """
    msrvtt_dataset = MSRVTT_TrainDataLoader(
        csv_path=args.train_csv,
        json_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        # When set, every caption of a video becomes its own sample.
        unfold_sentences=args.expand_msrvtt_sentences,
    )
    train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    dataloader = DataLoader(
        msrvtt_dataset,
        batch_size=args.batch_size // args.n_gpu,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        drop_last=True,
    )
    return dataloader, len(msrvtt_dataset), train_sampler
def dataloader_msrvtt_test(args, tokenizer):
    """Build the MSR-VTT test dataloader (no shuffling, keeps every sample).

    Returns (dataloader, dataset_length).
    """
    msrvtt_testset = MSRVTT_DataLoader(
        csv_path=args.val_csv,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
    )
    dataloader_msrvtt = DataLoader(
        msrvtt_testset,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        shuffle=False,
        drop_last=False,
    )
    return dataloader_msrvtt, len(msrvtt_testset)
def save_model(epoch, args, model, type_name=""):
    """Save the unwrapped model's state dict to the output dir and return its path.

    The file is named ``pytorch_model.bin.[<type_name>.]<epoch>``.
    """
    # Unwrap DataParallel/DistributedDataParallel so keys have no 'module.' prefix.
    core_model = model.module if hasattr(model, 'module') else model
    suffix = "" if type_name == "" else type_name + "."
    output_model_file = os.path.join(
        args.output_dir, "pytorch_model.bin.{}{}".format(suffix, epoch))
    torch.save(core_model.state_dict(), output_model_file)
    logger.info("Model saved to %s", output_model_file)
    return output_model_file
def load_model(epoch, args, n_gpu, device, model_file=None):
    """Rebuild a UniVL model from a saved state-dict file.

    When ``model_file`` is empty, falls back to the per-epoch file in
    ``args.output_dir``. Returns the model on ``device``, or None if the
    file does not exist (callers must handle the None case).
    """
    if model_file is None or len(model_file) == 0:
        model_file = os.path.join(args.output_dir, "pytorch_model.bin.{}".format(epoch))
    if os.path.exists(model_file):
        model_state_dict = torch.load(model_file, map_location='cpu')
        if args.local_rank == 0:
            logger.info("Model loaded from %s", model_file)
        # Prepare model
        cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
        model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                      cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
        model.to(device)
    else:
        model = None
    return model
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0):
    """Run one retrieval training epoch with gradient accumulation.

    Returns (mean_loss_over_steps, updated_global_step). Optimizer steps
    happen every ``gradient_accumulation_steps`` micro-batches; rank 0 logs
    every ``args.n_display`` optimizer steps.
    """
    global logger
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    for step, batch in enumerate(train_dataloader):
        if n_gpu == 1:
            # multi-gpu does scattering it-self
            batch = tuple(t.to(device=device, non_blocking=True) for t in batch)
        input_ids, input_mask, segment_ids, video, video_mask, \
        pairs_masked_text, pairs_token_labels, masked_video, video_labels_index = batch
        loss = model(input_ids, segment_ids, input_mask, video, video_mask,
                     pairs_masked_text=pairs_masked_text, pairs_token_labels=pairs_token_labels,
                     masked_video=masked_video, video_labels_index=video_labels_index)
        if n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu.
        if args.gradient_accumulation_steps > 1:
            # Scale so the accumulated gradient matches a full batch.
            loss = loss / args.gradient_accumulation_steps
        loss.backward()
        total_loss += float(loss)
        if (step + 1) % args.gradient_accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            if scheduler is not None:
                scheduler.step()  # Update learning rate schedule
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1
            if global_step % log_step == 0 and local_rank == 0:
                logger.info("Epoch: %d/%s, Step: %d/%d, Lr: %s, Loss: %f, Time/step: %f", epoch + 1,
                            args.epochs, step + 1,
                            len(train_dataloader), "-".join([str('%.6f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]),
                            float(loss),
                            (time.time() - start_time) / (log_step * args.gradient_accumulation_steps))
                start_time = time.time()
    total_loss = total_loss / len(train_dataloader)
    return total_loss, global_step
def _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list):
    """Compute a block of the text-video similarity matrix on one device.

    Rows come from the cached text encodings, columns from the cached
    visual encodings. Returns a list of numpy row-blocks (one per text
    batch), each of shape (text_batch, total_videos).
    """
    sim_matrix = []
    for idx1, b1 in enumerate(batch_list_t):
        # Only the text mask is needed from the raw batch; features are cached.
        input_ids, input_mask, segment_ids, _, _, _, _, _, _ = b1
        sequence_output = batch_sequence_output_list[idx1]
        each_row = []
        for idx2, b2 in enumerate(batch_list_v):
            _, _, _, video, video_mask, _, _, _, _ = b2
            visual_output = batch_visual_output_list[idx2]
            b1b2_logits = model.get_similarity_logits(sequence_output, visual_output, input_mask, video_mask)
            b1b2_logits = b1b2_logits.cpu().detach().numpy()
            each_row.append(b1b2_logits)
        # Stitch the column blocks into one row block.
        each_row = np.concatenate(tuple(each_row), axis=-1)
        sim_matrix.append(each_row)
    return sim_matrix
def eval_epoch(args, model, test_dataloader, device, n_gpu):
    """Evaluate text-to-video retrieval over the whole test set; returns R@1.

    Phase 1 encodes every batch once, caching text/visual features; phase 2
    builds the full similarity matrix, sharding the text rows across GPUs
    when more than one is available.
    """
    if hasattr(model, 'module'):
        model = model.module.to(device)
    else:
        model = model.to(device)
    model.eval()
    with torch.no_grad():
        batch_list = []
        batch_sequence_output_list, batch_visual_output_list = [], []
        # Phase 1: encode each batch once and cache the outputs.
        for bid, batch in enumerate(test_dataloader):
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, video, video_mask, _, _, _, _ = batch
            sequence_output, visual_output = model.get_sequence_visual_output(input_ids, segment_ids, input_mask, video, video_mask)
            batch_sequence_output_list.append(sequence_output)
            batch_visual_output_list.append(visual_output)
            batch_list.append(batch)
            print("{}/{}\r".format(bid, len(test_dataloader)), end="")
        # Phase 2: pairwise similarities. Text batches are split over GPUs;
        # each shard compares against ALL visual batches.
        if n_gpu > 1:
            device_ids = list(range(n_gpu))
            batch_list_t_splits = []
            batch_list_v_splits = []
            batch_t_output_splits = []
            batch_v_output_splits = []
            bacth_len = len(batch_list)
            split_len = (bacth_len + n_gpu - 1) // n_gpu  # ceil division
            for dev_id in device_ids:
                s_, e_ = dev_id * split_len, (dev_id + 1) * split_len
                if dev_id == 0:
                    # Device 0 already holds the tensors; no copy needed.
                    batch_list_t_splits.append(batch_list[s_:e_])
                    batch_list_v_splits.append(batch_list)
                    batch_t_output_splits.append(batch_sequence_output_list[s_:e_])
                    batch_v_output_splits.append(batch_visual_output_list)
                else:
                    # Move this shard (and a full copy of the visual side) to its GPU.
                    devc = torch.device('cuda:{}'.format(str(dev_id)))
                    devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list[s_:e_]]
                    batch_list_t_splits.append(devc_batch_list)
                    devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list]
                    batch_list_v_splits.append(devc_batch_list)
                    devc_batch_list = [b.to(devc) for b in batch_sequence_output_list[s_:e_]]
                    batch_t_output_splits.append(devc_batch_list)
                    devc_batch_list = [b.to(devc) for b in batch_visual_output_list]
                    batch_v_output_splits.append(devc_batch_list)
            parameters_tuple_list = [(batch_list_t_splits[dev_id], batch_list_v_splits[dev_id],
                                      batch_t_output_splits[dev_id], batch_v_output_splits[dev_id]) for dev_id in device_ids]
            parallel_outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids)
            sim_matrix = []
            for idx in range(len(parallel_outputs)):
                sim_matrix += parallel_outputs[idx]
            sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
        else:
            sim_matrix = _run_on_single_gpu(model, batch_list, batch_list, batch_sequence_output_list, batch_visual_output_list)
    metrics = compute_metrics(sim_matrix)
    logger.info('\t Length-T: {}, Length-V:{}'.format(len(sim_matrix), len(sim_matrix[0])))
    logger.info('\t>>>  R@1: {:.4f} - R@5: {:.4f} - R@10: {:.4f} - Median R: {}'.
                format(metrics['R1'], metrics['R5'], metrics['R10'], metrics['MR']))
    R1 = metrics['R1']
    return R1
# Registry mapping each dataset name to its train/val dataloader builders.
DATALOADER_DICT = {
    "youcook": {"train": dataloader_youcook_train, "val": dataloader_youcook_test},
    "msrvtt": {"train": dataloader_msrvtt_train, "val": dataloader_msrvtt_test},
}
def main():
    """Entry point for retrieval finetuning/evaluation.

    Trains for ``args.epochs`` epochs (evaluating and checkpointing each
    epoch on rank 0, keeping the best-R@1 checkpoint), or just evaluates
    when only ``--do_eval`` is set.
    """
    global logger
    args = get_args()
    args = set_seed_logger(args)
    device, n_gpu = init_device(args, args.local_rank)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    assert args.task_type == "retrieval"
    model = init_model(args, device, n_gpu, args.local_rank)
    assert args.datatype in DATALOADER_DICT
    test_dataloader, test_length = DATALOADER_DICT[args.datatype]["val"](args, tokenizer)
    if args.local_rank == 0:
        logger.info("***** Running test *****")
        logger.info(" Num examples = %d", test_length)
        logger.info(" Batch size = %d", args.batch_size_val)
        logger.info(" Num steps = %d", len(test_dataloader))
    if args.do_train:
        train_dataloader, train_length, train_sampler = DATALOADER_DICT[args.datatype]["train"](args, tokenizer)
        # NOTE(review): plain '/' makes this a float; presumably tolerated by
        # BertAdam's t_total math — confirm before changing to '//'.
        num_train_optimization_steps = (int(len(train_dataloader) + args.gradient_accumulation_steps - 1)
                                        / args.gradient_accumulation_steps) * args.epochs
        coef_lr = args.coef_lr
        # When starting from a pretrained UniVL checkpoint, do not down-scale the BERT LR.
        if args.init_model:
            coef_lr = 1.0
        optimizer, scheduler, model = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
        if args.local_rank == 0:
            logger.info("***** Running training *****")
            logger.info(" Num examples = %d", train_length)
            logger.info(" Batch size = %d", args.batch_size)
            logger.info(" Num steps = %d", num_train_optimization_steps * args.gradient_accumulation_steps)
        best_score = 0.00001
        best_output_model_file = None
        global_step = 0
        for epoch in range(args.epochs):
            # Reshuffle the distributed sampler each epoch.
            train_sampler.set_epoch(epoch)
            tr_loss, global_step = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer,
                                               scheduler, global_step, local_rank=args.local_rank)
            if args.local_rank == 0:
                logger.info("Epoch %d/%s Finished, Train Loss: %f", epoch + 1, args.epochs, tr_loss)
                output_model_file = save_model(epoch, args, model, type_name="")
                R1 = eval_epoch(args, model, test_dataloader, device, n_gpu)
                # Track the checkpoint with the best R@1 so far.
                if best_score <= R1:
                    best_score = R1
                    best_output_model_file = output_model_file
                logger.info("The best model is: {}, the R1 is: {:.4f}".format(best_output_model_file, best_score))
        # Re-evaluate the best checkpoint at the end (rank 0 only).
        if args.local_rank == 0:
            model = load_model(-1, args, n_gpu, device, model_file=best_output_model_file)
            eval_epoch(args, model, test_dataloader, device, n_gpu)
    elif args.do_eval:
        if args.local_rank == 0:
            eval_epoch(args, model, test_dataloader, device, n_gpu)
if __name__ == "__main__":
main() | 24,353 | 46.28932 | 144 | py |
UniVL | UniVL-main/main_pretrain.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import torch
from torch.utils.data import (SequentialSampler)
import numpy as np
import random
import os
from collections import OrderedDict
import pickle
import time
import argparse
from modules.tokenization import BertTokenizer
from modules.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modules.modeling import UniVL
from modules.optimization import BertAdam
from dataloaders.dataloader_howto100m import Youtube_DataLoader
from torch.utils.data import DataLoader
from util import get_logger
torch.distributed.init_process_group(backend="nccl")
global logger
def get_args(description='UniVL on Pretrain'):
    """Parse and validate command-line arguments for HowTo100M pretraining.

    Side effects on args: forces ``use_mil`` when ``sampled_use_mil`` is set,
    divides ``batch_size`` by ``gradient_accumulation_steps`` (micro-batch
    size), and expands ``checkpoint_model`` into a config-specific filename.
    Raises ValueError on invalid accumulation steps or missing --do_pretrain.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--do_pretrain", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    # Data locations and loader settings.
    parser.add_argument('--train_csv', type=str, default='data/HowTo100M_v1.csv', help='train csv')
    parser.add_argument('--features_path', type=str, default='feature', help='feature path for 2D features')
    parser.add_argument('--data_path', type=str, default='data/data.pickle', help='data pickle file path')
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    # Optimization hyper-parameters.
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=20, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--min_words', type=int, default=0, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='')
    parser.add_argument('--min_time', type=float, default=5.0, help='Gather small clips')
    parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
    parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
    parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
    parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
    # Model selection and initialization.
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--bert_model", default="bert-base-uncased", type=str, required=True,
                        help="Bert pre-trained model")
    parser.add_argument("--visual_model", default="visual-base", type=str, required=False, help="Visual module")
    parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
    parser.add_argument("--decoder_model", default="decoder-base", type=str, required=False, help="Decoder module")
    parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    # Distributed-training bookkeeping.
    parser.add_argument("--world_size", default=0, type=int, help="distribted training")
    parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
    parser.add_argument('--coef_lr', type=float, default=0.1, help='coefficient for bert branch.')
    parser.add_argument('--use_mil', action='store_true', help="Whether use MIL as Miech et. al. (2020).")
    parser.add_argument('--sampled_use_mil', action='store_true', help="Whether use MIL, has a high priority than use_mil.")
    # Architecture depth knobs.
    parser.add_argument('--text_num_hidden_layers', type=int, default=12, help="Layer NO. of text.")
    parser.add_argument('--visual_num_hidden_layers', type=int, default=6, help="Layer NO. of visual.")
    parser.add_argument('--cross_num_hidden_layers', type=int, default=2, help="Layer NO. of cross.")
    parser.add_argument('--decoder_num_hidden_layers', type=int, default=3, help="Layer NO. of decoder.")
    parser.add_argument('--stage_two', action='store_true', help="Whether training with decoder.")
    parser.add_argument('--pretrain_enhance_vmodal', action='store_true', help="Enhance visual and other modalities when pretraining.")
    parser.add_argument("--load_checkpoint", action="store_true")
    parser.add_argument("--checkpoint_model", default="pytorch_model.bin.checkpoint", type=str, required=False,
                        help="Save the last model as a checkpoint.")
    args = parser.parse_args()
    if args.sampled_use_mil:  # sample from each video, has a higher priority than use_mil.
        args.use_mil = True
    # Check parameters
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    if not args.do_pretrain:
        raise ValueError("`do_pretrain` must be True.")
    # Micro-batch size per optimizer step.
    args.batch_size = int(args.batch_size / args.gradient_accumulation_steps)
    # Make the checkpoint filename configuration-specific to avoid clashes.
    args.checkpoint_model = '{}_{}_{}_{}.checkpoint'.format(args.checkpoint_model, args.bert_model, args.max_words, args.max_frames)
    return args
def set_seed_logger(args):
    """Seed all RNGs, bind the process to its GPU, and install the global logger.

    Returns the (mutated) args namespace with ``world_size`` filled in.
    Must run after ``torch.distributed.init_process_group``.
    """
    global logger
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)  # if you are using multi-GPU.
    # Deterministic cuDNN kernels (disables autotuning).
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    world_size = torch.distributed.get_world_size()
    torch.cuda.set_device(args.local_rank)
    args.world_size = world_size
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, "log.txt"))
    # Only rank 0 logs the effective configuration.
    if args.local_rank == 0:
        logger.info("Effective parameters:")
        for key in sorted(args.__dict__):
            logger.info(" <<< {}: {}".format(key, args.__dict__[key]))
    return args
def init_device(args, local_rank):
    """Pick this rank's device and record GPU count on args.

    Raises ValueError when either batch size is not divisible by n_gpu.
    """
    global logger
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info("device: {} n_gpu: {}".format(device, n_gpu))
    args.n_gpu = n_gpu
    if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0:
        raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format(
            args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return device, n_gpu
def init_model(args, device, n_gpu, local_rank):
    """Instantiate UniVL (optionally from an --init_model state dict) and move it to device."""
    if args.init_model:
        model_state_dict = torch.load(args.init_model, map_location='cpu')
    else:
        model_state_dict = None
    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                  cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.):
    """Build the BertAdam optimizer (grouped LR/weight-decay) and wrap the model in DDP.

    Behavior: bias/LayerNorm parameters get weight_decay 0.0; all other
    parameters get 0.01; BERT-branch parameters get their LR scaled by
    ``coef_lr``. Returns (optimizer, scheduler=None, ddp_model).
    """
    if hasattr(model, 'module'):
        model = model.module
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # NOTE(review): the two names below are swapped relative to their content:
    # 'no_decay_param_tp' holds params NOT in the no_decay list (they DO get
    # weight decay), and vice versa. The resulting groups are still correct.
    no_decay_param_tp = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_decay)]
    decay_param_tp = [(n, p) for n, p in param_optimizer if any(nd in n for nd in no_decay)]
    no_decay_bert_param_tp = [(n, p) for n, p in no_decay_param_tp if "bert." in n]
    no_decay_nobert_param_tp = [(n, p) for n, p in no_decay_param_tp if "bert." not in n]
    decay_bert_param_tp = [(n, p) for n, p in decay_param_tp if "bert." in n]
    decay_nobert_param_tp = [(n, p) for n, p in decay_param_tp if "bert." not in n]
    optimizer_grouped_parameters = [
        {'params': [p for n, p in no_decay_bert_param_tp], 'weight_decay': 0.01, 'lr': args.lr * coef_lr},
        {'params': [p for n, p in no_decay_nobert_param_tp], 'weight_decay': 0.01},
        {'params': [p for n, p in decay_bert_param_tp], 'weight_decay': 0.0, 'lr': args.lr * coef_lr},
        {'params': [p for n, p in decay_nobert_param_tp], 'weight_decay': 0.0}
    ]
    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
                         schedule='warmup_linear', t_total=num_train_optimization_steps, weight_decay=0.01,
                         max_grad_norm=1.0)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
                                                      output_device=local_rank, find_unused_parameters=True)
    return optimizer, scheduler, model
def dataloader_pretrain(args, tokenizer, only_sim=False):
    """Build the distributed HowTo100M pretraining dataloader.

    Fix: the caption pickle is now opened with a context manager so the
    file handle is closed deterministically (``pickle.load(open(...))``
    previously leaked it until garbage collection).

    Returns (dataloader, dataset_length, sampler); the sampler is returned
    so the training loop can call ``set_epoch`` each epoch.
    """
    if args.local_rank == 0:
        logger.info('Loading captions: {}'.format(args.data_path))
    with open(args.data_path, 'rb') as pickle_file:
        data_dict = pickle.load(pickle_file)
    if args.local_rank == 0:
        logger.info('Done, data_dict length: {}'.format(len(data_dict)))
    dataset = Youtube_DataLoader(
        csv=args.train_csv,
        features_path=args.features_path,
        data_dict=data_dict,
        min_time=args.min_time,
        max_words=args.max_words,
        min_words=args.min_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        n_pair=args.n_pair,
        max_frames=args.max_frames,
        use_mil=args.use_mil,
        only_sim=only_sim,
        sampled_use_mil=args.sampled_use_mil,
        pretrain_enhance_vmodal=args.pretrain_enhance_vmodal,
        video_dim=args.video_dim,
    )
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    dataloader = DataLoader(
        dataset,
        # Global batch size is split evenly across the GPUs.
        batch_size=args.batch_size // args.n_gpu,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=(sampler is None),  # sampler is always set, so no shuffle
        sampler=sampler,
        drop_last=True,
    )
    return dataloader, len(dataset), sampler
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
    """Recursively cast every tensor in a (possibly nested) state dict to ``ttype``.

    Dicts are rebuilt as OrderedDicts, lists element-wise; non-tensor leaves
    are returned unchanged.

    Fix: the recursive calls previously dropped ``ttype``, so nested
    containers were always cast with the default FloatTensor regardless of
    the type requested by the caller.
    """
    if isinstance(state_dict, dict):
        cpu_dict = OrderedDict()
        for k, v in state_dict.items():
            cpu_dict[k] = convert_state_dict_type(v, ttype)
        return cpu_dict
    elif isinstance(state_dict, list):
        return [convert_state_dict_type(v, ttype) for v in state_dict]
    elif torch.is_tensor(state_dict):
        return state_dict.type(ttype)
    else:
        return state_dict
def save_model(epoch, args, model, local_rank, type_name="", global_step=-1, optimizer=None):
    """Save the unwrapped model's weights; optionally also write a resumable checkpoint.

    Always writes ``pytorch_model.bin.[<type_name>.]<epoch>``. When a
    ``global_step`` and ``optimizer`` are given, additionally writes a full
    checkpoint (epoch, step, model and optimizer state) to
    ``args.checkpoint_model`` for --load_checkpoint recovery.
    Returns the weights file path.
    """
    # Only save the model it-self (strip the DDP 'module.' wrapper).
    model_to_save = model.module if hasattr(model, 'module') else model
    output_model_file = os.path.join(
        args.output_dir, "pytorch_model.bin.{}{}".format("" if type_name=="" else type_name+".", epoch))
    torch.save(model_to_save.state_dict(), output_model_file)
    logger.info("Model saved to %s", output_model_file)
    if global_step != -1 and optimizer is not None:
        state_dict = {
            'epoch': epoch,
            'global_step': global_step,
            # Optimizer tensors are down-converted for a smaller checkpoint.
            'model_state_dict': model_to_save.state_dict(),
            'last_optimizer_state': convert_state_dict_type(optimizer.state_dict()),
        }
        checkpoint_model_file = os.path.join(args.output_dir, args.checkpoint_model)
        torch.save(state_dict, checkpoint_model_file)
        logger.info("Checkpoint is saved. use `load_checkpoint` to recovery it.")
    return output_model_file
def load_model(epoch, args, n_gpu, device, model, global_step=0, model_file=None):
    """Restore a model either from the resumable checkpoint or a weights file.

    Priority: when ``epoch == -1`` and --load_checkpoint is set and the
    checkpoint file exists, the full checkpoint (epoch, global_step, model
    and optimizer state) is restored; otherwise a plain weights file is
    loaded if present; otherwise the passed-in model is returned unchanged.
    Returns (epoch, global_step, last_optim_state_or_None, model).
    """
    if model_file is None or len(model_file) == 0:
        model_file = os.path.join(args.output_dir, "pytorch_model.bin.{}".format(epoch))
    last_optim_state = None
    checkpoint_model_file = os.path.join(args.output_dir, args.checkpoint_model)
    if epoch == -1 and args.load_checkpoint and os.path.exists(checkpoint_model_file):
        # Full resume path: restores training progress and optimizer state.
        checkpoint_state = torch.load(checkpoint_model_file, map_location='cpu')
        epoch = checkpoint_state['epoch']
        global_step = checkpoint_state['global_step']
        model_state_dict = checkpoint_state['model_state_dict']
        last_optim_state = checkpoint_state['last_optimizer_state']
        cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
        model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                      cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
        model.to(device)
        if args.local_rank == 0:
            logger.info("Checkpoint loaded from %s", checkpoint_model_file)
    elif os.path.exists(model_file):
        # Weights-only path: no optimizer/progress restoration.
        model_state_dict = torch.load(model_file, map_location='cpu')
        if args.local_rank == 0:
            logger.info("Model loaded from %s", model_file)
        cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
        model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                      cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
        model.to(device)
    return epoch, global_step, last_optim_state, model
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0):
    """Run one pretraining epoch (masked text/video + caption objectives).

    Returns (mean_loss_over_steps, updated_global_step). Optimizer steps
    happen every ``gradient_accumulation_steps`` micro-batches; rank 0 logs
    every ``args.n_display`` optimizer steps.
    """
    global logger
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    for step, batch in enumerate(train_dataloader):
        batch = tuple(t.to(device=device, non_blocking=True) for t in batch)
        input_ids, input_mask, segment_ids, video, video_mask, \
        pairs_masked_text, pairs_token_labels, masked_video, video_labels_index,\
        pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids = batch
        loss = model(input_ids, segment_ids, input_mask, video, video_mask,
                     pairs_masked_text=pairs_masked_text, pairs_token_labels=pairs_token_labels,
                     masked_video=masked_video, video_labels_index=video_labels_index,
                     input_caption_ids=pairs_input_caption_ids, decoder_mask=pairs_decoder_mask,
                     output_caption_ids=pairs_output_caption_ids)
        if n_gpu > 1:
            loss = loss.mean()  # average across GPUs
        if args.gradient_accumulation_steps > 1:
            # Scale so the accumulated gradient matches a full batch.
            loss = loss / args.gradient_accumulation_steps
        loss.backward()
        total_loss += float(loss)
        if (step + 1) % args.gradient_accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            if scheduler is not None:
                scheduler.step()
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1
            if global_step % log_step == 0 and local_rank == 0:
                logger.info("Epoch: %d/%s, Step: %d/%d, Lr: %s, Loss: %f, Time/step: %f", epoch + 1,
                            args.epochs, step + 1,
                            len(train_dataloader), "-".join([str('%.6f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]),
                            float(loss),
                            (time.time() - start_time) / (log_step * args.gradient_accumulation_steps))
                start_time = time.time()
    total_loss = total_loss / len(train_dataloader)
    return total_loss, global_step
def main():
    """Entry point for HowTo100M pretraining with optional checkpoint resume."""
    global logger
    args = get_args()
    args = set_seed_logger(args)
    device, n_gpu = init_device(args, args.local_rank)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    model = init_model(args, device, n_gpu, args.local_rank)
    # Stage one trains similarity only (no decoder objectives).
    only_sim = model.module._stage_one if hasattr(model, 'module') else model._stage_one
    train_dataloader, train_length, sampler = dataloader_pretrain(args, tokenizer, only_sim=only_sim)
    # NOTE(review): plain '/' makes this a float; presumably tolerated by
    # BertAdam's t_total math — confirm before changing to '//'.
    num_train_optimization_steps = (int(len(train_dataloader) + args.gradient_accumulation_steps - 1)
                                    / args.gradient_accumulation_steps) * args.epochs
    global_step = 0
    epoch = -1
    last_optim_state = None
    # Resume epoch/step/model/optimizer state from the checkpoint when requested.
    if args.load_checkpoint:
        epoch, global_step, last_optim_state, model = load_model(epoch, args, n_gpu, device, model, global_step=global_step)
        epoch += 1
        if args.local_rank == 0:
            logger.warning("Will continue to epoch: {}".format(epoch))
    epoch = 0 if epoch < 0 else epoch
    coef_lr = args.coef_lr
    # When starting from an explicit init model, do not down-scale the BERT LR.
    if args.init_model:
        coef_lr = 1.0
    optimizer, scheduler, model = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
    if last_optim_state is not None:
        optimizer.load_state_dict(last_optim_state)
    if args.local_rank == 0:
        logger.info("***** Running pretraining *****")
        logger.info(" Num examples = %d", train_length)
        logger.info(" Batch size = %d", args.batch_size)
        logger.info(" Num steps = %d", num_train_optimization_steps * args.gradient_accumulation_steps)
    # Skip epochs already completed before the resume point.
    iter_ls_ = [itm for itm in range(args.epochs) if itm >= epoch]
    for epoch in iter_ls_:
        sampler.set_epoch(epoch)  # reshuffle the distributed sampler
        tr_loss, global_step = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer,
                                           scheduler, global_step, local_rank=args.local_rank)
        if args.local_rank == 0:
            logger.info("Epoch %d/%s Finished, Train Loss: %f", epoch + 1, args.epochs, tr_loss)
            save_model(epoch, args, model, args.local_rank, type_name="pretrain", global_step=global_step, optimizer=optimizer)
if __name__ == "__main__":
main() | 19,914 | 47.691932 | 140 | py |
UniVL | UniVL-main/main_task_caption.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import torch
from torch.utils.data import (SequentialSampler)
import numpy as np
import random
import os
from collections import OrderedDict
from nlgeval import NLGEval
import time
import argparse
from modules.tokenization import BertTokenizer
from modules.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modules.modeling import UniVL
from modules.optimization import BertAdam
from modules.beam import Beam
from torch.utils.data import DataLoader
from dataloaders.dataloader_youcook_caption import Youcook_Caption_DataLoader
from dataloaders.dataloader_msrvtt_caption import MSRVTT_Caption_DataLoader
from util import get_logger
torch.distributed.init_process_group(backend="nccl")
global logger
def get_args(description='UniVL on Caption Task'):
    """Parse and validate command-line arguments for caption fine-tuning.

    Raises ValueError when gradient_accumulation_steps < 1 or when neither
    --do_train nor --do_eval is given. Before returning, rescales
    args.batch_size down to the per-backward-pass micro-batch size.
    """
    parser = argparse.ArgumentParser(description=description)
    # Run modes.
    parser.add_argument("--do_pretrain", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    # Data locations and feature settings.
    parser.add_argument('--train_csv', type=str, default='data/youcookii_singlef_train.csv', help='')
    parser.add_argument('--val_csv', type=str, default='data/youcookii_singlef_val.csv', help='')
    parser.add_argument('--data_path', type=str, default='data/youcookii_caption_transcript.pickle',
                        help='caption and transcription pickle file path')
    parser.add_argument('--features_path', type=str, default='data/youcookii_videos_feature.pickle',
                        help='feature path for 2D features')
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    # Optimization hyper-parameters.
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=20, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='')
    parser.add_argument('--min_time', type=float, default=5.0, help='Gather small clips')
    parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
    parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
    parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
    parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
    # Model / checkpoint configuration.
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--bert_model", default="bert-base-uncased", type=str, required=True, help="Bert pre-trained model")
    parser.add_argument("--visual_model", default="visual-base", type=str, required=False, help="Visual module")
    parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
    parser.add_argument("--decoder_model", default="decoder-base", type=str, required=False, help="Decoder module")
    parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                        "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--task_type", default="caption", type=str, help="Point the task `caption` to finetune.")
    parser.add_argument("--datatype", default="youcook", type=str, help="Point the dataset `youcook` to finetune.")
    # Distributed-training bookkeeping (world_size is filled in at runtime).
    parser.add_argument("--world_size", default=0, type=int, help="distribted training")
    parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
    parser.add_argument('--coef_lr', type=float, default=0.1, help='coefficient for bert branch.')
    parser.add_argument('--use_mil', action='store_true', help="Whether use MIL as Miech et. al. (2020).")
    parser.add_argument('--sampled_use_mil', action='store_true', help="Whether use MIL, has a high priority than use_mil.")
    # Architecture depth per module.
    parser.add_argument('--text_num_hidden_layers', type=int, default=12, help="Layer NO. of text.")
    parser.add_argument('--visual_num_hidden_layers', type=int, default=6, help="Layer NO. of visual.")
    parser.add_argument('--cross_num_hidden_layers', type=int, default=2, help="Layer NO. of cross.")
    parser.add_argument('--decoder_num_hidden_layers', type=int, default=3, help="Layer NO. of decoder.")
    parser.add_argument('--stage_two', action='store_true', help="Whether training with decoder.")
    args = parser.parse_args()
    # Check parameters.
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    # batch_size becomes the micro-batch size used per backward pass.
    args.batch_size = int(args.batch_size / args.gradient_accumulation_steps)
    return args
def set_seed_logger(args):
    """Seed every RNG for reproducibility, record world size, and set up the run logger."""
    global logger
    # predefining random initial seeds
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)  # if you are using multi-GPU.
    # Trade cudnn autotuning for determinism.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # NOTE(review): requires torch.distributed to be initialized before this call
    # (done at module import via init_process_group above).
    world_size = torch.distributed.get_world_size()
    torch.cuda.set_device(args.local_rank)
    args.world_size = world_size
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, "log.txt"))
    if args.local_rank == 0:
        logger.info("Effective parameters:")
        for key in sorted(args.__dict__):
            logger.info(" <<< {}: {}".format(key, args.__dict__[key]))
    return args
def init_device(args, local_rank):
    """Pick the device for this rank and validate batch sizes against GPU count.

    Returns (device, n_gpu). Raises ValueError when batch_size or
    batch_size_val is not divisible by the number of visible GPUs.
    """
    global logger
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info("device: {} n_gpu: {}".format(device, n_gpu))
    args.n_gpu = n_gpu  # recorded so loaders can derive per-GPU batch sizes
    if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0:
        raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format(
            args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return device, n_gpu
def init_model(args, device, n_gpu, local_rank):
    """Instantiate UniVL (optionally warm-started from args.init_model) and move it to device."""
    if args.init_model:
        # Load the warm-start checkpoint on CPU; from_pretrained merges it in.
        model_state_dict = torch.load(args.init_model, map_location='cpu')
    else:
        model_state_dict = None
    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                  cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.):
    """Build the BertAdam optimizer (no scheduler) and wrap the model in DDP.

    Grouping: parameters whose names avoid the bias/LayerNorm list receive
    weight_decay=0.01, the rest 0.0; every "bert." parameter additionally uses
    the scaled learning rate args.lr * coef_lr.
    """
    if hasattr(model, 'module'):
        model = model.module

    named_params = list(model.named_parameters())
    norm_or_bias = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    # NOTE: the historical code carried these two lists under swapped names;
    # the grouping below is behaviorally identical with honest names.
    decayed = [(n, p) for n, p in named_params if not any(tag in n for tag in norm_or_bias)]
    undecayed = [(n, p) for n, p in named_params if any(tag in n for tag in norm_or_bias)]

    def _is_bert(name):
        return "bert." in name

    optimizer_grouped_parameters = [
        {'params': [p for n, p in decayed if _is_bert(n)], 'weight_decay': 0.01, 'lr': args.lr * coef_lr},
        {'params': [p for n, p in decayed if not _is_bert(n)], 'weight_decay': 0.01},
        {'params': [p for n, p in undecayed if _is_bert(n)], 'weight_decay': 0.0, 'lr': args.lr * coef_lr},
        {'params': [p for n, p in undecayed if not _is_bert(n)], 'weight_decay': 0.0},
    ]

    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
                         schedule='warmup_linear', t_total=num_train_optimization_steps, weight_decay=0.01,
                         max_grad_norm=1.0)

    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
                                                      output_device=local_rank, find_unused_parameters=True)
    return optimizer, scheduler, model
def dataloader_youcook_train(args, tokenizer):
    """Distributed, shuffled training loader over the YoucookII caption split.

    Returns (dataloader, dataset_size, sampler).
    """
    dataset = Youcook_Caption_DataLoader(
        csv=args.train_csv,
        data_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
    )
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size // args.n_gpu,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=(sampler is None),
        sampler=sampler,
        drop_last=True,
    )
    return loader, len(dataset), sampler
def dataloader_youcook_test(args, tokenizer):
    """Sequential evaluation loader over the YoucookII validation split.

    Returns (dataloader, dataset_size).
    """
    dataset = Youcook_Caption_DataLoader(
        csv=args.val_csv,
        data_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
    )
    loader = DataLoader(
        dataset,
        sampler=SequentialSampler(dataset),
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        pin_memory=False,
    )
    if args.local_rank == 0:
        logger.info('YoucookII validation pairs: {}'.format(len(dataset)))
    return loader, len(dataset)
def dataloader_msrvtt_train(args, tokenizer):
    """Distributed, shuffled training loader over the MSR-VTT caption split.

    Returns (dataloader, dataset_size, sampler).
    """
    dataset = MSRVTT_Caption_DataLoader(
        csv_path=args.train_csv,
        json_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        split_type="train",
    )
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size // args.n_gpu,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=(sampler is None),
        sampler=sampler,
        drop_last=True,
    )
    return loader, len(dataset), sampler
def dataloader_msrvtt_test(args, tokenizer, split_type="test",):
    """Sequential evaluation loader over an MSR-VTT split (default: "test").

    Returns (dataloader, dataset_size).
    """
    dataset = MSRVTT_Caption_DataLoader(
        csv_path=args.val_csv,
        json_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        split_type=split_type,
    )
    loader = DataLoader(
        dataset,
        sampler=SequentialSampler(dataset),
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        drop_last=False,
    )
    return loader, len(dataset)
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
    """Recursively cast every tensor in a (possibly nested) state dict to `ttype`.

    Dicts are rebuilt as OrderedDicts, lists element-wise; non-tensor leaves
    are returned unchanged.

    BUGFIX: the recursive calls previously dropped `ttype`, so nested tensors
    were always cast to the default FloatTensor regardless of the requested type.
    """
    if isinstance(state_dict, dict):
        cpu_dict = OrderedDict()
        for k, v in state_dict.items():
            cpu_dict[k] = convert_state_dict_type(v, ttype)  # propagate requested type
        return cpu_dict
    elif isinstance(state_dict, list):
        return [convert_state_dict_type(v, ttype) for v in state_dict]
    elif torch.is_tensor(state_dict):
        return state_dict.type(ttype)
    else:
        return state_dict
def save_model(epoch, args, model, type_name=""):
    """Serialize the (unwrapped) model state dict for `epoch`; returns the file path."""
    # Only save the model it-self
    model_to_save = model.module if hasattr(model, 'module') else model
    output_model_file = os.path.join(
        args.output_dir, "pytorch_model.bin.{}{}".format("" if type_name=="" else type_name+".", epoch))
    torch.save(model_to_save.state_dict(), output_model_file)
    logger.info("Model saved to %s", output_model_file)
    return output_model_file
def load_model(epoch, args, n_gpu, device, model_file=None):
    """Rebuild UniVL from a saved checkpoint (default: this epoch's file in output_dir).

    Returns the model moved to `device`, or None when the checkpoint file
    does not exist.
    """
    if model_file is None or len(model_file) == 0:
        model_file = os.path.join(args.output_dir, "pytorch_model.bin.{}".format(epoch))
    if os.path.exists(model_file):
        model_state_dict = torch.load(model_file, map_location='cpu')
        if args.local_rank == 0:
            logger.info("Model loaded from %s", model_file)
        # Prepare model
        cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
        model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                      cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
        model.to(device)
    else:
        model = None
    return model
def train_epoch(epoch, args, model, train_dataloader, tokenizer, device, n_gpu, optimizer, scheduler,
                global_step, nlgEvalObj=None, local_rank=0):
    """Run one training epoch with gradient accumulation.

    Returns (mean loss over all batches, updated global optimizer-step count).
    """
    global logger
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    for step, batch in enumerate(train_dataloader):
        # if n_gpu == 1:
        #     # multi-gpu does scattering it-self
        #     batch = tuple(t.to(device) for t in batch)
        batch = tuple(t.to(device=device, non_blocking=True) for t in batch)
        input_ids, input_mask, segment_ids, video, video_mask, \
        pairs_masked_text, pairs_token_labels, masked_video, video_labels_index,\
        pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids = batch
        loss = model(input_ids, segment_ids, input_mask, video, video_mask,
                     pairs_masked_text=pairs_masked_text, pairs_token_labels=pairs_token_labels,
                     masked_video=masked_video, video_labels_index=video_labels_index,
                     input_caption_ids=pairs_input_caption_ids, decoder_mask=pairs_decoder_mask,
                     output_caption_ids=pairs_output_caption_ids)
        if n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu.
        if args.gradient_accumulation_steps > 1:
            # Scale so the accumulated gradient matches a full-batch step.
            loss = loss / args.gradient_accumulation_steps
        loss.backward()
        total_loss += float(loss)
        # Apply the optimizer only every `gradient_accumulation_steps` batches.
        if (step + 1) % args.gradient_accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            if scheduler is not None:
                scheduler.step()  # Update learning rate schedule
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1
            if global_step % log_step == 0 and local_rank == 0:
                logger.info("Epoch: %d/%s, Step: %d/%d, Lr: %s, Loss: %f, Time/step: %f", epoch + 1,
                            args.epochs, step + 1,
                            len(train_dataloader), "-".join([str('%.6f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]),
                            float(loss),
                            (time.time() - start_time) / (log_step * args.gradient_accumulation_steps))
                start_time = time.time()
    total_loss = total_loss / len(train_dataloader)
    return total_loss, global_step
# ---------------------------------------->
def get_inst_idx_to_tensor_position_map(inst_idx_list):
    """Map each instance index to its row position in the batched tensor."""
    positions = {}
    for row, inst_idx in enumerate(inst_idx_list):
        positions[inst_idx] = row
    return positions
def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
    """Keep only the rows of a beam-repeated tensor that belong to active instances.

    The tensor is reshaped to one row per instance (n_bm beams flattened
    together), the active rows are selected, and the original layout restored.
    """
    trailing_dims = list(beamed_tensor.size())[1:]
    per_instance = beamed_tensor.view(n_prev_active_inst, -1)
    selected = per_instance.index_select(0, curr_active_inst_idx)
    return selected.view(len(curr_active_inst_idx) * n_bm, *trailing_dims)
def collate_active_info(input_tuples, inst_idx_to_position_map, active_inst_idx_list, n_bm, device):
    """Shrink every beam-repeated tensor down to the still-active sentences.

    Sentences which are still active are collected so the decoder will not run
    on completed ones. Returns the filtered 5-tuple of tensors and a fresh
    index-to-position map for the surviving instances.
    """
    assert isinstance(input_tuples, tuple)
    sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt = input_tuples

    n_prev_active_inst = len(inst_idx_to_position_map)
    positions = [inst_idx_to_position_map[k] for k in active_inst_idx_list]
    positions = torch.LongTensor(positions).to(device)

    active_tensors = tuple(
        collect_active_part(tensor, positions, n_prev_active_inst, n_bm)
        for tensor in (sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt)
    )
    active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
    return active_tensors, active_inst_idx_to_position_map
def beam_decode_step(decoder, inst_dec_beams, len_dec_seq,
                     inst_idx_to_position_map, n_bm, device, input_tuples, decoder_length=None):
    assert isinstance(input_tuples, tuple)
    ''' Decode and update beam status, and then return active beam idx'''
    def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
        # Stack the partial sequences of every unfinished beam into one batch.
        dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]
        dec_partial_seq = torch.stack(dec_partial_seq).to(device)
        dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)
        return dec_partial_seq
    def predict_word(next_decoder_ids, n_active_inst, n_bm, device, input_tuples):
        # One decoder forward over all active beams; keep only the last step's
        # logits and convert them to per-beam log-probabilities.
        sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt = input_tuples
        next_decoder_mask = torch.ones(next_decoder_ids.size(), dtype=torch.uint8).to(device)
        dec_output = decoder(sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt,
                             video_mask_rpt, next_decoder_ids, next_decoder_mask, shaped=True, get_logits=True)
        dec_output = dec_output[:, -1, :]
        word_prob = torch.nn.functional.log_softmax(dec_output, dim=1)
        word_prob = word_prob.view(n_active_inst, n_bm, -1)
        return word_prob
    def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map, decoder_length=None):
        # Advance each beam with its probabilities; keep indices of beams
        # that have not yet produced a complete hypothesis.
        active_inst_idx_list = []
        for inst_idx, inst_position in inst_idx_to_position_map.items():
            if decoder_length is None:
                is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
            else:
                is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position], word_length=decoder_length[inst_idx])
            if not is_inst_complete:
                active_inst_idx_list += [inst_idx]
        return active_inst_idx_list
    n_active_inst = len(inst_idx_to_position_map)
    dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
    word_prob = predict_word(dec_seq, n_active_inst, n_bm, device, input_tuples)
    # Update the beam with predicted word prob information and collect incomplete instances
    active_inst_idx_list = collect_active_inst_idx_list(inst_dec_beams, word_prob, inst_idx_to_position_map,
                                                        decoder_length=decoder_length)
    return active_inst_idx_list
def collect_hypothesis_and_scores(inst_dec_beams, n_best):
    """Gather the n_best highest-scoring hypotheses (and their scores) per beam."""
    all_hyp = []
    all_scores = []
    for beam in inst_dec_beams:
        scores, tail_idxs = beam.sort_scores()
        all_scores.append(scores[:n_best])
        all_hyp.append([beam.get_hypothesis(i) for i in tail_idxs[:n_best]])
    return all_hyp, all_scores
# >----------------------------------------
def eval_epoch(args, model, test_dataloader, tokenizer, device, n_gpu, nlgEvalObj=None, test_set=None):
    """Beam-search captions for the whole test set, dump hyp/ref files, and return BLEU-4."""
    if hasattr(model, 'module'):
        model = model.module.to(device)
    if model._stage_one:
        return 0.  # stage-one models have no decoder to caption with
    all_result_lists = []
    all_caption_lists = []
    model.eval()
    for batch in test_dataloader:
        batch = tuple(t.to(device, non_blocking=True) for t in batch)
        input_ids, input_mask, segment_ids, video, video_mask, \
        pairs_masked_text, pairs_token_labels, masked_video, video_labels_index, \
        pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids = batch
        with torch.no_grad():
            sequence_output, visual_output = model.get_sequence_visual_output(input_ids, segment_ids, input_mask, video, video_mask)
            # -- Repeat data for beam search
            n_bm = 5 # beam_size
            device = sequence_output.device
            n_inst, len_s, d_h = sequence_output.size()
            _, len_v, v_h = visual_output.size()
            decoder = model.decoder_caption
            # Note: shaped first, then decoder need the parameter shaped=True
            input_ids = input_ids.view(-1, input_ids.shape[-1])
            input_mask = input_mask.view(-1, input_mask.shape[-1])
            video_mask = video_mask.view(-1, video_mask.shape[-1])
            # Each instance is repeated n_bm times so beams decode in one batch.
            sequence_output_rpt = sequence_output.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)
            visual_output_rpt = visual_output.repeat(1, n_bm, 1).view(n_inst * n_bm, len_v, v_h)
            input_ids_rpt = input_ids.repeat(1, n_bm).view(n_inst * n_bm, len_s)
            input_mask_rpt = input_mask.repeat(1, n_bm).view(n_inst * n_bm, len_s)
            video_mask_rpt = video_mask.repeat(1, n_bm).view(n_inst * n_bm, len_v)
            # -- Prepare beams
            inst_dec_beams = [Beam(n_bm, device=device, tokenizer=tokenizer) for _ in range(n_inst)]
            # -- Bookkeeping for active or not
            active_inst_idx_list = list(range(n_inst))
            inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
            # -- Decode
            for len_dec_seq in range(1, args.max_words + 1):
                active_inst_idx_list = beam_decode_step(decoder, inst_dec_beams,
                                                        len_dec_seq, inst_idx_to_position_map, n_bm, device,
                                                        (sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt))
                if not active_inst_idx_list:
                    break  # all instances have finished their path to <EOS>
                # Shrink every repeated tensor down to the beams still running.
                (sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt), \
                inst_idx_to_position_map = collate_active_info((sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt),
                                                               inst_idx_to_position_map, active_inst_idx_list, n_bm, device)
            batch_hyp, batch_scores = collect_hypothesis_and_scores(inst_dec_beams, 1)
            result_list = [batch_hyp[i][0] for i in range(n_inst)]
            pairs_output_caption_ids = pairs_output_caption_ids.view(-1, pairs_output_caption_ids.shape[-1])
            caption_list = pairs_output_caption_ids.cpu().detach().numpy()
            # Detokenize hypotheses: cut at [SEP]/[PAD], then undo WordPiece "##" joins.
            for re_idx, re_list in enumerate(result_list):
                decode_text_list = tokenizer.convert_ids_to_tokens(re_list)
                if "[SEP]" in decode_text_list:
                    SEP_index = decode_text_list.index("[SEP]")
                    decode_text_list = decode_text_list[:SEP_index]
                if "[PAD]" in decode_text_list:
                    PAD_index = decode_text_list.index("[PAD]")
                    decode_text_list = decode_text_list[:PAD_index]
                decode_text = ' '.join(decode_text_list)
                decode_text = decode_text.replace(" ##", "").strip("##").strip()
                all_result_lists.append(decode_text)
            # Detokenize the ground-truth caption ids the same way.
            for re_idx, re_list in enumerate(caption_list):
                decode_text_list = tokenizer.convert_ids_to_tokens(re_list)
                if "[SEP]" in decode_text_list:
                    SEP_index = decode_text_list.index("[SEP]")
                    decode_text_list = decode_text_list[:SEP_index]
                if "[PAD]" in decode_text_list:
                    PAD_index = decode_text_list.index("[PAD]")
                    decode_text_list = decode_text_list[:PAD_index]
                decode_text = ' '.join(decode_text_list)
                decode_text = decode_text.replace(" ##", "").strip("##").strip()
                all_caption_lists.append(decode_text)
    # Save full results
    if test_set is not None and hasattr(test_set, 'iter2video_pairs_dict'):
        hyp_path = os.path.join(args.output_dir, "hyp_complete_results.txt")
        with open(hyp_path, "w", encoding='utf-8') as writer:
            writer.write("{}\t{}\t{}\n".format("video_id", "start_time", "caption"))
            for idx, pre_txt in enumerate(all_result_lists):
                video_id, sub_id = test_set.iter2video_pairs_dict[idx]
                start_time = test_set.data_dict[video_id]['start'][sub_id]
                writer.write("{}\t{}\t{}\n".format(video_id, start_time, pre_txt))
        logger.info("File of complete results is saved in {}".format(hyp_path))
    # Save pure results
    hyp_path = os.path.join(args.output_dir, "hyp.txt")
    with open(hyp_path, "w", encoding='utf-8') as writer:
        for pre_txt in all_result_lists:
            writer.write(pre_txt+"\n")
    ref_path = os.path.join(args.output_dir, "ref.txt")
    with open(ref_path, "w", encoding='utf-8') as writer:
        for ground_txt in all_caption_lists:
            writer.write(ground_txt + "\n")
    if args.datatype == "msrvtt":
        # MSR-VTT is multi-reference: rebuild references per video from the dataset.
        all_caption_lists = []
        sentences_dict = test_dataloader.dataset.sentences_dict
        video_sentences_dict = test_dataloader.dataset.video_sentences_dict
        for idx in range(len(sentences_dict)):
            video_id, _ = sentences_dict[idx]
            sentences = video_sentences_dict[video_id]
            all_caption_lists.append(sentences)
        all_caption_lists = [list(itms) for itms in zip(*all_caption_lists)]
    else:
        all_caption_lists = [all_caption_lists]
    # Evaluate
    metrics_nlg = nlgEvalObj.compute_metrics(ref_list=all_caption_lists, hyp_list=all_result_lists)
    logger.info(">>> BLEU_1: {:.4f}, BLEU_2: {:.4f}, BLEU_3: {:.4f}, BLEU_4: {:.4f}".
                format(metrics_nlg["Bleu_1"], metrics_nlg["Bleu_2"], metrics_nlg["Bleu_3"], metrics_nlg["Bleu_4"]))
    logger.info(">>> METEOR: {:.4f}, ROUGE_L: {:.4f}, CIDEr: {:.4f}".format(metrics_nlg["METEOR"], metrics_nlg["ROUGE_L"], metrics_nlg["CIDEr"]))
    Bleu_4 = metrics_nlg["Bleu_4"]
    return Bleu_4
# Registry mapping --datatype to its {"train", "val"} dataloader builders.
DATALOADER_DICT = {}
DATALOADER_DICT["youcook"] = {"train":dataloader_youcook_train, "val":dataloader_youcook_test}
DATALOADER_DICT["msrvtt"] = {"train":dataloader_msrvtt_train, "val":dataloader_msrvtt_test}
def main():
    """Entry point: build data/model/optimizer, then train (tracking best BLEU-4) or just evaluate."""
    global logger
    args = get_args()
    args = set_seed_logger(args)
    device, n_gpu = init_device(args, args.local_rank)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    model = init_model(args, device, n_gpu, args.local_rank)
    assert args.task_type == "caption"
    nlgEvalObj = NLGEval(no_overlap=False, no_skipthoughts=True, no_glove=True, metrics_to_omit=None)
    assert args.datatype in DATALOADER_DICT
    test_dataloader, test_length = DATALOADER_DICT[args.datatype]["val"](args, tokenizer)
    if args.local_rank == 0:
        logger.info("***** Running test *****")
        logger.info(" Num examples = %d", test_length)
        logger.info(" Batch size = %d", args.batch_size_val)
        logger.info(" Num steps = %d", len(test_dataloader))
    if args.do_train:
        train_dataloader, train_length, train_sampler = DATALOADER_DICT[args.datatype]["train"](args, tokenizer)
        # Ceil-divide batches by accumulation steps, then scale by epoch count.
        num_train_optimization_steps = (int(len(train_dataloader) + args.gradient_accumulation_steps - 1)
                                        / args.gradient_accumulation_steps) * args.epochs
        coef_lr = args.coef_lr
        if args.init_model:
            coef_lr = 1.0  # warm-started model: no reduced lr for the bert branch
        optimizer, scheduler, model = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
        if args.local_rank == 0:
            logger.info("***** Running training *****")
            logger.info(" Num examples = %d", train_length)
            logger.info(" Batch size = %d", args.batch_size)
            logger.info(" Num steps = %d", num_train_optimization_steps * args.gradient_accumulation_steps)
        best_score = 0.00001
        best_output_model_file = None
        global_step = 0
        for epoch in range(args.epochs):
            train_sampler.set_epoch(epoch)  # reshuffle distributed shards each epoch
            tr_loss, global_step = train_epoch(epoch, args, model, train_dataloader, tokenizer, device, n_gpu, optimizer,
                                               scheduler, global_step, nlgEvalObj=nlgEvalObj, local_rank=args.local_rank)
            if args.local_rank == 0:
                logger.info("Epoch %d/%s Finished, Train Loss: %f", epoch + 1, args.epochs, tr_loss)
                output_model_file = save_model(epoch, args, model, type_name="")
                if epoch > 0:
                    # Evaluate from the second epoch on and keep the best checkpoint.
                    Bleu_4 = eval_epoch(args, model, test_dataloader, tokenizer, device, n_gpu, nlgEvalObj=nlgEvalObj)
                    if best_score <= Bleu_4:
                        best_score = Bleu_4
                        best_output_model_file = output_model_file
                    logger.info("The best model is: {}, the Bleu_4 is: {:.4f}".format(best_output_model_file, best_score))
                else:
                    logger.warning("Skip the evaluation after {}-th epoch.".format(epoch+1))
        if args.local_rank == 0:
            # Final evaluation with the best checkpoint reloaded.
            model = load_model(-1, args, n_gpu, device, model_file=best_output_model_file)
            eval_epoch(args, model, test_dataloader, tokenizer, device, n_gpu, nlgEvalObj=nlgEvalObj)
    elif args.do_eval:
        if args.local_rank == 0:
            eval_epoch(args, model, test_dataloader, tokenizer, device, n_gpu, nlgEvalObj=nlgEvalObj)
if __name__ == "__main__":
main() | 33,617 | 47.792453 | 151 | py |
UniVL | UniVL-main/util.py | import torch
import torch.nn as nn
import threading
from torch._utils import ExceptionWrapper
import logging
def get_a_var(obj):
    """Depth-first search for the first torch.Tensor inside obj; None if absent.

    Recurses into lists, tuples, and dict items (key/value pairs).
    """
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        children = obj
    elif isinstance(obj, dict):
        children = obj.items()
    else:
        return None
    for child in children:
        found = get_a_var(child)
        if isinstance(found, torch.Tensor):
            return found
    return None
def parallel_apply(fct, model, inputs, device_ids):
    """Replicate `model` over `device_ids` and run fct(replica, *input) in parallel threads.

    Returns the per-replica outputs in input order. An exception raised in a
    worker is captured as an ExceptionWrapper and re-raised in the caller.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert len(modules) == len(inputs)
    lock = threading.Lock()
    results = {}
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input):
        # Propagate the caller's grad mode into this worker thread.
        torch.set_grad_enabled(grad_enabled)
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # this also avoids accidental slicing of `input` if it is a Tensor
                if not isinstance(input, (list, tuple)):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            with lock:
                results[i] = ExceptionWrapper(where="in replica {} on device {}".format(i, device))
    if len(modules) > 1:
        threads = [threading.Thread(target=_worker, args=(i, module, input))
                   for i, (module, input) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline, no thread overhead.
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
def get_logger(filename=None):
    """Return the shared 'logger' logger at DEBUG level.

    When `filename` is given, a DEBUG-level FileHandler is also attached to
    the root logger so every record is mirrored into that file.
    """
    log = logging.getLogger('logger')
    log.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)
    if filename is not None:
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logging.getLogger().addHandler(file_handler)
    return log
UniVL | UniVL-main/metrics.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import numpy as np
def compute_metrics(x):
    """Rank-based retrieval metrics from a square similarity matrix.

    x[i, j] is the similarity of query i to candidate j; the ground-truth
    match is the diagonal. Returns R@1, R@5, R@10 and the median rank (1-based).
    """
    neg_sorted = np.sort(-x, axis=1)          # each row sorted by descending similarity
    gt = np.diag(-x)[:, np.newaxis]           # negated ground-truth similarity per row
    ranks = np.where(neg_sorted - gt == 0)[1] # column index == rank of the diagonal entry
    n = len(ranks)
    return {
        'R1': float(np.sum(ranks == 0)) / n,
        'R5': float(np.sum(ranks < 5)) / n,
        'R10': float(np.sum(ranks < 10)) / n,
        'MR': np.median(ranks) + 1,
    }
def print_computed_metrics(metrics):
    """Pretty-print the retrieval metrics dict produced by compute_metrics."""
    print('R@1: {:.4f} - R@5: {:.4f} - R@10: {:.4f} - Median R: {}'.format(
        metrics['R1'], metrics['R5'], metrics['R10'], metrics['MR']))
| 796 | 27.464286 | 92 | py |
UniVL | UniVL-main/modules/module_visual.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'visual_config.json'
WEIGHTS_NAME = 'visual_pytorch_model.bin'
class VisualConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `VisualModel`."""
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    config_name = CONFIG_NAME
    weights_name = WEIGHTS_NAME
    def __init__(self,
                 vocab_size_or_config_json_file=4096,
                 hidden_size=768,
                 num_hidden_layers=3,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 initializer_range=0.02):
        """Constructs VisualConfig.
        Args:
            vocab_size_or_config_json_file: Either an int giving the dimensionality of
                the raw visual feature vectors (it becomes `vocab_size`, the
                in-features of the linear projection in `VisualEmbeddings`), or the
                path of a JSON config file whose keys are loaded verbatim onto
                this instance.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key from the JSON file directly onto the instance.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")
class VisualEmbeddings(nn.Module):
    """Project raw visual features to the hidden size and add learned position embeddings."""
    def __init__(self, config):
        super(VisualEmbeddings, self).__init__()
        self.word_embeddings = nn.Linear(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        # `LayerNorm` keeps its TensorFlow-style capitalization so TF checkpoints map cleanly.
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, input_embeddings):
        batch_size = input_embeddings.size(0)
        num_tokens = input_embeddings.size(1)
        pos_ids = torch.arange(num_tokens, dtype=torch.long, device=input_embeddings.device)
        pos_ids = pos_ids.unsqueeze(0).expand(batch_size, -1)
        projected = self.word_embeddings(input_embeddings)
        combined = projected + self.position_embeddings(pos_ids)
        return self.dropout(self.LayerNorm(combined))
class VisualSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention over visual tokens."""
    def __init__(self, config):
        super(VisualSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        # (batch, seq, hidden) -> (batch, heads, seq, head_size)
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Scaled dot-product scores, then the precomputed additive mask
        # (0 for kept positions, -10000 for masked ones).
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        # Dropout on the probabilities drops whole tokens to attend to,
        # as in the original Transformer paper.
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return context.view(*merged_shape)
class VisualSelfOutput(nn.Module):
    """Projection + dropout followed by a residual LayerNorm, applied after self-attention."""
    def __init__(self, config):
        super(VisualSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class VisualAttention(nn.Module):
    """Self-attention sub-block: attention followed by the residual output layer."""
    def __init__(self, config):
        super(VisualAttention, self).__init__()
        self.self = VisualSelfAttention(config)
        self.output = VisualSelfOutput(config)
    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class VisualIntermediate(nn.Module):
    """Feed-forward expansion to `intermediate_size` with the configured activation."""
    def __init__(self, config):
        super(VisualIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be a name looked up in ACT2FN or an already-callable function.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class VisualOutput(nn.Module):
    """Project the feed-forward output back to `hidden_size` with residual LayerNorm."""
    def __init__(self, config):
        super(VisualOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class VisualLayer(nn.Module):
    """One transformer encoder layer: self-attention, feed-forward, output projection."""
    def __init__(self, config):
        super(VisualLayer, self).__init__()
        self.attention = VisualAttention(config)
        self.intermediate = VisualIntermediate(config)
        self.output = VisualOutput(config)
    def forward(self, hidden_states, attention_mask):
        attn_out = self.attention(hidden_states, attention_mask)
        return self.output(self.intermediate(attn_out), attn_out)
class VisualEncoder(nn.Module):
    """Stack of `num_hidden_layers` identical `VisualLayer`s."""
    def __init__(self, config):
        super(VisualEncoder, self).__init__()
        prototype = VisualLayer(config)
        self.layer = nn.ModuleList(
            [copy.deepcopy(prototype) for _ in range(config.num_hidden_layers)])
    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        # When only the final layer is wanted, the list holds exactly that one entry.
        if not output_all_encoded_layers:
            collected.append(hidden_states)
        return collected
class VisualPooler(nn.Module):
    """Pool the sequence by passing its first token through a tanh-activated dense layer."""
    def __init__(self, config):
        super(VisualPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()
    def forward(self, hidden_states):
        # Only the hidden state of the first token is used for pooling.
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
class VisualPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM prediction head."""
    def __init__(self, config):
        super(VisualPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # `hidden_act` may be a name looked up in ACT2FN or an already-callable function.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class VisualLMPredictionHead(nn.Module):
    """Predict visual targets by projecting onto the (tied) embedding weights plus a bias."""
    def __init__(self, config, visual_model_embedding_weights):
        super(VisualLMPredictionHead, self).__init__()
        self.transform = VisualPredictionHeadTransform(config)
        # The projection shares the input embedding matrix; only the bias is new.
        self.weight = visual_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(visual_model_embedding_weights.size(1)))
    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return transformed.matmul(self.weight) + self.bias
class VisualOnlyMLMHead(nn.Module):
    """Wrapper exposing only the masked-modeling prediction head."""
    def __init__(self, config, visual_model_embedding_weights):
        super(VisualOnlyMLMHead, self).__init__()
        self.predictions = VisualLMPredictionHead(config, visual_model_embedding_weights)
    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class VisualOnlyNSPHead(nn.Module):
    """Two-way classification head over the pooled representation."""
    def __init__(self, config):
        super(VisualOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)
    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class VisualPreTrainingHeads(nn.Module):
    """Joint pretraining heads: token predictions plus sequence-relationship logits."""
    def __init__(self, config, visual_model_embedding_weights):
        super(VisualPreTrainingHeads, self).__init__()
        self.predictions = VisualLMPredictionHead(config, visual_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)
    def forward(self, sequence_output, pooled_output):
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class VisualModel(PreTrainedModel):
    """Visual transformer encoder ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a VisualConfig class instance with the configuration to build a new model
    Inputs:
        `video`: a torch.FloatTensor of visual features of shape
            [batch_size, sequence_length, vocab_size] (projected to hidden_size
            by `VisualEmbeddings`).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sequences. Defaults to all ones.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block, each encoded-hidden-state is a torch.FloatTensor
                of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first token of the
            input (see the BERT paper).
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    config = modeling.VisualConfig(vocab_size_or_config_json_file=4096, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = modeling.VisualModel(config=config)
    all_encoder_layers, pooled_output = model(video, video_mask)
    ```
    """
    def __init__(self, config):
        super(VisualModel, self).__init__(config)
        self.embeddings = VisualEmbeddings(config)
        self.encoder = VisualEncoder(config)
        self.pooler = VisualPooler(config)
        # Initialize all submodule weights (init_weights comes from PreTrainedModel).
        self.apply(self.init_weights)
    def forward(self, video, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones(video.size(0), video.size(1))
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(video)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        # The pooler always reads from the final layer's output.
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
UniVL | UniVL-main/modules/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
logger = logging.getLogger(__name__)
def warmup_cosine(x, warmup=0.002):
    """Cosine learning-rate schedule with linear warmup.

    Args:
        x: training progress in [0, 1] (fraction of total steps completed).
        warmup: fraction of total steps used for the linear warmup phase.
    Returns:
        The learning-rate multiplier for progress `x`.
    """
    if x < warmup:
        return x/warmup
    # Bug fix: the original called torch.cos on a plain Python float, which
    # raises a TypeError (torch.cos requires a Tensor). Use math.cos instead.
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
    """Linear warmup over `warmup`*`t_total` steps (as provided to BertAdam),
    then a constant multiplier of 1."""
    return x / warmup if x < warmup else 1.0
def warmup_linear(x, warmup=0.002):
    """Triangular schedule: rise linearly to 1 at `warmup`*`t_total` steps
    (as provided to BertAdam), then decay linearly to 0 at the final step."""
    if x < warmup:
        return x / warmup
    decay = (x - 1.) / (warmup - 1.)
    return decay if decay > 0 else 0
# Mapping from schedule name (the `schedule` argument of BertAdam) to the
# corresponding warmup/decay multiplier function defined above.
SCHEDULES = {
    'warmup_cosine': warmup_cosine,
    'warmup_constant': warmup_constant,
    'warmup_linear': warmup_linear,
}
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    Unlike standard Adam, this variant applies no bias correction to the
    moment estimates, applies weight decay decoupled from the moments, and
    scales the update by a warmup/decay schedule.
    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
                 max_grad_norm=1.0):
        # Validate hyper-parameters up front so a bad config fails at construction time.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the scheduled learning rate for every parameter with a gradient.

        Returns `[0]` if any inspected parameter has no optimizer state yet
        (i.e. before the first call to `step`).
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                state = self.state[p]
                if len(state) == 0:
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)
                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']
                # Add grad clipping (applied per-parameter, not over the whole model)
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                # next_m.mul_(beta1).add_(1 - beta1, grad) --> pytorch 1.7
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                # next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) --> pytorch 1.7
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # NOTE: no Adam bias correction here (BERT-style Adam).
                update = next_m / (next_v.sqrt() + group['e'])
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    # The step counter is incremented after the update below, so the
                    # schedule sees the number of steps completed *before* this one.
                    progress = state['step']/group['t_total']
                    lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])
                else:
                    lr_scheduled = group['lr']
                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
                state['step'] += 1
        return loss
UniVL | UniVL-main/modules/module_decoder.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import numpy as np
import torch
from torch import nn
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'decoder_config.json'
WEIGHTS_NAME = 'decoder_pytorch_model.bin'
class DecoderConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `DecoderModel`."""
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    config_name = CONFIG_NAME
    weights_name = WEIGHTS_NAME
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 max_target_embeddings=128,
                 num_decoder_layers=1):
        """Constructs DecoderConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `DecoderModel` (int), or the path of a JSON config file whose
                keys are loaded verbatim onto this instance.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `DecoderModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            max_target_embeddings: The maximum target sequence length that this model
                might ever be used with. Typically set this to something large just
                in case (e.g., 512 or 1024 or 2048).
            num_decoder_layers: Number of layers in the decoder stack
                (not consumed in this file — presumably read by the decoder model;
                verify against the caller).
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key from the JSON file directly onto the instance.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.max_target_embeddings = max_target_embeddings
            self.num_decoder_layers = num_decoder_layers
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")
class BertSelfOutput(nn.Module):
    """Projection + dropout followed by a residual LayerNorm, applied after attention."""
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertIntermediate(nn.Module):
    """Feed-forward expansion to `intermediate_size` with the configured activation."""
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be a name looked up in ACT2FN or an already-callable function.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Project the feed-forward output back to `hidden_size` with residual LayerNorm."""
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM prediction head."""
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # `hidden_act` may be a name looked up in ACT2FN or an already-callable function.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """Vocabulary prediction head with weights tied to the decoder embeddings."""
    def __init__(self, config, decoder_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output projection shares the embedding matrix; only the bias is trained fresh.
        self.decoder = nn.Linear(decoder_model_embedding_weights.size(1),
                                 decoder_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = decoder_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(decoder_model_embedding_weights.size(0)))
    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class BertOnlyMLMHead(nn.Module):
    """Wrapper exposing only the masked-LM prediction head."""
    def __init__(self, config, decoder_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, decoder_model_embedding_weights)
    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module '''
    def __init__(self, config):
        super(MultiHeadAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        # (batch, seq, hidden) -> (batch, heads, seq, head_size)
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)
    def forward(self, q, k, v, attention_mask):
        q_heads = self.transpose_for_scores(self.query(q))
        k_heads = self.transpose_for_scores(self.key(k))
        v_heads = self.transpose_for_scores(self.value(v))
        # Scaled dot-product scores with the precomputed additive mask applied.
        scores = torch.matmul(q_heads, k_heads.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        # Dropout on the probabilities drops whole tokens to attend to,
        # as in the original Transformer paper.
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        context = torch.matmul(probs, v_heads).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        context = context.view(*merged_shape)
        # Returns the merged context and the masked, pre-softmax scores.
        return context, scores
class PositionwiseFeedForward(nn.Module):
    ''' A two-feed-forward-layer module '''
    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        # 1x1 convolutions act as position-wise linear layers over (batch, channels, seq).
        self.w_1 = nn.Conv1d(d_in, d_hid, 1)
        self.w_2 = nn.Conv1d(d_hid, d_in, 1)
        self.layer_norm = nn.LayerNorm(d_in)
        self.dropout = nn.Dropout(dropout)
    def forward(self, x):
        residual = x
        hidden = self.w_1(x.transpose(1, 2))
        hidden = self.w_2(ACT2FN["gelu"](hidden)).transpose(1, 2)
        return self.layer_norm(self.dropout(hidden) + residual)
class DecoderAttention(nn.Module):
    """Attention plus residual output projection; also returns the raw attention scores."""
    def __init__(self, config):
        super(DecoderAttention, self).__init__()
        self.att = MultiHeadAttention(config)
        self.output = BertSelfOutput(config)
    def forward(self, q, k, v, attention_mask):
        attended, scores = self.att(q, k, v, attention_mask)
        return self.output(attended, q), scores
class DecoderLayer(nn.Module):
    """One decoder block: masked self-attention, encoder cross-attention,
    then the feed-forward (intermediate + output) sub-layer."""

    def __init__(self, config):
        super(DecoderLayer, self).__init__()
        self.slf_attn = DecoderAttention(config)
        self.enc_attn = DecoderAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, dec_input, enc_output, slf_attn_mask=None, dec_enc_attn_mask=None):
        """Returns (block output, cross-attention scores)."""
        self_attended, _ = self.slf_attn(dec_input, dec_input, dec_input, slf_attn_mask)
        cross_attended, cross_scores = self.enc_attn(self_attended, enc_output, enc_output, dec_enc_attn_mask)
        expanded = self.intermediate(cross_attended)
        return self.output(expanded, cross_attended), cross_scores
class DecoderEmbeddings(nn.Module):
    """Construct the decoder input embeddings from word and position tables,
    with weights shared (tied) with the encoder-side embeddings."""

    def __init__(self, config, decoder_word_embeddings_weight, decoder_position_embeddings_weight):
        super(DecoderEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_target_embeddings, config.hidden_size)
        # Tie both tables to the weights passed in from the text encoder.
        self.word_embeddings.weight = decoder_word_embeddings_weight
        self.position_embeddings.weight = decoder_position_embeddings_weight
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file.
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids):
        """input_ids: (batch, seq) -> embeddings: (batch, seq, hidden)."""
        positions = torch.arange(input_ids.size(1), dtype=torch.long,
                                 device=input_ids.device)
        positions = positions.unsqueeze(0).expand_as(input_ids)
        summed = self.word_embeddings(input_ids) + self.position_embeddings(positions)
        return self.dropout(self.LayerNorm(summed))
class Decoder(nn.Module):
    """A stack of identical DecoderLayer blocks."""

    def __init__(self, config):
        super(Decoder, self).__init__()
        layer = DecoderLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_decoder_layers)])

    def forward(self, hidden_states, encoder_outs, self_attn_mask, attention_mask, output_all_encoded_layers=False):
        """Run the layer stack.

        Returns (states, scores): one entry per layer when
        `output_all_encoded_layers` is True, otherwise only the last layer's.
        """
        collected_states = []
        collected_scores = []
        dec_att_scores = None
        for block in self.layer:
            hidden_states, dec_att_scores = block(hidden_states, encoder_outs, self_attn_mask, attention_mask)
            if output_all_encoded_layers:
                collected_states.append(hidden_states)
                collected_scores.append(dec_att_scores)
        if not output_all_encoded_layers:
            collected_states.append(hidden_states)
            collected_scores.append(dec_att_scores)
        return collected_states, collected_scores
class DecoderClassifier(nn.Module):
    """Thin wrapper projecting decoder hidden states to vocabulary logits."""

    def __init__(self, config, embedding_weights):
        super(DecoderClassifier, self).__init__()
        # MLM head tied to the shared word-embedding weights.
        self.cls = BertOnlyMLMHead(config, embedding_weights)

    def forward(self, hidden_states):
        return self.cls(hidden_states)
class DecoderModel(PreTrainedModel):
    """
    Transformer decoder consisting of `config.num_decoder_layers` layers,
    each a :class:`DecoderLayer` (masked self-attention + cross-attention + FFN).

    Args:
        config: decoder configuration (vocab size, hidden size,
            `max_target_embeddings`, number of layers, ...).
        decoder_word_embeddings_weight: word-embedding weight tied with the
            text encoder (also reused inside the output classifier).
        decoder_position_embeddings_weight: position-embedding weight tied
            with the text encoder.
    """
    def __init__(self, config, decoder_word_embeddings_weight, decoder_position_embeddings_weight):
        super(DecoderModel, self).__init__(config)
        self.config = config
        self.max_target_length = config.max_target_embeddings
        self.embeddings = DecoderEmbeddings(config, decoder_word_embeddings_weight, decoder_position_embeddings_weight)
        self.decoder = Decoder(config)
        # Classifier projection is tied to the shared word-embedding weights.
        self.classifier = DecoderClassifier(config, decoder_word_embeddings_weight)
        self.apply(self.init_weights)
    def forward(self, input_ids, encoder_outs=None, answer_mask=None, encoder_mask=None):
        """
        Args:
            input_ids (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_outs (Tensor, optional): output from the encoder, used for encoder-side attention
            answer_mask: 1/0 padding mask over the target tokens, `(batch, tgt_len)`
            encoder_mask: 1/0 padding mask over the encoder outputs, `(batch, src_len)`
        Returns:
            the last decoder layer's vocabulary scores of shape `(batch, tgt_len, vocab)`
        """
        embedding_output = self.embeddings(input_ids)
        # Additive cross-attention mask: 0 where attendable, -10000 where padded.
        extended_encoder_mask = encoder_mask.unsqueeze(1).unsqueeze(2)  # b x 1 x 1 x ls
        extended_encoder_mask = extended_encoder_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_encoder_mask = (1.0 - extended_encoder_mask) * -10000.0
        extended_answer_mask = answer_mask.unsqueeze(1).unsqueeze(2)
        extended_answer_mask = extended_answer_mask.to(dtype=self.dtype)  # fp16 compatibility
        sz_b, len_s, _ = embedding_output.size()
        # Strictly-upper-triangular mask blocks attention to future positions.
        subsequent_mask = torch.triu(torch.ones((len_s, len_s), device=embedding_output.device, dtype=embedding_output.dtype), diagonal=1)
        self_attn_mask = subsequent_mask.unsqueeze(0).expand(sz_b, -1, -1).unsqueeze(1)  # b x 1 x ls x ls
        # Union of padding and causal masks, converted to additive -10000 form.
        slf_attn_mask = ((1.0 - extended_answer_mask) + self_attn_mask).gt(0).to(dtype=self.dtype)
        self_attn_mask = slf_attn_mask * -10000.0
        decoded_layers, dec_att_scores = self.decoder(embedding_output,
                                                      encoder_outs,
                                                      self_attn_mask,
                                                      extended_encoder_mask,
                                                      )
        sequence_output = decoded_layers[-1]
        cls_scores = self.classifier(sequence_output)
        return cls_scores
| 18,283 | 43.923833 | 138 | py |
UniVL | UniVL-main/modules/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import os
import sys
import logging
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
# Maximum sequence length supported by each pretrained model's position
# embeddings; used by `BertTokenizer.from_pretrained` to clamp `max_len`.
# BUG FIX: the keys previously lacked the "bert-" prefix, so the membership
# test in `from_pretrained` (keyed by the full model name, e.g.
# "bert-base-uncased", as in PRETRAINED_VOCAB_ARCHIVE_MAP above) never
# matched and max_len was never clamped.
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary.

    One token per line; the 0-based line number becomes the token id.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            vocab[line.strip()] = index
    return vocab
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text.

    Returns [] for empty or all-whitespace input.
    """
    # str.split() with no separator already ignores leading/trailing
    # whitespace and collapses internal runs, so no explicit strip is needed.
    return text.split()
class BertTokenizer(object):
    """Runs end-to-end tokenization: basic tokenization (punctuation/case
    splitting) followed by WordPiece sub-word tokenization."""

    def __init__(self, vocab_file, do_lower_case=True, max_len=None, never_split=("[UNK]", "[SEP]", "[MASK]", "[CLS]")):
        """
        Args:
            vocab_file: path to a one-token-per-line vocabulary file.
            do_lower_case: lower-case the input before tokenizing.
            max_len: maximum allowed length of an encoded sequence; `None`
                means effectively unbounded (1e12).
            never_split: tokens that must never be split or lower-cased.

        Raises:
            ValueError: if `vocab_file` does not exist.
        """
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
        self.max_len = max_len if max_len is not None else int(1e12)

    def tokenize(self, text):
        """Split `text` into WordPiece tokens (basic tokenization first)."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        """Converts a sequence of tokens into ids using the vocab.

        Unknown tokens map to the id of "[UNK]" (and are logged as errors).

        Raises:
            ValueError: if the resulting id sequence exceeds `self.max_len`.
        """
        ids = []
        for token in tokens:
            if token not in self.vocab:
                ids.append(self.vocab["[UNK]"])
                # BUG FIX: corrected typo "insetad" -> "instead" in the log message.
                logger.error("Cannot find token '{}' in vocab. Using [UNK] instead".format(token))
            else:
                ids.append(self.vocab[token])
        if len(ids) > self.max_len:
            raise ValueError(
                "Token indices sequence length is longer than the specified maximum "
                " sequence length for this BERT model ({} > {}). Running this"
                " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
            )
        return ids

    def convert_ids_to_tokens(self, ids):
        """Converts a sequence of ids in tokens using the vocab."""
        tokens = []
        for i in ids:
            tokens.append(self.ids_to_tokens[i])
        return tokens

    @classmethod
    def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file.
        Download and cache the pre-trained model file if needed.

        Returns None when no vocabulary file could be resolved.
        """
        # Prefer a vocab shipped next to this module; otherwise fall back to
        # the known download URLs or treat the name as a path/URL.
        vocab_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
        if os.path.exists(vocab_file) is False:
            if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
                vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
            else:
                vocab_file = pretrained_model_name
        if os.path.isdir(vocab_file):
            vocab_file = os.path.join(vocab_file, VOCAB_NAME)
        # redirect to the cache, if necessary
        print(vocab_file)
        try:
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
        except FileNotFoundError:
            logger.error(
                "Model name '{}' was not found. "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name,
                    vocab_file))
            return None
        if resolved_vocab_file == vocab_file:
            logger.info("loading vocabulary file {}".format(vocab_file))
        else:
            logger.info("loading vocabulary file {} from cache at {}".format(
                vocab_file, resolved_vocab_file))
        if pretrained_model_name in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer wont index sequences longer
            # than the number of positional embeddings
            max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name]
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
        kwargs['never_split'] = ("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")
        # Instantiate tokenizer.
        tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
        return tokenizer

    def add_tokens(self, new_tokens, model):
        """
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the
        vocabulary, they are added to it with indices starting from length of the current vocabulary.
        Args:
            new_tokens: list of string. Each string is a token to add. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).
        Returns:
            Number of tokens added to the vocabulary.
        Examples::
            # Let's see how to increase the vocabulary of Bert model and tokenizer
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertModel.from_pretrained('bert-base-uncased')
            num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
            print('We have added', num_added_toks, 'tokens')
            model.resize_token_embeddings(len(tokenizer))  # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
        """
        to_add_tokens = []
        for token in new_tokens:
            assert isinstance(token, str)
            to_add_tokens.append(token)
            # logger.info("Adding %s to the vocabulary", token)
        # Rebuild the vocab so new tokens get ids after all existing ones.
        vocab = collections.OrderedDict()
        for token in self.vocab.keys():
            vocab[token] = self.vocab[token]
        for token in to_add_tokens:
            vocab[token] = len(vocab)
        # Keep the wordpiece tokenizer's view of the vocab in sync.
        self.vocab = self.wordpiece_tokenizer.vocab = vocab
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        model.resize_token_embeddings(new_num_tokens=len(vocab))
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
    def __init__(self, do_lower_case=True, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
        """Constructs a BasicTokenizer.
        Args:
            do_lower_case: Whether to lower case the input.
            never_split: tokens kept verbatim (no lower-casing, accent
                stripping or punctuation splitting).
        """
        self.do_lower_case = do_lower_case
        self.never_split = never_split
    def tokenize(self, text):
        """Tokenizes a piece of text into whitespace/punctuation-split tokens."""
        # Remove invalid characters and normalize whitespace first.
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case and token not in self.never_split:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks,
        # which carry Unicode category "Mn" and are dropped below.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        if text in self.never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        # Group characters; every punctuation char becomes its own group.
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]
    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True
        return False
    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the replacement character, and control characters.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should
                have already been passed through `BasicTokenizer`.
        Returns:
            A list of wordpiece tokens.
        """
        pieces = []
        for word in whitespace_tokenize(text):
            # Overly long words become a single [UNK].
            if len(word) > self.max_input_chars_per_word:
                pieces.append(self.unk_token)
                continue
            subwords = []
            pos = 0
            failed = False
            while pos < len(word):
                # Greedily shrink the candidate from the right until it is
                # found in the vocab (non-initial pieces get a "##" prefix).
                stop = len(word)
                match = None
                while pos < stop:
                    candidate = word[pos:stop]
                    if pos > 0:
                        candidate = "##" + candidate
                    if candidate in self.vocab:
                        match = candidate
                        break
                    stop -= 1
                if match is None:
                    # No prefix of the remainder is in the vocab at all.
                    failed = True
                    break
                subwords.append(match)
                pos = stop
            if failed:
                pieces.append(self.unk_token)
            else:
                pieces.extend(subwords)
        return pieces
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 16,424 | 39.158924 | 219 | py |
UniVL | UniVL-main/modules/modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from modules.until_module import PreTrainedModel, LayerNorm, CrossEn, MILNCELoss, MaxMarginRankingLoss
from modules.module_bert import BertModel, BertConfig, BertOnlyMLMHead
from modules.module_visual import VisualModel, VisualConfig, VisualOnlyMLMHead
from modules.module_cross import CrossModel, CrossConfig
from modules.module_decoder import DecoderModel, DecoderConfig
logger = logging.getLogger(__name__)
class UniVLPreTrainedModel(PreTrainedModel, nn.Module):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, bert_config, visual_config, cross_config, decoder_config, *inputs, **kwargs):
        # utilize bert config as base config
        super(UniVLPreTrainedModel, self).__init__(bert_config)
        # Keep all four sub-model configs around for subclasses.
        self.bert_config = bert_config
        self.visual_config = visual_config
        self.cross_config = cross_config
        self.decoder_config = decoder_config
        # Sub-modules are created by the concrete subclass (e.g. UniVL).
        self.bert = None
        self.visual = None
        self.cross = None
        self.decoder = None
    @classmethod
    def from_pretrained(cls, pretrained_bert_name, visual_model_name, cross_model_name, decoder_model_name,
                        state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
        """Build the model from the four named configs and, when a state dict
        is available, initialize weights from it.
        """
        task_config = None
        if "task_config" in kwargs.keys():
            task_config = kwargs["task_config"]
            # Normalize local_rank so rank-gated logging works even outside
            # distributed training (missing or -1 becomes 0).
            if not hasattr(task_config, "local_rank"):
                task_config.__dict__["local_rank"] = 0
            elif task_config.local_rank == -1:
                task_config.local_rank = 0
        # Only the BERT loader may return pretrained weights; the other three
        # configs are loaded without a state dict.
        bert_config, state_dict = BertConfig.get_config(pretrained_bert_name, cache_dir, type_vocab_size, state_dict, task_config=task_config)
        visual_config, _ = VisualConfig.get_config(visual_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
        cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
        decoder_config, _ = DecoderConfig.get_config(decoder_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
        model = cls(bert_config, visual_config, cross_config, decoder_config, *inputs, **kwargs)
        # Concrete subclasses must have built at least the two unimodal encoders.
        assert model.bert is not None
        assert model.visual is not None
        if state_dict is not None:
            model = cls.init_preweight(model, state_dict, task_config=task_config)
        return model
class NormalizeVideo(nn.Module):
    """Flatten raw video features to (batch*pairs, frames, dim) and apply
    LayerNorm over the feature dimension."""

    def __init__(self, task_config):
        super(NormalizeVideo, self).__init__()
        self.visual_norm2d = LayerNorm(task_config.video_dim)

    def forward(self, video):
        feats = torch.as_tensor(video).float()
        # Collapse all leading dims into one batch dim, keep (frames, dim).
        feats = feats.view(-1, feats.shape[-2], feats.shape[-1])
        return self.visual_norm2d(feats)
def show_log(task_config, info):
    """Log *info* as a warning, but only on the main process
    (local_rank == 0) or when no task_config is given."""
    if task_config is not None and task_config.local_rank != 0:
        return
    logger.warning(info)
def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None):
    """Copy `source_attr_name` from `source_config` onto `target_config` as
    `target_attr_name`, skipping the copy when the source value equals
    `default_value`. Returns `target_config` (mutated in place)."""
    if hasattr(source_config, source_attr_name):
        value = getattr(source_config, source_attr_name)
        if default_value is None or value != default_value:
            setattr(target_config, target_attr_name, value)
            show_log(source_config, "Set {}.{}: {}.".format(
                target_name, target_attr_name, getattr(target_config, target_attr_name)))
    return target_config
def check_attr(target_name, task_config):
    """Return the truthy flag `target_name` stored on `task_config`'s
    instance dict, or False when the attribute is absent."""
    if not hasattr(task_config, target_name):
        return False
    return task_config.__dict__[target_name]
class UniVL(UniVLPreTrainedModel):
    def __init__(self, bert_config, visual_config, cross_config, decoder_config, task_config):
        """Assemble UniVL from its four sub-model configs.

        The text (BERT) and video encoders are always built; the cross
        encoder, caption decoder and pretraining heads are built only for
        the stages/tasks enabled on `task_config`.
        """
        super(UniVL, self).__init__(bert_config, visual_config, cross_config, decoder_config)
        self.task_config = task_config
        # Label value marking video positions to ignore in the MFM loss.
        self.ignore_video_index = -1
        # Sequence-length sanity checks against each sub-model's position tables.
        assert self.task_config.max_words <= bert_config.max_position_embeddings
        assert self.task_config.max_words <= decoder_config.max_target_embeddings
        assert self.task_config.max_frames <= visual_config.max_position_embeddings
        assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings
        # Stage one: unimodal retrieval pretraining; stage two: joint training.
        self._stage_one = True
        self._stage_two = False
        if check_attr('stage_two', self.task_config):
            self._stage_one = False
            self._stage_two = self.task_config.stage_two
        show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two))
        self.train_sim_after_cross = False
        if self._stage_one and check_attr('train_sim_after_cross', self.task_config):
            self.train_sim_after_cross = True
            show_log(task_config, "Test retrieval after cross encoder.")
        # Text Encoder ===>
        bert_config = update_attr("bert_config", bert_config, "num_hidden_layers",
                                  self.task_config, "text_num_hidden_layers")
        self.bert = BertModel(bert_config)
        bert_word_embeddings_weight = self.bert.embeddings.word_embeddings.weight
        bert_position_embeddings_weight = self.bert.embeddings.position_embeddings.weight
        # <=== End of Text Encoder
        # Video Encoder ===>
        visual_config = update_attr("visual_config", visual_config, "num_hidden_layers",
                                    self.task_config, "visual_num_hidden_layers")
        self.visual = VisualModel(visual_config)
        visual_word_embeddings_weight = self.visual.embeddings.word_embeddings.weight
        # <=== End of Video Encoder
        if self._stage_one is False or self.train_sim_after_cross:
            # Cross Encoder ===>
            cross_config = update_attr("cross_config", cross_config, "num_hidden_layers",
                                       self.task_config, "cross_num_hidden_layers")
            self.cross = CrossModel(cross_config)
            # <=== End of Cross Encoder
            if self.train_sim_after_cross is False:
                # Decoder ===>
                # The decoder ties its embeddings to the text encoder's tables.
                decoder_config = update_attr("decoder_config", decoder_config, "num_decoder_layers",
                                             self.task_config, "decoder_num_hidden_layers")
                self.decoder = DecoderModel(decoder_config, bert_word_embeddings_weight, bert_position_embeddings_weight)
                # <=== End of Decoder
            if self.task_config.do_pretrain:
                # Pretraining heads: masked LM (text) and masked frame model (video).
                self.cls = BertOnlyMLMHead(bert_config, bert_word_embeddings_weight)
                self.cls_visual = VisualOnlyMLMHead(visual_config, visual_word_embeddings_weight)
                self.alm_loss_fct = CrossEntropyLoss(ignore_index=-1)
            self.similarity_dense = nn.Linear(bert_config.hidden_size, 1)
            self.decoder_loss_fct = CrossEntropyLoss(ignore_index=-1)
        self.normalize_video = NormalizeVideo(task_config)
        # Retrieval loss: MIL-NCE or max-margin ranking (per task_config.use_mil);
        # stage two always uses the cross-entropy variant instead.
        mILNCELoss = MILNCELoss(batch_size=task_config.batch_size // task_config.n_gpu, n_pair=task_config.n_pair, )
        maxMarginRankingLoss = MaxMarginRankingLoss(margin=task_config.margin,
                                                    negative_weighting=task_config.negative_weighting,
                                                    batch_size=task_config.batch_size // task_config.n_gpu,
                                                    n_pair=task_config.n_pair,
                                                    hard_negative_rate=task_config.hard_negative_rate, )
        if task_config.use_mil:
            self.loss_fct = CrossEn() if self._stage_two else mILNCELoss
            self._pretrain_sim_loss_fct = mILNCELoss
        else:
            self.loss_fct = CrossEn() if self._stage_two else maxMarginRankingLoss
            self._pretrain_sim_loss_fct = maxMarginRankingLoss
        self.apply(self.init_weights)
    def forward(self, input_ids, token_type_ids, attention_mask, video, video_mask=None,
                pairs_masked_text=None, pairs_token_labels=None, masked_video=None, video_labels_index=None,
                input_caption_ids=None, decoder_mask=None, output_caption_ids=None):
        """Training entry point.

        In training mode, returns the sum of the losses enabled by the
        current stage/task (retrieval similarity, masked-LM, masked-frame
        NCE, joint similarity, caption decoding); in eval mode returns None.
        The pairs_* / masked_* arguments are only used when pretraining.
        """
        # Collapse any leading pair dimension into the batch dimension.
        input_ids = input_ids.view(-1, input_ids.shape[-1])
        token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
        attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
        video_mask = video_mask.view(-1, video_mask.shape[-1])
        video = self.normalize_video(video)
        if input_caption_ids is not None:
            input_caption_ids = input_caption_ids.view(-1, input_caption_ids.shape[-1])
            decoder_mask = decoder_mask.view(-1, decoder_mask.shape[-1])
        sequence_output, visual_output = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask,
                                                                         video, video_mask, shaped=True)
        if self.training:
            loss = 0.
            if self._stage_one:
                # Stage one: retrieval similarity loss only.
                sim_matrix = self.get_similarity_logits(sequence_output, visual_output, attention_mask,
                                                        video_mask, shaped=True)
                sim_loss = self.loss_fct(sim_matrix)
                loss += sim_loss
            if self._stage_two:
                if self.task_config.do_pretrain:
                    # Re-encode the masked text/video pair for the MLM/MFM heads.
                    pairs_masked_text = pairs_masked_text.view(-1, pairs_masked_text.shape[-1])
                    pairs_token_labels = pairs_token_labels.view(-1, pairs_token_labels.shape[-1])
                    masked_video = self.normalize_video(masked_video)
                    video_labels_index = video_labels_index.view(-1, video_labels_index.shape[-1])
                    sequence_output_alm, visual_output_alm = self.get_sequence_visual_output(pairs_masked_text, token_type_ids,
                                                                                             attention_mask, masked_video, video_mask, shaped=True)
                    cross_output, pooled_output, concat_mask = self._get_cross_output(sequence_output_alm, visual_output_alm, attention_mask, video_mask)
                    # Split the fused sequence back into its text and video halves.
                    sequence_cross_output, visual_cross_output = torch.split(cross_output, [attention_mask.size(-1), video_mask.size(-1)], dim=1)
                    alm_loss = self._calculate_mlm_loss(sequence_cross_output, pairs_token_labels)
                    loss += alm_loss
                    nce_loss = self._calculate_mfm_loss(visual_cross_output, video, video_mask, video_labels_index)
                    loss += nce_loss
                    # Joint similarity on the unmasked encodings.
                    sim_matrix = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask,
                                                            shaped=True, _pretrain_joint=True)
                    sim_loss_joint = self._pretrain_sim_loss_fct(sim_matrix)
                    loss += sim_loss_joint
                # Caption decoding loss (pretraining uses the masked encodings).
                if (input_caption_ids is not None) and \
                        (self.task_config.do_pretrain
                         or (self.task_config.do_pretrain is False and self.task_config.task_type == "caption")):
                    if self.task_config.do_pretrain:
                        decoder_scores, res_tuples = self._get_decoder_score(sequence_output_alm, visual_output_alm,
                                                                             input_ids, attention_mask, video_mask,
                                                                             input_caption_ids, decoder_mask, shaped=True)
                    elif self.task_config.task_type == "caption":
                        decoder_scores, res_tuples = self._get_decoder_score(sequence_output, visual_output,
                                                                             input_ids, attention_mask, video_mask,
                                                                             input_caption_ids, decoder_mask, shaped=True)
                    else:
                        raise NotImplementedError
                    output_caption_ids = output_caption_ids.view(-1, output_caption_ids.shape[-1])
                    decoder_loss = self.decoder_loss_fct(decoder_scores.view(-1, self.bert_config.vocab_size), output_caption_ids.view(-1))
                    loss += decoder_loss
                # Text-visual similarity loss for retrieval (or during pretraining).
                if self.task_config.do_pretrain or self.task_config.task_type == "retrieval":
                    if self.task_config.do_pretrain:
                        sim_matrix_text_visual = self.get_similarity_logits(sequence_output_alm, visual_output_alm,
                                                                            attention_mask, video_mask, shaped=True)
                    elif self.task_config.task_type == "retrieval":
                        sim_matrix_text_visual = self.get_similarity_logits(sequence_output, visual_output,
                                                                            attention_mask, video_mask, shaped=True)
                    else:
                        raise NotImplementedError
                    sim_loss_text_visual = self.loss_fct(sim_matrix_text_visual)
                    loss += sim_loss_text_visual
            return loss
        else:
            return None
def _calculate_mlm_loss(self, sequence_output_alm, pairs_token_labels):
alm_scores = self.cls(sequence_output_alm)
alm_loss = self.alm_loss_fct(alm_scores.view(-1, self.bert_config.vocab_size), pairs_token_labels.view(-1))
return alm_loss
    def _calculate_mfm_loss(self, visual_output_alm, video, video_mask, video_labels_index):
        """Masked-frame-model contrastive (NCE) loss.

        Each reconstructed frame must score highest against its own original
        feature among all frames in the batch; only positions whose label is
        not `self.ignore_video_index` contribute to the mean.
        """
        afm_scores = self.cls_visual(visual_output_alm)
        afm_scores_tr = afm_scores.view(-1, afm_scores.shape[-1])
        # (B, L, D) -> (D, B*L): columns become candidate frame features.
        video_tr = video.permute(2, 0, 1)
        video_tr = video_tr.view(video_tr.shape[0], -1)
        # Similarity of every predicted frame against every original frame.
        logits_matrix = torch.mm(afm_scores_tr, video_tr)
        video_mask_float = video_mask.to(dtype=torch.float)
        mask_matrix = torch.mm(video_mask_float.view(-1, 1), video_mask_float.view(1, -1))
        # Push pairs involving padded positions to a large negative value.
        masked_logits = logits_matrix + (1. - mask_matrix) * -1e8
        logpt = F.log_softmax(masked_logits, dim=-1)
        # Diagonal entries are the log-probabilities of the correct (own) frame.
        logpt = torch.diag(logpt)
        nce_loss = -logpt
        # Average only over positions that were actually masked for prediction.
        video_labels_index_mask = (video_labels_index != self.ignore_video_index)
        nce_loss = nce_loss.masked_select(video_labels_index_mask.view(-1))
        nce_loss = nce_loss.mean()
        return nce_loss
def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, video, video_mask, shaped=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = self.normalize_video(video)
encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=True)
sequence_output = encoded_layers[-1]
visual_layers, _ = self.visual(video, video_mask, output_all_encoded_layers=True)
visual_output = visual_layers[-1]
return sequence_output, visual_output
def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask):
concat_features = torch.cat((sequence_output, visual_output), dim=1) # concatnate tokens and frames
concat_mask = torch.cat((attention_mask, video_mask), dim=1)
text_type_ = torch.zeros_like(attention_mask)
video_type_ = torch.ones_like(video_mask)
concat_type = torch.cat((text_type_, video_type_), dim=1)
cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True)
cross_output = cross_layers[-1]
return cross_output, pooled_output, concat_mask
def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,):
attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1)
attention_mask_un[:, 0, :] = 0.
sequence_output = sequence_output * attention_mask_un
text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float)
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum
return text_out, video_out
    def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask):
        """Score every text against every video through the cross encoder.

        Each (text, video) pair is run through ``_get_cross_output`` and scored
        by ``similarity_dense``; texts are processed in chunks of 5 rows to
        bound the memory of the (b_text * b_visual) pairwise expansion.
        Returns a (b_text, b_visual) logits matrix.
        """
        b_text, s_text, h_text = sequence_output.size()
        b_visual, s_visual, h_visual = visual_output.size()
        retrieve_logits_list = []
        # Chunk the text batch; the remainder (if any) becomes a final chunk.
        step_size = 5
        split_size = [step_size] * (b_text // step_size)
        release_size = b_text - sum(split_size)
        if release_size > 0:
            split_size += [release_size]
        sequence_output_splits = torch.split(sequence_output, split_size, dim=0)
        attention_mask_splits = torch.split(attention_mask, split_size, dim=0)
        for i in range(len(split_size)):
            sequence_output_row = sequence_output_splits[i]
            attention_mask_row = attention_mask_splits[i]
            # Tile each text chunk across all videos: (chunk*b_visual, s_text, h_text).
            sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1)
            sequence_output_l = sequence_output_l.view(-1, s_text, h_text)
            attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1)
            attention_mask_l = attention_mask_l.view(-1, s_text)
            step_truth = sequence_output_row.size(0)
            # Tile all videos across the chunk so rows pair up with the texts above.
            visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1)
            visual_output_r = visual_output_r.view(-1, s_visual, h_visual)
            video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1)
            video_mask_r = video_mask_r.view(-1, s_visual)
            cross_output, pooled_output, concat_mask = \
                self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r)
            # One scalar score per pair, reshaped back to (chunk, b_visual).
            retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual)
            retrieve_logits_list.append(retrieve_logits_row)
        retrieve_logits = torch.cat(retrieve_logits_list, dim=0)
        return retrieve_logits
def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, shaped=False, _pretrain_joint=False):
if shaped is False:
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
if (self._stage_two and _pretrain_joint is False) or self.train_sim_after_cross:
retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask)
else:
text_out, video_out = self._mean_pooling_for_similarity(sequence_output, visual_output, attention_mask, video_mask)
if self.task_config.use_mil is False:
text_out = F.normalize(text_out, dim=-1)
video_out = F.normalize(video_out, dim=-1)
retrieve_logits = torch.matmul(text_out, video_out.t())
return retrieve_logits
def _get_decoder_score(self, sequence_output, visual_output, input_ids, attention_mask, video_mask, input_caption_ids, decoder_mask, shaped=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
input_caption_ids = input_caption_ids.view(-1, input_caption_ids.shape[-1])
decoder_mask = decoder_mask.view(-1, decoder_mask.shape[-1])
res_tuples = ()
cross_output, pooled_output, concat_mask = self._get_cross_output(sequence_output, visual_output, attention_mask, video_mask)
decoder_scores = self.decoder(input_caption_ids, encoder_outs=cross_output, answer_mask=decoder_mask, encoder_mask=concat_mask)
return decoder_scores, res_tuples
def decoder_caption(self, sequence_output, visual_output, input_ids, attention_mask, video_mask, input_caption_ids, decoder_mask,
shaped=False, get_logits=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
input_caption_ids = input_caption_ids.view(-1, input_caption_ids.shape[-1])
decoder_mask = decoder_mask.view(-1, decoder_mask.shape[-1])
decoder_scores, _ = self._get_decoder_score(sequence_output, visual_output,
input_ids, attention_mask, video_mask,
input_caption_ids, decoder_mask, shaped=True)
if get_logits:
return decoder_scores
_, decoder_scores_result = torch.max(decoder_scores, -1)
return decoder_scores_result | 22,558 | 51.707944 | 153 | py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import math
from modules.until_config import PretrainedConfig
logger = logging.getLogger(__name__)
def gelu(x):
    """Exact (erf-based) Gaussian Error Linear Unit.

    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
# Map from config string names to the activation callables defined above.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class LayerNorm(nn.Module):
    """TF-style layer normalization: epsilon is added inside the square root."""

    def __init__(self, hidden_size, eps=1e-12):
        super(LayerNorm, self).__init__()
        # Learnable affine parameters, initialized to the identity transform.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = centered / torch.sqrt(variance + self.variance_epsilon)
        return normalized * self.weight + self.bias
class PreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedModel, self).__init__()
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config
    def init_weights(self, module):
        """ Initialize the weights.

        Intended for use with ``Module.apply``: linear/embedding weights get a
        normal init, LayerNorm gets identity affine, linear biases get zero.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            # Some checkpoints name the affine params gamma/beta instead of weight/bias.
            if 'beta' in dir(module) and 'gamma' in dir(module):
                module.beta.data.zero_()
                module.gamma.data.fill_(1.0)
            else:
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    def resize_token_embeddings(self, new_num_tokens=None):
        # Subclasses must implement embedding resizing themselves.
        raise NotImplementedError
    @classmethod
    def init_preweight(cls, model, state_dict, prefix=None, task_config=None):
        """Load *state_dict* into *model*, renaming legacy TF keys
        (gamma->weight, beta->bias), optionally prepending *prefix* to every
        key, and logging missing/unexpected keys on rank 0.
        """
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        if prefix is not None:
            # Re-root every key under the given prefix (e.g. "bert.").
            old_keys = []
            new_keys = []
            for key in state_dict.keys():
                old_keys.append(key)
                new_keys.append(prefix + key)
            for old_key, new_key in zip(old_keys, new_keys):
                state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursively load each submodule with its dotted key prefix.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        load(model, prefix='')
        if prefix is None and (task_config is None or task_config.local_rank == 0):
            logger.info("-" * 20)
            if len(missing_keys) > 0:
                logger.info("Weights of {} not initialized from pretrained model: {}"
                            .format(model.__class__.__name__, "\n   " + "\n   ".join(missing_keys)))
            if len(unexpected_keys) > 0:
                logger.info("Weights from pretrained model not used in {}: {}"
                            .format(model.__class__.__name__, "\n   " + "\n   ".join(unexpected_keys)))
            if len(error_msgs) > 0:
                logger.error("Weights from pretrained model cause errors in {}: {}"
                             .format(model.__class__.__name__, "\n   " + "\n   ".join(error_msgs)))
        return model
    @property
    def dtype(self):
        """
        :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        try:
            return next(self.parameters()).dtype
        except StopIteration:
            # For nn.DataParallel compatibility in PyTorch 1.5
            def find_tensor_attributes(module: nn.Module):
                tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
                return tuples
            gen = self._named_members(get_members_fn=find_tensor_attributes)
            first_tuple = next(gen)
            return first_tuple[1].dtype
    @classmethod
    def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        """
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None:
            return model
        model = cls.init_preweight(model, state_dict)
        return model
##################################
###### LOSS FUNCTION #############
##################################
class CrossEn(nn.Module):
    """Cross-entropy over a similarity matrix whose diagonal holds the
    positive (matched) pairs: maximize each row's diagonal probability."""

    def __init__(self,):
        super(CrossEn, self).__init__()

    def forward(self, sim_matrix):
        # Row-wise log-probabilities; positives sit on the diagonal.
        log_probs = F.log_softmax(sim_matrix, dim=-1)
        diag_log_probs = torch.diag(log_probs)
        return (-diag_log_probs).mean()
class MILNCELoss(nn.Module):
    """MIL-NCE loss over an (batch*n_pair, batch*n_pair) similarity matrix:
    positives are all pairs from the same clip (a block-diagonal mask)."""

    def __init__(self, batch_size=1, n_pair=1,):
        super(MILNCELoss, self).__init__()
        self.batch_size = batch_size
        self.n_pair = n_pair
        # torch.bool exists from torch >= 1.3.
        # Fix: the previous check compared the version as a float, so
        # float("1.13") < float("1.3") misclassified newer releases;
        # compare (major, minor) as an integer tuple instead.
        version_parts = torch.__version__.split(".")[:2]
        try:
            major, minor = (int(p) for p in version_parts)
        except ValueError:
            major, minor = 1, 3  # unparsable dev build: assume modern torch
        self.bool_dtype = torch.bool if (major, minor) >= (1, 3) else torch.uint8

    def forward(self, sim_matrix):
        # Block-diagonal positive mask: same-clip (text, video) pairs.
        mm_mask = np.eye(self.batch_size)
        mm_mask = np.kron(mm_mask, np.ones((self.n_pair, self.n_pair)))
        mm_mask = torch.tensor(mm_mask).float().to(sim_matrix.device)
        # Text-side view removes the positives so they don't act as negatives.
        from_text_matrix = sim_matrix + mm_mask * -1e12
        from_video_matrix = sim_matrix.transpose(1, 0)
        new_sim_matrix = torch.cat([from_video_matrix, from_text_matrix], dim=-1)
        logpt = F.log_softmax(new_sim_matrix, dim=-1)
        # Keep only the positive columns, then logsumexp over them (MIL).
        mm_mask_logpt = torch.cat([mm_mask, torch.zeros_like(mm_mask)], dim=-1)
        masked_logpt = logpt + (torch.ones_like(mm_mask_logpt) - mm_mask_logpt) * -1e12
        new_logpt = -torch.logsumexp(masked_logpt, dim=-1)
        # Average over one representative (middle) pair per clip.
        logpt_choice = torch.zeros_like(new_logpt)
        mark_ind = torch.arange(self.batch_size).to(sim_matrix.device) * self.n_pair + (self.n_pair//2)
        logpt_choice[mark_ind] = 1
        sim_loss = new_logpt.masked_select(logpt_choice.to(dtype=self.bool_dtype)).mean()
        return sim_loss
class MaxMarginRankingLoss(nn.Module):
    """Bidirectional max-margin ranking loss over a similarity matrix whose
    diagonal holds the positive pairs."""

    def __init__(self,
                 margin=1.0,
                 negative_weighting=False,
                 batch_size=1,
                 n_pair=1,
                 hard_negative_rate=0.5,
                 ):
        super(MaxMarginRankingLoss, self).__init__()
        self.margin = margin
        self.n_pair = n_pair
        self.batch_size = batch_size
        easy_negative_rate = 1 - hard_negative_rate
        self.easy_negative_rate = easy_negative_rate
        self.negative_weighting = negative_weighting
        if n_pair > 1 and batch_size > 1:
            # Precompute the weighting mask balancing same-clip vs. other-clip
            # negatives according to the hard-negative rate.
            alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))
            mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha
            mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair)))
            mm_mask = torch.tensor(mm_mask) * (batch_size * (1 - easy_negative_rate))
            self.mm_mask = mm_mask.float()

    def forward(self, x):
        positives = torch.diag(x)
        # Hinge terms for both retrieval directions (rows and columns).
        row_margin = F.relu(self.margin + x - positives.view(-1, 1))
        col_margin = F.relu(self.margin + x - positives.view(1, -1))
        max_margin = row_margin + col_margin
        if self.negative_weighting and self.n_pair > 1 and self.batch_size > 1:
            max_margin = max_margin * self.mm_mask.to(max_margin.device)
        return max_margin.mean()
"""
Manage beam search info structure.
Heavily borrowed from OpenNMT-py.
For code in OpenNMT-py, please check the following link (maybe in oldest version):
https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/Beam.py
"""
import torch
class Constants():
    """Special-token ids and surface forms used by beam search."""

    def __init__(self):
        # Default ids follow the conventional [PAD]/[UNK]/[CLS]/[SEP] layout.
        self.PAD, self.UNK, self.BOS, self.EOS = 0, 1, 2, 3
        self.PAD_WORD = '[PAD]'
        self.UNK_WORD = '[UNK]'
        self.BOS_WORD = '[CLS]'
        self.EOS_WORD = '[SEP]'

    @classmethod
    def from_tokenizer(cls, tokenizer):
        """Build Constants whose ids come from *tokenizer*'s vocab mapping."""
        instance = cls()
        vocab = tokenizer.vocab
        instance.PAD = vocab[instance.PAD_WORD]
        instance.UNK = vocab[instance.UNK_WORD]
        instance.BOS = vocab[instance.BOS_WORD]
        instance.EOS = vocab[instance.EOS_WORD]
        return instance
class Beam():
    ''' Beam search bookkeeping: scores, backpointers and emitted tokens per step. '''
    def __init__(self, size, device=False, tokenizer=None):
        # NOTE(review): `device=False` is handed straight to torch factory
        # functions; callers appear to pass a real torch.device — confirm.
        if tokenizer is None:
            self.constants = Constants()
        else:
            self.constants = Constants.from_tokenizer(tokenizer)
        self.size = size
        self._done = False
        # The score for each hypothesis on the beam.
        self.scores = torch.zeros((size,), dtype=torch.float, device=device)
        self.all_scores = []
        # The backpointers at each time-step.
        self.prev_ks = []
        # The outputs at each time-step; step 0 starts every beam with BOS.
        self.next_ys = [torch.full((size,), self.constants.BOS, dtype=torch.long, device=device)]
    def get_current_state(self):
        "Get the outputs for the current timestep."
        return self.get_tentative_hypothesis()
    def get_current_origin(self):
        "Get the backpointers for the current timestep."
        return self.prev_ks[-1]
    @property
    def done(self):
        # True once the best hypothesis ends with EOS (set in advance()).
        return self._done
    def advance(self, word_prob, word_length=None):
        "Update beam status and check if finished or not."
        num_words = word_prob.size(1)
        # Sum the previous scores.
        if len(self.prev_ks) > 0:
            beam_lk = word_prob + self.scores.unsqueeze(1).expand_as(word_prob)
        else:
            # First step: all beams are identical, so only expand beam 0.
            beam_lk = word_prob[0]
        flat_beam_lk = beam_lk.view(-1)
        best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True, True) # 1st sort
        self.all_scores.append(self.scores)
        self.scores = best_scores
        # bestScoresId is flattened as a (beam x word) array,
        # so we need to calculate which word and beam each score came from
        prev_k = best_scores_id // num_words
        self.prev_ks.append(prev_k)
        self.next_ys.append(best_scores_id - prev_k * num_words)
        # End condition is when top-of-beam is EOS.
        if self.next_ys[-1][0].item() == self.constants.EOS:
            self._done = True
        return self._done
    def sort_scores(self):
        "Sort the scores."
        return torch.sort(self.scores, 0, True)
    def get_the_best_score_and_idx(self):
        "Get the score of the best in the beam."
        # NOTE(review): returns index 1 (second entry after sorting), matching
        # the OpenNMT-py original — verify this is intentional for callers.
        scores, ids = self.sort_scores()
        return scores[1], ids[1]
    def get_tentative_hypothesis(self):
        "Get the decoded sequence for the current timestep."
        if len(self.next_ys) == 1:
            dec_seq = self.next_ys[0].unsqueeze(1)
        else:
            _, keys = self.sort_scores()
            hyps = [self.get_hypothesis(k) for k in keys]
            hyps = [[self.constants.BOS] + h for h in hyps]
            dec_seq = torch.LongTensor(hyps)
        return dec_seq
    def get_hypothesis(self, k):
        """ Walk back to construct the full hypothesis. """
        hyp = []
        # Follow backpointers from the last step to the first.
        for j in range(len(self.prev_ks) - 1, -1, -1):
            hyp.append(self.next_ys[j+1][k])
            k = self.prev_ks[j][k]
        return list(map(lambda x: x.item(), hyp[::-1]))
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
# Download URLs for Google's released BERT checkpoints, keyed by model name.
PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
# File names expected inside a pretrained checkpoint archive.
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
class BertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `BertModel`."""
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    config_name = CONFIG_NAME
    weights_name = WEIGHTS_NAME
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs BertConfig.

        The first argument is either a str path to a JSON config file (its
        key/value pairs become attributes, and the keyword arguments are
        ignored) or an int vocabulary size, in which case the remaining
        keyword arguments supply the model hyper-parameters: hidden/encoder
        sizes, head count, activation, dropout probabilities, maximum
        position count, token-type vocabulary size and the stddev of the
        truncated-normal weight initializer.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key/value pair from the JSON file as an attribute.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")
class BertEmbeddings(nn.Module):
    """Sum word, position and token-type embeddings, then LayerNorm + dropout."""

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm keeps TF naming so TensorFlow checkpoints load cleanly.
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        # Positions 0..seq-1, broadcast to the batch shape.
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        summed = (self.word_embeddings(input_ids)
                  + self.position_embeddings(position_ids)
                  + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (no output projection)."""

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (batch, seq, hidden) -> (batch, heads, seq, head_size)."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        # Per-head query/key/value projections.
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Scaled dot-product scores; attention_mask is additive (large negative
        # values suppress padded positions).
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        # Dropout on the attention probabilities drops whole attended tokens,
        # as in the original Transformer paper.
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return context.view(*merged_shape)
class BertSelfOutput(nn.Module):
    """Project attention output, apply dropout, then residual add + LayerNorm."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection around the attention sub-layer.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Self-attention sub-layer: attention followed by the residual output block."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        # The residual connection is applied inside self.output.
        return self.output(self.self(input_tensor, attention_mask), input_tensor)
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion with a configurable activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # Accept either a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Contract the feed-forward expansion back to hidden_size, with residual + LayerNorm."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection around the feed-forward sub-layer.
        return self.LayerNorm(projected + input_tensor)
class BertLayer(nn.Module):
    """One transformer encoder block: attention + feed-forward, each with residual."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attn_out = self.attention(hidden_states, attention_mask)
        ffn_out = self.intermediate(attn_out)
        return self.output(ffn_out, attn_out)
class BertEncoder(nn.Module):
    """Stack of identical BertLayer blocks."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        # Deep-copy one prototype layer so parameters are not shared.
        prototype = BertLayer(config)
        self.layer = nn.ModuleList(
            [copy.deepcopy(prototype) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        all_encoder_layers = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        # Otherwise return only the final layer's output (still in a list).
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class BertPooler(nn.Module):
    """Pool the sequence via the first ([CLS]) token's transformed hidden state."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # Only the first token's representation feeds the pooled output.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder."""

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Accept either a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """LM head whose output projection is tied to the input embedding matrix."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # Tie the decoder weight to the embeddings; keep a separate
        # output-only bias per vocabulary token.
        vocab_size = bert_model_embedding_weights.size(0)
        embed_dim = bert_model_embedding_weights.size(1)
        self.decoder = nn.Linear(embed_dim, vocab_size, bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(vocab_size))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class BertOnlyMLMHead(nn.Module):
    """MLM-only pre-training head: token-level vocabulary logits."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """NSP-only head: binary is-next classification from the pooled output."""

    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Joint pre-training heads: MLM token logits plus NSP classification."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BertModel(PreTrainedModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a BertConfig class instance with the configuration to build a new model
    Inputs:
        `type`: a str, indicates which masking will be used in the attention, choice from [`bi`, `seq`, `gen`]
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLF`) to train on the Next-Sentence task (see BERT's paper).
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)
        # Standard BERT pipeline: token embeddings -> transformer stack -> CLS pooler.
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        # Recursively initialize all submodule weights (see PreTrainedModel.init_weights).
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        # Default: attend to every position and treat everything as segment 0.
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        # The last element is always the final layer's full-sequence output.
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Unwrap the single-element result to a bare tensor for convenience.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
UniVL | UniVL-main/modules/module_cross.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'cross_config.json'
WEIGHTS_NAME = 'cross_pytorch_model.bin'
class CrossConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `CrossModel`.

    May be built either from the path of a JSON config file (every stored
    key/value pair is copied onto the instance) or from an integer
    vocabulary size together with keyword overrides of the transformer
    hyper-parameters.
    """
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    config_name = CONFIG_NAME
    weights_name = WEIGHTS_NAME

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs CrossConfig.

        Args:
            vocab_size_or_config_json_file: vocabulary size (int) or the path
                of a pretrained-model JSON config file (str).
            hidden_size: width of the encoder layers and the pooler layer.
            num_hidden_layers: number of transformer layers in the encoder.
            num_attention_heads: attention heads per attention layer.
            intermediate_size: width of the feed-forward ("intermediate") layer.
            hidden_act: activation in encoder/pooler — "gelu", "relu", "swish",
                or a callable.
            hidden_dropout_prob: dropout for all fully-connected layers.
            attention_probs_dropout_prob: dropout on the attention weights.
            max_position_embeddings: maximum sequence length ever supported.
            type_vocab_size: vocabulary size of the `token_type_ids`.
            initializer_range: stddev of the truncated-normal initializer.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # JSON file: adopt every stored key/value pair verbatim.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                self.__dict__.update(json.loads(reader.read()))
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")
class CrossEmbeddings(nn.Module):
    """Construct the embeddings for the cross encoder.

    Adds learned position and token-type embeddings to the incoming
    (already dense) feature vectors, then applies LayerNorm and dropout.
    There is no word-embedding table here: `concat_embeddings` arrives as
    a (batch, seq, hidden) float tensor.
    """
    def __init__(self, config):
        super(CrossEmbeddings, self).__init__()

        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, concat_embeddings, concat_type=None):
        """Return normalized embeddings for `concat_embeddings`.

        Args:
            concat_embeddings: (batch, seq, hidden) float tensor.
            concat_type: optional (batch, seq) LongTensor of segment ids;
                defaults to all zeros.
        """
        batch_size, seq_length = concat_embeddings.size(0), concat_embeddings.size(1)
        if concat_type is None:
            # BUGFIX: the original passed `concat_type` (None) as the second
            # size argument to torch.zeros, which raises a TypeError.  Default
            # to a (batch, seq) tensor of type-0 ids; it must be a LongTensor
            # because it indexes an nn.Embedding.
            concat_type = torch.zeros(batch_size, seq_length, dtype=torch.long,
                                      device=concat_embeddings.device)

        position_ids = torch.arange(seq_length, dtype=torch.long, device=concat_embeddings.device)
        position_ids = position_ids.unsqueeze(0).expand(batch_size, -1)

        token_type_embeddings = self.token_type_embeddings(concat_type)
        position_embeddings = self.position_embeddings(position_ids)

        embeddings = concat_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class CrossSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention over a single sequence."""

    def __init__(self, config):
        super(CrossSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))

        # Scaled dot-product scores with the additive mask folded in
        # (mask is 0 where attended, -10000 where masked).
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask

        # Dropout on the normalized weights drops whole tokens to attend to —
        # unusual-looking, but taken from the original Transformer paper.
        probs = self.dropout(torch.softmax(scores, dim=-1))

        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return context.view(*merged_shape)
class CrossSelfOutput(nn.Module):
    """Project the attention output, apply dropout, then add-&-norm the residual."""

    def __init__(self, config):
        super(CrossSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class CrossAttention(nn.Module):
    """Attention sub-block: self-attention followed by its output projection."""

    def __init__(self, config):
        super(CrossAttention, self).__init__()
        self.self = CrossSelfAttention(config)
        self.output = CrossSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        context = self.self(input_tensor, attention_mask)
        # The output module also receives the block input for the residual.
        return self.output(context, input_tensor)
class CrossIntermediate(nn.Module):
    """Position-wise feed-forward expansion with the configured activation."""

    def __init__(self, config):
        super(CrossIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class CrossOutput(nn.Module):
    """Project the feed-forward expansion back down, then add-&-norm the residual."""

    def __init__(self, config):
        super(CrossOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class CrossLayer(nn.Module):
    """One full transformer block: attention, feed-forward expansion, projection."""

    def __init__(self, config):
        super(CrossLayer, self).__init__()
        self.attention = CrossAttention(config)
        self.intermediate = CrossIntermediate(config)
        self.output = CrossOutput(config)

    def forward(self, hidden_states, attention_mask):
        attn_out = self.attention(hidden_states, attention_mask)
        expanded = self.intermediate(attn_out)
        # The output module receives attn_out again for the residual path.
        return self.output(expanded, attn_out)
class CrossEncoder(nn.Module):
    """Stack of identical cross-transformer layers."""

    def __init__(self, config):
        super(CrossEncoder, self).__init__()
        prototype = CrossLayer(config)
        # Each layer is an independent deep copy of the prototype.
        self.layer = nn.ModuleList(
            copy.deepcopy(prototype) for _ in range(config.num_hidden_layers))

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Return per-layer outputs, or a one-element list with the final output."""
        all_encoder_layers = []
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, attention_mask)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class CrossPooler(nn.Module):
    """Pool a sequence by projecting its first token through a tanh layer."""

    def __init__(self, config):
        super(CrossPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here means taking the hidden state of the first
        # (CLS-style) token only.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class CrossPredictionHeadTransform(nn.Module):
    """Dense projection + activation + LayerNorm applied before the LM decoder."""

    def __init__(self, config):
        super(CrossPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # hidden_act may be a name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class CrossLMPredictionHead(nn.Module):
    """LM head for the cross encoder: transform, then score against the vocabulary.

    The decoder weight matrix is tied to the given embedding table; only a
    per-token output bias is trained on top.
    """

    def __init__(self, config, cross_model_embedding_weights):
        super(CrossLMPredictionHead, self).__init__()
        self.transform = CrossPredictionHeadTransform(config)
        vocab_size = cross_model_embedding_weights.size(0)
        hidden_size = cross_model_embedding_weights.size(1)
        # Weight tying: reuse the embedding matrix as the output projection.
        self.decoder = nn.Linear(hidden_size, vocab_size, bias=False)
        self.decoder.weight = cross_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(vocab_size))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class CrossOnlyMLMHead(nn.Module):
    """Pre-training head exposing only the masked-LM vocabulary scores."""

    def __init__(self, config, cross_model_embedding_weights):
        super(CrossOnlyMLMHead, self).__init__()
        self.predictions = CrossLMPredictionHead(config, cross_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class CrossOnlyNSPHead(nn.Module):
    """Binary sentence-relationship classifier over the pooled output."""

    def __init__(self, config):
        super(CrossOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class CrossPreTrainingHeads(nn.Module):
    """Joint pre-training heads: masked-LM scores plus sentence-relationship logits."""

    def __init__(self, config, cross_model_embedding_weights):
        super(CrossPreTrainingHeads, self).__init__()
        self.predictions = CrossLMPredictionHead(config, cross_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class CrossModel(PreTrainedModel):
    """Transformer encoder over an already-embedded (e.g. multimodal) sequence.

    `concat_input` is a (batch, seq, hidden) float tensor of concatenated
    features; `concat_type` carries the segment ids distinguishing the parts.
    Returns (encoded_layers, pooled_output) exactly like `BertModel`.
    """
    def __init__(self, config):
        super(CrossModel, self).__init__(config)
        self.embeddings = CrossEmbeddings(config)
        self.encoder = CrossEncoder(config)
        self.pooler = CrossPooler(config)
        # Recursively initialize all submodule weights.
        self.apply(self.init_weights)

    def forward(self, concat_input, concat_type=None, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            # BUGFIX: allocate the default mask on the input's device; the
            # original used torch.ones(...) with no device, which breaks when
            # concat_input lives on the GPU.
            attention_mask = torch.ones(concat_input.size(0), concat_input.size(1),
                                        device=concat_input.device)
        if concat_type is None:
            # BUGFIX: segment ids index an nn.Embedding and must be a
            # LongTensor; zeros_like(attention_mask) inherited a float dtype.
            concat_type = torch.zeros_like(attention_mask, dtype=torch.long)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(concat_input, concat_type)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
| 17,516 | 43.346835 | 108 | py |
UniVL | UniVL-main/modules/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
    """
    Derive a repeatable, filesystem-safe cache filename for `url`.

    The name is the hex sha256 of the url; when an `etag` is given its
    hex sha256 is appended after a period, so a changed remote resource
    maps to a distinct cache entry.
    """
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename = filename + '.' + sha256(etag.encode('utf-8')).hexdigest()
    return filename
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
    """
    Look up the url and etag (which may be ``None``) recorded for a cached file.

    The metadata lives next to the cached file in ``<filename>.json``.
    Raises ``FileNotFoundError`` when either the file or its metadata
    sidecar is missing.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise FileNotFoundError("file {} not found".format(cache_path))

    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise FileNotFoundError("file {} not found".format(meta_path))

    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
    """
    Resolve something that may be a URL or a local path to a local file path.

    http/https/s3 URLs are downloaded into the cache (if not already present)
    and the cached path is returned; an existing local path is returned
    unchanged.  Raises ``FileNotFoundError`` for a missing local path and
    ``ValueError`` for anything that parses as neither.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: serve from (or download into) the cache.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Local file that exists: hand it back untouched.
        return url_or_filename
    if scheme == '':
        # Looks like a local path, but nothing is there.
        raise FileNotFoundError("file {} not found".format(url_or_filename))
    # Unknown scheme: neither fetchable nor a local file.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split a full s3 url into its bucket name and object key."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    key = parsed.path
    # urlparse keeps the leading '/', which S3 keys do not use.
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
def s3_request(func: Callable):
    """
    Decorate an S3 helper so that a boto 404 surfaces as a plain
    ``FileNotFoundError`` naming the requested url; other client errors
    propagate unchanged.
    """
    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response["Error"]["Code"]) == 404:
                raise FileNotFoundError("file {} not found".format(url))
            raise
    return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Return the ETag of the S3 object at `url` (None if S3 reports none)."""
    bucket_name, s3_path = split_s3_path(url)
    s3_object = boto3.resource("s3").Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Stream the S3 object at `url` directly into the open `temp_file`."""
    bucket_name, s3_path = split_s3_path(url)
    boto3.resource("s3").Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
    """Stream `url` over HTTP into `temp_file`, showing a byte progress bar."""
    response = requests.get(url, stream=True)
    length_header = response.headers.get('Content-Length')
    # Content-Length may be absent (chunked responses): tqdm accepts total=None.
    progress = tqdm(unit="B", total=int(length_header) if length_header is not None else None)
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # skip keep-alive chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")

    # Cache key is the sha256 of the url (plus the etag, when present), so a
    # changed remote resource gets a fresh cache entry.
    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            logger.info("creating metadata file for %s", cache_path)
            # Sidecar JSON recording where this cache entry came from; read
            # back by filename_to_url().
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)

            logger.info("removing temp file %s", temp_file.name)

    return cache_path
def read_set_from_file(filename: str) -> Set[str]:
    '''
    Read a file with one item per line and return the de-duplicated
    items as a set (trailing whitespace stripped from each line).
    '''
    with open(filename, 'r', encoding='utf-8') as file_:
        return {line.rstrip() for line in file_}
def get_file_extension(path: str, dot=True, lower: bool = True):
    """Return `path`'s extension, optionally without the leading dot and/or lowercased."""
    ext = os.path.splitext(path)[1]
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
| 8,021 | 32.425 | 98 | py |
UniVL | UniVL-main/modules/__init__.py | 0 | 0 | 0 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.