text stringlengths 1 93.6k |
|---|
# Tail of an RL evaluation script (the earlier part of the original file is
# not visible in this chunk): wrap the environment, load the checkpointed
# model for the configured algorithm, run evaluation, then clean up.

# Resolve the wrapper class and its constructor args from a string spec
# (parse_wrapper_class is defined earlier in the file) and apply it.
wrap_class, wrap_params = parse_wrapper_class(wrapper_class_str)
env = wrap_class(env, *wrap_params)

# Load the model based on the algorithm type.
# The CLIP-rewarded variants skip reloading the CLIP reward model
# (load_clip=False) since it is not needed at evaluation time, and are
# flagged inference-only.
if CONFIG.algorithm == "CLIP-SAC":
    model = CLIPRewardedSAC.load(model_ckpt, env=env, config=CONFIG, device=args["device"], load_clip=False)
    model.inference_only = True
elif CONFIG.algorithm == "CLIP-PPO":
    model = CLIPRewardedPPO.load(model_ckpt, env=env, config=CONFIG, device=args["device"], load_clip=False)
    model.inference_only = True
else:
    # Fallback: generic RL algorithm checkpoint (no CLIP machinery).
    model = AlgorithmRL.load(model_ckpt, env=env, device=args["device"])

print("Model loaded successfully...")

# NOTE(review): record_video is fed from args["no_record_video"]. If that is
# a store_true "--no-record-video" CLI flag, the truth value is inverted
# here — confirm against the argparse definition (not visible in this chunk).
run_eval(env, model, model_ckpt, record_video=args["no_record_video"], eval_suffix=eval_suffix)

env.close()
|
# <FILESEP>
|
import numpy as np
|
import tensorflow as tf
|
import os, sys
|
from utils import process
|
from utils.faiss_rerank import compute_jaccard_distance
|
from tensorflow.python.layers.core import Dense
|
from sklearn.preprocessing import label_binarize
|
from sklearn.cluster import DBSCAN
|
import torch
|
import torch.nn.functional as F
|
import collections
|
from sklearn.metrics import average_precision_score
|
from sklearn import metrics as mr
|
from sklearn.metrics.cluster import adjusted_mutual_info_score as AMI_score
|
import gc
|
from functools import partial
|
from collections import Counter
|
# --- Global experiment configuration -----------------------------------
# Module-level state: these names are read elsewhere in the file, so the
# identifiers must not change.

dataset = ''
probe = ''

# Pin execution to GPU 0 and silence TensorFlow C++ logging.
# NOTE(review): a 'gpu' command-line flag is also defined below, but this
# assignment is hard-coded to '0' — confirm which one the rest of the file
# actually honors.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

# TF1-style session config: grow GPU memory on demand rather than
# pre-allocating the whole device up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

# Model / data dimensions.
nb_nodes = 20   # number of skeleton joints (graph nodes)
ft_size = 3     # original node feature dimension (D)
time_step = 6   # sequence length (F)

# training params
batch_size = 256
nb_epochs = 100000
patience = 250  # patience for early stopping
# Defaults only: k1/k2 are overwritten from FLAGS right after flag parsing.
k1, k2 = 20, 6  # parameters to compute feature distance matrix
|
# --- Command-line flags (TF1 tf.app.flags) -----------------------------
# All flags are registered as strings and converted where needed; empty
# help strings are kept as-is with meanings documented in the trailing
# comments.
tf.app.flags.DEFINE_string('save_model', '1', "")  # save best model
tf.app.flags.DEFINE_string('batch_size', '256', "")  # training batch size
tf.app.flags.DEFINE_string('model_size', '0', "")  # output model size and computational complexity
tf.app.flags.DEFINE_string('dataset', 'KS20', "Dataset: IAS, KS20, BIWI, CASIA-B or KGBD")
tf.app.flags.DEFINE_string('probe', 'probe', "for testing probe")
tf.app.flags.DEFINE_string('length', '6', "4, 6, 8 or 10")  # skeleton sequence length
tf.app.flags.DEFINE_string('H', '256', "")  # embedding size (h) for skeleton representations
tf.app.flags.DEFINE_string('M', '8', "")  # number (M) of meta-transformation heads
tf.app.flags.DEFINE_string('eps', '', "distance parameter in DBSCAN")
tf.app.flags.DEFINE_string('min_samples', '', "minimum sample number in DBSCAN")
tf.app.flags.DEFINE_string('gpu', '0', "GPU number")
tf.app.flags.DEFINE_string('probe_type', '', "probe.gallery")  # probe and gallery setting for CASIA-B
tf.app.flags.DEFINE_string('patience', '50', "epochs for early stopping")
tf.app.flags.DEFINE_string('mode', 'Train', "Training (Train) or Evaluation (Eval)")
tf.app.flags.DEFINE_string('lr', '0.00035', "learning rate")
tf.app.flags.DEFINE_string('k1', '20', "")  # k1 for feature-distance (Jaccard) computation
tf.app.flags.DEFINE_string('k2', '6', "")  # k2 for feature-distance (Jaccard) computation
tf.app.flags.DEFINE_string('focus', '1', "")  # purpose not evident from this chunk — see usage below
FLAGS = tf.app.flags.FLAGS

# Override the module-level defaults with the parsed CLI values.
k1, k2 = int(FLAGS.k1), int(FLAGS.k2)
|
# check parameters
# Validate CLI flags up front so misconfigured runs fail fast with a clear
# message. Raises a plain Exception on any invalid combination.

if FLAGS.dataset not in ['IAS', 'KGBD', 'KS20', 'BIWI', 'CASIA_B']:
    # Fix: the accepted spelling is 'CASIA_B' (underscore); the previous
    # message advertised "CASIA B", which the check above rejects.
    raise Exception('Dataset must be IAS, KGBD, KS20, BIWI or CASIA_B.')

if FLAGS.dataset == 'CASIA_B':
    # CASIA-B uses longer sequences; force the default length.
    # NOTE(review): because the assignment is unconditional, the membership
    # check below can never fail — kept for parity with the non-CASIA branch
    # and in case the forced assignment is removed later.
    FLAGS.length = '40'
    if FLAGS.length not in ['40', '50', '60']:
        raise Exception('Length number must be 40, 50 or 60')
else:
    if FLAGS.length not in ['4', '6', '8', '10']:
        raise Exception('Length number must be 4, 6, 8 or 10')

if FLAGS.probe not in ['probe', 'Walking', 'Still', 'A', 'B']:
    # Fix: the message now lists every value the check accepts, including
    # the BIWI probes ('Walking', 'Still') that were previously omitted.
    raise Exception(
        'Dataset probe must be "A" (for IAS-A), "B" (for IAS-B), '
        '"probe" (for KS20, KGBD), or "Walking"/"Still" (for BIWI).')

if FLAGS.mode not in ['Train', 'Eval']:
    raise Exception('Mode must be Train or Eval.')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.