Search is not available for this dataset
repo
stringlengths
2
152
file
stringlengths
15
239
code
stringlengths
0
58.4M
file_length
int64
0
58.4M
avg_line_length
float64
0
1.81M
max_line_length
int64
0
12.7M
extension_type
stringclasses
364 values
fancyimpute
fancyimpute-master/fancyimpute/scaler.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np


class Scaler(object):
    """
    Column-wise centering and scaling of a (possibly incomplete) matrix.

    NaN entries are ignored when estimating per-column means and standard
    deviations, so this can be fit directly on data with missing values.
    """

    def __init__(
            self,
            center_columns=True,
            scale_columns=True,
            min_value=None,
            max_value=None,
            verbose=True):
        # NOTE(review): min_value, max_value, and verbose are stored but not
        # used anywhere in this class; kept for interface compatibility.
        self.center_columns = center_columns
        self.scale_columns = scale_columns
        self.min_value = min_value
        self.max_value = max_value
        self.verbose = verbose
        self.column_centers = None
        self.column_scales = None

    def fit(self, X):
        """Estimate per-column centers/scales from observed (non-NaN) values."""
        if self.center_columns:
            self.column_centers = np.nanmean(X, axis=0)
        if self.scale_columns:
            self.column_scales = np.nanstd(X, axis=0)
            # constant columns would cause division by zero; leave unscaled
            self.column_scales[self.column_scales == 0] = 1.0
        return self

    def transform(self, X):
        """Return a centered/scaled copy of X (NaNs pass through unchanged)."""
        X = np.asarray(X).copy()
        if self.center_columns:
            X -= self.column_centers
        if self.scale_columns:
            X /= self.column_scales
        return X

    def fit_transform(self, X):
        """Convenience: fit() then transform() on the same matrix."""
        self.fit(X)
        return self.transform(X)

    def inverse_transform(self, X):
        """Undo transform(): multiply by the scales, then add back the centers."""
        X = np.asarray(X).copy()
        if self.scale_columns:
            X *= self.column_scales
        if self.center_columns:
            X += self.column_centers
        return X


class BiScaler(object):
    """
    Iterative estimation of row and column centering/scaling
    using the algorithm from page 31 of:
        Matrix Completion and Low-Rank SVD via Fast Alternating Least Squares
    """

    def __init__(
            self,
            center_rows=True,
            center_columns=True,
            scale_rows=True,
            scale_columns=True,
            min_value=None,
            max_value=None,
            max_iters=100,
            tolerance=0.001,
            verbose=True):
        self.center_rows = center_rows
        self.center_columns = center_columns
        self.scale_rows = scale_rows
        self.scale_columns = scale_columns
        self.min_value = min_value
        self.max_value = max_value
        self.max_iters = max_iters
        self.tolerance = tolerance
        self.verbose = verbose

    def estimate_row_means(
            self,
            X,
            observed,
            column_means,
            column_scales):
        """
        row_center[i] =
        sum{j in observed[i, :]}{
            (1 / column_scale[j]) * (X[i, j] - column_center[j])
        }
        ------------------------------------------------------------
        sum{j in observed[i, :]}{1 / column_scale[j]}
        """
        n_rows, n_cols = X.shape
        column_means = np.asarray(column_means)

        if len(column_means) != n_cols:
            raise ValueError("Expected length %d but got shape %s" % (
                n_cols, column_means.shape))

        # subtract column means and weight each column by 1/scale; NaNs in X
        # stay NaN and are skipped by nansum below
        X = X - column_means.reshape((1, n_cols))
        column_weights = 1.0 / column_scales
        X *= column_weights.reshape((1, n_cols))
        row_means = np.zeros(n_rows, dtype=X.dtype)
        row_residual_sums = np.nansum(X, axis=1)
        for i in range(n_rows):
            row_mask = observed[i, :]
            sum_weights = column_weights[row_mask].sum()
            row_means[i] = row_residual_sums[i] / sum_weights
        return row_means

    def estimate_column_means(
            self,
            X,
            observed,
            row_means,
            row_scales):
        """
        column_center[j] =
        sum{i in observed[:, j]}{
            (1 / row_scale[i]) * (X[i, j] - row_center[i])
        }
        ------------------------------------------------------------
        sum{i in observed[:, j]}{1 / row_scale[i]}
        """
        n_rows, n_cols = X.shape
        row_means = np.asarray(row_means)

        if len(row_means) != n_rows:
            raise ValueError("Expected length %d but got shape %s" % (
                n_rows, row_means.shape))

        column_means = np.zeros(n_cols, dtype=X.dtype)

        # subtract row means and weight each row by 1/scale
        X = X - row_means.reshape((n_rows, 1))
        row_weights = 1.0 / row_scales
        X *= row_weights.reshape((n_rows, 1))
        col_residual_sums = np.nansum(X, axis=0)
        for j in range(n_cols):
            col_mask = observed[:, j]
            sum_weights = row_weights[col_mask].sum()
            column_means[j] = col_residual_sums[j] / sum_weights
        return column_means

    def center(self, X, row_means, column_means, inplace=False):
        """Subtract row means and column means from X (copy unless inplace)."""
        n_rows, n_cols = X.shape
        row_means = np.asarray(row_means)
        column_means = np.asarray(column_means)
        if len(row_means) != n_rows:
            raise ValueError("Expected length %d but got shape %s" % (
                n_rows, row_means.shape))
        if len(column_means) != n_cols:
            raise ValueError("Expected length %d but got shape %s" % (
                n_cols, column_means.shape))
        if not inplace:
            X = X.copy()
        X -= row_means.reshape((n_rows, 1))
        X -= column_means.reshape((1, n_cols))
        return X

    def rescale(self, X, row_scales, column_scales, inplace=False):
        """Divide X by row scales and column scales (copy unless inplace)."""
        if not inplace:
            X = X.copy()
        n_rows, n_cols = X.shape
        X /= row_scales.reshape((n_rows, 1))
        X /= column_scales.reshape((1, n_cols))
        return X

    def estimate_row_scales(
            self,
            X_centered,
            column_scales):
        """
        row_scale[i]**2 =
        mean{j in observed[i, :]}{
            (X[i, j] - row_center[i] - column_center[j]) ** 2
            --------------------------------------------------
            column_scale[j] ** 2
        }
        """
        n_rows, n_cols = X_centered.shape
        column_scales = np.asarray(column_scales)
        if len(column_scales) != n_cols:
            raise ValueError("Expected length %d but got shape %s" % (
                n_cols, column_scales))
        row_variances = np.nanmean(
            X_centered ** 2 / (column_scales ** 2).reshape((1, n_cols)),
            axis=1)
        # zero-variance rows would produce zero scales (division by zero later)
        row_variances[row_variances == 0] = 1.0
        assert len(row_variances) == n_rows, "%d != %d" % (
            len(row_variances),
            n_rows)
        return np.sqrt(row_variances)

    def estimate_column_scales(
            self,
            X_centered,
            row_scales):
        """
        column_scale[j] ** 2 =
        mean{i in observed[:, j]}{
            (X[i, j] - row_center[i] - column_center[j]) ** 2
            -------------------------------------------------
            row_scale[i] ** 2
        }
        """
        n_rows, n_cols = X_centered.shape
        row_scales = np.asarray(row_scales)
        if len(row_scales) != n_rows:
            raise ValueError("Expected length %s, got shape %s" % (
                n_rows, row_scales.shape,))
        column_variances = np.nanmean(
            X_centered ** 2 / (row_scales ** 2).reshape((n_rows, 1)),
            axis=0)
        column_variances[column_variances == 0] = 1.0
        assert len(column_variances) == n_cols, "%d != %d" % (
            len(column_variances),
            n_cols)
        return np.sqrt(column_variances)

    def residual(self, X_normalized):
        """
        Scalar measure of how far X_normalized is from having zero-mean,
        unit-variance rows/columns (only the enabled criteria contribute).
        """
        total = 0
        if self.center_rows:
            row_means = np.nanmean(X_normalized, axis=1)
            total += (row_means ** 2).sum()
        if self.center_columns:
            column_means = np.nanmean(X_normalized, axis=0)
            total += (column_means ** 2).sum()
        if self.scale_rows:
            row_variances = np.nanvar(X_normalized, axis=1)
            row_variances[row_variances == 0] = 1.0
            # log-variance penalty: 0 exactly when variance == 1
            total += (np.log(row_variances) ** 2).sum()
        if self.scale_columns:
            column_variances = np.nanvar(X_normalized, axis=0)
            column_variances[column_variances == 0] = 1.0
            total += (np.log(column_variances) ** 2).sum()
        return total

    def clamp(self, X, inplace=False):
        """Clip values of X into [min_value, max_value] when bounds are set."""
        if not inplace:
            X = X.copy()
        if self.min_value is not None:
            X[X < self.min_value] = self.min_value
        if self.max_value is not None:
            X[X > self.max_value] = self.max_value
        return X

    def fit(self, X):
        """
        Alternate between estimating row and column means/scales until the
        residual criterion stops improving (or max_iters is reached).
        Results are stored on self.{row_means, row_scales, column_means,
        column_scales} for use by transform()/inverse_transform().
        """
        X = self.clamp(X)
        n_rows, n_cols = X.shape
        dtype = X.dtype

        # To avoid inefficient memory access we keep around two copies
        # of the array, one contiguous in the rows and the other
        # contiguous in the columns
        X_row_major = np.asarray(X, order="C")
        X_column_major = np.asarray(X, order="F")

        observed_row_major = ~np.isnan(X_row_major)
        n_observed_per_row = observed_row_major.sum(axis=1)
        n_empty_rows = (n_observed_per_row == 0).sum()
        if n_empty_rows > 0:
            raise ValueError("%d rows have no observed values" % n_empty_rows)

        observed_column_major = np.asarray(observed_row_major, order="F")
        n_observed_per_column = observed_column_major.sum(axis=0)
        n_empty_columns = (n_observed_per_column == 0).sum()
        if n_empty_columns > 0:
            raise ValueError("%d columns have no observed values" % (
                n_empty_columns,))

        # initialize by assuming that rows are zero-mean/unit variance and
        # with a direct estimate of mean and standard deviation
        # of each column
        row_means = np.zeros(n_rows, dtype=dtype)
        row_scales = np.ones(n_rows, dtype=dtype)

        if self.center_columns:
            column_means = np.nanmean(X, axis=0)
        else:
            column_means = np.zeros(n_cols, dtype=dtype)

        if self.scale_columns:
            column_scales = np.nanstd(X, axis=0)
            column_scales[column_scales == 0] = 1.0
        else:
            column_scales = np.ones(n_cols, dtype=dtype)

        last_residual = self.residual(X)
        if self.verbose:
            print("[BiScaler] Initial log residual value = %f" % (
                np.log(last_residual),))

        for i in range(self.max_iters):
            if last_residual == 0:
                # already have a perfect fit, so let's get out of here
                # (fix: this message previously printed even when
                # verbose=False, unlike every other diagnostic here)
                if self.verbose:
                    print("[BiScaler] No room for improvement")
                break
            assert len(column_means) == n_cols, \
                "Wrong number of column means, expected %d but got %d" % (
                    n_cols,
                    len(column_means))
            assert len(column_scales) == n_cols, \
                "Wrong number of column scales, expected %d but got %d" % (
                    n_cols,
                    len(column_scales))
            assert len(row_means) == n_rows, \
                "Wrong number of row means, expected %d but got %d" % (
                    n_rows,
                    len(row_means))
            assert len(row_scales) == n_rows, \
                "Wrong number of row scales, expected %d but got %d" % (
                    n_rows,
                    len(row_scales))
            if self.center_rows:
                row_means = self.estimate_row_means(
                    X=X_row_major,
                    observed=observed_row_major,
                    column_means=column_means,
                    column_scales=column_scales)

            if self.center_columns:
                column_means = self.estimate_column_means(
                    X=X_column_major,
                    observed=observed_column_major,
                    row_means=row_means,
                    row_scales=row_scales)

            X_centered = self.center(X, row_means, column_means)

            if self.scale_rows:
                row_scales = self.estimate_row_scales(
                    X_centered=X_centered,
                    column_scales=column_scales)

            if self.scale_columns:
                column_scales = self.estimate_column_scales(
                    X_centered=X_centered,
                    row_scales=row_scales)

            X_normalized = self.rescale(X_centered, row_scales, column_scales)
            residual = self.residual(X_normalized)
            change_in_residual = last_residual - residual
            if self.verbose:
                print("[BiScaler] Iter %d: log residual = %f, log improvement ratio=%f" % (
                    i + 1,
                    np.log(residual),
                    np.log(last_residual / residual)))
            if change_in_residual / last_residual < self.tolerance:
                break
            last_residual = residual
        self.row_means = row_means
        self.row_scales = row_scales
        self.column_means = column_means
        self.column_scales = column_scales

    def transform(self, X):
        """Center and rescale a copy of X using the fitted parameters."""
        X = np.asarray(X).copy()
        X = self.center(X, self.row_means, self.column_means, inplace=True)
        X = self.rescale(X, self.row_scales, self.column_scales, inplace=True)
        return X

    def inverse_transform(self, X, inplace=False):
        """Undo transform() (rescale then un-center), then clamp to bounds."""
        X = np.asarray(X)
        if not inplace:
            X = X.copy()
        # dividing by the reciprocal scales multiplies them back in
        X = self.rescale(
            X,
            1.0 / self.row_scales,
            1.0 / self.column_scales,
            inplace=True)
        # centering with negated means adds them back in
        X = self.center(X, -self.row_means, -self.column_means, inplace=True)
        return self.clamp(X)

    def fit_transform(self, X):
        """Convenience: fit() then transform() on the same matrix."""
        self.fit(X)
        return self.transform(X)
14,126
34.3175
91
py
fancyimpute
fancyimpute-master/fancyimpute/similarity_weighted_averaging.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict

from sklearn.utils import check_array
import numpy as np

from .dictionary_helpers import (
    collect_nested_keys,
    reverse_lookup_from_nested_dict,
    matrix_to_nested_dictionary,
    transpose_nested_dictionary,
)


class SimilarityWeightedAveraging(object):
    """
    Fill in each missing row/column value by averaging across the
    k-nearest neighbors columns (taking into account missing data when
    computing column similarities and choosing which neighbors to
    inspect).

    Currently does not inherit from Solver since it expects sparse inputs
    in the form of nested dictionaries.
    """

    def __init__(
            self,
            min_weight_for_similarity=0.1,
            min_count_for_similarity=2,
            similarity_exponent=4.0,
            shrinkage_value=0.0001,
            orientation="rows",
            verbose=False):
        """
        Parameters
        ----------
        min_weight_for_similarity : float
            If sum of values in shared rows between two columns falls below
            this threshold then similarity can't be computed between those
            columns.

        min_count_for_similarity : int
            If number of overlapping rows between two columns falls below
            this threshold then similarity can't be computed between those
            columns.

        similarity_exponent : float
            Exponent for turning similarities into weights on values of
            other columns.

        shrinkage_value : float
            Shrinks reconstructed values toward 0

        orientation : str
            Whether to compute similarities along rows or columns

        verbose : bool
        """
        self.min_weight_for_similarity = min_weight_for_similarity
        self.min_count_for_similarity = min_count_for_similarity
        self.similarity_exponent = similarity_exponent
        self.shrinkage_value = shrinkage_value
        self.orientation = orientation
        self.verbose = verbose

    def jacard_similarity_from_nested_dicts(self, nested_dictionaries):
        """
        Compute the continuous Jaccard similarity between all pairs of keys
        in dictionary-of-dictionaries given as an input.

        Returns three element tuple:
            - similarity dictionary: (key, key) -> float
            - overlap count dictionary: key -> key -> int
            - weight dictionary: key -> key -> float
        """
        sims = {}
        overlaps = {}
        weights = {}
        # all-pairs comparison (includes each pair in both orders and
        # each key with itself)
        for a, column_dict_a in nested_dictionaries.items():
            row_set_a = set(column_dict_a.keys())
            for b, column_dict_b in nested_dictionaries.items():
                row_set_b = set(column_dict_b.keys())
                common_rows = row_set_a.intersection(row_set_b)
                n_overlap = len(common_rows)
                overlaps[(a, b)] = n_overlap
                total = 0.0
                weight = 0.0
                # continuous Jaccard: sum of elementwise minima over
                # sum of elementwise maxima across shared rows
                for row_name in common_rows:
                    value_a = column_dict_a[row_name]
                    value_b = column_dict_b[row_name]
                    minval = min(value_a, value_b)
                    maxval = max(value_a, value_b)
                    total += minval
                    weight += maxval
                weights[(a, b)] = weight
                # too little shared mass or too few shared rows: the
                # similarity estimate would be unreliable, so skip it
                if weight < self.min_weight_for_similarity:
                    continue
                if n_overlap < self.min_count_for_similarity:
                    continue
                sims[(a, b)] = total / weight
        return sims, overlaps, weights

    def complete_dict(self, values_dict):
        """
        Fill in missing entries of a nested dictionary by a similarity
        weighted average of the values of similar keys.

        Keys of nested dictionaries can be arbitrary objects.
        """
        # internally we always work row-wise; transpose in and out if the
        # caller asked for column orientation
        if self.orientation != "rows":
            values_dict = transpose_nested_dictionary(values_dict)
        row_keys, column_keys = collect_nested_keys(values_dict)
        if self.verbose:
            print("[SimilarityWeightedAveraging] # rows = %d" % (len(row_keys)))
            print("[SimilarityWeightedAveraging] # columns = %d" % (len(column_keys)))
        similarities, overlaps, weights = self.jacard_similarity_from_nested_dicts(values_dict)
        if self.verbose:
            print("[SimilarityWeightedAveraging] Computed %d similarities between rows" % (len(similarities),))
        column_to_row_values = reverse_lookup_from_nested_dict(values_dict)
        result = defaultdict(dict)
        exponent = self.similarity_exponent
        shrinkage_value = self.shrinkage_value
        for i, row_key in enumerate(row_keys):
            for column_key, value_triplets in column_to_row_values.items():
                total = 0
                # shrinkage_value in the denominator pulls estimates with
                # little supporting weight toward 0
                denom = shrinkage_value
                for (other_row_key, y) in value_triplets:
                    sample_weight = 1.0
                    # rows with no computable similarity contribute 0
                    sim = similarities.get((row_key, other_row_key), 0)
                    combined_weight = sim ** exponent
                    combined_weight *= sample_weight
                    total += combined_weight * y
                    denom += combined_weight
                # only emit a value if at least one similar row contributed
                if denom > shrinkage_value:
                    result[row_key][column_key] = total / denom
        if self.orientation != "rows":
            result = transpose_nested_dictionary(result)
        return result

    def fit_transform(self, X):
        """
        Complete a dense matrix with NaNs marking missing entries; observed
        entries are copied through unchanged.
        """
        X = check_array(X, force_all_finite=False)
        if self.verbose:
            print(("[SimilarityWeightedAveraging] Creating dictionary from matrix "
                   " with shape %s") % (X.shape,))
        missing_mask = np.isnan(X)
        observed_mask = ~missing_mask
        sparse_dict = matrix_to_nested_dictionary(X, filter_fn=np.isfinite)
        completed_dict = self.complete_dict(sparse_dict)
        array_result = np.zeros_like(X)
        for row_idx, row_dict in completed_dict.items():
            for col_idx, value in row_dict.items():
                array_result[row_idx, col_idx] = value
        # never overwrite observed data with reconstructed values
        array_result[observed_mask] = X[observed_mask]
        return array_result
6,541
38.409639
115
py
fancyimpute
fancyimpute-master/fancyimpute/simple_fill.py
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .solver import Solver class SimpleFill(Solver): def __init__(self, fill_method="mean", min_value=None, max_value=None): """ Possible values for fill_method: "zero": fill missing entries with zeros "mean": fill with column means "median" : fill with column medians "min": fill with min value per column "random": fill with gaussian noise according to mean/std of column """ Solver.__init__( self, fill_method=fill_method, min_value=None, max_value=None) def solve(self, X, missing_mask): """ Since X is given to us already filled, just return it. """ return X
1,292
33.945946
78
py
fancyimpute
fancyimpute-master/fancyimpute/soft_impute.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from sklearn.utils.extmath import randomized_svd
from sklearn.utils import check_array

from .common import masked_mae
from .solver import Solver

# smallest meaningful float32 difference; used to guard the convergence test
F32PREC = np.finfo(np.float32).eps


class SoftImpute(Solver):
    """
    Implementation of the SoftImpute algorithm from:
    "Spectral Regularization Algorithms for Learning Large Incomplete Matrices"
    by Mazumder, Hastie, and Tibshirani.
    """
    def __init__(
            self,
            shrinkage_value=None,
            convergence_threshold=0.001,
            max_iters=100,
            max_rank=None,
            n_power_iterations=1,
            init_fill_method="zero",
            min_value=None,
            max_value=None,
            normalizer=None,
            verbose=True):
        """
        Parameters
        ----------
        shrinkage_value : float
            Value by which we shrink singular values on each iteration. If
            omitted then the default value will be the maximum singular
            value of the initialized matrix (zeros for missing values)
            divided by 50.

        convergence_threshold : float
            Minimum ratio difference between iterations (as a fraction of
            the Frobenius norm of the current solution) before stopping.

        max_iters : int
            Maximum number of SVD iterations

        max_rank : int, optional
            Perform a truncated SVD on each iteration with this value as its
            rank.

        n_power_iterations : int
            Number of power iterations to perform with randomized SVD

        init_fill_method : str
            How to initialize missing values of data matrix, default is
            to fill them with zeros.

        min_value : float
            Smallest allowable value in the solution

        max_value : float
            Largest allowable value in the solution

        normalizer : object
            Any object (such as BiScaler) with fit() and transform() methods

        verbose : bool
            Print debugging info
        """
        Solver.__init__(
            self,
            fill_method=init_fill_method,
            min_value=min_value,
            max_value=max_value,
            normalizer=normalizer)
        self.shrinkage_value = shrinkage_value
        self.convergence_threshold = convergence_threshold
        self.max_iters = max_iters
        self.max_rank = max_rank
        self.n_power_iterations = n_power_iterations
        self.verbose = verbose

    def _converged(self, X_old, X_new, missing_mask):
        # Converged when the relative change on the *imputed* entries
        # (ratio of root-sum-of-squared differences to the old norm)
        # drops below the threshold.
        old_missing_values = X_old[missing_mask]
        new_missing_values = X_new[missing_mask]
        difference = old_missing_values - new_missing_values
        ssd = np.sum(difference ** 2)
        old_norm = np.sqrt((old_missing_values ** 2).sum())
        # edge cases: a zero (or near-zero) old norm with a non-trivial
        # change means we can't form a meaningful ratio yet
        if old_norm == 0 or (old_norm < F32PREC and np.sqrt(ssd) > F32PREC):
            return False
        else:
            return (np.sqrt(ssd) / old_norm) < self.convergence_threshold

    def _svd_step(self, X, shrinkage_value, max_rank=None):
        """
        Returns reconstructed X from low-rank thresholded SVD and
        the rank achieved.
        """
        if max_rank:
            # if we have a max rank then perform the faster randomized SVD
            (U, s, V) = randomized_svd(
                X,
                max_rank,
                n_iter=self.n_power_iterations,
                random_state=None)
        else:
            # perform a full-rank SVD (LAPACK-backed via np.linalg.svd)
            (U, s, V) = np.linalg.svd(
                X,
                full_matrices=False,
                compute_uv=True)
        # soft-threshold the singular values and drop those shrunk to zero
        s_thresh = np.maximum(s - shrinkage_value, 0)
        rank = (s_thresh > 0).sum()
        s_thresh = s_thresh[:rank]
        U_thresh = U[:, :rank]
        V_thresh = V[:rank, :]
        S_thresh = np.diag(s_thresh)
        X_reconstruction = np.dot(U_thresh, np.dot(S_thresh, V_thresh))
        return X_reconstruction, rank

    def _max_singular_value(self, X_filled):
        # quick decomposition of X_filled into rank-1 SVD
        _, s, _ = randomized_svd(
            X_filled,
            1,
            n_iter=5,
            random_state=None)
        return s[0]

    def solve(self, X, missing_mask):
        """
        Iteratively replace the missing entries of X (already initialized by
        Solver.fill) with values from a soft-thresholded SVD reconstruction
        until convergence or max_iters.
        """
        X = check_array(X, force_all_finite=False)

        X_init = X.copy()

        X_filled = X
        observed_mask = ~missing_mask
        max_singular_value = self._max_singular_value(X_filled)
        if self.verbose:
            print("[SoftImpute] Max Singular Value of X_init = %f" % (
                max_singular_value))

        if self.shrinkage_value:
            shrinkage_value = self.shrinkage_value
        else:
            # totally hackish heuristic: keep only components
            # with at least 1/50th the max singular value
            shrinkage_value = max_singular_value / 50.0

        for i in range(self.max_iters):
            X_reconstruction, rank = self._svd_step(
                X_filled,
                shrinkage_value,
                max_rank=self.max_rank)
            X_reconstruction = self.clip(X_reconstruction)

            # print error on observed data
            if self.verbose:
                mae = masked_mae(
                    X_true=X_init,
                    X_pred=X_reconstruction,
                    mask=observed_mask)
                print(
                    "[SoftImpute] Iter %d: observed MAE=%0.6f rank=%d" % (
                        i + 1,
                        mae,
                        rank))

            converged = self._converged(
                X_old=X_filled,
                X_new=X_reconstruction,
                missing_mask=missing_mask)
            # only the missing entries are updated; observed entries are
            # kept at their original values throughout
            X_filled[missing_mask] = X_reconstruction[missing_mask]
            if converged:
                break

        if self.verbose:
            print("[SoftImpute] Stopped after iteration %d for lambda=%f" % (
                i + 1,
                shrinkage_value))

        return X_filled
6,595
33.176166
79
py
fancyimpute
fancyimpute-master/fancyimpute/solver.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings

import numpy as np
from sklearn.utils import check_array

from .common import generate_random_column_samples


class Solver(object):
    """
    Base class for imputation algorithms: validates input, fills missing
    entries with an initial guess, delegates to a subclass solve(), then
    projects the result back (un-normalize + clip) and restores observed
    entries.
    """

    def __init__(
            self,
            fill_method="zero",
            min_value=None,
            max_value=None,
            normalizer=None):
        self.fill_method = fill_method
        self.min_value = min_value
        self.max_value = max_value
        self.normalizer = normalizer

    def __repr__(self):
        return str(self)

    def __str__(self):
        # render only simple (None/numeric/string) fields; complex
        # attributes like normalizer objects are omitted
        field_list = []
        for (k, v) in sorted(self.__dict__.items()):
            if v is None or isinstance(v, (float, int)):
                field_list.append("%s=%s" % (k, v))
            elif isinstance(v, str):
                field_list.append("%s='%s'" % (k, v))
        return "%s(%s)" % (
            self.__class__.__name__,
            ", ".join(field_list))

    def _check_input(self, X):
        # only 2-D matrices are supported
        if len(X.shape) != 2:
            raise ValueError("Expected 2d matrix, got %s array" % (X.shape,))

    def _check_missing_value_mask(self, missing):
        # warn (but proceed) when nothing is missing; fail when everything is
        if not missing.any():
            warnings.simplefilter("always")
            warnings.warn("Input matrix is not missing any values")
        if missing.all():
            raise ValueError("Input matrix must have some non-missing values")

    def _fill_columns_with_fn(self, X, missing_mask, col_fn):
        # apply col_fn (e.g. np.nanmean) per column and write the result
        # into that column's missing entries, in place
        for col_idx in range(X.shape[1]):
            missing_col = missing_mask[:, col_idx]
            n_missing = missing_col.sum()
            if n_missing == 0:
                continue
            col_data = X[:, col_idx]
            fill_values = col_fn(col_data)
            # an all-NaN column yields NaN from col_fn; fall back to 0
            if np.all(np.isnan(fill_values)):
                fill_values = 0
            X[missing_col, col_idx] = fill_values

    def fill(
            self,
            X,
            missing_mask,
            fill_method=None,
            inplace=False):
        """
        Parameters
        ----------
        X : np.array
            Data array containing NaN entries

        missing_mask : np.array
            Boolean array indicating where NaN entries are

        fill_method : str
            "zero": fill missing entries with zeros
            "mean": fill with column means
            "median" : fill with column medians
            "min": fill with min value per column
            "random": fill with gaussian samples according to mean/std of column

        inplace : bool
            Modify matrix or fill a copy
        """
        X = check_array(X, force_all_finite=False)

        if not inplace:
            X = X.copy()

        if not fill_method:
            fill_method = self.fill_method

        if fill_method not in ("zero", "mean", "median", "min", "random"):
            raise ValueError("Invalid fill method: '%s'" % (fill_method))
        elif fill_method == "zero":
            # replace NaN's with 0
            X[missing_mask] = 0
        elif fill_method == "mean":
            self._fill_columns_with_fn(X, missing_mask, np.nanmean)
        elif fill_method == "median":
            self._fill_columns_with_fn(X, missing_mask, np.nanmedian)
        elif fill_method == "min":
            self._fill_columns_with_fn(X, missing_mask, np.nanmin)
        elif fill_method == "random":
            self._fill_columns_with_fn(
                X,
                missing_mask,
                col_fn=generate_random_column_samples)
        return X

    def prepare_input_data(self, X):
        """
        Check to make sure that the input matrix and its mask of missing
        values are valid. Returns X and missing mask.
        """
        X = check_array(X, force_all_finite=False)
        # np.isnan requires a float dtype; promote ints etc. up front
        if X.dtype != "f" and X.dtype != "d":
            X = X.astype(float)

        self._check_input(X)
        missing_mask = np.isnan(X)
        self._check_missing_value_mask(missing_mask)
        return X, missing_mask

    def clip(self, X):
        """
        Clip values to fall within any global or column-wise min/max constraints
        """
        X = np.asarray(X)
        if self.min_value is not None:
            X[X < self.min_value] = self.min_value
        if self.max_value is not None:
            X[X > self.max_value] = self.max_value
        return X

    def project_result(self, X):
        """
        First undo normalization and then clip to the user-specified min/max
        range.
        """
        X = np.asarray(X)
        if self.normalizer is not None:
            X = self.normalizer.inverse_transform(X)
        return self.clip(X)

    def solve(self, X, missing_mask):
        """
        Given an initialized matrix X and a mask of where its missing values
        had been, return a completion of X.
        """
        raise ValueError("%s.solve not yet implemented!" % (
            self.__class__.__name__,))

    def fit_transform(self, X, y=None):
        """
        Fit the imputer and then transform input `X`

        Note: all imputations should have a `fit_transform` method,
        but only some (like IterativeImputer in sklearn) also support inductive
        mode using `fit` or `fit_transform` on `X_train` and then `transform`
        on new `X_test`.
        """
        X_original, missing_mask = self.prepare_input_data(X)
        observed_mask = ~missing_mask
        X = X_original.copy()
        if self.normalizer is not None:
            X = self.normalizer.fit_transform(X)
        X_filled = self.fill(X, missing_mask, inplace=True)
        if not isinstance(X_filled, np.ndarray):
            raise TypeError(
                "Expected %s.fill() to return NumPy array but got %s" % (
                    self.__class__.__name__,
                    type(X_filled)))

        X_result = self.solve(X_filled, missing_mask)
        if not isinstance(X_result, np.ndarray):
            raise TypeError(
                "Expected %s.solve() to return NumPy array but got %s" % (
                    self.__class__.__name__,
                    type(X_result)))

        X_result = self.project_result(X=X_result)
        # observed entries always pass through unchanged
        X_result[observed_mask] = X_original[observed_mask]
        return X_result

    def fit(self, X, y=None):
        """
        Fit the imputer on input `X`.

        Note: all imputations should have a `fit_transform` method,
        but only some (like IterativeImputer in sklearn) also support inductive
        mode using `fit` or `fit_transform` on `X_train` and then `transform`
        on new `X_test`.
        """
        raise ValueError(
            "%s.fit not implemented! This imputation algorithm likely "
            "doesn't support inductive mode. Only fit_transform is "
            "supported at this time." % (
                self.__class__.__name__,))

    def transform(self, X, y=None):
        """
        Transform input `X`.

        Note: all imputations should have a `fit_transform` method,
        but only some (like IterativeImputer in sklearn) also support inductive
        mode using `fit` or `fit_transform` on `X_train` and then `transform`
        on new `X_test`.
        """
        raise ValueError(
            "%s.transform not implemented! This imputation algorithm likely "
            "doesn't support inductive mode. Only %s.fit_transform is "
            "supported at this time." % (
                self.__class__.__name__,
                self.__class__.__name__))
7,938
34.128319
80
py
fancyimpute
fancyimpute-master/test/common.py
import numpy as np def reconstruction_error(XY, XY_completed, missing_mask, name=None): """ Returns mean squared error and mean absolute error for completed matrices. """ value_pairs = [ (i, j, XY[i, j], XY_completed[i, j]) for i in range(XY.shape[0]) for j in range(XY.shape[1]) if missing_mask[i, j] ] print("First 10 reconstructed values:") for (i, j, x, xr) in value_pairs[:10]: print(" (%d,%d) %0.4f ~= %0.4f" % (i, j, x, xr)) diffs = [actual - predicted for (_, _, actual, predicted) in value_pairs] missing_mse = np.mean([diff ** 2 for diff in diffs]) missing_mae = np.mean([np.abs(diff) for diff in diffs]) print("%sMSE: %0.4f, MAE: %0.4f" % ( "" if not name else name + " ", missing_mse, missing_mae)) return missing_mse, missing_mae
863
32.230769
77
py
fancyimpute
fancyimpute-master/test/low_rank_data.py
import numpy as np def create_rank_k_dataset( n_rows=5, n_cols=5, k=3, fraction_missing=0.1, symmetric=False, random_seed=0): np.random.seed(random_seed) x = np.random.randn(n_rows, k) y = np.random.randn(k, n_cols) XY = np.dot(x, y) if symmetric: assert n_rows == n_cols XY = 0.5 * XY + 0.5 * XY.T missing_raw_values = np.random.uniform(0, 1, (n_rows, n_cols)) missing_mask = missing_raw_values < fraction_missing XY_incomplete = XY.copy() # fill missing entries with NaN XY_incomplete[missing_mask] = np.nan return XY, XY_incomplete, missing_mask # create some default data to be shared across tests XY, XY_incomplete, missing_mask = create_rank_k_dataset( n_rows=500, n_cols=10, k=3, fraction_missing=0.25)
843
21.810811
66
py
fancyimpute
fancyimpute-master/test/test_dictionary_helpers.py
import numpy as np from fancyimpute.dictionary_helpers import ( dense_matrix_from_pair_dictionary, dense_matrix_from_nested_dictionary, reverse_lookup_from_nested_dict, transpose_nested_dictionary, ) from nose.tools import eq_ def test_dense_matrix_from_nested_dictionary(): d = { "a": {"b": 10}, "b": {"c": 20} } X, rows, columns = dense_matrix_from_nested_dictionary(d) eq_(rows, ["a", "b"]) eq_(columns, ["b", "c"]) eq_(X[0, 0], 10) assert np.isnan(X[0, 1]) assert np.isnan(X[1, 0]) eq_(X[1, 1], 20) def test_dense_matrix_from_nested_dictionary_square(): d = { "a": {"b": 10}, "b": {"c": 20} } X, rows, columns = dense_matrix_from_nested_dictionary(d, square_result=True) eq_(rows, ["a", "b", "c"]) eq_(columns, ["a", "b", "c"]) assert np.isnan(X[0, 0]) eq_(X[0, 1], 10) assert np.isnan(X[0, 2]) assert np.isnan(X[1, 0]) assert np.isnan(X[1, 1]) eq_(X[1, 2], 20) assert np.isnan(X[2, 0]) assert np.isnan(X[2, 1]) assert np.isnan(X[2, 2]) def test_dense_matrix_from_pair_dictionary(): d = { ("a", "b"): 10, ("b", "c"): 20 } X, rows, columns = dense_matrix_from_pair_dictionary(d) eq_(rows, ["a", "b"]) eq_(columns, ["b", "c"]) eq_(X[0, 0], 10) assert np.isnan(X[0, 1]) assert np.isnan(X[1, 0]) eq_(X[1, 1], 20) def test_dense_matrix_from_pair_dictionary_square(): d = { ("a", "b"): 10, ("b", "c"): 20 } X, rows, columns = dense_matrix_from_pair_dictionary(d, square_result=True) eq_(rows, ["a", "b", "c"]) eq_(columns, ["a", "b", "c"]) assert np.isnan(X[0, 0]) eq_(X[0, 1], 10) assert np.isnan(X[0, 2]) assert np.isnan(X[1, 0]) assert np.isnan(X[1, 1]) eq_(X[1, 2], 20) assert np.isnan(X[2, 0]) assert np.isnan(X[2, 1]) assert np.isnan(X[2, 2]) def test_reverse_lookup_from_nested_dict(): d = { "a": {"b": 10, "c": 20}, "b": {"c": 5}, "z": {"c": 100} } reverse_dict = reverse_lookup_from_nested_dict(d) len(reverse_dict.keys()) == 2 assert "c" in reverse_dict eq_(set(reverse_dict["c"]), {("a", 20), ("b", 5), ("z", 100)}) assert "b" in reverse_dict eq_(reverse_dict["b"], [("a", 10)]) def 
test_transpose_nested_dictionary(): d = {"a": {"b": 20, "c": 50}, "c": {"q": 500}} transposed = transpose_nested_dictionary(d) eq_(set(transposed.keys()), {"b", "c", "q"}) eq_(transposed["q"], {"c": 500}) eq_(transposed["c"], {"a": 50}) eq_(transposed["b"], {"a": 20})
2,618
25.454545
81
py
fancyimpute
fancyimpute-master/test/test_iterative_svd.py
from low_rank_data import XY, XY_incomplete, missing_mask from common import reconstruction_error from fancyimpute import IterativeSVD def test_iterative_svd_with_low_rank_random_matrix(): solver = IterativeSVD(rank=3) XY_completed = solver.fit_transform(XY_incomplete) _, missing_mae = reconstruction_error( XY, XY_completed, missing_mask, name="IterativeSVD") assert missing_mae < 0.1, "Error too high!" if __name__ == "__main__": test_iterative_svd_with_low_rank_random_matrix()
537
28.888889
57
py
fancyimpute
fancyimpute-master/test/test_knn.py
import numpy as np from nose.tools import eq_ from fancyimpute.knn import KNN from low_rank_data import XY, XY_incomplete, missing_mask def test_knn(): # get a baseline error from just zero-filling the missing entries sad_zero_fill = np.sum(np.abs(XY[missing_mask])) mad_zero_fill = sad_zero_fill / missing_mask.sum() print("MAD zero-fill = ", mad_zero_fill) for k in [5, 15, 30]: print("-- k=", k) XY_completed = KNN(k).fit_transform(XY_incomplete) mask = np.isfinite(XY_completed) eq_((~mask).sum(), 0) diff = (XY_completed - XY)[missing_mask] sad = np.sum(np.abs(diff)) print("Sum absolute differences", sad) mad = sad / missing_mask.sum() print("Mean absolute difference", mad) # knnImpute should be at least twice as good as just zero fill assert mad <= (mad_zero_fill / 2.0), \ "Expected knnImpute to be 2x better than zeroFill (%f) but got MAD=%f" % ( mad_zero_fill, mad)
1,035
34.724138
86
py
fancyimpute
fancyimpute-master/test/test_matrix_factorization.py
from fancyimpute import MatrixFactorization from low_rank_data import XY, XY_incomplete, missing_mask from common import reconstruction_error def test_matrix_factorization_with_low_rank_random_matrix(): solver = MatrixFactorization(learning_rate=0.02, rank=5) XY_completed = solver.fit_transform(XY_incomplete) _, missing_mae = reconstruction_error(XY, XY_completed, missing_mask, name="MatrixFactorization") assert missing_mae < 0.1, "Error too high!" if __name__ == "__main__": test_matrix_factorization_with_low_rank_random_matrix()
561
34.125
101
py
fancyimpute
fancyimpute-master/test/test_nuclear_norm_minimization.py
from fancyimpute import NuclearNormMinimization import numpy as np from low_rank_data import XY, XY_incomplete, missing_mask from common import reconstruction_error def create_rank1_data(symmetric=False): """ Returns 5x5 rank1 matrix with missing element at index (1, 2) """ x = np.array([1, 2, 3, 4, 5], dtype=float) y = np.array([0.1, -0.1, 0.2, -0.2, 0.02]) XY = np.outer(x, y) XY_missing = XY.copy() # drop one entry XY_missing[1, 2] = np.nan if not symmetric: return XY, XY_missing # make a symmetric matrix XYXY = XY.T.dot(XY) # drop one entry XYXY_missing = XYXY.copy() XYXY_missing[1, 2] = np.nan return XYXY, XYXY_missing def test_rank1_convex_solver(): XY_rank1, XY_missing_rank1 = create_rank1_data(symmetric=False) solver = NuclearNormMinimization(max_iters=50000) XY_completed_rank1 = solver.fit_transform(XY_missing_rank1) assert abs(XY_completed_rank1[1, 2] - XY_rank1[1, 2]) < 0.01, \ "Expected %0.4f but got %0.4f" % ( XY_rank1[1, 2], XY_completed_rank1[1, 2]) def test_rank1_symmetric_convex_solver(): XYXY_rank1, XYXY_missing_rank1 = create_rank1_data(symmetric=True) solver = NuclearNormMinimization(require_symmetric_solution=True) completed = solver.fit_transform(XYXY_missing_rank1) assert abs(completed[1, 2] - XYXY_rank1[1, 2]) < 0.01, \ "Expected %0.4f but got %0.4f" % ( XYXY_rank1[1, 2], completed[1, 2]) def test_nuclear_norm_minimization_with_low_rank_random_matrix(): solver = NuclearNormMinimization(max_iters=2000) XY_completed = solver.fit_transform(XY_incomplete[:100]) _, missing_mae = reconstruction_error( XY[:100], XY_completed, missing_mask[:100], name="NuclearNorm") assert missing_mae < 0.1, "Error too high!" if __name__ == "__main__": test_rank1_convex_solver() test_rank1_symmetric_convex_solver() test_nuclear_norm_minimization_with_low_rank_random_matrix()
1,997
32.3
71
py
fancyimpute
fancyimpute-master/test/test_similarity_weighted_averaging.py
import numpy as np from nose.tools import eq_ from fancyimpute import SimilarityWeightedAveraging def test_similarity_weighted_column_averaging(): X = np.array([ [0.1, 0.9, 0.2], [0.8, 0.1, 0.01], [0.95, 0.2, 0.3], [0.14, 0.85, 0.3], ]) X_incomplete = X.copy() X_incomplete[1, 1] = np.nan X_incomplete[3, 0] = np.nan missing_mask = np.isnan(X_incomplete) solver = SimilarityWeightedAveraging() X_filled = solver.fit_transform(X_incomplete) eq_(X_incomplete.shape, X_filled.shape) diff = (X - X_filled)[missing_mask] abs_diff = np.abs(diff) mae = np.mean(abs_diff) print("MAE", mae) assert mae < 0.1, "Difference between imputed values! MAE=%0.4f" % mae if __name__ == "__main__": test_similarity_weighted_column_averaging()
821
26.4
74
py
fancyimpute
fancyimpute-master/test/test_soft_impute.py
from low_rank_data import XY, XY_incomplete, missing_mask from common import reconstruction_error from fancyimpute import SoftImpute def test_soft_impute_with_low_rank_random_matrix(): solver = SoftImpute() XY_completed = solver.fit_transform(XY_incomplete) _, missing_mae = reconstruction_error( XY, XY_completed, missing_mask, name="SoftImpute") assert missing_mae < 0.1, "Error too high!" if __name__ == "__main__": test_soft_impute_with_low_rank_random_matrix()
521
28
57
py
fancyimpute
fancyimpute-master/test/test_solver.py
from fancyimpute import Solver, SimpleFill from low_rank_data import XY, XY_incomplete, missing_mask from common import reconstruction_error import numpy as np import warnings def test_prepare_input_data(): _solver = Solver() print(_solver) # for improved coverage # test that a complete matrix returns a warning X1 = np.zeros((5, 5)) with warnings.catch_warnings(record=True) as w: _solver.prepare_input_data(X1) assert str(w[0].message) == "Input matrix is not missing any values", "Warning is not generated for a complete matrix" # test that an incomplete matrix does not return a warning X2 = np.zeros((5, 5)) X2[2, 3] = None with warnings.catch_warnings(record=True) as w: _solver.prepare_input_data(X2) assert len(w) == 0, "Warning is generated for an incomplete matrix" def test_solver_fill_methods_with_low_rank_random_matrix(): for fill_method in ("zero", "mean", "median", "min", "random"): imputer = SimpleFill(fill_method=fill_method) XY_completed = imputer.fit_transform(XY_incomplete) _, missing_mae = reconstruction_error( XY, XY_completed, missing_mask, name="Solver with fill_method=%s" %fill_method) assert missing_mae < 5, "Error too high for Solver with %s fill method!" %fill_method if __name__ == "__main__": test_prepare_input_data() test_solver_fill_methods_with_low_rank_random_matrix()
1,479
36
126
py
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/README.md
# TMC13 ## Building ### OSX - mkdir build - cd build - cmake .. -G Xcode - open the generated xcode project and build it ### Linux - mkdir build - cd build - cmake .. - make ### Windows - md build - cd build - cmake .. -G "Visual Studio 15 2017 Win64" - open the generated visual studio solution and build it ## Running This TMC13 codec implementation encodes frame sequences. A single binary contains the encoder and decoder implementation, with selection using the `--mode` option. Documentation of options is provided via the `--help` command line option. ### Runtime configuration and configuration files All command line parameters may be specified in a configuration file. A set of configuration file templates compliant with the current Common Test Conditions is provided in the cfg/ directory. ### Example To generate the configuration files, run the gen-cfg.sh script: ```console mpeg-pcc-tmc13/cfg$ ../scripts/gen-cfg.sh --all ``` An example script (`scripts/Makefile.tmc13-step`) demonstrates how to launch the encoder, decoder and metric software for a single input frame. The VERBOSE=1 make variable shows the detailed command execution sequence. Further documentation of the parameters are contained within the script. The following example encodes and decodes frame 0100 of the sequence `Ford_01_q_1mm`, making use of the configuration file `cfg/lossy-geom-no-attrs/ford_01_q1mm/r01/encoder.cfg` and storing the intermediate results in the output directory `experiment/lossy-geom-no-attrs/ford_01_q1mm/r01/`. 
```console mpeg-pcc-tmc13$ make -f $PWD/scripts/Makefile.tmc13-step \ -C experiment/lossy-geom-no-attrs/ford_01_q1mm/r01/ \ VPATH=$PWD/cfg/octree-predlift/lossy-geom-no-attrs/ford_01_q1mm/r01/ \ ENCODER=$PWD/build/tmc3/tmc3 \ DECODER=$PWD/build/tmc3/tmc3 \ PCERROR=/path/to/pc_error \ SRCSEQ=/path/to/Ford_01_q_1mm/Ford_01_vox1mm-0100.ply \ NORMSEQ=/path/to/Ford_01_q_1mm/Ford_01_vox1mm-0100.ply [encode] Ford_01_vox1mm-0100.ply.bin <- /path/to/Ford_01_q_1mm/Ford_01_vox1mm-0100.ply [md5sum] Ford_01_vox1mm-0100.ply.bin.md5 [md5sum] Ford_01_vox1mm-0100.ply.bin.ply.md5 [decode] Ford_01_vox1mm-0100.ply.bin.decoded.ply <- Ford_01_vox1mm-0100.ply.bin [md5sum] Ford_01_vox1mm-0100.ply.bin.decoded.ply.md5 [metric] Ford_01_vox1mm-0100.ply.bin.decoded.ply.pc_error <- Ford_01_vox1mm-0100.ply.bin.decoded.ply ```
2,399
31.432432
103
md
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/README.tools.md
ply-merge: A tool to merge/split ply frames =========================================== The ply-merge tool combines point clouds from multiple ply files into a single output with an extra per-attribute frameindex property that identifies which input frame each point belongs to. The tool is also able to reverse the process and split a merged point cloud into individual frames. Merge operation --------------- From a sequence of input ply files, the merge mode repeatedly takes `groupSize` frames, merges their contents and produces a single output file with an additional frameindex property. Split operation --------------- From a sequence of input ply files, and for each value of the frameindex property, the split mode extracts all points with the same frameindex. Options ------- ### `--help` Print a list of available command line (and configuration file) options along with their default values and exit. ### `--config=FILE`, `-c` Specifies a configuration file to be immediately loaded. ### `--mode=MODE` Selects the mode of operation. | Value | Description | |:-----:| ------------------------------------------ | | merge | Combines multiple input files into outputs | | split | Splits input files into multiple outputs | ### `--outputBinaryPly=0|1` Sets the output format of PLY files (Binary=1, ASCII=0). Reading and writing binary PLY files is more efficient than the ASCII variant, but are less suited to simple scripts and direct human inspection. If outputting non-integer point co-ordinates (eg, due to the output geometry scaling), the precision of the binary and ASCII versions are not identical. ### `--srcPath=FILESPEC`, `--outPath=FILESPEC` Specify the input and output ply file names. The tool respectively replaces any instance of a '%d' printf format directive with the current input/output frame numbers. ### `--firstFrameNum=INT-VALUE`, `--firstOutputFrameNum=INT-VALUE` The initial frame number of the respective input/output sequences. 
### `--frameCount=INT-VALUE` The number of input frames to process ### `--groupSize=INT-VALUE` (Merge mode only) The number of input frames to merge into each output frame. Example usage ------------- ### Merging ```console $ build/tmc3/ply-merge --mode=merge \ --srcPath=path/to/Ford_01_q_1mm/Ford_01_vox1mm-%.04d.ply \ --outPath=ford-01-vox1mm-merge8f-%.04d.ply \ --firstFrameNum=100 \ --frameCount=32 MPEG PCC ply merge/split tool from Test Model C13 help : 0 config : ... mode : merge srcPath : "path/to/Ford_01_q_1mm/Ford_01_vox1mm-%.04d.ply" outPath : "ford-01-vox1mm-merge8f-%.04d.ply" outputBinaryPly : 0 firstFrameNum : 100 firstOutputFrameNum : 0 frameCount : 32 groupSize : 8 ford-01-vox1mm-merge8f-0000.ply ford-01-vox1mm-merge8f-0001.ply ford-01-vox1mm-merge8f-0002.ply ford-01-vox1mm-merge8f-0003.ply ``` ### Splitting ```console $ build/tmc3/ply-merge --mode=split \ --srcPath=ford-01-vox1mm-merge8f-%.04d.ply \ --outPath=split-Ford_01_vox1mm-%.04d.ply \ --firstOutputFrameNum=100 \ --frameCount=4 ``` help : 0 config : ... mode : split srcPath : "ford-01-vox1mm-merge8f-%.04d.ply" outPath : "split-Ford_01_vox1mm-%.04d.ply" outputBinaryPly : 0 firstFrameNum : 0 firstOutputFrameNum : 100 frameCount : 32 groupSize : 8 split-Ford_01_vox1mm-0100.ply split-Ford_01_vox1mm-0101.ply split-Ford_01_vox1mm-0102.ply ... split-Ford_01_vox1mm-0131.ply ```
3,704
28.64
74
md
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/README.md
# How to generate the per-data point config files Run the `../scripts/gen-cfg.sh` from within the cfg directory: ``` $ ../scripts/gen-cfg.sh [--octree|--trisoup] [--raht|--pred-lift] ```
191
20.333333
66
md
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/cfg-predgeom.yaml
--- # the following flags are common to all predgeom configurations # NB: these are applied after the category config encflags: # Some sequences input order is not characteristic of a real # system. This will make it so. - sortInputByAzimuth: '$eval{ ${needs_azimuth_presort} || 0 }' # use predictive geometry, default to azimuthal sort - geomTreeType: 1 - predGeomSort: 2 - predGeomAzimuthSortPrecision: 8 # configuration for angular predictive geometry - - !conditional '${numLasers}' - predGeomAzimuthSortPrecision: 1024 - - !conditional '"${group}" eq "cat3-fused"' - predGeomSort: 4 - predGeomAzimuthSortPrecision: 0.05 # the following are for specific configurations categories: lossless-geom-lossless-attrs: &predgeomlossless encflags: # configuration for angular predictive geometry - - !conditional '${numLasers}' - positionAzimuthScaleLog2: 12 - positionAzimuthSpeed: '$eval{ (${pos_azimuth_speed}) >> 12 - 12 }' - attrSphericalMaxLog2: 17 # these conditions use the same config as earlier lossless-geom-lossy-attrs: *predgeomlossless lossless-geom-nearlossless-attrs: *predgeomlossless lossy-geom-lossy-attrs: encflags: # configuration for angular predictive geometry - - !conditional '${numLasers}' - positionQuantisationEnabled: 1 - positionBaseQp: 58 - positionRadiusInvScaleLog2: r06: 1 r05: 2 r04: 4 r03: 5 r02: 7 r01: 8 - positionAzimuthScaleLog2: r06: 12 r05: 11 r04: 9 r03: 8 r02: 7 r01: 7 - positionAzimuthSpeed: r06: '$eval{ (${pos_azimuth_speed}) + 0 >> 12 - 12 }' r05: '$eval{ (${pos_azimuth_speed}) + 1 >> 12 - 11 }' r04: '$eval{ (${pos_azimuth_speed}) + 4 >> 12 - 9 }' r03: '$eval{ (${pos_azimuth_speed}) + 8 >> 12 - 8 }' r02: '$eval{ (${pos_azimuth_speed}) + 16 >> 12 - 7 }' r01: '$eval{ (${pos_azimuth_speed}) + 16 >> 12 - 7 }' - attrSphericalMaxLog2: r06: 17 r05: 16 r04: 14 r03: 13 r02: 12 r01: 12 sequences: ford_01_q1mm: { needs_azimuth_presort: 1, pos_azimuth_speed: '(33<<7) - 30' } ford_02_q1mm: { needs_azimuth_presort: 1, pos_azimuth_speed: '(33<<7) - 30' } 
ford_03_q1mm: { needs_azimuth_presort: 1, pos_azimuth_speed: '(33<<7) - 30' } qnxadas-junction-approach: { pos_azimuth_speed: '(364<<7) + 12' } qnxadas-junction-exit: { pos_azimuth_speed: '(364<<7) + 12' } qnxadas-motorway-join: { pos_azimuth_speed: '(364<<7) + 12' } qnxadas-navigating-bends: { pos_azimuth_speed: '(364<<7) + 12' }
2,742
32.048193
79
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/octree-liftt-ctc-lossless-geom-lossy-attrs.yaml
# Test conditions for N17995 CTC C1 using TMC13 octree # octree lossless-geom -- lossy-attrs liftt --- categories: lossless-geom-lossy-attrs: encflags: - mode: 0 - - !conditional '${src-unit-metres}' - srcUnit: metre - srcUnitLength: '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' ## # geometry parameters (octree) # - preserve lossless geometry property - trisoupNodeSizeLog2: 0 - mergeDuplicatedPoints: 0 - neighbourAvailBoundaryLog2: 8 - intra_pred_max_node_size_log2: 6 - positionQuantizationScale: 1 - - !conditional '${numLasers}' - angularEnabled: 1 - numLasers: '${numLasers}' - lasersTheta: '${lasersTheta}' - lasersZ: '${lasersZ}' - lasersNumPhiPerTurn: '${lasersNumPhiPerTurn}' - planarBufferDisabled: 1 # idcm intensity depends upon the content type - inferredDirectCodingMode: 1 - - !conditional '"${group}" eq "cat3-fused"' - inferredDirectCodingMode: 2 - - !conditional '"${group}" eq "cat3-frame"' - inferredDirectCodingMode: 3 # default qtbt and planar with cat3-frame exception - maxNumQtBtBeforeOt: 4 - minQtbtSizeLog2: 0 - planarEnabled: 1 - planarModeIdcmUse: 0 - - !conditional '"${group}" eq "cat3-frame"' - partitionMethod: 0 - maxNumQtBtBeforeOt: 6 - planarModeIdcmUse: 32 #### # attribute coding (common options -- relies on option ordering) # - use lifting transform for lossy conditions # - scale 16bit reflectance data to 8bit - convertPlyColourspace: 1 - transformType: 2 - numberOfNearestNeighborsInPrediction: 3 - levelOfDetailCount: '$eval{ ${seq_lod} || 12 }' - lodDecimator: 0 - - !conditional '"${group}" =~ m{^cat3}' - lodDecimator: 1 - lod_neigh_bias: ${seq_lod_bias} - - !conditional '"${group}" eq "cat3-fused"' - lodDecimator: 2 - - !conditional '${numLasers}' - lod_neigh_bias: '1,1,1' - spherical_coord_flag: 1 ## # attribute coding -- reflectance - - !conditional '${has_refl}' - adaptivePredictionThreshold: 64 - qp: r01: 51 r02: 46 r03: 40 r04: 34 r05: 28 r06: 22 - bitdepth: 8 - - !conditional '${reflectance8b16b_scale_factor}' - 
attrOffset: 0 - attrScale: ${reflectance8b16b_scale_factor} - attribute: reflectance ## # attribute coding -- colour - - !conditional '${has_colour}' - adaptivePredictionThreshold: 64 - qp: r01: 51 r02: 46 r03: 40 r04: 34 r05: 28 r06: 22 - qpChromaOffset: 0 - bitdepth: 8 - attrOffset: 0 - attrScale: 1 - attribute: color decflags: - mode: 1 - - !conditional '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' - convertPlyColourspace: 1 pcerrorflags: - dropdups: 2 - neighborsProc: 1 sequences: # cat3 citytunnel_q1mm: tollbooth_q1mm: overpass_q1mm: ford_01_q1mm: # todo(??): it would be nice if the encoder could make this adjustment # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } ford_02_q1mm: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } ford_03_q1mm: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } qnxadas-junction-approach: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } qnxadas-junction-exit: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } qnxadas-motorway-join: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } qnxadas-navigating-bends: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } # cat1 arco_valentino_dense_vox12: arco_valentino_dense_vox20: basketball_player_vox11_00000200: boxer_viewdep_vox12: dancer_vox11_00000001: egyptian_mask_vox12: egyptian_mask_vox20: facade_00009_vox12: facade_00009_vox20: facade_00015_vox14: facade_00015_vox20: facade_00064_vox11: facade_00064_vox14: facade_00064_vox20: frog_00067_vox12: frog_00067_vox20: head_00039_vox12: head_00039_vox20: house_without_roof_00057_vox12: house_without_roof_00057_vox20: landscape_00014_vox14: landscape_00014_vox20: longdress_viewdep_vox12: longdress_vox10_1300: loot_viewdep_vox12: 
loot_vox10_1200: palazzo_carignano_dense_vox14: palazzo_carignano_dense_vox20: queen_0200: redandblack_viewdep_vox12: redandblack_vox10_1550: shiva_00035_vox12: shiva_00035_vox20: soldier_viewdep_vox12: soldier_vox10_0690: stanford_area_2_vox16: stanford_area_2_vox20: stanford_area_4_vox16: stanford_area_4_vox20: staue_klimt_vox12: staue_klimt_vox20: thaidancer_viewdep_vox12: ulb_unicorn_hires_vox15: ulb_unicorn_hires_vox20: ulb_unicorn_vox13: ulb_unicorn_vox20:
5,647
26.686275
78
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/octree-liftt-ctc-lossy-geom-lossy-attrs.yaml
# Test conditions for N17995 CTC C2 using TMC13 octree # octree lossy-geom -- lossy-attrs liftt --- categories: lossy-geom-lossy-attrs: encflags: - mode: 0 - - !conditional '${src-unit-metres}' - srcUnit: metre - srcUnitLength: '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' ## # geometry parameters (octree) - trisoupNodeSizeLog2: 0 - mergeDuplicatedPoints: 1 - neighbourAvailBoundaryLog2: 8 - intra_pred_max_node_size_log2: 6 - positionQuantizationScale: r01: '$eval{ $rp = 5; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) - $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' r02: '$eval{ $rp = 4; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) - $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' r03: '$eval{ $rp = 3; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) - $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' r04: '$eval{ $rp = 2; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) - $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' r05: '$eval{ $rp = 1; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) - $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' r06: '$eval{ $rp = 0; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) 
- $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' - - !conditional '${numLasers}' - angularEnabled: 1 - numLasers: '${numLasers}' - lasersTheta: '${lasersTheta}' - lasersZ: '${lasersZ}' - lasersNumPhiPerTurn: '${lasersNumPhiPerTurn}' - planarBufferDisabled: 1 # default qtbt and planar with cat3-frame exception - maxNumQtBtBeforeOt: 4 - minQtbtSizeLog2: 0 - planarEnabled: 1 - planarModeIdcmUse: 0 - - !conditional '"${group}" eq "cat3-frame"' - partitionMethod: 0 - maxNumQtBtBeforeOt: 6 - planarModeIdcmUse: 32 #### # attribute coding (common options -- relies on option ordering) # - scale 16bit reflectance data to 8bit # - use lifting transform for lossy conditions - - convertPlyColourspace: 1 - transformType: 2 - numberOfNearestNeighborsInPrediction: 3 - levelOfDetailCount: '$eval{ ${seq_lod} || 12 }' - lodDecimator: 0 - - !conditional '"${group}" =~ m{^cat3}' - lodDecimator: 1 - lod_neigh_bias: ${seq_lod_bias} - - !conditional '"${group}" eq "cat3-fused"' - lodDecimator: 2 - - !conditional '${numLasers}' - lod_neigh_bias: '1,1,1' - spherical_coord_flag: 1 ## # attribute coding -- reflectance - - !conditional '${has_refl}' - adaptivePredictionThreshold: 64 - qp: r01: 51 r02: 46 r03: 40 r04: 34 r05: 28 r06: 22 - bitdepth: 8 - - !conditional '${reflectance8b16b_scale_factor}' - attrOffset: 0 - attrScale: ${reflectance8b16b_scale_factor} - attribute: reflectance ## # attribute coding -- colour - - !conditional '${has_colour}' - adaptivePredictionThreshold: 64 - qp: r01: 51 r02: 46 r03: 40 r04: 34 r05: 28 r06: 22 - qpChromaOffset: 0 - bitdepth: 8 - attrOffset: 0 - attrScale: 1 - attribute: color decflags: - mode: 1 - - !conditional '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' - convertPlyColourspace: 1 pcerrorflags: - dropdups: 2 - neighborsProc: 1 sequences: # cat3 citytunnel_q1mm: tollbooth_q1mm: overpass_q1mm: ford_01_q1mm: # todo(??): it would be nice if the encoder could make this 
adjustment # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } ford_02_q1mm: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } ford_03_q1mm: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } qnxadas-junction-approach: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } qnxadas-junction-exit: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } qnxadas-motorway-join: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } qnxadas-navigating-bends: # override dist2 at particular rate points (lossy geometry only) seq_lod: { r01: 8, r02: 9 } # cat1 arco_valentino_dense_vox12: arco_valentino_dense_vox20: basketball_player_vox11_00000200: boxer_viewdep_vox12: dancer_vox11_00000001: egyptian_mask_vox12: egyptian_mask_vox20: facade_00009_vox12: facade_00009_vox20: facade_00015_vox14: facade_00015_vox20: facade_00064_vox11: facade_00064_vox14: facade_00064_vox20: frog_00067_vox12: frog_00067_vox20: head_00039_vox12: head_00039_vox20: house_without_roof_00057_vox12: house_without_roof_00057_vox20: landscape_00014_vox14: landscape_00014_vox20: longdress_viewdep_vox12: longdress_vox10_1300: loot_viewdep_vox12: loot_vox10_1200: palazzo_carignano_dense_vox14: palazzo_carignano_dense_vox20: queen_0200: redandblack_viewdep_vox12: redandblack_vox10_1550: shiva_00035_vox12: shiva_00035_vox20: soldier_viewdep_vox12: soldier_vox10_0690: stanford_area_2_vox16: stanford_area_2_vox20: stanford_area_4_vox16: stanford_area_4_vox20: staue_klimt_vox12: staue_klimt_vox20: thaidancer_viewdep_vox12: ulb_unicorn_hires_vox15: ulb_unicorn_hires_vox20: ulb_unicorn_vox13: ulb_unicorn_vox20:
7,122
34.437811
282
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/octree-predt-ctc-lossless-geom-lossless-attrs.yaml
# Test conditions for N17995 CTC CW using TMC13 octree # octree lossless-geom -- lossless-attrs predt --- categories: lossless-geom-lossless-attrs: encflags: - mode: 0 - - !conditional '${src-unit-metres}' - srcUnit: metre - srcUnitLength: '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' # preserve lossless geometry property - mergeDuplicatedPoints: 0 - positionQuantizationScale: 1 ## # geometry parameters (octree) - trisoupNodeSizeLog2: 0 - neighbourAvailBoundaryLog2: 8 - intra_pred_max_node_size_log2: 6 - - !conditional '${numLasers}' - angularEnabled: 1 - numLasers: '${numLasers}' - lasersTheta: '${lasersTheta}' - lasersZ: '${lasersZ}' - lasersNumPhiPerTurn: '${lasersNumPhiPerTurn}' - planarBufferDisabled: 1 # idcm intensity depends upon the content type - inferredDirectCodingMode: 1 - - !conditional '"${group}" eq "cat3-fused"' - inferredDirectCodingMode: 2 - - !conditional '"${group}" eq "cat3-frame"' - inferredDirectCodingMode: 3 # default qtbt and planar with cat3-frame exception - maxNumQtBtBeforeOt: 4 - minQtbtSizeLog2: 0 - planarEnabled: 1 - planarModeIdcmUse: 0 - - !conditional '"${group}" eq "cat3-frame"' - partitionMethod: 0 - maxNumQtBtBeforeOt: 6 - planarModeIdcmUse: 32 #### # attribute coding (common options -- relies on option ordering) # - use YCgCoR colour space to avoid conversion losses # NB: inter component prediction doesn't work well in non-RGB space # - scale 16bit reflectance data to 8bit # - use predicting transform for lossless conditions - - convertPlyColourspace: 1 - transformType: 1 - numberOfNearestNeighborsInPrediction: 3 - levelOfDetailCount: '$eval{ ${seq_lod} || 12 }' - - !conditional '"${group}" =~ m{^cat3}' - levelOfDetailCount: 0 - lod_neigh_bias: ${seq_lod_bias} - direct_avg_predictor_disabled_flag: 1 - - !conditional '${numLasers}' - lod_neigh_bias: '1,1,1' - spherical_coord_flag: 1 - intraLodPredictionSkipLayers: 0 - interComponentPredictionEnabled: 0 ## # attribute coding -- reflectance - - !conditional 
'${has_refl}' - adaptivePredictionThreshold: 64 - qp: 4 - bitdepth: 8 - - !conditional '${reflectance8b16b_scale_factor}' - attrOffset: 0 - attrScale: ${reflectance8b16b_scale_factor} - attribute: reflectance ## # attribute coding -- colour - - !conditional '${has_colour}' - adaptivePredictionThreshold: 64 - qp: 4 - qpChromaOffset: 0 - bitdepth: 8 - colourMatrix: 8 - attrOffset: 0 - attrScale: 1 - attribute: color decflags: - mode: 1 - - !conditional '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' - convertPlyColourspace: 1 pcerrorflags: - dropdups: 2 - neighborsProc: 1 # NB: use hausdorff to better verify lossless coding - hausdorff sequences: # cat3 citytunnel_q1mm: tollbooth_q1mm: overpass_q1mm: ford_01_q1mm: ford_02_q1mm: ford_03_q1mm: qnxadas-junction-approach: qnxadas-junction-exit: qnxadas-motorway-join: qnxadas-navigating-bends: # cat1 arco_valentino_dense_vox12: arco_valentino_dense_vox20: basketball_player_vox11_00000200: boxer_viewdep_vox12: dancer_vox11_00000001: egyptian_mask_vox12: egyptian_mask_vox20: facade_00009_vox12: facade_00009_vox20: facade_00015_vox14: facade_00015_vox20: facade_00064_vox11: facade_00064_vox14: facade_00064_vox20: frog_00067_vox12: frog_00067_vox20: head_00039_vox12: head_00039_vox20: house_without_roof_00057_vox12: house_without_roof_00057_vox20: landscape_00014_vox14: landscape_00014_vox20: longdress_viewdep_vox12: longdress_vox10_1300: loot_viewdep_vox12: loot_vox10_1200: palazzo_carignano_dense_vox14: palazzo_carignano_dense_vox20: queen_0200: redandblack_viewdep_vox12: redandblack_vox10_1550: shiva_00035_vox12: shiva_00035_vox20: soldier_viewdep_vox12: soldier_vox10_0690: stanford_area_2_vox16: stanford_area_2_vox20: stanford_area_4_vox16: stanford_area_4_vox20: staue_klimt_vox12: staue_klimt_vox20: thaidancer_viewdep_vox12: ulb_unicorn_hires_vox15: ulb_unicorn_hires_vox20: ulb_unicorn_vox13: ulb_unicorn_vox20:
4,934
27.039773
75
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/octree-predt-ctc-lossless-geom-nearlossless-attrs.yaml
# Test conditions for N17995 CTC CY using TMC13 octree # octree lossless-geom -- nearlossless-attrs predt --- categories: lossless-geom-nearlossless-attrs: encflags: - mode: 0 - - !conditional '${src-unit-metres}' - srcUnit: metre - srcUnitLength: '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' # preserve lossless geometry property - mergeDuplicatedPoints: 0 - positionQuantizationScale: 1 ## # geometry parameters (octree) - trisoupNodeSizeLog2: 0 - neighbourAvailBoundaryLog2: 8 - intra_pred_max_node_size_log2: 6 - - !conditional '${numLasers}' - angularEnabled: 1 - numLasers: '${numLasers}' - lasersTheta: '${lasersTheta}' - lasersZ: '${lasersZ}' - lasersNumPhiPerTurn: '${lasersNumPhiPerTurn}' - planarBufferDisabled: 1 # idcm intensity depends upon the content type - inferredDirectCodingMode: 1 - - !conditional '"${group}" eq "cat3-fused"' - inferredDirectCodingMode: 2 - - !conditional '"${group}" eq "cat3-frame"' - inferredDirectCodingMode: 3 # default qtbt and planar with cat3-frame exception - maxNumQtBtBeforeOt: 4 - minQtbtSizeLog2: 0 - planarEnabled: 1 - planarModeIdcmUse: 0 - - !conditional '"${group}" eq "cat3-frame"' - partitionMethod: 0 - maxNumQtBtBeforeOt: 6 - planarModeIdcmUse: 32 #### # attribute coding (common options -- relies on option ordering) # - code directly GBR (no need to perform colourspace conversion) # - scale 16bit reflectance data to 8bit # - use predicting transform for lossless conditions - convertPlyColourspace: 0 - transformType: 1 - numberOfNearestNeighborsInPrediction: 3 - levelOfDetailCount: '$eval{ ${seq_lod} || 12 }' - - !conditional '"${group}" =~ m{^cat3}' - levelOfDetailCount: 0 - lod_neigh_bias: ${seq_lod_bias} - direct_avg_predictor_disabled_flag: 1 - - !conditional '${numLasers}' - lod_neigh_bias: '1,1,1' - spherical_coord_flag: 1 - intraLodPredictionSkipLayers: 0 - interComponentPredictionEnabled: 1 - - !conditional '${has_colour}' - predWeightBlending: 1 ## # attribute coding -- reflectance - - !conditional 
'${has_refl}' - adaptivePredictionThreshold: 64 - qp: r01: 10 r02: 16 r03: 22 r04: 28 r05: 34 - bitdepth: 8 - - !conditional '${reflectance8b16b_scale_factor}' - attrOffset: 0 - attrScale: ${reflectance8b16b_scale_factor} - attribute: reflectance ## # attribute coding -- colour - - !conditional '${has_colour}' - adaptivePredictionThreshold: 64 - qp: r01: 10 r02: 16 r03: 22 r04: 28 r05: 34 - qpChromaOffset: 0 - bitdepth: 8 - attrOffset: 0 - attrScale: 1 - colourMatrix: 0 - attribute: color decflags: - mode: 1 - - !conditional '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' - convertPlyColourspace: 0 pcerrorflags: - dropdups: 2 - neighborsProc: 1 - hausdorff sequences: #cat3 citytunnel_q1mm: tollbooth_q1mm: overpass_q1mm: ford_01_q1mm: ford_02_q1mm: ford_03_q1mm: qnxadas-junction-approach: qnxadas-junction-exit: qnxadas-motorway-join: qnxadas-navigating-bends: # cat1 arco_valentino_dense_vox12: arco_valentino_dense_vox20: basketball_player_vox11_00000200: boxer_viewdep_vox12: dancer_vox11_00000001: egyptian_mask_vox12: egyptian_mask_vox20: facade_00009_vox12: facade_00009_vox20: facade_00015_vox14: facade_00015_vox20: facade_00064_vox11: facade_00064_vox14: facade_00064_vox20: frog_00067_vox12: frog_00067_vox20: head_00039_vox12: head_00039_vox20: house_without_roof_00057_vox12: house_without_roof_00057_vox20: landscape_00014_vox14: landscape_00014_vox20: longdress_viewdep_vox12: longdress_vox10_1300: loot_viewdep_vox12: loot_vox10_1200: palazzo_carignano_dense_vox14: palazzo_carignano_dense_vox20: queen_0200: redandblack_viewdep_vox12: redandblack_vox10_1550: shiva_00035_vox12: shiva_00035_vox20: soldier_viewdep_vox12: soldier_vox10_0690: stanford_area_2_vox16: stanford_area_2_vox20: stanford_area_4_vox16: stanford_area_4_vox20: staue_klimt_vox12: staue_klimt_vox20: thaidancer_viewdep_vox12: ulb_unicorn_hires_vox15: ulb_unicorn_hires_vox20: ulb_unicorn_vox13: ulb_unicorn_vox20:
4,995
25.860215
71
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/octree-raht-ctc-lossless-geom-lossy-attrs.yaml
# Test conditions for N17995 CTC C1 using TMC13 octree # octree lossless-geom -- lossy-attrs raht --- categories: lossless-geom-lossy-attrs: encflags: - mode: 0 - - !conditional '${src-unit-metres}' - srcUnit: metre - srcUnitLength: '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' ## # geometry parameters (octree) # - preserve lossless geometry property - trisoupNodeSizeLog2: 0 - mergeDuplicatedPoints: 0 - neighbourAvailBoundaryLog2: 8 - intra_pred_max_node_size_log2: 6 - positionQuantizationScale: 1 - - !conditional '${numLasers}' - angularEnabled: 1 - numLasers: '${numLasers}' - lasersTheta: '${lasersTheta}' - lasersZ: '${lasersZ}' - lasersNumPhiPerTurn: '${lasersNumPhiPerTurn}' - planarBufferDisabled: 1 # idcm intensity depends upon the content type - inferredDirectCodingMode: 1 - - !conditional '"${group}" eq "cat3-fused"' - inferredDirectCodingMode: 2 - - !conditional '"${group}" eq "cat3-frame"' - inferredDirectCodingMode: 3 # default qtbt and planar with cat3-frame exception - maxNumQtBtBeforeOt: 4 - minQtbtSizeLog2: 0 - planarEnabled: 1 - planarModeIdcmUse: 0 - - !conditional '"${group}" eq "cat3-frame"' - partitionMethod: 0 - maxNumQtBtBeforeOt: 6 - planarModeIdcmUse: 32 #### # attribute coding (common options -- relies on option ordering) # - uses raht transform # - scale 16bit reflectance data to 8bit - convertPlyColourspace: 1 - transformType: 0 - - !conditional '${numLasers}' - spherical_coord_flag: 1 ## # attribute coding -- reflectance - - !conditional '${has_refl}' - qp: r01: 51 r02: 46 r03: 40 r04: 34 r05: 28 r06: 22 - bitdepth: 8 - - !conditional '${reflectance8b16b_scale_factor}' - attrOffset: 0 - attrScale: ${reflectance8b16b_scale_factor} - attribute: reflectance ## # attribute coding -- colour - - !conditional '${has_colour}' - qp: r01: 51 r02: 46 r03: 40 r04: 34 r05: 28 r06: 22 # NB: raht doesn't yet support qpChromaOffset - qpChromaOffset: 0 - bitdepth: 8 - attrOffset: 0 - attrScale: 1 - attribute: color decflags: - mode: 1 - - 
!conditional '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' - convertPlyColourspace: 1 pcerrorflags: - dropdups: 2 - neighborsProc: 1 sequences: # cat3 citytunnel_q1mm: tollbooth_q1mm: overpass_q1mm: ford_01_q1mm: ford_02_q1mm: ford_03_q1mm: qnxadas-junction-approach: qnxadas-junction-exit: qnxadas-motorway-join: qnxadas-navigating-bends: # cat1 arco_valentino_dense_vox12: arco_valentino_dense_vox20: basketball_player_vox11_00000200: boxer_viewdep_vox12: dancer_vox11_00000001: egyptian_mask_vox12: egyptian_mask_vox20: facade_00009_vox12: facade_00009_vox20: facade_00015_vox14: facade_00015_vox20: facade_00064_vox11: facade_00064_vox14: facade_00064_vox20: frog_00067_vox12: frog_00067_vox20: head_00039_vox12: head_00039_vox20: house_without_roof_00057_vox12: house_without_roof_00057_vox20: landscape_00014_vox14: landscape_00014_vox20: longdress_viewdep_vox12: longdress_vox10_1300: loot_viewdep_vox12: loot_vox10_1200: palazzo_carignano_dense_vox14: palazzo_carignano_dense_vox20: queen_0200: redandblack_viewdep_vox12: redandblack_vox10_1550: shiva_00035_vox12: shiva_00035_vox20: soldier_viewdep_vox12: soldier_vox10_0690: stanford_area_2_vox16: stanford_area_2_vox20: stanford_area_4_vox16: stanford_area_4_vox20: staue_klimt_vox12: staue_klimt_vox20: thaidancer_viewdep_vox12: ulb_unicorn_hires_vox15: ulb_unicorn_hires_vox20: ulb_unicorn_vox13: ulb_unicorn_vox20:
4,384
24.794118
69
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/octree-raht-ctc-lossy-geom-lossy-attrs.yaml
# Test conditions for N17995 CTC C1 using TMC13 octree # octree lossy-geom -- lossy-attrs raht --- categories: lossy-geom-lossy-attrs: encflags: - mode: 0 - - !conditional '${src-unit-metres}' - srcUnit: metre - srcUnitLength: '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' ## # geometry parameters (octree) - trisoupNodeSizeLog2: 0 - mergeDuplicatedPoints: 1 - neighbourAvailBoundaryLog2: 8 - intra_pred_max_node_size_log2: 6 - positionQuantizationScale: r01: '$eval{ $rp = 5; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) - $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' r02: '$eval{ $rp = 4; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) - $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' r03: '$eval{ $rp = 3; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) - $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' r04: '$eval{ $rp = 2; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) - $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' r05: '$eval{ $rp = 1; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) - $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' r06: '$eval{ $rp = 0; $gp = ${src-geometry-precision}; $p_min = max(gp - 9, 7); $start = min(1, $gp - ($p_min + 6)); $step = max(1, (min($gp - 1, $p_min + 7) 
- $p_min) / 5); $y = $start + round($rp * $step); $div = 1 << (abs($y) + 1); ((1 - 2*signbit($y)) % $div) / $div }' - - !conditional '${numLasers}' - angularEnabled: 1 - numLasers: '${numLasers}' - lasersTheta: '${lasersTheta}' - lasersZ: '${lasersZ}' - lasersNumPhiPerTurn: '${lasersNumPhiPerTurn}' - planarBufferDisabled: 1 # default qtbt and planar with cat3-frame exception - maxNumQtBtBeforeOt: 4 - minQtbtSizeLog2: 0 - planarEnabled: 1 - planarModeIdcmUse: 0 - - !conditional '"${group}" eq "cat3-frame"' - partitionMethod: 0 - maxNumQtBtBeforeOt: 6 - planarModeIdcmUse: 32 #### # attribute coding (common options -- relies on option ordering) # - use raht # - scale 16bit reflectance data to 8bit - convertPlyColourspace: 1 - transformType: 0 - - !conditional '${numLasers}' - spherical_coord_flag: 1 ## # attribute coding -- reflectance - - !conditional '${has_refl}' - qp: r01: 51 r02: 46 r03: 40 r04: 34 r05: 28 r06: 22 - bitdepth: 8 - - !conditional '${reflectance8b16b_scale_factor}' - attrOffset: 0 - attrScale: ${reflectance8b16b_scale_factor} - attribute: reflectance ## # attribute coding -- colour - - !conditional '${has_colour}' - qp: r01: 51 r02: 46 r03: 40 r04: 34 r05: 28 r06: 22 # NB: raht doesn't yet support quantizationStepChroma - qpChromaOffset: 0 - bitdepth: 8 - attrOffset: 0 - attrScale: 1 - attribute: color decflags: - mode: 1 - - !conditional '${src-unit-metres}' - outputUnitLength: '${src-unit-metres}' - convertPlyColourspace: 1 pcerrorflags: - dropdups: 2 - neighborsProc: 1 sequences: # cat3 citytunnel_q1mm: tollbooth_q1mm: overpass_q1mm: ford_01_q1mm: ford_02_q1mm: ford_03_q1mm: qnxadas-junction-approach: qnxadas-junction-exit: qnxadas-motorway-join: qnxadas-navigating-bends: # cat1 arco_valentino_dense_vox12: arco_valentino_dense_vox20: basketball_player_vox11_00000200: boxer_viewdep_vox12: dancer_vox11_00000001: egyptian_mask_vox12: egyptian_mask_vox20: facade_00009_vox12: facade_00009_vox20: facade_00015_vox14: facade_00015_vox20: 
facade_00064_vox11: facade_00064_vox14: facade_00064_vox20: frog_00067_vox12: frog_00067_vox20: head_00039_vox12: head_00039_vox20: house_without_roof_00057_vox12: house_without_roof_00057_vox20: landscape_00014_vox14: landscape_00014_vox20: longdress_viewdep_vox12: longdress_vox10_1300: loot_viewdep_vox12: loot_vox10_1200: palazzo_carignano_dense_vox14: palazzo_carignano_dense_vox20: queen_0200: redandblack_viewdep_vox12: redandblack_vox10_1550: shiva_00035_vox12: shiva_00035_vox20: soldier_viewdep_vox12: soldier_vox10_0690: stanford_area_2_vox16: stanford_area_2_vox20: stanford_area_4_vox16: stanford_area_4_vox20: staue_klimt_vox12: staue_klimt_vox20: thaidancer_viewdep_vox12: ulb_unicorn_hires_vox15: ulb_unicorn_hires_vox20: ulb_unicorn_vox13: ulb_unicorn_vox20:
5,747
33.626506
282
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/sequences-cat1.yaml
# Common configuration parameters according to N17995 CTC. --- sequences: arco_valentino_dense_vox12: src: Arco_Valentino_Dense_vox12.ply group: cat1-B src-geometry-precision: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 arco_valentino_dense_vox20: src: Arco_Valentino_Dense_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 basketball_player_vox11_00000200: src: basketball_player_vox11_00000200.ply norm: basketball_player_vox11_00000200.ply group: cat1-A src-geometry-precision: 11 seq_lod: 11 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 2047 boxer_viewdep_vox12: src: boxer_viewdep_vox12.ply norm: boxer_viewdep_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 dancer_vox11_00000001: src: dancer_vox11_00000001.ply norm: dancer_vox11_00000001.ply group: cat1-A src-geometry-precision: 11 seq_lod: 11 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 2047 egyptian_mask_vox12: src: Egyptian_mask_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 11 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 egyptian_mask_vox20: src: Egyptian_mask_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 facade_00009_vox12: src: Facade_00009_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 facade_00009_vox20: src: Facade_00009_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 facade_00015_vox14: src: Facade_00015_vox14.ply group: cat1-A src-geometry-precision: 14 seq_lod: 14 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 16383 facade_00015_vox20: src: Facade_00015_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 
bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 facade_00064_vox11: src: Facade_00064_vox11.ply group: cat1-A src-geometry-precision: 11 seq_lod: 13 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 2047 facade_00064_vox14: src: Facade_00064_vox14.ply group: cat1-B src-geometry-precision: 14 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 16383 facade_00064_vox20: src: Facade_00064_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 frog_00067_vox12: src: Frog_00067_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 13 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 frog_00067_vox20: src: Frog_00067_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 head_00039_vox12: src: Head_00039_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 13 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 head_00039_vox20: src: Head_00039_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 house_without_roof_00057_vox12: src: House_without_roof_00057_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 13 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 house_without_roof_00057_vox20: src: House_without_roof_00057_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 landscape_00014_vox14: src: Landscape_00014_vox14.ply group: cat1-B src-geometry-precision: 14 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 16383 landscape_00014_vox20: src: Landscape_00014_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 longdress_viewdep_vox12: src: longdress_viewdep_vox12.ply norm: longdress_viewdep_vox12.ply group: cat1-A src-geometry-precision: 
12 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 longdress_vox10_1300: src: longdress_vox10_1300.ply norm: longdress_vox10_1300_n.ply group: cat1-A src-geometry-precision: 10 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1023 loot_viewdep_vox12: src: loot_viewdep_vox12.ply norm: loot_viewdep_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 loot_vox10_1200: src: loot_vox10_1200.ply norm: loot_vox10_1200_n.ply group: cat1-A src-geometry-precision: 10 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1023 palazzo_carignano_dense_vox14: src: Palazzo_Carignano_Dense_vox14.ply group: cat1-B src-geometry-precision: 14 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 16383 palazzo_carignano_dense_vox20: src: Palazzo_Carignano_Dense_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 queen_0200: src: queen_0200.ply norm: queen_frame_0200_n.ply group: cat1-A src-geometry-precision: 10 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1023 redandblack_viewdep_vox12: src: redandblack_viewdep_vox12.ply norm: redandblack_viewdep_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 redandblack_vox10_1550: src: redandblack_vox10_1550.ply norm: redandblack_vox10_1550_n.ply group: cat1-A src-geometry-precision: 10 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1023 shiva_00035_vox12: src: Shiva_00035_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 shiva_00035_vox20: src: Shiva_00035_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 soldier_viewdep_vox12: src: 
soldier_viewdep_vox12.ply norm: soldier_viewdep_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 soldier_vox10_0690: src: soldier_vox10_0690.ply norm: soldier_vox10_0690_n.ply group: cat1-A src-geometry-precision: 10 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1023 stanford_area_2_vox16: src: Stanford_Area_2_vox16.ply group: cat1-B src-geometry-precision: 16 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 65535 stanford_area_2_vox20: src: Stanford_Area_2_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 stanford_area_4_vox16: src: Stanford_Area_4_vox16.ply group: cat1-B src-geometry-precision: 16 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 65535 stanford_area_4_vox20: src: Stanford_Area_4_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 staue_klimt_vox12: src: Staue_Klimt_vox12.ply group: cat1-B src-geometry-precision: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 staue_klimt_vox20: src: Staue_Klimt_vox20.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 thaidancer_viewdep_vox12: src: Thaidancer_viewdep_vox12.ply norm: Thaidancer_viewdep_vox12.ply group: cat1-A src-geometry-precision: 12 seq_lod: 12 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 4095 ulb_unicorn_hires_vox15: src: ULB_Unicorn_HiRes_vox15_n.ply group: cat1-B src-geometry-precision: 15 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 32767 ulb_unicorn_hires_vox20: src: ULB_Unicorn_HiRes_vox20_n.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575 ulb_unicorn_vox13: src: ULB_Unicorn_vox13_n.ply group: cat1-A src-geometry-precision: 13 seq_lod: 12 
has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 8191 ulb_unicorn_vox20: src: ULB_Unicorn_vox20_n.ply group: cat1-B src-geometry-precision: 20 has_colour: 1 bitdepth_colour: 8 pcerrorflags: - resolution: 1048575
9,815
20.716814
58
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/sequences-cat2.yaml
# Common configuration parameters according to N17523 CTC. --- sequences: 8ivfbv2_longdress_vox10: src-dir: 8iVFBv2/longdress/Ply src: longdress_vox10_{1051..1350}.ply norm-dir: longdress_n norm: longdress_vox10_{1051..1350}_n.ply #src: longdress_vox10_%04d.ply #norm: longdress_vox10_%04d_n.ply frame-rate: 30 num-frames: 300 group: cat2-B src-geometry-precision: 10 has_colour: true bitdepth_colour: 8 pcerrorflags: - resolution: 1023 8ivfbv2_loot_vox10: src-dir: 8iVFBv2/loot/Ply src: loot_vox10_{1000..1299}.ply norm-dir: loot_n norm: loot_vox10_{1000..1299}_n.ply #src: loot_vox10_%04d.ply #norm: loot_vox10_%04d_n.ply frame-rate: 30 num-frames: 300 group: cat2-A src-geometry-precision: 10 has_colour: true bitdepth_colour: 8 pcerrorflags: - resolution: 1023 8ivfbv2_redandblack_vox10: src-dir: 8iVFBv2/redandblack/Ply src: redandblack_vox10_{1450..1749}.ply norm-dir: redandblack_n norm: redandblack_vox10_{1450..1749}_n.ply #src: redandblack_vox10_%04d.ply #norm: redandblack_vox10_%04d_n.ply frame-rate: 30 num-frames: 300 group: cat2-A src-geometry-precision: 10 has_colour: true bitdepth_colour: 8 pcerrorflags: - resolution: 1023 8ivfbv2_soldier_vox10: src-dir: 8iVFBv2/soldier/Ply src: soldier_vox10_{0536..0835}.ply norm-dir: soldier_n norm: soldier_vox10_{0536..0835}_n.ply #src: soldier_vox10_%04d.ply #norm: soldier_vox10_%04d_n.ply frame-rate: 30 num-frames: 300 group: cat2-A src-geometry-precision: 10 has_colour: true bitdepth_colour: 8 pcerrorflags: - resolution: 1023 basketball_player_vox11: src-dir: basketball_player_vox11 src: basketball_player_vox11_{00000001..00000064}.ply #src: basketball_player_vox11_%08d.ply first-frame: 1 frame-rate: 30 num-frames: 64 group: cat2-C src-geometry-precision: 11 has_colour: true bitdepth_colour: 8 pcerrorflags: - resolution: 2047 dancer_player_vox11: src-dir: dancer_vox11 src: dancer_vox11_{00000001..00000064}.ply #src: dancer_vox11_%08d.ply first-frame: 1 frame-rate: 30 num-frames: 64 group: cat2-C src-geometry-precision: 11 
has_colour: true bitdepth_colour: 8 pcerrorflags: - resolution: 2047 queen: src-dir: queen src: frame_{0000..0249}.ply norm-dir: queen_n norm: frame_{0000..0249}_n.ply #src: frame_%04d.ply #norm: frame_%04d_n.ply first-frame: 0 frame-rate: 50 num-frames: 250 group: cat2-A src-geometry-precision: 10 has_colour: true bitdepth_colour: 8 pcerrorflags: - resolution: 1023
2,774
23.557522
58
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/sequences-cat3.yaml
# Common configuration parameters according to N17523 CTC. --- sequences: # fused scene (with RGB + Reflectance) citytunnel_q1mm: src: citytunnel_q1mm.ply group: cat3-fused # precision is actually (21, 20, 16) src-geometry-precision: 21 src-unit-metres: 0.001 seq_lod: 10 seq_lod_bias: '1,1,2' has_refl: 1 has_colour: 1 bitdepth_refl: 16 bitdepth_colour: 8 reflectance8b16b_scale_factor: 255 pcerrorflags: - resolution: 30000 overpass_q1mm: src: overpass_q1mm.ply group: cat3-fused # precision is actually (20, 20, 16) src-geometry-precision: 20 src-unit-metres: 0.001 seq_lod: 10 seq_lod_bias: '1,1,2' has_refl: 1 has_colour: 1 bitdepth_refl: 16 bitdepth_colour: 8 reflectance8b16b_scale_factor: 255 pcerrorflags: - resolution: 30000 tollbooth_q1mm: src: tollbooth_q1mm.ply group: cat3-fused # precision is actually (21, 20, 17) src-geometry-precision: 21 src-unit-metres: 0.001 seq_lod: 10 seq_lod_bias: '1,1,2' has_refl: 1 has_colour: 1 bitdepth_refl: 16 bitdepth_colour: 8 reflectance8b16b_scale_factor: 255 pcerrorflags: - resolution: 30000 # temporal sequences (Reflectance only) ford_01_q1mm: src-dir: Ford_01_q_1mm src: Ford_01_vox1mm-{0100..1599}.ply first-frame: 100 frame-rate: 10 num-frames: 1500 group: cat3-frame src-geometry-precision: 18 src-unit-metres: 0.001 numLasers: 64 lasersTheta: > -0.461611, -0.451281, -0.440090, -0.430000, -0.418945, -0.408667, -0.398230, -0.388220, -0.377890, -0.367720, -0.357393, -0.347628, -0.337549, -0.327694, -0.317849, -0.308124, -0.298358, -0.289066, -0.279139, -0.269655, -0.260049, -0.250622, -0.241152, -0.231731, -0.222362, -0.213039, -0.203702, -0.194415, -0.185154, -0.175909, -0.166688, -0.157484, -0.149826, -0.143746, -0.137673, -0.131631, -0.125582, -0.119557, -0.113538, -0.107534, -0.101530, -0.095548, -0.089562, -0.083590, -0.077623, -0.071665, -0.065708, -0.059758, -0.053810, -0.047868, -0.041931, -0.035993, -0.030061, -0.024124, -0.018193, -0.012259, -0.006324, -0.000393, 0.005547, 0.011485, 0.017431, 0.023376, 0.029328, 
0.035285 lasersZ: > 29.900000, 26.600000, 28.300000, 24.600000, 26.800000, 25.100000, 24.800000, 22.400000, 22.400000, 21.900000, 23.000000, 20.700000, 21.100000, 20.300000, 19.900000, 19.000000, 18.900000, 15.300000, 17.300000, 16.000000, 16.200000, 15.100000, 14.800000, 14.400000, 13.800000, 13.000000, 12.700000, 12.100000, 11.500000, 11.000000, 10.400000, 9.800000, 10.700000, 10.300000, 10.000000, 9.400000, 9.100000, 8.600000, 8.200000, 7.700000, 7.400000, 6.800000, 6.500000, 6.000000, 5.600000, 5.100000, 4.700000, 4.300000, 3.900000, 3.500000, 3.000000, 2.600000, 2.100000, 1.800000, 1.300000, 0.900000, 0.500000, -0.100000, -0.400000, -0.900000, -1.200000, -1.700000, -2.100000, -2.500000 lasersNumPhiPerTurn: > 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000 seq_lod: 10 seq_lod_bias: '1,1,8' has_refl: 1 bitdepth_refl: 8 encflags: # fix the size of the bounding box to align frames for predictive coding - autoSeqBbox: 0 - seqOrigin: '-131072, -131072, -131072' - seqSizeWhd: '262143, 262143, 262143' pcerrorflags: - resolution: 30000 ford_02_q1mm: src-dir: Ford_02_q_1mm src: Ford_02_vox1mm-{0100..1599}.ply first-frame: 100 frame-rate: 10 num-frames: 1500 group: cat3-frame src-geometry-precision: 18 src-unit-metres: 0.001 numLasers: 64 lasersTheta: > -0.461611, -0.451281, -0.440090, -0.430000, -0.418945, -0.408667, -0.398230, -0.388220, -0.377890, -0.367720, -0.357393, -0.347628, -0.337549, -0.327694, -0.317849, -0.308124, -0.298358, -0.289066, -0.279139, -0.269655, -0.260049, -0.250622, -0.241152, -0.231731, -0.222362, -0.213039, -0.203702, -0.194415, -0.185154, -0.175909, -0.166688, -0.157484, -0.149826, -0.143746, -0.137673, -0.131631, -0.125582, -0.119557, -0.113538, 
-0.107534, -0.101530, -0.095548, -0.089562, -0.083590, -0.077623, -0.071665, -0.065708, -0.059758, -0.053810, -0.047868, -0.041931, -0.035993, -0.030061, -0.024124, -0.018193, -0.012259, -0.006324, -0.000393, 0.005547, 0.011485, 0.017431, 0.023376, 0.029328, 0.035285 lasersZ: > 29.900000, 26.600000, 28.300000, 24.600000, 26.800000, 25.100000, 24.800000, 22.400000, 22.400000, 21.900000, 23.000000, 20.700000, 21.100000, 20.300000, 19.900000, 19.000000, 18.900000, 15.300000, 17.300000, 16.000000, 16.200000, 15.100000, 14.800000, 14.400000, 13.800000, 13.000000, 12.700000, 12.100000, 11.500000, 11.000000, 10.400000, 9.800000, 10.700000, 10.300000, 10.000000, 9.400000, 9.100000, 8.600000, 8.200000, 7.700000, 7.400000, 6.800000, 6.500000, 6.000000, 5.600000, 5.100000, 4.700000, 4.300000, 3.900000, 3.500000, 3.000000, 2.600000, 2.100000, 1.800000, 1.300000, 0.900000, 0.500000, -0.100000, -0.400000, -0.900000, -1.200000, -1.700000, -2.100000, -2.500000 lasersNumPhiPerTurn: > 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000 seq_lod: 10 seq_lod_bias: '1,1,8' has_refl: 1 bitdepth_refl: 8 encflags: # fix the size of the bounding box to align frames for predictive coding - autoSeqBbox: 0 - seqOrigin: '-131072, -131072, -131072' - seqSizeWhd: '262143, 262143, 262143' pcerrorflags: - resolution: 30000 ford_03_q1mm: src-dir: Ford_03_q_1mm src: Ford_03_vox1mm-{0200..1699}.ply first-frame: 200 frame-rate: 10 num-frames: 1500 group: cat3-frame src-geometry-precision: 18 src-unit-metres: 0.001 numLasers: 64 lasersTheta: > -0.461611, -0.451281, -0.440090, -0.430000, -0.418945, -0.408667, -0.398230, -0.388220, -0.377890, -0.367720, -0.357393, -0.347628, -0.337549, -0.327694, -0.317849, 
-0.308124, -0.298358, -0.289066, -0.279139, -0.269655, -0.260049, -0.250622, -0.241152, -0.231731, -0.222362, -0.213039, -0.203702, -0.194415, -0.185154, -0.175909, -0.166688, -0.157484, -0.149826, -0.143746, -0.137673, -0.131631, -0.125582, -0.119557, -0.113538, -0.107534, -0.101530, -0.095548, -0.089562, -0.083590, -0.077623, -0.071665, -0.065708, -0.059758, -0.053810, -0.047868, -0.041931, -0.035993, -0.030061, -0.024124, -0.018193, -0.012259, -0.006324, -0.000393, 0.005547, 0.011485, 0.017431, 0.023376, 0.029328, 0.035285 lasersZ: > 29.900000, 26.600000, 28.300000, 24.600000, 26.800000, 25.100000, 24.800000, 22.400000, 22.400000, 21.900000, 23.000000, 20.700000, 21.100000, 20.300000, 19.900000, 19.000000, 18.900000, 15.300000, 17.300000, 16.000000, 16.200000, 15.100000, 14.800000, 14.400000, 13.800000, 13.000000, 12.700000, 12.100000, 11.500000, 11.000000, 10.400000, 9.800000, 10.700000, 10.300000, 10.000000, 9.400000, 9.100000, 8.600000, 8.200000, 7.700000, 7.400000, 6.800000, 6.500000, 6.000000, 5.600000, 5.100000, 4.700000, 4.300000, 3.900000, 3.500000, 3.000000, 2.600000, 2.100000, 1.800000, 1.300000, 0.900000, 0.500000, -0.100000, -0.400000, -0.900000, -1.200000, -1.700000, -2.100000, -2.500000 lasersNumPhiPerTurn: > 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000 seq_lod: 10 seq_lod_bias: '1,1,8' has_refl: 1 bitdepth_refl: 8 encflags: # fix the size of the bounding box to align frames for predictive coding - autoSeqBbox: 0 - seqOrigin: '-131072, -131072, -131072' - seqSizeWhd: '262143, 262143, 262143' pcerrorflags: - resolution: 30000 qnxadas-junction-approach: src-dir: qnxadas-junction-approach src: '{000001..000074}.ply' norm-dir: qnxadas-junction-approach 
norm: '{000001..000074}_n.ply' first-frame: 1 frame-rate: 5 num-frames: 74 group: cat3-frame src-geometry-precision: 18 src-unit-metres: 0.001 numLasers: 16 lasersTheta: > -0.268099, -0.230939, -0.194419, -0.158398, -0.122788, -0.087491, -0.052410, -0.017455, 0.017456, 0.052408, 0.087487, 0.122781, 0.158381, 0.194378, 0.230865, 0.267953 lasersZ: > -2.000000, -1.500000, -1.300000, -1.100000, -1.000000, -1.000000, -1.000000, -1.000000, 0.000000, 0.000000, -0.100000, -0.200000, -0.200000, -0.200000, -0.300000, -0.200000 lasersNumPhiPerTurn: > 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360 seq_lod: 10 seq_lod_bias: '1,1,8' has_refl: 1 bitdepth_refl: 8 encflags: # fix the size of the bounding box to align frames for predictive coding - autoSeqBbox: 0 - seqOrigin: '-131072, -131072, -131072' - seqSizeWhd: '262143, 262143, 262143' pcerrorflags: - resolution: 30000 qnxadas-junction-exit: src-dir: qnxadas-junction-exit src: '{000001..000074}.ply' norm-dir: qnxadas-junction-exit norm: '{000001..000074}_n.ply' first-frame: 1 frame-rate: 5 num-frames: 74 group: cat3-frame src-geometry-precision: 18 src-unit-metres: 0.001 numLasers: 16 lasersTheta: > -0.268099, -0.230939, -0.194419, -0.158398, -0.122788, -0.087491, -0.052410, -0.017455, 0.017456, 0.052408, 0.087487, 0.122781, 0.158381, 0.194378, 0.230865, 0.267953 lasersZ: > -2.000000, -1.500000, -1.300000, -1.100000, -1.000000, -1.000000, -1.000000, -1.000000, 0.000000, 0.000000, -0.100000, -0.200000, -0.200000, -0.200000, -0.300000, -0.200000 lasersNumPhiPerTurn: > 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360 seq_lod: 10 seq_lod_bias: '1,1,8' has_refl: 1 bitdepth_refl: 8 encflags: # fix the size of the bounding box to align frames for predictive coding - autoSeqBbox: 0 - seqOrigin: '-131072, -131072, -131072' - seqSizeWhd: '262143, 262143, 262143' pcerrorflags: - resolution: 30000 qnxadas-motorway-join: src-dir: qnxadas-motorway-join src: '{000001..000500}.ply' 
norm-dir: qnxadas-motorway-join norm: '{000001..000500}_n.ply' first-frame: 1 frame-rate: 5 num-frames: 500 group: cat3-frame src-geometry-precision: 18 src-unit-metres: 0.001 numLasers: 16 lasersTheta: > -0.268099, -0.230939, -0.194419, -0.158398, -0.122788, -0.087491, -0.052410, -0.017455, 0.017456, 0.052408, 0.087487, 0.122781, 0.158381, 0.194378, 0.230865, 0.267953 lasersZ: > -2.000000, -1.500000, -1.300000, -1.100000, -1.000000, -1.000000, -1.000000, -1.000000, 0.000000, 0.000000, -0.100000, -0.200000, -0.200000, -0.200000, -0.300000, -0.200000 lasersNumPhiPerTurn: > 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360 seq_lod: 10 seq_lod_bias: '1,1,8' has_refl: 1 bitdepth_refl: 8 encflags: # fix the size of the bounding box to align frames for predictive coding - autoSeqBbox: 0 - seqOrigin: '-131072, -131072, -131072' - seqSizeWhd: '262143, 262143, 262143' pcerrorflags: - resolution: 30000 qnxadas-navigating-bends: src-dir: qnxadas-navigating-bends src: '{000001..000300}.ply' norm-dir: qnxadas-navigating-bends norm: '{000001..000300}_n.ply' first-frame: 1 frame-rate: 5 num-frames: 300 group: cat3-frame src-geometry-precision: 18 src-unit-metres: 0.001 numLasers: 16 lasersTheta: > -0.268099, -0.230939, -0.194419, -0.158398, -0.122788, -0.087491, -0.052410, -0.017455, 0.017456, 0.052408, 0.087487, 0.122781, 0.158381, 0.194378, 0.230865, 0.267953 lasersZ: > -2.000000, -1.500000, -1.300000, -1.100000, -1.000000, -1.000000, -1.000000, -1.000000, 0.000000, 0.000000, -0.100000, -0.200000, -0.200000, -0.200000, -0.300000, -0.200000 lasersNumPhiPerTurn: > 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360 seq_lod: 10 seq_lod_bias: '1,1,8' has_refl: 1 bitdepth_refl: 8 encflags: # fix the size of the bounding box to align frames for predictive coding - autoSeqBbox: 0 - seqOrigin: '-131072, -131072, -131072' - seqSizeWhd: '262143, 262143, 262143' pcerrorflags: - resolution: 30000
13,758
38.088068
77
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/trisoup-liftt-ctc-lossy-geom-lossy-attrs.yaml
# Test conditions for N17995 CTC C2 using TMC13 trisoup # trisoup lossy-geom -- lossy-attrs liftt # -- cat 1 only, since trisoup doesn't apply to very sparce clouds --- categories: lossy-geom-lossy-attrs: encflags: - mode: 0 ## # geometry parameters (trisoup) - neighbourAvailBoundaryLog2: 8 - intra_pred_max_node_size_log2: 6 - inferredDirectCodingMode: 0 - planarEnabled: 1 - planarModeIdcmUse: 0 - positionQuantizationScale: '$eval{ 1 / (1 << ( ${src-geometry-precision} - ${test-depth} )) }' - trisoupNodeSizeLog2: r01: 5 r02: 4 r03: 3 r04: 2 #### # attribute coding (common options -- relies on option ordering) # - use lifting transform for lossy conditions - convertPlyColourspace: 1 - transformType: 2 - numberOfNearestNeighborsInPrediction: 3 - levelOfDetailCount: '$eval{ ${seq_lod} || 12 }' - lodDecimator: 0 - - !conditional '"${group}" =~ m{^cat3}' - lodDecimator: 1 - lod_neigh_bias: ${seq_lod_bias} ## # attribute coding -- colour - - !conditional '${has_colour}' - adaptivePredictionThreshold: 64 - qp: r01: 40 r02: 34 r03: 28 r04: 22 - qpChromaOffset: 0 - bitdepth: 8 - attribute: color decflags: - mode: 1 - convertPlyColourspace: 1 pcerrorflags: - dropdups: 2 - neighborsProc: 1 sequences: # cat1 arco_valentino_dense_vox12: { test-depth: 9 } arco_valentino_dense_vox20: { test-depth: 9 } basketball_player_vox11_00000200: { test-depth: 11 } boxer_viewdep_vox12: { test-depth: 11 } dancer_vox11_00000001: { test-depth: 11 } egyptian_mask_vox12: { test-depth: 9 } egyptian_mask_vox20: { test-depth: 9 } facade_00009_vox12: { test-depth: 11 } facade_00009_vox20: { test-depth: 11 } facade_00015_vox14: { test-depth: 12 } facade_00015_vox20: { test-depth: 12 } facade_00064_vox11: { test-depth: 11 } facade_00064_vox14: { test-depth: 12 } facade_00064_vox20: { test-depth: 12 } frog_00067_vox12: { test-depth: 11 } frog_00067_vox20: { test-depth: 11 } head_00039_vox12: { test-depth: 12 } head_00039_vox20: { test-depth: 12 } house_without_roof_00057_vox12: { test-depth: 11 } 
house_without_roof_00057_vox20: { test-depth: 11 } landscape_00014_vox14: { test-depth: 12 } landscape_00014_vox20: { test-depth: 12 } longdress_viewdep_vox12: { test-depth: 11 } longdress_vox10_1300: { test-depth: 10 } loot_viewdep_vox12: { test-depth: 11 } loot_vox10_1200: { test-depth: 10 } palazzo_carignano_dense_vox14: { test-depth: 9 } palazzo_carignano_dense_vox20: { test-depth: 9 } queen_0200: { test-depth: 10 } redandblack_viewdep_vox12: { test-depth: 11 } redandblack_vox10_1550: { test-depth: 10 } shiva_00035_vox12: { test-depth: 10 } shiva_00035_vox20: { test-depth: 10 } soldier_viewdep_vox12: { test-depth: 11 } soldier_vox10_0690: { test-depth: 10 } stanford_area_2_vox16: { test-depth: 12 } stanford_area_2_vox20: { test-depth: 12 } stanford_area_4_vox16: { test-depth: 12 } stanford_area_4_vox20: { test-depth: 12 } staue_klimt_vox12: { test-depth: 9 } staue_klimt_vox20: { test-depth: 9 } thaidancer_viewdep_vox12: { test-depth: 11 } ulb_unicorn_hires_vox15: { test-depth: 12 } ulb_unicorn_hires_vox20: { test-depth: 12 } ulb_unicorn_vox13: { test-depth: 11 } ulb_unicorn_vox20: { test-depth: 11 }
4,283
39.037383
101
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/cfg/trisoup-raht-ctc-lossy-geom-lossy-attrs.yaml
# Test conditions for N17995 CTC C2 using TMC13 trisoup # trisoup lossy-geom -- lossy-attrs raht # -- cat 1 only, since trisoup doesn't apply to very sparce clouds --- categories: lossy-geom-lossy-attrs: encflags: - mode: 0 ## # geometry parameters (trisoup) - neighbourAvailBoundaryLog2: 8 - intra_pred_max_node_size_log2: 6 - inferredDirectCodingMode: 0 - planarEnabled: 1 - planarModeIdcmUse: 0 - positionQuantizationScale: '$eval{ 1 / (1 << ( ${src-geometry-precision} - ${test-depth} )) }' - trisoupNodeSizeLog2: r01: 5 r02: 4 r03: 3 r04: 2 #### # attribute coding (common options -- relies on option ordering) # - use raht - convertPlyColourspace: 1 - transformType: 0 ## # attribute coding -- colour - - !conditional '${has_colour}' - qp: r01: 40 r02: 34 r03: 28 r04: 22 # NB: raht doesn't yet support quantizationStepChroma - qpChromaOffset: 0 - bitdepth: 8 - attribute: color decflags: - mode: 1 - convertPlyColourspace: 1 pcerrorflags: - dropdups: 2 - neighborsProc: 1 sequences: # cat1 arco_valentino_dense_vox12: { test-depth: 9 } arco_valentino_dense_vox20: { test-depth: 9 } basketball_player_vox11_00000200: { test-depth: 11 } boxer_viewdep_vox12: { test-depth: 11 } dancer_vox11_00000001: { test-depth: 11 } egyptian_mask_vox12: { test-depth: 9 } egyptian_mask_vox20: { test-depth: 9 } facade_00009_vox12: { test-depth: 11 } facade_00009_vox20: { test-depth: 11 } facade_00015_vox14: { test-depth: 12 } facade_00015_vox20: { test-depth: 12 } facade_00064_vox11: { test-depth: 11 } facade_00064_vox14: { test-depth: 12 } facade_00064_vox20: { test-depth: 12 } frog_00067_vox12: { test-depth: 11 } frog_00067_vox20: { test-depth: 11 } head_00039_vox12: { test-depth: 12 } head_00039_vox20: { test-depth: 12 } house_without_roof_00057_vox12: { test-depth: 11 } house_without_roof_00057_vox20: { test-depth: 11 } landscape_00014_vox14: { test-depth: 12 } landscape_00014_vox20: { test-depth: 12 } longdress_viewdep_vox12: { test-depth: 11 } longdress_vox10_1300: { test-depth: 10 } 
loot_viewdep_vox12: { test-depth: 11 } loot_vox10_1200: { test-depth: 10 } palazzo_carignano_dense_vox14: { test-depth: 9 } palazzo_carignano_dense_vox20: { test-depth: 9 } queen_0200: { test-depth: 10 } redandblack_viewdep_vox12: { test-depth: 11 } redandblack_vox10_1550: { test-depth: 10 } shiva_00035_vox12: { test-depth: 10 } shiva_00035_vox20: { test-depth: 10 } soldier_viewdep_vox12: { test-depth: 11 } soldier_vox10_0690: { test-depth: 10 } stanford_area_2_vox16: { test-depth: 12 } stanford_area_2_vox20: { test-depth: 12 } stanford_area_4_vox16: { test-depth: 12 } stanford_area_4_vox20: { test-depth: 12 } staue_klimt_vox12: { test-depth: 9 } staue_klimt_vox20: { test-depth: 9 } thaidancer_viewdep_vox12: { test-depth: 11 } ulb_unicorn_hires_vox15: { test-depth: 12 } ulb_unicorn_hires_vox20: { test-depth: 12 } ulb_unicorn_vox13: { test-depth: 11 } ulb_unicorn_vox20: { test-depth: 11 }
4,023
39.24
101
yaml
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/dependencies/nanoflann/KDTreeVectorOfVectorsAdaptor.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2011-16 Jose Luis Blanco (joseluisblancoc@gmail.com). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*************************************************************************/ #pragma once #include "nanoflann.hpp" #include <vector> // ===== This example shows how to use nanoflann with these types of containers: ======= // typedef std::vector<std::vector<double> > my_vector_of_vectors_t; // typedef std::vector<Eigen::VectorXd> my_vector_of_vectors_t; // This requires #include // <Eigen/Dense> // ===================================================================================== /** A simple vector-of-vectors adaptor for nanoflann, without duplicating the storage. * The i'th vector represents a point in the state space. * * \tparam DIM If set to >0, it specifies a compile-time fixed dimensionality for the points in * the data set, allowing more compiler optimizations. * \tparam num_t The type of the point coordinates (typically, double or float). * \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, * nanoflann::metric_L2_Simple, etc. * \tparam IndexType The type for indices in the KD-tree index (typically, size_t of int) */ template <class VectorOfVectorsType, typename num_t = double, int DIM = -1, class Distance = nanoflann::metric_L2, typename IndexType = size_t> struct KDTreeVectorOfVectorsAdaptor { typedef KDTreeVectorOfVectorsAdaptor<VectorOfVectorsType, num_t, DIM, Distance> self_t; typedef typename Distance::template traits<num_t, self_t>::distance_t metric_t; typedef nanoflann::KDTreeSingleIndexAdaptor<metric_t, self_t, DIM, IndexType> index_t; index_t *index; //! The kd-tree index for the user to call its methods as usual with any other //! FLANN index. 
/// Constructor: takes a const ref to the vector of vectors object with the data points KDTreeVectorOfVectorsAdaptor(const int dimensionality, const VectorOfVectorsType &mat, const int leaf_max_size = 10) : m_data(mat) { // assert(mat.size()!=0 && mat[0].size()!=0); // const size_t dims = mat[0].size(); // if (DIM>0 && static_cast<int>(dims)!=DIM) // throw std::runtime_error("Data set dimensionality does not match the 'DIM' // template // argument"); // size_t dims = dimensionality; index = new index_t(dimensionality, *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size)); index->buildIndex(); } ~KDTreeVectorOfVectorsAdaptor() { delete index; } const VectorOfVectorsType &m_data; /** Query for the \a num_closest closest points to a given point (entered as * query_point[0:dim-1]). * Note that this is a short-cut method for index->findNeighbors(). * The user can also call index->... methods as desired. * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface. 
*/ inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int nChecks_IGNORED = 10) const { nanoflann::KNNResultSet<num_t, IndexType> resultSet(num_closest); resultSet.init(out_indices, out_distances_sq); index->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); } /** @name Interface expected by KDTreeSingleIndexAdaptor * @{ */ const self_t &derived() const { return *this; } self_t &derived() { return *this; } // Must return the number of data points inline size_t kdtree_get_point_count() const { return m_data.getPointCount(); } // Returns the distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" // stored in the class: inline num_t kdtree_distance(const num_t *p1, const size_t idx_p2, size_t size) const { num_t s = 0; for (size_t i = 0; i < size; i++) { const num_t d = p1[i] - m_data[idx_p2][i]; s += d * d; } return s; } // Returns the dim'th component of the idx'th point in the class: inline num_t kdtree_get_pt(const size_t idx, int dim) const { return m_data[idx][dim]; } // Optional bounding-box computation: return false to default to a standard bbox computation loop. // Return true if the BBOX was already computed by the class and returned in "bb" so it can be // avoided to redo it again. // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds) template <class BBOX> bool kdtree_get_bbox(BBOX & /*bb*/) const { return false; } /** @} */ }; // end of KDTreeVectorOfVectorsAdaptor
6,011
45.604651
100
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/dependencies/nanoflann/nanoflann.hpp
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * Copyright 2011-2016 Jose Luis Blanco (joseluisblancoc@gmail.com). * All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *************************************************************************/ /** \mainpage nanoflann C++ API documentation * nanoflann is a C++ header-only library for building KD-Trees, mostly * optimized for 2D or 3D point clouds. * * nanoflann does not require compiling or installing, just an * #include <nanoflann.hpp> in your code. 
* * See: * - <a href="modules.html" >C++ API organized by modules</a> * - <a href="https://github.com/jlblancoc/nanoflann" >Online README</a> * - <a href="http://jlblancoc.github.io/nanoflann/" >Doxygen documentation</a> */ #ifndef NANOFLANN_HPP_ #define NANOFLANN_HPP_ #include <vector> #include <array> #include <cassert> #include <algorithm> #include <stdexcept> #include <cstdio> // for fwrite() #define _USE_MATH_DEFINES // Required by MSVC to define M_PI,etc. in <cmath> #include <cmath> // for abs() #include <cstdlib> // for abs() #include <limits> // Avoid conflicting declaration of min/max macros in windows headers #if !defined(NOMINMAX) && (defined(_WIN32) || defined(_WIN32_) || defined(WIN32) || defined(_WIN64)) # define NOMINMAX # ifdef max # undef max # undef min # endif #endif namespace nanoflann { /** @addtogroup nanoflann_grp nanoflann C++ library for ANN * @{ */ /** * Traits if object is resizable and assignable (typically has a resize | assign method) */ template <typename T, typename = int> struct has_resize : std::false_type {}; template <typename T> struct has_resize <T, decltype((void) std::declval<T>().resize(1), 0)> : std::true_type {}; template <typename T, typename = int> struct has_assign : std::false_type {}; template <typename T> struct has_assign <T, decltype((void) std::declval<T>().assign(1, 0), 0)> : std::true_type {}; /** * Free function to resize a resizable object */ template <typename Container> inline typename std::enable_if<has_resize<Container>::value, void>::type resize(Container& c, const size_t nElements) { c.resize(nElements); } /** * Free function that has no effects on non resizable containers (e.g. 
std::array) * It raises an exception if the expected size does not match */ template <typename Container> inline typename std::enable_if<!has_resize<Container>::value, void>::type resize(Container& c, const size_t nElements) { if(nElements != c.size()) throw std::logic_error("Try to change the size of a std::array."); } /** * Free function to assign to a container */ template <typename Container, typename T> inline typename std::enable_if<has_assign<Container>::value, void>::type assign(Container& c, const size_t nElements, const T& value) { c.assign(nElements, value); } /** * Free function to assign to a std::array */ template <typename Container, typename T> inline typename std::enable_if<!has_assign<Container>::value, void>::type assign(Container& c, const size_t nElements, const T& value) { for (size_t i=0;i<nElements;i++) c[i]=value; } /** Library version: 0xMmP (M=Major,m=minor,P=patch) */ #define NANOFLANN_VERSION 0x123 /** @addtogroup result_sets_grp Result set classes * @{ */ template <typename DistanceType, typename IndexType = size_t, typename CountType = size_t> class KNNResultSet { IndexType * indices; DistanceType* dists; CountType capacity; CountType count; public: inline KNNResultSet(CountType capacity_) : indices(0), dists(0), capacity(capacity_), count(0) { } inline void init(IndexType* indices_, DistanceType* dists_) { indices = indices_; dists = dists_; count = 0; if (capacity) dists[capacity-1] = (std::numeric_limits<DistanceType>::max)(); } inline CountType size() const { return count; } inline bool full() const { return count == capacity; } /** * Called during search to add an element matching the criteria. * @return true if the search should be continued, false if the results are sufficient */ inline bool addPoint(DistanceType dist, IndexType index) { CountType i; for (i = count; i > 0; --i) { #ifdef NANOFLANN_FIRST_MATCH // If defined and two points have the same distance, the one with the lowest-index will be returned first. 
if ( (dists[i-1] > dist) || ((dist == dists[i-1]) && (indices[i-1] > index)) ) { #else if (dists[i-1] > dist) { #endif if (i < capacity) { dists[i] = dists[i-1]; indices[i] = indices[i-1]; } } else break; } if (i < capacity) { dists[i] = dist; indices[i] = index; } if (count < capacity) count++; // tell caller that the search shall continue return true; } inline DistanceType worstDist() const { return dists[capacity-1]; } }; /** operator "<" for std::sort() */ struct IndexDist_Sorter { /** PairType will be typically: std::pair<IndexType,DistanceType> */ template <typename PairType> inline bool operator()(const PairType &p1, const PairType &p2) const { return p1.second < p2.second; } }; /** * A result-set class used when performing a radius based search. */ template <typename DistanceType, typename IndexType = size_t> class RadiusResultSet { public: const DistanceType radius; std::vector<std::pair<IndexType, DistanceType> > &m_indices_dists; inline RadiusResultSet(DistanceType radius_, std::vector<std::pair<IndexType,DistanceType> > &indices_dists) : radius(radius_), m_indices_dists(indices_dists) { init(); } inline void init() { clear(); } inline void clear() { m_indices_dists.clear(); } inline size_t size() const { return m_indices_dists.size(); } inline bool full() const { return true; } /** * Called during search to add an element matching the criteria. 
* @return true if the search should be continued, false if the results are sufficient */ inline bool addPoint(DistanceType dist, IndexType index) { if (dist < radius) m_indices_dists.push_back(std::make_pair(index, dist)); return true; } inline DistanceType worstDist() const { return radius; } /** * Find the worst result (furtherest neighbor) without copying or sorting * Pre-conditions: size() > 0 */ std::pair<IndexType,DistanceType> worst_item() const { if (m_indices_dists.empty()) throw std::runtime_error("Cannot invoke RadiusResultSet::worst_item() on an empty list of results."); typedef typename std::vector<std::pair<IndexType, DistanceType> >::const_iterator DistIt; DistIt it = std::max_element(m_indices_dists.begin(), m_indices_dists.end(), IndexDist_Sorter()); return *it; } }; /** @} */ /** @addtogroup loadsave_grp Load/save auxiliary functions * @{ */ template<typename T> void save_value(FILE* stream, const T& value, size_t count = 1) { fwrite(&value, sizeof(value), count, stream); } template<typename T> void save_value(FILE* stream, const std::vector<T>& value) { size_t size = value.size(); fwrite(&size, sizeof(size_t), 1, stream); fwrite(&value[0], sizeof(T), size, stream); } template<typename T> void load_value(FILE* stream, T& value, size_t count = 1) { size_t read_cnt = fread(&value, sizeof(value), count, stream); if (read_cnt != count) { throw std::runtime_error("Cannot read from file"); } } template<typename T> void load_value(FILE* stream, std::vector<T>& value) { size_t size; size_t read_cnt = fread(&size, sizeof(size_t), 1, stream); if (read_cnt != 1) { throw std::runtime_error("Cannot read from file"); } value.resize(size); read_cnt = fread(&value[0], sizeof(T), size, stream); if (read_cnt != size) { throw std::runtime_error("Cannot read from file"); } } /** @} */ /** @addtogroup metric_grp Metric (distance) classes * @{ */ struct Metric { }; /** Manhattan distance functor (generic version, optimized for high-dimensionality data sets). 
* Corresponding distance traits: nanoflann::metric_L1 * \tparam T Type of the elements (e.g. double, float, uint8_t) * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t) */ template<class T, class DataSource, typename _DistanceType = T> struct L1_Adaptor { typedef T ElementType; typedef _DistanceType DistanceType; const DataSource &data_source; L1_Adaptor(const DataSource &_data_source) : data_source(_data_source) { } inline DistanceType evalMetric(const T* a, const size_t b_idx, size_t size, DistanceType worst_dist = -1) const { DistanceType result = DistanceType(); const T* last = a + size; const T* lastgroup = last - 3; size_t d = 0; /* Process 4 items with each loop for efficiency. */ while (a < lastgroup) { const DistanceType diff0 = std::abs(a[0] - data_source.kdtree_get_pt(b_idx,d++)); const DistanceType diff1 = std::abs(a[1] - data_source.kdtree_get_pt(b_idx,d++)); const DistanceType diff2 = std::abs(a[2] - data_source.kdtree_get_pt(b_idx,d++)); const DistanceType diff3 = std::abs(a[3] - data_source.kdtree_get_pt(b_idx,d++)); result += diff0 + diff1 + diff2 + diff3; a += 4; if ((worst_dist > 0) && (result > worst_dist)) { return result; } } /* Process last 0-3 components. Not needed for standard vector lengths. */ while (a < last) { result += std::abs( *a++ - data_source.kdtree_get_pt(b_idx, d++) ); } return result; } template <typename U, typename V> inline DistanceType accum_dist(const U a, const V b, int ) const { return std::abs(a-b); } }; /** Squared Euclidean distance functor (generic version, optimized for high-dimensionality data sets). * Corresponding distance traits: nanoflann::metric_L2 * \tparam T Type of the elements (e.g. double, float, uint8_t) * \tparam _DistanceType Type of distance variables (must be signed) (e.g. 
float, double, int64_t) */ template<class T, class DataSource, typename _DistanceType = T> struct L2_Adaptor { typedef T ElementType; typedef _DistanceType DistanceType; const DataSource &data_source; L2_Adaptor(const DataSource &_data_source) : data_source(_data_source) { } inline DistanceType evalMetric(const T* a, const size_t b_idx, size_t size, DistanceType worst_dist = -1) const { DistanceType result = DistanceType(); const T* last = a + size; const T* lastgroup = last - 3; size_t d = 0; /* Process 4 items with each loop for efficiency. */ while (a < lastgroup) { const DistanceType diff0 = a[0] - data_source.kdtree_get_pt(b_idx,d++); const DistanceType diff1 = a[1] - data_source.kdtree_get_pt(b_idx,d++); const DistanceType diff2 = a[2] - data_source.kdtree_get_pt(b_idx,d++); const DistanceType diff3 = a[3] - data_source.kdtree_get_pt(b_idx,d++); result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; a += 4; if ((worst_dist > 0) && (result > worst_dist)) { return result; } } /* Process last 0-3 components. Not needed for standard vector lengths. */ while (a < last) { const DistanceType diff0 = *a++ - data_source.kdtree_get_pt(b_idx, d++); result += diff0 * diff0; } return result; } template <typename U, typename V> inline DistanceType accum_dist(const U a, const V b, int ) const { return (a - b) * (a - b); } }; /** Squared Euclidean (L2) distance functor (suitable for low-dimensionality datasets, like 2D or 3D point clouds) * Corresponding distance traits: nanoflann::metric_L2_Simple * \tparam T Type of the elements (e.g. double, float, uint8_t) * \tparam _DistanceType Type of distance variables (must be signed) (e.g. 
float, double, int64_t) */ template<class T, class DataSource, typename _DistanceType = T> struct L2_Simple_Adaptor { typedef T ElementType; typedef _DistanceType DistanceType; const DataSource &data_source; L2_Simple_Adaptor(const DataSource &_data_source) : data_source(_data_source) { } inline DistanceType evalMetric(const T* a, const size_t b_idx, size_t size) const { DistanceType result = DistanceType(); for (size_t i = 0; i < size; ++i) { const DistanceType diff = a[i] - data_source.kdtree_get_pt(b_idx, i); result += diff * diff; } return result; } template <typename U, typename V> inline DistanceType accum_dist(const U a, const V b, int ) const { DistanceType diff { a - b }; return diff * diff; } }; /** SO2 distance functor * Corresponding distance traits: nanoflann::metric_SO2 * \tparam T Type of the elements (e.g. double, float) * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double) * orientation is constrained to be in [-pi, pi] */ template<class T, class DataSource, typename _DistanceType = T> struct SO2_Adaptor { typedef T ElementType; typedef _DistanceType DistanceType; const DataSource &data_source; SO2_Adaptor(const DataSource &_data_source) : data_source(_data_source) { } inline DistanceType evalMetric(const T* a, const size_t b_idx, size_t size) const { return accum_dist(a[size-1], data_source.kdtree_get_pt(b_idx, size - 1) , size - 1); } template <typename U, typename V> inline DistanceType accum_dist(const U a, const V b, int ) const { DistanceType result = DistanceType(); result = b - a; if (result > M_PI) result -= 2. * M_PI; else if (result < -M_PI) result += 2. * M_PI; return result; } }; /** SO3 distance functor (Uses L2_Simple) * Corresponding distance traits: nanoflann::metric_SO3 * \tparam T Type of the elements (e.g. double, float) * \tparam _DistanceType Type of distance variables (must be signed) (e.g. 
float, double) */ template<class T, class DataSource, typename _DistanceType = T> struct SO3_Adaptor { typedef T ElementType; typedef _DistanceType DistanceType; L2_Simple_Adaptor<T, DataSource > distance_L2_Simple; SO3_Adaptor(const DataSource &_data_source) : distance_L2_Simple(_data_source) { } inline DistanceType evalMetric(const T* a, const size_t b_idx, size_t size) const { return distance_L2_Simple.evalMetric(a, b_idx, size); } template <typename U, typename V> inline DistanceType accum_dist(const U a, const V b, int idx) const { return distance_L2_Simple.accum_dist(a, b, idx); } }; /** Metaprogramming helper traits class for the L1 (Manhattan) metric */ struct metric_L1 : public Metric { template<class T, class DataSource> struct traits { typedef L1_Adaptor<T, DataSource> distance_t; }; }; /** Metaprogramming helper traits class for the L2 (Euclidean) metric */ struct metric_L2 : public Metric { template<class T, class DataSource> struct traits { typedef L2_Adaptor<T, DataSource> distance_t; }; }; /** Metaprogramming helper traits class for the L2_simple (Euclidean) metric */ struct metric_L2_Simple : public Metric { template<class T, class DataSource> struct traits { typedef L2_Simple_Adaptor<T, DataSource> distance_t; }; }; /** Metaprogramming helper traits class for the SO3_InnerProdQuat metric */ struct metric_SO2 : public Metric { template<class T, class DataSource> struct traits { typedef SO2_Adaptor<T, DataSource> distance_t; }; }; /** Metaprogramming helper traits class for the SO3_InnerProdQuat metric */ struct metric_SO3 : public Metric { template<class T, class DataSource> struct traits { typedef SO3_Adaptor<T, DataSource> distance_t; }; }; /** @} */ /** @addtogroup param_grp Parameter structs * @{ */ /** Parameters (see README.md) */ struct KDTreeSingleIndexAdaptorParams { KDTreeSingleIndexAdaptorParams(size_t _leaf_max_size = 10) : leaf_max_size(_leaf_max_size) {} size_t leaf_max_size; }; /** Search options for 
KDTreeSingleIndexAdaptor::findNeighbors() */
struct SearchParams {
	/** Note: The first argument (checks_IGNORED_) is ignored, but kept for compatibility with the FLANN interface */
	SearchParams(int checks_IGNORED_ = 32, float eps_ = 0, bool sorted_ = true ) :
		checks(checks_IGNORED_), eps(eps_), sorted(sorted_) {}

	int checks;  //!< Ignored parameter (Kept for compatibility with the FLANN interface).
	float eps;   //!< search for eps-approximate neighbours (default: 0)
	bool sorted; //!< only for radius search, require neighbours sorted by distance (default: true)
};
/** @} */

/** @addtogroup memalloc_grp Memory allocation
  * @{ */

/**
 * Allocates (using C's malloc) a generic type T.
 *
 * Params:
 *     count = number of instances to allocate.
 * Returns: pointer (of type T*) to memory buffer
 */
template <typename T>
inline T* allocate(size_t count = 1) {
	T* mem = static_cast<T*>( ::malloc(sizeof(T)*count));
	return mem;
}

/**
 * Pooled storage allocator
 *
 * The following routines allow for the efficient allocation of storage in
 * small chunks from a specified pool.  Rather than allowing each structure
 * to be freed individually, an entire pool of storage is freed at once.
 * This method has two advantages over just using malloc() and free().  First,
 * it is far more efficient for allocating small objects, as there is
 * no overhead for remembering all the information needed to free each
 * object or consolidating fragmented memory.  Second, the decision about
 * how long to keep an object is made at the time of allocation, and there
 * is no need to track down all the objects to free them.
 */
const size_t WORDSIZE = 16;    // Alignment unit in bytes; must be a power of 2.
const size_t BLOCKSIZE = 8192; // Minimum bytes requested from the system at once; multiple of WORDSIZE.

class PooledAllocator {
	/* We maintain memory alignment to word boundaries by requiring that all
	   allocations be in multiples of the machine wordsize. */
	/* Size of machine word in bytes. Must be power of 2. */
	/* Minimum number of bytes requested at a time from the system.  Must be multiple of WORDSIZE. */

	size_t remaining;  /* Number of bytes left in current block of storage. */
	void* base;        /* Pointer to base of current block of storage. */
	void* loc;         /* Current location in block to next allocate memory. */

	// Resets bookkeeping to the "no blocks held" state (does NOT free anything).
	void internal_init() {
		remaining = 0;
		base = NULL;
		usedMemory = 0;
		wastedMemory = 0;
	}

public:
	size_t usedMemory;   // Total bytes handed out to callers.
	size_t wastedMemory; // Bytes discarded at block tails when a request did not fit.

	/** Default constructor. Initializes a new pool. */
	PooledAllocator() {
		internal_init();
	}

	/**
	 * Destructor. Frees all the memory allocated in this pool.
	 */
	~PooledAllocator() {
		free_all();
	}

	/** Frees all allocated memory chunks */
	void free_all() {
		// Blocks form a singly-linked list: the first word of each block
		// stores the pointer to the previously allocated block.
		while (base != NULL) {
			void *prev = *(static_cast<void**>( base)); /* Get pointer to prev block. */
			::free(base);
			base = prev;
		}
		internal_init();
	}

	/**
	 * Returns a pointer to a piece of new memory of the given size in bytes
	 * allocated from the pool.
	 */
	void* malloc(const size_t req_size) {
		/* Round size up to a multiple of wordsize.  The following expression
		   only works for WORDSIZE that is a power of 2, by masking last bits of
		   incremented size to zero.
		 */
		const size_t size = (req_size + (WORDSIZE - 1)) & ~(WORDSIZE - 1);

		/* Check whether a new block must be allocated.  Note that the first word
		   of a block is reserved for a pointer to the previous block.
		 */
		if (size > remaining) {

			wastedMemory += remaining;

			/* Allocate new storage. */
			const size_t blocksize = (size + sizeof(void*) + (WORDSIZE - 1) > BLOCKSIZE) ?
				size + sizeof(void*) + (WORDSIZE - 1) : BLOCKSIZE;

			// use the standard C malloc to allocate memory
			void* m = ::malloc(blocksize);
			if (!m) {
				// NOTE(review): failure is reported on stderr and NULL is
				// returned rather than throwing — callers must check.
				fprintf(stderr, "Failed to allocate memory.\n");
				return NULL;
			}

			/* Fill first word of new block with pointer to previous block.
			 */
			static_cast<void**>(m)[0] = base;
			base = m;

			size_t shift = 0;
			//int size_t = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1);

			remaining = blocksize - sizeof(void*) - shift;
			loc = (static_cast<char*>(m) + sizeof(void*) + shift);
		}
		void* rloc = loc;
		loc = static_cast<char*>(loc) + size;
		remaining -= size;
		usedMemory += size;

		return rloc;
	}

	/**
	 * Allocates (using this pool) a generic type T.
	 *
	 * Params:
	 *     count = number of instances to allocate.
	 * Returns: pointer (of type T*) to memory buffer
	 */
	template <typename T>
	T* allocate(const size_t count = 1) {
		T* mem = static_cast<T*>(this->malloc(sizeof(T)*count));
		return mem;
	}

};
/** @} */

/** @addtogroup nanoflann_metaprog_grp Auxiliary metaprogramming stuff
  * @{ */

/** Used to declare fixed-size arrays when DIM>0, dynamically-allocated vectors when DIM=-1.
  * Fixed size version for a generic DIM:
  */
template <int DIM, typename T>
struct array_or_vector_selector {
	typedef std::array<T, DIM> container_t;
};
/** Dynamic size version */
template <typename T>
struct array_or_vector_selector<-1, T> {
	typedef std::vector<T> container_t;
};

/** @} */

/** kd-tree base-class
 *
 * Contains the member functions common to the classes KDTreeSingleIndexAdaptor and KDTreeSingleIndexDynamicAdaptor_.
 *
 * \tparam Derived The name of the class which inherits this class.
 * \tparam DatasetAdaptor The user-provided adaptor (see comments above).
 * \tparam Distance The distance metric to use, these are all classes derived from nanoflann::Metric
 * \tparam DIM Dimensionality of data points (e.g. 3 for 3D points)
 * \tparam IndexType Will be typically size_t or int
 */
template<class Derived, typename Distance, class DatasetAdaptor, int DIM = -1, typename IndexType = size_t>
class KDTreeBaseClass {

public:
	/** Frees the previously-built index. Automatically called within buildIndex().
	 */
	void freeIndex(Derived &obj) {
		obj.pool.free_all();
		obj.root_node = NULL;
		obj.m_size_at_index_build = 0;
	}

	typedef typename Distance::ElementType ElementType;
	typedef typename Distance::DistanceType DistanceType;

	/*--------------------- Internal Data Structures --------------------------*/
	struct Node {
		/** Union used because a node can be either a LEAF node or a non-leaf node, so both data fields are never used simultaneously */
		union {
			struct leaf {
				IndexType left, right;  //!< Indices of points in leaf node
			} lr;
			struct nonleaf {
				int divfeat;                 //!< Dimension used for subdivision.
				ElementType divlow, divhigh; //!< The values used for subdivision.
			} sub;
		} node_type;
		Node *child1, *child2;  //!< Child nodes (both=NULL mean it's a leaf node)
	};

	typedef Node* NodePtr;

	struct Interval {
		ElementType low, high;
	};

	/**
	 * Array of indices to vectors in the dataset.
	 */
	std::vector<IndexType> vind;

	NodePtr root_node;

	size_t m_leaf_max_size;

	size_t m_size;                //!< Number of current points in the dataset
	size_t m_size_at_index_build; //!< Number of points in the dataset when the index was built
	int dim;                      //!< Dimensionality of each data point

	/** Define "BoundingBox" as a fixed-size or variable-size container depending on "DIM" */
	typedef typename array_or_vector_selector<DIM, Interval>::container_t BoundingBox;

	/** Define "distance_vector_t" as a fixed-size or variable-size container depending on "DIM" */
	typedef typename array_or_vector_selector<DIM, DistanceType>::container_t distance_vector_t;

	/** The KD-tree used to find neighbours */
	BoundingBox root_bbox;

	/**
	 * Pooled memory allocator.
	 *
	 * Using a pooled memory allocator is more efficient
	 * than allocating memory directly when there is a large
	 * number of small memory allocations.
	 */
	PooledAllocator pool;

	/** Returns number of points in dataset  */
	size_t size(const Derived &obj) const { return obj.m_size; }

	/** Returns the length of each point in the dataset */
	size_t veclen(const Derived &obj) {
		return static_cast<size_t>(DIM>0 ? DIM : obj.dim);
	}

	/// Helper accessor to the dataset points:
	inline ElementType dataset_get(const Derived &obj, size_t idx, int component) const{
		return obj.dataset.kdtree_get_pt(idx, component);
	}

	/**
	 * Computes the index memory usage
	 * Returns: memory used by the index
	 */
	size_t usedMemory(Derived &obj) {
		return obj.pool.usedMemory + obj.pool.wastedMemory + obj.dataset.kdtree_get_point_count() * sizeof(IndexType);  // pool memory and vind array memory
	}

	/** Scans points ind[0..count-1] and reports the min/max of coordinate `element`. */
	void computeMinMax(const Derived &obj, IndexType* ind, IndexType count, int element, ElementType& min_elem, ElementType& max_elem) {
		min_elem = dataset_get(obj, ind[0],element);
		max_elem = dataset_get(obj, ind[0],element);
		for (IndexType i = 1; i < count; ++i) {
			ElementType val = dataset_get(obj, ind[i], element);
			if (val < min_elem) min_elem = val;
			if (val > max_elem) max_elem = val;
		}
	}

	/**
	 * Create a tree node that subdivides the list of vecs from vind[first]
	 * to vind[last].  The routine is called recursively on each sublist.
	 *
	 * @param left index of the first vector
	 * @param right index of the last vector
	 */
	NodePtr divideTree(Derived &obj, const IndexType left, const IndexType right, BoundingBox& bbox) {
		NodePtr node = obj.pool.template allocate<Node>(); // allocate memory

		/* If too few exemplars remain, then make this a leaf node. */
		if ( (right - left) <= static_cast<IndexType>(obj.m_leaf_max_size) ) {
			node->child1 = node->child2 = NULL;    /* Mark as leaf node. */
			node->node_type.lr.left = left;
			node->node_type.lr.right = right;

			// compute bounding-box of leaf points
			for (int i = 0; i < (DIM > 0 ?
					DIM : obj.dim); ++i) {
				bbox[i].low = dataset_get(obj, obj.vind[left], i);
				bbox[i].high = dataset_get(obj, obj.vind[left], i);
			}
			for (IndexType k = left + 1; k < right; ++k) {
				for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
					if (bbox[i].low > dataset_get(obj, obj.vind[k], i)) bbox[i].low = dataset_get(obj, obj.vind[k], i);
					if (bbox[i].high < dataset_get(obj, obj.vind[k], i)) bbox[i].high = dataset_get(obj, obj.vind[k], i);
				}
			}
		}
		else {
			IndexType idx;
			int cutfeat;
			ElementType cutval;
			middleSplit_(obj, &obj.vind[0] + left, right - left, idx, cutfeat, cutval, bbox);

			node->node_type.sub.divfeat = cutfeat;

			// Recurse on each half with the bounding box clipped at the cut value.
			BoundingBox left_bbox(bbox);
			left_bbox[cutfeat].high = cutval;
			node->child1 = divideTree(obj, left, left + idx, left_bbox);

			BoundingBox right_bbox(bbox);
			right_bbox[cutfeat].low = cutval;
			node->child2 = divideTree(obj, left + idx, right, right_bbox);

			node->node_type.sub.divlow = left_bbox[cutfeat].high;
			node->node_type.sub.divhigh = right_bbox[cutfeat].low;

			// Parent bbox = union of the two children's (possibly shrunken) boxes.
			for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
				bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low);
				bbox[i].high = std::max(left_bbox[i].high, right_bbox[i].high);
			}
		}

		return node;
	}

	/** Chooses the cut dimension (largest spread among dimensions whose bbox
	  * span is within EPS of the maximum span) and the cut value (bbox midpoint
	  * clamped into the actual data range), then partitions `ind` and returns
	  * the split position in `index`. */
	void middleSplit_(Derived &obj, IndexType* ind, IndexType count, IndexType& index, int& cutfeat, ElementType& cutval, const BoundingBox& bbox) {
		const ElementType EPS = static_cast<ElementType>(0.00001);
		ElementType max_span = bbox[0].high-bbox[0].low;
		for (int i = 1; i < (DIM > 0 ? DIM : obj.dim); ++i) {
			ElementType span = bbox[i].high - bbox[i].low;
			if (span > max_span) {
				max_span = span;
			}
		}
		ElementType max_spread = -1;
		cutfeat = 0;
		for (int i = 0; i < (DIM > 0 ?
				DIM : obj.dim); ++i) {
			ElementType span = bbox[i].high-bbox[i].low;
			if (span >= (1 - EPS) * max_span) {
				ElementType min_elem, max_elem;
				computeMinMax(obj, ind, count, i, min_elem, max_elem);
				ElementType spread = max_elem - min_elem;;
				if (spread > max_spread) {
					cutfeat = i;
					max_spread = spread;
				}
			}
		}
		// split in the middle
		ElementType split_val = (bbox[cutfeat].low + bbox[cutfeat].high) / 2;
		ElementType min_elem, max_elem;
		computeMinMax(obj, ind, count, cutfeat, min_elem, max_elem);

		// Clamp the midpoint into the actual data range on the cut dimension.
		if (split_val < min_elem) cutval = min_elem;
		else if (split_val > max_elem) cutval = max_elem;
		else cutval = split_val;

		IndexType lim1, lim2;
		planeSplit(obj, ind, count, cutfeat, cutval, lim1, lim2);

		// Prefer the partition boundary closest to count/2 for a balanced tree.
		if (lim1 > count / 2) index = lim1;
		else if (lim2 < count / 2) index = lim2;
		else index = count/2;
	}

	/**
	 *  Subdivide the list of points by a plane perpendicular on axe corresponding
	 *  to the 'cutfeat' dimension at 'cutval' position.
	 *
	 *  On return:
	 *  dataset[ind[0..lim1-1]][cutfeat]<cutval
	 *  dataset[ind[lim1..lim2-1]][cutfeat]==cutval
	 *  dataset[ind[lim2..count]][cutfeat]>cutval
	 */
	void planeSplit(Derived &obj, IndexType* ind, const IndexType count, int cutfeat, ElementType &cutval, IndexType& lim1, IndexType& lim2) {
		/* Move vector indices for left subtree to front of list. */
		IndexType left = 0;
		IndexType right = count-1;
		for (;; ) {
			while (left <= right && dataset_get(obj, ind[left], cutfeat) < cutval) ++left;
			while (right && left <= right && dataset_get(obj, ind[right], cutfeat) >= cutval) --right;
			if (left > right || !right) break;  // "!right" was added to support unsigned Index types
			std::swap(ind[left], ind[right]);
			++left;
			--right;
		}
		/* If either list is empty, it means that all remaining features
		 * are identical. Split in the middle to maintain a balanced tree.
		 */
		lim1 = left;
		right = count-1;
		for (;; ) {
			while (left <= right && dataset_get(obj, ind[left], cutfeat) <= cutval) ++left;
			while (right && left <= right && dataset_get(obj, ind[right], cutfeat) > cutval) --right;
			if (left > right || !right) break;  // "!right" was added to support unsigned Index types
			std::swap(ind[left], ind[right]);
			++left;
			--right;
		}
		lim2 = left;
	}

	/** Accumulates, per axis into `dists`, the distance from `vec` to the root
	  * bounding box; returns the total (zero when the query is inside the box). */
	DistanceType computeInitialDistances(const Derived &obj, const ElementType* vec, distance_vector_t& dists) const {
		assert(vec);
		DistanceType distsq = DistanceType();

		for (int i = 0; i < (DIM>0 ? DIM : obj.dim); ++i) {
			if (vec[i] < obj.root_bbox[i].low) {
				dists[i] = obj.distance.accum_dist(vec[i], obj.root_bbox[i].low, i);
				distsq += dists[i];
			}
			if (vec[i] > obj.root_bbox[i].high) {
				dists[i] = obj.distance.accum_dist(vec[i], obj.root_bbox[i].high, i);
				distsq += dists[i];
			}
		}
		return distsq;
	}

	// Pre-order serialization of the tree (each parent precedes its children).
	void save_tree(Derived &obj, FILE* stream, NodePtr tree) {
		save_value(stream, *tree);
		if (tree->child1 != NULL) {
			save_tree(obj, stream, tree->child1);
		}
		if (tree->child2 != NULL) {
			save_tree(obj, stream, tree->child2);
		}
	}

	// Mirror of save_tree(): nodes are re-allocated from this tree's pool.
	void load_tree(Derived &obj, FILE* stream, NodePtr& tree) {
		tree = obj.pool.template allocate<Node>();
		load_value(stream, *tree);
		if (tree->child1 != NULL) {
			load_tree(obj, stream, tree->child1);
		}
		if (tree->child2 != NULL) {
			load_tree(obj, stream, tree->child2);
		}
	}

	/**  Stores the index in a binary file.
	  *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so when loading the index object it must be constructed associated to the same source of data points used while building it.
	  * See the example: examples/saveload_example.cpp
	  * \sa loadIndex  */
	void saveIndex_(Derived &obj, FILE* stream) {
		save_value(stream, obj.m_size);
		save_value(stream, obj.dim);
		save_value(stream, obj.root_bbox);
		save_value(stream, obj.m_leaf_max_size);
		save_value(stream, obj.vind);
		save_tree(obj, stream, obj.root_node);
	}

	/**  Loads a previous index from a binary file.
	  *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so the index object must be constructed associated to the same source of data points used while building the index.
	  * See the example: examples/saveload_example.cpp
	  * \sa loadIndex  */
	void loadIndex_(Derived &obj, FILE* stream) {
		load_value(stream, obj.m_size);
		load_value(stream, obj.dim);
		load_value(stream, obj.root_bbox);
		load_value(stream, obj.m_leaf_max_size);
		load_value(stream, obj.vind);
		load_tree(obj, stream, obj.root_node);
	}

};

/** @addtogroup kdtrees_grp KD-tree classes and adaptors
  * @{ */

/** kd-tree static index
 *
 * Contains the k-d trees and other information for indexing a set of points
 * for nearest-neighbor matching.
 *
 *  The class "DatasetAdaptor" must provide the following interface (can be non-virtual, inlined methods):
 *
 *  \code
 *   // Must return the number of data points
 *   inline size_t kdtree_get_point_count() const { ... }
 *
 *
 *   // Must return the dim'th component of the idx'th point in the class:
 *   inline T kdtree_get_pt(const size_t idx, int dim) const { ... }
 *
 *   // Optional bounding-box computation: return false to default to a standard bbox computation loop.
 *   //   Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again.
 *   //   Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds)
 *   template <class BBOX>
 *   bool kdtree_get_bbox(BBOX &bb) const
 *   {
 *      bb[0].low = ...; bb[0].high = ...;  // 0th dimension limits
 *      bb[1].low = ...; bb[1].high = ...;  // 1st dimension limits
 *      ...
 *      return true;
 *   }
 *
 *  \endcode
 *
 * \tparam DatasetAdaptor The user-provided adaptor (see comments above).
 * \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.
 * \tparam DIM Dimensionality of data points (e.g.
 3 for 3D points)
 * \tparam IndexType Will be typically size_t or int
 */
template <typename Distance, class DatasetAdaptor, int DIM = -1, typename IndexType = size_t>
class KDTreeSingleIndexAdaptor : public KDTreeBaseClass<KDTreeSingleIndexAdaptor<Distance, DatasetAdaptor, DIM, IndexType>, Distance, DatasetAdaptor, DIM, IndexType>
{
public:
	/** Deleted copy constructor*/
	KDTreeSingleIndexAdaptor(const KDTreeSingleIndexAdaptor<Distance, DatasetAdaptor, DIM, IndexType>&) = delete;

	/**
	 * The dataset used by this index
	 */
	const DatasetAdaptor &dataset; //!< The source of our data

	const KDTreeSingleIndexAdaptorParams index_params;

	Distance distance;

	typedef typename nanoflann::KDTreeBaseClass<nanoflann::KDTreeSingleIndexAdaptor<Distance, DatasetAdaptor, DIM, IndexType>, Distance, DatasetAdaptor, DIM, IndexType> BaseClassRef;

	typedef typename BaseClassRef::ElementType ElementType;
	typedef typename BaseClassRef::DistanceType DistanceType;

	typedef typename BaseClassRef::Node Node;
	typedef Node* NodePtr;

	typedef typename BaseClassRef::Interval Interval;

	/** Define "BoundingBox" as a fixed-size or variable-size container depending on "DIM" */
	typedef typename BaseClassRef::BoundingBox BoundingBox;

	/** Define "distance_vector_t" as a fixed-size or variable-size container depending on "DIM" */
	typedef typename BaseClassRef::distance_vector_t distance_vector_t;

	/**
	 * KDTree constructor
	 *
	 * Refer to docs in README.md or online in https://github.com/jlblancoc/nanoflann
	 *
	 * The KD-Tree point dimension (the length of each point in the dataset, e.g. 3 for 3D points)
	 * is determined by means of:
	 *  - The \a DIM template parameter if >0 (highest priority)
	 *  - Otherwise, the \a dimensionality parameter of this constructor.
	 *
	 * @param inputData Dataset with the input features
	 * @param params Basically, the maximum leaf node size
	 */
	KDTreeSingleIndexAdaptor(const int dimensionality, const DatasetAdaptor& inputData, const KDTreeSingleIndexAdaptorParams& params = KDTreeSingleIndexAdaptorParams() ) :
		dataset(inputData), index_params(params), distance(inputData)
	{
		BaseClassRef::root_node = NULL;
		BaseClassRef::m_size = dataset.kdtree_get_point_count();
		BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;
		BaseClassRef::dim = dimensionality;
		if (DIM>0) BaseClassRef::dim = DIM; // compile-time DIM takes priority over the runtime argument
		BaseClassRef::m_leaf_max_size = params.leaf_max_size;

		// Create a permutable array of indices to the input vectors.
		init_vind();
	}

	/**
	 * Builds the index
	 */
	void buildIndex()
	{
		BaseClassRef::m_size = dataset.kdtree_get_point_count();
		BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;
		init_vind();
		this->freeIndex(*this);
		BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;
		if(BaseClassRef::m_size == 0) return;
		computeBoundingBox(BaseClassRef::root_bbox);
		BaseClassRef::root_node = this->divideTree(*this, 0, BaseClassRef::m_size, BaseClassRef::root_bbox );   // construct the tree
	}

	/** \name Query methods
	  * @{ */

	/**
	 * Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored inside
	 * the result object.
	 *
	 * Params:
	 *     result = the result object in which the indices of the nearest-neighbors are stored
	 *     vec = the vector for which to search the nearest neighbors
	 *
	 * \tparam RESULTSET Should be any ResultSet<DistanceType>
	 * \return  True if the requested neighbors could be found.
	 * \sa knnSearch, radiusSearch
	 */
	template <typename RESULTSET>
	bool findNeighbors(RESULTSET& result, const ElementType* vec, const SearchParams& searchParams) const
	{
		assert(vec);
		if (this->size(*this) == 0)
			return false;
		if (!BaseClassRef::root_node)
			throw std::runtime_error("[nanoflann] findNeighbors() called before building the index.");
		float epsError = 1 + searchParams.eps;

		distance_vector_t dists; // fixed or variable-sized container (depending on DIM)
		auto zero = static_cast<decltype(result.worstDist())>(0);
		assign(dists, (DIM > 0 ? DIM : BaseClassRef::dim), zero); // Fill it with zeros.
		DistanceType distsq = this->computeInitialDistances(*this, vec, dists);
		searchLevel(result, vec, BaseClassRef::root_node, distsq, dists, epsError);  // "count_leaf" parameter removed since was neither used nor returned to the user.
		return result.full();
	}

	/**
	 * Find the "num_closest" nearest neighbors to the \a query_point[0:dim-1]. Their indices are stored inside
	 * the result object.
	 *  \sa radiusSearch, findNeighbors
	 * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface.
	 * \return Number `N` of valid points in the result set. Only the first `N` entries in `out_indices` and `out_distances_sq` will be valid.
	 *         Return may be less than `num_closest` only if the number of elements in the tree is less than `num_closest`.
	 */
	size_t knnSearch(const ElementType *query_point, const size_t num_closest, IndexType *out_indices, DistanceType *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const
	{
		nanoflann::KNNResultSet<DistanceType,IndexType> resultSet(num_closest);
		resultSet.init(out_indices, out_distances_sq);
		this->findNeighbors(resultSet, query_point, nanoflann::SearchParams());
		return resultSet.size();
	}

	/**
	 * Find all the neighbors to \a query_point[0:dim-1] within a maximum radius.
	 *  The output is given as a vector of pairs, of which the first element is a point index and the second the corresponding distance.
	 *  Previous contents of \a IndicesDists are cleared.
	 *
	 *  If searchParams.sorted==true, the output list is sorted by ascending distances.
	 *
	 *  For a better performance, it is advisable to do a .reserve() on the vector if you have any wild guess about the number of expected matches.
	 *
	 *  \sa knnSearch, findNeighbors, radiusSearchCustomCallback
	 * \return The number of points within the given radius (i.e. indices.size() or dists.size() )
	 */
	size_t radiusSearch(const ElementType *query_point, const DistanceType &radius, std::vector<std::pair<IndexType, DistanceType> >& IndicesDists, const SearchParams& searchParams) const
	{
		RadiusResultSet<DistanceType, IndexType> resultSet(radius, IndicesDists);
		const size_t nFound = radiusSearchCustomCallback(query_point, resultSet, searchParams);
		if (searchParams.sorted)
			std::sort(IndicesDists.begin(), IndicesDists.end(), IndexDist_Sorter() );
		return nFound;
	}

	/**
	 * Just like radiusSearch() but with a custom callback class for each point found in the radius of the query.
	 * See the source of RadiusResultSet<> as a start point for your own classes.
	 * \sa radiusSearch
	 */
	template <class SEARCH_CALLBACK>
	size_t radiusSearchCustomCallback(const ElementType *query_point, SEARCH_CALLBACK &resultSet, const SearchParams& searchParams = SearchParams() ) const
	{
		this->findNeighbors(resultSet, query_point, searchParams);
		return resultSet.size();
	}

	/** @} */

public:
	/** Make sure the auxiliary list \a vind has the same size than the current dataset, and re-generate if size has changed. */
	void init_vind()
	{
		// Create a permutable array of indices to the input vectors.
		BaseClassRef::m_size = dataset.kdtree_get_point_count();
		if (BaseClassRef::vind.size() != BaseClassRef::m_size) BaseClassRef::vind.resize(BaseClassRef::m_size);
		for (size_t i = 0; i < BaseClassRef::m_size; i++) BaseClassRef::vind[i] = i;
	}

	// Computes the dataset's bounding box, either via the adaptor's optional
	// kdtree_get_bbox() or by scanning every point.
	void computeBoundingBox(BoundingBox& bbox)
	{
		resize(bbox, (DIM > 0 ? DIM : BaseClassRef::dim));
		if (dataset.kdtree_get_bbox(bbox))
		{
			// Done! It was implemented in derived class
		}
		else
		{
			const size_t N = dataset.kdtree_get_point_count();
			if (!N) throw std::runtime_error("[nanoflann] computeBoundingBox() called but no data points found.");
			for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) {
				bbox[i].low =
					bbox[i].high = this->dataset_get(*this, 0, i);
			}
			for (size_t k = 1; k < N; ++k) {
				for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) {
					if (this->dataset_get(*this, k, i) < bbox[i].low) bbox[i].low = this->dataset_get(*this, k, i);
					if (this->dataset_get(*this, k, i) > bbox[i].high) bbox[i].high = this->dataset_get(*this, k, i);
				}
			}
		}
	}

	/**
	 * Performs an exact search in the tree starting from a node.
	 * \tparam RESULTSET Should be any ResultSet<DistanceType>
	 * \return true if the search should be continued, false if the results are sufficient
	 */
	template <class RESULTSET>
	bool searchLevel(RESULTSET& result_set, const ElementType* vec, const NodePtr node, DistanceType mindistsq, distance_vector_t& dists, const float epsError) const
	{
		/* If this is a leaf node, then do check and return. */
		if ((node->child1 == NULL) && (node->child2 == NULL)) {
			//count_leaf += (node->lr.right-node->lr.left);  // Removed since was neither used nor returned to the user.
			DistanceType worst_dist = result_set.worstDist();
			for (IndexType i = node->node_type.lr.left; i<node->node_type.lr.right; ++i) {
				const IndexType index = BaseClassRef::vind[i];// reorder... : i;
				DistanceType dist = distance.evalMetric(vec, index, (DIM > 0 ? DIM : BaseClassRef::dim));
				if (dist < worst_dist) {
					if(!result_set.addPoint(dist, BaseClassRef::vind[i])) {
						// the resultset doesn't want to receive any more points, we're done searching!
						return false;
					}
				}
			}
			return true;
		}

		/* Which child branch should be taken first? */
		int idx = node->node_type.sub.divfeat;
		ElementType val = vec[idx];
		DistanceType diff1 = val - node->node_type.sub.divlow;
		DistanceType diff2 = val - node->node_type.sub.divhigh;

		NodePtr bestChild;
		NodePtr otherChild;
		DistanceType cut_dist;
		if ((diff1 + diff2) < 0) {
			bestChild = node->child1;
			otherChild = node->child2;
			cut_dist = distance.accum_dist(val, node->node_type.sub.divhigh, idx);
		}
		else {
			bestChild = node->child2;
			otherChild = node->child1;
			cut_dist = distance.accum_dist( val, node->node_type.sub.divlow, idx);
		}

		/* Call recursively to search next level down. */
		if(!searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError)) {
			// the resultset doesn't want to receive any more points, we're done searching!
			return false;
		}

		// Temporarily substitute this axis' contribution with the distance to
		// the cut plane; visit the far child only if the lower bound can still
		// beat the current worst result, then restore the axis distance.
		DistanceType dst = dists[idx];
		mindistsq = mindistsq + cut_dist - dst;
		dists[idx] = cut_dist;
		if (mindistsq*epsError <= result_set.worstDist()) {
			if(!searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError)) {
				// the resultset doesn't want to receive any more points, we're done searching!
				return false;
			}
		}
		dists[idx] = dst;
		return true;
	}

public:
	/**  Stores the index in a binary file.
	  *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so when loading the index object it must be constructed associated to the same source of data points used while building it.
	  * See the example: examples/saveload_example.cpp
	  * \sa loadIndex  */
	void saveIndex(FILE* stream)
	{
		this->saveIndex_(*this, stream);
	}

	/**  Loads a previous index from a binary file.
	  *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so the index object must be constructed associated to the same source of data points used while building the index.
	  * See the example: examples/saveload_example.cpp
	  * \sa loadIndex  */
	void loadIndex(FILE* stream)
	{
		this->loadIndex_(*this, stream);
	}

};   // class KDTree


/** kd-tree dynamic index
 *
 * Contains the k-d trees and other information for indexing a set of points
 * for nearest-neighbor matching.
* * The class "DatasetAdaptor" must provide the following interface (can be non-virtual, inlined methods): * * \code * // Must return the number of data poins * inline size_t kdtree_get_point_count() const { ... } * * // Must return the dim'th component of the idx'th point in the class: * inline T kdtree_get_pt(const size_t idx, int dim) const { ... } * * // Optional bounding-box computation: return false to default to a standard bbox computation loop. * // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again. * // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds) * template <class BBOX> * bool kdtree_get_bbox(BBOX &bb) const * { * bb[0].low = ...; bb[0].high = ...; // 0th dimension limits * bb[1].low = ...; bb[1].high = ...; // 1st dimension limits * ... * return true; * } * * \endcode * * \tparam DatasetAdaptor The user-provided adaptor (see comments above). * \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. * \tparam DIM Dimensionality of data points (e.g. 
3 for 3D points) * \tparam IndexType Will be typically size_t or int */ template <typename Distance, class DatasetAdaptor, int DIM = -1, typename IndexType = size_t> class KDTreeSingleIndexDynamicAdaptor_ : public KDTreeBaseClass<KDTreeSingleIndexDynamicAdaptor_<Distance, DatasetAdaptor, DIM, IndexType>, Distance, DatasetAdaptor, DIM, IndexType> { public: /** * The dataset used by this index */ const DatasetAdaptor &dataset; //!< The source of our data KDTreeSingleIndexAdaptorParams index_params; std::vector<int> &treeIndex; Distance distance; typedef typename nanoflann::KDTreeBaseClass<nanoflann::KDTreeSingleIndexDynamicAdaptor_<Distance, DatasetAdaptor, DIM, IndexType>, Distance, DatasetAdaptor, DIM, IndexType> BaseClassRef; typedef typename BaseClassRef::ElementType ElementType; typedef typename BaseClassRef::DistanceType DistanceType; typedef typename BaseClassRef::Node Node; typedef Node* NodePtr; typedef typename BaseClassRef::Interval Interval; /** Define "BoundingBox" as a fixed-size or variable-size container depending on "DIM" */ typedef typename BaseClassRef::BoundingBox BoundingBox; /** Define "distance_vector_t" as a fixed-size or variable-size container depending on "DIM" */ typedef typename BaseClassRef::distance_vector_t distance_vector_t; /** * KDTree constructor * * Refer to docs in README.md or online in https://github.com/jlblancoc/nanoflann * * The KD-Tree point dimension (the length of each point in the datase, e.g. 3 for 3D points) * is determined by means of: * - The \a DIM template parameter if >0 (highest priority) * - Otherwise, the \a dimensionality parameter of this constructor. 
* * @param inputData Dataset with the input features * @param params Basically, the maximum leaf node size */ KDTreeSingleIndexDynamicAdaptor_(const int dimensionality, const DatasetAdaptor& inputData, std::vector<int>& treeIndex_, const KDTreeSingleIndexAdaptorParams& params = KDTreeSingleIndexAdaptorParams()) : dataset(inputData), index_params(params), treeIndex(treeIndex_), distance(inputData) { BaseClassRef::root_node = NULL; BaseClassRef::m_size = 0; BaseClassRef::m_size_at_index_build = 0; BaseClassRef::dim = dimensionality; if (DIM>0) BaseClassRef::dim = DIM; BaseClassRef::m_leaf_max_size = params.leaf_max_size; } /** Assignment operator definiton */ KDTreeSingleIndexDynamicAdaptor_ operator=( const KDTreeSingleIndexDynamicAdaptor_& rhs ) { KDTreeSingleIndexDynamicAdaptor_ tmp( rhs ); std::swap( BaseClassRef::vind, tmp.BaseClassRef::vind ); std::swap( BaseClassRef::m_leaf_max_size, tmp.BaseClassRef::m_leaf_max_size ); std::swap( index_params, tmp.index_params ); std::swap( treeIndex, tmp.treeIndex ); std::swap( BaseClassRef::m_size, tmp.BaseClassRef::m_size ); std::swap( BaseClassRef::m_size_at_index_build, tmp.BaseClassRef::m_size_at_index_build ); std::swap( BaseClassRef::root_node, tmp.BaseClassRef::root_node ); std::swap( BaseClassRef::root_bbox, tmp.BaseClassRef::root_bbox ); std::swap( BaseClassRef::pool, tmp.BaseClassRef::pool ); return *this; } /** * Builds the index */ void buildIndex() { BaseClassRef::m_size = BaseClassRef::vind.size(); this->freeIndex(*this); BaseClassRef::m_size_at_index_build = BaseClassRef::m_size; if(BaseClassRef::m_size == 0) return; computeBoundingBox(BaseClassRef::root_bbox); BaseClassRef::root_node = this->divideTree(*this, 0, BaseClassRef::m_size, BaseClassRef::root_bbox ); // construct the tree } /** \name Query methods * @{ */ /** * Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored inside * the result object. 
* * Params: * result = the result object in which the indices of the nearest-neighbors are stored * vec = the vector for which to search the nearest neighbors * * \tparam RESULTSET Should be any ResultSet<DistanceType> * \return True if the requested neighbors could be found. * \sa knnSearch, radiusSearch */ template <typename RESULTSET> bool findNeighbors(RESULTSET& result, const ElementType* vec, const SearchParams& searchParams) const { assert(vec); if (this->size(*this) == 0) return false; if (!BaseClassRef::root_node) return false; float epsError = 1 + searchParams.eps; distance_vector_t dists; // fixed or variable-sized container (depending on DIM) assign(dists, (DIM > 0 ? DIM : BaseClassRef::dim), 0); // Fill it with zeros. DistanceType distsq = this->computeInitialDistances(*this, vec, dists); searchLevel(result, vec, BaseClassRef::root_node, distsq, dists, epsError); // "count_leaf" parameter removed since was neither used nor returned to the user. return result.full(); } /** * Find the "num_closest" nearest neighbors to the \a query_point[0:dim-1]. Their indices are stored inside * the result object. * \sa radiusSearch, findNeighbors * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface. * \return Number `N` of valid points in the result set. Only the first `N` entries in `out_indices` and `out_distances_sq` will be valid. * Return may be less than `num_closest` only if the number of elements in the tree is less than `num_closest`. */ size_t knnSearch(const ElementType *query_point, const size_t num_closest, IndexType *out_indices, DistanceType *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const { nanoflann::KNNResultSet<DistanceType,IndexType> resultSet(num_closest); resultSet.init(out_indices, out_distances_sq); this->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); return resultSet.size(); } /** * Find all the neighbors to \a query_point[0:dim-1] within a maximum radius. 
* The output is given as a vector of pairs, of which the first element is a point index and the second the corresponding distance. * Previous contents of \a IndicesDists are cleared. * * If searchParams.sorted==true, the output list is sorted by ascending distances. * * For a better performance, it is advisable to do a .reserve() on the vector if you have any wild guess about the number of expected matches. * * \sa knnSearch, findNeighbors, radiusSearchCustomCallback * \return The number of points within the given radius (i.e. indices.size() or dists.size() ) */ size_t radiusSearch(const ElementType *query_point, const DistanceType &radius, std::vector<std::pair<IndexType,DistanceType> >& IndicesDists, const SearchParams& searchParams) const { RadiusResultSet<DistanceType,IndexType> resultSet(radius, IndicesDists); const size_t nFound = radiusSearchCustomCallback(query_point, resultSet, searchParams); if (searchParams.sorted) std::sort(IndicesDists.begin(), IndicesDists.end(), IndexDist_Sorter() ); return nFound; } /** * Just like radiusSearch() but with a custom callback class for each point found in the radius of the query. * See the source of RadiusResultSet<> as a start point for your own classes. * \sa radiusSearch */ template <class SEARCH_CALLBACK> size_t radiusSearchCustomCallback(const ElementType *query_point, SEARCH_CALLBACK &resultSet, const SearchParams& searchParams = SearchParams() ) const { this->findNeighbors(resultSet, query_point, searchParams); return resultSet.size(); } /** @} */ public: void computeBoundingBox(BoundingBox& bbox) { resize(bbox, (DIM > 0 ? DIM : BaseClassRef::dim)); if (dataset.kdtree_get_bbox(bbox)) { // Done! It was implemented in derived class } else { const size_t N = BaseClassRef::m_size; if (!N) throw std::runtime_error("[nanoflann] computeBoundingBox() called but no data points found."); for (int i = 0; i < (DIM > 0 ? 
DIM : BaseClassRef::dim); ++i) { bbox[i].low = bbox[i].high = this->dataset_get(*this, BaseClassRef::vind[0], i); } for (size_t k = 1; k < N; ++k) { for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) { if (this->dataset_get(*this, BaseClassRef::vind[k], i) < bbox[i].low) bbox[i].low = this->dataset_get(*this, BaseClassRef::vind[k], i); if (this->dataset_get(*this, BaseClassRef::vind[k], i) > bbox[i].high) bbox[i].high = this->dataset_get(*this, BaseClassRef::vind[k], i); } } } } /** * Performs an exact search in the tree starting from a node. * \tparam RESULTSET Should be any ResultSet<DistanceType> */ template <class RESULTSET> void searchLevel(RESULTSET& result_set, const ElementType* vec, const NodePtr node, DistanceType mindistsq, distance_vector_t& dists, const float epsError) const { /* If this is a leaf node, then do check and return. */ if ((node->child1 == NULL) && (node->child2 == NULL)) { //count_leaf += (node->lr.right-node->lr.left); // Removed since was neither used nor returned to the user. DistanceType worst_dist = result_set.worstDist(); for (IndexType i = node->node_type.lr.left; i < node->node_type.lr.right; ++i) { const IndexType index = BaseClassRef::vind[i];// reorder... : i; if(treeIndex[index] == -1) continue; DistanceType dist = distance.evalMetric(vec, index, (DIM > 0 ? DIM : BaseClassRef::dim)); if (dist<worst_dist) { result_set.addPoint(dist, BaseClassRef::vind[i]); } } return; } /* Which child branch should be taken first? 
*/ int idx = node->node_type.sub.divfeat; ElementType val = vec[idx]; DistanceType diff1 = val - node->node_type.sub.divlow; DistanceType diff2 = val - node->node_type.sub.divhigh; NodePtr bestChild; NodePtr otherChild; DistanceType cut_dist; if ((diff1 + diff2) < 0) { bestChild = node->child1; otherChild = node->child2; cut_dist = distance.accum_dist(val, node->node_type.sub.divhigh, idx); } else { bestChild = node->child2; otherChild = node->child1; cut_dist = distance.accum_dist( val, node->node_type.sub.divlow, idx); } /* Call recursively to search next level down. */ searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError); DistanceType dst = dists[idx]; mindistsq = mindistsq + cut_dist - dst; dists[idx] = cut_dist; if (mindistsq*epsError <= result_set.worstDist()) { searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError); } dists[idx] = dst; } public: /** Stores the index in a binary file. * IMPORTANT NOTE: The set of data points is NOT stored in the file, so when loading the index object it must be constructed associated to the same source of data points used while building it. * See the example: examples/saveload_example.cpp * \sa loadIndex */ void saveIndex(FILE* stream) { this->saveIndex_(*this, stream); } /** Loads a previous index from a binary file. * IMPORTANT NOTE: The set of data points is NOT stored in the file, so the index object must be constructed associated to the same source of data points used while building the index. * See the example: examples/saveload_example.cpp * \sa loadIndex */ void loadIndex(FILE* stream) { this->loadIndex_(*this, stream); } }; /** kd-tree dynaimic index * * class to create multiple static index and merge their results to behave as single dynamic index as proposed in Logarithmic Approach. * * Example of usage: * examples/dynamic_pointcloud_example.cpp * * \tparam DatasetAdaptor The user-provided adaptor (see comments above). 
* \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. * \tparam DIM Dimensionality of data points (e.g. 3 for 3D points) * \tparam IndexType Will be typically size_t or int */ template <typename Distance, class DatasetAdaptor,int DIM = -1, typename IndexType = size_t> class KDTreeSingleIndexDynamicAdaptor { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::DistanceType DistanceType; protected: size_t m_leaf_max_size; size_t treeCount; size_t pointCount; /** * The dataset used by this index */ const DatasetAdaptor &dataset; //!< The source of our data std::vector<int> treeIndex; //!< treeIndex[idx] is the index of tree in which point at idx is stored. treeIndex[idx]=-1 means that point has been removed. KDTreeSingleIndexAdaptorParams index_params; int dim; //!< Dimensionality of each data point typedef KDTreeSingleIndexDynamicAdaptor_<Distance, DatasetAdaptor, DIM> index_container_t; std::vector<index_container_t> index; public: /** Get a const ref to the internal list of indices; the number of indices is adapted dynamically as * the dataset grows in size. */ const std::vector<index_container_t> & getAllIndices() const { return index; } private: /** finds position of least significant unset bit */ int First0Bit(IndexType num) { int pos = 0; while(num&1) { num = num>>1; pos++; } return pos; } /** Creates multiple empty trees to handle dynamic support */ void init() { typedef KDTreeSingleIndexDynamicAdaptor_<Distance, DatasetAdaptor, DIM> my_kd_tree_t; std::vector<my_kd_tree_t> index_(treeCount, my_kd_tree_t(dim /*dim*/, dataset, treeIndex, index_params)); index=index_; } public: Distance distance; /** * KDTree constructor * * Refer to docs in README.md or online in https://github.com/jlblancoc/nanoflann * * The KD-Tree point dimension (the length of each point in the datase, e.g. 
3 for 3D points) * is determined by means of: * - The \a DIM template parameter if >0 (highest priority) * - Otherwise, the \a dimensionality parameter of this constructor. * * @param inputData Dataset with the input features * @param params Basically, the maximum leaf node size */ KDTreeSingleIndexDynamicAdaptor(const int dimensionality, const DatasetAdaptor& inputData, const KDTreeSingleIndexAdaptorParams& params = KDTreeSingleIndexAdaptorParams() , const size_t maximumPointCount = 1000000000U) : dataset(inputData), index_params(params), distance(inputData) { treeCount = std::log2(maximumPointCount); pointCount = 0U; dim = dimensionality; treeIndex.clear(); if (DIM > 0) dim = DIM; m_leaf_max_size = params.leaf_max_size; init(); int num_initial_points = dataset.kdtree_get_point_count(); if (num_initial_points > 0) { addPoints(0, num_initial_points - 1); } } /** Deleted copy constructor*/ KDTreeSingleIndexDynamicAdaptor(const KDTreeSingleIndexDynamicAdaptor<Distance, DatasetAdaptor, DIM, IndexType>&) = delete; /** Add points to the set, Inserts all points from [start, end] */ void addPoints(IndexType start, IndexType end) { int count = end - start + 1; treeIndex.resize(treeIndex.size() + count); for(IndexType idx = start; idx <= end; idx++) { int pos = First0Bit(pointCount); index[pos].vind.clear(); treeIndex[pointCount]=pos; for(int i = 0; i < pos; i++) { for(int j = 0; j < static_cast<int>(index[i].vind.size()); j++) { index[pos].vind.push_back(index[i].vind[j]); treeIndex[index[i].vind[j]] = pos; } index[i].vind.clear(); index[i].freeIndex(index[i]); } index[pos].vind.push_back(idx); index[pos].buildIndex(); pointCount++; } } /** Remove a point from the set (Lazy Deletion) */ void removePoint(size_t idx) { if(idx >= pointCount) return; treeIndex[idx] = -1; } /** * Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored inside * the result object. 
* * Params: * result = the result object in which the indices of the nearest-neighbors are stored * vec = the vector for which to search the nearest neighbors * * \tparam RESULTSET Should be any ResultSet<DistanceType> * \return True if the requested neighbors could be found. * \sa knnSearch, radiusSearch */ template <typename RESULTSET> bool findNeighbors(RESULTSET& result, const ElementType* vec, const SearchParams& searchParams) const { for(size_t i = 0; i < treeCount; i++) { index[i].findNeighbors(result, &vec[0], searchParams); } return result.full(); } }; /** An L2-metric KD-tree adaptor for working with data directly stored in an Eigen Matrix, without duplicating the data storage. * Each row in the matrix represents a point in the state space. * * Example of usage: * \code * Eigen::Matrix<num_t,Dynamic,Dynamic> mat; * // Fill out "mat"... * * typedef KDTreeEigenMatrixAdaptor< Eigen::Matrix<num_t,Dynamic,Dynamic> > my_kd_tree_t; * const int max_leaf = 10; * my_kd_tree_t mat_index(mat, max_leaf ); * mat_index.index->buildIndex(); * mat_index.index->... * \endcode * * \tparam DIM If set to >0, it specifies a compile-time fixed dimensionality for the points in the data set, allowing more compiler optimizations. * \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. */ template <class MatrixType, class Distance = nanoflann::metric_L2> struct KDTreeEigenMatrixAdaptor { typedef KDTreeEigenMatrixAdaptor<MatrixType,Distance> self_t; typedef typename MatrixType::Scalar num_t; typedef typename MatrixType::Index IndexType; typedef typename Distance::template traits<num_t,self_t>::distance_t metric_t; typedef KDTreeSingleIndexAdaptor< metric_t,self_t, MatrixType::ColsAtCompileTime,IndexType> index_t; index_t* index; //! The kd-tree index for the user to call its methods as usual with any other FLANN index. 
/// Constructor: takes a const ref to the matrix object with the data points KDTreeEigenMatrixAdaptor(const MatrixType &mat, const int leaf_max_size = 10) : m_data_matrix(mat) { const IndexType dims = mat.cols(); index = new index_t( dims, *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size ) ); index->buildIndex(); } public: /** Deleted copy constructor */ KDTreeEigenMatrixAdaptor(const self_t&) = delete; ~KDTreeEigenMatrixAdaptor() { delete index; } const MatrixType &m_data_matrix; /** Query for the \a num_closest closest points to a given point (entered as query_point[0:dim-1]). * Note that this is a short-cut method for index->findNeighbors(). * The user can also call index->... methods as desired. * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface. */ inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const { nanoflann::KNNResultSet<num_t, IndexType> resultSet(num_closest); resultSet.init(out_indices, out_distances_sq); index->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); } /** @name Interface expected by KDTreeSingleIndexAdaptor * @{ */ const self_t & derived() const { return *this; } self_t & derived() { return *this; } // Must return the number of data points inline size_t kdtree_get_point_count() const { return m_data_matrix.rows(); } // Returns the dim'th component of the idx'th point in the class: inline num_t kdtree_get_pt(const IndexType idx, int dim) const { return m_data_matrix.coeff(idx, IndexType(dim)); } // Optional bounding-box computation: return false to default to a standard bbox computation loop. // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again. // Look at bb.size() to find out the expected dimensionality (e.g. 
2 or 3 for point clouds) template <class BBOX> bool kdtree_get_bbox(BBOX& /*bb*/) const { return false; } /** @} */ }; // end of KDTreeEigenMatrixAdaptor /** @} */ /** @} */ // end of grouping } // end of NS #endif /* NANOFLANN_HPP_ */
76,763
38.366154
226
hpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/dependencies/program-options-lite/program_options_lite.cpp
/* The copyright in this software is being made available under the BSD * License, included below. This software may be subject to other third party * and contributor rights, including patent rights, and no such rights are * granted under this license. * * Copyright (c) 2010-2017, ITU/ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the ITU/ISO/IEC nor the names of its contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "program_options_lite.h" #include <stdlib.h> #include <iomanip> #include <ios> #include <iostream> #include <fstream> #include <sstream> #include <string> #include <list> #include <map> #include <algorithm> using namespace std; //! \ingroup TAppCommon //! \{ namespace df { namespace program_options_lite { ErrorReporter default_error_reporter; ostream& ErrorReporter::error(const string& where) { is_errored = 1; if (!where.empty()) cerr << where << " error: "; else cerr << "Error: "; return cerr; } ostream& ErrorReporter::warn(const string& where) { if (!where.empty()) cerr << where << " warning: "; else cerr << "Warning: "; return cerr; } Options::~Options() { for(Options::NamesPtrList::iterator it = opt_list.begin(); it != opt_list.end(); it++) { delete *it; } } void Options::addOption(OptionBase *opt) { Names* names = new Names(); names->opt = opt; string& opt_string = opt->opt_string; size_t opt_start = 0; for (size_t opt_end = 0; opt_end != string::npos;) { opt_end = opt_string.find_first_of(',', opt_start); bool force_short = 0; if (opt_string[opt_start] == '-') { opt_start++; force_short = 1; } string opt_name = opt_string.substr(opt_start, opt_end - opt_start); if (force_short || opt_name.size() == 1) { names->opt_short.push_back(opt_name); opt_short_map[opt_name].push_back(names); } else { names->opt_long.push_back(opt_name); opt_long_map[opt_name].push_back(names); } opt_start += opt_end + 1; } bool need_section_update = false; if (!sections.empty()) { auto& curr_section = sections.back(); need_section_update = curr_section.second == opt_list.cend(); } opt_list.push_back(names); if (need_section_update) { auto& curr_section = sections.back(); curr_section.second = std::prev(opt_list.cend()); } } /* Helper method to initiate adding options to Options */ OptionSpecific Options::addOptions() { return OptionSpecific(*this); } static void setOptions(Options::NamesPtrList& opt_list, const string& value, ErrorReporter& error_reporter) { /* multiple 
options may be registered for the same name: * allow each to parse value */ for (Options::NamesPtrList::iterator it = opt_list.begin(); it != opt_list.end(); ++it) { (*it)->opt->parse(value, error_reporter); } } static const char spaces[41] = " "; /* format help text for a single option: * using the formatting: "-x, --long", * if a short/long option isn't specified, it is not printed */ static void doHelpOpt(ostream& out, const Options::Names& entry, unsigned pad_short = 0) { pad_short = min(pad_short, 8u); if (!entry.opt_short.empty()) { unsigned pad = max((int)pad_short - (int)entry.opt_short.front().size(), 0); out << "-" << entry.opt_short.front(); if (!entry.opt_long.empty()) { out << ", "; } out << &(spaces[40 - pad]); } else { out << " "; out << &(spaces[40 - pad_short]); } if (!entry.opt_long.empty()) { out << "--" << entry.opt_long.front(); out << '='; entry.opt->writeDefault(out); } } /* format the help text */ void doHelp(ostream& out, Options& opts, unsigned columns) { const unsigned pad_short = 3; /* first pass: work out the longest option name */ unsigned max_width = 0; for(Options::NamesPtrList::iterator it = opts.opt_list.begin(); it != opts.opt_list.end(); it++) { ostringstream line(ios_base::out); doHelpOpt(line, **it, pad_short); max_width = max(max_width, (unsigned) line.tellp()); } unsigned opt_width = min(max_width+2, 7+28u + pad_short) + 2; unsigned desc_width = columns - opt_width; /* second pass: write out formatted option and help text. * - align start of help text to start at opt_width * - if the option text is longer than opt_width, place the help * text at opt_width on the next line. 
*/ auto section_it = opts.sections.begin(); for(Options::NamesPtrList::iterator it = opts.opt_list.begin(); it != opts.opt_list.end(); it++) { if (section_it != opts.sections.end() && it == section_it->second) { const auto& section_name = section_it->first.name; out << "\n " << section_name << "\n "; fill_n(std::ostreambuf_iterator<char>(out), section_name.size(), '-'); out << '\n'; section_it++; } ostringstream line(ios_base::out); line << " "; doHelpOpt(line, **it, pad_short); const string& opt_desc = (*it)->opt->opt_desc; if (opt_desc.empty()) { /* no help text: output option, skip further processing */ out << line.str() << endl; continue; } size_t currlength = size_t(line.tellp()); if (currlength >= opt_width) { /* if option text is too long (and would collide with the * help text, split onto next line and ensure a gap after * the previous option to increase readability */ out << endl; line << endl; currlength = 0; } /* split up the help text, taking into account new lines, * (add opt_width of padding to each new line) */ for (size_t newline_pos = 0, cur_pos = 0; cur_pos != string::npos; currlength = 0) { /* print any required padding space for vertical alignment */ line << &(spaces[40 - opt_width + currlength]); newline_pos = opt_desc.find_first_of('\n', newline_pos); if (newline_pos != string::npos) { /* newline found, print substring (newline needn't be stripped) */ newline_pos++; line << opt_desc.substr(cur_pos, newline_pos - cur_pos); cur_pos = newline_pos; continue; } if (cur_pos + desc_width > opt_desc.size()) { /* no need to wrap text, remainder is less than avaliable width */ line << opt_desc.substr(cur_pos); break; } /* find a suitable point to split text (avoid spliting in middle of word) */ size_t split_pos = opt_desc.find_last_of(' ', cur_pos + desc_width); if (split_pos != string::npos) { /* eat up multiple space characters */ split_pos = opt_desc.find_last_not_of(' ', split_pos) + 1; } /* bad split if no suitable space to split at. 
fall back to width */ bool bad_split = split_pos == string::npos || split_pos <= cur_pos; if (bad_split) { split_pos = cur_pos + desc_width; } line << opt_desc.substr(cur_pos, split_pos - cur_pos); /* eat up any space for the start of the next line */ if (!bad_split) { split_pos = opt_desc.find_first_not_of(' ', split_pos); } cur_pos = newline_pos = split_pos; if (cur_pos >= opt_desc.size()) { break; } line << endl; } out << line.str() << endl; } } /* dump configuration values */ static void dumpCfgRange( ostream& out, Options::NamesPtrList::const_iterator begin, Options::NamesPtrList::const_iterator end, int indent) { // find the width of the longest option name size_t max_opt_width = 0; for (auto it = begin; it != end; ++it) { size_t len = 0; const auto& entry = *it; if (!entry->opt_long.empty()) len = entry->opt_long.front().size(); else if (!entry->opt_short.empty()) len = entry->opt_short.front().size(); max_opt_width = max(max_opt_width, len); } for (auto it = begin; it != end; ++it) { out << &(spaces[40 - indent]); out << left << setw(max_opt_width); const auto& entry = *it; if (!entry->opt_long.empty()) out << entry->opt_long.front(); else if (!entry->opt_short.empty()) out << entry->opt_short.front(); else continue; out << " : "; entry->opt->writeValue(out); out << '\n'; } } /* dump configuration values */ void dumpCfg(ostream& out, const Options& opts, int indent) { dumpCfgRange(out, opts.opt_list.begin(), opts.opt_list.end(), indent); } void dumpCfg( ostream& out, const Options& opts, const char* section, int indent) { auto it = std::find_if(opts.sections.begin(), opts.sections.end(), [&](const Options::SectionPtr& sect){ return sect.first.name == section; }); if (it == opts.sections.end()) return; auto it_next = std::next(it); auto begin = it->second; auto end = it_next == opts.sections.end() ? 
opts.opt_list.end() : it_next->second; dumpCfgRange(out, begin, end, indent); } struct OptionWriter { OptionWriter(Options& rOpts, ErrorReporter& err) : opts(rOpts), error_reporter(err) {} virtual ~OptionWriter() {} virtual const string where() = 0; bool storePair(bool allow_long, bool allow_short, const string& name, const string& value); bool storePair(const string& name, const string& value) { return storePair(true, true, name, value); } Options& opts; ErrorReporter& error_reporter; }; bool OptionWriter::storePair(bool allow_long, bool allow_short, const string& name, const string& value) { bool found = false; Options::NamesMap::iterator opt_it; if (allow_long) { opt_it = opts.opt_long_map.find(name); if (opt_it != opts.opt_long_map.end()) { found = true; } } /* check for the short list */ if (allow_short && !(found && allow_long)) { opt_it = opts.opt_short_map.find(name); if (opt_it != opts.opt_short_map.end()) { found = true; } } if (!found) { error_reporter.error(where()) << "Unknown option `" << name << "' (value:`" << value << "')\n"; return false; } setOptions((*opt_it).second, value, error_reporter); return true; } struct ArgvParser : public OptionWriter { ArgvParser(Options& rOpts, ErrorReporter& rError_reporter) : OptionWriter(rOpts, rError_reporter) {} const string where() { return "command line"; } unsigned parseGNU(unsigned argc, const char* argv[]); unsigned parseSHORT(unsigned argc, const char* argv[]); }; /** * returns number of extra arguments consumed */ unsigned ArgvParser::parseGNU(unsigned argc, const char* argv[]) { /* gnu style long options can take the forms: * --option=arg * --option arg */ string arg(argv[0]); size_t arg_opt_start = arg.find_first_not_of('-'); size_t arg_opt_sep = arg.find_first_of('='); string option = arg.substr(arg_opt_start, arg_opt_sep - arg_opt_start); unsigned extra_argc_consumed = 0; if (arg_opt_sep == string::npos) { /* no argument found => argument in argv[1] (maybe) */ /* xxx, need to handle case where option 
isn't required */ #if 0 /* commented out, to return to true GNU style processing * where longopts have to include an =, otherwise they are * booleans */ if (argc == 1) { return 0; /* run out of argv for argument */ } extra_argc_consumed = 1; #endif if(!storePair(true, false, option, "1")) { return 0; } } else { /* argument occurs after option_sep */ string val = arg.substr(arg_opt_sep + 1); storePair(true, false, option, val); } return extra_argc_consumed; } unsigned ArgvParser::parseSHORT(unsigned argc, const char* argv[]) { /* short options can take the forms: * --option arg * -option arg */ string arg(argv[0]); size_t arg_opt_start = arg.find_first_not_of('-'); string option = arg.substr(arg_opt_start); /* lookup option */ /* argument in argv[1] */ /* xxx, need to handle case where option isn't required */ if (argc == 1) { error_reporter.error(where()) << "Not processing option `" << option << "' without argument\n"; return 0; /* run out of argv for argument */ } storePair(false, true, option, string(argv[1])); return 1; } list<const char*> scanArgv(Options& opts, unsigned argc, const char* argv[], ErrorReporter& error_reporter) { ArgvParser avp(opts, error_reporter); /* a list for anything that didn't get handled as an option */ list<const char*> non_option_arguments; for(unsigned i = 1; i < argc; i++) { if (argv[i][0] != '-') { non_option_arguments.push_back(argv[i]); continue; } if (argv[i][1] == 0) { /* a lone single dash is an argument (usually signifying stdin) */ non_option_arguments.push_back(argv[i]); continue; } if (argv[i][1] != '-') { /* handle short (single dash) options */ i += avp.parseSHORT(argc - i, &argv[i]); continue; } if (argv[i][2] == 0) { /* a lone double dash ends option processing */ while (++i < argc) { non_option_arguments.push_back(argv[i]); } break; } /* handle long (double dash) options */ i += avp.parseGNU(argc - i, &argv[i]); } return non_option_arguments; } struct CfgStreamParser : public OptionWriter { CfgStreamParser(const 
string& rName, Options& rOpts, ErrorReporter& rError_reporter) : OptionWriter(rOpts, rError_reporter) , name(rName) , linenum(0) {} const string name; int linenum; const string where() { ostringstream os; os << name << ":" << linenum; return os.str(); } void scanLine(string& line); void scanStream(istream& in); }; void CfgStreamParser::scanLine(string& line) { /* strip any leading whitespace */ size_t start = line.find_first_not_of(" \t\n\r"); if (start == string::npos) { /* blank line */ return; } if (line[start] == '#') { /* comment line */ return; } /* look for first whitespace or ':' after the option end */ size_t option_end = line.find_first_of(": \t\n\r",start); string option = line.substr(start, option_end - start); /* look for ':', eat up any whitespace first */ start = line.find_first_not_of(" \t\n\r", option_end); if (start == string::npos) { /* error: badly formatted line */ error_reporter.warn(where()) << "line formatting error\n"; return; } if (line[start] != ':') { /* error: badly formatted line */ error_reporter.warn(where()) << "line formatting error\n"; return; } /* look for start of value string -- eat up any leading whitespace */ start = line.find_first_not_of(" \t\n\r", ++start); if (start == string::npos) { /* error: badly formatted line */ error_reporter.warn(where()) << "line formatting error\n"; return; } /* extract the value part, which may contain embedded spaces * by searching for a word at a time, until we hit a comment or end of line */ size_t value_end = start; do { if (line[value_end] == '#') { /* rest of line is a comment */ value_end--; break; } value_end = line.find_first_of(" \t\n\r", value_end); /* consume any white space, incase there is another word. 
* any trailing whitespace will be removed shortly */ value_end = line.find_first_not_of(" \t\n\r", value_end); } while (value_end != string::npos); /* strip any trailing space from value*/ value_end = line.find_last_not_of(" \t\n\r", value_end); string value; if (value_end >= start) { value = line.substr(start, value_end +1 - start); } else { /* error: no value */ error_reporter.warn(where()) << "no value found\n"; return; } /* store the value in option */ storePair(true, false, option, value); } void CfgStreamParser::scanStream(istream& in) { do { linenum++; string line; getline(in, line); scanLine(line); } while(!!in); } /* for all options in opts, set their storage to their specified * default value */ void setDefaults(Options& opts) { for(Options::NamesPtrList::iterator it = opts.opt_list.begin(); it != opts.opt_list.end(); it++) { (*it)->opt->setDefault(); } } void parseConfigFile(Options& opts, const string& filename, ErrorReporter& error_reporter) { ifstream cfgstream(filename.c_str(), ifstream::in); if (!cfgstream) { error_reporter.error(filename) << "Failed to open config file\n"; return; } CfgStreamParser csp(filename, opts, error_reporter); csp.scanStream(cfgstream); } } } //! \}
20,109
29.059791
111
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/dependencies/program-options-lite/program_options_lite.h
/* The copyright in this software is being made available under the BSD * License, included below. This software may be subject to other third party * and contributor rights, including patent rights, and no such rights are * granted under this license. * * Copyright (c) 2010-2017, ITU/ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the ITU/ISO/IEC nor the names of its contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef __PROGRAM_OPTIONS_LITE__ #define __PROGRAM_OPTIONS_LITE__ #include <array> #include <functional> #include <iostream> #include <sstream> #include <string> #include <list> #include <map> #include <vector> namespace df { namespace program_options_lite { struct Options; struct ParseFailure : public std::exception { ParseFailure(std::string arg0, std::string val0) throw() : arg(arg0), val(val0) {} ~ParseFailure() throw() {}; std::string arg; std::string val; const char* what() const throw() { return "Option Parse Failure"; } }; struct ErrorReporter { ErrorReporter() : is_errored(0) {} virtual ~ErrorReporter() {} virtual std::ostream& error(const std::string& where = ""); virtual std::ostream& warn(const std::string& where = ""); bool is_errored; }; extern ErrorReporter default_error_reporter; void doHelp(std::ostream& out, Options& opts, unsigned columns = 80); void dumpCfg(std::ostream& out, const Options& opts, int indent = 0); void dumpCfg(std::ostream& out, const Options& opts, const char* section, int indent = 0); std::list<const char*> scanArgv(Options& opts, unsigned argc, const char* argv[], ErrorReporter& error_reporter = default_error_reporter); void setDefaults(Options& opts); void parseConfigFile(Options& opts, const std::string& filename, ErrorReporter& error_reporter = default_error_reporter); /* Generic parsing */ template<typename T> inline void parse_into(T& dest, const std::string& src) { std::istringstream src_ss (src, std::istringstream::in); src_ss.exceptions(std::ios::failbit); src_ss >> dest; } /** OptionBase: Virtual base class for storing information relating to a * specific option This base class describes common elements. Type specific * information should be stored in a derived class. 
*/ struct OptionBase { OptionBase(const std::string& name, const std::string& desc) : opt_string(name), opt_desc(desc) {}; virtual ~OptionBase() {} /* parse argument arg, to obtain a value for the option */ virtual void parse(const std::string& arg, ErrorReporter&) = 0; /* set the argument to the default value */ virtual void setDefault() = 0; /* write the default value to out */ virtual void writeDefault(std::ostream& out) = 0; /* write the current value to out */ virtual void writeValue(std::ostream& out) = 0; std::string opt_string; std::string opt_desc; }; /** Type specific option storage */ template<typename T, typename Enable = void> struct Option : public OptionBase { Option(const std::string& name, T& storage, T default_val, const std::string& desc) : OptionBase(name, desc), opt_storage(storage), opt_default_val(default_val) {} void parse(const std::string& arg, ErrorReporter&) { try { parse_into(opt_storage, arg); } catch (...) { throw ParseFailure(opt_string, arg); } } void setDefault() { opt_storage = opt_default_val; } void writeDefault(std::ostream& out) { out << opt_default_val; } void writeValue(std::ostream& out) { out << opt_storage; } T& opt_storage; T opt_default_val; }; template <typename T> struct option_detail_back_inserter { static constexpr bool is_container = true; static constexpr bool is_fixed_size = false; typedef std::back_insert_iterator<T> output_iterator; typedef typename T::const_iterator forward_iterator; static void clear(T& container) { container.clear(); } static output_iterator make_output_iterator(T& container) { return std::back_inserter(container); } }; template <class Container> struct option_detail; template <class T> struct option_detail { static constexpr bool is_container = false; }; template <typename... Ts> struct option_detail<std::vector<Ts...>> : public option_detail_back_inserter<std::vector<Ts...>> {}; template <typename... 
Ts> struct option_detail<std::list<Ts...>> : public option_detail_back_inserter<std::list<Ts...>> {}; template <typename... Ts> struct option_detail<std::array<Ts...>> { static constexpr bool is_container = true; static constexpr bool is_fixed_size = true; typedef typename std::array<Ts...> T; typedef typename std::array<Ts...>::iterator output_iterator; typedef typename std::array<Ts...>::const_iterator forward_iterator; static void clear(T& container) { container.clear(); }; static output_iterator make_output_iterator(T& container) { return container.begin(); } }; /** * Container type specific option storage. * * The option's argument is split by ',' and whitespace. Runs of * whitespace are ignored. Compare: * "a, b,c,,e" = {T1(a), T1(b), T1(c), T1(), T1(e)}, vs. * "a b c e" = {T1(a), T1(b), T1(c), T1(e)}. * * NB: each application of this option overwrites the previous instance, * in exactly the same way that normal (non-container) options to. */ template<template <class, class...> class TT, typename T1, typename... Ts> struct Option< TT<T1,Ts...>, typename std::enable_if<option_detail<TT<T1,Ts...>>::is_container>::type> : public OptionBase { typedef TT<T1,Ts...> T; typedef option_detail<T> detail; Option(const std::string& name, T& storage, T default_val, const std::string& desc) : OptionBase(name, desc), opt_storage(storage), opt_default_val(default_val) {} void parse(const std::string& arg, ErrorReporter&) { /* ensure that parsing overwrites any previous value */ detail::clear(opt_storage); auto it = detail::make_output_iterator(opt_storage); /* effectively map parse . split m/, /, @arg */ std::string::size_type pos = 0; do { /* skip over preceeding spaces */ pos = arg.find_first_not_of(" \t", pos); auto end = arg.find_first_of(", \t", pos); std::string sub_arg(arg, pos, end - pos); if (detail::is_fixed_size) { // todo(df): handle size check } try { T1 value; parse_into(value, sub_arg); *it++ = value; } catch (...) 
{ throw ParseFailure(opt_string, sub_arg); } pos = end + 1; } while (pos != std::string::npos + 1); } void setDefault() { opt_storage = opt_default_val; } void writeDefault(std::ostream& out) { out << '"'; bool first = true; for (const auto& val : opt_default_val) { if (!first) out << ','; out << val; first = false; } out << '"'; } void writeValue(std::ostream& out) { bool first = true; for (const auto& val : opt_storage) { if (!first) out << ", "; out << val; first = false; } } T& opt_storage; T opt_default_val; }; /* string parsing is specialized -- copy the whole string, not just the * first word */ template<> inline void Option<std::string>::parse(const std::string& arg, ErrorReporter&) { opt_storage = arg; } /* strings are pecialized -- output whole string rather than treating as * a sequence of characters */ template<> inline void Option<std::string>::writeDefault(std::ostream& out) { out << '"' << opt_default_val << '"'; } /* strings are pecialized -- output whole string rather than treating as * a sequence of characters */ template<> inline void Option<std::string>::writeValue(std::ostream& out) { out << '"' << opt_storage << '"'; } /** Option class for argument handling using a user provided function */ struct OptionFunc : public OptionBase { typedef void (Func)(Options&, const std::string&, ErrorReporter&); OptionFunc(const std::string& name, Options& parent_, std::function<Func> func_, const std::string& desc) : OptionBase(name, desc), parent(parent_), func(func_) {} void parse(const std::string& arg, ErrorReporter& error_reporter) { func(parent, arg, error_reporter); } void setDefault() { return; } void writeDefault(std::ostream& out) { /* there is no default */ out << "..."; } void writeValue(std::ostream& out) { /* there is no vaule */ out << "..."; } private: Options& parent; std::function<Func> func; }; struct Section { Section(const std::string& name) : name(name) {} std::string name; }; class OptionSpecific; struct Options { ~Options(); 
OptionSpecific addOptions(); struct Names { Names() : opt(0) {}; ~Names() { if (opt) { delete opt; } } std::list<std::string> opt_long; std::list<std::string> opt_short; OptionBase* opt; }; void addOption(OptionBase *opt); typedef std::list<Names*> NamesPtrList; NamesPtrList opt_list; // beginning of each option section typedef std::pair<Section, NamesPtrList::const_iterator> SectionPtr; std::list<SectionPtr> sections; typedef std::map<std::string, NamesPtrList> NamesMap; NamesMap opt_long_map; NamesMap opt_short_map; }; /* Class with templated overloaded operator(), for use by Options::addOptions() */ class OptionSpecific { public: OptionSpecific(Options& parent_) : parent(parent_) {} /** * Add option described by name to the parent Options list, * with storage for the option's value * with default_val as the default value * with desc as an optional help description */ template<typename T> OptionSpecific& operator()(const std::string& name, T& storage, T default_val, const std::string& desc = "") { parent.addOption(new Option<T>(name, storage, default_val, desc)); return *this; } /** * Add option described by name to the parent Options list, * with desc as an optional help description * instead of storing the value somewhere, a function of type * OptionFunc::Func is called. It is upto this function to correctly * handle evaluating the option's value. */ OptionSpecific& operator()(const std::string& name, std::function<OptionFunc::Func> func, const std::string& desc = "") { parent.addOption(new OptionFunc(name, parent, func, desc)); return *this; } /** * Add a section header to the options list. */ OptionSpecific& operator()(const Section& section) { parent.sections.emplace_back(section, parent.opt_list.cend()); return *this; } private: Options& parent; }; } /* namespace: program_options_lite */ } /* namespace: df */ #endif
13,408
28.797778
142
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/dependencies/schroedinger/schroarith.c
#include <string.h> #define SCHRO_ARITH_DEFINE_INLINE #define TRUE 1 #define FALSE 0 #include "schroarith.h" static const uint16_t lut[256] = { //LUT corresponds to window = 16 @ p0=0.5 & 256 @ p=1.0 0, 2, 5, 8, 11, 15, 20, 24, 29, 35, 41, 47, 53, 60, 67, 74, 82, 89, 97, 106, 114, 123, 132, 141, 150, 160, 170, 180, 190, 201, 211, 222, 233, 244, 256, 267, 279, 291, 303, 315, 327, 340, 353, 366, 379, 392, 405, 419, 433, 447, 461, 475, 489, 504, 518, 533, 548, 563, 578, 593, 609, 624, 640, 656, 672, 688, 705, 721, 738, 754, 771, 788, 805, 822, 840, 857, 875, 892, 910, 928, 946, 964, 983, 1001, 1020, 1038, 1057, 1076, 1095, 1114, 1133, 1153, 1172, 1192, 1211, 1231, 1251, 1271, 1291, 1311, 1332, 1352, 1373, 1393, 1414, 1435, 1456, 1477, 1498, 1520, 1541, 1562, 1584, 1606, 1628, 1649, 1671, 1694, 1716, 1738, 1760, 1783, 1806, 1828, 1851, 1874, 1897, 1920, 1935, 1942, 1949, 1955, 1961, 1968, 1974, 1980, 1985, 1991, 1996, 2001, 2006, 2011, 2016, 2021, 2025, 2029, 2033, 2037, 2040, 2044, 2047, 2050, 2053, 2056, 2058, 2061, 2063, 2065, 2066, 2068, 2069, 2070, 2071, 2072, 2072, 2072, 2072, 2072, 2072, 2071, 2070, 2069, 2068, 2066, 2065, 2063, 2060, 2058, 2055, 2052, 2049, 2045, 2042, 2038, 2033, 2029, 2024, 2019, 2013, 2008, 2002, 1996, 1989, 1982, 1975, 1968, 1960, 1952, 1943, 1934, 1925, 1916, 1906, 1896, 1885, 1874, 1863, 1851, 1839, 1827, 1814, 1800, 1786, 1772, 1757, 1742, 1727, 1710, 1694, 1676, 1659, 1640, 1622, 1602, 1582, 1561, 1540, 1518, 1495, 1471, 1447, 1422, 1396, 1369, 1341, 1312, 1282, 1251, 1219, 1186, 1151, 1114, 1077, 1037, 995, 952, 906, 857, 805, 750, 690, 625, 553, 471, 376, 255 }; /* This is a copy of the above lookup-table, with the elements interleaved in a way that decreases the number of operations during decode. 
*/ static const int16_t lut_interleaved[512] = { 255, 0, 376, -2, 471, -5, 553, -8, 625, -11, 690, -15, 750, -20, 805, -24, 857, -29, 906, -35, 952, -41, 995, -47, 1037, -53, 1077, -60, 1114, -67, 1151, -74, 1186, -82, 1219, -89, 1251, -97, 1282, -106, 1312, -114, 1341, -123, 1369, -132, 1396, -141, 1422, -150, 1447, -160, 1471, -170, 1495, -180, 1518, -190, 1540, -201, 1561, -211, 1582, -222, 1602, -233, 1622, -244, 1640, -256, 1659, -267, 1676, -279, 1694, -291, 1710, -303, 1727, -315, 1742, -327, 1757, -340, 1772, -353, 1786, -366, 1800, -379, 1814, -392, 1827, -405, 1839, -419, 1851, -433, 1863, -447, 1874, -461, 1885, -475, 1896, -489, 1906, -504, 1916, -518, 1925, -533, 1934, -548, 1943, -563, 1952, -578, 1960, -593, 1968, -609, 1975, -624, 1982, -640, 1989, -656, 1996, -672, 2002, -688, 2008, -705, 2013, -721, 2019, -738, 2024, -754, 2029, -771, 2033, -788, 2038, -805, 2042, -822, 2045, -840, 2049, -857, 2052, -875, 2055, -892, 2058, -910, 2060, -928, 2063, -946, 2065, -964, 2066, -983, 2068, -1001, 2069, -1020, 2070, -1038, 2071, -1057, 2072, -1076, 2072, -1095, 2072, -1114, 2072, -1133, 2072, -1153, 2072, -1172, 2071, -1192, 2070, -1211, 2069, -1231, 2068, -1251, 2066, -1271, 2065, -1291, 2063, -1311, 2061, -1332, 2058, -1352, 2056, -1373, 2053, -1393, 2050, -1414, 2047, -1435, 2044, -1456, 2040, -1477, 2037, -1498, 2033, -1520, 2029, -1541, 2025, -1562, 2021, -1584, 2016, -1606, 2011, -1628, 2006, -1649, 2001, -1671, 1996, -1694, 1991, -1716, 1985, -1738, 1980, -1760, 1974, -1783, 1968, -1806, 1961, -1828, 1955, -1851, 1949, -1874, 1942, -1897, 1935, -1920, 1920, -1935, 1897, -1942, 1874, -1949, 1851, -1955, 1828, -1961, 1806, -1968, 1783, -1974, 1760, -1980, 1738, -1985, 1716, -1991, 1694, -1996, 1671, -2001, 1649, -2006, 1628, -2011, 1606, -2016, 1584, -2021, 1562, -2025, 1541, -2029, 1520, -2033, 1498, -2037, 1477, -2040, 1456, -2044, 1435, -2047, 1414, -2050, 1393, -2053, 1373, -2056, 1352, -2058, 1332, -2061, 1311, -2063, 1291, -2065, 1271, -2066, 
1251, -2068, 1231, -2069, 1211, -2070, 1192, -2071, 1172, -2072, 1153, -2072, 1133, -2072, 1114, -2072, 1095, -2072, 1076, -2072, 1057, -2071, 1038, -2070, 1020, -2069, 1001, -2068, 983, -2066, 964, -2065, 946, -2063, 928, -2060, 910, -2058, 892, -2055, 875, -2052, 857, -2049, 840, -2045, 822, -2042, 805, -2038, 788, -2033, 771, -2029, 754, -2024, 738, -2019, 721, -2013, 705, -2008, 688, -2002, 672, -1996, 656, -1989, 640, -1982, 624, -1975, 609, -1968, 593, -1960, 578, -1952, 563, -1943, 548, -1934, 533, -1925, 518, -1916, 504, -1906, 489, -1896, 475, -1885, 461, -1874, 447, -1863, 433, -1851, 419, -1839, 405, -1827, 392, -1814, 379, -1800, 366, -1786, 353, -1772, 340, -1757, 327, -1742, 315, -1727, 303, -1710, 291, -1694, 279, -1676, 267, -1659, 256, -1640, 244, -1622, 233, -1602, 222, -1582, 211, -1561, 201, -1540, 190, -1518, 180, -1495, 170, -1471, 160, -1447, 150, -1422, 141, -1396, 132, -1369, 123, -1341, 114, -1312, 106, -1282, 97, -1251, 89, -1219, 82, -1186, 74, -1151, 67, -1114, 60, -1077, 53, -1037, 47, -995, 41, -952, 35, -906, 29, -857, 24, -805, 20, -750, 15, -690, 11, -625, 8, -553, 5, -471, 2, -376, 0, -255 }; void schro_arith_decode_init (SchroArith * arith, SchroRdFn read_fn, void * read_fn_priv) { int size; memset (arith, 0, sizeof (SchroArith)); arith->range[0] = 0; arith->range[1] = 0xffff0000; arith->range_size = arith->range[1] - arith->range[0]; arith->code = 0; arith->cntr = 1; arith->read = read_fn; arith->io_priv = read_fn_priv; arith->code = arith->read(arith->io_priv) << 24; arith->code |= arith->read(arith->io_priv) << 16; memcpy (arith->lut, (void *) lut_interleaved, 512 * sizeof (int16_t)); } void schro_arith_encode_init (SchroArith * arith, SchroWrFn write_fn, void * write_fn_priv) { int i; memset (arith, 0, sizeof (SchroArith)); arith->range[0] = 0; arith->range[1] = 0xffff; arith->range_size = arith->range[1] - arith->range[0]; arith->code = 0; arith->first_byte = 1; arith->write = write_fn; arith->io_priv = write_fn_priv; for (i 
= 0; i < 256; i++) { arith->lut[i] = lut[i]; arith->lut[511 - i] = lut[255 - i]; } } void schro_arith_decode_flush (SchroArith * arith) { // perform an extra renormalisation to match encoding while (arith->range[1] <= 0x40000000) { if (!--arith->cntr) { arith->read(arith->io_priv); arith->cntr = 8; } arith->range[1] <<= 1; } } void schro_arith_flush (SchroArith * arith) { int extra_byte; int i; if (arith->cntr > 0) { extra_byte = TRUE; } else { extra_byte = FALSE; } for (i = 0; i < 16; i++) { if ((arith->range[0] | ((1 << (i + 1)) - 1)) > arith->range[1] - 1) break; } arith->range[0] |= ((1 << i) - 1); //arith->range[0] += arith->range[1] - 1; while (arith->cntr < 8) { arith->range[0] <<= 1; arith->range[0] |= 1; arith->cntr++; } if (arith->range[0] >= (1 << 24)) { arith->output_byte++; if (!arith->first_byte) arith->write(arith->output_byte, arith->io_priv); while (arith->carry) { arith->write(0x00, arith->io_priv); arith->carry--; } } else { if (!arith->first_byte) arith->write(arith->output_byte, arith->io_priv); while (arith->carry) { arith->write(0xff, arith->io_priv); arith->carry--; } } arith->write(arith->range[0] >> 16, arith->io_priv); arith->write(arith->range[0] >> 8, arith->io_priv); if (extra_byte) arith->write(arith->range[0], arith->io_priv); } /* wrappers */ void schro_arith_encode_bit (SchroArith * arith, uint16_t *probability, int value) { _schro_arith_encode_bit (arith, probability, value); } int schro_arith_decode_bit (SchroArith * arith, uint16_t *probability) { return _schro_arith_decode_bit (arith, probability); }
8,374
34.944206
86
c
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/dependencies/schroedinger/schroarith.h
#ifndef _SCHRO_ARITH_H_ #define _SCHRO_ARITH_H_ #define SCHRO_INTERNAL #include <stdint.h> #if __cplusplus extern "C" { #endif typedef uint8_t (*SchroRdFn)(void *); typedef void (*SchroWrFn)(uint8_t byte, void *); typedef struct _SchroArith SchroArith; struct _SchroArith { SchroRdFn read; SchroWrFn write; void * io_priv; uint32_t range[2]; uint32_t code; uint32_t range_size; int cntr; int carry; uint8_t first_byte; uint8_t output_byte; uint16_t lut[512]; }; void schro_arith_decode_init (SchroArith *arith, SchroRdFn read_fn, void * priv); void schro_arith_encode_init (SchroArith *arith, SchroWrFn write_fn, void * priv); void schro_arith_flush (SchroArith *arith); void schro_arith_decode_flush (SchroArith *arith); void schro_arith_encode_bit (SchroArith *arith, uint16_t *probability, int value); int schro_arith_decode_bit (SchroArith *arith, uint16_t *probability); #ifdef SCHRO_ARITH_DEFINE_INLINE static inline int _schro_arith_decode_bit (SchroArith *arith, uint16_t *probability) { unsigned int range_x_prob; unsigned int value; unsigned int lut_index; register unsigned int range = arith->range[1]; register unsigned int code_minus_low = arith->code; while (range <= 0x40000000) { if (!--arith->cntr) { code_minus_low |= arith->read(arith->io_priv) << 8; arith->cntr = 8; } range <<= 1; code_minus_low <<= 1; } range_x_prob = ((range >> 16) * (*probability)) & 0xFFFF0000; lut_index = (*probability)>>7 & ~1; value = (code_minus_low >= range_x_prob); (*probability) += arith->lut[lut_index | value]; if (value) { code_minus_low -= range_x_prob; range -= range_x_prob; } else { range = range_x_prob; } arith->range[1] = range; arith->code = code_minus_low; return value; } static inline void _schro_arith_encode_bit (SchroArith *arith, uint16_t *probability, int value) { unsigned int range; unsigned int probability0; unsigned int range_x_prob; probability0 = (*probability); range = arith->range[1]; range_x_prob = (range * probability0) >> 16; if (value) { arith->range[0] = 
arith->range[0] + range_x_prob; arith->range[1] -= range_x_prob; (*probability) -= arith->lut[(*probability)>>8]; } else { arith->range[1] = range_x_prob; (*probability) += arith->lut[255-((*probability)>>8)]; } while (arith->range[1] <= 0x4000) { arith->range[0] <<= 1; arith->range[1] <<= 1; arith->cntr++; if (arith->cntr == 8) { if (arith->range[0] < (1<<24) && (arith->range[0] + arith->range[1]) >= (1<<24)) { // NB: carry cannot occur on the first byte since: // - requires initial low=ff80, range=8000 for seq of False // - requires initial low=8000, range=8000 for seq of True // - requires initial low=0001, range=ffff for seq of True arith->carry++; } else { if (arith->range[0] >= (1<<24)) { // NB: output_byte is always valid here since: // - given the initial value of low = 0, range = ffff // - the largest value of low after the first bit=1 is ff00 (p=ff01) // - coding subsequent symbols cannot cause low > 7fff00 // => minimum initial value of low that can trigger this = 2 // (this is not a valid initial condition) // - with range = 8000 (the maximum renormalised range) // - minimum initial value of low to trigger = 8080 arith->output_byte++; while (arith->carry) { arith->write(arith->output_byte, arith->io_priv); arith->output_byte = 0x00; arith->carry--; } } else { while (arith->carry) { arith->write(arith->output_byte, arith->io_priv); arith->output_byte = 0xff; arith->carry--; } } if (!arith->first_byte) arith->write(arith->output_byte, arith->io_priv); else arith->first_byte = 0; arith->output_byte = arith->range[0] >> 16; } arith->range[0] &= 0xffff; arith->cntr = 0; } } } static inline int maxbit (unsigned int x) { #if 0 int i; for(i=0;x;i++){ x >>= 1; } return i; #else int i = 0; if (x == 0) return 0; if (x > 0x00ff) { i += 8; x >>= 8; } if (x > 0x000f) { i += 4; x >>= 4; } if (x > 0x0003) { i += 2; x >>= 2; } if (x > 0x0001) { i += 1; x >>= 1; } if (x > 0x0000) { i += 1; } return i; #endif } #else /* SCHRO_ARITH_DEFINE_INLINE */ int 
_schro_arith_decode_bit (SchroArith *arith, uint16_t *probability); void _schro_arith_encode_bit (SchroArith *arith, uint16_t *probability, int value) SCHRO_INTERNAL; #endif /* SCHRO_ARITH_DEFINE_INLINE */ #if __cplusplus } // extern "C" #endif #endif
4,857
23.169154
83
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/doc/README.about.md
General Information =================== Reference software is being made available to provide a reference implementation of the G-PCC standard being developed by MPEG (ISO/IEC SC29 WG11). One of the main goals of the reference software is to provide a basis upon which to conduct experiments in order to determine which coding tools provide desired coding performance. It is not meant to be a particularly efficient implementation of anything, and one may notice its apparent unsuitability for a particular use. It should not be construed to be a reflection of how complex a production-quality implementation of a future G-PCC standard would be. This document aims to provide guidance on the usage of the reference software. It is widely suspected to be incomplete and suggestions for improvements are welcome. Such suggestions and general inquiries may be sent to the general MPEG 3DGC email reflector at <mpeg-3dgc@gti.ssr.upm.es> (registration required). Bug reporting ------------- Bugs should be reported on the issue tracker set up at <http://mpegx.int-evry.fr/software/MPEG/PCC/TM/mpeg-pcc-tmc13/issues>.
1,117
43.72
74
md
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/doc/README.build.md
Obtaining the software ====================== The authoritative location of the software is the following git repository: <http://mpegx.int-evry.fr/software/MPEG/PCC/TM/mpeg-pcc-tmc13> Each released version may be identified by a version control system tag in the form `release-v${version}`. An example: ```console $ git clone http://mpegx.int-evry.fr/software/MPEG/PCC/TM/mpeg-pcc-tmc13.git $ cd mpeg-pcc-tmc13 $ git checkout release-v4.0 ``` > It is strongly advised to obtain the software using the version control > system rather than to download a zip (or other archive) of a particular > release. The build system uses the version control system to accurately > identify the version being built. Building -------- The codec is supported on Linux, OSX and Windows platforms. The build configuration is managed using CMake. > It is strongly advised to build the software in a separate build directory. ### Linux ```console $ mkdir build $ cd build $ cmake .. $ make $ tmc3/tmc3 --help ``` ### OSX ```console $ mkdir build $ cd build $ cmake .. -G Xcode $ xcodebuild $ tmc3/tmc3 --help ``` As an alternative, the generated XCode project may be opened and built from XCode itself. ### Windows ```console > md build > cd build > cmake .. -G "Visual Studio 15 2017 Win64" ``` Open the generated visual studio solution to build it.
1,349
21.131148
77
md
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/doc/README.options.md
General options --------------- ### `--help` Print a list of available command line (and configuration file) options along with their default values and exit. ### `--config=FILE`, `-c` This specifies a configuration file to be immediately loaded. ### `--mode=VALUE` This option selects the codec's mode of operation. A value of 0 enables encoding functionality. A value of 1 switches to decoding mode. I/O parameters -------------- ### `--firstFrameNum=INT-VALUE` The initial frame number of the input or output sequence. The software replaces any instance of a '%d' printf format directive with the current frame number when evaluating the following options: - uncompressedDataPath - reconstructedDataPath - postRecolourPath - preInvScalePath NB: When decoding, this option relates only to the output file names. In order to have the decoder produce identically numbered output ply files as the encoder input, specify the same value of firstFrameNum for the decoder. ### `--frameCount=INT-VALUE` (Encoder only) The number of frames to be encoded. ### `--uncompressedDataPath=FILE` (Encoder only) The input source point cloud to be compressed. The first instance of '%d' in FILE will be expanded with the current frame number. ### `--compressedStreamPath=FILE` The compressed bitstream file output when encoding or input when decoding. ### `--reconstructedDataPath=FILE` The reconstructed point cloud file. When encoding, the output is the locally decoded picture. It is expected that the reconstructed output of the encoder and decoder match exactly. The first instance of '%d' in FILE will be expanded with the current frame number. ### `--postRecolourPath=FILE` (Encoder only) As part of the encoding process, it may be necessary to re-colour the point cloud if the point geometry is altered. This diagnostic output file corresponds to the re-coloured point cloud prior to attribute coding without output geometry scaling. 
The first instance of '%d' in FILE will be expanded with the current frame number. ### `--preInvScalePath=FILE` (Decoder only) This diagnostic output corresponds to the decoded point cloud (geometry and attributes) prior to output geometry scaling. When compared to the output of `postRecolourPath`, the performance of attribute coding may be directly measured without being confounded by any geometry losses. The first instance of '%d' in FILE will be expanded with the current frame number. ### `--outputBinaryPly=0|1` Sets the output format of PLY files (Binary=1, ASCII=0). Reading and writing binary PLY files is more efficient than the ASCII variant, but are less suited to simple scripts and direct human inspection. If outputting non-integer point co-ordinates (eg, due to the output geometry scaling), the precision of the binary and ASCII versions are not identical. ### `--outputSystem=0|1` Controls the output scaling of the coded point cloud. | Value | Description | |:-----:| ----------------------------| | 0 | Conformance output | | 1 | External co-ordinate system | The conformance output scales the coded point cloud to the sequence co-ordinate system. The output point positions are not offset by the sequence origin. The external co-ordinate system output scales the point cloud to the defined external co-ordinate system (see `sequenceScale`, `externalScale`, and `outputUnitLength`). The output point positions are offset by the sequence origin, appropriately scaled. ### `--outputUnitLength=REAL-VALUE` The length of the output point cloud unit vector. Point clouds output by the encoder or decoder are rescaled to match this length. For example, `outputUnitLength=1000` outputs a point cloud with integer point positions representing millimetres. ### `--outputPrecisionBits=INT-VALUE` The number of fractional bits to retain when scaling from the coding co-ordinate system to the sequence co-ordinate system. 
The fractional bits are further retained when converting to the external co-ordinate system. The special value `outputPrecisionBits=-1` retains all fractional bits during the scaling process. ### `--convertPlyColourspace=0|1` Controls the conversion of ply RGB colour attributes to/from the colourspace set by an attribute's `colourMatrix` before attribute coding and after decoding. When disabled (0), or if there is no converter available for the requested `colourMatrix`, no conversion happens; however the `colourMatrix` value is still written to the bitstream. Decoder-specific options ======================== ### `--skipOctreeLayers=INT-VALUE` The option indicates the number of skipped lod layers from leaf lod. If aps.scalable_enable_flag is 1, the option is valid. Otherwise, the option is ignored. ### `--decodeMaxPoints=INT-VALUE` A value greater than zero controls the automatic derivation of `skipOctreeLayers` such that at most $n$ points are decoded. This option only has an effect if the bitstream contains per octree level point count metadata (see `pointCountMetadata`). Encoder-specific options ======================== Co-ordinate systems and pre-scaling ----------------------------------- ### `--srcUnit=0|1|metre` The physical unit used to interpret values of `srcUnitLength`. | Value | Description | |:-------:| ---------------| | 0 | dimensionless | | 1,metre | metre | ### `--srcUnitLength=REAL-VALUE` The length of the source point cloud unit vector. This value is used to define the unit vector length of the sequence co-ordinate system. It is not used to perform scaling by the encoder. For example, `srcUnitLength=1000` and `srcUnit=metre` indicates that integer positions in the source point cloud represent millimetres. ### `--inputScale=REAL-VALUE` A scale factor applied to point positions in the source point cloud prior to integer conversion. The `inputScale` changes the length of the source unit vectors (as set by `srcUnitLength`). 
For example, a point cloud may have a unit vector representing 1 metre (`srcUnitLength=1`) and contain points with a resolution of 1000 points per metre. Since the codec can only represent integer positions, without input scaling, it is coded with a precision of one metre. Setting `inputScale=1000` will increase the precision to 1 millimetre. ### `--codingScale=REAL-VALUE` A scale factor used to determine the length of the coding co-ordinate system unit vector. The scale factor is relative to `inputScale`. The input point cloud (after integer conversion) is scaled by `codingScale` and rounded to integer positions. If `codingScale` is greater than `sequenceScale`, the encoder will set `codingScale=sequenceScale`. A decoder will scale the coded point cloud by `sequenceScale/codingScale` prior to output. ### `--sequenceScale=REAL-VALUE` A scale factor used to determine the length of the sequence co-ordinate system unit vector. The scale factor is relative to `inputScale`. The input point cloud (after integer conversion) is scaled by the smallest of `sequenceScale` and `codingScale`. ### `--externalScale=REAL-VALUE` A scale factor used to define the length of the sequence co-ordinate system when `srcUnit` is dimensionless. The scale factor is relative to `inputScale`. The `externalScale` does not affect scaling of the input point cloud prior to coding. For example, a point cloud coded with `sequenceScale=0.25` and `externalScale=0.5` specifies that: - the input is scaled by 0.25 prior to coding, and - the decoder is informed that 1 sequence unit is equal to 2 external units. NB: a decoder is not required to scale the sequence co-ordinate system to an external co-ordinate system prior to output. ### `--autoSeqBbox=0|1` Automatically determine the sequence bounding box (`seqOrigin` and `seqSizeWhd`) using the first input frame. ### `--seqOrigin=x,y,z` Sets the origin of the sequence bounding box. 
The `seqOrigin` must be less than or equal to the lowest input point position. The origin is configured in the input co-ordinate system (after integer conversion). The encoder will adjust the origin according to `sequenceScale`. This option has no effect when `autoSeqBbox=1`. ### `--seqSizeWhd=w,h,d` Sets the size of the sequence bounding box. The size is configured in the input co-ordinate system (after integer conversion). The encoder will adjust the size according to `sequenceScale`. `seqSizeWhd=0,0,0` disables signalling the sequence bounding box size. This option has no effect when `autoSeqBbox=1`. ### `--mergeDuplicatedPoints=0|1` Controls the ability to code duplicate points. When duplicate point merging is enabled, bitstream syntax related to duplicate points is disabled and a pre-filtering process is used to remove co-located points. ### `--sortInputByAzimuth=0|1` Pre-sort the input point cloud according to azimuth angle with the origin `lidarHeadPosition`. Pre-sorting occurs prior to tile/slice partitioning. Input partitioning (slices & tiles) ----------------------------------- ### `--partitionMethod=0|2|3|4|5` Selects the partitioning method to map points to tiles and slices: | Value | Description | |:-----:| ----------------------------------------| | 0 | none (single slice) | | 2 | uniform partitioning along longest edge | | 3 | uniform octree partitions | | 4 | uniform square partitions | | 5 | n-point spans | Uniform longest edge partitioning slices the point cloud along the longest edge according to `partitionNumUniformGeom`. Uniform octree partitioning generates slices with the same size based on an octree partitioning of the point cloud according to `partitionOctreeDepth`. Uniform square partitioning generates cubic slices sized according to the shortest edge. N-point span partitioning divides the input point list (after input pre-sorting) into `sliceMaxPoints`-point sublists. Input order (after pre-sorting) is maintained. 
In all cases, a refinement process may merge or split slices in order to satisfy maximum or minimum points per slice constraints. ### `--partitionNumUniformGeom=INT-VALUE` Sets the number of slices to generate using `partitionMethod=2`. If equal to zero, the number of slices is the integer ratio of the longest to shortest edges of the point cloud bounding box. ### `--partitionOctreeDepth=INT-VALUE` Sets the depth of the octree for slice generation using `partitionMethod=3`. The input point cloud is decomposed using an octree with the configured depth. Each occupied leaf of the octree represents a single slice. ### `--sliceMaxPoints=INT-VALUE` Upper limit to the number of in each slice. Slices are split until this constraint is satisfied. ### `--sliceMinPoints=INT-VALUE` Minimum number of points in each slice. This soft limit is used to merge small slices together. ### `--tileSize=INT-VALUE` Tile dimension to use when performing initial partitioning. A value of zero disables tile partitioning. General options --------------- ### `--geometry_axis_order=INT-VALUE` Configures the order in which axes are internally coded. Changing the axis order does not change the orientation of the reconstructed point cloud. | Value | Coding order | |:-----:| -------------| | 0 | z, y, x | | 1 | x, y, z | | 2 | x, z, y | | 3 | y, z, x | | 4 | z, y, x | | 5 | z, x, y | | 6 | y, x, z | | 7 | x, y, z | ### `--disableAttributeCoding=0|1` This option instructs the encoder to ignore all options relating to attribute coding, as if they had never been configured. ### `--enforceLevelLimits=0|1` Controls the enforcement of level limits by the encoder. If a level limit is voilated, the encoder will abort. 
### `--cabac_bypass_stream_enabled_flag=0|1` Controls the entropy coding method used for equi-probable (bypass) bins: | Value | Description | |:-----:| --------------------------------------| | 0 | bypass bins coded using CABAC | | 1 | bypass bins coded in bypass substream | ### `--entropyContinuationEnabled=0|1` Controls the propagation of entropy coding state (context values) between slices in the same frame. When enabled, each slice (except the first) has a coding dependency on the previous slice. Geometry coding --------------- ### `--geomTreeType=0|1` Selects the geometry tree coding method. | Value | Description | |:-----:| -------------------------| | 0 | Octree | | 1 | Predictive geometry tree | ### `--positionQuantisationEnabled=0|1` Enables in-loop quantisation and reconstruction of geometry positions. NB: All in-loop quantisation is independent (and happens after) any position scaling due to `positionQuantizationScale`. ### `--positionQuantisationMethod=0|1|2` Selects the method used to determine the QP value for each quantised tree node. | Value | Description | |:-----:| ---------------- | | 0 | Uniform | | 1 | Random | | 2 | By point density | The 'uniform' method sets every node QP to the slice QP. The 'random' method picks a uniformly distributed random QP for each node from the range of permitted values. The seed for random number generation may be set using the environment variable `SEED`. The 'point density' method varies the per-node qp according to the relative number of points in each node. The sparsest 5% of nodes use $sliceQp + qpPot$, the densest 40% of nodes use $sliceQp - qpPot$, and the remaining nodes use $sliceQp$, where qpPot is `8 >> positionQpMultiplierLog2`. ### `--positionBaseQp=INT-VALUE` The quantisation parameter used to quantise geometry positions. The effective QP may be varied according to `positionSliceQpOffset` and `positionQuantisationOctreeDepth`. A QP equal to 0 results in a scale factor of 1. 
### `--positionQpMultiplierLog2=0|1|2|3` Controls the granularity of quantisation step sizes by limiting the number of QP values per step size doubling interval. There are $2^n$ QPs per step size doubling interval. ### `--positionIdcmQp=INT-VALUE` The quantisation parameter used to quantise directly coded (IDCM) point positions prior to reaching the `positionQuantisationOctreeDepth`. ### `--positionSliceQpOffset=INT-VALUE` A per-slice offset to be applied to `positionBaseQp`. ### `--positionQuantisationOctreeDepth=INT-VALUE` The depth in the octree at which per-node QP offsets are signalled. A non-normative encoder process determines the QP offset based upon the local density of the octree. A value of -1 disables signalling of per-node QP offsets. ### `--positionQuantisationSizeLog2=INT-VALUE` Sets the depth at which per-node QP offsets are signalled. The depth is the tree level with the configured node size. This value, if greater than 0, overrides `positionQuantisationOctreeDepth`. When non-cubic nodes are present, the depth is the tree level with the minimum node size dimension. ### `qtbtEnabled=0|1` Enables non-cubic geometry tree coding. When enabled, the geometry tree may have a cuboid bounding box. The partitioning of internal tree nodes at a particular depth are determined non-normatively by the encoder to be one of octree, quadtree or binary partitions. ### `maxNumQtBtBeforeOt=INT-VALUE` Limits the maximal number of quadtree and binary tree partitions used before the first octree partition. ### `minQtbtSizeLog2=INT-VALUE` Specifies the minimum size of quadtree and binary tree partitions. ### `--bitwiseOccupancyCoding=0|1` In octree geometry coding, there are both byte-wise and bit-wise tools to encode the occupancy data. This option selects between the two methods. ### `--neighbourAvailBoundaryLog2=INT-VALUE` Defines the volume within which octree nodes are considered available for use in occupancy contextualisation and intra occupancy prediction. 
A value less than 2 limits the use of neighbouring nodes to direct octree siblings. The software currently supports a maximum value of 8 or 9 when intra occupancy prediction is enabled or disabled respectively. ### `--inferredDirectCodingMode=0|1|2|3` Controls the degree to which early termination of the geometry octree is used to code isolated points. | Value | Extent of qualifying node criteria | |:-----:| -------------------------------------| | 0 | Disabled | | 1 | Fully isolated parent and child | | 2 | Partially isolated parent | | 3 | Unconstrained | ### `--jointTwoPointIdcm=0|1` Controls the method used to code the point positions of directly coded nodes containing two distinct points. When enabled, an implicit point order is used to improve coding efficiency. ### `--adjacentChildContextualization=0|1` Controls the contextualization of occupancy bits according to the state of adjacent children of neighbouring nodes. ### `--intra_pred_max_node_size_log2=INT-VALUE` Intra occupancy prediction uses an octree node's neighbours to predict its occupancy. The prediction mode is enabled for octree nodes smaller than or equal to the configured size. A value of 0 disables intra occupancy prediction. ### `--planarEnabled=0|1` Controls the use of planar coding mode for geometry occupancy. ### `--planarModeIdcmUse=0--32` Controls the frequency in 1/32 percent of IDCM eligibility. Set to zero, IDCM is disabled. Set to 32, IDCM is unconstrained. ### `--planarModeThreshold0=0--127` Controls the eligibility threshold of the first planar mode based upon local child node density. ### `--planarModeThreshold1=0--127` Controls the eligibility threshold of the second planar mode based upon local child node density. ### `--planarModeThreshold2=0--127` Controls the eligibility threshold of the third planar mode based upon local child node density. ### `--angularEnabled=0|1` Controls the use of the angular coding mode in geometry occupancy contextualisation. 
Angular coding mode uses a LiDaR head model prior to improve the compression of a LiDaR acquired point cloud. The angular mode requires that planar mode is enabled. ### `--lidarHeadPosition=x,y,z` Specifies the LiDaR head position for use by the angular mode in terms of the input (unquantised) point cloud co-ordinate system. ### `--numLasers=0--255` The number of known laser angles and positions for use in angular mode. ### `--lasersTheta=FLOAT-VALUE-LIST` The elevation angle, theta, of each known laser used by the angular mode. Each elevation angle is expressed in radians relative to the x-y plane (range: $\left[-\pi, \pi\right]$. The zero angle describes a horizontal laser. Positive angles represent an elevation above the horizontal. Negative angles represent an elevation below the horizontal. The software will convert the floating point angles to an 18 bit fixed point representation. ### `--lasersZ=FLOAT-VALUE-LIST` The vertical offset of each known laser used by the angular mode. Each offset is expressed along the z axis in the input point cloud co-ordinate system, corresponding to a vertical offset relative to the LiDAR head position. The software will convert the floating point offsets to a three bit fixed point representation. ### `--lasersNumPhiPerTurn=INT-VALUE-LIST` The maximum number of samples that can be acquired during a full rotation of each known laser used by the angular mode. ### `--planarBufferDisabled=0|1` Controls the deactivation of the planar mode buffer for angular mode. ### `--numOctreeEntropyStreams=INT-VALUE` The number of geometry sub-streams (suitable for parallel coding) used to encode the geometry octree. For example, a value of eight generates eight sub-streams, one for the initial tree, then one for each of the last seven tree levels. No parallel sub-streams are generated when *VALUE* is 1. NB: the reference software does not attempt to exploit any opportunities for parallelism generated by this feature. 
### `--trisoupNodeSizeLog2=INT-VALUE|INT-VALUE-LIST` Controls the use of trisoup by setting the node size for triangle based surface reconstruction. The trisoup method terminates the octree coding at the given node size and continues by encoding triangles which are subsequently voxelised to produce points. A value less than 2 disables the use of trisoup. When a list of values is used, the n-th entry in the list controls the configuration of the n-th slice. The last entry is mapped to all remaining slices. ### `--trisoup_sampling_value=INT-VALUE` Controls the number of points generated by the trisoup triangle voxelisation process. Larger values reduce the number of points generated per triangle. | Value | Description | |:-----:| -------------------------------| | 0 | automatic (default) | | 1 | no sub-sampling | | N >=1 | point sampling period | The automatic mode will find the smallest sampling value that such that the number of generated points does not exceed the slice limit set by `sliceMaxPoints`. ### `--predGeomSort=INT-VALUE` Point order used to construct predictive geometry trees. Requires `geomTreeType=1`. | Value | Description | |:-----:| -------------------------------------- | | 0 | none | | 1 | morton order | | 2 | azimuth angle | | 3 | radial distance | | 4 | source azimuth angle (ply: laserangle) | ### `--predGeomAzimuthSortPrecision=INT-VALUE` Controls the precision used in azimuthal sorting of points prior to predictive tree construction. A value of 0 represents full-precision, otherwise larger values represent increasing precision. Requires `predGeomSort=2`. ### `--predGeomTreePtsMax=INT-VALUE` Maximum number of points per predictive geometry tree. A slice may contain more than one predictive geometry tree. Requires `geomTreeType=1`. ### `--positionBaseQpFreqLog2=INT-VALUE` Controls the number of predictive geometry tree nodes scaled by the same QP offset instance. QP offsets are signalled every $2^n$ nodes in tree traversal order. 
This configuration applies to all slices. Requires `positionQuantisationEnabled=1`. ### `--positionSliceQpFreqLog2=INT-VALUE` Identical to `positionBaseQpFreqLog2`, but controls per-slice configuration. ### `--positionAzimuthScaleLog2=INT-VALUE` Number of additional bits used to represent predictive geometry azimuth angles. Requires `angularEnabled=1`. ### `--positionRadiusInvScaleLog2=INT-VALUE` Degree of quantisation applied in the representation of angular predictive geometry radial distances. Requires `angularEnabled=1`. ### `--positionAzimuthSpeed=INT-VALUE` Step size used to linearly model progression of per-laser azimuthal angles during angular predictive geometry coding. Requires `angularEnabled=1`. ### `--predGeomAzimuthQuantization=1|0` Controls the use of radius dependent azimuth quantization in predictive geometry coding. Requires `angularEnabled=1` and `geomTreeType=1`. ### `--pointCountMetadata=0|1` Controls the addition of per octree layer point count metadata to each geometry slice. Attribute coding ---------------- The codec may be configured to represent one or more attributes. The configuration of each attribute is independent from all others. To configure coding of an attribute, first set the attribute options, then save the configuration using the `attribute` option. ### `--attribute=NAME` Saves the current attribute configuration for coding the named attribute. | Name | Description | |:----------- |---| | colour | r, g, and b properties as a tri-stimulus attribute | | reflectance | refc or reflectance property as a single-stimulus attribute | This option must be specified after the options corresponding to the attribute. ### `--defaultValue=INT-VALUE-LIST` The default value to use for attribute data in case of data loss. If unset, the implicit default attribute value is 2**(bitdepth-1). 
### `--colourMatrix=INT-VALUE` Indicates the colourspace of the coded attribute values according to the ISO/IEC 23001-8 Codec Independent Code Points for ColourMatrix. When used in conjunction with `convertPlyColourspace=1`, a colourspace conversion will be performed at the input/output of the encoder and decoder if supported. | Value | RGB converter | Description | |:-----:|:-------------:|------------------------------------------ | | 0 | n/a | Direct coding (eg, RGB, XYZ) | | 1 | Yes | YCbCr ITU-R BT.709 | | 2 | n/a | Unspecified | | 3 | n/a | Reserved | | 4 | No | USA Title 47 CFR 73.682 (a)(20) | | 5 | No | YCbCr ITU-R BT.601 | | 6 | No | YCbCr SMPTE 170M | | 7 | No | YCbCr SMPTE 240M | | 8 | Yes (YCgCoR) | YCgCo / YCgCoR | | 9 | No | YCbCr ITU-R BT.2020 | | 10 | No | YCbCr ITU-R BT.2020 (constant luminance) | | 11 | No | YDzDx SMPTE ST 2085 | NB: the use of YCgCoR and `bitdepth=N` implies that the bitdepth of the chroma component bitdepth is N + 1. ### `--bitdepth=INT-VALUE` The bitdepth of the attribute data. NB, this is not necessarily the same as the bitdepth of the PLY property. ### `--attrScale=INT-VALUE` and `--attrOffset=INT-VALUE` Scale and offset used to interpret coded attribute values. The encoder derives the coded attribute value as $(attr - offset) / scale$. The encoder and decoder scale coded attributes for output as $attr × scale + offset$. NB: these parameters are only supported for reflectance attributes. 
### `--transformType=0|1|2` Coding method to use for the current attribute: | Value | Description | |:-----:| ---------------------------------------------------------- | | 0 | Region Adaptive Hierarchical Transform (RAHT) | | 1 | Hierarchical neighbourhood prediction | | 2 | Hierarchical neighbourhood prediction as lifting transform | | 3 | Uncompressed (PCM) | ### `--rahtPredictionEnabled=0|1` Controls the use of transform domain prediction of RAHT coefficients from spatially upsampling the DC values of neighbouring parent nodes in the transform tree. ### `--rahtPredictionThreshold0=0--19` Controls a per-block threshold used to enable or disable the transform domain prediction of RAHT coefficients. This threshold specifies the number of parent neighbour points that must be present. ### `--rahtPredictionThreshold1=0--19` Controls a per-block threshold used to enable or disable the transform domain prediction of RAHT coefficients. This threshold specifies the number of neighbour points that must be present. ### `--numberOfNearestNeighboursInPrediction=INT-VALUE` Attribute's maximum number of nearest neighbours to be used for prediction. ### `--adaptivePredictionThreshold=INT-VALUE` Neighbouring attribute value difference that enables the use of direct predictor selection over the weighted average. If bitdepth is greater than 8, the threshold is scaled by 2**(bitDepth - 8). Applies to `transformType=0` only. ### `--predWeightBlending=0|1` When enabled, blends the distance derived weights of the three-neighbour predictor according to the relative distances between the neighbours. Applies to `transformType=0` only. ### `--direct_avg_predictor_disabled_flag=0|1` Controls the use of the neighbour average predictor when direct prediction is invoked. ### `--interComponentPredictionEnabled=0|1` Controls the use of an in-loop inter-component prediction of attribute residuals. When enabled, the secondary attribute residuals (e.g. 
red/blue) are predicted from the primary component (e.g. green). Applies to `transformType=0` and `attribute=colour` only. ### `--lastComponentPredictionEnabled=0|1` Controls the use of an in-loop inter-component prediction of attribute coefficients. When enabled, the coefficient of the last component (e.g. Cr) of the secondary attribute is predicted from the corresponding first component (e.g. Cb) according to a simple model. Applies to `transformType=2` and `attribute=colour` only. ### `--intraLoDSearchRange=INT-VALUE` Buffer range searched for nearest neighbours within the same level of detail. The value -1 configures a full-range search. ### `--interLoDSearchRange=INT-VALUE` Buffer range searched for nearest neighbours between different levels of detail. The value -1 configures a full-range search. ### `--max_num_direct_predictors=INT-VALUE` Maximum number of nearest neighbour candidates used in direct attribute prediction. ### `--lodDecimator=0|1|2` Controls the level-of-detail generation method: | Value | Description | |:-----:| ------------------------------------------------------- | | 0 | No decimation is performed | | 1 | Decimation by periodic lodSubsamplingPeriod | | 2 | Decimation by distance to lodSubsamplingPeriod centroid | ### `--intraLodPredictionSkipLayers=INT-VALUE` The number of detail levels where intra prediction is disabled, starting from the finest detail level. Applies to `transformType=0` only. | Value | Description | |:-----:| --------------------------------------- | | -1 | Disabled in all detail levels | | 0 | Enabled in all detail levels | | n | Disabled in n finest detail levels | ### `--aps_scalable_enabled_flag=0|1` Enable spatially scalable attribute encoding. The option is only valid when `transformType=2`, `positionQpMultiplierLog2=3`, `lodDecimator=0`, and `trisoupNodeSizeLog2=0`. ### `--max_neigh_range=INT-VALUE` Limits the distance between a point and the neighbours used for its prediction. 
The maximum distance is expressed in units of node diagonals and is scaled according to the current level of detail. ### `--levelOfDetailCount=INT-VALUE` Attribute's number of levels of detail. ### `--dist2=INT-VALUE` An initial squared distance used to generate successive levels of detail. When equal to zero, an initial value is automatically determined. ### `--dist2PercentileEstimate=FLOAT-VALUE` Percentile of per-point nearest neighbour distances used to estimate `dist2`. ### `--positionQuantizationScaleAdjustsDist2=0|1` Adjusts `dist2` according to `sequenceScale`. This option simplifies the specification of the per-attribute `dist2` parameter. The squared distance threshold used for generating levels-of-detail in attribute coding is dependent on the point cloud density and is therefore affected by geometry quantization. When this parameter is enabled, `dist2` values are scaled by `sequenceScale` squared, thereby allowing `dist2` to be specified as an intrinsic property of the source sequence. ### `--lodSubsamplingPeriod=INT-VALUE|INT-VALUE-LIST` A list of sampling periods used to generate successive levels of detail. ### `--canonical_point_order_flag=0|1` Controls the order used for attribute coding. The canonical (geometry decoding order) is usable only with LoD attribute coding and `levelOfDetailCount=0`. | Value | Description | |:-----:| ---------------------------------- | | 0 | Morton order | | 1 | Decoded geometry (canonical) order | ### `--spherical_coord_flag=0|1` Controls the conversion of point co-ordinates used in attribute coding from the Cartesian domain to a scaled spherical domain. ### `--attrSphericalMaxLog2=INT-VALUE` Override spherical co-ordinate normalization factor. This may be used to compensate any increased azimuth resolution when `predGeomAzimuthQuantization=1`. 
| Value | Description | |:-----:| ---------------------------------- | | 0 | Automatic calculation | | 1 | Override max value | Applies when `angularEnabled=1` and `predGeomAzimuthQuantization=1`. ### `--lod_neigh_bias=INT-VALUE-LIST` A set of three bias factors corresponding to the first, second and third geometry axes used to weight nearest neighbours during the LoD generation and weighting processes. The value `1,1,1` implies no bias. ### `--qp=INT-VALUE` Attribute's luma quantization parameter. ### `--qpChromaOffset=INT-VALUE` Attribute's chroma quantization parameter relative to luma. Only applies when `attribute=colour`. ### `--aps_slice_qp_deltas_present_flag=0|1` Enables signalling of per-slice QP values. ### `--qpLayerOffsetsLuma=INT-VALUE-LIST` Attribute's per layer luma QP offsets. A layer corresponds to a level-of-detail or RAHT transform block. ### `--qpLayerOffsetsChroma=INT-VALUE-LIST` Attribute's per layer chroma QP offsets. A layer corresponds to a level-of-detail or RAHT transform block. Only applies when `attribute=colour`. ### `--quantNeighWeight=INT-VALUE-LIST` Three factors used to derive quantization weights when `transformType=1`. The quantization weights are determined by recursively distributing each coefficient's weight to each of its neighbours, i, scaled by $\texttt{quantNeighWeight}[i] \div 256$. Attribute recolouring (encoder only) ------------------------------------ The following options configure the recolouring module, used when resampling a point cloud, or if the geometry coding process invents new points. ### `--recolourSearchRange=INT-VALUE` Attribute space search range for optimal attribute transfer. ### `--recolourNumNeighboursFwd=INT-VALUE` Number of source points used at the neighborhood of a target point to create the forward points list. ### `--recolourNumNeighboursBwd=INT-VALUE` Number of target points used at the neighborhood of a source point to create the backward points list. 
### `--recolourUseDistWeightedAvgFwd=0|1` Use distance-weighted average for forward list. ### `--recolourUseDistWeightedAvgBwd=0|1` Use distance-weighted average for backward list. ### `--recolourSkipAvgIfIdenticalSourcePointPresentFwd=0|1` Do not use forward points list if an identical source point exists. ### `--recolourSkipAvgIfIdenticalSourcePointPresentBwd=0|1` Do not use backward points list if an identical source point exists. ### `--recolourDistOffsetFwd=REAL-VALUE` Distance offset to avoid infinite weight when distance between a forward list point and the target is zero. ### `--recolourDistOffsetBwd=REAL-VALUE` Distance offset to avoid infinite weight when distance between a backward list point and target is zero. ### `--recolourMaxGeometryDist2Fwd=REAL-VALUE` Maximum allowed squared distance of a source point from target to get into the forward list. ### `--recolourMaxGeometryDist2Bwd=REAL-VALUE` Maximum allowed squared distance of a source point from target to get into the backward list. ### `--recolourMaxAttributeDist2Fwd=REAL-VALUE` Maximum allowed squared attribute value difference of a source point for inclusion in the forward list. ### `--recolourMaxAttributeDist2Bwd=REAL-VALUE` Maximum allowed squared attribute value difference of a source point for inclusion in the backward list.
35,671
38.243124
79
md
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/doc/README.usage.md
Using the codec =============== ``` ./tmc3 [--help] [-c config.cfg] [--parameter=value] ``` The encoder takes as input one or more PLY files describing a point cloud sequence with integer positions and, optionally, per-point integer colour and reflectance attributes. The output of the encoder is a binary bitstream encapsulated using the G-PCC annex-B format. Conversely, the decoder takes as input a compressed bitstream file in G-PCC annex-B format and produces one or more reconstructed PLY file with position and any present attribute values. The software may be configured using either command line arguments or from a configuration file specified using the `-c|--config=` option. Sample configuration files are provided in the cfg/ directory. The utility <scripts/gen-cfg.sh> may be used to generate per sequence and per rate point configuration files for a variety of common test conditions. Parameters are set by the last value encountered on the command line. Therefore if a setting is set via a configuration file, and then a subsequent command line parameter changes that same setting, the command line parameter value will be used.
1,153
37.466667
74
md
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/collect-tmc13.pl
#!/usr/bin/env perl
#
# Example driver showing how to use the log parser.  Compatible with
# the experiment directory structure produced by Makefile.tmc13-step.
#
# Usage:
#   collect-tmc13.pl <condition> <sequence> <variant> <base_path> <src_ply>
#
# Where:
#   <condition>  is a CTC test condition name
#   <sequence>   is the sequence name
#   <variant>    is the CTC test point variant
#   <base_path>  is the path used by MPEG::PCC::Parse::Experiment::Df
#   <src_ply>    is the source ply
#
use strict;
use FindBin;
use lib "$FindBin::Bin";
use MPEG::PCC::Parse::Experiment::Df;

# Union of all data column names seen across parsed result rows.
# NB: it seems pointless for a single file, but useful if this script
# were extended to process more than one file at a time.
my %seen_cols;

# One parsed result row (hashref) per processed file.
my @rows;

# To process multiple files, turn the following into a loop:
my ($condition, $sequence, $variant, $base_path, $src_ply) = @ARGV;

# Parse the results, recording the columns present before the
# identifying columns are attached.
my $row = readTmc3Results($base_path, $src_ply);
$seen_cols{$_} = undef for keys %$row;

$row->{config} = $condition;
$row->{sequence} = $sequence;
$row->{variant} = $variant;
push @rows, $row;

# Emit the collected rows as CSV: fixed identifying columns first,
# followed by the remaining data columns in sorted order.
my @header = (qw{config sequence variant}, sort keys %seen_cols);
print join(',', @header), "\n";
foreach my $row (@rows) {
  print join(',', @{$row}{@header}), "\n";
}
1,337
22.473684
75
pl
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/gen-cfg.md
Format and processing of cfg/*.yaml by `gen-cfg.pl` =================================================== All YAML config spec files are merged together (see merging rules) prior to processing. This has an important side-effect that two configuration categories with the same name will be merged together rather than being evaluated separately. The YAML config spec contains two top-level structures: `categories`, a set of configuration categories, each containing a set of sequence names and common encoder and decoder options. All sequences in a category will use the same common options. `sequences`, a set of sequence names, each describing common values used in the generation of the configuration files in all categories. The main use is to specify properties of the source data, such as its file location, or processing options specific only to the sequence. Configuration generation proceeds as follows: - For each category, a set of sequences are determined - For each sequence, a set of variants are determined - For each variant, configuration files are generated and written as: - `$prefix/$category/$sequence/$variant/encoder.cfg` - `$prefix/$category/$sequence/$variant/decoder.cfg` - `$prefix/$category/$sequence/$variant/pcerror.cfg` To generate an encoder, decoder or pcerror configuration, cfgOptions are gathered in the following order: - `encflags`/`decflags`/`pcerrorflags` from the global 'sequences' for the current sequence name - `encflags`/`decflags`/`pcerrorflags` for the current variant from the current category - `encflags`/`decflags`/`pcerrorflags` for the current variant from the current category-sequence - `encflags`/`decflags`/`pcerrorflags` set globally Semantics of a yaml-cfg-file ---------------------------- The following description of the YAML config spec uses the following conventions: - `.a=b` represents a map (associative array, dictionary, etc.,) with the key `a` and value `b`. 
YAML in-line style: `{ a: b }` - `[]` represents a list of values. YAML in-line style: `[ ... ]` - `$x` represents a value (the value may also be a key in a map). - `/` represents the top-level YAML document. ### Definition of $cfgOption A $cfgOption represents one of the following structures to generate a configuration option in the form `$key: $value` - `.$key=$value` — General case - `.$key=[].$variant=.$value` — Applies only to the given $variant ### Top-level definitions - `/.categories=.$categoryName=`... A configuration category - `/.sequences=.$sequenceName=`... A global sequence definition - `/.sequence-base-dir=$value` A global base directory that may be overridden by a sequences' `.base-dir` and `.base-norm-dir` values. - `/.pcerrorflags=[].$cfgOption` (optional) an ordered list of global options for pcerror.cfg - `/.encflags=[].$cfgOption` (optional) an ordered list of global options for encoder.cfg - `/.decflags=[].$cfgOption` (optional) an ordered list of global options for decoder.cfg ### Inside `/.sequences=.$sequenceName=`... - `.src=$value` The source PLY filename for encoding - `.src-dir=$value` (optional) The directory name containing the .src file - `.base-dir=$value` (optional) A path to a directory containing .src-dir - `.norm=$value` (optional) The source PLY filename with normals data - `.norm-dir=$value` (optional) The directory containing the .norm file - `.base-norm-dir=$value` (optional) A path to a directory containing `.norm-dir` - `.pcerrorflags=[].$cfgOption` (optional) an ordered list of sequence-global options for pcerror.cfg - `.encflags=[].$cfgOption` (optional) an ordered list of sequence-global options for encoder.cfg - `.decflags=[].$cfgOption` (optional) an ordered list of sequence-global options for decoder.cfg ### Inside `/.categories=.$categoryName=`... 
- `.encflags=[].$cfgOption` (optional) an ordered list of category-specific options for encoder.cfg - `.decflags=[].$cfgOption` (optional) an ordered list of category-specific options for decoder.cfg - `.pcerrorflags=[].$cfgOption` (optional) an ordered list of category-specific options for pcerror.cfg - `.sequences=...` A set of sequences to generate configurations for in the context of the current category ### Inside `/.categories=.$categoryName=.sequences=.$sequenceName=...` - `.encflags=[].$cfgOption` (optional) an ordered list of category-sequence-specific options for encoder.cfg - `.decflags=[].$cfgOption` (optional) an ordered list of category-sequence-specific options for decoder.cfg - `.pcerrorflags=[].$cfgOption` (optional) an ordered list of category-sequence-specific options for pcerror.cfg ## Merging rules Multiple YAML config spec files are recursively merged as follows: - src:* → dst:undef ⇒ assign src to dst - src:scalar → dst:scalar ⇒ replaced - src:hash → dst:hash ⇒ recursive merge of key-value pairs - src:list → dst:scalar ⇒ assign [src, dst] to dst - src:list → dst:list ⇒ assign [src, dst] to dst
5,123
32.058065
75
md
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/gen-cfg.pl
#!/usr/bin/env perl use 5.022; use Digest::MD5; use File::Path qw(make_path); use File::Basename qw(basename); use Getopt::Long; use List::Util qw{pairmap}; use List::MoreUtils qw{firstres}; use Module::Load::Conditional qw(can_load); use Pod::Usage; use YAML '0.50'; use strict; =head1 NAME gen-cfg.pl - Generate experiment configuration from yaml specification =head1 SYNOPSIS gen-cfg.pl [options] [yaml-config-spec ...] =head1 OPTIONS =over 4 =item B<--prefix>=dir Sets the output path for the generated configuration tree. =item B<--output-src-glob-sh> =item B<--no-output-src-glob-sh> (default) Do not generate src-glob.sh files describing source locations =item B<--skip-sequences-without-src> (default) =item B<--no-skip-sequences-without-src> Do not generate configuration files for sequences that have an empty 'src' field in the yaml specification. This option is permits a later yaml spec to effectively remove a sequence from being used in an experiment. It may be useful to disable this option when generating config files when the source location of the input data is not known. =item B<--only-seqs>=seq1[:seq2][:...] Generate configuration files for only the named sequences. =item B<--exclude-seqs>=seq1[:seq2][:...] Do not generate configuration files for the named sequences. =item B<--batch-template>=script.tt Generate a script from the template script.tt. The output is written to the current working directory. =item B<--experiment-name>=name A generic name that may be referenced by a batch template for any purpose. =back =head1 Config specification files =cut ## # Command line processing my $do_help = ''; my $output_src_glob_sh = 0; my $skip_sequences_without_src = 1; my $experiment_name = ''; my $batch_template = ''; my $prefix = '.'; my $only_seqs = ''; my $exclude_seqs = ''; GetOptions( 'help' => \$do_help, 'prefix=s' => \$prefix, 'output-src-glob-sh!' => \$output_src_glob_sh, 'skip-sequences-without-src!' 
=> \$skip_sequences_without_src, 'batch-template=s' => \$batch_template, 'experiment-name=s' => \$experiment_name, 'only-seqs=s' => \$only_seqs, 'exlude-seqs=s' => \$exclude_seqs, ); ## # display help text and exit if asked, or if no config is provided pod2usage(0) if $do_help; pod2usage(1) unless @ARGV; # sanitise command line arguments my @only_seqs = split /:/, $only_seqs; my @exclude_seqs = split /:/, $exclude_seqs; ## # load all yaml snippets and merge into a single description # - disable automatic blessing in YAML >= 1.25 # - fallback to automatic blessing in YAML <= 1.24 # NB: yaml < 1.24 cannot use yaml_load with inline scalars # $YAML::LoadBlessed = 0; $YAML::TagClass->{conditional} = 'conditional'; delete $conditional::{yaml_load} unless eval { YAML->VERSION(1.24) } ; my @origins = @ARGV; my %cfg; while (@ARGV) { my $fname = shift @ARGV; my $cfg = YAML::LoadFile($fname) or die "$fname: $!"; merge(\%cfg, $cfg); } ## # dump the merged configuration (allows reproduction) # YAML::DumpFile("$prefix/config-merged.yaml", \%cfg); ## # generate encoder/decoder configuration files # # list of configured jobs my @jobs; # this just makes later code look simpler my $cfg = \%cfg; # iterate over each configuration and described sequences foreach my $cat_name (sort keys %{$cfg->{categories}}) { my $cat = $cfg->{categories}{$cat_name}; foreach my $seq_name (sort keys %{$cat->{sequences}}) { next if @only_seqs && !grep {$_ eq $seq_name} @only_seqs; next if grep {$_ eq $seq_name} @exclude_seqs; my $cat_seq = $cat->{sequences}{$seq_name}; my $seq = $cfg->{sequences}{$seq_name}; unless (exists $seq->{gops}) { genSeqVariants($cat, $cat_name, $cat_seq, $seq_name, $seq, $seq); next; } # split sequence into groups of pictures for parallel execution my $gop_idx = 0; foreach my $gop (@{$seq->{gops}}) { my $gop_idx = sprintf "%03d", $gop_idx++; my $gop_name = "${seq_name}_gop${gop_idx}"; genSeqVariants($cat, $cat_name, $cat_seq, $gop_name, $gop, $seq); } } } ## # write out 
batch-job configuration # if ($batch_template) { can_load(modules => {'Template' => undef}) || die $Module::Load::Conditional::ERROR; my $output = "$prefix/" . basename($batch_template,'.tt'); my $vars = { jobs => \@jobs, experiment_name => $experiment_name, }; my $tt = Template->new({ RELATIVE => 1, # allow relative paths as $batch_template ABSOLUTE => 1, # allow absolute paths too }) || die "$Template::ERROR\n"; $tt->process($batch_template, $vars, $output) || die $tt->error(), "\n"; } sub genSeqVariants { my ($cat, $cat_name, $cat_seq, $seq_name, $gop, $seq) = @_; # if sequence source isn't defined at top level, skip if ($skip_sequences_without_src) { next unless defined $gop->{src}; } # generate the list of variants (if any) my @variants = List::MoreUtils::uniq ( # $cat.sequences.$name.$variant: (grep { my $ref = $cat_seq->{$_}; ref $ref eq 'HASH' and (exists $ref->{encflags} || exists $ref->{decflags}) } keys %$cat_seq), # $cat.sequences.$name.encflags[].$param.$variant: variants_from_node($cat_seq->{encflags}), # $cat.encflags[].$param.$variant variants_from_node($cat->{encflags}), # $seq.$name.encflags[].$param.$variant variants_from_node($seq->{encflags}), ); # handle the case of no variants: single case with defaults push @variants, undef unless @variants; # for each variant, derive the encoder options # NB: in the case of no variants, $var = undef foreach my $var (sort @variants) { my $cfgdir = join '/', grep {defined} ($prefix,$cat_name,$seq_name,$var); print "$cfgdir\n"; make_path($cfgdir); push @jobs, "$cfgdir/"; ## # input sequence file name if ($gop->{src} && $output_src_glob_sh) { my $src_seq = join '/', grep {defined} ( (List::MoreUtils::firstval {defined} $seq->{'base-dir'}, $cfg->{'sequence-base-dir'}), $seq->{'src-dir'}, $gop->{src}, ); open my $fd, ">", "$cfgdir/src-glob.sh"; print $fd "$src_seq\n"; } if ($gop->{norm} && $output_src_glob_sh) { my $norm_seq = join '/', grep {defined} ( (List::MoreUtils::firstval {defined} 
$seq->{'base-norm-dir'}, $seq->{'base-dir'}, $cfg->{'sequence-base-dir'}), $seq->{'norm-dir'}, $gop->{norm}, ); open my $fd, ">", "$cfgdir/norm-glob.sh"; print $fd "$norm_seq\n"; } ## # make dictionary for any variable substitutions my $dict = dict_from_context($var, $cat_seq, $gop, $seq); ## # encoder configuration my @encflags = ( params_from_node($dict, $seq->{encflags}), params_from_node($dict, $cat->{encflags}, $var), params_from_node($dict, $cat_seq->{encflags}, $var), params_from_node($dict, $cat_seq->{$var}{encflags}), params_from_node($dict, $cfg->{encflags}), ); write_cfg("$cfgdir/encoder.cfg", \@encflags); ## # decoder configuration my @decflags = ( params_from_node($dict, $seq->{decflags}), params_from_node($dict, $cat->{decflags}, $var), params_from_node($dict, $cat_seq->{decflags}, $var), params_from_node($dict, $cat_seq->{$var}{decflags}), params_from_node($dict, $cfg->{decflags}), ); write_cfg("$cfgdir/decoder.cfg", \@decflags); ## # pcerror configuration my @pcerrorflags = ( params_from_node($dict, $seq->{pcerrorflags}), params_from_node($dict, $cat->{pcerrorflags}), params_from_node($dict, $cat_seq->{pcerrorflags}, $var), params_from_node($dict, $cat_seq->{$var}{pcerrorflags}), params_from_node($dict, $cfg->{pcerrorflags}), ); write_cfg("$cfgdir/pcerror.cfg", \@pcerrorflags) if (@pcerrorflags); } } ############################################################################# # utilities ## # keywise merge $src into $dst, following the following merge rules: # - * -> undef = copy # - scalar -> scalar = replace # - hash -> hash = recurse # - list -> scalar = merge unique items (scalars only) # - list -> list = merge unique items (scalars only) sub merge { my ($dst, $src) = @_; unless (defined $dst) { $$dst = $$src; return; } ## # overwrite existing scalar unless (ref $src) { $$dst = $src; return; } if (ref $src eq 'HASH') { foreach my $key (keys %$src) { ## # copy sub-tree if key does not exist unless (exists $$dst{$key}) { $$dst{$key} = 
$$src{$key}; next; } ## # recurse to merge sub-tree if (ref $$dst{$key}) { merge($$dst{$key}, $$src{$key}); } else { merge(\$$dst{$key}, $$src{$key}); } } return; } ## # merge arrays # -- this is really only for an array of scalars if (ref $src eq 'ARRAY') { my @vals; push @vals, $$dst if ref $dst eq 'SCALAR'; push @vals, @$dst if ref $dst eq 'ARRAY'; push @vals, @$src; $$dst = [List::MoreUtils::uniq(@vals)] if ref $dst eq 'SCALAR'; @$dst = List::MoreUtils::uniq(@vals) if ref $dst eq 'ARRAY'; return; } } sub variants_from_node { my ($node) = @_ or return (); return map {keys %$_} grep {ref $_ eq 'HASH'} map {values %$_} grep {ref $_ eq 'HASH'} map {ref $_ eq 'ARRAY' ? @$_ : $_} @{$node}; } sub params_from_node { my ($dict, $node, $variant) = @_; return () unless $node; # add a key-value pair to the array of parameters, first evaluating # the value to expand any variable substitutions sub push_eval(+$$$) { my ($aref, $dict, $key, $value) = @_; die "Not an array or arrayref" unless ref $aref eq 'ARRAY'; my ($evald, @substs) = eval_expr($value, $dict); if (0 && @substs) { # substitution happened => add comment annotations push @$aref, [""], ["# $key = $value, with " . 
join ", ", pairmap {"$a = $b"} @substs]; } push @$aref, [$key, $evald]; (); } my @params; my @todo = @$node; while (my $item = shift @todo) { # an unformatted string (not key:value) unless (ref $item) { push @params, [$item]; next; } if (ref $item eq 'HASH') { while (my ($key, $value) = each %$item) { unless (ref $value) { # key:value without variants push_eval @params, $dict, $key, $value; next; } if (ref $value eq 'HASH') { # key:value with variants push_eval @params, $dict, $key, $value->{$variant} if exists $value->{$variant}; next; } warn "unhandled node for $value"; } } if (ref $item eq 'ARRAY') { # if the first item of the array is a conditional, evaluate # it and conditionally add the array if (exists $item->[0] && ref $item->[0] eq 'conditional') { my ($evald, undef) = eval_expr(${$item->[0]}, $dict); next unless eval $evald; push @params, [""]; } unshift @todo, @$item; next; } } return @params; } ## # Build a dictionary of all variables that apply to the current variant # from the given context. sub dict_from_context { my ($variant, @context) = @_; my %dict; # discover all variables with earlier contexts having priority foreach my $context (reverse @context) { while (my ($var, $val) = each %$context) { my $type = ref $val; if (!$type) { # scalar, applies to all variants $dict{$var} = $val; } if ($type eq 'HASH') { # see if variant is specified $dict{$var} = $val->{$variant} if exists $val->{$variant}; } } } return \%dict; } ## # Return the exansion of $str given a dictionary of variables. 
sub eval_expr { my ($str, $dict) = @_; my @substs; # first find all variables and substitute their values # - substitute an empty string if not found while ($str =~ m/\$\{([^}]+)\}/gc) { my $var = $1; my $var_start = $-[0]; my $var_len = $+[0] - $-[0]; my $subst = "(undef)"; $subst = $dict->{$var} if exists $dict->{$var}; substr $str, $var_start, $var_len, $subst; pos $str = $var_start + length($subst); push @substs, ($var, $subst); } # finally evaluate any eval expressions pos $str = 0; while ($str =~ m/\$eval\{([^}]+)\}/gc) { my $expr = $1; my $expr_start = $-[0]; my $expr_len = $+[0] - $-[0]; my $val = eval "use POSIX qw{round signbit}; use List::Util qw{min max}; no strict; $expr"; if ($@) { print STDERR "err: $@\n$expr\n"; } substr $str, $expr_start, $expr_len, $val; pos $str = $expr_start + length($val); } return ($str, @substs); } ## # Print configuration @$opts, to $fd; with one entry per line and where # each entry in @$opts is either a [key, value] pair to be joined with # ": ", or just [key]. sub print_cfg { my ($fd, $opts) = @_; print $fd "# This file was automatically generated from:\n"; print $fd "# $_\n" foreach (@origins); local $\ = "\n"; foreach my $opt (@$opts) { print $fd join(": ", @$opt); } } ## # print config to file iff it differs from file's contents. 
# (ie, don't touch mtime if unchanged) sub write_cfg { my ($filename, $flags) = @_; # format config in memory my $new_cfg = ""; open my $fd, ">:encoding(utf8)", \$new_cfg; print_cfg($fd, $flags); close $fd; # hash it my $md5_new = Digest::MD5->new; $md5_new->add($new_cfg); my $md5_old = Digest::MD5->new; if (-f $filename) { open $fd, "<:encoding(utf8)", $filename; $md5_old->addfile($fd); close $fd; } if ($md5_new->digest ne $md5_old->digest) { print "writing $filename\n"; open $fd, ">:bytes", $filename; print $fd $new_cfg; close $fd; } } ## # a helper class to permit secure loading of untrusted yaml documents package conditional; sub yaml_load { my ($class, $node) = @_; unless (ref($node) eq 'SCALAR') { die '!conditional tag must be used with a scalar'; } return bless $node if ref($node) eq 'SCALAR'; }
13,526
23.416968
93
pl
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/gen-cfg.sh
#!/bin/bash # # Generate a configuration tree in $PWD from YAML files in the same # directory. set -e shopt -s nullglob script_dir="$(dirname $0)" geom="octree" attr="predlift" src_cfg_dir="" while (( $# )); do case $1 in --octree) geom="octree" ;; --predgeom) geom="predgeom" ;; --trisoup) geom="trisoup" ;; --raht) attr="raht" ;; --pred-lift) attr="predlift" ;; --all) all=1 ;; --cfgdir=*) src_cfg_dir="${1#--cfgdir=}/" ;; --) shift; break ;; --help|*) echo -e "usage:\n $0\n" \ " [[--octree|--predgeom|--trisoup] [--raht|--pred-lift] | --all]\n" \ " [--cfgdir=<dir>]" exit 1 esac shift; done extra_args=("$@") ## # NB: it is important that the configs in each config set are # capable of being merged together by gen-cfg.pl. Ie, no two # configs may have different definitions of one category. cfg_octree_predlift=( octree-liftt-ctc-lossless-geom-lossy-attrs.yaml octree-liftt-ctc-lossy-geom-lossy-attrs.yaml octree-predt-ctc-lossless-geom-lossless-attrs.yaml octree-predt-ctc-lossless-geom-nearlossless-attrs.yaml ) cfg_octree_raht=( octree-raht-ctc-lossless-geom-lossy-attrs.yaml octree-raht-ctc-lossy-geom-lossy-attrs.yaml ) cfg_predgeom_predlift=( "${cfg_octree_predlift[@]}" cfg-predgeom.yaml ) cfg_predgeom_raht=( "${cfg_octree_raht[@]}" cfg-predgeom.yaml ) cfg_trisoup_predlift=( trisoup-liftt-ctc-lossy-geom-lossy-attrs.yaml ) cfg_trisoup_raht=( trisoup-raht-ctc-lossy-geom-lossy-attrs.yaml ) do_one_cfgset() { local geom=$1 local attr=$2 outdir="$geom-$attr/" mkdir -p "$outdir" cfgset="cfg_${geom}_${attr}[@]" for f in ${!cfgset} do echo "${src_cfg_dir}$f -> $outdir" ... 
done # NB: specifying extra_args at the end does not affect option # processing since gen-cfg.pl is flexible in argument positions $script_dir/gen-cfg.pl \ --prefix="$outdir" --no-skip-sequences-without-src \ "${!cfgset/#/${src_cfg_dir}}" \ "${src_cfg_dir}sequences-cat1.yaml" \ "${src_cfg_dir}sequences-cat3.yaml" \ "${extra_args[@]}" rm -f "$outdir/config-merged.yaml" } if [[ "$all" != "1" ]] then do_one_cfgset "$geom" "$attr" else do_one_cfgset "octree" "predlift" do_one_cfgset "octree" "raht" do_one_cfgset "predgeom" "predlift" do_one_cfgset "predgeom" "raht" do_one_cfgset "trisoup" "predlift" do_one_cfgset "trisoup" "raht" fi
2,311
20.811321
75
sh
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/MPEG/PCC/CSV.pm
package MPEG::PCC::CSV; use strict; use Text::CSV; use Exporter qw(import); our @EXPORT = qw( LoadFile ); sub LoadFile { my ($name) = @_; my $fh; if (ref $name eq 'GLOB') { $fh = $name; } else { open $fh, "<:encoding(utf8)", $name or die "$name: $!"; } # drop any comment lines at the start my $comment = ""; my $first_non_comment_line; while (my $line = <$fh>) { unless ($line =~ m{^\#}) { # Avoiding dependency from IO::Unread::unread $fh, $line; $first_non_comment_line = $line; last; } $comment .= $line; } my $csv = Text::CSV->new({binary => 1}); my $cols = do { open my $tmpfh, '<', \$first_non_comment_line or die; $csv->getline($tmpfh); }; # handle empty file case return wantarray ? ([], [], $comment) : [] unless defined $cols; for (my $i = 0; $i < scalar @$cols; $i++) { # add fake column names if any are missing next if defined $cols->[$i] && !($cols->[$i] =~ m{^\s*$}); $cols->[$i] = "_$i"; } $csv->column_names($cols); my $rows = $csv->getline_hr_all($fh); return wantarray ? ($rows, $cols, $comment) : $rows; } 1;
1,091
18.854545
65
pm
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/MPEG/PCC/Collate.pm
package MPEG::PCC::Collate; use utf8; use strict; use warnings; use MPEG::PCC::Util qw{uniq}; use List::Util qw{min max pairmap pairgrep}; use Scalar::Util qw{looks_like_number}; use Exporter qw(import); our @EXPORT = qw( accumulateResult summariseResult ); our @out_order_cols = qw{ config sequence variant framenum enc.status dec.status dec.enc.match enc.wtime enc.utime dec.wtime dec.utime enc.ext.utime dec.ext.utime enc.ext.maxrssk dec.ext.maxrssk enc.ext.bits enc.bits enc.ext.bpp enc.bpp enc.bpp.geometry enc.bpp.colour enc.bpp.reflectance enc.bits.geometry enc.bits.colour enc.bits.reflectance enc.bits.frameindexs src.numpoints dec.numpoints src.framecount dec.framecount dec.y-psnr dec.cb-psnr dec.cr-psnr dec.reflectance-psnr dec.post-recolour.y-psnr dec.post-recolour.cb-psnr dec.post-recolour.cr-psnr dec.post-recolour.reflectance-psnr dec.y-hpsnr dec.cb-hpsnr dec.cr-hpsnr dec.reflectance-hpsnr dec.d1-psnr dec.d2-psnr }; our @fold_mean_geom = qw{ }; our @fold_mean_arith = qw{ enc.bpp enc.bpp.geometry enc.bpp.colour enc.bpp.reflectance dec.y-psnr dec.cb-psnr dec.cr-psnr dec.reflectance-psnr dec.d1-psnr dec.d2-psnr dec.post-recolour.y-psnr dec.post-recolour.cb-psnr dec.post-recolour.cr-psnr dec.post-recolour.reflectance-psnr }; our @fold_sum = (@fold_mean_arith, qw{ src.framecount dec.framecount enc.ext.utime enc.wtime enc.utime dec.ext.utime dec.wtime dec.utime enc.ext.bits enc.bits enc.bits.geometry enc.bits.colour enc.bits.reflectance enc.bits.frameindexs src.numpoints dec.numpoints }); our @fold_max = qw{ enc.ext.maxrssk dec.ext.maxrssk dec.y-hpsnr dec.cb-hpsnr dec.cr-hpsnr dec.reflectance-hpsnr }; our @fold_uniq = qw{ enc.status dec.status dec.enc.match }; ## # accumulate results for frame into %$result_acc # sub accumulateResult { my ($result_acc, $frame) = @_; # accumulate additively foreach my $key (@fold_sum) { next unless exists $frame->{$key} && $frame->{$key} ne ''; $$result_acc{"count.$key"} = 1 + ($$result_acc{"count.$key"} || 0); $$result_acc{$key} = 
$frame->{$key} + ($$result_acc{$key} || 0); delete $frame->{$key}; } # accumulate maximum foreach my $key (@fold_max) { next unless exists $frame->{$key}; next unless $frame->{$key}; $$result_acc{$key} = max($frame->{$key},$$result_acc{$key} || 0); delete $frame->{$key}; } # accumulate log for geometric mean calculation foreach my $key (@fold_mean_geom) { next unless exists $frame->{$key}; if (!$frame->{$key} > 0) { $$result_acc{"invalid.$key"} = 1; next; } $$result_acc{"count.$key"} = 1 + ($$result_acc{"count.$key"} || 0); $$result_acc{$key} = log($frame->{$key}) + ($$result_acc{$key} || 0); delete $frame->{$key}; } # accumulate unique values foreach my $key (@fold_uniq) { next unless exists $frame->{$key}; $$result_acc{$key} = [uniq($frame->{$key}, @{$$result_acc{$key}})]; delete $frame->{$key}; } $result_acc; } ## # reduce the accumulation state to summary results # sub summariseResult { my ($result_acc) = @_; # geometric means (relies on sum of logs) foreach my $key (@fold_mean_geom) { next unless exists $$result_acc{$key}; if ($$result_acc{"invalid.$key"}) { $$result_acc{$key} = 'NaN'; next; } $$result_acc{$key} = exp($$result_acc{$key} / $$result_acc{"count.$key"}); } # arithmetic mean foreach my $key (@fold_mean_arith) { $$result_acc{$key} = !$$result_acc{"count.$key"} ? undef : $$result_acc{$key} / $$result_acc{"count.$key"}; } # convert to friendly string foreach my $key (@fold_uniq) { $$result_acc{$key} = join ':', @{$$result_acc{$key}} if defined $$result_acc{$key}; } # ratio from totals $$result_acc{'enc.ext.bpp'} = $$result_acc{'enc.ext.bits'} / $$result_acc{'src.numpoints'} if $$result_acc{'src.numpoints'}; # tidyup any formatting map { if (looks_like_number($_)) { $_ = sprintf "%f", $_; s/\.?0*$//; } } values %$result_acc; return $result_acc; } 1;
3,962
24.242038
71
pm
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/MPEG/PCC/Util.pm
package MPEG::PCC::Util; use strict; use warnings; use Exporter qw(import); our @EXPORT_OK = qw(uniq); ## # From List::MoreUtils::uniq, licensed as follows: # > This library is free software; you can redistribute it and/or modify # > it under the same terms as Perl itself, either Perl version 5.8.4 # > or, at your option, any later version of Perl 5 you may have # > available. sub uniq (@) { my %seen = (); my $k; my $seen_undef; grep { defined $_ ? not $seen{$k = $_}++ : not $seen_undef++ } @_; } 1;
512
21.304348
72
pm
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/MPEG/PCC/Parse/PcError.pm
package MPEG::PCC::Parse::PcError; use strict; use warnings; use Exporter qw(import); our @EXPORT = qw(readDistortion); ## # mapping table for readDistortion our %readDistortion_key2key = ( 'h. (p2point)' => 'd1-hmse', # hausdorff error 'h.,PSNR (p2point)' => 'd1-hpsnr', # hausdorff error 'h. (p2plane)' => 'd2-hmse', # hausdorff error 'h.,PSNR (p2plane)' => 'd2-hpsnr', # hausdorff error 'mseF (p2point)' => 'd1-mse', 'mseF,PSNR (p2point)' => 'd1-psnr', 'mseF (p2plane)' => 'd2-mse', 'mseF,PSNR (p2plane)' => 'd2-psnr', 'c[0], F' => 'y-mse', 'c[1], F' => 'cb-mse', 'c[2], F' => 'cr-mse', 'c[0],PSNRF' => 'y-psnr', 'c[1],PSNRF' => 'cb-psnr', 'c[2],PSNRF' => 'cr-psnr', 'r, F' => 'reflectance-mse', 'r,PSNR F' => 'reflectance-psnr', 'h.c[0], F' => 'y-hmse', 'h.c[1], F' => 'cb-hmse', 'h.c[2], F' => 'cr-hmse', 'h.c[0],PSNRF' => 'y-hpsnr', 'h.c[1],PSNRF' => 'cb-hpsnr', 'h.c[2],PSNRF' => 'cr-hpsnr', 'h.r, F' => 'reflectance-hmse', 'h.r,PSNR F' => 'reflectance-hpsnr', ); ## # parse output of pc_error sub readDistortion { my ($file, $key_prefix) = @_; open my $fh, '<', $file or return {}; my %result; # skip over all the preamble, while (<$fh>) { if (m{^PCC quality measurement software, version (.*)}) { $result{"$key_prefix.dmetric.version"} = $1; next; } last if m{^3. Final \(symmetric\).}; } # read in the record our %readDistortion_key2key; while (<$fh>) { chomp; # change in indentation breaks the record last unless m{^ }; s/^\s*//; my ($key, $val) = split /\s*:\s*/; $result{$key_prefix.$readDistortion_key2key{$key}} = $val; } return \%result; } 1;
1,822
24.676056
60
pm
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/MPEG/PCC/Parse/Ply.pm
package MPEG::PCC::Parse::Ply; use strict; use warnings; use Exporter qw(import); our @EXPORT = qw(readPly); ## # cache of ply data to reduce number of lookups our %ply_cache; ## # parse ply file for interesting parameters sub readPly { my ($file) = @_; our %ply_cache; return $ply_cache{$file} if exists $ply_cache{$file}; open my $fh, '<', $file or return (); # check it is a ply file return () unless (<$fh> =~ m{^ply}); my $numpoints; while (<$fh>) { chomp; # avoid the data section last if m{^end_header}; if (m{^element vertex (\d+)}) { $numpoints = $1; next; } } return $ply_cache{$file} = $numpoints; } 1;
644
15.538462
58
pm
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/MPEG/PCC/Parse/Time.pm
package MPEG::PCC::Parse::Time; use strict; use warnings; use Exporter qw(import); our @EXPORT = qw(readTime); ## # parse output of /bin/time. # returns (user_time, maxrss) sub readTime { my ($file) = @_; open my $fh, '<', $file or return ('?'); chomp (my $line = <$fh>); my $utime; my $maxrssk; foreach (split / /, $line) { if (m{^(\d+\.\d+)user$}) { $utime = $1; next; } if (m{^(\d+)maxresident}) { $maxrssk = $1; next; } } return ($utime, $maxrssk); } 1;
479
15.551724
52
pm
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/MPEG/PCC/Parse/Tmc3.pm
package MPEG::PCC::Parse::Tmc3; use strict; use warnings; use Exporter qw(import); our @EXPORT = qw(readEncLog readDecLog); ## # parse output of encoder log sub readEncLog { my ($file) = @_; open my $fh, '<', $file or return {}; my %result; while (<$fh>) { chomp; if (m{^(positions|colors|reflectances|\w+) bitstream size (\d+) B \((\d+(\.\d+(e[+-]\d+)?)?) bpp\)}) { my %map = ( positions => 'geometry', colors => 'colour', reflectances => 'reflectance', ); my $key = $map{$1} || $1; no warnings; $result{"enc.bits.$key"} += $2 * 8; $result{"enc.bpp.$key"} += $3; next; } if (m{^Total bitstream size (\d+) B}) { $result{'enc.bits'} = $1 * 8; next; } if (m{^Processing time \(wall\): (\d+(\.\d+)?) s}) { $result{'enc.wtime'} = $1; next; } if (m{^Processing time \(user\): (\d+(\.\d+)?) s}) { $result{'enc.utime'} = $1; next; } } return \%result; } ## # parse output of decoder log sub readDecLog { my ($file) = @_; open my $fh, '<', $file or return {}; my %result; while (<$fh>) { chomp; if (m{^Processing time \(wall\): (\d+(\.\d+)?) s}) { $result{'dec.wtime'} = $1; next; } if (m{^Processing time \(user\): (\d+(\.\d+)?) s}) { $result{'dec.utime'} = $1; next; } } return \%result; } 1;
1,309
15.794872
104
pm
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/MPEG/PCC/Parse/Utils.pm
package MPEG::PCC::Parse::Utils; use strict; use warnings; use Exporter qw(import); our @EXPORT = qw(readFileFirstLine); ## # cat the first line of a file. sub readFileFirstLine { my ($file) = @_; open my $fh, '<', $file or return (); chomp (my $line = <$fh>); return $line; } 1;
290
13.55
36
pm
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/scripts/MPEG/PCC/Parse/Experiment/Df.pm
package MPEG::PCC::Parse::Experiment::Df; use strict; use warnings; use MPEG::PCC::Parse::Tmc3; use MPEG::PCC::Parse::Time; use MPEG::PCC::Parse::Utils; use MPEG::PCC::Parse::Ply; use MPEG::PCC::Parse::PcError; use Exporter qw(import); our @EXPORT = qw( readTmc3Results readTmc3ResultsOneFrame readTmc3ResultsOneBin ); ## # One frame per file mode (ie not sequence encoding) sub readTmc3Results { my ($base_path, $src_file) = @_; my ($frame) = $src_file =~ m{/([^/]+)$}; my $ret_ply = readTmc3ResultsOneFrame(@_); my $ret_bin = readTmc3ResultsOneBin(@_); return {%$ret_ply, %$ret_bin, frame => $frame}; } ## # process a binary, allowing results to be aggregated sub readTmc3ResultsOneBin { my ($base_path) = @_; my $file_bytes = -s "$base_path.bin"; my ($enc_utime, $enc_maxrssk) = readTime("$base_path.bin.time"); my ($dec_utime, $dec_maxrssk) = readTime("$base_path.bin.decoded.time"); my $enc_log = readEncLog("$base_path.bin.log"); my $dec_log = readDecLog("$base_path.bin.decoded.log"); my $enc_status = readFileFirstLine("$base_path.bin.status"); my $dec_status = readFileFirstLine("$base_path.bin.decoded.status"); my %ret = ( "enc.ext.bits" => $file_bytes * 8, "enc.ext.utime" => $enc_utime, "dec.ext.utime" => $dec_utime, "enc.ext.maxrssk" => $enc_maxrssk, "dec.ext.maxrssk" => $dec_maxrssk, "enc.status" => $enc_status, "dec.status" => $dec_status, %$enc_log, %$dec_log, ); return \%ret; } ## # process a frame, allowing results to be aggregated sub readTmc3ResultsOneFrame { my ($base_path, $src_file) = @_; my ($num_src_points) = readPly("$src_file"); my ($num_dec_points) = readPly("$base_path.bin.decoded.ply"); my $distortion_e2e = readDistortion("$base_path.bin.decoded.pc_error", "dec."); my $distortion_prc = readDistortion( "$base_path.bin.decoded.pc_error_postrecolour", "dec.post-recolour."); my ($enc_md5,undef) = split / /, readFileFirstLine("$base_path.bin.ply.md5") // ""; my ($dec_md5,undef) = split / /, readFileFirstLine("$base_path.bin.decoded.ply.md5") // ""; my 
$dec_enc_match = "mismatch"; if (!$enc_md5 || !$dec_md5) { $dec_enc_match = "missing"; } elsif ($enc_md5 eq $dec_md5) { $dec_enc_match = "ok" } my %ret = ( "src.numpoints" => $num_src_points, "src.framecount" => 1, "dec.numpoints" => $num_dec_points, "dec.framecount" => 1, "dec.enc.match" => $dec_enc_match, %$distortion_e2e, %$distortion_prc, ); return \%ret; }
2,460
23.366337
73
pm
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/Attribute.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2019, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <memory> #include "hls.h" #include "PayloadBuffer.h" #include "PCCPointSet.h" #include "entropy.h" namespace pcc { //============================================================================ class AttributeContexts; //============================================================================ class AttributeDecoderIntf { public: virtual ~AttributeDecoderIntf(); virtual void decode( const SequenceParameterSet& sps, const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, int geom_num_points_minus1, int minGeomNodeSizeLog2, const char* payload, size_t payloadLen, AttributeContexts& ctxtMem, PCCPointSet3& pointCloud) = 0; // Indicates if the attribute decoder can decode the given aps virtual bool isReusable( const AttributeParameterSet& aps, const AttributeBrickHeader& abh) const = 0; }; //---------------------------------------------------------------------------- std::unique_ptr<AttributeDecoderIntf> makeAttributeDecoder(); //============================================================================ class AttributeEncoderIntf { public: virtual ~AttributeEncoderIntf(); virtual void encode( const SequenceParameterSet& sps, const AttributeDescription& desc, const AttributeParameterSet& attr_aps, AttributeBrickHeader& abh, AttributeContexts& ctxtMem, PCCPointSet3& pointCloud, PayloadBuffer* payload) = 0; // Indicates if the attribute decoder can decode the given aps virtual bool isReusable( const AttributeParameterSet& aps, const AttributeBrickHeader& abh) const = 0; }; //---------------------------------------------------------------------------- std::unique_ptr<AttributeEncoderIntf> makeAttributeEncoder(); //============================================================================ int estimateDist2( const PCCPointSet3& cloud, int samplingPeriod, int searchRange, float percentileEstimate); //============================================================================ } // namespace pcc
3,945
33.313043
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/AttributeCommon.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2019, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "AttributeCommon.h" #include "PCCTMC3Common.h" namespace pcc { //============================================================================ // AttributeLods methods void AttributeLods::generate( const AttributeParameterSet& aps, const AttributeBrickHeader& abh, int geom_num_points_minus1, int minGeomNodeSizeLog2, const PCCPointSet3& cloud) { _aps = aps; _abh = abh; if (minGeomNodeSizeLog2 > 0) assert(aps.scalable_lifting_enabled_flag); buildPredictorsFast( aps, abh, cloud, minGeomNodeSizeLog2, geom_num_points_minus1, predictors, numPointsInLod, indexes); assert(predictors.size() == cloud.getPointCount()); for (auto& predictor : predictors) { predictor.computeWeights(); if (aps.attr_encoding == AttributeEncoding::kPredictingTransform) if (aps.pred_weight_blending_enabled_flag) predictor.blendWeights(cloud, indexes); } } //---------------------------------------------------------------------------- bool AttributeLods::isReusable( const AttributeParameterSet& aps, const AttributeBrickHeader& abh) const { // No LoDs cached => can be reused by anything if (numPointsInLod.empty()) return true; // If the other aps doesn't use LoDs, it is compatible. // Otherwise, if both use LoDs, check each parameter if (!(_aps.lodParametersPresent() && aps.lodParametersPresent())) return true; // NB: the following comparison order needs to be the same as the i/o // order otherwise comparisons may involve undefined values if ( _aps.num_pred_nearest_neighbours_minus1 != aps.num_pred_nearest_neighbours_minus1) return false; if (_aps.inter_lod_search_range != aps.inter_lod_search_range) return false; if (_aps.intra_lod_search_range != aps.intra_lod_search_range) return false; if (_aps.num_detail_levels_minus1 != aps.num_detail_levels_minus1) return false; if (_aps.lodNeighBias != aps.lodNeighBias) return false; // until this feature is stable, always generate LoDs. 
if (_aps.scalable_lifting_enabled_flag || aps.scalable_lifting_enabled_flag) return false; if (_aps.lod_decimation_type != aps.lod_decimation_type) return false; if (_aps.dist2 + _abh.attr_dist2_delta != aps.dist2 + abh.attr_dist2_delta) return false; if (_aps.lodSamplingPeriod != aps.lodSamplingPeriod) return false; if ( _aps.intra_lod_prediction_skip_layers != aps.intra_lod_prediction_skip_layers) return false; if (_aps.canonical_point_order_flag != aps.canonical_point_order_flag) return false; if ( _aps.pred_weight_blending_enabled_flag != aps.pred_weight_blending_enabled_flag) return false; return true; } //============================================================================ bool predModeEligibleColor( const AttributeDescription& desc, const AttributeParameterSet& aps, const PCCPointSet3& pointCloud, const std::vector<uint32_t>& indexes, const PCCPredictor& predictor) { if (predictor.neighborCount <= 1 || !aps.max_num_direct_predictors) return false; Vec3<int64_t> minValue = {0, 0, 0}; Vec3<int64_t> maxValue = {0, 0, 0}; for (int i = 0; i < predictor.neighborCount; ++i) { const Vec3<attr_t> colorNeighbor = pointCloud.getColor(indexes[predictor.neighbors[i].predictorIndex]); for (size_t k = 0; k < 3; ++k) { if (i == 0 || colorNeighbor[k] < minValue[k]) { minValue[k] = colorNeighbor[k]; } if (i == 0 || colorNeighbor[k] > maxValue[k]) { maxValue[k] = colorNeighbor[k]; } } } auto maxDiff = (maxValue - minValue).max(); return maxDiff >= aps.adaptivePredictionThreshold(desc); } //---------------------------------------------------------------------------- bool predModeEligibleRefl( const AttributeDescription& desc, const AttributeParameterSet& aps, const PCCPointSet3& pointCloud, const std::vector<uint32_t>& indexes, const PCCPredictor& predictor) { if (predictor.neighborCount <= 1 || !aps.max_num_direct_predictors) return false; int64_t minValue = 0; int64_t maxValue = 0; for (int i = 0; i < predictor.neighborCount; ++i) { const attr_t reflectanceNeighbor = 
pointCloud.getReflectance( indexes[predictor.neighbors[i].predictorIndex]); if (i == 0 || reflectanceNeighbor < minValue) { minValue = reflectanceNeighbor; } if (i == 0 || reflectanceNeighbor > maxValue) { maxValue = reflectanceNeighbor; } } auto maxDiff = maxValue - minValue; return maxDiff >= aps.adaptivePredictionThreshold(desc); } //============================================================================ } // namespace pcc
6,511
31.39801
78
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/AttributeCommon.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2019, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <stdint.h> #include <vector> #include "entropy.h" #include "hls.h" #include "PCCTMC3Common.h" namespace pcc { //============================================================================ class AttributeContexts { public: void reset(); protected: AdaptiveBitModel ctxRunLen[5]; AdaptiveBitModel ctxCoeffGtN[2][7]; AdaptiveBitModel ctxCoeffRemPrefix[2][3]; AdaptiveBitModel ctxCoeffRemSuffix[2][3]; }; //---------------------------------------------------------------------------- inline void AttributeContexts::reset() { this->~AttributeContexts(); new (this) AttributeContexts; } //============================================================================ struct AttributeLods { // Indicates if the generated LoDs are compatible with the provided aps bool isReusable( const AttributeParameterSet& aps, const AttributeBrickHeader& abh) const; bool empty() const { return numPointsInLod.empty(); }; void generate( const AttributeParameterSet& aps, const AttributeBrickHeader& abh, int geom_num_points_minus1, int minGeomNodeSizeLog2, const PCCPointSet3& cloud); std::vector<PCCPredictor> predictors; std::vector<uint32_t> numPointsInLod; std::vector<uint32_t> indexes; private: // This is the aps that was used to generate the LoDs. It is used to check // if the generated LoDs are reusable. AttributeParameterSet _aps; // This is the abh that was used to generate the LoDs. It is used to check // if the generated LoDs are reusable. 
AttributeBrickHeader _abh; }; //============================================================================ bool predModeEligibleColor( const AttributeDescription& desc, const AttributeParameterSet& aps, const PCCPointSet3& pointCloud, const std::vector<uint32_t>& indexes, const PCCPredictor& predictor); bool predModeEligibleRefl( const AttributeDescription& desc, const AttributeParameterSet& aps, const PCCPointSet3& pointCloud, const std::vector<uint32_t>& indexes, const PCCPredictor& predictor); //============================================================================ } // namespace pcc
3,993
32.847458
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/AttributeDecoder.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "AttributeDecoder.h" #include "AttributeCommon.h" #include "DualLutCoder.h" #include "attribute_raw.h" #include "constants.h" #include "entropy.h" #include "hls.h" #include "io_hls.h" #include "RAHT.h" #include "FixedPoint.h" namespace pcc { //============================================================================ // An encapsulation of the entropy decoding methods used in attribute coding class PCCResidualsDecoder : protected AttributeContexts { public: PCCResidualsDecoder( const AttributeBrickHeader& abh, const AttributeContexts& ctxtMem); EntropyDecoder arithmeticDecoder; const AttributeContexts& getCtx() const { return *this; } void start(const SequenceParameterSet& sps, const char* buf, int buf_len); void stop(); int decodeRunLength(); int decodeSymbol(int k1, int k2, int k3); void decode(int32_t values[3]); int32_t decode(); }; //---------------------------------------------------------------------------- PCCResidualsDecoder::PCCResidualsDecoder( const AttributeBrickHeader& abh, const AttributeContexts& ctxtMem) : AttributeContexts(ctxtMem) {} //---------------------------------------------------------------------------- void PCCResidualsDecoder::start( const SequenceParameterSet& sps, const char* buf, int buf_len) { arithmeticDecoder.setBuffer(buf_len, buf); arithmeticDecoder.enableBypassStream(sps.cabac_bypass_stream_enabled_flag); arithmeticDecoder.start(); } //---------------------------------------------------------------------------- void PCCResidualsDecoder::stop() { arithmeticDecoder.stop(); } //---------------------------------------------------------------------------- int PCCResidualsDecoder::decodeRunLength() { int runLength = 0; auto* ctx = ctxRunLen; for (; runLength < 3; runLength++, ctx++) { int bin = arithmeticDecoder.decode(*ctx); if (!bin) return runLength; } for (int i = 0; i < 4; i++) { int bin = arithmeticDecoder.decode(*ctx); if (!bin) { runLength += arithmeticDecoder.decode(); return runLength; } runLength += 2; } 
runLength += arithmeticDecoder.decodeExpGolomb(2, *++ctx); return runLength; } //---------------------------------------------------------------------------- int PCCResidualsDecoder::decodeSymbol(int k1, int k2, int k3) { if (!arithmeticDecoder.decode(ctxCoeffGtN[0][k1])) return 0; if (!arithmeticDecoder.decode(ctxCoeffGtN[1][k2])) return 1; int coeff_abs_minus2 = arithmeticDecoder.decodeExpGolomb( 1, ctxCoeffRemPrefix[k3], ctxCoeffRemSuffix[k3]); return coeff_abs_minus2 + 2; } //---------------------------------------------------------------------------- void PCCResidualsDecoder::decode(int32_t value[3]) { value[1] = decodeSymbol(0, 0, 1); int b0 = value[1] == 0; int b1 = value[1] <= 1; value[2] = decodeSymbol(1 + b0, 1 + b1, 1); int b2 = value[2] == 0; int b3 = value[2] <= 1; value[0] = decodeSymbol(3 + (b0 << 1) + b2, 3 + (b1 << 1) + b3, 0); if (b0 && b2) value[0] += 1; if (value[0] && arithmeticDecoder.decode()) value[0] = -value[0]; if (value[1] && arithmeticDecoder.decode()) value[1] = -value[1]; if (value[2] && arithmeticDecoder.decode()) value[2] = -value[2]; } //---------------------------------------------------------------------------- int32_t PCCResidualsDecoder::decode() { auto mag = decodeSymbol(0, 0, 0) + 1; bool sign = arithmeticDecoder.decode(); return sign ? 
-mag : mag; } //============================================================================ // AttributeDecoderIntf AttributeDecoderIntf::~AttributeDecoderIntf() = default; //============================================================================ // AttributeDecoder factory std::unique_ptr<AttributeDecoderIntf> makeAttributeDecoder() { return std::unique_ptr<AttributeDecoder>(new AttributeDecoder()); } //============================================================================ // AttributeDecoder Members void AttributeDecoder::decode( const SequenceParameterSet& sps, const AttributeDescription& attr_desc, const AttributeParameterSet& attr_aps, const AttributeBrickHeader& abh, int geom_num_points_minus1, int minGeomNodeSizeLog2, const char* payload, size_t payloadLen, AttributeContexts& ctxtMem, PCCPointSet3& pointCloud) { if (attr_aps.attr_encoding == AttributeEncoding::kRaw) { AttrRawDecoder::decode( attr_desc, attr_aps, abh, payload, payloadLen, pointCloud); return; } QpSet qpSet = deriveQpSet(attr_desc, attr_aps, abh); PCCResidualsDecoder decoder(abh, ctxtMem); decoder.start(sps, payload, payloadLen); // generate LoDs if necessary if (attr_aps.lodParametersPresent() && _lods.empty()) _lods.generate( attr_aps, abh, geom_num_points_minus1, minGeomNodeSizeLog2, pointCloud); if (attr_desc.attr_num_dimensions_minus1 == 0) { switch (attr_aps.attr_encoding) { case AttributeEncoding::kRAHTransform: decodeReflectancesRaht(attr_desc, attr_aps, qpSet, decoder, pointCloud); break; case AttributeEncoding::kPredictingTransform: decodeReflectancesPred( attr_desc, attr_aps, abh, qpSet, decoder, pointCloud); break; case AttributeEncoding::kLiftingTransform: decodeReflectancesLift( attr_desc, attr_aps, abh, qpSet, geom_num_points_minus1, minGeomNodeSizeLog2, decoder, pointCloud); break; case AttributeEncoding::kRaw: // Already handled break; } } else if (attr_desc.attr_num_dimensions_minus1 == 2) { switch (attr_aps.attr_encoding) { case AttributeEncoding::kRAHTransform: 
decodeColorsRaht(attr_desc, attr_aps, qpSet, decoder, pointCloud); break; case AttributeEncoding::kPredictingTransform: decodeColorsPred(attr_desc, attr_aps, abh, qpSet, decoder, pointCloud); break; case AttributeEncoding::kLiftingTransform: decodeColorsLift( attr_desc, attr_aps, abh, qpSet, geom_num_points_minus1, minGeomNodeSizeLog2, decoder, pointCloud); break; case AttributeEncoding::kRaw: // Already handled break; } } else { assert( attr_desc.attr_num_dimensions_minus1 == 0 || attr_desc.attr_num_dimensions_minus1 == 2); } decoder.stop(); // save the context state for re-use by a future slice if required ctxtMem = decoder.getCtx(); } //---------------------------------------------------------------------------- bool AttributeDecoder::isReusable( const AttributeParameterSet& aps, const AttributeBrickHeader& abh) const { return _lods.isReusable(aps, abh); } //---------------------------------------------------------------------------- void AttributeDecoder::decodePredModeRefl( const AttributeParameterSet& aps, int32_t& coeff, PCCPredictor& predictor) { int coeffAbs = abs(coeff); int coeffSign = coeff < 0 ? 
-1 : 1; int mode; int maxcand = aps.max_num_direct_predictors + !aps.direct_avg_predictor_disabled_flag; switch (maxcand) { case 4: mode = coeffAbs & 3; coeff = coeffSign * (coeffAbs >> 2); break; case 3: mode = coeffAbs & 1; coeffAbs >>= 1; if (mode > 0) { mode += coeffAbs & 1; coeffAbs >>= 1; } coeff = coeffSign * coeffAbs; break; case 2: mode = coeffAbs & 1; coeff = coeffSign * (coeffAbs >> 1); break; default: mode = 0; } predictor.predMode = mode + aps.direct_avg_predictor_disabled_flag; } //---------------------------------------------------------------------------- void AttributeDecoder::decodeReflectancesPred( const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const QpSet& qpSet, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud) { const size_t pointCount = pointCloud.getPointCount(); const int64_t maxReflectance = (1ll << desc.bitdepth) - 1; int zeroRunRem = 0; int quantLayer = 0; std::vector<int64_t> quantWeights; computeQuantizationWeights( _lods.predictors, quantWeights, aps.quant_neigh_weight); for (size_t predictorIndex = 0; predictorIndex < pointCount; ++predictorIndex) { if (predictorIndex == _lods.numPointsInLod[quantLayer]) { quantLayer = std::min(int(qpSet.layers.size()) - 1, quantLayer + 1); } const uint32_t pointIndex = _lods.indexes[predictorIndex]; auto quant = qpSet.quantizers(pointCloud[pointIndex], quantLayer); auto& predictor = _lods.predictors[predictorIndex]; predictor.predMode = 0; if (--zeroRunRem < 0) zeroRunRem = decoder.decodeRunLength(); int32_t attValue0 = 0; if (!zeroRunRem) attValue0 = decoder.decode(); if (predModeEligibleRefl(desc, aps, pointCloud, _lods.indexes, predictor)) decodePredModeRefl(aps, attValue0, predictor); attr_t& reflectance = pointCloud.getReflectance(pointIndex); const int64_t quantPredAttValue = predictor.predictReflectance(pointCloud, _lods.indexes); int64_t qStep = quant[0].stepSize(); int64_t weight = std::min(quantWeights[predictorIndex], qStep) >> 
kFixedPointWeightShift; int64_t delta = divExp2RoundHalfUp(quant[0].scale(attValue0), kFixedPointAttributeShift); delta /= weight; const int64_t reconstructedQuantAttValue = quantPredAttValue + delta; reflectance = attr_t(PCCClip(reconstructedQuantAttValue, int64_t(0), maxReflectance)); } } //---------------------------------------------------------------------------- void AttributeDecoder::decodePredModeColor( const AttributeParameterSet& aps, Vec3<int32_t>& coeff, PCCPredictor& predictor) { int signk1 = coeff[1] < 0 ? -1 : 1; int signk2 = coeff[2] < 0 ? -1 : 1; int coeffAbsk1 = abs(coeff[1]); int coeffAbsk2 = abs(coeff[2]); int mode; int maxcand = aps.max_num_direct_predictors + !aps.direct_avg_predictor_disabled_flag; switch (maxcand) { int parityk1, parityk2; case 4: parityk1 = coeffAbsk1 & 1; parityk2 = coeffAbsk2 & 1; coeff[1] = signk1 * (coeffAbsk1 >> 1); coeff[2] = signk2 * (coeffAbsk2 >> 1); mode = (parityk1 << 1) + parityk2; break; case 3: parityk1 = coeffAbsk1 & 1; coeff[1] = signk1 * (coeffAbsk1 >> 1); mode = parityk1; if (parityk1) { parityk2 = coeffAbsk2 & 1; coeff[2] = signk2 * (coeffAbsk2 >> 1); mode += parityk2; } break; case 2: parityk1 = coeffAbsk1 & 1; coeff[1] = signk1 * (coeffAbsk1 >> 1); mode = parityk1; break; default: assert(maxcand >= 2); mode = 0; } predictor.predMode = mode + aps.direct_avg_predictor_disabled_flag; } //---------------------------------------------------------------------------- void AttributeDecoder::decodeColorsPred( const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const QpSet& qpSet, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud) { const size_t pointCount = pointCloud.getPointCount(); int64_t clipMax = (1 << desc.bitdepth) - 1; Vec3<int32_t> values; bool icpPresent = abh.icpPresent(desc, aps); auto icpCoeff = icpPresent ? 
abh.icpCoeffs[0] : 0; int lod = 0; int zeroRunRem = 0; int quantLayer = 0; std::vector<int64_t> quantWeights; computeQuantizationWeights( _lods.predictors, quantWeights, aps.quant_neigh_weight); for (size_t predictorIndex = 0; predictorIndex < pointCount; ++predictorIndex) { if (predictorIndex == _lods.numPointsInLod[quantLayer]) { quantLayer = std::min(int(qpSet.layers.size()) - 1, quantLayer + 1); } const uint32_t pointIndex = _lods.indexes[predictorIndex]; auto quant = qpSet.quantizers(pointCloud[pointIndex], quantLayer); auto& predictor = _lods.predictors[predictorIndex]; predictor.predMode = 0; if (--zeroRunRem < 0) zeroRunRem = decoder.decodeRunLength(); if (zeroRunRem) values[0] = values[1] = values[2] = 0; else decoder.decode(&values[0]); if (predModeEligibleColor(desc, aps, pointCloud, _lods.indexes, predictor)) decodePredModeColor(aps, values, predictor); Vec3<attr_t>& color = pointCloud.getColor(pointIndex); const Vec3<attr_t> predictedColor = predictor.predictColor(pointCloud, _lods.indexes); if (icpPresent && predictorIndex == _lods.numPointsInLod[lod]) icpCoeff = abh.icpCoeffs[++lod]; int64_t residual0 = 0; for (int k = 0; k < 3; ++k) { const auto& q = quant[std::min(k, 1)]; int64_t qStep = q.stepSize(); int64_t weight = std::min(quantWeights[predictorIndex], qStep) >> kFixedPointWeightShift; int64_t residual = divExp2RoundHalfUp(q.scale(values[k]), kFixedPointAttributeShift); residual /= weight; const int64_t recon = predictedColor[k] + residual + ((icpCoeff[k] * residual0 + 2) >> 2); color[k] = attr_t(PCCClip(recon, int64_t(0), clipMax)); if (!k && aps.inter_component_prediction_enabled_flag) residual0 = residual; } } } //---------------------------------------------------------------------------- void AttributeDecoder::decodeReflectancesRaht( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud) { const int voxelCount = int(pointCloud.getPointCount()); 
std::vector<MortonCodeWithIndex> packedVoxel(voxelCount); for (int n = 0; n < voxelCount; n++) { packedVoxel[n].mortonCode = mortonAddr(pointCloud[n]); packedVoxel[n].index = n; } sort(packedVoxel.begin(), packedVoxel.end()); // Morton codes std::vector<int64_t> mortonCode(voxelCount); for (int n = 0; n < voxelCount; n++) { mortonCode[n] = packedVoxel[n].mortonCode; } // Entropy decode const int attribCount = 1; std::vector<int> coefficients(attribCount * voxelCount); std::vector<Qps> pointQpOffsets(voxelCount); int zeroRunRem = 0; for (int n = 0; n < voxelCount; ++n) { if (--zeroRunRem < 0) zeroRunRem = decoder.decodeRunLength(); uint32_t value = 0; if (!zeroRunRem) value = decoder.decode(); coefficients[n] = value; pointQpOffsets[n] = qpSet.regionQpOffset(pointCloud[packedVoxel[n].index]); } std::vector<int> attributes(attribCount * voxelCount); const int rahtPredThreshold[2] = {aps.raht_prediction_threshold0, aps.raht_prediction_threshold1}; regionAdaptiveHierarchicalInverseTransform( aps.raht_prediction_enabled_flag, rahtPredThreshold, qpSet, pointQpOffsets.data(), mortonCode.data(), attributes.data(), attribCount, voxelCount, coefficients.data()); const int64_t maxReflectance = (1 << desc.bitdepth) - 1; const int64_t minReflectance = 0; for (int n = 0; n < voxelCount; n++) { int64_t val = attributes[attribCount * n]; const attr_t reflectance = attr_t(PCCClip(val, minReflectance, maxReflectance)); pointCloud.setReflectance(packedVoxel[n].index, reflectance); } } //---------------------------------------------------------------------------- void AttributeDecoder::decodeColorsRaht( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud) { const int voxelCount = int(pointCloud.getPointCount()); std::vector<MortonCodeWithIndex> packedVoxel(voxelCount); for (int n = 0; n < voxelCount; n++) { packedVoxel[n].mortonCode = mortonAddr(pointCloud[n]); packedVoxel[n].index = n; } 
sort(packedVoxel.begin(), packedVoxel.end()); // Morton codes std::vector<int64_t> mortonCode(voxelCount); for (int n = 0; n < voxelCount; n++) { mortonCode[n] = packedVoxel[n].mortonCode; } // Entropy decode const int attribCount = 3; std::vector<int> coefficients(attribCount * voxelCount); std::vector<Qps> pointQpOffsets(voxelCount); int zeroRunRem = 0; for (int n = 0; n < voxelCount; ++n) { if (--zeroRunRem < 0) zeroRunRem = decoder.decodeRunLength(); int32_t values[3] = {}; if (!zeroRunRem) decoder.decode(values); for (int d = 0; d < attribCount; ++d) { coefficients[voxelCount * d + n] = values[d]; } pointQpOffsets[n] = qpSet.regionQpOffset(pointCloud[packedVoxel[n].index]); } std::vector<int> attributes(attribCount * voxelCount); const int rahtPredThreshold[2] = {aps.raht_prediction_threshold0, aps.raht_prediction_threshold1}; regionAdaptiveHierarchicalInverseTransform( aps.raht_prediction_enabled_flag, rahtPredThreshold, qpSet, pointQpOffsets.data(), mortonCode.data(), attributes.data(), attribCount, voxelCount, coefficients.data()); int clipMax = (1 << desc.bitdepth) - 1; for (int n = 0; n < voxelCount; n++) { const int r = attributes[attribCount * n]; const int g = attributes[attribCount * n + 1]; const int b = attributes[attribCount * n + 2]; Vec3<attr_t> color; color[0] = attr_t(PCCClip(r, 0, clipMax)); color[1] = attr_t(PCCClip(g, 0, clipMax)); color[2] = attr_t(PCCClip(b, 0, clipMax)); pointCloud.setColor(packedVoxel[n].index, color); } } //---------------------------------------------------------------------------- void AttributeDecoder::decodeColorsLift( const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const QpSet& qpSet, int geom_num_points_minus1, int minGeomNodeSizeLog2, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud) { const size_t pointCount = pointCloud.getPointCount(); std::vector<uint64_t> weights; if (!aps.scalable_lifting_enabled_flag) { 
PCCComputeQuantizationWeights(_lods.predictors, weights); } else { computeQuantizationWeightsScalable( _lods.predictors, _lods.numPointsInLod, geom_num_points_minus1 + 1, minGeomNodeSizeLog2, weights); } const size_t lodCount = _lods.numPointsInLod.size(); std::vector<Vec3<int64_t>> colors; colors.resize(pointCount); // decompress // Per level-of-detail coefficients for last component prediction int lod = 0; int8_t lastCompPredCoeff = 0; if (aps.last_component_prediction_enabled_flag) lastCompPredCoeff = abh.attrLcpCoeffs[0]; int zeroRunRem = 0; int quantLayer = 0; for (size_t predictorIndex = 0; predictorIndex < pointCount; ++predictorIndex) { if (predictorIndex == _lods.numPointsInLod[quantLayer]) { quantLayer = std::min(int(qpSet.layers.size()) - 1, quantLayer + 1); } if (predictorIndex == _lods.numPointsInLod[lod]) { lod++; if (aps.last_component_prediction_enabled_flag) lastCompPredCoeff = abh.attrLcpCoeffs[lod]; } const uint32_t pointIndex = _lods.indexes[predictorIndex]; auto quant = qpSet.quantizers(pointCloud[pointIndex], quantLayer); if (--zeroRunRem < 0) zeroRunRem = decoder.decodeRunLength(); int32_t values[3] = {}; if (!zeroRunRem) decoder.decode(values); const int64_t iQuantWeight = irsqrt(weights[predictorIndex]); auto& color = colors[predictorIndex]; int64_t scaled = quant[0].scale(values[0]); color[0] = divExp2RoundHalfInf(scaled * iQuantWeight, 40); scaled = quant[1].scale(values[1]); color[1] = divExp2RoundHalfInf(scaled * iQuantWeight, 40); scaled *= lastCompPredCoeff; scaled >>= 2; scaled += quant[1].scale(values[2]); color[2] = divExp2RoundHalfInf(scaled * iQuantWeight, 40); } // reconstruct for (size_t lodIndex = 1; lodIndex < lodCount; ++lodIndex) { const size_t startIndex = _lods.numPointsInLod[lodIndex - 1]; const size_t endIndex = _lods.numPointsInLod[lodIndex]; PCCLiftUpdate( _lods.predictors, weights, startIndex, endIndex, false, colors); PCCLiftPredict(_lods.predictors, startIndex, endIndex, false, colors); } int64_t clipMax = (1 << 
desc.bitdepth) - 1; for (size_t f = 0; f < pointCount; ++f) { const auto color0 = divExp2RoundHalfInf(colors[f], kFixedPointAttributeShift); Vec3<attr_t> color; for (size_t d = 0; d < 3; ++d) { color[d] = attr_t(PCCClip(color0[d], int64_t(0), clipMax)); } pointCloud.setColor(_lods.indexes[f], color); } } //---------------------------------------------------------------------------- void AttributeDecoder::decodeReflectancesLift( const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const QpSet& qpSet, int geom_num_points_minus1, int minGeomNodeSizeLog2, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud) { const size_t pointCount = pointCloud.getPointCount(); std::vector<uint64_t> weights; if (!aps.scalable_lifting_enabled_flag) { PCCComputeQuantizationWeights(_lods.predictors, weights); } else { computeQuantizationWeightsScalable( _lods.predictors, _lods.numPointsInLod, geom_num_points_minus1 + 1, minGeomNodeSizeLog2, weights); } const size_t lodCount = _lods.numPointsInLod.size(); std::vector<int64_t> reflectances; reflectances.resize(pointCount); // decompress int zeroRunRem = 0; int quantLayer = 0; for (size_t predictorIndex = 0; predictorIndex < pointCount; ++predictorIndex) { if (predictorIndex == _lods.numPointsInLod[quantLayer]) { quantLayer = std::min(int(qpSet.layers.size()) - 1, quantLayer + 1); } const uint32_t pointIndex = _lods.indexes[predictorIndex]; auto quant = qpSet.quantizers(pointCloud[pointIndex], quantLayer); if (--zeroRunRem < 0) zeroRunRem = decoder.decodeRunLength(); int64_t detail = 0; if (!zeroRunRem) detail = decoder.decode(); const int64_t iQuantWeight = irsqrt(weights[predictorIndex]); auto& reflectance = reflectances[predictorIndex]; const int64_t delta = detail; const int64_t reconstructedDelta = quant[0].scale(delta); reflectance = divExp2RoundHalfInf(reconstructedDelta * iQuantWeight, 40); } // reconstruct for (size_t lodIndex = 1; lodIndex < lodCount; ++lodIndex) { const size_t 
startIndex = _lods.numPointsInLod[lodIndex - 1]; const size_t endIndex = _lods.numPointsInLod[lodIndex]; PCCLiftUpdate( _lods.predictors, weights, startIndex, endIndex, false, reflectances); PCCLiftPredict( _lods.predictors, startIndex, endIndex, false, reflectances); } const int64_t maxReflectance = (1 << desc.bitdepth) - 1; for (size_t f = 0; f < pointCount; ++f) { const auto refl = divExp2RoundHalfInf(reflectances[f], kFixedPointAttributeShift); pointCloud.setReflectance( _lods.indexes[f], attr_t(PCCClip(refl, int64_t(0), maxReflectance))); } } //============================================================================ } /* namespace pcc */
24,671
29.801498
79
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/AttributeDecoder.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <stdint.h> #include "Attribute.h" #include "AttributeCommon.h" #include "PayloadBuffer.h" #include "PCCTMC3Common.h" #include "quantization.h" namespace pcc { //============================================================================ // Opaque definitions (Internal detail) class PCCResidualsDecoder; //============================================================================ class AttributeDecoder : public AttributeDecoderIntf { public: void decode( const SequenceParameterSet& sps, const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, int geom_num_points_minus1, int minGeomNodeSizeLog2, const char* payload, size_t payloadLen, AttributeContexts& ctxtMem, PCCPointSet3& pointCloud) override; bool isReusable( const AttributeParameterSet& aps, const AttributeBrickHeader& abh) const override; protected: // todo(df): consider alternative encapsulation void decodeReflectancesLift( const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const QpSet& qpSet, int geom_num_points_minus1, int minGeomNodeSizeLog2, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud); void decodeColorsLift( const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const QpSet& qpSet, int geom_num_points_minus1, int minGeomNodeSizeLog2, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud); void decodeReflectancesPred( const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const QpSet& qpSet, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud); void decodeColorsPred( const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const QpSet& qpSet, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud); void decodeReflectancesRaht( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCResidualsDecoder& 
decoder, PCCPointSet3& pointCloud); void decodeColorsRaht( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCResidualsDecoder& decoder, PCCPointSet3& pointCloud); static void decodePredModeColor( const AttributeParameterSet& aps, Vec3<int32_t>& coeff, PCCPredictor& predictor); static void decodePredModeRefl( const AttributeParameterSet& aps, int32_t& coeff, PCCPredictor& predictor); private: AttributeLods _lods; }; //============================================================================ } /* namespace pcc */
4,638
31.900709
79
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/AttributeEncoder.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "AttributeEncoder.h" #include "DualLutCoder.h" #include "attribute_raw.h" #include "constants.h" #include "entropy.h" #include "io_hls.h" #include "quantization.h" #include "RAHT.h" #include "FixedPoint.h" #include <algorithm> // todo(df): promote to per-attribute encoder parameter static const double kAttrPredLambdaR = 0.01; static const double kAttrPredLambdaC = 0.14; namespace pcc { //============================================================================ // An encapsulation of the entropy coding methods used in attribute coding class PCCResidualsEncoder : protected AttributeContexts { public: PCCResidualsEncoder( const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const AttributeContexts& ctxtMem); EntropyEncoder arithmeticEncoder; const AttributeContexts& getCtx() const { return *this; } void start(const SequenceParameterSet& sps, int numPoints); int stop(); void encodeRunLength(int runLength); void encodeSymbol(uint32_t value, int k1, int k2, int k3); void encode(int32_t value0, int32_t value1, int32_t value2); void encode(int32_t value); int availPredModes; double bitsPtColor(Vec3<int32_t> value, int parity); double bitsPtRefl(int32_t value, int parity); // Encoder side residual cost calculation const int scaleRes = 1 << 20; const int windowLog2 = 6; int probResGt0[3]; //prob of residuals larger than 0: 1 for each component int probResGt1[3]; //prob of residuals larger than 1: 1 for each component void resStatUpdateColor(Vec3<int32_t> values); void resStatUpdateRefl(int32_t values); void resStatReset(); }; //---------------------------------------------------------------------------- PCCResidualsEncoder::PCCResidualsEncoder( const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const AttributeContexts& ctxtMem) : AttributeContexts(ctxtMem) { availPredModes = aps.max_num_direct_predictors + !aps.direct_avg_predictor_disabled_flag; resStatReset(); } 
//----------------------------------------------------------------------------

// Prepare the arithmetic coder for a slice of |pointCount| points.
void
PCCResidualsEncoder::start(const SequenceParameterSet& sps, int pointCount)
{
  // todo(df): remove estimate when arithmetic codec is replaced
  // Worst-case output buffer estimate: 2 bytes per component per point.
  int maxAcBufLen = pointCount * 3 * 2 + 1024;
  arithmeticEncoder.setBuffer(maxAcBufLen, nullptr);
  arithmeticEncoder.enableBypassStream(sps.cabac_bypass_stream_enabled_flag);
  arithmeticEncoder.start();
}

//----------------------------------------------------------------------------

// Terminate the arithmetic codeword; returns the coded length in bytes.
int
PCCResidualsEncoder::stop()
{
  return arithmeticEncoder.stop();
}

//----------------------------------------------------------------------------

// Reset the residual probability estimates to 0.5 (scaleRes/2).
void
PCCResidualsEncoder::resStatReset()
{
  for (int k = 0; k < 3; k++)
    probResGt0[k] = probResGt1[k] = (scaleRes >> 1);
}

//----------------------------------------------------------------------------

// Update the per-component P(|res|>0) and P(|res|>1) estimates with an
// exponential moving average (window = 2^windowLog2).
void
PCCResidualsEncoder::resStatUpdateColor(Vec3<int32_t> value)
{
  for (int k = 0; k < 3; k++) {
    probResGt0[k] += value[k] ? (scaleRes - probResGt0[k]) >> windowLog2
                              : -((probResGt0[k]) >> windowLog2);
    if (value[k])
      probResGt1[k] += abs(value[k]) > 1
        ? (scaleRes - probResGt1[k]) >> windowLog2
        : -((probResGt1[k]) >> windowLog2);
  }
}

//----------------------------------------------------------------------------

// Reflectance variant of resStatUpdateColor: only component 0 is tracked.
void
PCCResidualsEncoder::resStatUpdateRefl(int32_t value)
{
  probResGt0[0] += value ? (scaleRes - probResGt0[0]) >> windowLog2
                         : -(probResGt0[0] >> windowLog2);
  if (value)
    probResGt1[0] += abs(value) > 1 ? (scaleRes - probResGt1[0]) >> windowLog2
                                    : -(probResGt1[0] >> windowLog2);
}

//----------------------------------------------------------------------------

// Estimate the cost (in bits) of coding a colour residual with the
// prediction mode signalled via the parity of the secondary components.
// Mirrors the mapping performed by encodePredModeColor.
double
PCCResidualsEncoder::bitsPtColor(Vec3<int32_t> value, int mode)
{
  // Fold the mode bits into the magnitudes according to the number of
  // available prediction modes.
  if (availPredModes == 4) {
    value[1] = 2 * abs(value[1]) + (mode >> 1);
    value[2] = 2 * abs(value[2]) + (mode & 1);
  } else if (availPredModes == 3) {
    value[1] = 2 * abs(value[1]) + (mode > 0);
    if (mode > 0)
      value[2] = 2 * abs(value[2]) + (mode - 1);
  } else if (availPredModes == 2) {
    value[1] = 2 * abs(value[1]) + (mode & 1);
  }

  int log2scaleRes = ilog2(uint32_t(scaleRes));
  double bits = 0;
  for (int k = 0; k < 3; k++) {
    // Information content of the Gt0/Gt1 flags under the tracked estimates.
    bits += value[k] ? log2scaleRes - log2(probResGt0[k])
                     : log2scaleRes - log2(scaleRes - probResGt0[k]);  //Gt0
    int mag = abs(value[k]);
    if (mag) {
      bits += mag > 1 ? log2scaleRes - log2(probResGt1[k])
                      : log2scaleRes - log2(scaleRes - probResGt1[k]);  //Gt1
      bits += 1;  //sign bit.
      if (mag > 1)
        bits += 2.0 * log2(mag - 1.0) + 1.0;  //EG0 approximation.
    }
  }

  return bits;
}

//----------------------------------------------------------------------------

// Reflectance variant of bitsPtColor; the mode is folded into the single
// residual magnitude.  Mirrors encodePredModeRefl's mapping.
double
PCCResidualsEncoder::bitsPtRefl(int32_t value, int mode)
{
  if (availPredModes == 4) {
    value = (abs(value) << 2) + (mode);
  } else if (availPredModes == 3) {
    if (mode > 0)
      value = (abs(value) << 1) + (mode - 1);
    value = (abs(value) << 1) + (mode > 0);
  } else if (availPredModes == 2) {
    value = (abs(value) << 1) + (mode & 1);
  }

  int log2scaleRes = ilog2((uint32_t)scaleRes);
  double bits = 0;
  bits += value ? log2scaleRes - log2(probResGt0[0])
                : log2scaleRes - log2(scaleRes - probResGt0[0]);  //Gt0
  int mag = abs(value);
  if (mag) {
    bits += mag > 1 ? log2scaleRes - log2(probResGt1[0])
                    : log2scaleRes - log2(scaleRes - probResGt1[0]);  //Gt1
    bits += 1;  //sign bit.
    if (mag > 1)
      bits += 2.0 * log2(mag - 1.0) + 1.0;  //EG0 approximation.
  }
  return bits;
}

//----------------------------------------------------------------------------

// Code a zero-run length: up to three context-coded unary bins, then a
// prefix/parity stage, then exp-Golomb order-2 for the remainder.
void
PCCResidualsEncoder::encodeRunLength(int runLength)
{
  auto* ctx = ctxRunLen;
  for (int i = 0; i < std::min(3, runLength); i++, ctx++)
    arithmeticEncoder.encode(1, *ctx);

  if (runLength < 3) {
    arithmeticEncoder.encode(0, *ctx);
    return;
  }
  runLength -= 3;

  auto prefix = runLength >> 1;
  for (int i = 0; i < std::min(4, prefix); i++)
    arithmeticEncoder.encode(1, *ctx);

  if (runLength < 8) {
    arithmeticEncoder.encode(0, *ctx);
    arithmeticEncoder.encode(runLength & 1);  // bypass-coded parity bit
    return;
  }
  runLength -= 8;

  arithmeticEncoder.encodeExpGolomb(runLength, 2, *++ctx);
}

//----------------------------------------------------------------------------

// Code one magnitude: context-coded Gt0 and Gt1 flags (context sets k1,k2)
// followed by exp-Golomb order-1 remainder (context set k3).
void
PCCResidualsEncoder::encodeSymbol(uint32_t value, int k1, int k2, int k3)
{
  arithmeticEncoder.encode(value > 0, ctxCoeffGtN[0][k1]);
  if (!value)
    return;

  arithmeticEncoder.encode(--value > 0, ctxCoeffGtN[1][k2]);
  if (!value)
    return;

  arithmeticEncoder.encodeExpGolomb(
    --value, 1, ctxCoeffRemPrefix[k3], ctxCoeffRemSuffix[k3]);
}

//----------------------------------------------------------------------------

// Code a three-component residual.  Components 1 and 2 are coded first;
// their zero-ness conditions the contexts (and a -1 offset) used for
// component 0.  Signs are bypass-coded last.
void
PCCResidualsEncoder::encode(int32_t value0, int32_t value1, int32_t value2)
{
  int mag0 = abs(value0);
  int mag1 = abs(value1);
  int mag2 = abs(value2);

  int b0 = (mag1 == 0);
  int b1 = (mag1 <= 1);
  int b2 = (mag2 == 0);
  int b3 = (mag2 <= 1);
  encodeSymbol(mag1, 0, 0, 1);
  encodeSymbol(mag2, 1 + b0, 1 + b1, 1);

  // If both other components are zero, mag0 cannot be zero (the all-zero
  // case is represented by the zero-run), so code mag0-1.
  auto mag0minusX = b0 && b2 ?
mag0 - 1 : mag0;
  assert(mag0minusX >= 0);
  encodeSymbol(mag0minusX, 3 + (b0 << 1) + b2, 3 + (b1 << 1) + b3, 0);

  if (mag0)
    arithmeticEncoder.encode(value0 < 0);
  if (mag1)
    arithmeticEncoder.encode(value1 < 0);
  if (mag2)
    arithmeticEncoder.encode(value2 < 0);
}

//----------------------------------------------------------------------------

// Code a single non-zero residual (zero is represented by the zero-run),
// hence the magnitude is coded as |value|-1, followed by the sign.
void
PCCResidualsEncoder::encode(int32_t value)
{
  int mag = abs(value) - 1;
  encodeSymbol(mag, 0, 0, 0);
  arithmeticEncoder.encode(value < 0);
}

//============================================================================
// An encapsulation of the entropy coding methods used in attribute coding

// Frequency-table based bit-cost estimator used by encoder-side decisions.
// freq0/symbolCount0 model the primary component, freq1/symbolCount1 the
// two secondary components (shared table).
struct PCCResidualsEntropyEstimator {
  size_t freq0[kAttributeResidualAlphabetSize + 1];
  size_t freq1[kAttributeResidualAlphabetSize + 1];
  size_t symbolCount0;
  size_t symbolCount1;
  size_t isZero0Count;
  size_t isZero1Count;
  PCCResidualsEntropyEstimator() { init(); }
  void init();
  double bitsDetail(
    const uint32_t detail,
    const size_t symbolCount,
    const size_t* const freq) const;
  double bits(const uint32_t value0) const;
  void update(const uint32_t value0);
  double bits(
    const uint32_t value0, const uint32_t value1, const uint32_t value2) const;
  void
  update(const uint32_t value0, const uint32_t value1, const uint32_t value2);
};

//----------------------------------------------------------------------------

// Initialise all symbol frequencies to 1 (Laplace smoothing) and the
// zero-flag counters to half the initial symbol count.
void
PCCResidualsEntropyEstimator::init()
{
  for (size_t i = 0; i <= kAttributeResidualAlphabetSize; ++i) {
    freq0[i] = 1;
    freq1[i] = 1;
  }
  symbolCount0 = kAttributeResidualAlphabetSize + 1;
  symbolCount1 = kAttributeResidualAlphabetSize + 1;
  isZero1Count = isZero0Count = symbolCount0 / 2;
}

//----------------------------------------------------------------------------

// Bits to code |detail| from the frequency table |freq|; values beyond the
// alphabet add an exp-Golomb-style escape cost.
double
PCCResidualsEntropyEstimator::bitsDetail(
  const uint32_t detail,
  const size_t symbolCount,
  const size_t* const freq) const
{
  const uint32_t detailClipped =
    std::min(detail, uint32_t(kAttributeResidualAlphabetSize));
  const double pDetail =
    PCCClip(double(freq[detailClipped]) / symbolCount, 0.001, 0.999);
  double bits = -log2(pDetail);
  if (detail >= kAttributeResidualAlphabetSize) {
    const double x = double(detail) - double(kAttributeResidualAlphabetSize);
    bits += 2.0 * std::floor(log2(x + 1.0)) + 1.0;
  }
  return bits;
}

//----------------------------------------------------------------------------

// Estimated bits for a single-component residual: zero-flag plus detail.
double
PCCResidualsEntropyEstimator::bits(const uint32_t value0) const
{
  const bool isZero0 = value0 == 0;
  const double pIsZero0 = isZero0
    ? double(isZero0Count) / symbolCount0
    : double(symbolCount0 - isZero0Count) / symbolCount0;
  double bits = -log2(PCCClip(pIsZero0, 0.001, 0.999));
  if (!isZero0) {
    bits += bitsDetail(value0 - 1, symbolCount0, freq0);
  }
  return bits;
}

//----------------------------------------------------------------------------

// Record a single-component residual in the statistics.
void
PCCResidualsEntropyEstimator::update(const uint32_t value0)
{
  const bool isZero0 = value0 == 0;
  ++symbolCount0;
  if (!isZero0) {
    ++freq0[std::min(value0 - 1, uint32_t(kAttributeResidualAlphabetSize))];
  } else {
    ++isZero0Count;
  }
}

//----------------------------------------------------------------------------

// Estimated bits for a three-component residual.  The two secondary
// components share a joint zero-flag and the freq1 table.
// NOTE(review): pIsZero1 uses symbolCount0 as denominator — consistent with
// one update() call (and so one zero-flag) per symbol.
double
PCCResidualsEntropyEstimator::bits(
  const uint32_t value0, const uint32_t value1, const uint32_t value2) const
{
  const bool isZero0 = value0 == 0;
  const double pIsZero0 = isZero0
    ? double(isZero0Count) / symbolCount0
    : double(symbolCount0 - isZero0Count) / symbolCount0;
  double bits = -log2(PCCClip(pIsZero0, 0.001, 0.999));
  if (!isZero0) {
    bits += bitsDetail(value0 - 1, symbolCount0, freq0);
  }

  const bool isZero1 = value1 == 0 && value2 == 0;
  const double pIsZero1 = isZero1
    ? double(isZero1Count) / symbolCount0
    : double(symbolCount0 - isZero1Count) / symbolCount0;
  bits -= log2(PCCClip(pIsZero1, 0.001, 0.999));
  if (!isZero1) {
    bits += bitsDetail(value1, symbolCount1, freq1);
    bits += bitsDetail(value2, symbolCount1, freq1);
  }
  return bits;
}

//----------------------------------------------------------------------------

// Record a three-component residual in the statistics.
void
PCCResidualsEntropyEstimator::update(
  const uint32_t value0, const uint32_t value1, const uint32_t value2)
{
  const bool isZero0 = value0 == 0;
  ++symbolCount0;
  if (!isZero0) {
    ++freq0[std::min(value0 - 1, uint32_t(kAttributeResidualAlphabetSize))];
  } else {
    ++isZero0Count;
  }

  const bool isZero1 = value1 == 0 && value2 == 0;
  symbolCount1 += 2;
  if (!isZero1) {
    ++freq1[std::min(value1, uint32_t(kAttributeResidualAlphabetSize))];
    ++freq1[std::min(value2, uint32_t(kAttributeResidualAlphabetSize))];
  } else {
    ++isZero1Count;
  }
}

//============================================================================
// AttributeEncoderIntf

AttributeEncoderIntf::~AttributeEncoderIntf() = default;

//============================================================================
// AttributeEncoder factory

std::unique_ptr<AttributeEncoderIntf>
makeAttributeEncoder()
{
  return std::unique_ptr<AttributeEncoder>(new AttributeEncoder());
}

//============================================================================
// AttributeEncoder Members

// Top-level entry point: encode one attribute (reflectance or colour) of
// the slice into |payload| using the method selected in the APS.
void
AttributeEncoder::encode(
  const SequenceParameterSet& sps,
  const AttributeDescription& desc,
  const AttributeParameterSet& attr_aps,
  AttributeBrickHeader& abh,
  AttributeContexts& ctxtMem,
  PCCPointSet3& pointCloud,
  PayloadBuffer* payload)
{
  // Raw coding bypasses the arithmetic coder entirely.
  if (attr_aps.attr_encoding == AttributeEncoding::kRaw) {
    AttrRawEncoder::encode(sps, desc, attr_aps, abh, pointCloud, payload);
    return;
  }

  // Encoders are able to modify the slice header:
  _abh = &abh;

  QpSet qpSet = deriveQpSet(desc, attr_aps, abh);

  // generate LoDs if necessary
  if (attr_aps.lodParametersPresent() && _lods.empty())
    _lods.generate(
      attr_aps, abh,
pointCloud.getPointCount() - 1, 0, pointCloud);

  PCCResidualsEncoder encoder(attr_aps, abh, ctxtMem);
  encoder.start(sps, int(pointCloud.getPointCount()));

  // Dispatch on attribute dimensionality (0 => reflectance, 2 => colour)
  // and the configured coding method.
  if (desc.attr_num_dimensions_minus1 == 0) {
    switch (attr_aps.attr_encoding) {
    case AttributeEncoding::kRAHTransform:
      encodeReflectancesTransformRaht(
        desc, attr_aps, qpSet, pointCloud, encoder);
      break;

    case AttributeEncoding::kPredictingTransform:
      encodeReflectancesPred(desc, attr_aps, qpSet, pointCloud, encoder);
      break;

    case AttributeEncoding::kLiftingTransform:
      encodeReflectancesLift(desc, attr_aps, qpSet, pointCloud, encoder);
      break;

    case AttributeEncoding::kRaw:
      // Already handled
      break;
    }
  } else if (desc.attr_num_dimensions_minus1 == 2) {
    switch (attr_aps.attr_encoding) {
    case AttributeEncoding::kRAHTransform:
      encodeColorsTransformRaht(desc, attr_aps, qpSet, pointCloud, encoder);
      break;

    case AttributeEncoding::kPredictingTransform:
      encodeColorsPred(desc, attr_aps, qpSet, pointCloud, encoder);
      break;

    case AttributeEncoding::kLiftingTransform:
      encodeColorsLift(desc, attr_aps, qpSet, pointCloud, encoder);
      break;

    case AttributeEncoding::kRaw:
      // Already handled
      break;
    }
  } else {
    assert(
      desc.attr_num_dimensions_minus1 == 0
      || desc.attr_num_dimensions_minus1 == 2);
  }

  uint32_t acDataLen = encoder.stop();

  // write abh
  write(sps, attr_aps, abh, payload);
  _abh = nullptr;

  std::copy_n(
    encoder.arithmeticEncoder.buffer(), acDataLen,
    std::back_inserter(*payload));

  // save the context state for re-use by a future slice if required
  ctxtMem = encoder.getCtx();
}

//----------------------------------------------------------------------------

// True when the cached LoD structure is valid for (aps, abh).
bool
AttributeEncoder::isReusable(
  const AttributeParameterSet& aps, const AttributeBrickHeader& abh) const
{
  return _lods.isReusable(aps, abh);
}

//----------------------------------------------------------------------------

// Quantized, fixed-point reflectance prediction residual.
int64_t
AttributeEncoder::computeReflectanceResidual(
  const uint64_t reflectance,
  const uint64_t predictedReflectance,
  const Quantizer& quant)
{
  const int64_t quantAttValue = reflectance;
  const int64_t quantPredAttValue = predictedReflectance;
  const int64_t delta = quant.quantize(
    (quantAttValue - quantPredAttValue) << kFixedPointAttributeShift);

  return delta;
}

//----------------------------------------------------------------------------

// Choose the reflectance prediction mode for one point by minimising the
// estimated residual bit cost over the eligible single-neighbour
// predictors (plus the neighbour average, unless disabled).
// NOTE(review): the |context| parameter appears unused here — confirm.
void
AttributeEncoder::decidePredModeRefl(
  const AttributeDescription& desc,
  const AttributeParameterSet& aps,
  const PCCPointSet3& pointCloud,
  const std::vector<uint32_t>& indexesLOD,
  const uint32_t predictorIndex,
  PCCPredictor& predictor,
  PCCResidualsEncoder& encoder,
  PCCResidualsEntropyEstimator& context,
  const Quantizer& quant)
{
  uint64_t attrValue = pointCloud.getReflectance(indexesLOD[predictorIndex]);

  // base case: start with the first neighbour
  // NB: skip evaluation of mode 0 (weighted average of n neighbours)
  int startpredIndex = aps.direct_avg_predictor_disabled_flag;
  predictor.predMode = startpredIndex;
  uint64_t attrPred = predictor.predictReflectance(pointCloud, indexesLOD);
  int64_t attrResidualQuant =
    computeReflectanceResidual(attrValue, attrPred, quant);

  // NB: idxBits is not included in the score
  int mode = predictor.predMode - aps.direct_avg_predictor_disabled_flag;
  int64_t best_score = encoder.bitsPtRefl(attrResidualQuant, mode);

  for (int i = startpredIndex; i < predictor.neighborCount; i++) {
    if (i == aps.max_num_direct_predictors)
      break;
    attrPred = pointCloud.getReflectance(
      indexesLOD[predictor.neighbors[i].predictorIndex]);
    attrResidualQuant = computeReflectanceResidual(attrValue, attrPred, quant);
    mode = i + !aps.direct_avg_predictor_disabled_flag;
    int64_t score = encoder.bitsPtRefl(attrResidualQuant, mode);

    if (score < best_score) {
      best_score = score;
      predictor.predMode = i + 1;
      // NB: setting predictor.neighborCount = 1 will cause issues
      // with reconstruction.
    }
  }
}

//----------------------------------------------------------------------------

// Fold the chosen reflectance prediction mode into the residual coefficient
// (the decoder recovers it from the coefficient's low bits).
// Must stay consistent with the cost model in bitsPtRefl.
void
AttributeEncoder::encodePredModeRefl(
  const AttributeParameterSet& aps, int predMode, int32_t& coeff)
{
  int coeffSign = coeff < 0 ? -1 : 1;
  int coeffAbs = abs(coeff);

  int mode = predMode - aps.direct_avg_predictor_disabled_flag;
  int maxcand =
    aps.max_num_direct_predictors + !aps.direct_avg_predictor_disabled_flag;
  switch (maxcand) {
  case 4: coeff = coeffSign * ((coeffAbs << 2) + mode); break;

  case 3:
    if (mode > 0)
      coeffAbs = ((coeffAbs << 1) + (mode - 1));
    coeffAbs = ((coeffAbs << 1) + (mode > 0));
    coeff = coeffSign * coeffAbs;
    break;

  case 2: coeff = coeffSign * ((coeffAbs << 1) + mode); break;

  default: assert(mode == 0);
  }
}

//----------------------------------------------------------------------------

// Predicting-transform coding of reflectance: per-point prediction,
// quantization, reconstruction, then zero-run + residual entropy coding.
void
AttributeEncoder::encodeReflectancesPred(
  const AttributeDescription& desc,
  const AttributeParameterSet& aps,
  const QpSet& qpSet,
  PCCPointSet3& pointCloud,
  PCCResidualsEncoder& encoder)
{
  const uint32_t pointCount = pointCloud.getPointCount();
  const int64_t clipMax = (1ll << desc.bitdepth) - 1;
  PCCResidualsEntropyEstimator context;
  int zeroRunAcc = 0;
  std::vector<int> zerorun;
  zerorun.reserve(pointCount);
  std::vector<uint32_t> residual;
  residual.resize(pointCount);

  int quantLayer = 0;
  std::vector<int64_t> quantWeights;

  computeQuantizationWeights(
    _lods.predictors, quantWeights, aps.quant_neigh_weight);

  for (size_t predictorIndex = 0; predictorIndex < pointCount;
       ++predictorIndex) {
    // Advance the quantization layer at each LoD boundary.
    if (predictorIndex == _lods.numPointsInLod[quantLayer]) {
      quantLayer = std::min(int(qpSet.layers.size()) - 1, quantLayer + 1);
    }
    const uint32_t pointIndex = _lods.indexes[predictorIndex];
    auto quant = qpSet.quantizers(pointCloud[pointIndex], quantLayer);
    auto& predictor = _lods.predictors[predictorIndex];
    predictor.predMode = 0;

    bool predModeEligible =
      predModeEligibleRefl(desc, aps, pointCloud, _lods.indexes, predictor);
    if (predModeEligible)
      decidePredModeRefl(
        desc, aps, pointCloud,
_lods.indexes, predictorIndex, predictor, encoder, context,
        quant[0]);

    const uint64_t reflectance = pointCloud.getReflectance(pointIndex);
    const attr_t predictedReflectance =
      predictor.predictReflectance(pointCloud, _lods.indexes);
    const int64_t quantAttValue = reflectance;
    const int64_t quantPredAttValue = predictedReflectance;

    // Weighted quantization: weight is capped by the step size.
    int64_t qStep = quant[0].stepSize();
    int64_t weight =
      std::min(quantWeights[predictorIndex], qStep) >> kFixedPointWeightShift;
    const int64_t delta = quant[0].quantize(
      ((quantAttValue - quantPredAttValue) * weight)
      << kFixedPointAttributeShift);
    int32_t attValue0 = delta;
    int64_t reconstructedDelta =
      divExp2RoundHalfUp(quant[0].scale(delta), kFixedPointAttributeShift);
    reconstructedDelta /= weight;

    if (predModeEligible)
      encodePredModeRefl(aps, predictor.predMode, attValue0);

    const int64_t reconstructedQuantAttValue =
      quantPredAttValue + reconstructedDelta;
    const attr_t reconstructedReflectance =
      attr_t(PCCClip(reconstructedQuantAttValue, int64_t(0), clipMax));

    // Accumulate zero-runs; flush when a non-zero residual appears.
    if (!attValue0)
      ++zeroRunAcc;
    else {
      zerorun.push_back(zeroRunAcc);
      zeroRunAcc = 0;
    }

    residual[predictorIndex] = attValue0;
    // In-loop reconstruction so later predictions use decoded values.
    pointCloud.setReflectance(pointIndex, reconstructedReflectance);
    encoder.resStatUpdateRefl(attValue0);
  }
  if (zeroRunAcc)
    zerorun.push_back(zeroRunAcc);

  // Second pass: interleave run-lengths and non-zero residuals.
  int runIdx = 0;
  int zeroRunRem = 0;
  for (size_t predictorIndex = 0; predictorIndex < pointCount;
       ++predictorIndex) {
    if (--zeroRunRem < 0) {
      zeroRunRem = zerorun[runIdx++];
      encoder.encodeRunLength(zeroRunRem);
    }

    if (!zeroRunRem)
      encoder.encode(residual[predictorIndex]);
  }
}

//----------------------------------------------------------------------------

// Quantized colour residuals; when inter-component prediction is enabled,
// the secondary components are predicted from the reconstructed primary
// residual scaled by icpCoeff (quarter-unit fixed point).
Vec3<int64_t>
AttributeEncoder::computeColorResiduals(
  const AttributeParameterSet& aps,
  const Vec3<attr_t> color,
  const Vec3<attr_t> predictedColor,
  const Vec3<int8_t> icpCoeff,
  const Quantizers& quant)
{
  Vec3<int64_t> residuals;
  const int64_t quantAttValue = color[0];
  const int64_t quantPredAttValue = predictedColor[0];
  const int64_t delta = quant[0].quantize(
    (quantAttValue - quantPredAttValue) << kFixedPointAttributeShift);
  residuals[0] = delta;
  const int64_t residual0 =
    divExp2RoundHalfUp(quant[0].scale(delta), kFixedPointAttributeShift);

  for (size_t k = 1; k < 3; ++k) {
    const int64_t quantAttValue = color[k];
    const int64_t quantPredAttValue = predictedColor[k];

    if (aps.inter_component_prediction_enabled_flag) {
      auto err = quantAttValue - quantPredAttValue
        - ((icpCoeff[k] * residual0 + 2) >> 2);
      auto delta = quant[1].quantize(err << kFixedPointAttributeShift);
      residuals[k] = delta;
    } else {
      const int64_t delta = quant[1].quantize(
        (quantAttValue - quantPredAttValue) << kFixedPointAttributeShift);
      residuals[k] = delta;
    }
  }
  return residuals;
}

//----------------------------------------------------------------------------

// Choose the colour prediction mode for one point by minimising a
// rate-distortion score: distortion + lambda * estimated residual bits.
// NOTE(review): the |context| parameter appears unused here — confirm.
void
AttributeEncoder::decidePredModeColor(
  const AttributeDescription& desc,
  const AttributeParameterSet& aps,
  const PCCPointSet3& pointCloud,
  const std::vector<uint32_t>& indexesLOD,
  const uint32_t predictorIndex,
  PCCPredictor& predictor,
  PCCResidualsEncoder& encoder,
  PCCResidualsEntropyEstimator& context,
  const Vec3<int8_t>& icpCoeff,
  const Quantizers& quant)
{
  Vec3<attr_t> attrValue = pointCloud.getColor(indexesLOD[predictorIndex]);

  // base case: weighted average of n neighbours
  int startpredIndex = aps.direct_avg_predictor_disabled_flag;
  predictor.predMode = startpredIndex;
  Vec3<attr_t> attrPred = predictor.predictColor(pointCloud, indexesLOD);
  Vec3<int64_t> attrResidualQuant =
    computeColorResiduals(aps, attrValue, attrPred, icpCoeff, quant);
  auto attrDistortion =
    computeColorDistortions(desc, attrValue, attrPred, quant);
  double rate = encoder.bitsPtColor(attrResidualQuant, 0);
  double best_score = attrDistortion
    + rate * kAttrPredLambdaC
      * (quant[0].stepSize() >> kFixedPointAttributeShift);

  for (int i = startpredIndex; i < predictor.neighborCount; i++) {
    if (i == aps.max_num_direct_predictors)
      break;
    attrPred =
      pointCloud.getColor(indexesLOD[predictor.neighbors[i].predictorIndex]);
    attrResidualQuant =
      computeColorResiduals(aps, attrValue, attrPred, icpCoeff, quant);
    attrDistortion = computeColorDistortions(desc, attrValue, attrPred, quant);

    int sigIdx = i + !aps.direct_avg_predictor_disabled_flag;
    double rate = encoder.bitsPtColor(attrResidualQuant, sigIdx);
    double score = attrDistortion
      + rate * kAttrPredLambdaC
        * (quant[0].stepSize() >> kFixedPointAttributeShift);

    if (score < best_score) {
      best_score = score;
      predictor.predMode = i + 1;
      // NB: setting predictor.neighborCount = 1 will cause issues
      // with reconstruction.
    }
  }
}

//----------------------------------------------------------------------------

// Fold the chosen colour prediction mode into the parity of the two
// secondary residual components.  Must stay consistent with bitsPtColor.
void
AttributeEncoder::encodePredModeColor(
  const AttributeParameterSet& aps, int predMode, Vec3<int32_t>& coeff)
{
  int signk1 = coeff[1] < 0 ? -1 : 1;
  int signk2 = coeff[2] < 0 ? -1 : 1;
  int coeffAbsk1 = abs(coeff[1]);
  int coeffAbsk2 = abs(coeff[2]);

  int mode = predMode - aps.direct_avg_predictor_disabled_flag;
  int maxcand =
    aps.max_num_direct_predictors + !aps.direct_avg_predictor_disabled_flag;
  assert(mode < maxcand);
  switch (maxcand) {
    int parityk1, parityk2;
  case 4:
    parityk1 = mode >> 1;
    parityk2 = mode & 1;
    coeff[1] = signk1 * ((coeffAbsk1 << 1) + parityk1);
    coeff[2] = signk2 * ((coeffAbsk2 << 1) + parityk2);
    break;

  case 3:
    parityk1 = mode ?
1 : 0;
    coeff[1] = signk1 * ((coeffAbsk1 << 1) + parityk1);
    if (parityk1) {
      parityk2 = mode - parityk1;
      coeff[2] = signk2 * ((coeffAbsk2 << 1) + parityk2);
    }
    break;

  case 2:
    parityk1 = mode;
    coeff[1] = signk1 * ((coeffAbsk1 << 1) + parityk1);
    break;

  default: assert(mode == 0);
  }
}

//----------------------------------------------------------------------------

// Estimate per-LoD inter-component prediction coefficients (signed weight
// in quarter units) for the two secondary colour components by exhaustive
// search over eight candidate weights against first-neighbour residuals.
std::vector<Vec3<int8_t>>
AttributeEncoder::computeInterComponentPredictionCoeffs(
  const AttributeParameterSet& aps, const PCCPointSet3& pointCloud)
{
  int maxNumDetailLevels = aps.maxNumDetailLevels();
  assert(_lods.numPointsInLod.size() <= maxNumDetailLevels);

  // Two secondary colour components (positive sign set)
  // NB: k=0 is never used
  std::vector<Vec3<int8_t>> signs(maxNumDetailLevels, {0, 1, 1});

  // Estimate residual using original neighbour as predictor
  const size_t pointCount = pointCloud.getPointCount();
  std::vector<Vec3<int32_t>> residual(pointCount);
  for (size_t predIdx = 0; predIdx < pointCount; ++predIdx) {
    const auto pointIdx = _lods.indexes[predIdx];
    auto& predictor = _lods.predictors[predIdx];

    // taking first neighbor for simplicity
    predictor.predMode = 1;
    auto predAttr = predictor.predictColor(pointCloud, _lods.indexes);
    auto srcAttr = pointCloud.getColor(pointIdx);
    residual[predIdx] = Vec3<int>(srcAttr) - Vec3<int>(predAttr);

    // reset is needed as RD would be done later.
    predictor.predMode = 0;
  }

  const int nWeights = 8;
  const int nShift = 2;  // from log2(nWeights >> 1)
  std::vector<Vec3<int64_t>> sumPredCoeff(nWeights, 0);
  Vec3<int64_t> sumOrigCoeff = 0;
  int lod = 0;
  for (size_t predIdx = 0; predIdx < pointCount; ++predIdx) {
    Vec3<int32_t> resid = residual[predIdx];

    // Accumulate |secondary - w/4 * primary| for each candidate weight w.
    for (int w = 0; w < nWeights; w++) {
      for (int k = 1; k < 3; k++)
        sumPredCoeff[w][k] +=
          abs(resid[k] - signs[lod][k] * (((w + 1) * resid[0] + 2) >> nShift));
    }

    for (int k = 1; k < 3; k++)
      sumOrigCoeff[k] += abs(resid[k]);

    // at LoD transition, determine the sign coeff
    if (predIdx != _lods.numPointsInLod[lod] - 1)
      continue;

    // find the best weight
    for (int k = 1; k < 3; k++) {
      auto best = std::min_element(
        sumPredCoeff.begin(), sumPredCoeff.end(),
        [=](Vec3<int64_t>& a, Vec3<int64_t>& b) { return a[k] < b[k]; });

      int coeff = 1 + std::distance(sumPredCoeff.begin(), best);
      signs[lod][k] *= coeff;
      assert(signs[lod][k] < nWeights + 1 && signs[lod][k] > -(nWeights + 1));

      // Disable prediction when it does not beat the original residual.
      if ((*best)[k] > sumOrigCoeff[k])
        signs[lod][k] = 0;
    }

    for (int w = 0; w < nWeights; w++)
      sumPredCoeff[w] = 0;

    sumOrigCoeff = 0;
    lod++;
  }

  // NB: there may be more coefficients than actual detail levels
  // Set any unused detail level coefficients to 0
  for (; lod < maxNumDetailLevels; lod++)
    signs[lod] = 0;

  return signs;
}

//----------------------------------------------------------------------------

// Predicting-transform coding of colours: per-point prediction mode RDO,
// weighted quantization, optional inter-component prediction, in-loop
// reconstruction, then zero-run + residual entropy coding.
void
AttributeEncoder::encodeColorsPred(
  const AttributeDescription& desc,
  const AttributeParameterSet& aps,
  const QpSet& qpSet,
  PCCPointSet3& pointCloud,
  PCCResidualsEncoder& encoder)
{
  const size_t pointCount = pointCloud.getPointCount();

  int64_t clipMax = (1 << desc.bitdepth) - 1;
  Vec3<int32_t> values;
  PCCResidualsEntropyEstimator context;
  int zeroRunAcc = 0;
  std::vector<int> zerorun;
  std::vector<int32_t> residual[3];
  for (int i = 0; i < 3; i++) {
    residual[i].resize(pointCount);
  }

  bool icpPresent = _abh->icpPresent(desc, aps);
  if (icpPresent)
    _abh->icpCoeffs = computeInterComponentPredictionCoeffs(aps, pointCloud);
  auto icpCoeff = icpPresent ? _abh->icpCoeffs[0] : 0;

  int lod = 0;
  int quantLayer = 0;
  std::vector<int64_t> quantWeights;

  computeQuantizationWeights(
    _lods.predictors, quantWeights, aps.quant_neigh_weight);

  for (size_t predictorIndex = 0; predictorIndex < pointCount;
       ++predictorIndex) {
    // Advance the quantization layer at each LoD boundary.
    if (predictorIndex == _lods.numPointsInLod[quantLayer]) {
      quantLayer = std::min(int(qpSet.layers.size()) - 1, quantLayer + 1);
    }

    // Switch to the next LoD's inter-component prediction coefficient.
    if (icpPresent && predictorIndex == _lods.numPointsInLod[lod])
      icpCoeff = _abh->icpCoeffs[++lod];

    const auto pointIndex = _lods.indexes[predictorIndex];
    auto quant = qpSet.quantizers(pointCloud[pointIndex], quantLayer);
    auto& predictor = _lods.predictors[predictorIndex];
    predictor.predMode = 0;

    bool predModeEligible =
      predModeEligibleColor(desc, aps, pointCloud, _lods.indexes, predictor);
    if (predModeEligible)
      decidePredModeColor(
        desc, aps, pointCloud, _lods.indexes, predictorIndex, predictor,
        encoder, context, icpCoeff, quant);

    const Vec3<attr_t> color = pointCloud.getColor(pointIndex);
    const Vec3<attr_t> predictedColor =
      predictor.predictColor(pointCloud, _lods.indexes);

    Vec3<attr_t> reconstructedColor;
    int64_t residual0 = 0;
    for (int k = 0; k < 3; ++k) {
      // quant[0] for the primary component, quant[1] for the others.
      const auto& q = quant[std::min(k, 1)];
      int64_t residual = color[k] - predictedColor[k];

      int64_t qStep = q.stepSize();
      int64_t weight =
        std::min(quantWeights[predictorIndex], qStep) >> kFixedPointWeightShift;
      int64_t residualQ =
        q.quantize((residual * weight) << kFixedPointAttributeShift);
      int64_t residualR =
        divExp2RoundHalfUp(q.scale(residualQ), kFixedPointAttributeShift);
      residualR /= weight;

      // Re-quantize with the primary residual subtracted when ICP applies.
      if (aps.inter_component_prediction_enabled_flag && k > 0) {
        residual = residual - ((icpCoeff[k] * residual0 + 2) >> 2);
        residualQ =
          q.quantize((residual * weight) << kFixedPointAttributeShift);
        residualR =
          divExp2RoundHalfUp(q.scale(residualQ), kFixedPointAttributeShift);
        residualR /= weight;
        residualR += ((icpCoeff[k] * residual0 + 2) >> 2);
      }

      if (k == 0)
        residual0 = residualR;

      values[k] = residualQ;

      int64_t
recon = predictedColor[k] + residualR;
      reconstructedColor[k] = attr_t(PCCClip(recon, int64_t(0), clipMax));
    }

    if (predModeEligible)
      encodePredModeColor(aps, predictor.predMode, values);

    // In-loop reconstruction so later predictions use decoded values.
    pointCloud.setColor(pointIndex, reconstructedColor);
    encoder.resStatUpdateColor(values);

    // Accumulate zero-runs; flush when any component is non-zero.
    if (!values[0] && !values[1] && !values[2]) {
      ++zeroRunAcc;
    } else {
      zerorun.push_back(zeroRunAcc);
      zeroRunAcc = 0;
    }

    for (int i = 0; i < 3; i++) {
      residual[i][predictorIndex] = values[i];
    }
  }
  if (zeroRunAcc)
    zerorun.push_back(zeroRunAcc);

  // Second pass: interleave run-lengths and non-zero residual triples.
  int runIdx = 0;
  int zeroRunRem = 0;
  for (size_t predictorIndex = 0; predictorIndex < pointCount;
       ++predictorIndex) {
    if (--zeroRunRem < 0) {
      zeroRunRem = zerorun[runIdx++];
      encoder.encodeRunLength(zeroRunRem);
    }

    if (!zeroRunRem) {
      for (size_t k = 0; k < 3; k++)
        values[k] = residual[k][predictorIndex];
      encoder.encode(values[0], values[1], values[2]);
    }
  }
}

//----------------------------------------------------------------------------

// RAHT coding of reflectance: Morton-order the points, apply the region
// adaptive hierarchical transform, entropy code the coefficients, and
// write back the clipped reconstructed values.
void
AttributeEncoder::encodeReflectancesTransformRaht(
  const AttributeDescription& desc,
  const AttributeParameterSet& aps,
  const QpSet& qpSet,
  PCCPointSet3& pointCloud,
  PCCResidualsEncoder& encoder)
{
  const int voxelCount = int(pointCloud.getPointCount());
  std::vector<MortonCodeWithIndex> packedVoxel(voxelCount);
  for (int n = 0; n < voxelCount; n++) {
    packedVoxel[n].mortonCode = mortonAddr(pointCloud[n]);
    packedVoxel[n].index = n;
  }
  sort(packedVoxel.begin(), packedVoxel.end());

  // Allocate arrays.
  const int attribCount = 1;
  std::vector<int64_t> mortonCode(voxelCount);
  std::vector<int> attributes(attribCount * voxelCount);
  std::vector<int> coefficients(attribCount * voxelCount);
  std::vector<Qps> pointQpOffsets(voxelCount);

  // Populate input arrays.
  for (int n = 0; n < voxelCount; n++) {
    mortonCode[n] = packedVoxel[n].mortonCode;
    const auto reflectance = pointCloud.getReflectance(packedVoxel[n].index);
    attributes[attribCount * n] = reflectance;
    pointQpOffsets[n] = qpSet.regionQpOffset(pointCloud[packedVoxel[n].index]);
  }

  const int rahtPredThreshold[2] = {aps.raht_prediction_threshold0,
                                    aps.raht_prediction_threshold1};

  // Transform.
  // NB: attributes[] is updated in place with reconstructed values.
  regionAdaptiveHierarchicalTransform(
    aps.raht_prediction_enabled_flag, rahtPredThreshold, qpSet,
    pointQpOffsets.data(), mortonCode.data(), attributes.data(), attribCount,
    voxelCount, coefficients.data());

  // Entropy encode.
  int zeroRun = 0;
  for (int n = 0; n < voxelCount; ++n) {
    auto value = coefficients[n];
    if (!value)
      ++zeroRun;
    else {
      encoder.encodeRunLength(zeroRun);
      encoder.encode(value);
      zeroRun = 0;
    }
  }
  if (zeroRun)
    encoder.encodeRunLength(zeroRun);

  // Write back clipped reconstructed reflectances (in Morton order).
  const int64_t maxReflectance = (1 << desc.bitdepth) - 1;
  const int64_t minReflectance = 0;
  for (int n = 0; n < voxelCount; n++) {
    int64_t val = attributes[attribCount * n];
    const attr_t reflectance =
      attr_t(PCCClip(val, minReflectance, maxReflectance));
    pointCloud.setReflectance(packedVoxel[n].index, reflectance);
  }
}

//----------------------------------------------------------------------------

// RAHT coding of colours: identical structure to the reflectance variant
// but with three interleaved components.
void
AttributeEncoder::encodeColorsTransformRaht(
  const AttributeDescription& desc,
  const AttributeParameterSet& aps,
  const QpSet& qpSet,
  PCCPointSet3& pointCloud,
  PCCResidualsEncoder& encoder)
{
  const int voxelCount = int(pointCloud.getPointCount());
  std::vector<MortonCodeWithIndex> packedVoxel(voxelCount);
  for (int n = 0; n < voxelCount; n++) {
    packedVoxel[n].mortonCode = mortonAddr(pointCloud[n]);
    packedVoxel[n].index = n;
  }
  sort(packedVoxel.begin(), packedVoxel.end());

  // Allocate arrays.
  const int attribCount = 3;
  std::vector<int64_t> mortonCode(voxelCount);
  std::vector<int> attributes(attribCount * voxelCount);
  std::vector<int> coefficients(attribCount * voxelCount);
  std::vector<Qps> pointQpOffsets(voxelCount);

  // Populate input arrays.
  for (int n = 0; n < voxelCount; n++) {
    mortonCode[n] = packedVoxel[n].mortonCode;
    const auto color = pointCloud.getColor(packedVoxel[n].index);
    attributes[attribCount * n] = color[0];
    attributes[attribCount * n + 1] = color[1];
    attributes[attribCount * n + 2] = color[2];
    pointQpOffsets[n] = qpSet.regionQpOffset(pointCloud[packedVoxel[n].index]);
  }

  const int rahtPredThreshold[2] = {aps.raht_prediction_threshold0,
                                    aps.raht_prediction_threshold1};

  // Transform.
  // NB: attributes[] is updated in place with reconstructed values.
  regionAdaptiveHierarchicalTransform(
    aps.raht_prediction_enabled_flag, rahtPredThreshold, qpSet,
    pointQpOffsets.data(), mortonCode.data(), attributes.data(), attribCount,
    voxelCount, coefficients.data());

  // Entropy encode.
  // NB: coefficients[] is stored in planar (component-major) layout.
  int values[attribCount];
  int zeroRun = 0;
  for (int n = 0; n < voxelCount; ++n) {
    for (int d = 0; d < attribCount; ++d) {
      values[d] = coefficients[voxelCount * d + n];
    }
    if (!values[0] && !values[1] && !values[2])
      ++zeroRun;
    else {
      encoder.encodeRunLength(zeroRun);
      encoder.encode(values[0], values[1], values[2]);
      zeroRun = 0;
    }
  }
  if (zeroRun)
    encoder.encodeRunLength(zeroRun);

  // Write back clipped reconstructed colours.
  int clipMax = (1 << desc.bitdepth) - 1;
  for (int n = 0; n < voxelCount; n++) {
    const int r = attributes[attribCount * n];
    const int g = attributes[attribCount * n + 1];
    const int b = attributes[attribCount * n + 2];
    Vec3<attr_t> color;
    color[0] = attr_t(PCCClip(r, 0, clipMax));
    color[1] = attr_t(PCCClip(g, 0, clipMax));
    color[2] = attr_t(PCCClip(b, 0, clipMax));
    pointCloud.setColor(packedVoxel[n].index, color);
  }
}

//----------------------------------------------------------------------------

// Lifting-transform coding of colours (continues beyond this chunk).
void
AttributeEncoder::encodeColorsLift(
  const AttributeDescription& desc,
  const AttributeParameterSet& aps,
  const QpSet& qpSet,
  PCCPointSet3& pointCloud,
  PCCResidualsEncoder&
encoder) { const size_t pointCount = pointCloud.getPointCount(); std::vector<uint64_t> weights; if (!aps.scalable_lifting_enabled_flag) { PCCComputeQuantizationWeights(_lods.predictors, weights); } else { computeQuantizationWeightsScalable( _lods.predictors, _lods.numPointsInLod, pointCount, 0, weights); } const size_t lodCount = _lods.numPointsInLod.size(); std::vector<Vec3<int64_t>> colors; colors.resize(pointCount); for (size_t index = 0; index < pointCount; ++index) { const auto& color = pointCloud.getColor(_lods.indexes[index]); for (size_t d = 0; d < 3; ++d) { colors[index][d] = int32_t(color[d]) << kFixedPointAttributeShift; } } for (size_t i = 0; (i + 1) < lodCount; ++i) { const size_t lodIndex = lodCount - i - 1; const size_t startIndex = _lods.numPointsInLod[lodIndex - 1]; const size_t endIndex = _lods.numPointsInLod[lodIndex]; PCCLiftPredict(_lods.predictors, startIndex, endIndex, true, colors); PCCLiftUpdate( _lods.predictors, weights, startIndex, endIndex, true, colors); } // Per level-of-detail coefficients for last component prediction int8_t lastCompPredCoeff = 0; if (aps.last_component_prediction_enabled_flag) { _abh->attrLcpCoeffs = computeLastComponentPredictionCoeff(aps, colors); lastCompPredCoeff = _abh->attrLcpCoeffs[0]; } int zeroRun = 0; int quantLayer = 0; int lod = 0; for (size_t predictorIndex = 0; predictorIndex < pointCount; ++predictorIndex) { if (predictorIndex == _lods.numPointsInLod[quantLayer]) { quantLayer = std::min(int(qpSet.layers.size()) - 1, quantLayer + 1); } if (predictorIndex == _lods.numPointsInLod[lod]) { lod++; if (aps.last_component_prediction_enabled_flag) lastCompPredCoeff = _abh->attrLcpCoeffs[lod]; } const auto pointIndex = _lods.indexes[predictorIndex]; auto quant = qpSet.quantizers(pointCloud[pointIndex], quantLayer); const int64_t iQuantWeight = irsqrt(weights[predictorIndex]); const int64_t quantWeight = (weights[predictorIndex] * iQuantWeight + (1ull << 39)) >> 40; auto& color = colors[predictorIndex]; int 
values[3]; values[0] = quant[0].quantize(color[0] * quantWeight); int64_t scaled = quant[0].scale(values[0]); color[0] = divExp2RoundHalfInf(scaled * iQuantWeight, 40); values[1] = quant[1].quantize(color[1] * quantWeight); scaled = quant[1].scale(values[1]); color[1] = divExp2RoundHalfInf(scaled * iQuantWeight, 40); color[2] -= (lastCompPredCoeff * color[1]) >> 2; scaled *= lastCompPredCoeff; scaled >>= 2; values[2] = quant[1].quantize(color[2] * quantWeight); scaled += quant[1].scale(values[2]); color[2] = divExp2RoundHalfInf(scaled * iQuantWeight, 40); if (!values[0] && !values[1] && !values[2]) ++zeroRun; else { encoder.encodeRunLength(zeroRun); encoder.encode(values[0], values[1], values[2]); zeroRun = 0; } } if (zeroRun) encoder.encodeRunLength(zeroRun); // reconstruct for (size_t lodIndex = 1; lodIndex < lodCount; ++lodIndex) { const size_t startIndex = _lods.numPointsInLod[lodIndex - 1]; const size_t endIndex = _lods.numPointsInLod[lodIndex]; PCCLiftUpdate( _lods.predictors, weights, startIndex, endIndex, false, colors); PCCLiftPredict(_lods.predictors, startIndex, endIndex, false, colors); } int64_t clipMax = (1 << desc.bitdepth) - 1; for (size_t f = 0; f < pointCount; ++f) { const auto color0 = divExp2RoundHalfInf(colors[f], kFixedPointAttributeShift); Vec3<attr_t> color; for (size_t d = 0; d < 3; ++d) { color[d] = attr_t(PCCClip(color0[d], 0, clipMax)); } pointCloud.setColor(_lods.indexes[f], color); } } //---------------------------------------------------------------------------- std::vector<int8_t> AttributeEncoder::computeLastComponentPredictionCoeff( const AttributeParameterSet& aps, const std::vector<Vec3<int64_t>>& coeffs) { int maxNumDetailLevels = aps.maxNumDetailLevels(); assert(_lods.numPointsInLod.size() <= maxNumDetailLevels); std::vector<int8_t> signs(maxNumDetailLevels, 0); int64_t sumk1k2 = 0; int64_t sumk1k1 = 0; int lod = 0; for (size_t coeffIdx = 0; coeffIdx < coeffs.size(); ++coeffIdx) { auto& attr = coeffs[coeffIdx]; int mult = 
attr[1] * attr[2]; int mult2 = attr[1] * attr[1]; sumk1k2 += mult; sumk1k1 += mult2; // compute prediction coefficient at end of detail level if (coeffIdx != _lods.numPointsInLod[lod] - 1) continue; int scale = 0; if (sumk1k2 && sumk1k1) { // sign(sumk1k2) * sign(sumk1k1) int sign = (sumk1k2 < 0) ^ (sumk1k1 < 0) ? -1 : 1; scale = ((sumk1k2 << 2) + sign * (sumk1k1 >> 1)) / sumk1k1; } sumk1k2 = sumk1k1 = 0; // NB: coding range is limited to +-8 signs[lod] = PCCClip(scale, -8, 8); lod++; } // NB: there may be more coefficients than actual detail levels // Propagate the last value to all unused levels to minimise useless cost for (; lod < maxNumDetailLevels; lod++) signs[lod] = signs[lod - 1]; return signs; } //---------------------------------------------------------------------------- void AttributeEncoder::encodeReflectancesLift( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCPointSet3& pointCloud, PCCResidualsEncoder& encoder) { const size_t pointCount = pointCloud.getPointCount(); std::vector<uint64_t> weights; if (!aps.scalable_lifting_enabled_flag) { PCCComputeQuantizationWeights(_lods.predictors, weights); } else { computeQuantizationWeightsScalable( _lods.predictors, _lods.numPointsInLod, pointCount, 0, weights); } const size_t lodCount = _lods.numPointsInLod.size(); std::vector<int64_t> reflectances; reflectances.resize(pointCount); for (size_t index = 0; index < pointCount; ++index) { reflectances[index] = int32_t(pointCloud.getReflectance(_lods.indexes[index])) << kFixedPointAttributeShift; } for (size_t i = 0; (i + 1) < lodCount; ++i) { const size_t lodIndex = lodCount - i - 1; const size_t startIndex = _lods.numPointsInLod[lodIndex - 1]; const size_t endIndex = _lods.numPointsInLod[lodIndex]; PCCLiftPredict(_lods.predictors, startIndex, endIndex, true, reflectances); PCCLiftUpdate( _lods.predictors, weights, startIndex, endIndex, true, reflectances); } // compress int zeroRun = 0; int quantLayer = 0; for (size_t 
predictorIndex = 0; predictorIndex < pointCount; ++predictorIndex) { if (predictorIndex == _lods.numPointsInLod[quantLayer]) { quantLayer = std::min(int(qpSet.layers.size()) - 1, quantLayer + 1); } const auto pointIndex = _lods.indexes[predictorIndex]; auto quant = qpSet.quantizers(pointCloud[pointIndex], quantLayer); const int64_t iQuantWeight = irsqrt(weights[predictorIndex]); const int64_t quantWeight = (weights[predictorIndex] * iQuantWeight + (1ull << 39)) >> 40; auto& reflectance = reflectances[predictorIndex]; const int64_t delta = quant[0].quantize(reflectance * quantWeight); const auto detail = delta; const int64_t reconstructedDelta = quant[0].scale(delta); reflectance = divExp2RoundHalfInf(reconstructedDelta * iQuantWeight, 40); if (!detail) ++zeroRun; else { encoder.encodeRunLength(zeroRun); encoder.encode(detail); zeroRun = 0; } } if (zeroRun) encoder.encodeRunLength(zeroRun); // reconstruct for (size_t lodIndex = 1; lodIndex < lodCount; ++lodIndex) { const size_t startIndex = _lods.numPointsInLod[lodIndex - 1]; const size_t endIndex = _lods.numPointsInLod[lodIndex]; PCCLiftUpdate( _lods.predictors, weights, startIndex, endIndex, false, reflectances); PCCLiftPredict( _lods.predictors, startIndex, endIndex, false, reflectances); } const int64_t maxReflectance = (1 << desc.bitdepth) - 1; for (size_t f = 0; f < pointCount; ++f) { const int64_t refl = divExp2RoundHalfInf(reflectances[f], kFixedPointAttributeShift); pointCloud.setReflectance( _lods.indexes[f], attr_t(PCCClip(refl, int64_t(0), maxReflectance))); } } //============================================================================ int AttributeEncoder::computeColorDistortions( const AttributeDescription& desc, const Vec3<attr_t> color, const Vec3<attr_t> predictedColor, const Quantizers& quant) { int64_t clipMax = (1 << desc.bitdepth) - 1; Vec3<attr_t> reconstructedColor; for (int k = 0; k < 3; ++k) { const auto& q = quant[std::min(k, 1)]; int64_t residual = color[k] - predictedColor[k]; int64_t 
residualQ = q.quantize(residual << kFixedPointAttributeShift); int64_t residualR = divExp2RoundHalfUp(q.scale(residualQ), kFixedPointAttributeShift); int64_t recon = predictedColor[k] + residualR; reconstructedColor[k] = attr_t(PCCClip(recon, int64_t(0), clipMax)); } int distortion = 0; for (int k = 0; k < 3; ++k) distortion += std::abs(color[k] - reconstructedColor[k]); return distortion; } //============================================================================ // estimation of dist2 int estimateDist2( const PCCPointSet3& cloud, int32_t samplingPeriod, int32_t searchRange, float percentileEstimate) { int32_t pointCount = cloud.getPointCount(); if (pointCount < 2) return 0; std::vector<int64_t> dists; dists.reserve(pointCount / samplingPeriod + 1); for (int32_t index = 0; index < pointCount; index += samplingPeriod) { auto k0 = std::max(0, index - searchRange); auto k1 = std::min(pointCount - 1, index + searchRange); auto d2 = std::numeric_limits<int64_t>::max(); for (auto k = k0; k <= k1; ++k) { if (k == index) continue; d2 = std::min(d2, (cloud[index] - cloud[k]).getNorm2<int64_t>()); } dists.push_back(d2); } int p = int(std::floor(dists.size() * percentileEstimate)); std::nth_element(dists.begin(), dists.begin() + p, dists.end()); int64_t dist2 = dists[p]; int shiftBits = 0; while ((int64_t(3) << (shiftBits << 1)) < dist2 && shiftBits < 20) ++shiftBits; return shiftBits; } //============================================================================ } /* namespace pcc */
49,585
30.806286
79
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/AttributeEncoder.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <stdint.h> #include <vector> #include "Attribute.h" #include "AttributeCommon.h" #include "PayloadBuffer.h" #include "PCCTMC3Common.h" #include "hls.h" #include "quantization.h" namespace pcc { //============================================================================ // Opaque definitions (Internal detail) class PCCResidualsEncoder; struct PCCResidualsEntropyEstimator; //============================================================================ class AttributeEncoder : public AttributeEncoderIntf { public: void encode( const SequenceParameterSet& sps, const AttributeDescription& desc, const AttributeParameterSet& attr_aps, AttributeBrickHeader& abh, AttributeContexts& ctxtMem, PCCPointSet3& pointCloud, PayloadBuffer* payload) override; bool isReusable( const AttributeParameterSet& aps, const AttributeBrickHeader& abh) const override; protected: // todo(df): consider alternative encapsulation void encodeReflectancesLift( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCPointSet3& pointCloud, PCCResidualsEncoder& encoder); void encodeColorsLift( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCPointSet3& pointCloud, PCCResidualsEncoder& encoder); void encodeReflectancesPred( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCPointSet3& pointCloud, PCCResidualsEncoder& encoder); void encodeColorsPred( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCPointSet3& pointCloud, PCCResidualsEncoder& encoder); void encodeReflectancesTransformRaht( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCPointSet3& pointCloud, PCCResidualsEncoder& encoder); void encodeColorsTransformRaht( const AttributeDescription& desc, const AttributeParameterSet& aps, const QpSet& qpSet, PCCPointSet3& pointCloud, PCCResidualsEncoder& encoder); static 
Vec3<int64_t> computeColorResiduals( const AttributeParameterSet& aps, const Vec3<attr_t> color, const Vec3<attr_t> predictedColor, const Vec3<int8_t> icpCoeff, const Quantizers& quant); static int computeColorDistortions( const AttributeDescription& desc, const Vec3<attr_t> color, const Vec3<attr_t> predictedColor, const Quantizers& quant); static void decidePredModeColor( const AttributeDescription& desc, const AttributeParameterSet& aps, const PCCPointSet3& pointCloud, const std::vector<uint32_t>& indexesLOD, const uint32_t predictorIndex, PCCPredictor& predictor, PCCResidualsEncoder& encoder, PCCResidualsEntropyEstimator& context, const Vec3<int8_t>& icpCoeff, const Quantizers& quant); static void encodePredModeColor( const AttributeParameterSet& aps, int predMode, Vec3<int32_t>& coeff); static int64_t computeReflectanceResidual( const uint64_t reflectance, const uint64_t predictedReflectance, const Quantizer& quant); static void decidePredModeRefl( const AttributeDescription& desc, const AttributeParameterSet& aps, const PCCPointSet3& pointCloud, const std::vector<uint32_t>& indexesLOD, const uint32_t predictorIndex, PCCPredictor& predictor, PCCResidualsEncoder& encoder, PCCResidualsEntropyEstimator& context, const Quantizer& quant); static void encodePredModeRefl( const AttributeParameterSet& aps, int predMode, int32_t& coeff); private: std::vector<int8_t> computeLastComponentPredictionCoeff( const AttributeParameterSet& aps, const std::vector<Vec3<int64_t>>& coeffs); std::vector<Vec3<int8_t>> computeInterComponentPredictionCoeffs( const AttributeParameterSet& aps, const PCCPointSet3& pointCloud); private: // The current attribute slice header AttributeBrickHeader* _abh; AttributeLods _lods; }; //============================================================================ } /* namespace pcc */
6,011
31.852459
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/BitReader.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <cstdint> #include <cstdlib> namespace pcc { //============================================================================ template<class ForwardIt> class BitReader { public: BitReader(ForwardIt bs, ForwardIt bs_end) : _bs(bs), _bsEnd(bs_end), _bitMask(), _buffer() {} // return the current bytestream read position; ForwardIt pos() { return _bs; } bool read(); uint64_t readUn(int num_bits); int64_t readSn(int num_bits); uint64_t readUe(); int64_t readSe(); float readF(); template<typename T> void read(T* value) { *value = T(read()); } template<typename T> void readUn(int num_bits, T* value) { *value = T(readUn(num_bits)); } template<typename T> void readSn(int num_bits, T* value) { *value = T(readSn(num_bits)); } template<typename T> void readUe(T* value) { *value = T(readUe()); } template<typename T> void readSe(T* value) { *value = T(readSe()); } template<typename T> void readF(T* value) { *value = T(readF()); } void byteAlign(); private: ForwardIt _bs; ForwardIt _bsEnd; int _bitMask; uint8_t _buffer; }; //============================================================================ template<typename T> BitReader<T> makeBitReader(T start, T end) { return BitReader<T>(start, end); } //============================================================================ template<class ForwardIt> bool BitReader<ForwardIt>::read() { if (_bitMask == 0) { if (_bs == _bsEnd) return false; _buffer = *_bs; _bs++; _bitMask = 1 << 7; } bool value = _buffer & _bitMask; _bitMask >>= 1; return value; } //---------------------------------------------------------------------------- template<class ForwardIt> void BitReader<ForwardIt>::byteAlign() { _bitMask = 0; return; } //---------------------------------------------------------------------------- template<class ForwardIt> uint64_t BitReader<ForwardIt>::readUn(int num_bits) { uint64_t value = 0; for (int i = 0; i < num_bits; i++) { value <<= 1; value |= unsigned(read()); } return value; } 
//---------------------------------------------------------------------------- template<class ForwardIt> int64_t BitReader<ForwardIt>::readSn(int num_bits) { int64_t value = readUn(num_bits); bool sign = read(); return sign ? -value : value; } //---------------------------------------------------------------------------- template<class ForwardIt> uint64_t BitReader<ForwardIt>::readUe() { int len = 0; while (!read()) len++; uint64_t value = (1ull << len) | readUn(len); return value - 1; } //---------------------------------------------------------------------------- template<class ForwardIt> int64_t BitReader<ForwardIt>::readSe() { uint64_t value = readUe(); bool sign = value & 1; value = (value + sign) >> 1; return sign ? value : -value; } //---------------------------------------------------------------------------- template<class ForwardIt> float BitReader<ForwardIt>::readF() { uint32_t bits = uint32_t(readUn(32)); char* data = reinterpret_cast<char*>(&bits); return *reinterpret_cast<float*>(data); } //============================================================================ } // namespace pcc
5,145
22.934884
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/BitWriter.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include "PCCMisc.h" #include <cstdint> #include <cstdlib> namespace pcc { //============================================================================ template<class OutputIt> class BitWriter { public: BitWriter(OutputIt bs) : _bs(bs), _num_bits(), _buffer() {} void write(bool bit); // Write value using a fixed-width (num_bits) literal encoding (big endian). void writeUn(int num_bits, uint64_t value); template<typename T> void writeUn(int num_bits, const T& value) { writeUn(num_bits, uint64_t(value)); } // Write value using a fixed-width (num_bits+sign) literal encoding (big // endian). void writeSn(int num_bits, int64_t value); void writeUe(uint32_t value); // NB: probably best to think twice before using this method void writeUe64(uint64_t value); template<typename T> void writeUe(const T& value) { // Avoid accidental truncation constexpr bool use_writeUe64 = std::is_same<uint64_t, T>::value || std::is_same<int64_t, T>::value; static_assert(!use_writeUe64, "use explicit writeUe64()"); writeUe(uint32_t(value)); } void writeSe(int32_t value); void writeF(float value); void byteAlign(); private: OutputIt _bs; int _num_bits; uint8_t _buffer; }; //============================================================================ class InsertionCounter { public: InsertionCounter(int* counter) : _counter(counter) {} InsertionCounter& operator++(int) { return *this; } InsertionCounter& operator++() { return *this; } InsertionCounter& operator*() { return *this; } template<typename T> InsertionCounter& operator=(const T&) { (*_counter)++; return *this; } private: int* _counter; }; //============================================================================ template<typename T> BitWriter<T> makeBitWriter(T t) { return BitWriter<T>(t); } //---------------------------------------------------------------------------- template<class OutputIt> void BitWriter<OutputIt>::write(bool bit) { _buffer <<= 1; _buffer |= uint8_t(bit); _num_bits++; if (_num_bits == 8) { 
*_bs++ = static_cast<char>(_buffer); _buffer = 0; _num_bits = 0; } } //---------------------------------------------------------------------------- template<class OutputIt> void BitWriter<OutputIt>::byteAlign() { if (!_num_bits) return; _buffer <<= 8 - _num_bits; *_bs++ = static_cast<char>(_buffer); _buffer = 0; _num_bits = 0; } //---------------------------------------------------------------------------- template<class OutputIt> void BitWriter<OutputIt>::writeUn(int num_bits, uint64_t value) { if (!num_bits) return; for (uint64_t mask = uint64_t(1) << (num_bits - 1); mask; mask >>= 1) { write(!!(value & mask)); } } //---------------------------------------------------------------------------- template<class OutputIt> void BitWriter<OutputIt>::writeSn(int num_bits, int64_t value) { writeUn(num_bits, uint64_t(::llabs(value))); write(value < 0); } //---------------------------------------------------------------------------- template<class OutputIt> void BitWriter<OutputIt>::writeUe(uint32_t value) { value++; int len = ilog2(value); writeUn(len, 0); writeUn(len + 1, value); } //---------------------------------------------------------------------------- template<class OutputIt> void BitWriter<OutputIt>::writeUe64(uint64_t value) { value++; int len = ilog2(value); writeUn(len, 0); writeUn(len + 1, value); } //---------------------------------------------------------------------------- template<class OutputIt> void BitWriter<OutputIt>::writeSe(int32_t value) { bool sign = value > 0; value = uint32_t(::llabs(value)) << 1; writeUe(value - sign); } //---------------------------------------------------------------------------- template<class OutputIt> void BitWriter<OutputIt>::writeF(float value) { char* data = reinterpret_cast<char*>(&value); uint32_t val = *reinterpret_cast<uint32_t*>(data); writeUn(32, val); } //============================================================================ } // namespace pcc
5,981
25.469027
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/DualLutCoder.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "DualLutCoder.h" #include <algorithm> namespace pcc { //============================================================================ // :: FrequencySortingLut methods template<int _lutSize, int _alphabetSize> FrequencySortingLut<_lutSize, _alphabetSize>::FrequencySortingLut( int maxUpdatePeriod, int maxHistogramCount, const uint8_t initTable[_lutSize]) : _maxHistogramCount(maxHistogramCount), _maxUpdatePeriod(maxUpdatePeriod) { assert(maxHistogramCount <= kMaxHistogramCount); assert(maxUpdatePeriod <= kMaxUpdatePeriod); init(initTable); } //---------------------------------------------------------------------------- template<int _lutSize, int _alphabetSize> void FrequencySortingLut<_lutSize, _alphabetSize>::init( const uint8_t initTable[_lutSize]) { for (int k = 0; k < _alphabetSize; ++k) { _histogram[k] = 0; _toIndex[k] = kUndefinedIndex; } // Initialise LUT with a default mapping for (int k = 0; k < _lutSize; ++k) { int symbol = initTable ? initTable[k] : k; _toSymbol[k] = symbol; _toIndex[symbol] = k; _histogram[symbol] = 1; } } //---------------------------------------------------------------------------- template<int _lutSize, int _alphabetSize> void FrequencySortingLut<_lutSize, _alphabetSize>::update() { // NB: This expression limits the value of _maxUpdatePeriod to 0x33333333 _updatePeriod = std::min((5u * _updatePeriod) >> 2, _maxUpdatePeriod); _symbolsUntilUpdate = _updatePeriod; // Sort the symbols by occurrence. // NB: stability is guaranteed by including symbol value to break ties. 
uint32_t tmp[_alphabetSize]; for (int symbol = 0; symbol < _alphabetSize; ++symbol) tmp[symbol] = ((~_histogram[symbol]) << 8) + symbol; std::nth_element(tmp, tmp + _lutSize, tmp + _alphabetSize); std::sort(tmp, tmp + _lutSize); // Remove any existing mappings for (int k = 0; k < _lutSize; ++k) _toIndex[_toSymbol[k]] = kUndefinedIndex; // Re-populate the LUTs for (int k = 0; k < _lutSize; ++k) { uint32_t symbol = (tmp[k] & 255); _toSymbol[k] = symbol; _toIndex[symbol] = k; } // Reset the symbol counts if (_reset) { _reset = false; for (int k = 0; k < _alphabetSize; ++k) _histogram[k] = 0; for (int k = 0; k < _lutSize; ++k) _histogram[_toSymbol[k]] = 1; } } //---------------------------------------------------------------------------- template<int _lutSize, int _alphabetSize> void FrequencySortingLut<_lutSize, _alphabetSize>::pushSymbol(int symbol) { assert(unsigned(symbol) < _alphabetSize); if (++_histogram[symbol] == _maxHistogramCount) { for (int k = 0; k < _alphabetSize; ++k) _histogram[k] = _histogram[k] >> 1; } if (!(--_symbolsUntilUpdate)) update(); } //============================================================================ // :: LRU Cache methods template<int _cacheSize, int _alphabetSize> FrequentSymbolCache<_cacheSize, _alphabetSize>::FrequentSymbolCache() { for (int k = 0; k < _cacheSize; ++k) { _toSymbol[k] = k; _toIndex[k] = k; } for (int k = _cacheSize; k < _alphabetSize; ++k) _toIndex[k] = kUndefinedIndex; _last = 0; } //---------------------------------------------------------------------------- template<int _cacheSize, int _alphabetSize> void FrequentSymbolCache<_cacheSize, _alphabetSize>::pushSymbol(int symbol) { assert(symbol < _alphabetSize); const int index = _toIndex[symbol]; const int index0 = (_last++) % _cacheSize; const int symbol0 = _toSymbol[index0]; assert(symbol0 < _alphabetSize); std::swap(_toIndex[symbol], _toIndex[symbol0]); if (index == kUndefinedIndex) { _toSymbol[index0] = symbol; } else { std::swap(_toSymbol[index0], 
_toSymbol[index]); } } //============================================================================ // :: Entropy coding template<bool _limitedContextMode> DualLutCoder<_limitedContextMode>::DualLutCoder() : DualLutCoder(nullptr) {} template<> DualLutCoder<true>::DualLutCoder(const uint8_t initTable[32]) : _adaptiveLut(1024, 1024, initTable) {} template<> DualLutCoder<false>::DualLutCoder(const uint8_t initTable[32]) : _adaptiveLut(0x33333333, 1 << 24, initTable) {} //---------------------------------------------------------------------------- template<bool _limitedContextMode> void DualLutCoder<_limitedContextMode>::init(const uint8_t initTable[32]) { _adaptiveLut.init(initTable); } //---------------------------------------------------------------------------- template<bool _limitedContextMode> void DualLutCoder<_limitedContextMode>::resetLut() { _adaptiveLut.reset(); } //---------------------------------------------------------------------------- template<> void DualLutCoder<true>::encodeFrequencySortedLutIndex( int index, EntropyEncoder* entropy) { bool b4 = index & 1; bool b3 = (index >> 1) & 1; bool b2 = (index >> 2) & 1; bool b1 = (index >> 3) & 1; bool b0 = (index >> 4) & 1; entropy->encode(b0, _ctxLutIndex[0]); if (b0) { entropy->encode(b1); entropy->encode(b2); entropy->encode(b3); entropy->encode(b4); return; } entropy->encode(b1, _ctxLutIndex[1]); if (b1) { entropy->encode(b2); entropy->encode(b3); entropy->encode(b4); return; } entropy->encode(b2, _ctxLutIndex[2]); if (b2) { entropy->encode(b3); entropy->encode(b4); return; } entropy->encode(b3, _ctxLutIndex[3]); entropy->encode(b4, _ctxLutIndex[4]); } //---------------------------------------------------------------------------- template<> void DualLutCoder<false>::encodeFrequencySortedLutIndex( int index, EntropyEncoder* entropy) { entropy->encode((index >> 4) & 1, _ctxLutIndex[0]); entropy->encode((index >> 3) & 1, _ctxLutIndex[1 + (index >> 4)]); entropy->encode((index >> 2) & 1, _ctxLutIndex[3 + 
(index >> 3)]); entropy->encode((index >> 1) & 1, _ctxLutIndex[7 + (index >> 2)]); entropy->encode((index >> 0) & 1, _ctxLutIndex[15 + (index >> 1)]); } //---------------------------------------------------------------------------- template<bool _limitedContextMode> void DualLutCoder<_limitedContextMode>::encode(int value, EntropyEncoder* entropy) { // One of three coding methods are used: // - Encode position in LUT (if present) // - Encode position in Cache (if present) // - Encode symbol directly // LUT index coding int index = _adaptiveLut.getIndex(value); bool inLut = index != _adaptiveLut.end(); _adaptiveLut.pushSymbol(value); entropy->encode(inLut, _ctxLutHit); if (inLut) { encodeFrequencySortedLutIndex(index, entropy); return; } // Cache index coding index = _cache.getIndex(value); bool inCache = index != _cache.end(); _cache.pushSymbol(value); entropy->encode(inCache, _ctxCacheHit); if (inCache) { for (int i = 0; i < kLog2CacheSize; ++i) { entropy->encode(index & 1); index >>= 1; } return; } // Direct coding for (int i = 0; i < 8; ++i) { entropy->encode(value & 1, _ctxSymbolBit); value >>= 1; } } //---------------------------------------------------------------------------- template<> int DualLutCoder<true>::decodeFrequencySortedLutIndex(EntropyDecoder* entropy) { bool b0, b1, b2, b3, b4; b0 = entropy->decode(_ctxLutIndex[0]); if (b0) { b1 = entropy->decode(); b2 = entropy->decode(); b3 = entropy->decode(); b4 = entropy->decode(); } else { b1 = entropy->decode(_ctxLutIndex[1]); if (b1) { b2 = entropy->decode(); b3 = entropy->decode(); b4 = entropy->decode(); } else { b2 = entropy->decode(_ctxLutIndex[2]); if (b2) { b3 = entropy->decode(); b4 = entropy->decode(); } else { b3 = entropy->decode(_ctxLutIndex[3]); b4 = entropy->decode(_ctxLutIndex[4]); } } } return (b0 << 4) | (b1 << 3) | (b2 << 2) | (b3 << 1) | b4; } //---------------------------------------------------------------------------- template<> int 
DualLutCoder<false>::decodeFrequencySortedLutIndex(EntropyDecoder* entropy) { int index = 0; index = (index << 1) | entropy->decode(_ctxLutIndex[0]); index = (index << 1) | entropy->decode(_ctxLutIndex[1 + index]); index = (index << 1) | entropy->decode(_ctxLutIndex[3 + index]); index = (index << 1) | entropy->decode(_ctxLutIndex[7 + index]); index = (index << 1) | entropy->decode(_ctxLutIndex[15 + index]); return index; } //---------------------------------------------------------------------------- template<bool _limitedContextMode> int DualLutCoder<_limitedContextMode>::decode(EntropyDecoder* entropy) { int symbol; bool inLut = entropy->decode(_ctxLutHit); if (inLut) { int index = decodeFrequencySortedLutIndex(entropy); symbol = _adaptiveLut.getSymbol(index); } if (!inLut) { bool inCache = entropy->decode(_ctxCacheHit); if (inCache) { int index = 0; for (int i = 0; i < kLog2CacheSize; ++i) { index |= entropy->decode() << i; } symbol = _cache.getSymbol(index); } else { symbol = 0; for (int i = 0; i < 8; ++i) { symbol |= entropy->decode(_ctxSymbolBit) << i; } } _cache.pushSymbol(symbol); } _adaptiveLut.pushSymbol(symbol); return symbol; } //============================================================================ template class DualLutCoder<true>; template class DualLutCoder<false>; //============================================================================ } // namespace pcc
11,385
27.253102
78
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/DualLutCoder.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <cassert> #include <cstdint> #include "entropy.h" namespace pcc { //============================================================================ // A forward and inverse symbol to frequency-sorted index lookup table. // // Maintains mappings for the most @_lutSize frequent symbols from an // @_alphabetSize alphabet. // // Mappings are recomputed using frequency statistics maintained for // each symbol on a periodic basis following symbol insertion according // to an exponential back-off and maximum update period. // template<int _lutSize, int _alphabetSize> class FrequencySortingLut { public: // Implementation detail assumes that: // _alphabetSize * _maxHistogramCount <= UINT32_MAX static_assert(_alphabetSize <= 256, "Symbols must be no more than 8-bit"); // It makes little sense for the LUT to be bigger than the alphabet // xxx static_assert(_lutSize <= _alphabetSize, "LUT is too large"); static const int kMaxUpdatePeriod = 0x33333333; static const int kMaxHistogramCount = 1 << 24; FrequencySortingLut( int maxUpdatePeriod = kMaxUpdatePeriod, int _maxHistogramCount = kMaxHistogramCount, const uint8_t buffer[_lutSize] = nullptr); FrequencySortingLut(const FrequencySortingLut&) = default; FrequencySortingLut(FrequencySortingLut&&) = default; FrequencySortingLut& operator=(const FrequencySortingLut&) = default; FrequencySortingLut& operator=(FrequencySortingLut&&) = default; void init(const uint8_t buffer[_lutSize]); void reset() { _reset = true; } // Update the histogram entry for @symbol and recompute LUT if sufficient // symbols have been processed. 
void pushSymbol(int symbol); // The sorted index for @symbol int getIndex(int symbol) const { assert(unsigned(symbol) < _alphabetSize); return _toIndex[symbol]; } // The @index -th symbol int getSymbol(int index) const { assert(unsigned(index) < _lutSize); return _toSymbol[index]; } int end() const { return kUndefinedIndex; } private: static const int kInitialUpdatePeriod = 16; static const int kUndefinedIndex = -1; // Re-compute the LUT based on symbol histogram. void update(); // per-symbol occurence counts int _histogram[_alphabetSize]; // mapping of symbol to LUT index // NB: type must allow storage of distinct kUndefinedIndex int8_t _toIndex[_alphabetSize]; // mapping of LUT index to symbol uint8_t _toSymbol[_lutSize]; int _maxHistogramCount; unsigned _maxUpdatePeriod; unsigned _updatePeriod = kInitialUpdatePeriod; unsigned _symbolsUntilUpdate = kInitialUpdatePeriod; bool _reset = false; }; //============================================================================ // A forward and inverse cache of recently used symbols. // // A least recently used eviction policy is used to update the cache. 
template<int _cacheSize, int _alphabetSize> class FrequentSymbolCache { public: // Implementation detail assumes 8-bit alphabet: static_assert(_alphabetSize <= 256, "Symbols must be no more than 8-bit"); // It makes little sense for the LUT to be bigger than the alphabet static_assert(_cacheSize <= _alphabetSize, "LUT is larger than alphabet?"); FrequentSymbolCache(); FrequentSymbolCache(const FrequentSymbolCache&) = default; FrequentSymbolCache(FrequentSymbolCache&&) = default; FrequentSymbolCache& operator=(const FrequentSymbolCache&) = default; FrequentSymbolCache& operator=(FrequentSymbolCache&&) = default; void pushSymbol(int symbol); int getIndex(int symbol) const { assert(unsigned(symbol) < _alphabetSize); return _toIndex[symbol]; } int getSymbol(int index) const { assert(unsigned(index) < _cacheSize); return _toSymbol[index]; } int end() const { return kUndefinedIndex; } private: static const int kUndefinedIndex = -1; // mapping of symbol to cached index int8_t _toIndex[_alphabetSize]; // mapping of cached index to symbol uint8_t _toSymbol[_cacheSize]; unsigned _last; }; //============================================================================ template<bool _limitedContextMode> class DualLutCoder { static const int kLog2CacheSize = 4; static const int kCacheSize = 1 << kLog2CacheSize; static const int kLog2LutSize = 5; static const int kLutSize = 1 << kLog2LutSize; static const int kNumLutContexts = _limitedContextMode ? 
5 : 31; public: DualLutCoder(); DualLutCoder(const uint8_t initTable[kLutSize]); DualLutCoder(const DualLutCoder&) = default; DualLutCoder(DualLutCoder&&) = default; DualLutCoder& operator=(const DualLutCoder&) = default; DualLutCoder& operator=(DualLutCoder&&) = default; void init(const uint8_t initTable[32]); void resetLut(); void encode(int symbol, EntropyEncoder* arithmeticEncoder); int decode(EntropyDecoder* arithmeticDecoder); private: void encodeFrequencySortedLutIndex(int index, EntropyEncoder* entropy); int decodeFrequencySortedLutIndex(EntropyDecoder* entropy); // bool _limitedContextMode; FrequentSymbolCache<kCacheSize, 256> _cache; FrequencySortingLut<kLutSize, 256> _adaptiveLut; AdaptiveBitModelFast _ctxLutHit; AdaptiveBitModelFast _ctxCacheHit; AdaptiveBitModelFast _ctxSymbolBit; AdaptiveBitModelFast _ctxLutIndex[kNumLutContexts]; }; //============================================================================ } // namespace pcc
7,234
31.59009
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/FixedPoint.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2019, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "FixedPoint.h" namespace pcc { //============================================================================ void FixedPoint::operator/=(const FixedPoint& that) { if (this->val < 0) { if (that.val < 0) this->val = -(((-that.val) >> 1) + ((-this->val) << kFracBits)) / that.val; else this->val = -(((+that.val) >> 1) + ((-this->val) << kFracBits)) / that.val; } else { if (that.val < 0) this->val = +(((-that.val) >> 1) + ((+this->val) << kFracBits)) / that.val; else this->val = +(((+that.val) >> 1) + ((+this->val) << kFracBits)) / that.val; } } //============================================================================ } // namespace pcc
2,559
38.384615
78
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/FixedPoint.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2019, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <cstdint> namespace pcc { //============================================================================ class FixedPoint { public: // Number of fractional bits in fixed point representation static const int kFracBits = 15; static const int kOneHalf = 1 << (kFracBits - 1); // Fixed point value int64_t val; FixedPoint() = default; FixedPoint(const FixedPoint&) = default; FixedPoint(FixedPoint&&) = default; FixedPoint& operator=(const FixedPoint&) = default; FixedPoint& operator=(FixedPoint&&) = default; FixedPoint(int val) { this->operator=(int64_t(val)); } FixedPoint(int64_t val) { this->operator=(val); } FixedPoint(double val) { this->val = int64_t(val * (1 << kFracBits)); } // return the rounded integer value int64_t round(); void operator=(const int64_t val); void operator+=(const FixedPoint& that); void operator-=(const FixedPoint& that); void operator*=(const FixedPoint& that); void operator/=(const FixedPoint& that); }; //============================================================================ inline int64_t FixedPoint::round() { if (this->val > 0) return (kOneHalf + this->val) >> kFracBits; return -((kOneHalf - this->val) >> kFracBits); } //---------------------------------------------------------------------------- inline void FixedPoint::operator=(int64_t val) { if (val > 0) this->val = val << kFracBits; else this->val = -((-val) << kFracBits); } //---------------------------------------------------------------------------- inline void FixedPoint::operator+=(const FixedPoint& that) { this->val += that.val; } //---------------------------------------------------------------------------- inline void FixedPoint::operator-=(const FixedPoint& that) { this->val -= that.val; } //---------------------------------------------------------------------------- inline void FixedPoint::operator*=(const FixedPoint& that) { this->val *= that.val; if (this->val < 0) this->val = -((kOneHalf - this->val) >> kFracBits); else this->val = +((kOneHalf 
+ this->val) >> kFracBits); } //============================================================================ } // namespace pcc
4,047
30.874016
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/OctreeNeighMap.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "OctreeNeighMap.h" #include <iostream> #if WITH_MEMCHECK # include <valgrind/memcheck.h> #else # define VALGRIND_MAKE_MEM_UNDEFINED(a, b) (void)0 #endif namespace pcc { //============================================================================ void MortonMap3D::clearUpdates() { for (const auto byteIndex : _updates) { _buffer[byteIndex] = uint8_t(0); } _updates.resize(0); // Access to the child array is supposed to be guarded by checking the main // map first. It is therefore not necessary to clear the array between // updates. Setting it to zero just hides the issue -- ie, tools like // valgrind don't complain, but the logic error is still present. // // The following undoes the effect of any writes that have cleared the // undefined state. VALGRIND_MAKE_MEM_UNDEFINED(_childOccupancy.get(), _bufferSizeInBytes << 3); } //---------------------------------------------------------------------------- void MortonMap3D::clear() { memset(_buffer.get(), 0, _bufferSizeInBytes); _updates.resize(0); // See clearUpdates() VALGRIND_MAKE_MEM_UNDEFINED(_childOccupancy.get(), _bufferSizeInBytes << 3); } //============================================================================ void updateGeometryOccupancyAtlas( const Vec3<int32_t>& currentPosition, const int atlasShift, const pcc::ringbuf<PCCOctree3Node>& fifo, const pcc::ringbuf<PCCOctree3Node>::iterator& fifoCurrLvlEnd, MortonMap3D* occupancyAtlas, Vec3<int32_t>* atlasOrigin) { const uint32_t mask = (1 << occupancyAtlas->cubeSizeLog2()) - 1; const int shift = occupancyAtlas->cubeSizeLog2(); const int shiftX = (atlasShift & 4 ? 1 : 0); const int shiftY = (atlasShift & 2 ? 1 : 0); const int shiftZ = (atlasShift & 1 ? 1 : 0); const auto currentOrigin = currentPosition >> shift; // only refresh the atlas if the current position lies outside the // the current atlas. 
if (*atlasOrigin == currentOrigin) { return; } *atlasOrigin = currentOrigin; occupancyAtlas->clearUpdates(); for (auto it = fifo.begin(); it != fifoCurrLvlEnd; ++it) { if (currentOrigin != it->pos >> shift) break; const uint32_t x = (it->pos[0] & mask) >> shiftX; const uint32_t y = (it->pos[1] & mask) >> shiftY; const uint32_t z = (it->pos[2] & mask) >> shiftZ; occupancyAtlas->setByte(x, y, z, it->siblingOccupancy); } } //---------------------------------------------------------------------------- void updateGeometryOccupancyAtlasOccChild( const Vec3<int32_t>& pos, uint8_t childOccupancy, MortonMap3D* occupancyAtlas) { uint32_t mask = (1 << occupancyAtlas->cubeSizeLog2()) - 1; uint32_t x = pos[0] & mask; uint32_t y = pos[1] & mask; uint32_t z = pos[2] & mask; occupancyAtlas->setChildOcc(x, y, z, childOccupancy); } //---------------------------------------------------------------------------- // neighIdx: 0 => (x-1), 1 => (y-1), 2 => (z-1) // static GeometryNeighPattern updatePatternFromNeighOccupancy( const MortonMap3D& occupancyAtlas, int x, int y, int z, GeometryNeighPattern gnp, int neighIdx, bool codedAxisCurLvl) { static const uint8_t childMasks[] = { 0xf0 /* x-1 */, 0xcc /* y-1 */, 0xaa /* z-1 */ }; uint32_t patternBit = 1 << (1 << neighIdx); uint8_t childMask = childMasks[neighIdx]; // conversions between neighbour occupancy and adjacency: // x: >> 4, y: >> 2, z: >> 1 int adjacencyShift = 4 >> neighIdx; // Always inspect the adjacent children, taking into account that their // position changes depending upon whther the current axis is coded or not. 
if (!codedAxisCurLvl) { childMask ^= 0xff; adjacencyShift = 0; } if (gnp.neighPattern & patternBit) { uint8_t child_occ = occupancyAtlas.getChildOcc(x, y, z); gnp.adjNeighOcc[neighIdx] = child_occ; uint8_t child_unocc = ~child_occ; child_occ &= childMask; if (!child_occ) { /* neighbour is falsely occupied */ gnp.neighPattern ^= patternBit; } else { child_occ >>= adjacencyShift; gnp.adjacencyGt1 |= gnp.adjacencyGt0 & child_occ; gnp.adjacencyGt0 |= child_occ; } // map of children with any unoccupied adjacent child gnp.adjacencyUnocc |= (child_unocc & childMask) >> adjacencyShift; } return gnp; } //---------------------------------------------------------------------------- GeometryNeighPattern makeGeometryNeighPattern( bool adjacent_child_contextualization_enabled_flag, const Vec3<int32_t>& position, int codedAxesPrevLvl, int codedAxesCurLvl, const MortonMap3D& occupancyAtlas) { const int mask = occupancyAtlas.cubeSize() - 1; const int cubeSizeMinusOne = mask; const int32_t x = position[0] & mask; const int32_t y = position[1] & mask; const int32_t z = position[2] & mask; uint8_t neighPattern; const int sx = codedAxesPrevLvl & 4 ? 1 : 0; const int sy = codedAxesPrevLvl & 2 ? 1 : 0; const int sz = codedAxesPrevLvl & 1 ? 
1 : 0; if ( x > 0 && x < cubeSizeMinusOne && y > 0 && y < cubeSizeMinusOne && z > 0 && z < cubeSizeMinusOne) { neighPattern = occupancyAtlas.get(x + 1, y, z, sx, sy, sz); neighPattern |= occupancyAtlas.get(x - 1, y, z, sx, sy, sz) << 1; neighPattern |= occupancyAtlas.get(x, y - 1, z, sx, sy, sz) << 2; neighPattern |= occupancyAtlas.get(x, y + 1, z, sx, sy, sz) << 3; neighPattern |= occupancyAtlas.get(x, y, z - 1, sx, sy, sz) << 4; neighPattern |= occupancyAtlas.get(x, y, z + 1, sx, sy, sz) << 5; } else { neighPattern = occupancyAtlas.getWithCheck(x + 1, y, z, sx, sy, sz); neighPattern |= occupancyAtlas.getWithCheck(x - 1, y, z, sx, sy, sz) << 1; neighPattern |= occupancyAtlas.getWithCheck(x, y - 1, z, sx, sy, sz) << 2; neighPattern |= occupancyAtlas.getWithCheck(x, y + 1, z, sx, sy, sz) << 3; neighPattern |= occupancyAtlas.getWithCheck(x, y, z - 1, sx, sy, sz) << 4; neighPattern |= occupancyAtlas.getWithCheck(x, y, z + 1, sx, sy, sz) << 5; } // Above, the neighbour pattern corresponds directly to the six same // sized neighbours of the given node. // The patten is then refined by examining the available children // of the same neighbours. // NB: the process of updating neighpattern below also derives // the occupancy contextualisation bits. GeometryNeighPattern gnp = {neighPattern, 0, 0, 0}; if (!adjacent_child_contextualization_enabled_flag) return gnp; if (x > 0) gnp = updatePatternFromNeighOccupancy( occupancyAtlas, x - 1, y, z, gnp, 0, codedAxesCurLvl & 4); if (y > 0) gnp = updatePatternFromNeighOccupancy( occupancyAtlas, x, y - 1, z, gnp, 1, codedAxesCurLvl & 2); if (z > 0) gnp = updatePatternFromNeighOccupancy( occupancyAtlas, x, y, z - 1, gnp, 2, codedAxesCurLvl & 1); return gnp; } //============================================================================ } // namespace pcc
8,738
33.270588
78
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/OctreeNeighMap.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/

#pragma once

#include "PCCMath.h"
#include "geometry_octree.h"
#include "ringbuf.h"
#include "tables.h"

#include <cassert>
#include <memory>
#include <vector>

namespace pcc {

//============================================================================
// Provides a mapping of (x,y,z) co-ordinate to a bit flag.
//
// Internal representation uses a morton code to access an array of flags.
//
// Updates to the array are made byte-wise, allowing 8 flags (in morton order)
// to be stored in a single operation.

class MortonMap3D {
public:
  // (Re)allocate backing storage for a cube of 2^cubeSizeLog2 flags per
  // side.  When @childOccupancyEnabled, an additional per-node child
  // occupancy byte map (8x the flag storage) is also allocated.
  // NB: existing contents are discarded, not preserved.
  void resize(bool childOccupancyEnabled, const uint32_t cubeSizeLog2)
  {
    assert(cubeSizeLog2 < 10);
    // NOTE(review): halfCubeSizeLog2 is computed but never used here.
    const uint32_t halfCubeSizeLog2 = cubeSizeLog2 ? cubeSizeLog2 - 1 : 0;
    _cubeSizeLog2 = cubeSizeLog2;
    _cubeSize = 1 << cubeSizeLog2;
    // One byte holds the 8 morton-adjacent flags of a 2x2x2 sub-cube.
    _bufferSizeInBytes = 1 << (3 * cubeSizeLog2);
    _buffer.reset(new uint8_t[_bufferSizeInBytes]);
    if (childOccupancyEnabled)
      _childOccupancy.reset(new uint8_t[_bufferSizeInBytes << 3]);
    _updates.reserve(1 << 16);
  }

  int cubeSize() const { return _cubeSize; }
  int cubeSizeLog2() const { return _cubeSizeLog2; }

  // Removes all updates, zeros all map entries. Does not affect child map.
  void clear();

  // Reverts all updates, zeroing affected map entries.
  // Only modified bytes are touched.
  void clearUpdates();

  // Store @value (the 8 flags of one 2x2x2 group) at (x,y,z), recording
  // the touched byte so it can later be reverted by clearUpdates().
  // NB: a zero @value is ignored (the existing entry is left unchanged).
  void setByte(
    const int32_t x, const int32_t y, const int32_t z, const uint8_t value)
  {
    assert(
      x >= 0 && y >= 0 && z >= 0 && x < _cubeSize && y < _cubeSize
      && z < _cubeSize);
    if (value) {
      const uint32_t byteIndex = getByteIndex(x, y, z);
      _buffer[byteIndex] = value;
      _updates.push_back(byteIndex);
    }
  }

  // Fetch the single flag for (x,y,z).  Each co-ordinate is scaled down by
  // the corresponding shift to address the byte; a shift of 0 pins that
  // axis' bit selector to 0.
  uint32_t get(
    const int32_t x,
    const int32_t y,
    const int32_t z,
    const int shiftX,
    const int shiftY,
    const int shiftZ) const
  {
    assert(
      x >= 0 && y >= 0 && z >= 0 && x < _cubeSize && y < _cubeSize
      && z < _cubeSize);
    return (_buffer[getByteIndex(x >> shiftX, y >> shiftY, z >> shiftZ)]
            >> getBitIndex(shiftX ? x : 0, shiftY ? y : 0, shiftZ ? z : 0))
      & 1;
  }

  // As get(), but returns false for out-of-bounds co-ordinates.
  uint32_t getWithCheck(
    const int32_t x,
    const int32_t y,
    const int32_t z,
    const int shiftX,
    const int shiftY,
    const int shiftZ) const
  {
    if (
      x < 0 || x >= _cubeSize || y < 0 || y >= _cubeSize || z < 0
      || z >= _cubeSize) {
      return false;
    }
    return get(x, y, z, shiftX, shiftY, shiftZ);
  }

  // Fetch the flag for (x,y,z) with the default per-axis shift of 1.
  uint32_t get(const int32_t x, const int32_t y, const int32_t z) const
  {
    assert(
      x >= 0 && y >= 0 && z >= 0 && x < _cubeSize && y < _cubeSize
      && z < _cubeSize);
    return get(x, y, z, 1, 1, 1);
  }

  // As the three-argument get(), but returns false when out of bounds.
  uint32_t
  getWithCheck(const int32_t x, const int32_t y, const int32_t z) const
  {
    if (
      x < 0 || x >= _cubeSize || y < 0 || y >= _cubeSize || z < 0
      || z >= _cubeSize) {
      return false;
    }
    return getWithCheck(x, y, z, 1, 1, 1);
  }

  // Record the child occupancy byte for the node at (x,y,z).
  // NB: only valid if resize() was called with childOccupancyEnabled.
  void setChildOcc(int32_t x, int32_t y, int32_t z, uint8_t childOccupancy)
  {
    _childOccupancy[getByteIndex(x, y, z)] = childOccupancy;
  }

  // Fetch the child occupancy byte for the node at (x,y,z).
  uint8_t getChildOcc(int32_t x, int32_t y, int32_t z) const
  {
    uint8_t childOccupancy = _childOccupancy[getByteIndex(x, y, z)];
    return childOccupancy;
  }

private:
  // Bit position of (x,y,z) within a byte: bits are (x,y,z)-interleaved
  // with z in the least significant position.
  int32_t getBitIndex(const int32_t x, const int32_t y, const int32_t z) const
  {
    return (z & 1) + ((y & 1) << 1) + ((x & 1) << 2);
  }

  // Byte address of (x,y,z) via table-driven morton interleave.
  // NOTE(review): the tables index up to 255; presumably consistent with
  // the cubeSizeLog2 < 10 limit asserted in resize() — confirm.
  uint32_t getByteIndex(const int32_t x, const int32_t y, const int32_t z) const
  {
    return kMortonCode256X[x] | kMortonCode256Y[y] | kMortonCode256Z[z];
  }

  int _cubeSize = 0;
  int _cubeSizeLog2 = 0;
  uint32_t _bufferSizeInBytes = 0;
  std::unique_ptr<uint8_t[]> _buffer;

  // A list of indexes in _buffer that are dirty
  std::vector<uint32_t> _updates;

  // Child occupancy values
  std::unique_ptr<uint8_t[]> _childOccupancy;
};

//============================================================================

struct GeometryNeighPattern {
  // Mask indicating presence of neigbours of the corresponding tree node
  //    32 8 (y)
  //     |/
  //  2--n--1 (x)
  //    /|
  //   4 16 (z)
  uint8_t neighPattern;

  // mask indicating the number of external child neighbours
  uint8_t adjacencyGt0;
  uint8_t adjacencyGt1;

  // mask indicating unoccupied external child neighbours
  uint8_t adjacencyUnocc;

  // occupancy map of {x-1, y-1, z-1} neighbours.
  uint8_t adjNeighOcc[3];
};

//============================================================================
// determine the occupancy pattern of the six neighbours of the node at
// @position. If @adjacent_child_contextualization_enabled_flag is true,
// the occupancy state of previously coded neighbours is used to refine
// the neighbour pattern and derive external adjacency counts for each child.

GeometryNeighPattern makeGeometryNeighPattern(
  bool adjacent_child_contextualization_enabled_flag,
  const Vec3<int32_t>& currentPosition,
  int codedAxesPrevLvl,
  int codedAxesCurLvl,
  const MortonMap3D& occupancyAtlas);

// populate (if necessary) the occupancy atlas with occupancy information
// from @fifo.

void updateGeometryOccupancyAtlas(
  const Vec3<int32_t>& position,
  const int atlasShift,
  const ringbuf<PCCOctree3Node>& fifo,
  const ringbuf<PCCOctree3Node>::iterator& fifoCurrLvlEnd,
  MortonMap3D* occupancyAtlas,
  Vec3<int32_t>* atlasOrigin);

void updateGeometryOccupancyAtlasOccChild(
  const Vec3<int32_t>& pos,
  uint8_t childOccupancy,
  MortonMap3D* occupancyAtlas);

}  // namespace pcc
7,339
30.502146
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/PCCMath.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PCCMath_h #define PCCMath_h #include <assert.h> #include <cstddef> #include <iostream> #include <limits> #include <math.h> #include <string.h> #include <type_traits> #include "PCCMisc.h" #include "tables.h" #include <algorithm> namespace pcc { /// Vector dim 3 template<typename T> class Vec3 { public: T* begin() { return &data[0]; } const T* begin() const { return &data[0]; } T* end() { return &data[3]; } const T* end() const { return &data[3]; } T& operator[](size_t i) { assert(i < 3); return data[i]; } const T& operator[](size_t i) const { assert(i < 3); return data[i]; } size_t getElementCount() const { return 3; } T& x() { return data[0]; } T& y() { return data[1]; } T& z() { return data[2]; } const T& x() const { return data[0]; } const T& y() const { return data[1]; } const T& z() const { return data[2]; } T& s() { return data[0]; } T& t() { return data[1]; } T& v() { return data[2]; } const T& s() const { return data[0]; } const T& t() const { return data[1]; } const T& v() const { return data[2]; } template<typename ResultT> ResultT getNorm2() const { return Vec3<ResultT>(*this) * Vec3<ResultT>(*this); } T getNorm1() const { return std::abs(data[0]) + std::abs(data[1]) + std::abs(data[2]); } T getNormInf() const { return std::max(data[2], std::max(abs(data[0]), abs(data[1]))); } // The minimum element T min() const { return std::min({data[0], data[1], data[2]}); } // The maximum element T max() const { return std::max({data[0], data[1], data[2]}); } // Applies std::abs to each element Vec3 abs() const { return {std::abs(data[0]), std::abs(data[1]), std::abs(data[2])}; } Vec3& operator=(const Vec3& rhs) { memcpy(data, rhs.data, sizeof(data)); return *this; } template<typename U> Vec3& operator+=(const typename pcc::Vec3<U>& rhs) { data[0] += rhs[0]; data[1] += rhs[1]; data[2] += rhs[2]; return *this; } template<typename U> Vec3& operator-=(const typename pcc::Vec3<U>& rhs) { data[0] -= rhs[0]; data[1] -= rhs[1]; data[2] -= rhs[2]; return *this; } 
template<typename U> Vec3& operator-=(U a) { data[0] -= a; data[1] -= a; data[2] -= a; return *this; } template<typename U> Vec3& operator+=(U a) { data[0] += a; data[1] += a; data[2] += a; return *this; } Vec3& operator<<=(int val) { data[0] <<= val; data[1] <<= val; data[2] <<= val; return *this; } Vec3& operator<<=(Vec3<int> val) { data[0] <<= val[0]; data[1] <<= val[1]; data[2] <<= val[2]; return *this; } Vec3& operator>>=(int val) { data[0] >>= val; data[1] >>= val; data[2] >>= val; return *this; } Vec3& operator>>=(Vec3<int> val) { data[0] >>= val[0]; data[1] >>= val[1]; data[2] >>= val[2]; return *this; } template<typename U> Vec3& operator/=(U a) { assert(a != 0); data[0] /= a; data[1] /= a; data[2] /= a; return *this; } template<typename U> Vec3& operator*=(U a) { data[0] *= a; data[1] *= a; data[2] *= a; return *this; } template<typename U> Vec3& operator=(const U a) { data[0] = a; data[1] = a; data[2] = a; return *this; } template<typename U> Vec3& operator=(const U* rhs) { data[0] = rhs[0]; data[1] = rhs[1]; data[2] = rhs[2]; return *this; } Vec3 operator-() const { return Vec3<T>(-data[0], -data[1], -data[2]); } template<typename U> friend Vec3<typename std::common_type<T, U>::type> operator+(const Vec3& lhs, const typename pcc::Vec3<U>& rhs) { return Vec3<typename std::common_type<T, U>::type>( lhs[0] + rhs[0], lhs[1] + rhs[1], lhs[2] + rhs[2]); } template<typename U> friend Vec3<typename std::enable_if< std::is_arithmetic<U>::value, typename std::common_type<T, U>::type>::type> operator+(const U lhs, const Vec3& rhs) { return Vec3<typename std::common_type<T, U>::type>( lhs + rhs[0], lhs + rhs[1], lhs + rhs[2]); } template<typename U> friend Vec3<typename std::enable_if< std::is_arithmetic<U>::value, typename std::common_type<T, U>::type>::type> operator+(const Vec3& lhs, const U rhs) { return Vec3<typename std::common_type<T, U>::type>( lhs[0] + rhs, lhs[1] + rhs, lhs[2] + rhs); } template<typename U> friend Vec3<typename std::common_type<T, 
U>::type> operator-(const Vec3& lhs, const typename pcc::Vec3<U>& rhs) { return Vec3<typename std::common_type<T, U>::type>( lhs[0] - rhs[0], lhs[1] - rhs[1], lhs[2] - rhs[2]); } template<typename U> friend Vec3<typename std::enable_if< std::is_arithmetic<U>::value, typename std::common_type<T, U>::type>::type> operator-(const U lhs, const Vec3& rhs) { return Vec3<typename std::common_type<T, U>::type>( lhs - rhs[0], lhs - rhs[1], lhs - rhs[2]); } template<typename U> friend Vec3<typename std::enable_if< std::is_arithmetic<U>::value, typename std::common_type<T, U>::type>::type> operator-(const Vec3& lhs, const U rhs) { return Vec3<typename std::common_type<T, U>::type>( lhs[0] - rhs, lhs[1] - rhs, lhs[2] - rhs); } template<typename U> friend Vec3<typename std::enable_if< std::is_arithmetic<U>::value, typename std::common_type<T, U>::type>::type> operator*(const U lhs, const Vec3& rhs) { return Vec3<typename std::common_type<T, U>::type>( lhs * rhs[0], lhs * rhs[1], lhs * rhs[2]); } // todo(df): make this elementwise? 
template<typename U> friend typename std::common_type<T, U>::type operator*(pcc::Vec3<T> lhs, const pcc::Vec3<U>& rhs) { return (lhs[0] * rhs[0] + lhs[1] * rhs[1] + lhs[2] * rhs[2]); } template<typename U> friend Vec3<typename std::enable_if< std::is_arithmetic<U>::value, typename std::common_type<T, U>::type>::type> operator*(const Vec3& lhs, const U rhs) { return Vec3<typename std::common_type<T, U>::type>( lhs[0] * rhs, lhs[1] * rhs, lhs[2] * rhs); } template<typename U> friend Vec3<typename std::enable_if< std::is_arithmetic<U>::value, typename std::common_type<T, U>::type>::type> operator/(const Vec3& lhs, const U rhs) { assert(rhs != 0); return Vec3<typename std::common_type<T, U>::type>( lhs[0] / rhs, lhs[1] / rhs, lhs[2] / rhs); } friend Vec3 operator<<(const Vec3& lhs, int val) { return Vec3<T>(lhs[0] << val, lhs[1] << val, lhs[2] << val); } friend Vec3 operator<<(const Vec3& lhs, const Vec3<int>& val) { return Vec3<T>(lhs[0] << val[0], lhs[1] << val[1], lhs[2] << val[2]); } friend Vec3 operator>>(const Vec3& lhs, int val) { return Vec3<T>(lhs[0] >> val, lhs[1] >> val, lhs[2] >> val); } friend Vec3 operator>>(const Vec3& lhs, const Vec3<int>& val) { return Vec3<T>(lhs[0] >> val[0], lhs[1] >> val[1], lhs[2] >> val[2]); } bool operator<(const Vec3& rhs) const { if (data[0] == rhs[0]) { if (data[1] == rhs[1]) { return (data[2] < rhs[2]); } return (data[1] < rhs[1]); } return (data[0] < rhs[0]); } bool operator>(const Vec3& rhs) const { if (data[0] == rhs[0]) { if (data[1] == rhs[1]) { return (data[2] > rhs[2]); } return (data[1] > rhs[1]); } return (data[0] > rhs[0]); } bool operator==(const Vec3& rhs) const { return (data[0] == rhs[0] && data[1] == rhs[1] && data[2] == rhs[2]); } bool operator!=(const Vec3& rhs) const { return (data[0] != rhs[0] || data[1] != rhs[1] || data[2] != rhs[2]); } friend std::ostream& operator<<(std::ostream& os, const Vec3& vec) { os << vec[0] << " " << vec[1] << " " << vec[2]; return os; } friend std::istream& 
operator>>(std::istream& is, Vec3& vec) { is >> vec[0] >> vec[1] >> vec[2]; return is; } Vec3(const T a) { data[0] = data[1] = data[2] = a; } Vec3(const T x, const T y, const T z) { data[0] = x; data[1] = y; data[2] = z; } Vec3(const Vec3& vec) { data[0] = vec.data[0]; data[1] = vec.data[1]; data[2] = vec.data[2]; } template<typename U> Vec3(const typename pcc::Vec3<U>& vec) { data[0] = vec.data[0]; data[1] = vec.data[1]; data[2] = vec.data[2]; } Vec3() = default; ~Vec3(void) = default; private: T data[3]; template<typename U> friend class pcc::Vec3; }; template<typename T> struct Box3 { Vec3<T> min; Vec3<T> max; Box3() = default; Box3(T min, T max) : min(min), max(max) {} Box3(const Vec3<T>& min, const Vec3<T>& max) : min(min), max(max) {} template<typename ForwardIt> Box3(ForwardIt begin, ForwardIt end) : Box3(std::numeric_limits<T>::max(), std::numeric_limits<T>::lowest()) { for (auto it = begin; it != end; ++it) { auto& pt = *it; for (int k = 0; k < 3; ++k) { if (pt[k] > max[k]) max[k] = pt[k]; if (pt[k] < min[k]) min[k] = pt[k]; } } } bool contains(const Vec3<T> point) const { return !( point.x() < min.x() || point.x() > max.x() || point.y() < min.y() || point.y() > max.y() || point.z() < min.z() || point.z() > max.z()); } Box3 merge(const Box3& box) { min.x() = std::min(min.x(), box.min.x()); min.y() = std::min(min.y(), box.min.y()); min.z() = std::min(min.z(), box.min.z()); max.x() = std::max(max.x(), box.max.x()); max.y() = std::max(max.y(), box.max.y()); max.z() = std::max(max.z(), box.max.z()); return box; } bool intersects(const Box3& box) const { return max.x() >= box.min.x() && min.x() <= box.max.x() && max.y() >= box.min.y() && min.y() <= box.max.y() && max.z() >= box.min.z() && min.z() <= box.max.z(); } template<typename SquaredT> SquaredT getDist2(const Vec3<T>& point) const { using U = SquaredT; U dx = U(std::max(std::max(min[0] - point[0], T()), point[0] - max[0])); U dy = U(std::max(std::max(min[1] - point[1], T()), point[1] - max[1])); U dz = 
U(std::max(std::max(min[2] - point[2], T()), point[2] - max[2])); return dx * dx + dy * dy + dz * dz; } T getDist1(const Vec3<T>& point) const { T dx = T(std::max(std::max(min[0] - point[0], T()), point[0] - max[0])); T dy = T(std::max(std::max(min[1] - point[1], T()), point[1] - max[1])); T dz = T(std::max(std::max(min[2] - point[2], T()), point[2] - max[2])); return dx + dy + dz; } void insert(const Vec3<T>& point) { min.x() = std::min(min.x(), point.x()); min.y() = std::min(min.y(), point.y()); min.z() = std::min(min.z(), point.z()); max.x() = std::max(max.x(), point.x()); max.y() = std::max(max.y(), point.y()); max.z() = std::max(max.z(), point.z()); } friend std::ostream& operator<<(std::ostream& os, const Box3& box) { os << box.min[0] << " " << box.min[1] << " " << box.min[2] << " " << box.max[0] << " " << box.max[1] << " " << box.max[2]; return os; } friend std::istream& operator>>(std::istream& is, Box3& box) { is >> box.min[0] >> box.min[1] >> box.min[2] >> box.max[0] >> box.max[1] >> box.max[2]; return is; } }; //--------------------------------------------------------------------------- // element-wise multiplication of two vectors template<typename T, typename U> Vec3<typename std::common_type<T, U>::type> times(Vec3<T> lhs, const Vec3<U>& rhs) { return Vec3<typename std::common_type<T, U>::type>( lhs[0] * rhs[0], lhs[1] * rhs[1], lhs[2] * rhs[2]); } //--------------------------------------------------------------------------- typedef DEPRECATED_MSVC Vec3<double> PCCVector3D DEPRECATED; typedef DEPRECATED_MSVC Vec3<double> PCCPoint3D DEPRECATED; typedef DEPRECATED_MSVC Box3<double> PCCBox3D DEPRECATED; typedef DEPRECATED_MSVC Vec3<uint8_t> PCCColor3B DEPRECATED; template<typename T> using PCCVector3 DEPRECATED = Vec3<T>; //=========================================================================== struct Rational { int numerator; int denominator; Rational() : Rational(0, 1){}; Rational(int numerator) : Rational(numerator, 1){}; Rational(int numerator, 
int denominator) : numerator(numerator), denominator(denominator) {} Rational(float val); Rational(double val); operator double() const { return double(numerator) / double(denominator); } operator float() const { return float(numerator) / float(denominator); } }; //--------------------------------------------------------------------------- inline Rational reciprocal(const Rational x) { return Rational(x.denominator, x.numerator); } //=========================================================================== template<typename T> T PCCClip(const T& n, const T& lower, const T& upper) { return std::max(lower, std::min(n, upper)); } template<typename T> bool PCCApproximatelyEqual( T a, T b, T epsilon = std::numeric_limits<double>::epsilon()) { return fabs(a - b) <= ((fabs(a) < fabs(b) ? fabs(b) : fabs(a)) * epsilon); } //--------------------------------------------------------------------------- inline int64_t mortonAddr(const int32_t x, const int32_t y, const int32_t z) { assert(x >= 0 && y >= 0 && z >= 0); int64_t answer = kMortonCode256X[(x >> 16) & 0xFF] | kMortonCode256Y[(y >> 16) & 0xFF] | kMortonCode256Z[(z >> 16) & 0xFF]; answer = answer << 24 | kMortonCode256X[(x >> 8) & 0xFF] | kMortonCode256Y[(y >> 8) & 0xFF] | kMortonCode256Z[(z >> 8) & 0xFF]; answer = answer << 24 | kMortonCode256X[x & 0xFF] | kMortonCode256Y[y & 0xFF] | kMortonCode256Z[z & 0xFF]; return answer; } //--------------------------------------------------------------------------- // Convert a vector position to morton order address. 
template<typename T> int64_t mortonAddr(const Vec3<T>& vec) { return mortonAddr(int(vec.x()), int(vec.y()), int(vec.z())); } //--------------------------------------------------------------------------- inline int64_t PCCClip(const int64_t& n, const int64_t& lower, const int64_t& upper) { return std::max(lower, std::min(n, upper)); } //--------------------------------------------------------------------------- // Integer division of @x by 2^shift, truncating towards zero. template<typename T> inline T divExp2(T x, int shift) { return x >= 0 ? x >> shift : -(-x >> shift); } //--------------------------------------------------------------------------- // Integer division of @x by 2^shift, rounding intermediate half values // to +Inf. inline int64_t divExp2RoundHalfUp(int64_t x, int shift) { if (!shift) return x; int64_t half = 1ll << (shift - 1); return (x + half) >> shift; } //--------------------------------------------------------------------------- // Integer division of @scalar by 2^shift, rounding intermediate half values // away from zero. inline int64_t divExp2RoundHalfInf(int64_t scalar, int shift) { if (!shift) return scalar; int64_t s0 = 1ll << (shift - 1); return scalar >= 0 ? (s0 + scalar) >> shift : -((s0 - scalar) >> shift); } //--------------------------------------------------------------------------- // Integer division of @scalar by 2^shift, rounding intermediate half values // away from zero. inline uint64_t divExp2RoundHalfInf(uint64_t scalar, int shift) { if (!shift) return scalar; return ((1ull << (shift - 1)) + scalar) >> shift; } //--------------------------------------------------------------------------- // Component-wise integer division of @vec by 2^shift, rounding intermediate // half values away from zero. 
// Component-wise application of the scalar divExp2RoundHalfInf.
template<typename T>
inline Vec3<T>
divExp2RoundHalfInf(Vec3<T> vec, int shift)
{
  for (int k = 0; k < 3; ++k)
    vec[k] = divExp2RoundHalfInf(vec[k], shift);
  return vec;
}

//---------------------------------------------------------------------------
// Lookup table of approximate scaled reciprocals (defined out of line).

extern const uint16_t kDivApproxDivisor[256];

//---------------------------------------------------------------------------
// Approximate 1/@b as a fixed-point integer; the scale (as a power of two)
// is returned via @log2InvScale.

inline int64_t
divInvDivisorApprox(const uint64_t b, int32_t& log2InvScale)
{
  assert(b > 0);

  const int32_t lutSizeLog2 = 8;
  // Normalise b to the table's 8-bit index range, rounding to nearest.
  const auto n = std::max(0, ilog2(b) + 1 - lutSizeLog2);
  const auto index = (b + ((1ull << n) >> 1)) >> n;
  assert(unsigned(index) <= (1 << lutSizeLog2));

  log2InvScale = n + (lutSizeLog2 << 1);
  return kDivApproxDivisor[index - 1] + 1;
}

//---------------------------------------------------------------------------
// Approximate @a / @b, with the result scaled by 2^log2Scale.
// NB: |a| must be less than 2^46.

inline int64_t
divApprox(const int64_t a, const uint64_t b, const int32_t log2Scale)
{
  assert(abs(a) < (1ull << 46));
  int32_t log2InvScale;
  const int64_t invB = divInvDivisorApprox(b, log2InvScale);

  // Multiply by the scaled reciprocal, then remove the excess scale.
  return (invB * a) >> (log2InvScale - log2Scale);
}

//---------------------------------------------------------------------------
// Approximate a fixed-point reciprocal of @b via Newton-Raphson iteration
// (NIter refinement steps); the result's scale is returned via @log2Scale.

template<unsigned NIter = 1>
inline int64_t
recipApprox(int64_t b, int32_t& log2Scale)
{
  // Normalise b into [2^30, 2^31), tracking the applied scaling.
  int log2ScaleOffset = 0;
  int32_t log2bPlusOne = ilog2(uint64_t(b)) + 1;

  if (log2bPlusOne > 31) {
    b >>= log2bPlusOne - 31;
    log2ScaleOffset -= log2bPlusOne - 31;
  }

  if (log2bPlusOne < 31) {
    b <<= 31 - log2bPlusOne;
    log2ScaleOffset += 31 - log2bPlusOne;
  }

  // Initial approximation: 48/17 - 32/17 * b with 28 bits decimal prec
  int64_t bRecip = ((0x2d2d2d2dLL << 31) - 0x1e1e1e1eLL * b) >> 28;

  // Newton-Raphson: r' = r + r * (1 - b * r).
  for (unsigned i = 0; i < NIter; ++i)
    bRecip += bRecip * ((1LL << 31) - (b * bRecip >> 31)) >> 31;

  log2Scale = (31 << 1) - log2ScaleOffset;
  return bRecip;
}

//---------------------------------------------------------------------------
// Component-wise approximate division of @a by @b; see the scalar
// divApprox for the scaling convention.
// NB: each |a[k]| must be less than 2^46.

inline Vec3<int64_t>
divApprox(const Vec3<int64_t> a, const uint64_t b, const int32_t log2Scale)
{
  assert(abs(a[0]) < (1ull << 46));
  assert(abs(a[1]) < (1ull << 46));
  assert(abs(a[2]) < (1ull << 46));
  int32_t log2InvScale;
  const int64_t invB = divInvDivisorApprox(b, log2InvScale);
  const int32_t n = log2InvScale - log2Scale;

  Vec3<int64_t> res;
  res[0] = (invB * a[0]) >> n;
  res[1] = (invB * a[1]) >> n;
  res[2] = (invB * a[2]) >> n;
  return res;
}

//---------------------------------------------------------------------------
// As the vector divApprox, but rounding intermediate half values away
// from zero.

inline Vec3<int64_t>
divApproxRoundHalfInf(
  const Vec3<int64_t> a, const uint64_t b, const int32_t log2Scale)
{
  assert(abs(a[0]) < (1ull << 46));
  assert(abs(a[1]) < (1ull << 46));
  assert(abs(a[2]) < (1ull << 46));
  int32_t log2InvScale;
  const int64_t invB = divInvDivisorApprox(b, log2InvScale);
  const int32_t n = log2InvScale - log2Scale;

  Vec3<int64_t> res;
  res[0] = divExp2RoundHalfInf(invB * a[0], n);
  res[1] = divExp2RoundHalfInf(invB * a[1], n);
  res[2] = divExp2RoundHalfInf(invB * a[2], n);
  return res;
}

//---------------------------------------------------------------------------
// Fixed-point sine over the first quadrant: x in [0, 2^(log2Scale-2)],
// evaluated by linear interpolation between entries of the kISine table.

inline int32_t
isin0(const int32_t x, const int32_t log2Scale)
{
  assert(log2Scale >= kLog2ISineAngleScale);
  assert(x >= 0);
  assert(x <= (1 << (log2Scale - 2)));

  // Split x into a table index (i0) and an interpolation residual (d1).
  const auto ds = log2Scale - kLog2ISineAngleScale;
  const auto b = (1 << ds);
  const auto i0 = (x >> ds);
  const auto x0 = i0 << ds;
  const auto d1 = x - x0;
  assert(i0 <= (1 << kLog2ISineAngleScale) >> 2);

  return kISine[i0] + ((d1 * (kISine[i0 + 1] - kISine[i0]) + (b >> 1)) >> ds);
}

//---------------------------------------------------------------------------
// Fixed-point cosine over the first quadrant, via the sine complement.

inline int32_t
icos0(const int32_t x, const int32_t log2Scale)
{
  assert(x >= 0);
  assert(x <= (1 << (log2Scale - 2)));
  return isin0((1 << (log2Scale - 2)) - x, log2Scale);
}

//---------------------------------------------------------------------------
// Fixed-point sine for any x (clamped to a half turn either way), reduced
// to the first quadrant by symmetry.

inline int32_t
isin(int32_t x, const int32_t log2Scale)
{
  const auto L = 1 << (log2Scale - 1);
  x = std::min(std::max(x, -L), L);
  assert(abs(x) <= (1 << (log2Scale - 1)));

  const auto Q0 = 1 << (log2Scale - 2);
  if (x >= Q0) {
    return isin0((1 << (log2Scale - 1)) - x, log2Scale);
  } else if (x >= 0) {
    return isin0(x, log2Scale);
  } else if (x >= -Q0) {
    return -isin0(-x, log2Scale);
  } else {
    return -isin0((1 << (log2Scale - 1)) + x, log2Scale);
  }
}

//---------------------------------------------------------------------------
// Fixed-point cosine for any x, reduced by the even symmetry of cosine.

inline int32_t
icos(int32_t x, const int32_t log2Scale)
{
  const auto Q0 = 1 << (log2Scale - 2);
  const auto ax = std::min(abs(x), (1 << (log2Scale - 1)));
  return ax <= Q0 ? icos0(ax, log2Scale)
                  : -icos0((1 << (log2Scale - 1)) - ax, log2Scale);
}

//---------------------------------------------------------------------------

} /* namespace pcc */

#endif /* PCCMath_h */
22,361
25.653159
79
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/PCCMisc.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <array> #include <cstddef> #include <cstdint> #include <iterator> #include <utility> #include <string> #include <vector> #if _MSC_VER # define DEPRECATED_MSVC __declspec(deprecated) # define DEPRECATED #else # define DEPRECATED_MSVC # define DEPRECATED __attribute__((deprecated)) #endif #if _MSC_VER && !defined(__attribute__) # define __attribute__(...) #endif namespace pcc { const uint32_t PCC_UNDEFINED_INDEX = -1; enum PCCEndianness { PCC_BIG_ENDIAN = 0, PCC_LITTLE_ENDIAN = 1 }; inline PCCEndianness PCCSystemEndianness() { uint32_t num = 1; return (*(reinterpret_cast<char*>(&num)) == 1) ? PCC_LITTLE_ENDIAN : PCC_BIG_ENDIAN; } //--------------------------------------------------------------------------- // Replace any occurence of %d with formatted number. The %d format // specifier may use the formatting conventions of snprintf(). std::string expandNum(const std::string& src, int num); //--------------------------------------------------------------------------- // Population count -- return the number of bits set in @x. // inline int popcnt(uint32_t x) { x = x - ((x >> 1) & 0x55555555u); x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u); return ((x + (x >> 4) & 0xF0F0F0Fu) * 0x1010101u) >> 24; } //--------------------------------------------------------------------------- // Population count -- return the number of bits set in @x. // inline int popcnt(uint8_t x) { uint32_t val = x * 0x08040201u; val >>= 3; val &= 0x11111111u; val *= 0x11111111u; return val >> 28; } //--------------------------------------------------------------------------- // Test if population count is greater than 1. // Returns non-zero if true. // inline uint32_t popcntGt1(uint32_t x) { return x & (x - 1); } //--------------------------------------------------------------------------- // Round @x up to next power of two. 
// inline uint32_t ceilpow2(uint32_t x) { x--; x = x | (x >> 1); x = x | (x >> 2); x = x | (x >> 4); x = x | (x >> 8); x = x | (x >> 16); return x + 1; } //--------------------------------------------------------------------------- // Round @x up to next power of two. // inline uint64_t ceilpow2(uint64_t x) { x--; x = x | (x >> 1); x = x | (x >> 2); x = x | (x >> 4); x = x | (x >> 8); x = x | (x >> 16); x = x | (x >> 32); return x + 1; } //--------------------------------------------------------------------------- // Compute \left\floor \text{log}_2(x) \right\floor. // NB: ilog2(0) = -1. inline int ilog2(uint32_t x) { x = ceilpow2(x + 1) - 1; return popcnt(x) - 1; } //--------------------------------------------------------------------------- // Compute \left\floor \text{log}_2(x) \right\floor. // NB: ilog2(0) = -1. inline int ilog2(uint64_t x) { x = ceilpow2(x + 1) - 1; return popcnt(uint32_t(x >> 32)) + popcnt(uint32_t(x)) - 1; } //--------------------------------------------------------------------------- // Compute \left\ceil \text{log}_2(x) \right\ceil. // NB: ceillog2(0) = 32. inline int ceillog2(uint32_t x) { return ilog2(x - 1) + 1; } //--------------------------------------------------------------------------- // The number of bits required to represent x. // NB: x must be >= 0. // NB: numBits(0) = 1. inline int numBits(int x) { return std::max(0, ilog2(uint32_t(x))) + 1; } //--------------------------------------------------------------------------- // rotate left by n bits. // If n is negative, the effect is to rotate right. // NB: Signed types are treated as unsigned. template<typename T> T rotateLeft(T val, int n) { n &= sizeof(T) * 8 - 1; using unsigned_t = typename std::make_unsigned<T>::type; return (unsigned_t(val) << n) | (unsigned_t(val) >> (8 * sizeof(val) - n)); } //--------------------------------------------------------------------------- // rotate right by n bits. // If n is negative, the effect is to rotate left. 
// NB: Signed types are treated as unsigned. template<typename T> T rotateRight(T val, int n) { n &= sizeof(T) * 8 - 1; using unsigned_t = typename std::make_unsigned<T>::type; return (unsigned_t(val) >> n) | (unsigned_t(val) << (8 * sizeof(val) - n)); } //--------------------------------------------------------------------------- // Compute an approximation of \left\floor \sqrt{x} \right\floor uint32_t isqrt(uint64_t x) __attribute__((const)); //--------------------------------------------------------------------------- // Compute an approximation of reciprocal sqrt uint64_t irsqrt(uint64_t a64) __attribute__((const)); //--------------------------------------------------------------------------- // Compute an approximation of atan2 int iatan2(int y, int x); //--------------------------------------------------------------------------- // Decrement the @axis-th dimension of 3D morton code @x. // inline int64_t morton3dAxisDec(int64_t val, int axis) { const int64_t mask0 = 0x9249249249249249llu << axis; return ((val & mask0) - 1 & mask0) | (val & ~mask0); } //--------------------------------------------------------------------------- // add the three dimentional addresses @a + @b; // inline uint64_t morton3dAdd(uint64_t a, uint64_t b) { uint64_t mask = 0x9249249249249249llu; uint64_t val = 0; for (int i = 0; i < 3; i++) { val |= (a | ~mask) + (b & mask) & mask; mask <<= 1; } return val; } //--------------------------------------------------------------------------- // Sort the elements in range [@first, @last) using a counting sort. // // The value of each element is determined by the function // @value_of(RandomIt::value_type) and must be in the range [0, Radix). // // A supplied output array of @counts represents the histogram of values, // and may be used to calculate the output position of each value span. // // NB: This is an in-place implementation and is not a stable sort. 
// In-place counting sort; see the contract described immediately above.
template<class RandomIt, class ValueOp, std::size_t Radix>
void
countingSort(
  RandomIt first, RandomIt last, std::array<int, Radix>& counts,
  ValueOp value_of)
{
  // step 1: count each radix
  // NB: @counts is accumulated into — the caller provides it zeroed.
  for (auto it = first; it != last; ++it) {
    counts[value_of(*it)]++;
  }

  // step 2: determine the output offsets
  std::array<RandomIt, Radix> ptrs = {{first}};
  for (int i = 1; i < Radix; i++) {
    ptrs[i] = std::next(ptrs[i - 1], counts[i - 1]);
  }

  // step 3: re-order, completing each radix in turn.
  // Each misplaced element is swapped to the next free slot of its own
  // radix span until bucket i's span [prev end, ptr_orig_last) is filled.
  RandomIt ptr_orig_last = first;
  for (int i = 0; i < Radix; i++) {
    std::advance(ptr_orig_last, counts[i]);
    while (ptrs[i] != ptr_orig_last) {
      int radix = value_of(*ptrs[i]);
      std::iter_swap(ptrs[i], ptrs[radix]);
      ++ptrs[radix];
    }
  }
}

//---------------------------------------------------------------------------
// A callable that accepts any arguments and does nothing; used as the
// default accumulator of radixSort8.

struct NoOp {
  template<typename... Args>
  void operator()(Args...)
  {}
};

//---------------------------------------------------------------------------
// Recursive 8-way radix sort of [begin, end), most significant digit
// first.  @op(maxValLog2, element) must return the 3-bit digit of the
// element at level @maxValLog2; @acc is invoked with the digit histogram
// of every visited level.

template<typename It, typename ValueOp, typename AccumOp>
void
radixSort8WithAccum(int maxValLog2, It begin, It end, ValueOp op, AccumOp acc)
{
  std::array<int, 8> counts = {};
  countingSort(begin, end, counts, [=](decltype(*begin)& it) {
    return op(maxValLog2, it);
  });
  acc(maxValLog2, counts);

  if (--maxValLog2 < 0)
    return;

  // Recurse into each non-empty bucket of the level just sorted.
  auto childBegin = begin;
  for (int i = 0; i < counts.size(); i++) {
    if (!counts[i])
      continue;

    auto childEnd = std::next(childBegin, counts[i]);
    radixSort8WithAccum(maxValLog2, childBegin, childEnd, op, acc);
    childBegin = childEnd;
  }
}

//---------------------------------------------------------------------------
// As radixSort8WithAccum, with the per-level accumulator disabled.

template<typename It, typename ValueOp>
void
radixSort8(int maxValLog2, It begin, It end, ValueOp op)
{
  radixSort8WithAccum(maxValLog2, begin, end, op, NoOp());
}

//============================================================================
// A wrapper to reverse the iteration order of a range based for loop

template<typename T>
struct fwd_is_reverse_iterator {
  T& obj;
};

// begin()/end() overloads found by ADL from a range-based for loop; they
// forward to the wrapped object's reverse iterators.
template<typename T>
auto
begin(fwd_is_reverse_iterator<T> w) -> decltype(w.obj.rbegin())
{
  return w.obj.rbegin();
}

template<typename T>
auto
end(fwd_is_reverse_iterator<T> w) -> decltype(w.obj.rend())
{
  return w.obj.rend();
}

template<typename T>
fwd_is_reverse_iterator<T>
inReverse(T&& obj)
{
  return {obj};
}

//----------------------------------------------------------------------------

}  // namespace pcc
10,262
26.368
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/PCCPointSet.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PCCPointSet_h #define PCCPointSet_h #include <assert.h> #include <algorithm> #include <cmath> #include <cstddef> #include <fstream> #include <iomanip> #include <iostream> #include <iterator> #include <string> #include <vector> #include "PCCMath.h" #include "PCCMisc.h" namespace pcc { //============================================================================ // The type used for internally representing attribute data typedef uint16_t attr_t; // The type used for internally representing positions typedef Vec3<int32_t> point_t; //============================================================================ class PCCPointSet3 { public: typedef point_t PointType; //========================================================================= // proxy object for use with iterator, allowing handling of PCCPointSet3's // structure-of-arrays as a single array. class iterator; class Proxy { friend class iterator; PCCPointSet3* parent_; size_t idx_; public: //----------------------------------------------------------------------- Proxy() : parent_(nullptr), idx_() {} Proxy(PCCPointSet3* parent, size_t idx) : parent_(parent), idx_(idx) {} //----------------------------------------------------------------------- PointType operator*() const { return (*parent_)[idx_]; } PointType& operator*() { return (*parent_)[idx_]; } //----------------------------------------------------------------------- // Swap the position of the current proxied point (including attributes) // with that of @other in the same PointSet. 
void swap(const Proxy& other) const { assert(parent_ == other.parent_); parent_->swapPoints(idx_, other.idx_); } //----------------------------------------------------------------------- }; //========================================================================= // iterator for use with stl algorithms class iterator { private: Proxy p_; public: typedef std::random_access_iterator_tag iterator_category; typedef const Proxy value_type; typedef std::ptrdiff_t difference_type; typedef const Proxy* pointer; typedef const Proxy& reference; //----------------------------------------------------------------------- iterator() = default; iterator(const iterator&) = default; //----------------------------------------------------------------------- explicit iterator(PCCPointSet3* parent) : p_{parent, 0} {} explicit iterator(PCCPointSet3* parent, size_t idx) : p_{parent, idx} {} //----------------------------------------------------------------------- // :: Iterator reference operator*() const { return p_; } //----------------------------------------------------------------------- iterator& operator++() { p_.idx_++; return *this; } //----------------------------------------------------------------------- // :: ForwardIterator iterator operator++(int) { iterator retval = *this; ++(*this); return retval; } //----------------------------------------------------------------------- pointer operator->() const { return &p_; } //----------------------------------------------------------------------- bool operator==(const iterator& other) const { return p_.idx_ == other.p_.idx_; } //----------------------------------------------------------------------- bool operator!=(const iterator& other) const { return !(*this == other); } //----------------------------------------------------------------------- // :: BidirectionalIterator iterator& operator--() { p_.idx_--; return *this; } //----------------------------------------------------------------------- iterator operator--(int) { 
iterator retval = *this; --(*this); return retval; } //----------------------------------------------------------------------- // :: RandomAccessIterator value_type operator[](difference_type n) { return Proxy{p_.parent_, p_.idx_ + n}; } //----------------------------------------------------------------------- iterator& operator+=(difference_type n) { p_.idx_ += n; return *this; } //----------------------------------------------------------------------- iterator operator+(difference_type n) const { iterator it(*this); it += n; return it; } //----------------------------------------------------------------------- iterator& operator-=(difference_type n) { p_.idx_ -= n; return *this; } //----------------------------------------------------------------------- iterator operator-(difference_type n) const { iterator it(*this); it -= n; return it; } //----------------------------------------------------------------------- difference_type operator-(const iterator& other) const { return p_.idx_ - other.p_.idx_; } //----------------------------------------------------------------------- }; //========================================================================= PCCPointSet3() { withColors = false; withReflectances = false; withFrameIndex = false; withLaserAngles = false; } PCCPointSet3(const PCCPointSet3&) = default; PCCPointSet3& operator=(const PCCPointSet3& rhs) = default; ~PCCPointSet3() = default; void swap(PCCPointSet3& other) { using std::swap; swap(positions, other.positions); swap(colors, other.colors); swap(reflectances, other.reflectances); swap(frameidx, other.frameidx); swap(laserAngles, other.laserAngles); swap(withColors, other.withColors); swap(withReflectances, other.withReflectances); swap(withFrameIndex, other.withFrameIndex); swap(withLaserAngles, other.withLaserAngles); } PointType operator[](const size_t index) const { assert(index < positions.size()); return positions[index]; } PointType& operator[](const size_t index) { assert(index < 
positions.size()); return positions[index]; } void swapPoints(std::vector<PointType>& other) { positions.swap(other); } Vec3<attr_t> getColor(const size_t index) const { assert(index < colors.size() && withColors); return colors[index]; } Vec3<attr_t>& getColor(const size_t index) { assert(index < colors.size() && withColors); return colors[index]; } void setColor(const size_t index, const Vec3<attr_t> color) { assert(index < colors.size() && withColors); colors[index] = color; } attr_t getReflectance(const size_t index) const { assert(index < reflectances.size() && withReflectances); return reflectances[index]; } attr_t& getReflectance(const size_t index) { assert(index < reflectances.size() && withReflectances); return reflectances[index]; } void setReflectance(const size_t index, const attr_t reflectance) { assert(index < reflectances.size() && withReflectances); reflectances[index] = reflectance; } bool hasReflectances() const { return withReflectances; } void addReflectances() { withReflectances = true; resize(getPointCount()); } void removeReflectances() { withReflectances = false; reflectances.resize(0); } uint8_t getFrameIndex(const size_t index) const { assert(index < frameidx.size() && withFrameIndex); return frameidx[index]; } uint8_t& getFrameIndex(const size_t index) { assert(index < frameidx.size() && withFrameIndex); return frameidx[index]; } void setFrameIndex(const size_t index, const uint8_t frameindex) { assert(index < frameidx.size() && withFrameIndex); frameidx[index] = frameindex; } bool hasFrameIndex() const { return withFrameIndex; } void addFrameIndex() { withFrameIndex = true; resize(getPointCount()); } void removeFrameIndex() { withFrameIndex = false; frameidx.resize(0); } int getLaserAngle(const size_t index) const { assert(index < laserAngles.size() && withLaserAngles); return laserAngles[index]; } int& getLaserAngle(const size_t index) { assert(index < laserAngles.size() && withLaserAngles); return laserAngles[index]; } void 
setLaserAngle(const size_t index, const int angle) { assert(index < laserAngles.size() && withLaserAngles); laserAngles[index] = angle; } bool hasLaserAngles() const { return withLaserAngles; } void addLaserAngles() { withLaserAngles = true; resize(getPointCount()); } void removeLaserAngles() { withLaserAngles = false; laserAngles.resize(0); } bool hasColors() const { return withColors; } void addColors() { withColors = true; resize(getPointCount()); } void removeColors() { withColors = false; colors.resize(0); } void addRemoveAttributes(bool withColors, bool withReflectances) { if (withColors) addColors(); else removeColors(); if (withReflectances) addReflectances(); else removeReflectances(); } void addRemoveAttributes(const PCCPointSet3& ref) { ref.hasColors() ? addColors() : removeColors(); ref.hasReflectances() ? addReflectances() : removeReflectances(); ref.hasLaserAngles() ? addLaserAngles() : removeLaserAngles(); } size_t getPointCount() const { return positions.size(); } void resize(const size_t size) { positions.resize(size); if (hasColors()) { colors.resize(size); } if (hasReflectances()) { reflectances.resize(size); } if (hasFrameIndex()) { frameidx.resize(size); } if (hasLaserAngles()) { laserAngles.resize(size); } } void reserve(const size_t size) { positions.reserve(size); if (hasColors()) { colors.reserve(size); } if (hasReflectances()) { reflectances.reserve(size); } if (hasFrameIndex()) { frameidx.reserve(size); } if (hasLaserAngles()) { laserAngles.reserve(size); } } void clear() { positions.clear(); colors.clear(); reflectances.clear(); frameidx.clear(); laserAngles.clear(); } size_t removeDuplicatePointInQuantizedPoint(int minGeomNodeSizeLog2) { for (int i = 0; i < positions.size(); i++) { PointType newPoint = positions[i]; if (minGeomNodeSizeLog2 > 0) { uint32_t mask = ((uint32_t)-1) << minGeomNodeSizeLog2; positions[i].x() = ((int32_t)(positions[i].x()) & mask); positions[i].y() = ((int32_t)(positions[i].y()) & mask); positions[i].z() = 
((int32_t)(positions[i].z()) & mask); } } positions.erase( std::unique(positions.begin(), positions.end()), positions.end()); return positions.size(); } void append(const PCCPointSet3& src) { if (!getPointCount()) addRemoveAttributes(src); int dstEnd = positions.size(); int srcSize = src.positions.size(); resize(dstEnd + srcSize); std::copy( src.positions.begin(), src.positions.end(), std::next(positions.begin(), dstEnd)); if (hasColors() && src.hasColors()) std::copy( src.colors.begin(), src.colors.end(), std::next(colors.begin(), dstEnd)); if (hasReflectances() && src.hasReflectances()) std::copy( src.reflectances.begin(), src.reflectances.end(), std::next(reflectances.begin(), dstEnd)); if (hasLaserAngles()) std::copy( src.laserAngles.begin(), src.laserAngles.end(), std::next(laserAngles.begin(), dstEnd)); } void swapPoints(const size_t index1, const size_t index2) { assert(index1 < getPointCount()); assert(index2 < getPointCount()); std::swap((*this)[index1], (*this)[index2]); if (hasColors()) { std::swap(getColor(index1), getColor(index2)); } if (hasReflectances()) { std::swap(getReflectance(index1), getReflectance(index2)); } if (hasLaserAngles()) { std::swap(getLaserAngle(index1), getLaserAngle(index2)); } } Box3<int32_t> computeBoundingBox() const { Box3<int32_t> bbox( std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::lowest()); const size_t pointCount = getPointCount(); for (size_t i = 0; i < pointCount; ++i) { const auto& pt = (*this)[i]; for (int k = 0; k < 3; ++k) { if (pt[k] > bbox.max[k]) { bbox.max[k] = pt[k]; } if (pt[k] < bbox.min[k]) { bbox.min[k] = pt[k]; } } } return bbox; } //-------------------------------------------------------------------------- // Determine the bounding box of the set of points given by the indicies // given by iterating over [begin, end) template<typename ForwardIt> Box3<int32_t> computeBoundingBox(ForwardIt begin, ForwardIt end) const { Box3<int32_t> bbox( std::numeric_limits<int32_t>::max(), 
std::numeric_limits<int32_t>::lowest()); for (auto it = begin; it != end; ++it) { int i = *it; const auto& pt = (*this)[i]; for (int k = 0; k < 3; ++k) { if (pt[k] > bbox.max[k]) { bbox.max[k] = pt[k]; } if (pt[k] < bbox.min[k]) { bbox.min[k] = pt[k]; } } } return bbox; } //-------------------------------------------------------------------------- private: std::vector<PointType> positions; std::vector<Vec3<attr_t>> colors; std::vector<attr_t> reflectances; std::vector<uint8_t> frameidx; bool withColors; bool withReflectances; bool withFrameIndex; std::vector<int> laserAngles; bool withLaserAngles; }; //=========================================================================== // Swap the position of two points (including attributes) in the PointSet // as referenced by the proxies a and b. inline void swap(const PCCPointSet3::Proxy& a, const PCCPointSet3::Proxy& b) { a.swap(b); } //--------------------------------------------------------------------------- // Swap two point clouds inline void swap(PCCPointSet3& a, PCCPointSet3& b) { a.swap(b); } //--------------------------------------------------------------------------- } /* namespace pcc */ #endif /* PCCPointSet_h */
16,227
25.956811
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/PCCTMC3Common.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PCCTMC3Common_h #define PCCTMC3Common_h #include "PCCMath.h" #include "PCCPointSet.h" #include "constants.h" #include "hls.h" #include "nanoflann.hpp" #include <cstdint> #include <cstddef> #include <memory> #include <vector> namespace pcc { //============================================================================ // Hierachichal bounding boxes. // Insert points (into the base layer), then generate the hierarchy via update. template<int32_t BucketSizeLog2, int32_t LevelCount> class BoxHierarchy { public: void resize(const int32_t pointCount) { constexpr auto BucketSize = 1 << BucketSizeLog2; constexpr auto BucketSizeMinus1 = BucketSize - 1; int32_t count = pointCount; for (int i = 0; i < LevelCount; ++i) { count = (count + BucketSizeMinus1) >> BucketSizeLog2; _bBoxes[i].clear(); _bBoxes[i].resize(count, Box3<int32_t>(INT32_MAX, INT32_MIN)); } } void insert(const Vec3<int32_t>& point, const int32_t index) { const auto bindex = (index >> BucketSizeLog2); assert(bindex >= 0 && bindex < _bBoxes[0].size()); _bBoxes[0][bindex].insert(point); } void update() { constexpr auto LevelCountMinus1 = LevelCount - 1; for (int i = 0; i < LevelCountMinus1; ++i) { for (int32_t j = 0, count = int32_t(_bBoxes[i].size()); j < count; ++j) { _bBoxes[i + 1][j >> BucketSizeLog2].merge(_bBoxes[i][j]); } } } const Box3<int32_t>& bBox(int32_t bindex, int32_t level) const { return _bBoxes[level][bindex]; } int32_t bucketSizeLog2(int32_t level = 0) const { return BucketSizeLog2 * (1 + level); } int32_t bucketSize(int32_t level = 0) const { return 1 << bucketSizeLog2(level); } private: std::vector<Box3<int32_t>> _bBoxes[LevelCount]; }; //============================================================================ class MortonIndexMap3d { public: struct Range { int32_t start; int32_t end; }; void resize(const int32_t cubeSizeLog2) { _cubeSizeLog2 = cubeSizeLog2; _cubeSize = 1 << cubeSizeLog2; _bufferSize = 1 << (3 * cubeSizeLog2); _mask = _bufferSize - 1; _buffer.reset(new 
Range[_bufferSize]); } void reserve(const uint32_t sz) { _updates.reserve(sz); } int cubeSize() const { return _cubeSize; } int cubeSizeLog2() const { return _cubeSizeLog2; } void init() { for (int32_t i = 0; i < _bufferSize; ++i) { _buffer[i] = {-1, -1}; } _updates.resize(0); } void clearUpdates() { for (const auto index : _updates) { _buffer[index] = {-1, -1}; } _updates.resize(0); } void set(const int64_t mortonCode, const int32_t index) { const int64_t mortonAddr = mortonCode & _mask; auto& unit = _buffer[mortonAddr]; if (unit.start == -1) { unit.start = index; } unit.end = index + 1; _updates.push_back(mortonAddr); } Range get(const int64_t mortonCode) const { return _buffer[mortonCode & _mask]; } private: int32_t _cubeSize = 0; int32_t _cubeSizeLog2 = 0; int32_t _bufferSize = 0; int64_t _mask = 0; std::unique_ptr<Range[]> _buffer; // A list of indexes in _buffer that are dirty std::vector<int32_t> _updates; }; //============================================================================ struct MortonCodeWithIndex { int64_t mortonCode; // The position used to generate the mortonCode Vec3<int32_t> position; int32_t index; bool operator<(const MortonCodeWithIndex& rhs) const { // NB: index used to maintain stable sort if (mortonCode == rhs.mortonCode) return index < rhs.index; return mortonCode < rhs.mortonCode; } }; //--------------------------------------------------------------------------- struct PCCNeighborInfo { uint64_t weight; uint32_t predictorIndex; bool operator<(const PCCNeighborInfo& rhs) const { return weight < rhs.weight; } }; //--------------------------------------------------------------------------- struct PCCPredictor { uint32_t neighborCount; PCCNeighborInfo neighbors[kAttributePredictionMaxNeighbourCount]; int8_t predMode; Vec3<attr_t> predictColor( const PCCPointSet3& pointCloud, const std::vector<uint32_t>& indexes) const { Vec3<int64_t> predicted(0); if (predMode > neighborCount) { /* nop */ } else if (predMode > 0) { const Vec3<attr_t> 
color = pointCloud.getColor(indexes[neighbors[predMode - 1].predictorIndex]); for (size_t k = 0; k < 3; ++k) { predicted[k] += color[k]; } } else { for (size_t i = 0; i < neighborCount; ++i) { const Vec3<attr_t> color = pointCloud.getColor(indexes[neighbors[i].predictorIndex]); const uint32_t w = neighbors[i].weight; for (size_t k = 0; k < 3; ++k) { predicted[k] += w * color[k]; } } for (uint32_t k = 0; k < 3; ++k) { predicted[k] = divExp2RoundHalfInf(predicted[k], kFixedPointWeightShift); } } return Vec3<attr_t>(predicted[0], predicted[1], predicted[2]); } int64_t predictReflectance( const PCCPointSet3& pointCloud, const std::vector<uint32_t>& indexes) const { int64_t predicted(0); if (predMode > neighborCount) { /* nop */ } else if (predMode > 0) { predicted = pointCloud.getReflectance( indexes[neighbors[predMode - 1].predictorIndex]); } else { for (size_t i = 0; i < neighborCount; ++i) { predicted += neighbors[i].weight * pointCloud.getReflectance(indexes[neighbors[i].predictorIndex]); } predicted = divExp2RoundHalfInf(predicted, kFixedPointWeightShift); } return predicted; } void computeWeights() { const uint32_t shift = (1 << kFixedPointWeightShift); int32_t n = 0; while ((neighbors[0].weight >> n) >= shift) { ++n; } if (n > 0) { for (size_t i = 0; i < neighborCount; ++i) { neighbors[i].weight = (neighbors[i].weight + (1ull << (n - 1))) >> n; } } while (neighborCount > 1) { if ( neighbors[neighborCount - 1].weight >= (neighbors[0].weight << kFixedPointWeightShift)) { --neighborCount; } else { break; } } if (neighborCount <= 1) { neighbors[0].weight = shift; } else if (neighborCount == 2) { const uint64_t d0 = neighbors[0].weight; const uint64_t d1 = neighbors[1].weight; const uint64_t sum = d1 + d0; const uint64_t w1 = divApprox(d0, sum, kFixedPointWeightShift); const uint64_t w0 = shift - w1; neighbors[0].weight = uint32_t(w0); neighbors[1].weight = uint32_t(w1); } else { neighborCount = 3; const uint64_t d0 = neighbors[0].weight; const uint64_t d1 = 
neighbors[1].weight; const uint64_t d2 = neighbors[2].weight; const uint64_t sum = d1 * d2 + d0 * d2 + d0 * d1; const uint64_t w2 = divApprox(d0 * d1, sum, kFixedPointWeightShift); const uint64_t w1 = divApprox(d0 * d2, sum, kFixedPointWeightShift); const uint64_t w0 = shift - (w1 + w2); neighbors[0].weight = uint32_t(w0); neighbors[1].weight = uint32_t(w1); neighbors[2].weight = uint32_t(w2); } } void blendWeights(const PCCPointSet3& cloud, const std::vector<uint32_t>& indexes) { int w0 = neighbors[0].weight; int w1 = neighbors[1].weight; int w2 = neighbors[2].weight; if (neighborCount != 3) return; auto neigh0Pos = cloud[indexes[neighbors[0].predictorIndex]]; auto neigh1Pos = cloud[indexes[neighbors[1].predictorIndex]]; auto neigh2Pos = cloud[indexes[neighbors[2].predictorIndex]]; constexpr bool variant = 1; const auto d = variant ? 10 : 8; const auto bb = variant ? 1 : 4; const auto cc = variant ? 5 : 4; auto dist01 = (neigh0Pos - neigh1Pos).getNorm2<int64_t>(); auto dist02 = (neigh0Pos - neigh2Pos).getNorm2<int64_t>(); auto& dist10 = dist01; auto dist12 = (neigh1Pos - neigh2Pos).getNorm2<int64_t>(); auto& dist20 = dist02; auto& dist21 = dist12; auto b1 = dist01 <= dist02 ? bb : cc; auto b2 = dist10 <= dist12 ? cc : bb; auto b3 = dist20 <= dist21 ? 
bb : cc; Vec3<int> w; w[0] = (w0 * d + w1 * (16 - d - b2) + w2 * b3) >> 4; w[1] = (w0 * b1 + w1 * d + w2 * (16 - d - b3)) >> 4; w[2] = 256 - w[0] - w[1]; for (int i = 0; i < 3; i++) neighbors[i].weight = w[i]; } void pruneDistanceGt(uint64_t maxDistance) { for (int i = 1; i < neighborCount; i++) { if (neighbors[i].weight > maxDistance) { neighborCount = i; break; } } } void init() { neighborCount = 0; memset( neighbors, 0, sizeof(PCCNeighborInfo) * kAttributePredictionMaxNeighbourCount); } }; //--------------------------------------------------------------------------- template<typename T> void PCCLiftPredict( const std::vector<PCCPredictor>& predictors, const size_t startIndex, const size_t endIndex, const bool direct, std::vector<T>& attributes) { const size_t predictorCount = endIndex - startIndex; for (size_t index = 0; index < predictorCount; ++index) { const size_t predictorIndex = predictorCount - index - 1 + startIndex; const auto& predictor = predictors[predictorIndex]; auto& attribute = attributes[predictorIndex]; T predicted(T(0)); for (size_t i = 0; i < predictor.neighborCount; ++i) { const size_t neighborPredIndex = predictor.neighbors[i].predictorIndex; const uint32_t weight = predictor.neighbors[i].weight; assert(neighborPredIndex < startIndex); predicted += weight * attributes[neighborPredIndex]; } predicted = divExp2RoundHalfInf(predicted, kFixedPointWeightShift); if (direct) { attribute -= predicted; } else { attribute += predicted; } } } //--------------------------------------------------------------------------- template<typename T> void PCCLiftUpdate( const std::vector<PCCPredictor>& predictors, const std::vector<uint64_t>& quantizationWeights, const size_t startIndex, const size_t endIndex, const bool direct, std::vector<T>& attributes) { std::vector<uint64_t> updateWeights; updateWeights.resize(startIndex, uint64_t(0)); std::vector<T> updates; updates.resize(startIndex); for (size_t index = 0; index < startIndex; ++index) { updates[index] = 
int64_t(0); } const size_t predictorCount = endIndex - startIndex; for (size_t index = 0; index < predictorCount; ++index) { const size_t predictorIndex = predictorCount - index - 1 + startIndex; const auto& predictor = predictors[predictorIndex]; const auto currentQuantWeight = quantizationWeights[predictorIndex]; for (size_t i = 0; i < predictor.neighborCount; ++i) { const size_t neighborPredIndex = predictor.neighbors[i].predictorIndex; const auto weight = divExp2RoundHalfInf( predictor.neighbors[i].weight * currentQuantWeight, kFixedPointWeightShift); assert(neighborPredIndex < startIndex); updateWeights[neighborPredIndex] += weight; updates[neighborPredIndex] += weight * attributes[predictorIndex]; } } for (size_t predictorIndex = 0; predictorIndex < startIndex; ++predictorIndex) { const uint32_t sumWeights = updateWeights[predictorIndex]; if (sumWeights) { auto& update = updates[predictorIndex]; update = divApprox(update, sumWeights, 0); auto& attribute = attributes[predictorIndex]; if (direct) { attribute += update; } else { attribute -= update; } } } } //--------------------------------------------------------------------------- inline void PCCComputeQuantizationWeights( const std::vector<PCCPredictor>& predictors, std::vector<uint64_t>& quantizationWeights) { const size_t pointCount = predictors.size(); quantizationWeights.resize(pointCount); for (size_t i = 0; i < pointCount; ++i) { quantizationWeights[i] = (1 << kFixedPointWeightShift); } for (size_t i = 0; i < pointCount; ++i) { const size_t predictorIndex = pointCount - i - 1; const auto& predictor = predictors[predictorIndex]; const auto currentQuantWeight = quantizationWeights[predictorIndex]; for (size_t j = 0; j < predictor.neighborCount; ++j) { const size_t neighborPredIndex = predictor.neighbors[j].predictorIndex; const auto weight = predictor.neighbors[j].weight; auto& neighborQuantWeight = quantizationWeights[neighborPredIndex]; neighborQuantWeight += divExp2RoundHalfInf( weight * 
currentQuantWeight, kFixedPointWeightShift); } } } //--------------------------------------------------------------------------- inline void computeQuantizationWeightsScalable( const std::vector<PCCPredictor>& predictors, const std::vector<uint32_t>& numberOfPointsPerLOD, size_t numPoints, int32_t minGeomNodeSizeLog2, std::vector<uint64_t>& quantizationWeights) { const size_t pointCount = predictors.size(); quantizationWeights.resize(pointCount); for (size_t i = 0; i < pointCount; ++i) { quantizationWeights[i] = (1 << kFixedPointWeightShift); } const size_t lodCount = numberOfPointsPerLOD.size(); for (size_t lodIndex = 0; lodIndex < lodCount; ++lodIndex) { const size_t startIndex = (lodIndex == 0) ? 0 : numberOfPointsPerLOD[lodIndex - 1]; const size_t endIndex = numberOfPointsPerLOD[lodIndex]; const uint64_t currentQuantWeight = (numPoints / numberOfPointsPerLOD[lodIndex]) << kFixedPointWeightShift; const size_t predictorCount = endIndex - startIndex; for (size_t index = 0; index < predictorCount; ++index) { const size_t predictorIndex = index + startIndex; if (!minGeomNodeSizeLog2 && (lodIndex == lodCount - 1)) { quantizationWeights[predictorIndex] = (1 << kFixedPointWeightShift); } else { quantizationWeights[predictorIndex] = currentQuantWeight; } } } } //--------------------------------------------------------------------------- inline void computeQuantizationWeights( const std::vector<PCCPredictor>& predictors, std::vector<int64_t>& quantizationWeights, Vec3<int32_t> neighWeight) { const size_t pointCount = predictors.size(); quantizationWeights.resize(pointCount); for (size_t i = 0; i < pointCount; ++i) { quantizationWeights[i] = (1 << kFixedPointWeightShift); } for (size_t i = 0; i < pointCount; ++i) { const size_t predictorIndex = pointCount - i - 1; const auto& predictor = predictors[predictorIndex]; const auto currentQuantWeight = quantizationWeights[predictorIndex]; for (size_t j = 0; j < predictor.neighborCount; ++j) { const size_t neighborPredIndex = 
predictor.neighbors[j].predictorIndex; auto& neighborQuantWeight = quantizationWeights[neighborPredIndex]; neighborQuantWeight += divExp2RoundHalfInf( neighWeight[j] * currentQuantWeight, kFixedPointWeightShift); } } } //--------------------------------------------------------------------------- inline point_t clacIntermediatePosition( bool enabled, int32_t nodeSizeLog2, const point_t& point) { if (!enabled || !nodeSizeLog2) return point; uint32_t mask = (uint32_t(-1)) << nodeSizeLog2; int32_t centerX = point.x() & mask; int32_t centerY = point.y() & mask; int32_t centerZ = point.z() & mask; point_t newPoint{centerX, centerY, centerZ}; return newPoint; } //--------------------------------------------------------------------------- inline void updateNearestNeigh( const Vec3<int32_t>& point0, const Vec3<int32_t>& point1, int32_t index, int32_t (&localIndexes)[3], int64_t (&minDistances)[3]) { auto d = (point0 - point1).getNorm1(); if (d >= minDistances[2]) { // do nothing } else if (d < minDistances[0]) { minDistances[2] = minDistances[1]; minDistances[1] = minDistances[0]; minDistances[0] = d; localIndexes[2] = localIndexes[1]; localIndexes[1] = localIndexes[0]; localIndexes[0] = index; } else if (d < minDistances[1]) { minDistances[2] = minDistances[1]; minDistances[1] = d; localIndexes[2] = localIndexes[1]; localIndexes[1] = index; } else { minDistances[2] = d; localIndexes[2] = index; } } //--------------------------------------------------------------------------- inline void updateNearestNeighWithCheck( const Vec3<int32_t>& point0, const Vec3<int32_t>& point1, const int32_t index, int32_t (&localIndexes)[3], int64_t (&minDistances)[3]) { if ( index == localIndexes[0] || index == localIndexes[1] || index == localIndexes[2]) return; updateNearestNeigh(point0, point1, index, localIndexes, minDistances); } //--------------------------------------------------------------------------- inline void computeNearestNeighbors( const AttributeParameterSet& aps, const 
AttributeBrickHeader& abh, const std::vector<MortonCodeWithIndex>& packedVoxel, const std::vector<uint32_t>& retained, int32_t startIndex, int32_t endIndex, int32_t lodIndex, std::vector<uint32_t>& indexes, std::vector<PCCPredictor>& predictors, std::vector<uint32_t>& pointIndexToPredictorIndex, int32_t& predIndex, MortonIndexMap3d& atlas) { constexpr auto searchRangeNear = 2; constexpr auto bucketSizeLog2 = 5; constexpr auto bucketSize = 1 << bucketSizeLog2; constexpr auto bucketSizeMinus1 = bucketSize - 1; constexpr auto levelCount = 3; const int32_t shiftBits = aps.scalable_lifting_enabled_flag ? 1 + lodIndex : 1 + aps.dist2 + abh.attr_dist2_delta + lodIndex; const int32_t shiftBits3 = 3 * shiftBits; const int32_t log2CubeSize = atlas.cubeSizeLog2(); const int32_t atlasBits = 3 * log2CubeSize; // NB: when the atlas boundary is greater than 2^63, all points belong // to a single atlas. The clipping is necessary to avoid undefined // behaviour of shifts greater than or equal to the word size. 
const int32_t atlasBoundaryBit = std::min(63, shiftBits3 + atlasBits); const int32_t retainedSize = retained.size(); const int32_t indexesSize = endIndex - startIndex; const auto rangeInterLod = aps.inter_lod_search_range; const auto rangeIntraLod = aps.intra_lod_search_range; static const uint8_t kNeighOffset[27] = { 7, // { 0, 0, 0} 0 3, // {-1, 0, 0} 1 5, // { 0, -1, 0} 2 6, // { 0, 0, -1} 3 35, // { 1, 0, 0} 4 21, // { 0, 1, 0} 5 14, // { 0, 0, 1} 6 28, // { 0, 1, 1} 7 42, // { 1, 0, 1} 8 49, // { 1, 1, 0} 9 12, // { 0, -1, 1} 10 10, // {-1, 0, 1} 11 17, // {-1, 1, 0} 12 20, // { 0, 1, -1} 13 34, // { 1, 0, -1} 14 33, // { 1, -1, 0} 15 4, // { 0, -1, -1} 16 2, // {-1, 0, -1} 17 1, // {-1, -1, 0} 18 56, // { 1, 1, 1} 19 24, // {-1, 1, 1} 20 40, // { 1, -1, 1} 21 48, // { 1, 1, -1} 22 32, // { 1, -1, -1} 23 16, // {-1, 1, -1} 24 8, // {-1, -1, 1} 25 0 // {-1, -1, -1} 26 }; // The point positions biased by lodNieghBias // todo(df): preserve this std::vector<point_t> biasedPos; biasedPos.reserve(packedVoxel.size()); for (const auto& src : packedVoxel) { auto point = clacIntermediatePosition( aps.scalable_lifting_enabled_flag, lodIndex, src.position); biasedPos.push_back(times(point, aps.lodNeighBias)); } atlas.reserve(retainedSize); std::vector<int32_t> neighborIndexes; neighborIndexes.reserve(64); BoxHierarchy<bucketSizeLog2, levelCount> hBBoxes; hBBoxes.resize(retainedSize); for (int32_t i = 0, b = 0; i < retainedSize; ++b) { hBBoxes.insert(biasedPos[retained[i]], i); ++i; for (int32_t k = 1; k < bucketSize && i < retainedSize; ++k, ++i) { hBBoxes.insert(biasedPos[retained[i]], i); } } hBBoxes.update(); BoxHierarchy<bucketSizeLog2, levelCount> hIntraBBoxes; if (lodIndex >= aps.intra_lod_prediction_skip_layers) { hIntraBBoxes.resize(indexesSize); for (int32_t i = startIndex, b = 0; i < endIndex; ++b) { hIntraBBoxes.insert(biasedPos[indexes[i]], i - startIndex); ++i; for (int32_t k = 1; k < bucketSize && i < endIndex; ++k, ++i) { 
hIntraBBoxes.insert(biasedPos[indexes[i]], i - startIndex); } } hIntraBBoxes.update(); } const auto bucketSize0Log2 = hBBoxes.bucketSizeLog2(0); const auto bucketSize1Log2 = hBBoxes.bucketSizeLog2(1); const auto bucketSize2Log2 = hBBoxes.bucketSizeLog2(2); int64_t curAtlasId = -1; int64_t lastMortonCodeShift3 = -1; int64_t cubeIndex = 0; for (int32_t i = startIndex, j = 0; i < endIndex; ++i) { int32_t localIndexes[3] = {-1, -1, -1}; int64_t minDistances[3] = {std::numeric_limits<int64_t>::max(), std::numeric_limits<int64_t>::max(), std::numeric_limits<int64_t>::max()}; const int32_t index = indexes[i]; const auto& pv = packedVoxel[index]; const int64_t mortonCode = pv.mortonCode; const int64_t pointAtlasId = mortonCode >> atlasBoundaryBit; const int64_t mortonCodeShiftBits3 = mortonCode >> shiftBits3; const int32_t pointIndex = pv.index; const auto bpoint = biasedPos[index]; indexes[i] = pointIndex; auto& predictor = predictors[--predIndex]; pointIndexToPredictorIndex[pointIndex] = predIndex; if (retainedSize) { while (j < retainedSize - 1 && mortonCode >= packedVoxel[retained[j]].mortonCode) { ++j; } if (curAtlasId != pointAtlasId) { atlas.clearUpdates(); curAtlasId = pointAtlasId; while ( cubeIndex < retainedSize && (packedVoxel[retained[cubeIndex]].mortonCode >> atlasBoundaryBit) == curAtlasId) { atlas.set( packedVoxel[retained[cubeIndex]].mortonCode >> shiftBits3, cubeIndex); ++cubeIndex; } } if (lastMortonCodeShift3 != mortonCodeShiftBits3) { lastMortonCodeShift3 = mortonCodeShiftBits3; const auto basePosition = morton3dAdd(mortonCodeShiftBits3, -1ll); neighborIndexes.resize(0); for (int32_t n = 0; n < 27; ++n) { const auto neighbMortonCode = morton3dAdd(basePosition, kNeighOffset[n]); if ((neighbMortonCode >> atlasBits) != curAtlasId) { continue; } const auto range = atlas.get(neighbMortonCode); for (int32_t k = range.start; k < range.end; ++k) { neighborIndexes.push_back(k); } } } for (const auto k : neighborIndexes) { updateNearestNeigh( bpoint, 
biasedPos[retained[k]], k, localIndexes, minDistances); } if (localIndexes[2] == -1) { const auto center = localIndexes[0] == -1 ? j : localIndexes[0]; const auto k0 = std::max(0, center - rangeInterLod); const auto k1 = std::min(retainedSize - 1, center + rangeInterLod); updateNearestNeighWithCheck( bpoint, biasedPos[retained[center]], center, localIndexes, minDistances); for (int32_t n = 1; n <= searchRangeNear; ++n) { const int32_t kp = center + n; if (kp <= k1) { updateNearestNeighWithCheck( bpoint, biasedPos[retained[kp]], kp, localIndexes, minDistances); } const int32_t kn = center - n; if (kn >= k0) { updateNearestNeighWithCheck( bpoint, biasedPos[retained[kn]], kn, localIndexes, minDistances); } } const int32_t p1 = std::min(retainedSize - 1, center + searchRangeNear + 1); const int32_t p0 = std::max(0, center - searchRangeNear - 1); // search p1...k1 const int32_t b21 = k1 >> bucketSize2Log2; const int32_t b20 = p1 >> bucketSize2Log2; const int32_t b11 = k1 >> bucketSize1Log2; const int32_t b10 = p1 >> bucketSize1Log2; const int32_t b01 = k1 >> bucketSize0Log2; const int32_t b00 = p1 >> bucketSize0Log2; for (int32_t b2 = b20; b2 <= b21; ++b2) { if ( localIndexes[2] != -1 && hBBoxes.bBox(b2, 2).getDist1(bpoint) >= minDistances[2]) continue; const auto alignedIndex1 = b2 << bucketSizeLog2; const auto start1 = std::max(b10, alignedIndex1); const auto end1 = std::min(b11, alignedIndex1 + bucketSizeMinus1); for (int32_t b1 = start1; b1 <= end1; ++b1) { if ( localIndexes[2] != -1 && hBBoxes.bBox(b1, 1).getDist1(bpoint) >= minDistances[2]) continue; const auto alignedIndex0 = b1 << bucketSizeLog2; const auto start0 = std::max(b00, alignedIndex0); const auto end0 = std::min(b01, alignedIndex0 + bucketSizeMinus1); for (int32_t b0 = start0; b0 <= end0; ++b0) { if ( localIndexes[2] != -1 && hBBoxes.bBox(b0, 0).getDist1(bpoint) >= minDistances[2]) continue; const int32_t alignedIndex = b0 << bucketSizeLog2; const int32_t h0 = std::max(p1, alignedIndex); const int32_t 
h1 = std::min(k1, alignedIndex + bucketSizeMinus1); for (int32_t k = h0; k <= h1; ++k) { updateNearestNeighWithCheck( bpoint, biasedPos[retained[k]], k, localIndexes, minDistances); } } } } // search k0...p1 const int32_t c21 = p0 >> bucketSize2Log2; const int32_t c20 = k0 >> bucketSize2Log2; const int32_t c11 = p0 >> bucketSize1Log2; const int32_t c10 = k0 >> bucketSize1Log2; const int32_t c01 = p0 >> bucketSize0Log2; const int32_t c00 = k0 >> bucketSize0Log2; for (int32_t c2 = c21; c2 >= c20; --c2) { if ( localIndexes[2] != -1 && hBBoxes.bBox(c2, 2).getDist1(bpoint) >= minDistances[2]) continue; const auto alignedIndex1 = c2 << bucketSizeLog2; const auto start1 = std::max(c10, alignedIndex1); const auto end1 = std::min(c11, alignedIndex1 + bucketSizeMinus1); for (int32_t c1 = end1; c1 >= start1; --c1) { if ( localIndexes[2] != -1 && hBBoxes.bBox(c1, 1).getDist1(bpoint) >= minDistances[2]) continue; const auto alignedIndex0 = c1 << bucketSizeLog2; const auto start0 = std::max(c00, alignedIndex0); const auto end0 = std::min(c01, alignedIndex0 + bucketSizeMinus1); for (int32_t c0 = end0; c0 >= start0; --c0) { if ( localIndexes[2] != -1 && hBBoxes.bBox(c0, 0).getDist1(bpoint) >= minDistances[2]) continue; const int32_t alignedIndex = c0 << bucketSizeLog2; const int32_t h0 = std::max(k0, alignedIndex); const int32_t h1 = std::min(p0, alignedIndex + bucketSizeMinus1); for (int32_t k = h1; k >= h0; --k) { updateNearestNeighWithCheck( bpoint, biasedPos[retained[k]], k, localIndexes, minDistances); } } } } } predictor.neighborCount = (localIndexes[0] != -1) + (localIndexes[1] != -1) + (localIndexes[2] != -1); for (int32_t h = 0; h < predictor.neighborCount; ++h) localIndexes[h] = retained[localIndexes[h]]; } if (lodIndex >= aps.intra_lod_prediction_skip_layers) { const int32_t k00 = i + 1; const int32_t k01 = std::min(endIndex - 1, k00 + searchRangeNear); for (int32_t k = k00; k <= k01; ++k) { updateNearestNeigh( bpoint, biasedPos[indexes[k]], indexes[k], localIndexes, 
minDistances); } const int32_t k0 = k01 + 1 - startIndex; const int32_t k1 = std::min(endIndex - 1, k00 + rangeIntraLod) - startIndex; // search k0...k1 const int32_t b21 = k1 >> bucketSize2Log2; const int32_t b20 = k0 >> bucketSize2Log2; const int32_t b11 = k1 >> bucketSize1Log2; const int32_t b10 = k0 >> bucketSize1Log2; const int32_t b01 = k1 >> bucketSize0Log2; const int32_t b00 = k0 >> bucketSize0Log2; for (int32_t b2 = b20; b2 <= b21; ++b2) { if ( localIndexes[2] != -1 && hIntraBBoxes.bBox(b2, 2).getDist1(bpoint) >= minDistances[2]) continue; const auto alignedIndex1 = b2 << bucketSizeLog2; const auto start1 = std::max(b10, alignedIndex1); const auto end1 = std::min(b11, alignedIndex1 + bucketSizeMinus1); for (int32_t b1 = start1; b1 <= end1; ++b1) { if ( localIndexes[2] != -1 && hIntraBBoxes.bBox(b1, 1).getDist1(bpoint) >= minDistances[2]) continue; const auto alignedIndex0 = b1 << bucketSizeLog2; const auto start0 = std::max(b00, alignedIndex0); const auto end0 = std::min(b01, alignedIndex0 + bucketSizeMinus1); for (int32_t b0 = start0; b0 <= end0; ++b0) { if ( localIndexes[2] != -1 && hIntraBBoxes.bBox(b0, 0).getDist1(bpoint) >= minDistances[2]) continue; const int32_t alignedIndex = b0 << bucketSizeLog2; const int32_t h0 = std::max(k0, alignedIndex); const int32_t h1 = std::min(k1, alignedIndex + bucketSizeMinus1); for (int32_t h = h0; h <= h1; ++h) { const int32_t k = startIndex + h; updateNearestNeigh( bpoint, biasedPos[indexes[k]], indexes[k], localIndexes, minDistances); } } } } } predictor.neighborCount = std::min( aps.num_pred_nearest_neighbours_minus1 + 1, (localIndexes[0] != -1) + (localIndexes[1] != -1) + (localIndexes[2] != -1)); for (int32_t h = 0; h < predictor.neighborCount; ++h) { auto& neigh = predictor.neighbors[h]; neigh.predictorIndex = packedVoxel[localIndexes[h]].index; neigh.weight = (biasedPos[localIndexes[h]] - bpoint).getNorm2<int64_t>(); } // Prune neighbours based upon max neigh range. 
if (aps.scalable_lifting_enabled_flag) { int maxNeighRange = aps.max_neigh_range_minus1 + 1; int64_t maxDistance = 3ll * maxNeighRange << 2 * lodIndex; if (aps.lodNeighBias == 1) { predictor.pruneDistanceGt(maxDistance); } else { auto curPt = clacIntermediatePosition(true, lodIndex, pv.position); for (int h = 1; h < predictor.neighborCount; h++) { auto neighPt = clacIntermediatePosition( true, lodIndex, packedVoxel[localIndexes[h]].position); // Discard this and subsequent points if distance limit exceeded auto norm2 = (curPt - neighPt).getNorm2<int64_t>(); if (norm2 > maxDistance) { predictor.neighborCount = h; break; } } } } if (predictor.neighborCount > 1) { if (predictor.neighbors[0].weight > predictor.neighbors[1].weight) std::swap(predictor.neighbors[1], predictor.neighbors[0]); if (predictor.neighborCount == 3) { if (predictor.neighbors[1].weight > predictor.neighbors[2].weight) { std::swap(predictor.neighbors[2], predictor.neighbors[1]); if (predictor.neighbors[0].weight > predictor.neighbors[1].weight) std::swap(predictor.neighbors[1], predictor.neighbors[0]); } } } } } //--------------------------------------------------------------------------- inline void subsampleByDistance( const std::vector<MortonCodeWithIndex>& packedVoxel, const std::vector<uint32_t>& input, const int32_t shiftBits0, std::vector<uint32_t>& retained, std::vector<uint32_t>& indexes, MortonIndexMap3d& atlas) { assert(retained.empty()); if (input.size() == 1) { indexes.push_back(input[0]); return; } const int64_t radius2 = 3ll << (shiftBits0 << 1); const int32_t shiftBits = shiftBits0 + 1; const int32_t shiftBits3 = 3 * shiftBits; const int32_t atlasBits = 3 * atlas.cubeSizeLog2(); // NB: when the atlas boundary is greater than 2^63, all points belong // to a single atlas. The clipping is necessary to avoid undefined // behaviour of shifts greater than or equal to the word size. 
const int32_t atlasBoundaryBit = std::min(63, shiftBits3 + atlasBits); // these neighbour offsets are relative to basePosition static const uint8_t kNeighOffset[20] = { 7, // { 0, 0, 0} 3, // {-1, 0, 0} 5, // { 0, -1, 0} 6, // { 0, 0, -1} 12, // { 0, -1, 1} 10, // {-1, 0, 1} 17, // {-1, 1, 0} 20, // { 0, 1, -1} 34, // { 1, 0, -1} 33, // { 1, -1, 0} 4, // { 0, -1, -1} 2, // {-1, 0, -1} 1, // {-1, -1, 0} 24, // {-1, 1, 1} 40, // { 1, -1, 1} 48, // { 1, 1, -1} 32, // { 1, -1, -1} 16, // {-1, 1, -1} 8, // {-1, -1, 1} 0, // {-1, -1, -1} }; atlas.reserve(indexes.size() >> 1); int64_t curAtlasId = -1; int64_t lastRetainedMortonCode = -1; for (const auto index : input) { const auto& point = packedVoxel[index].position; const int64_t mortonCode = packedVoxel[index].mortonCode; const int64_t pointAtlasId = mortonCode >> atlasBoundaryBit; const int64_t mortonCodeShiftBits3 = mortonCode >> shiftBits3; if (curAtlasId != pointAtlasId) { atlas.clearUpdates(); curAtlasId = pointAtlasId; } if (retained.empty()) { retained.push_back(index); lastRetainedMortonCode = mortonCodeShiftBits3; atlas.set(lastRetainedMortonCode, int32_t(retained.size()) - 1); continue; } if (lastRetainedMortonCode == mortonCodeShiftBits3) { indexes.push_back(index); continue; } // the position of the parent, offset by (-1,-1,-1) const auto basePosition = morton3dAdd(mortonCodeShiftBits3, -1ll); bool found = false; for (int32_t n = 0; n < 20 && !found; ++n) { const auto neighbMortonCode = morton3dAdd(basePosition, kNeighOffset[n]); if ((neighbMortonCode >> atlasBits) != curAtlasId) continue; const auto unit = atlas.get(neighbMortonCode); for (int32_t k = unit.start; k < unit.end; ++k) { const auto delta = (packedVoxel[retained[k]].position - point); if (delta.getNorm2<int64_t>() <= radius2) { found = true; break; } } } if (found) { indexes.push_back(index); } else { retained.push_back(index); lastRetainedMortonCode = mortonCodeShiftBits3; atlas.set(lastRetainedMortonCode, int32_t(retained.size()) - 1); } } } 
//--------------------------------------------------------------------------- inline int32_t subsampleByOctreeWithCentroid( const PCCPointSet3& pointCloud, const std::vector<MortonCodeWithIndex>& packedVoxel, int32_t octreeNodeSizeLog2, const bool backward, const std::vector<uint32_t>& voxels) { point_t centroid(0); int count = 0; for (const auto t : voxels) { // forward direction point_t pos = clacIntermediatePosition( true, octreeNodeSizeLog2, pointCloud[packedVoxel[t].index]); centroid += pos; count++; } int32_t nnIndex = backward ? voxels.size() - 1 : 0; int64_t minNorm2 = std::numeric_limits<int64_t>::max(); if (backward) { int num = voxels.size() - 1; for (auto t = voxels.rbegin(), e = voxels.rend(); t != e; t++) { // backward direction point_t pos = clacIntermediatePosition( true, octreeNodeSizeLog2, pointCloud[packedVoxel[*t].index]); pos *= count; int64_t m = (pos - centroid).getNorm1(); if (minNorm2 > m) { minNorm2 = m; nnIndex = num; } num--; } } else { int num = 0; for (const auto t : voxels) { // forward direction point_t pos = clacIntermediatePosition( true, octreeNodeSizeLog2, pointCloud[packedVoxel[t].index]); pos *= count; int64_t m = (pos - centroid).getNorm1(); if (minNorm2 > m) { minNorm2 = m; nnIndex = num; } num++; } } return voxels[nnIndex]; } //--------------------------------------------------------------------------- inline void subsampleByOctree( const PCCPointSet3& pointCloud, const std::vector<MortonCodeWithIndex>& packedVoxel, const std::vector<uint32_t>& input, int32_t octreeNodeSizeLog2, std::vector<uint32_t>& retained, std::vector<uint32_t>& indexes, bool direction, int lodSamplingPeriod = 0) { const int indexCount = int(input.size()); if (indexCount == 1) { indexes.push_back(input[0]); return; } uint64_t lodUniformQuant = 3 * (octreeNodeSizeLog2 + 1); uint64_t currVoxelPos; std::vector<uint32_t> voxels; voxels.reserve(8); for (int i = 0; i < indexCount; ++i) { uint64_t nextVoxelPos = currVoxelPos = (packedVoxel[input[i]].mortonCode 
>> lodUniformQuant); if (i < indexCount - 1) nextVoxelPos = (packedVoxel[input[i + 1]].mortonCode >> lodUniformQuant); voxels.push_back(input[i]); if (i == (indexCount - 1) || currVoxelPos < nextVoxelPos) { if ((voxels.size() < lodSamplingPeriod) && (i != (indexCount - 1))) continue; uint32_t picked = subsampleByOctreeWithCentroid( pointCloud, packedVoxel, octreeNodeSizeLog2, direction, voxels); for (const auto idx : voxels) { if (picked == idx) retained.push_back(idx); else indexes.push_back(idx); } voxels.clear(); } } } //--------------------------------------------------------------------------- inline void subsampleByDecimation( const std::vector<uint32_t>& input, int lodSamplingPeriod, std::vector<uint32_t>& retained, std::vector<uint32_t>& indexes) { const int indexCount = int(input.size()); for (int i = 0, j = 1; i < indexCount; ++i) { if (--j) indexes.push_back(input[i]); else { retained.push_back(input[i]); j = lodSamplingPeriod; } } } //--------------------------------------------------------------------------- inline void subsample( const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const PCCPointSet3& pointCloud, const std::vector<MortonCodeWithIndex>& packedVoxel, const std::vector<uint32_t>& input, const int32_t lodIndex, std::vector<uint32_t>& retained, std::vector<uint32_t>& indexes, MortonIndexMap3d& atlas) { if (aps.scalable_lifting_enabled_flag) { int32_t octreeNodeSizeLog2 = lodIndex; bool direction = octreeNodeSizeLog2 & 1; subsampleByOctree( pointCloud, packedVoxel, input, octreeNodeSizeLog2, retained, indexes, direction); } else if (aps.lod_decimation_type == LodDecimationMethod::kPeriodic) { auto samplingPeriod = aps.lodSamplingPeriod[lodIndex]; subsampleByDecimation(input, samplingPeriod, retained, indexes); } else if (aps.lod_decimation_type == LodDecimationMethod::kCentroid) { auto samplingPeriod = aps.lodSamplingPeriod[lodIndex]; int32_t octreeNodeSizeLog2 = aps.dist2 + abh.attr_dist2_delta + lodIndex; subsampleByOctree( 
pointCloud, packedVoxel, input, octreeNodeSizeLog2, retained, indexes, true, samplingPeriod); } else { const auto shiftBits = aps.dist2 + abh.attr_dist2_delta + lodIndex; subsampleByDistance( packedVoxel, input, shiftBits, retained, indexes, atlas); } } //--------------------------------------------------------------------------- inline void computeMortonCodesUnsorted( const PCCPointSet3& pointCloud, const Vec3<int32_t> lodNeighBias, std::vector<MortonCodeWithIndex>& packedVoxel) { const int32_t pointCount = int32_t(pointCloud.getPointCount()); packedVoxel.resize(pointCount); for (int n = 0; n < pointCount; n++) { auto& pv = packedVoxel[n]; pv.position = pointCloud[n]; pv.mortonCode = mortonAddr(pv.position); pv.index = n; } } //--------------------------------------------------------------------------- inline void updatePredictors( const std::vector<uint32_t>& pointIndexToPredictorIndex, std::vector<PCCPredictor>& predictors) { for (auto& predictor : predictors) { if (predictor.neighborCount < 2) { predictor.neighbors[0].weight = 1; } else if (predictor.neighbors[0].weight == 0) { predictor.neighborCount = 1; predictor.neighbors[0].weight = 1; } for (int32_t k = 0; k < predictor.neighborCount; ++k) { auto& neighbor = predictor.neighbors[k]; neighbor.predictorIndex = pointIndexToPredictorIndex[neighbor.predictorIndex]; } } } //--------------------------------------------------------------------------- inline void buildPredictorsFast( const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const PCCPointSet3& pointCloud, int32_t minGeomNodeSizeLog2, int geom_num_points_minus1, std::vector<PCCPredictor>& predictors, std::vector<uint32_t>& numberOfPointsPerLevelOfDetail, std::vector<uint32_t>& indexes) { const int32_t pointCount = int32_t(pointCloud.getPointCount()); assert(pointCount); std::vector<MortonCodeWithIndex> packedVoxel; computeMortonCodesUnsorted(pointCloud, aps.lodNeighBias, packedVoxel); if (!aps.canonical_point_order_flag) 
std::sort(packedVoxel.begin(), packedVoxel.end()); std::vector<uint32_t> retained, input, pointIndexToPredictorIndex; pointIndexToPredictorIndex.resize(pointCount); retained.reserve(pointCount); input.resize(pointCount); for (uint32_t i = 0; i < pointCount; ++i) { input[i] = i; } // prepare output buffers predictors.resize(pointCount); numberOfPointsPerLevelOfDetail.resize(0); indexes.resize(0); indexes.reserve(pointCount); numberOfPointsPerLevelOfDetail.reserve(21); numberOfPointsPerLevelOfDetail.push_back(pointCount); bool concatenateLayers = aps.scalable_lifting_enabled_flag; std::vector<uint32_t> indexesOfSubsample; if (concatenateLayers) indexesOfSubsample.reserve(pointCount); std::vector<Box3<int32_t>> bBoxes; const int32_t log2CubeSize = 7; MortonIndexMap3d atlas; atlas.resize(log2CubeSize); atlas.init(); auto maxNumDetailLevels = aps.maxNumDetailLevels(); int32_t predIndex = int32_t(pointCount); for (auto lodIndex = minGeomNodeSizeLog2; !input.empty() && lodIndex < maxNumDetailLevels; ++lodIndex) { const int32_t startIndex = indexes.size(); if (lodIndex == maxNumDetailLevels - 1) { for (const auto index : input) { indexes.push_back(index); } } else { subsample( aps, abh, pointCloud, packedVoxel, input, lodIndex, retained, indexes, atlas); } const int32_t endIndex = indexes.size(); if (concatenateLayers) { indexesOfSubsample.resize(endIndex); if (startIndex != endIndex) { for (int32_t i = startIndex; i < endIndex; i++) indexesOfSubsample[i] = indexes[i]; int32_t numOfPointInSkipped = geom_num_points_minus1 + 1 - pointCount; if (endIndex - startIndex <= startIndex + numOfPointInSkipped) { concatenateLayers = false; } else { for (int32_t i = 0; i < startIndex; i++) indexes[i] = indexesOfSubsample[i]; // reset predIndex predIndex = pointCount; for (int lod = 0; lod < lodIndex - minGeomNodeSizeLog2; lod++) { int divided_startIndex = pointCount - numberOfPointsPerLevelOfDetail[lod]; int divided_endIndex = pointCount - numberOfPointsPerLevelOfDetail[lod + 1]; 
computeNearestNeighbors( aps, abh, packedVoxel, retained, divided_startIndex, divided_endIndex, lod + minGeomNodeSizeLog2, indexes, predictors, pointIndexToPredictorIndex, predIndex, atlas); } } } } computeNearestNeighbors( aps, abh, packedVoxel, retained, startIndex, endIndex, lodIndex, indexes, predictors, pointIndexToPredictorIndex, predIndex, atlas); if (!retained.empty()) { numberOfPointsPerLevelOfDetail.push_back(retained.size()); } input.resize(0); std::swap(retained, input); } std::reverse(indexes.begin(), indexes.end()); updatePredictors(pointIndexToPredictorIndex, predictors); std::reverse( numberOfPointsPerLevelOfDetail.begin(), numberOfPointsPerLevelOfDetail.end()); } //--------------------------------------------------------------------------- } // namespace pcc #endif /* PCCTMC3Common_h */
46,748
31.737395
79
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/PCCTMC3Decoder.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <functional> #include <map> #include "Attribute.h" #include "PayloadBuffer.h" #include "PCCMath.h" #include "PCCPointSet.h" #include "frame.h" #include "framectr.h" #include "geometry.h" #include "hls.h" namespace pcc { //============================================================================ struct DecoderParams { // For partial decoding (aka, scalable bitstreams), the number of octree // layers to skip during the decode process (attribute coding must take // this into account) int minGeomNodeSizeLog2; // A maximum number of points to partially decode. int decodeMaxPoints; // Number of fractional bits used in output position representation. int outputFpBits; }; //============================================================================ class PCCTMC3Decoder3 { public: class Callbacks; PCCTMC3Decoder3(const DecoderParams& params); PCCTMC3Decoder3(const PCCTMC3Decoder3&) = delete; PCCTMC3Decoder3(PCCTMC3Decoder3&&) = default; PCCTMC3Decoder3& operator=(const PCCTMC3Decoder3& rhs) = delete; PCCTMC3Decoder3& operator=(PCCTMC3Decoder3&& rhs) = default; ~PCCTMC3Decoder3(); void init(); int decompress(const PayloadBuffer* buf, Callbacks* callback); //========================================================================== void storeSps(SequenceParameterSet&& sps); void storeGps(GeometryParameterSet&& gps); void storeAps(AttributeParameterSet&& aps); void storeTileInventory(TileInventory&& inventory); //========================================================================== private: void activateParameterSets(const AttributeParamInventoryHdr& gbh); void activateParameterSets(const GeometryBrickHeader& gbh); int decodeGeometryBrick(const PayloadBuffer& buf); void decodeAttributeBrick(const PayloadBuffer& buf); void decodeConstantAttribute(const PayloadBuffer& buf); bool dectectFrameBoundary(const PayloadBuffer* buf); void outputCurrentCloud(Callbacks* callback); void startFrame(); 
//========================================================================== private: // Decoder specific parameters DecoderParams _params; // Indicates that pointcloud output should be suppressed at a frame boundary bool _suppressOutput; // Indicates that this is the start of a new frame. // NB: this is set to false quiet early in the decoding process bool _firstSliceInFrame; // Indicates whether the output has been initialised bool _outputInitialized; // Current identifier of payloads with the same geometry int _sliceId; // Identifies the previous slice in bistream order int _prevSliceId; // Cumulative frame counter FrameCtr _frameCtr; // Position of the slice in the translated+scaled co-ordinate system. Vec3<int> _sliceOrigin; // The point cloud currently being decoded PCCPointSet3 _currentPointCloud; // The accumulated decoded slices PCCPointSet3 _accumCloud; // The current output cloud CloudFrame _outCloud; // Point positions in spherical coordinates of the current slice std::vector<point_t> _posSph; // Received parameter sets, mapping parameter set id -> parameterset std::map<int, SequenceParameterSet> _spss; std::map<int, GeometryParameterSet> _gpss; std::map<int, AttributeParameterSet> _apss; // Metadata that allows slices/tiles to be indentified by their bounding box TileInventory _tileInventory; // The active SPS const SequenceParameterSet* _sps; const GeometryParameterSet* _gps; GeometryBrickHeader _gbh; // Memorized context buffers std::unique_ptr<GeometryOctreeContexts> _ctxtMemOctreeGeom; std::unique_ptr<PredGeomContexts> _ctxtMemPredGeom; std::vector<AttributeContexts> _ctxtMemAttrs; std::vector<int> _ctxtMemAttrSliceIds; // Attribute decoder for reuse between attributes of same slice std::unique_ptr<AttributeDecoderIntf> _attrDecoder; }; //---------------------------------------------------------------------------- class PCCTMC3Decoder3::Callbacks { public: virtual void onOutputCloud(const CloudFrame&) = 0; }; 
//============================================================================ } // namespace pcc
6,013
32.977401
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/PCCTMC3Encoder.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <functional> #include <map> #include <string> #include <vector> #include "Attribute.h" #include "PayloadBuffer.h" #include "PCCMath.h" #include "PCCPointSet.h" #include "frame.h" #include "geometry.h" #include "geometry_params.h" #include "hls.h" #include "partitioning.h" #include "pointset_processing.h" namespace pcc { //============================================================================ struct EncoderAttributeParams { // NB: this only makes sense for setting configurable parameters AttributeBrickHeader abh; // Threshold for choosing dist2 out of the population of nearest neighbour // distances. float dist2PercentileEstimate; }; //---------------------------------------------------------------------------- struct EncoderParams { SequenceParameterSet sps; GeometryParameterSet gps; GeometryBrickHeader gbh; // NB: information about attributes is split between the SPS and the APS. // => The SPS enumerates the attributes, the APS controls coding params. std::vector<AttributeParameterSet> aps; // Encoder specific parameters for attributes std::vector<EncoderAttributeParams> attr; // todo(df): this should go away std::map<std::string, int> attributeIdxMap; // Determine the sequence bonuding box using the first input frame bool autoSeqBbox; // Length of the source point cloud unit vectors. double srcUnitLength; // Scale factor used to define the coordinate system used for coding. // This is the coordinate system where slicing is performed. // P_cod = P_src * codedGeomScale double codedGeomScale; // Scale factor used to define the sequence coordinate system. // P_seq = P_src * seqGeomScale double seqGeomScale; // Scale factor used to define the external coordinate system. // P_ext = P_src * extGeomScale double extGeomScale; // Number of fractional bits used in output position representation. 
int outputFpBits; // Encoder specific parameters for geometry OctreeEncOpts geom; // Options for the predictive geometry coder PredGeomEncOpts predGeom; // Parameters that control partitioning PartitionParams partition; // attribute recolouring parameters RecolourParams recolour; // LiDAR head position Vec3<int> lidarHeadPosition; // number of expected lasers int numLasers; // floating Lasers' theta (have to be converted to fixed point in gps) std::vector<double> lasersTheta; // floating Lasers' H (have to be converted to fixed point in gps) std::vector<double> lasersZ; // per-slice trisoup node sizes std::vector<int> trisoupNodeSizesLog2; // Enable enforcement of level limits (encoder will abort if exceeded) bool enforceLevelLimits; // Qp used for IDCM quantisation (used to derive HLS values) int idcmQp; // precision expected for attributes after scaling with predgeom // and spherical coordinates int attrSphericalMaxLog2; }; //============================================================================ class PCCTMC3Encoder3 { public: class Callbacks; PCCTMC3Encoder3(); PCCTMC3Encoder3(const PCCTMC3Encoder3&) = delete; PCCTMC3Encoder3(PCCTMC3Encoder3&&) = default; PCCTMC3Encoder3& operator=(const PCCTMC3Encoder3& rhs) = delete; PCCTMC3Encoder3& operator=(PCCTMC3Encoder3&& rhs) = default; ~PCCTMC3Encoder3(); int compress( const PCCPointSet3& inputPointCloud, EncoderParams* params, Callbacks*, CloudFrame* reconstructedCloud = nullptr); void compressPartition( const PCCPointSet3& inputPointCloud, const PCCPointSet3& originPartCloud, EncoderParams* params, Callbacks*, CloudFrame* reconstructedCloud = nullptr); static void deriveParameterSets(EncoderParams* params); static void fixupParameterSets(EncoderParams* params); private: void appendSlice(PCCPointSet3& cloud); void encodeGeometryBrick(const EncoderParams*, PayloadBuffer* buf); SrcMappedPointSet quantization(const PCCPointSet3& src); private: PCCPointSet3 pointCloud; // Point positions in spherical coordinates of 
the current slice std::vector<point_t> _posSph; // Scale factor used to decimate the input point cloud. // Decimation is performed as if the input were scaled by // Round(P_src * inputDecimationScale) // and duplicate points removed. // todo: expose this parameter? double _inputDecimationScale; // Scale factor that defines coding coordinate system double _srcToCodingScale; // Sequence origin in terms of coding coordinate system Vec3<int> _originInCodingCoords; // Position of the slice in the translated+scaled co-ordinate system. Vec3<int> _sliceOrigin; // Size of the current slice Vec3<int> _sliceBoxWhd; // The active parameter sets const SequenceParameterSet* _sps; const GeometryParameterSet* _gps; std::vector<const AttributeParameterSet*> _aps; // Cached copy of the curent _gbh (after encoding geometry) GeometryBrickHeader _gbh; // Indicates that this is the start of a new frame bool _firstSliceInFrame; // Current identifier of payloads with the same geometry int _sliceId; // Identifies the previous slice in bistream order int _prevSliceId; // Identifies the current tile int _tileId; // Current frame number. // NB: only the log2_max_frame_ctr LSBs are sampled for frame_ctr int _frameCounter; // Memorized context buffers std::unique_ptr<GeometryOctreeContexts> _ctxtMemOctreeGeom; std::unique_ptr<PredGeomContexts> _ctxtMemPredGeom; std::vector<AttributeContexts> _ctxtMemAttrs; std::vector<int> _ctxtMemAttrSliceIds; }; //---------------------------------------------------------------------------- class PCCTMC3Encoder3::Callbacks { public: virtual void onOutputBuffer(const PayloadBuffer&) = 0; virtual void onPostRecolour(const PCCPointSet3&) = 0; }; //============================================================================ } // namespace pcc
7,766
30.445344
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/PayloadBuffer.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include "hls.h" #include <vector> namespace pcc { //============================================================================ struct PayloadBuffer : public std::vector<char> { PayloadType type; PayloadBuffer() = default; PayloadBuffer(PayloadType payload_type) : type(payload_type) { reserve(4096); } }; //============================================================================ } // namespace pcc
2,267
37.440678
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/RAHT.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "RAHT.h" #include <cassert> #include <cinttypes> #include <climits> #include <cstddef> #include <utility> #include <vector> #include <stdio.h> #include "PCCTMC3Common.h" #include "PCCMisc.h" namespace pcc { //============================================================================ struct UrahtNode { int64_t pos; int weight; Qps qp; }; //============================================================================ // remove any non-unique leaves from a level in the uraht tree int reduceUnique( int numNodes, int numAttrs, std::vector<UrahtNode>* weightsIn, std::vector<UrahtNode>* weightsOut, std::vector<int>* attrsIn, std::vector<int>* attrsOut) { // process a single level of the tree int64_t posPrev = -1; auto weightsInWrIt = weightsIn->begin(); auto weightsInRdIt = weightsIn->cbegin(); auto attrsInWrIt = attrsIn->begin(); auto attrsInRdIt = attrsIn->begin(); for (int i = 0; i < numNodes; i++) { const auto& node = *weightsInRdIt++; // copy across unique nodes if (node.pos != posPrev) { posPrev = node.pos; *weightsInWrIt++ = node; for (int k = 0; k < numAttrs; k++) *attrsInWrIt++ = *attrsInRdIt++; continue; } // duplicate node (weightsInWrIt - 1)->weight += node.weight; weightsOut->push_back(node); for (int k = 0; k < numAttrs; k++) { *(attrsInWrIt - numAttrs + k) += *attrsInRdIt; attrsOut->push_back(*attrsInRdIt++); } } // number of nodes in next level return std::distance(weightsIn->begin(), weightsInWrIt); } //============================================================================ // Split a level of values into sum and difference pairs. 
int reduceLevel( int level, int numNodes, int numAttrs, std::vector<UrahtNode>* weightsIn, std::vector<UrahtNode>* weightsOut, std::vector<int>* attrsIn, std::vector<int>* attrsOut) { // process a single level of the tree int64_t posPrev = -1; auto weightsInWrIt = weightsIn->begin(); auto weightsInRdIt = weightsIn->cbegin(); auto attrsInWrIt = attrsIn->begin(); auto attrsInRdIt = attrsIn->begin(); for (int i = 0; i < numNodes; i++) { auto& node = *weightsInRdIt++; bool newPair = (posPrev ^ node.pos) >> level != 0; posPrev = node.pos; if (newPair) { *weightsInWrIt++ = node; for (int k = 0; k < numAttrs; k++) *attrsInWrIt++ = *attrsInRdIt++; } else { auto& left = *(weightsInWrIt - 1); left.weight += node.weight; left.qp[0] = (left.qp[0] + node.qp[0]) >> 1; left.qp[1] = (left.qp[1] + node.qp[1]) >> 1; weightsOut->push_back(node); for (int k = 0; k < numAttrs; k++) { *(attrsInWrIt - numAttrs + k) += *attrsInRdIt; attrsOut->push_back(*attrsInRdIt++); } } } // number of nodes in next level return std::distance(weightsIn->begin(), weightsInWrIt); } //============================================================================ // Merge sum and difference values to form a tree. 
void expandLevel( int level, int numNodes, int numAttrs, std::vector<UrahtNode>* weightsIn, // expand by numNodes before expand std::vector<UrahtNode>* weightsOut, // shrink after expand std::vector<int>* attrsIn, std::vector<int>* attrsOut) { if (numNodes == 0) return; // process a single level of the tree auto weightsInWrIt = weightsIn->rbegin(); auto weightsInRdIt = std::next(weightsIn->crbegin(), numNodes); auto weightsOutRdIt = weightsOut->crbegin(); auto attrsInWrIt = attrsIn->rbegin(); auto attrsInRdIt = std::next(attrsIn->crbegin(), numNodes * numAttrs); auto attrsOutRdIt = attrsOut->crbegin(); for (int i = 0; i < numNodes;) { bool isPair = (weightsOutRdIt->pos ^ weightsInRdIt->pos) >> level == 0; if (!isPair) { *weightsInWrIt++ = *weightsInRdIt++; for (int k = 0; k < numAttrs; k++) *attrsInWrIt++ = *attrsInRdIt++; continue; } // going to process a pair i++; // Out node is inserted before In node. const auto& nodeDelta = *weightsInWrIt++ = *weightsOutRdIt++; auto curAttrIt = attrsInWrIt; for (int k = 0; k < numAttrs; k++) *attrsInWrIt++ = *attrsOutRdIt++; // move In node to correct position, subtracting delta *weightsInWrIt = *weightsInRdIt++; (weightsInWrIt++)->weight -= nodeDelta.weight; for (int k = 0; k < numAttrs; k++) { *attrsInWrIt = *attrsInRdIt++; *attrsInWrIt++ -= *curAttrIt++; } } } //============================================================================ // Search for neighbour with @value in the ordered list [first, last). // // If distance is positive, search [from, from+distance]. // If distance is negative, search [from-distance, from]. 
template<typename It, typename T, typename T2, typename Cmp> It findNeighbour(It first, It last, It from, T value, T2 distance, Cmp compare) { It start = first; It end = last; if (distance >= 0) { start = from; if ((distance + 1) < std::distance(from, last)) end = std::next(from, distance + 1); } else { end = from; if ((-distance) < std::distance(first, from)) start = std::prev(from, -distance); } auto found = std::lower_bound(start, end, value, compare); if (found == end) return last; return found; } //============================================================================ // Find the neighbours of the node indicated by @t between @first and @last. // The position weight of each found neighbour is stored in two arrays. template<typename It> void findNeighbours( It first, It last, It it, int level, uint8_t occupancy, int parentNeighIdx[19], int parentNeighWeights[19]) { static const uint8_t neighMasks[19] = {255, 15, 240, 51, 204, 85, 170, 3, 12, 5, 10, 48, 192, 80, 160, 17, 34, 68, 136}; // current position (discard extra precision) int64_t cur_pos = it->pos >> level; // the position of the parent, offset by (-1,-1,-1) int64_t base_pos = morton3dAdd(cur_pos, -1ll); // these neighbour offsets are relative to base_pos static const uint8_t neighOffset[19] = {0, 3, 35, 5, 21, 6, 14, 1, 17, 2, 10, 33, 49, 34, 42, 4, 12, 20, 28}; // special case for the direct parent (no need to search); parentNeighIdx[0] = std::distance(first, it); parentNeighWeights[0] = it->weight; for (int i = 1; i < 19; i++) { // Only look for neighbours that have an effect if (!(occupancy & neighMasks[i])) { parentNeighIdx[i] = -1; continue; } // compute neighbour address to look for // the delta between it and the current position is int64_t neigh_pos = morton3dAdd(base_pos, neighOffset[i]); int64_t delta = neigh_pos - cur_pos; // find neighbour auto found = findNeighbour( first, last, it, neigh_pos, delta, [=](decltype(*it)& candidate, int64_t neigh_pos) { return (candidate.pos >> level) < 
neigh_pos; }); if (found == last) { parentNeighIdx[i] = -1; continue; } if ((found->pos >> level) != neigh_pos) { parentNeighIdx[i] = -1; continue; } parentNeighIdx[i] = std::distance(first, found); parentNeighWeights[i] = found->weight; } } //============================================================================ // Generate the spatial prediction of a block. template<typename It> void intraDcPred( int numAttrs, const int neighIdx[19], int occupancy, It first, FixedPoint predBuf[][8]) { static const uint8_t predMasks[19] = {255, 15, 240, 51, 204, 85, 170, 3, 12, 5, 10, 48, 192, 80, 160, 17, 34, 68, 136}; static const int predWeight[19] = {4, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; static const int kDivisors[25] = {8192, 6554, 5461, 4681, 4096, 3641, 3277, 2979, 2731, 2521, 2341, 2185, 2048, 1928, 1820, 1725, 1638, 1560, 1489, 1425, 1365, 1311, 1260, 1214, 1170}; int weightSum[8] = {-4, -4, -4, -4, -4, -4, -4, -4}; std::fill_n(&predBuf[0][0], 8 * numAttrs, FixedPoint(0)); int64_t neighValue[3]; int64_t limitLow = 0; int64_t limitHigh = 0; for (int i = 0; i < 19; i++) { if (neighIdx[i] == -1) continue; auto neighValueIt = std::next(first, numAttrs * neighIdx[i]); for (int k = 0; k < numAttrs; k++) neighValue[k] = *neighValueIt++; // skip neighbours that are outside of threshold limits if (i) { if (10 * neighValue[0] <= limitLow || 10 * neighValue[0] >= limitHigh) continue; } else { constexpr int ratioThreshold1 = 2; constexpr int ratioThreshold2 = 25; limitLow = ratioThreshold1 * neighValue[0]; limitHigh = ratioThreshold2 * neighValue[0]; } // apply weighted neighbour value to masked positions for (int k = 0; k < numAttrs; k++) neighValue[k] *= predWeight[i] << pcc::FixedPoint::kFracBits; int mask = predMasks[i] & occupancy; for (int j = 0; mask; j++, mask >>= 1) { if (mask & 1) { weightSum[j] += predWeight[i]; for (int k = 0; k < numAttrs; k++) predBuf[k][j].val += neighValue[k]; } } } // normalise FixedPoint div; for (int i = 0; i < 8; i++, 
occupancy >>= 1) { if (occupancy & 1) { div.val = kDivisors[weightSum[i]]; for (int k = 0; k < numAttrs; k++) predBuf[k][i] *= div; } } } //============================================================================ // Encapsulation of a RAHT transform stage. class RahtKernel { public: RahtKernel(int weightLeft, int weightRight) { uint64_t w = weightLeft + weightRight; uint64_t isqrtW = irsqrt(w); _a.val = (isqrt(uint64_t(weightLeft) << (2 * _a.kFracBits)) * isqrtW) >> 40; _b.val = (isqrt(uint64_t(weightRight) << (2 * _b.kFracBits)) * isqrtW) >> 40; } void fwdTransform( FixedPoint left, FixedPoint right, FixedPoint* lf, FixedPoint* hf) { FixedPoint a = _a, b = _b; // lf = left * a + right * b // hf = right * a - left * b *lf = right; *lf *= b; *hf = right; *hf *= a; a *= left; b *= left; *lf += a; *hf -= b; } void invTransform( FixedPoint lf, FixedPoint hf, FixedPoint* left, FixedPoint* right) { FixedPoint a = _a, b = _b; *left = lf; *left *= a; *right = lf; *right *= b; b *= hf; a *= hf; *left -= b; *right += a; } private: FixedPoint _a, _b; }; //============================================================================ // In-place transform a set of sparse 2x2x2 blocks each using the same weights template<class Kernel> void fwdTransformBlock222( int numBufs, FixedPoint buf[][8], int weights[8 + 8 + 8 + 8]) { static const int a[4 + 4 + 4] = {0, 2, 4, 6, 0, 4, 1, 5, 0, 1, 2, 3}; static const int b[4 + 4 + 4] = {1, 3, 5, 7, 2, 6, 3, 7, 4, 5, 6, 7}; for (int i = 0, iw = 0; i < 12; i++, iw += 2) { int i0 = a[i]; int i1 = b[i]; if (weights[iw] + weights[iw + 1] == 0) continue; // only one occupied, propagate to next level if (!weights[iw] || !weights[iw + 1]) { if (!weights[iw]) { for (int k = 0; k < numBufs; k++) std::swap(buf[k][i0], buf[k][i1]); } continue; } // actual transform Kernel kernel(weights[iw], weights[iw + 1]); for (int k = 0; k < numBufs; k++) { auto& bufk = buf[k]; kernel.fwdTransform(bufk[i0], bufk[i1], &bufk[i0], &bufk[i1]); } } } 
//============================================================================ // In-place inverse transform a set of sparse 2x2x2 blocks each using the // same weights template<class Kernel> void invTransformBlock222( int numBufs, FixedPoint buf[][8], int weights[8 + 8 + 8 + 8]) { static const int a[4 + 4 + 4] = {0, 2, 4, 6, 0, 4, 1, 5, 0, 1, 2, 3}; static const int b[4 + 4 + 4] = {1, 3, 5, 7, 2, 6, 3, 7, 4, 5, 6, 7}; for (int i = 11, iw = 22; i >= 0; i--, iw -= 2) { int i0 = a[i]; int i1 = b[i]; if (weights[iw] + weights[iw + 1] == 0) continue; // only one occupied, propagate to next level if (!weights[iw] || !weights[iw + 1]) { if (!weights[iw]) { for (int k = 0; k < numBufs; k++) std::swap(buf[k][i0], buf[k][i1]); } continue; } // actual transform Kernel kernel(weights[iw], weights[iw + 1]); for (int k = 0; k < numBufs; k++) { auto& bufk = buf[k]; kernel.invTransform(bufk[i0], bufk[i1], &bufk[i0], &bufk[i1]); } } } //============================================================================ // expand a set of eight weights into three levels void mkWeightTree(int weights[8 + 8 + 8 + 8]) { int* in = &weights[0]; int* out = &weights[8]; for (int i = 0; i < 4; i++) { out[0] = out[4] = in[0] + in[1]; if (!in[0] || !in[1]) out[4] = 0; // single node, no high frequencies in += 2; out++; } out += 4; for (int i = 0; i < 4; i++) { out[0] = out[4] = in[0] + in[1]; if (!in[0] || !in[1]) out[4] = 0; // single node, no high frequencies in += 2; out++; } out += 4; for (int i = 0; i < 4; i++) { out[0] = out[4] = in[0] + in[1]; if (!in[0] || !in[1]) out[4] = 0; // single node, no high frequencies in += 2; out++; } } //============================================================================ // Invoke mapFn(coefIdx) for each present coefficient in the transform template<class T> void scanBlock(int weights[8 + 8 + 8 + 8], T mapFn) { static const int8_t kRahtScanOrder[] = {0, 4, 2, 1, 6, 5, 3, 7}; // there is always the DC coefficient (empty blocks are not transformed) 
mapFn(0); for (int i = 1; i < 8; i++) { if (!weights[24 + kRahtScanOrder[i]]) continue; mapFn(kRahtScanOrder[i]); } } //============================================================================ // Tests if two positions are siblings at the given tree level static bool isSibling(int64_t pos0, int64_t pos1, int level) { return ((pos0 ^ pos1) >> level) == 0; } //============================================================================ // Core transform process (for encoder/decoder) template<bool isEncoder> void uraht_process( bool raht_prediction_enabled_flag, const int predictionThreshold[2], const QpSet& qpset, const Qps* pointQpOffsets, int numPoints, int numAttrs, int64_t* positions, int* attributes, int32_t* coeffBufIt) { // coefficients are stored in three planar arrays. coeffBufItK is a set // of iterators to each array. int32_t* coeffBufItK[3] = { coeffBufIt, coeffBufIt + numPoints, coeffBufIt + numPoints * 2, }; if (numPoints == 1) { auto quantizers = qpset.quantizers(0, pointQpOffsets[0]); for (int k = 0; k < numAttrs; k++) { auto& q = quantizers[std::min(k, int(quantizers.size()) - 1)]; if (isEncoder) { auto coeff = attributes[k]; assert(coeff <= INT_MAX && coeff >= INT_MIN); *coeffBufItK[k]++ = coeff = q.quantize(coeff << kFixedPointAttributeShift); attributes[k] = divExp2RoundHalfUp(q.scale(coeff), kFixedPointAttributeShift); } else { int64_t coeff = *coeffBufItK[k]++; attributes[k] = divExp2RoundHalfUp(q.scale(coeff), kFixedPointAttributeShift); } } return; } std::vector<UrahtNode> weightsLf, weightsHf; std::vector<int> attrsLf, attrsHf; weightsLf.reserve(numPoints); attrsLf.reserve(numPoints * numAttrs); int regionQpShift = 4; // copy positions into internal form // todo(df): lift to api for (int i = 0; i < numPoints; i++) { weightsLf.emplace_back(UrahtNode{positions[i], 1, {pointQpOffsets[i][0] << regionQpShift, pointQpOffsets[i][1] << regionQpShift}}); for (int k = 0; k < numAttrs; k++) { attrsLf.push_back(attributes[i * numAttrs + k]); } } 
weightsHf.reserve(numPoints); attrsHf.reserve(numPoints * numAttrs); // ascend tree std::vector<int> levelHfPos; for (int level = 0, numNodes = weightsLf.size(); numNodes > 1; level++) { levelHfPos.push_back(weightsHf.size()); if (level == 0) { // process any duplicate points numNodes = reduceUnique( numNodes, numAttrs, &weightsLf, &weightsHf, &attrsLf, &attrsHf); } else { // normal level reduction numNodes = reduceLevel( level, numNodes, numAttrs, &weightsLf, &weightsHf, &attrsLf, &attrsHf); } } assert(weightsLf[0].weight == numPoints); // reconstruction buffers std::vector<int> attrRec, attrRecParent; attrRec.resize(numPoints * numAttrs); attrRecParent.resize(numPoints * numAttrs); std::vector<int> attrRecUs, attrRecParentUs; attrRecUs.resize(numPoints * numAttrs); attrRecParentUs.resize(numPoints * numAttrs); std::vector<UrahtNode> weightsParent; weightsParent.reserve(numPoints); std::vector<int> numParentNeigh, numGrandParentNeigh; numParentNeigh.resize(numPoints); numGrandParentNeigh.resize(numPoints); // quant layer selection auto qpLayer = 0; // descend tree weightsLf.resize(1); attrsLf.resize(numAttrs); for (int level = levelHfPos.size() - 1, isFirst = 1; level > 0; /*nop*/) { int numNodes = weightsHf.size() - levelHfPos[level]; weightsLf.resize(weightsLf.size() + numNodes); attrsLf.resize(attrsLf.size() + numNodes * numAttrs); expandLevel( level, numNodes, numAttrs, &weightsLf, &weightsHf, &attrsLf, &attrsHf); weightsHf.resize(levelHfPos[level]); attrsHf.resize(levelHfPos[level] * numAttrs); // expansion of level is complete, processing is now on the next level level--; // every three levels, perform transform if (level % 3) continue; // initial scan position of the coefficient buffer // -> first level = all coeffs // -> otherwise = ac coeffs only bool inheritDc = !isFirst; bool enablePredictionInLvl = inheritDc && raht_prediction_enabled_flag; isFirst = 0; // select quantiser according to transform layer qpLayer = std::min(qpLayer + 1, 
int(qpset.layers.size()) - 1); // prepare reconstruction buffers // previous reconstruction -> attrRecParent std::swap(attrRec, attrRecParent); std::swap(attrRecUs, attrRecParentUs); std::swap(numParentNeigh, numGrandParentNeigh); auto attrRecParentUsIt = attrRecParentUs.cbegin(); auto attrRecParentIt = attrRecParent.cbegin(); auto weightsParentIt = weightsParent.cbegin(); auto numGrandParentNeighIt = numGrandParentNeigh.cbegin(); for (int i = 0, iLast, iEnd = weightsLf.size(); i < iEnd; i = iLast) { // todo(df): hoist and dynamically allocate FixedPoint transformBuf[6][8] = {}; FixedPoint(*transformPredBuf)[8] = &transformBuf[numAttrs]; int weights[8 + 8 + 8 + 8] = {}; Qps nodeQp[8] = {}; uint8_t occupancy = 0; // generate weights, occupancy mask, and fwd transform buffers // for all siblings of the current node. for (iLast = i; iLast < iEnd; iLast++) { int nextNode = iLast > i && !isSibling(weightsLf[iLast].pos, weightsLf[i].pos, level + 3); if (nextNode) break; int nodeIdx = (weightsLf[iLast].pos >> level) & 0x7; weights[nodeIdx] = weightsLf[iLast].weight; nodeQp[nodeIdx][0] = weightsLf[iLast].qp[0] >> regionQpShift; nodeQp[nodeIdx][1] = weightsLf[iLast].qp[1] >> regionQpShift; occupancy |= 1 << nodeIdx; if (isEncoder) { for (int k = 0; k < numAttrs; k++) transformBuf[k][nodeIdx] = attrsLf[iLast * numAttrs + k]; } } mkWeightTree(weights); if (!inheritDc) { for (int j = i, nodeIdx = 0; nodeIdx < 8; nodeIdx++) { if (!weights[nodeIdx]) continue; numParentNeigh[j++] = 19; } } // Inter-level prediction: // - Find the parent neighbours of the current node // - Generate prediction for all attributes into transformPredBuf // - Subtract transformed coefficients from forward transform // - The transformPredBuf is then used for reconstruction bool enablePrediction = enablePredictionInLvl; if (enablePredictionInLvl) { // indexes of the neighbouring parents int parentNeighIdx[19]; int parentNeighWeights[19]; int parentNeighCount = 0; if (*numGrandParentNeighIt < 
predictionThreshold[0]) { enablePrediction = false; } else { findNeighbours( weightsParent.cbegin(), weightsParent.cend(), weightsParentIt, level + 3, occupancy, parentNeighIdx, parentNeighWeights); for (int i = 0; i < 19; i++) { parentNeighCount += (parentNeighIdx[i] != -1); } if (parentNeighCount < predictionThreshold[1]) { enablePrediction = false; } else intraDcPred( numAttrs, parentNeighIdx, occupancy, attrRecParent.begin(), transformPredBuf); } for (int j = i, nodeIdx = 0; nodeIdx < 8; nodeIdx++) { if (!weights[nodeIdx]) continue; numParentNeigh[j++] = parentNeighCount; } } int parentWeight = 0; if (inheritDc) { parentWeight = weightsParentIt->weight; weightsParentIt++; numGrandParentNeighIt++; } // normalise coefficients for (int childIdx = 0; childIdx < 8; childIdx++) { if (weights[childIdx] <= 1) continue; // Summed attribute values if (isEncoder) { FixedPoint rsqrtWeight; uint64_t w = weights[childIdx]; int shift = w > 1024 ? ilog2(w - 1) >> 1 : 0; rsqrtWeight.val = irsqrt(w) >> (40 - shift - FixedPoint::kFracBits); for (int k = 0; k < numAttrs; k++) { transformBuf[k][childIdx].val >>= shift; transformBuf[k][childIdx] *= rsqrtWeight; } } // Predicted attribute values if (enablePrediction) { FixedPoint sqrtWeight; sqrtWeight.val = isqrt(uint64_t(weights[childIdx]) << (2 * FixedPoint::kFracBits)); for (int k = 0; k < numAttrs; k++) transformPredBuf[k][childIdx] *= sqrtWeight; } } // forward transform: // - encoder: transform both attribute sums and prediction // - decoder: just transform prediction if (isEncoder && enablePrediction) fwdTransformBlock222<RahtKernel>(2 * numAttrs, transformBuf, weights); else if (isEncoder) fwdTransformBlock222<RahtKernel>(numAttrs, transformBuf, weights); else if (enablePrediction) fwdTransformBlock222<RahtKernel>(numAttrs, transformPredBuf, weights); // per-coefficient operations: // - subtract transform domain prediction (encoder) // - write out/read in quantised coefficients // - inverse quantise + add transform domain 
prediction scanBlock(weights, [&](int idx) { // skip the DC coefficient unless at the root of the tree if (inheritDc && !idx) return; // subtract transformed prediction (skipping DC) if (isEncoder && enablePrediction) { for (int k = 0; k < numAttrs; k++) { transformBuf[k][idx] -= transformPredBuf[k][idx]; } } // The RAHT transform auto quantizers = qpset.quantizers(qpLayer, nodeQp[idx]); for (int k = 0; k < numAttrs; k++) { auto& q = quantizers[std::min(k, int(quantizers.size()) - 1)]; if (isEncoder) { auto coeff = transformBuf[k][idx].round(); assert(coeff <= INT_MAX && coeff >= INT_MIN); *coeffBufItK[k]++ = coeff = q.quantize(coeff << kFixedPointAttributeShift); transformPredBuf[k][idx] += divExp2RoundHalfUp(q.scale(coeff), kFixedPointAttributeShift); } else { int64_t coeff = *coeffBufItK[k]++; transformPredBuf[k][idx] += divExp2RoundHalfUp(q.scale(coeff), kFixedPointAttributeShift); } } }); // replace DC coefficient with parent if inheritable if (inheritDc) { for (int k = 0; k < numAttrs; k++) { attrRecParentIt++; int64_t val = *attrRecParentUsIt++; if (val > 0) transformPredBuf[k][0].val = val << (15 - 2); else transformPredBuf[k][0].val = -((-val) << (15 - 2)); } } invTransformBlock222<RahtKernel>(numAttrs, transformPredBuf, weights); for (int j = i, nodeIdx = 0; nodeIdx < 8; nodeIdx++) { if (!weights[nodeIdx]) continue; for (int k = 0; k < numAttrs; k++) { FixedPoint temp = transformPredBuf[k][nodeIdx]; temp.val <<= 2; attrRecUs[j * numAttrs + k] = temp.round(); } // scale values for next level if (weights[nodeIdx] > 1) { FixedPoint rsqrtWeight; uint64_t w = weights[nodeIdx]; int shift = w > 1024 ? 
ilog2(w - 1) >> 1 : 0; rsqrtWeight.val = irsqrt(w) >> (40 - shift - FixedPoint::kFracBits); for (int k = 0; k < numAttrs; k++) { transformPredBuf[k][nodeIdx].val >>= shift; transformPredBuf[k][nodeIdx] *= rsqrtWeight; } } for (int k = 0; k < numAttrs; k++) attrRec[j * numAttrs + k] = transformPredBuf[k][nodeIdx].round(); j++; } } // preserve current weights/positions for later search weightsParent = weightsLf; } // process duplicate points at level 0 std::swap(attrRec, attrRecParent); auto attrRecParentIt = attrRecParent.cbegin(); auto attrsHfIt = attrsHf.cbegin(); for (int i = 0, out = 0, iEnd = weightsLf.size(); i < iEnd; i++) { int weight = weightsLf[i].weight; Qps nodeQp = {weightsLf[i].qp[0] >> regionQpShift, weightsLf[i].qp[1] >> regionQpShift}; // unique points have weight = 1 if (weight == 1) { for (int k = 0; k < numAttrs; k++) attrRec[out++] = *attrRecParentIt++; continue; } // duplicates FixedPoint attrSum[3]; FixedPoint attrRecDc[3]; FixedPoint sqrtWeight; sqrtWeight.val = isqrt(uint64_t(weight) << (2 * FixedPoint::kFracBits)); for (int k = 0; k < numAttrs; k++) { if (isEncoder) attrSum[k] = attrsLf[i * numAttrs + k]; attrRecDc[k] = *attrRecParentIt++; attrRecDc[k] *= sqrtWeight; } FixedPoint rsqrtWeight; for (int w = weight - 1; w > 0; w--) { RahtKernel kernel(w, 1); int shift = w > 1024 ? ilog2(uint32_t(w - 1)) >> 1 : 0; if (isEncoder) rsqrtWeight.val = irsqrt(w) >> (40 - shift - FixedPoint::kFracBits); auto quantizers = qpset.quantizers(qpLayer, nodeQp); for (int k = 0; k < numAttrs; k++) { auto& q = quantizers[std::min(k, int(quantizers.size()) - 1)]; FixedPoint transformBuf[2]; if (isEncoder) { // invert the initial reduction (sum) // NB: read from (w-1) since left side came from attrsLf. transformBuf[1] = attrsHfIt[(w - 1) * numAttrs + k]; attrSum[k] -= transformBuf[1]; transformBuf[0] = attrSum[k]; // NB: weight of transformBuf[1] is by construction 1. 
transformBuf[0].val >>= shift; transformBuf[0] *= rsqrtWeight; kernel.fwdTransform( transformBuf[0], transformBuf[1], &transformBuf[0], &transformBuf[1]); auto coeff = transformBuf[1].round(); assert(coeff <= INT_MAX && coeff >= INT_MIN); *coeffBufItK[k]++ = coeff = q.quantize(coeff << kFixedPointAttributeShift); transformBuf[1] = divExp2RoundHalfUp(q.scale(coeff), kFixedPointAttributeShift); } else { int64_t coeff = *coeffBufItK[k]++; transformBuf[1] = divExp2RoundHalfUp(q.scale(coeff), kFixedPointAttributeShift); } // inherit the DC value transformBuf[0] = attrRecDc[k]; kernel.invTransform( transformBuf[0], transformBuf[1], &transformBuf[0], &transformBuf[1]); attrRecDc[k] = transformBuf[0]; attrRec[out + w * numAttrs + k] = transformBuf[1].round(); if (w == 1) attrRec[out + k] = transformBuf[0].round(); } } attrsHfIt += (weight - 1) * numAttrs; out += weight * numAttrs; } // write-back reconstructed attributes assert(attrRec.size() == numAttrs * numPoints); std::copy(attrRec.begin(), attrRec.end(), attributes); } //============================================================================ /* * RAHT Fixed Point * * Inputs: * quantStepSizeLuma = Quantization step * mortonCode = list of 'voxelCount' Morton codes of voxels, sorted in ascending Morton code order * attributes = 'voxelCount' x 'attribCount' array of attributes, in row-major order * attribCount = number of attributes (e.g., 3 if attributes are red, green, blue) * voxelCount = number of voxels * * Outputs: * weights = list of 'voxelCount' weights associated with each transform coefficient * coefficients = quantized transformed attributes array, in column-major order * binaryLayer = binary layer where each coefficient was generated * * Note output weights are typically used only for the purpose of * sorting or bucketing for entropy coding. 
*/ void regionAdaptiveHierarchicalTransform( bool raht_prediction_enabled_flag, const int predictionThreshold[2], const QpSet& qpset, const Qps* pointQpOffsets, int64_t* mortonCode, int* attributes, const int attribCount, const int voxelCount, int* coefficients) { uraht_process<true>( raht_prediction_enabled_flag, predictionThreshold, qpset, pointQpOffsets, voxelCount, attribCount, mortonCode, attributes, coefficients); } //============================================================================ /* * inverse RAHT Fixed Point * * Inputs: * quantStepSizeLuma = Quantization step * mortonCode = list of 'voxelCount' Morton codes of voxels, sorted in ascending Morton code order * attribCount = number of attributes (e.g., 3 if attributes are red, green, blue) * voxelCount = number of voxels * coefficients = quantized transformed attributes array, in column-major order * * Outputs: * attributes = 'voxelCount' x 'attribCount' array of attributes, in row-major order * * Note output weights are typically used only for the purpose of * sorting or bucketing for entropy coding. */ void regionAdaptiveHierarchicalInverseTransform( bool raht_prediction_enabled_flag, const int predictionThreshold[2], const QpSet& qpset, const Qps* pointQpOffsets, int64_t* mortonCode, int* attributes, const int attribCount, const int voxelCount, int* coefficients) { uraht_process<false>( raht_prediction_enabled_flag, predictionThreshold, qpset, pointQpOffsets, voxelCount, attribCount, mortonCode, attributes, coefficients); } //============================================================================ } // namespace pcc
33,545
30.43955
98
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/RAHT.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <cstdint> #include "FixedPoint.h" #include "quantization.h" #include "quantization.h" #include <vector> namespace pcc { void regionAdaptiveHierarchicalTransform( bool raht_prediction_enabled_flag, const int predictionThreshold[2], const QpSet& qpset, const Qps* pointQPOffset, int64_t* mortonCode, int* attributes, const int attribCount, const int voxelCount, int* coefficients); void regionAdaptiveHierarchicalInverseTransform( bool raht_prediction_enabled_flag, const int predictionThreshold[2], const QpSet& qpset, const Qps* pointQpOffset, int64_t* mortonCode, int* attributes, const int attribCount, const int voxelCount, int* coefficients); } /* namespace pcc */
2,558
35.557143
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/TMC3.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "TMC3.h" #include <memory> #include "PCCTMC3Encoder.h" #include "PCCTMC3Decoder.h" #include "constants.h" #include "ply.h" #include "pointset_processing.h" #include "program_options_lite.h" #include "io_tlv.h" #include "version.h" using namespace std; using namespace pcc; //============================================================================ enum class OutputSystem { // Output after global scaling, don't convert to external system kConformance = 0, // Scale output to external coordinate system kExternal = 1, }; //---------------------------------------------------------------------------- struct Parameters { bool isDecoder; // command line parsing should adjust dist2 values according to PQS bool positionQuantizationScaleAdjustsDist2; // Scale factor to apply when loading the ply before integer conversion. // Eg, If source point positions are in fractional metres converting to // millimetres will allow some fidelity to be preserved. double inputScale; // Length of the output point clouds unit vectors. double outputUnitLength; // output mode for ply writing (binary or ascii) bool outputBinaryPly; // Fractional fixed-point bits retained in conformance output int outputFpBits; // Output coordinate system to use OutputSystem outputSystem; // when true, configure the encoder as if no attributes are specified bool disableAttributeCoding; // Frame number of first file in input sequence. int firstFrameNum; // Number of frames to process. int frameCount; std::string uncompressedDataPath; std::string compressedStreamPath; std::string reconstructedDataPath; // Filename for saving recoloured point cloud (encoder). std::string postRecolorPath; // Filename for saving pre inverse scaled point cloud (decoder). std::string preInvScalePath; pcc::EncoderParams encoder; pcc::DecoderParams decoder; // perform attribute colourspace conversion on ply input/output. 
bool convertColourspace; // resort the input points by azimuth angle bool sortInputByAzimuth; }; //---------------------------------------------------------------------------- class SequenceCodec { public: // NB: params must outlive the lifetime of the decoder. SequenceCodec(Parameters* params) : params(params) {} // Perform conversions and write output point cloud // \params cloud a mutable copy of reconFrame.cloud void writeOutputFrame( const std::string& postInvScalePath, const std::string& preInvScalePath, const CloudFrame& reconFrame, PCCPointSet3& cloud); // determine the output ply scale factor double outputScale(const CloudFrame& cloud) const; // the output ply origin, scaled according to output coordinate system Vec3<double> outputOrigin(const CloudFrame& cloud) const; void scaleAttributesForInput( const std::vector<AttributeDescription>& attrDescs, PCCPointSet3& cloud); void scaleAttributesForOutput( const std::vector<AttributeDescription>& attrDescs, PCCPointSet3& cloud); protected: Parameters* params; }; //---------------------------------------------------------------------------- class SequenceEncoder : public SequenceCodec , PCCTMC3Encoder3::Callbacks { public: // NB: params must outlive the lifetime of the decoder. SequenceEncoder(Parameters* params); int compress(Stopwatch* clock); protected: int compressOneFrame(Stopwatch* clock); void onOutputBuffer(const PayloadBuffer& buf) override; void onPostRecolour(const PCCPointSet3& cloud) override; private: ply::PropertyNameMap _plyAttrNames; // The raw origin used for input sorting Vec3<int> _angularOrigin; PCCTMC3Encoder3 encoder; std::ofstream bytestreamFile; int frameNum; }; //---------------------------------------------------------------------------- class SequenceDecoder : public SequenceCodec , PCCTMC3Decoder3::Callbacks { public: // NB: params must outlive the lifetime of the decoder. 
SequenceDecoder(Parameters* params); int decompress(Stopwatch* clock); protected: void onOutputCloud(const CloudFrame& cloud) override; private: PCCTMC3Decoder3 decoder; std::ofstream bytestreamFile; Stopwatch* clock; }; //============================================================================ void convertToGbr( const std::vector<AttributeDescription>& attrDescs, PCCPointSet3& cloud); void convertFromGbr( const std::vector<AttributeDescription>& attrDescs, PCCPointSet3& cloud); //============================================================================ int main(int argc, char* argv[]) { cout << "MPEG PCC tmc3 version " << ::pcc::version << endl; Parameters params; try { if (!ParseParameters(argc, argv, params)) return 1; } catch (df::program_options_lite::ParseFailure& e) { std::cerr << "Error parsing option \"" << e.arg << "\" with argument \"" << e.val << "\"." << std::endl; return 1; } // Timers to count elapsed wall/user time pcc::chrono::Stopwatch<std::chrono::steady_clock> clock_wall; pcc::chrono::Stopwatch<pcc::chrono::utime_inc_children_clock> clock_user; clock_wall.start(); int ret = 0; if (params.isDecoder) { ret = SequenceDecoder(&params).decompress(&clock_user); } else { ret = SequenceEncoder(&params).compress(&clock_user); } clock_wall.stop(); using namespace std::chrono; auto total_wall = duration_cast<milliseconds>(clock_wall.count()).count(); auto total_user = duration_cast<milliseconds>(clock_user.count()).count(); std::cout << "Processing time (wall): " << total_wall / 1000.0 << " s\n"; std::cout << "Processing time (user): " << total_user / 1000.0 << " s\n"; return ret; } //--------------------------------------------------------------------------- std::array<const char*, 3> axisOrderToPropertyNames(AxisOrder order) { static const std::array<const char*, 3> kAxisOrderToPropertyNames[] = { {"z", "y", "x"}, {"x", "y", "z"}, {"x", "z", "y"}, {"y", "z", "x"}, {"z", "y", "x"}, {"z", "x", "y"}, {"y", "x", "z"}, {"x", "y", "z"}, }; return 
kAxisOrderToPropertyNames[int(order)]; } //--------------------------------------------------------------------------- // :: Command line / config parsing helpers template<typename T> static std::istream& readUInt(std::istream& in, T& val) { unsigned int tmp; in >> tmp; val = T(tmp); return in; } namespace pcc { static std::istream& operator>>(std::istream& in, ScaleUnit& val) { try { readUInt(in, val); } catch (...) { in.clear(); std::string str; in >> str; val = ScaleUnit::kDimensionless; if (str == "metre") val = ScaleUnit::kMetre; else if (!str.empty()) throw std::runtime_error("Cannot parse unit"); } return in; } } // namespace pcc static std::istream& operator>>(std::istream& in, OutputSystem& val) { return readUInt(in, val); } namespace pcc { static std::istream& operator>>(std::istream& in, ColourMatrix& val) { return readUInt(in, val); } } // namespace pcc namespace pcc { static std::istream& operator>>(std::istream& in, AxisOrder& val) { return readUInt(in, val); } } // namespace pcc namespace pcc { static std::istream& operator>>(std::istream& in, AttributeEncoding& val) { return readUInt(in, val); } } // namespace pcc namespace pcc { static std::istream& operator>>(std::istream& in, LodDecimationMethod& val) { return readUInt(in, val); } } // namespace pcc namespace pcc { static std::istream& operator>>(std::istream& in, PartitionMethod& val) { return readUInt(in, val); } } // namespace pcc namespace pcc { static std::istream& operator>>(std::istream& in, PredGeomEncOpts::SortMode& val) { return readUInt(in, val); } } // namespace pcc namespace pcc { static std::istream& operator>>(std::istream& in, OctreeEncOpts::QpMethod& val) { return readUInt(in, val); } } // namespace pcc static std::ostream& operator<<(std::ostream& out, const OutputSystem& val) { switch (val) { case OutputSystem::kConformance: out << "0 (Conformance)"; break; case OutputSystem::kExternal: out << "1 (External)"; break; } return out; } namespace pcc { static std::ostream& 
operator<<(std::ostream& out, const ScaleUnit& val) { switch (val) { case ScaleUnit::kDimensionless: out << "0 (Dimensionless)"; break; case ScaleUnit::kMetre: out << "1 (Metre)"; break; } return out; } } // namespace pcc namespace pcc { static std::ostream& operator<<(std::ostream& out, const ColourMatrix& val) { switch (val) { case ColourMatrix::kIdentity: out << "0 (Identity)"; break; case ColourMatrix::kBt709: out << "1 (Bt709)"; break; case ColourMatrix::kUnspecified: out << "2 (Unspecified)"; break; case ColourMatrix::kReserved_3: out << "3 (Reserved)"; break; case ColourMatrix::kUsa47Cfr73dot682a20: out << "4 (Usa47Cfr73dot682a20)"; break; case ColourMatrix::kBt601: out << "5 (Bt601)"; break; case ColourMatrix::kSmpte170M: out << "6 (Smpte170M)"; break; case ColourMatrix::kSmpte240M: out << "7 (Smpte240M)"; break; case ColourMatrix::kYCgCo: out << "8 (kYCgCo)"; break; case ColourMatrix::kBt2020Ncl: out << "9 (Bt2020Ncl)"; break; case ColourMatrix::kBt2020Cl: out << "10 (Bt2020Cl)"; break; case ColourMatrix::kSmpte2085: out << "11 (Smpte2085)"; break; default: out << "Unknown"; break; } return out; } } // namespace pcc namespace pcc { static std::ostream& operator<<(std::ostream& out, const AxisOrder& val) { switch (val) { case AxisOrder::kZYX: out << "0 (zyx)"; break; case AxisOrder::kXYZ: out << "1 (xyz)"; break; case AxisOrder::kXZY: out << "2 (xzy)"; break; case AxisOrder::kYZX: out << "3 (yzx)"; break; case AxisOrder::kZYX_4: out << "4 (zyx)"; break; case AxisOrder::kZXY: out << "5 (zxy)"; break; case AxisOrder::kYXZ: out << "6 (yxz)"; break; case AxisOrder::kXYZ_7: out << "7 (xyz)"; break; } return out; } } // namespace pcc namespace pcc { static std::ostream& operator<<(std::ostream& out, const AttributeEncoding& val) { switch (val) { case AttributeEncoding::kRAHTransform: out << "0 (RAHT)"; break; case AttributeEncoding::kPredictingTransform: out << "1 (Pred)"; break; case AttributeEncoding::kLiftingTransform: out << "2 (Lift)"; break; case 
AttributeEncoding::kRaw: out << "3 (Raw)"; break; } return out; } } // namespace pcc namespace pcc { static std::ostream& operator<<(std::ostream& out, const LodDecimationMethod& val) { switch (val) { case LodDecimationMethod::kNone: out << "0 (None)"; break; case LodDecimationMethod::kPeriodic: out << "1 (Periodic)"; break; case LodDecimationMethod::kCentroid: out << "2 (Centroid)"; break; } return out; } } // namespace pcc namespace pcc { static std::ostream& operator<<(std::ostream& out, const PartitionMethod& val) { switch (val) { case PartitionMethod::kNone: out << "0 (None)"; break; case PartitionMethod::kUniformGeom: out << "2 (UniformGeom)"; break; case PartitionMethod::kOctreeUniform: out << "3 (UniformOctree)"; break; case PartitionMethod::kUniformSquare: out << "4 (UniformSquare)"; break; case PartitionMethod::kNpoints: out << "5 (NPointSpans)"; break; default: out << int(val) << " (Unknown)"; break; } return out; } } // namespace pcc namespace pcc { static std::ostream& operator<<(std::ostream& out, const PredGeomEncOpts::SortMode& val) { switch (val) { using SortMode = PredGeomEncOpts::SortMode; case SortMode::kNoSort: out << int(val) << " (None)"; break; case SortMode::kSortMorton: out << int(val) << " (Morton)"; break; case SortMode::kSortAzimuth: out << int(val) << " (Azimuth)"; break; case SortMode::kSortRadius: out << int(val) << " (Radius)"; break; case SortMode::kSortLaserAngle: out << int(val) << " (LaserAngle)"; break; default: out << int(val) << " (Unknown)"; break; } return out; } } // namespace pcc namespace pcc { static std::ostream& operator<<(std::ostream& out, const OctreeEncOpts::QpMethod& val) { switch (val) { using Method = OctreeEncOpts::QpMethod; case Method::kUniform: out << int(val) << " (Uniform)"; break; case Method::kRandom: out << int(val) << " (Random)"; break; case Method::kByDensity: out << int(val) << " (ByDensity)"; break; default: out << int(val) << " (Unknown)"; break; } return out; } } // namespace pcc namespace df { 
namespace program_options_lite {

  // Teach the option parser to treat pcc::Vec3<T> as a fixed-size
  // container of three T components (parsed via an output iterator over
  // the underlying storage).
  template<typename T>
  struct option_detail<pcc::Vec3<T>> {
    static constexpr bool is_container = true;
    static constexpr bool is_fixed_size = true;
    typedef T* output_iterator;

    // No-op: a fixed-size Vec3 is overwritten in place rather than cleared.
    static void clear(pcc::Vec3<T>& container){};

    static output_iterator make_output_iterator(pcc::Vec3<T>& container)
    {
      return &container[0];
    }
  };

}  // namespace program_options_lite
}  // namespace df

//---------------------------------------------------------------------------
// :: Command line / config parsing

void sanitizeEncoderOpts(
  Parameters& params, df::program_options_lite::ErrorReporter& err);

//---------------------------------------------------------------------------

// Parse the command line (and any referenced config files) into params.
//
// Returns true on success.  Returns false when help was requested, no
// arguments were given, or any error was reported — in which case the
// caller should terminate.  On the encoder path this also invokes
// sanitizeEncoderOpts() to normalise the parsed values, and finally dumps
// the effective configuration to stdout.
bool
ParseParameters(int argc, char* argv[], Parameters& params)
{
  namespace po = df::program_options_lite;

  // Scratch state for the attribute currently being described on the
  // command line; committed to params by attribute_setter below.
  struct {
    AttributeDescription desc;
    AttributeParameterSet aps;
    EncoderAttributeParams encoder;
  } params_attr;

  bool print_help = false;

  // a helper to set the attribute
  std::function<po::OptionFunc::Func> attribute_setter =
    [&](po::Options&, const std::string& name, po::ErrorReporter) {
      // copy the current state of parsed attribute parameters
      //
      // NB: this does not cause the default values of attr to be restored
      // for the next attribute block.  A side-effect of this is that the
      // following is allowed leading to attribute foo having both X=1 and
      // Y=2:
      //   "--attr.X=1 --attribute foo --attr.Y=2 --attribute foo"
      //
      // NB: insert returns any existing element
      const auto& it = params.encoder.attributeIdxMap.insert(
        {name, int(params.encoder.attributeIdxMap.size())});

      if (it.second) {
        // first occurrence of this attribute name: append new entries
        params.encoder.sps.attributeSets.push_back(params_attr.desc);
        params.encoder.aps.push_back(params_attr.aps);
        params.encoder.attr.push_back(params_attr.encoder);
        return;
      }

      // update existing entry
      params.encoder.sps.attributeSets[it.first->second] = params_attr.desc;
      params.encoder.aps[it.first->second] = params_attr.aps;
      params.encoder.attr[it.first->second] = params_attr.encoder;
    };

  /* clang-format off */
  // The definition of the program/config options, along with default values.
  //
  // NB: when updating the following tables:
  //      (a) please keep to 80-columns for easier reading at a glance,
  //      (b) do not vertically align values -- it breaks quickly
  //
  po::Options opts;
  opts.addOptions()
  ("help", print_help, false, "this help text")
  ("config,c", po::parseConfigFile, "configuration file name")

  (po::Section("General"))

  ("mode", params.isDecoder, false,
    "The encoding/decoding mode:\n"
    " 0: encode\n"
    " 1: decode")

  // i/o parameters
  ("firstFrameNum",
    params.firstFrameNum, 0,
    "Frame number for use with interpolating %d format specifiers "
    "in input/output filenames")

  ("frameCount",
    params.frameCount, 1,
    "Number of frames to encode")

  ("reconstructedDataPath",
    params.reconstructedDataPath, {},
    "The ouput reconstructed pointcloud file path (decoder only)")

  ("uncompressedDataPath",
    params.uncompressedDataPath, {},
    "The input pointcloud file path")

  ("compressedStreamPath",
    params.compressedStreamPath, {},
    "The compressed bitstream path (encoder=output, decoder=input)")

  ("postRecolorPath",
    params.postRecolorPath, {},
    "Recolored pointcloud file path (encoder only)")

  ("preInvScalePath",
    params.preInvScalePath, {},
    "Pre inverse scaled pointcloud file path (decoder only)")

  ("convertPlyColourspace",
    params.convertColourspace, true,
    "Convert ply colourspace according to attribute colourMatrix")

  ("outputBinaryPly",
    params.outputBinaryPly, true,
    "Output ply files using binary (or ascii) format")

  ("outputUnitLength",
    params.outputUnitLength, 0.,
    "Length of reconstructed point cloud x,y,z unit vectors\n"
    " 0: use srcUnitLength")

  ("outputScaling",
    params.outputSystem, OutputSystem::kExternal,
    "Output coordnate system scaling\n"
    " 0: Conformance\n"
    " 1: External")

  ("outputPrecisionBits",
    params.outputFpBits, -1,
    "Fractional bits in conformance output (prior to external scaling)\n"
    " 0: integer, -1: automatic (full)")

  // This section controls all general geometry scaling parameters
  (po::Section("Coordinate system scaling"))

  ("srcUnitLength",
    params.encoder.srcUnitLength, 1.,
    "Length of source point cloud x,y,z unit vectors in srcUnits")

  ("srcUnit",
    params.encoder.sps.seq_geom_scale_unit_flag, ScaleUnit::kDimensionless,
    " 0: dimensionless\n 1: metres")

  ("inputScale",
    params.inputScale, 1.,
    "Scale input while reading src ply. "
    "Eg, 1000 converts metres to integer millimetres")

  ("codingScale",
    params.encoder.codedGeomScale, 1.,
    "Scale used to represent coded geometry. Relative to inputScale")

  ("sequenceScale",
    params.encoder.seqGeomScale, 1.,
    "Scale used to obtain sequence coordinate system. "
    "Relative to inputScale")

  // Alias for compatibility with old name.
  ("positionQuantizationScale", params.encoder.seqGeomScale, 1.,
    "(deprecated)")

  ("externalScale",
    params.encoder.extGeomScale, 1.,
    "Scale used to define external coordinate system.\n"
    "Meaningless when srcUnit = metres\n"
    " 0: Use srcUnitLength\n"
    " >0: Relative to inputScale")

  (po::Section("Decoder"))

  ("skipOctreeLayers",
    params.decoder.minGeomNodeSizeLog2, 0,
    "Partial decoding of octree and attributes\n"
    " 0 : Full decode\n"
    " N>0 : Skip the bottom N layers in decoding process")

  ("decodeMaxPoints",
    params.decoder.decodeMaxPoints, 0,
    "Partially decode up to N points")

  (po::Section("Encoder"))

  ("sortInputByAzimuth",
    params.sortInputByAzimuth, false,
    "Sort input points by azimuth angle")

  ("geometry_axis_order",
    params.encoder.sps.geometry_axis_order, AxisOrder::kXYZ,
    "Sets the geometry axis coding order:\n"
    " 0: (zyx)\n 1: (xyz)\n 2: (xzy)\n"
    " 3: (yzx)\n 4: (zyx)\n 5: (zxy)\n"
    " 6: (yxz)\n 7: (xyz)")

  ("autoSeqBbox",
    params.encoder.autoSeqBbox, true,
    "Calculate seqOrigin and seqSizeWhd automatically.")

  // NB: the underlying variable is in STV order.
  //     Conversion happens during argument sanitization.
  ("seqOrigin",
    params.encoder.sps.seqBoundingBoxOrigin, {0},
    "Origin (x,y,z) of the sequence bounding box "
    "(in input coordinate system). "
    "Requires autoSeqBbox=0")

  // NB: the underlying variable is in STV order.
  //     Conversion happens during argument sanitization.
  ("seqSizeWhd",
    params.encoder.sps.seqBoundingBoxSize, {0},
    "Size of the sequence bounding box "
    "(in input coordinate system). "
    "Requires autoSeqBbox=0")

  ("mergeDuplicatedPoints",
    params.encoder.gps.geom_unique_points_flag, true,
    "Enables removal of duplicated points")

  ("partitionMethod",
    params.encoder.partition.method, PartitionMethod::kUniformSquare,
    "Method used to partition input point cloud into slices/tiles:\n"
    " 0: none\n"
    " 2: n Uniform-geometry partition bins along the longest edge\n"
    " 3: Uniform geometry partition at n octree depth\n"
    " 4: Uniform square partition\n"
    " 5: n-point spans of input")

  ("partitionOctreeDepth",
    params.encoder.partition.octreeDepth, 1,
    "Depth of octree partition for partitionMethod=4")

  ("sliceMaxPoints",
    params.encoder.partition.sliceMaxPoints, 1100000,
    "Maximum number of points per slice")

  ("sliceMinPoints",
    params.encoder.partition.sliceMinPoints, 550000,
    "Minimum number of points per slice (soft limit)")

  ("tileSize",
    params.encoder.partition.tileSize, 0,
    "Partition input into cubic tiles of given size")

  ("cabac_bypass_stream_enabled_flag",
    params.encoder.sps.cabac_bypass_stream_enabled_flag, false,
    "Controls coding method for ep(bypass) bins")

  ("entropyContinuationEnabled",
    params.encoder.sps.entropy_continuation_enabled_flag, false,
    "Propagate context state between slices")

  ("disableAttributeCoding",
    params.disableAttributeCoding, false,
    "Ignore attribute coding configuration")

  ("enforceLevelLimits",
    params.encoder.enforceLevelLimits, true,
    "Abort if level limits exceeded")

  (po::Section("Geometry"))

  ("geomTreeType",
    params.encoder.gps.predgeom_enabled_flag, false,
    "Selects the tree coding method:\n"
    " 0: octree\n"
    " 1: predictive")

  ("qtbtEnabled",
    params.encoder.gps.qtbt_enabled_flag, true,
    "Enables non-cubic geometry bounding box")

  ("maxNumQtBtBeforeOt",
    params.encoder.geom.qtbt.maxNumQtBtBeforeOt, 4,
    "Max number of qtbt partitions before ot")

  ("minQtbtSizeLog2",
    params.encoder.geom.qtbt.minQtbtSizeLog2, 0,
    "Minimum size of qtbt partitions")

  ("numOctreeEntropyStreams",
    // NB: this is adjusted by minus 1 after the arguments are parsed
    params.encoder.gbh.geom_stream_cnt_minus1, 1,
    "Number of entropy streams for octree coding")

  ("bitwiseOccupancyCoding",
    params.encoder.gps.bitwise_occupancy_coding_flag, true,
    "Selects between bitwise and bytewise occupancy coding:\n"
    " 0: bytewise\n"
    " 1: bitwise")

  ("neighbourAvailBoundaryLog2",
    // NB: this is adjusted by minus 1 after the arguments are parsed
    params.encoder.gps.neighbour_avail_boundary_log2_minus1, 0,
    "Defines the avaliability volume for neighbour occupancy lookups:\n"
    "<2: Limited to sibling nodes only")

  // NOTE(review): the first help line below is missing a trailing \n —
  // help text runs together; left unchanged here (runtime string).
  ("inferredDirectCodingMode",
    params.encoder.gps.inferred_direct_coding_mode, 1,
    "Early termination of the geometry octree for isolated points:"
    " 0: disabled\n"
    " 1: fully constrained\n"
    " 2: partially constrained\n"
    " 3: unconstrained (fastest)")

  ("jointTwoPointIdcm",
    params.encoder.gps.joint_2pt_idcm_enabled_flag, true,
    "Jointly code common prefix of two IDCM points")

  ("adjacentChildContextualization",
    params.encoder.gps.adjacent_child_contextualization_enabled_flag, true,
    "Occupancy contextualization using neighbouring adjacent children")

  ("intra_pred_max_node_size_log2",
    params.encoder.gps.intra_pred_max_node_size_log2, 0,
    "octree nodesizes eligible for occupancy intra prediction")

  ("planarEnabled",
    params.encoder.gps.geom_planar_mode_enabled_flag, true,
    "Use planar mode for geometry coding")

  ("planarModeThreshold0",
    params.encoder.gps.geom_planar_threshold0, 77,
    "Activation threshold (0-127) of first planar mode. "
    "Lower values imply more use of the first planar mode")

  ("planarModeThreshold1",
    params.encoder.gps.geom_planar_threshold1, 99,
    "Activation threshold (0-127) of second planar mode. "
    "Lower values imply more use of the first planar mode")

  ("planarModeThreshold2",
    params.encoder.gps.geom_planar_threshold2, 113,
    "Activation threshold (0-127) of third planar mode. "
    "Lower values imply more use of the third planar mode")

  ("planarModeIdcmUse",
    // NB: this is adjusted by minus1 after the arguments are parsed
    params.encoder.gps.geom_idcm_rate_minus1, 0,
    "Degree (1/32%) of IDCM activation when planar mode is enabled\n"
    " 0 => never, 32 => always")

  ("trisoupNodeSizeLog2",
    params.encoder.trisoupNodeSizesLog2, {0},
    "Node size for surface triangulation\n"
    " <2: disabled")

  ("trisoup_sampling_value",
    params.encoder.gps.trisoup_sampling_value, 0,
    "Trisoup voxelisation sampling rate\n"
    " 0: automatic")

  ("positionQuantisationEnabled",
    params.encoder.gps.geom_scaling_enabled_flag, false,
    "Enable in-loop quantisation of positions")

  ("positionQuantisationMethod",
    params.encoder.geom.qpMethod, OctreeEncOpts::QpMethod::kUniform,
    "Method used to determine per-node QP:\n"
    " 0: uniform\n"
    " 1: random\n"
    " 2: by node point density")

  ("positionQpMultiplierLog2",
    params.encoder.gps.geom_qp_multiplier_log2, 0,
    "Granularity of QP to step size mapping:\n"
    " n: 2^n QPs per doubling interval, n in 0..3")

  ("positionBaseQp",
    params.encoder.gps.geom_base_qp, 0,
    "Base QP used in position quantisation (0 = lossless)")

  ("positionIdcmQp",
    params.encoder.idcmQp, 0,
    "QP used in position quantisation of IDCM nodes")

  ("positionSliceQpOffset",
    params.encoder.gbh.geom_slice_qp_offset, 0,
    "Per-slice QP offset used in position quantisation")

  ("positionQuantisationOctreeSizeLog2",
    params.encoder.geom.qpOffsetNodeSizeLog2, -1,
    "Octree node size used for signalling position QP offsets "
    "(-1 => disabled)")

  ("positionQuantisationOctreeDepth",
    params.encoder.geom.qpOffsetDepth, -1,
    "Octree depth used for signalling position QP offsets (-1 => disabled)")

  ("positionBaseQpFreqLog2",
    params.encoder.gps.geom_qp_offset_intvl_log2, 8,
    "Frequency of sending QP offsets in predictive geometry coding")

  // NB: this will be corrected to be relative to base value later
  ("positionSliceQpFreqLog2",
    params.encoder.gbh.geom_qp_offset_intvl_log2_delta, 0,
    "Frequency of sending QP offsets in predictive geometry coding")

  ("angularEnabled",
    params.encoder.gps.geom_angular_mode_enabled_flag, false,
    "Controls angular contextualisation of occupancy")

  // NB: the underlying variable is in STV order.
  //     Conversion happens during argument sanitization.
  ("lidarHeadPosition",
    params.encoder.gps.gpsAngularOrigin, {0, 0, 0},
    "laser head position (x,y,z) in angular mode")

  ("numLasers",
    params.encoder.numLasers, 0,
    "Number of lasers in angular mode")

  ("lasersTheta",
    params.encoder.lasersTheta, {},
    "Vertical laser angle in angular mode")

  ("lasersZ",
    params.encoder.lasersZ, {},
    "Vertical laser offset in angular mode")

  ("lasersNumPhiPerTurn",
    params.encoder.gps.angularNumPhiPerTurn, {},
    "Number of sampling poisitions in a complete laser turn in angular mode")

  ("planarBufferDisabled",
    params.encoder.gps.planar_buffer_disabled_flag, false,
    "Disable planar buffer (when angular mode is enabled)")

  ("predGeomAzimuthQuantization",
    params.encoder.gps.azimuth_scaling_enabled_flag, true,
    "Quantize azimuth according to radius in predictive geometry coding")

  ("positionAzimuthScaleLog2",
    params.encoder.gps.geom_angular_azimuth_scale_log2_minus11, 5,
    "Additional bits to represent azimuth angle in predictive geometry coding")

  // NB: this will be corrected to be minus 1 later
  ("positionAzimuthSpeed",
    params.encoder.gps.geom_angular_azimuth_speed_minus1, 363,
    "Scale factor applied to azimuth angle in predictive geometry coding")

  ("positionRadiusInvScaleLog2",
    params.encoder.gps.geom_angular_radius_inv_scale_log2, 0,
    "Inverse scale factor applied to radius in predictive geometry coding")

  ("predGeomSort",
    params.encoder.predGeom.sortMode, PredGeomEncOpts::kSortMorton,
    "Predictive geometry tree construction order")

  ("predGeomAzimuthSortPrecision",
    params.encoder.predGeom.azimuthSortRecipBinWidth, 0.0f,
    "Reciprocal precision used in azimuthal sorting for tree construction")

  ("predGeomTreePtsMax",
    params.encoder.predGeom.maxPtsPerTree, 1100000,
    "Maximum number of points per predictive geometry tree")

  ("pointCountMetadata",
    params.encoder.gps.octree_point_count_list_present_flag, false,
    "Add octree layer point count metadata")

  (po::Section("Attributes"))

  // attribute processing
  // NB: Attribute options are special in the way they are applied (see above)
  // NOTE(review): the two concatenated help strings below join as
  // "...after thefollowing..." (missing space); left unchanged here.
  ("attribute",
    attribute_setter,
    "Encode the given attribute (NB, must appear after the"
    "following attribute parameters)")

  // NB: the cli option sets +1, the minus1 will be applied later
  ("attrScale",
    params_attr.desc.params.attr_scale_minus1, 1,
    "Scale factor used to interpret coded attribute values")

  ("attrOffset",
    params_attr.desc.params.attr_offset, 0,
    "Offset used to interpret coded attribute values")

  ("bitdepth",
    params_attr.desc.bitdepth, 8,
    "Attribute bitdepth")

  ("defaultValue",
    params_attr.desc.params.attr_default_value, {},
    "Default attribute component value(s) in case of data omission")

  // todo(df): this should be per-attribute
  ("colourMatrix",
    params_attr.desc.params.cicp_matrix_coefficients_idx, ColourMatrix::kBt709,
    "Matrix used in colourspace conversion\n"
    " 0: none (identity)\n"
    " 1: ITU-T BT.709\n"
    " 8: YCgCo")

  ("transformType",
    params_attr.aps.attr_encoding, AttributeEncoding::kPredictingTransform,
    "Coding method to use for attribute:\n"
    " 0: Region Adaptive Hierarchical Transform (RAHT)\n"
    " 1: Hierarchical neighbourhood prediction\n"
    " 2: Hierarchical neighbourhood prediction as lifting transform")

  ("rahtPredictionEnabled",
    params_attr.aps.raht_prediction_enabled_flag, true,
    "Controls the use of transform-domain prediction")

  ("rahtPredictionThreshold0",
    params_attr.aps.raht_prediction_threshold0, 2,
    "Grandparent threshold for early transform-domain prediction termination")

  ("rahtPredictionThreshold1",
    params_attr.aps.raht_prediction_threshold1, 6,
    "Parent threshold for early transform-domain prediction termination")

  // NB: the cli option sets +1, the minus1 will be applied later
  ("numberOfNearestNeighborsInPrediction",
    params_attr.aps.num_pred_nearest_neighbours_minus1, 3,
    "Attribute's maximum number of nearest neighbors to be used for prediction")

  ("adaptivePredictionThreshold",
    params_attr.aps.adaptive_prediction_threshold, 1 << 6,
    "Neighbouring attribute value difference that enables direct "
    "prediction. 8-bit value scaled to attribute bitdeph. "
    "Applies to transformType=0 only")

  ("intraLodSearchRange",
    params_attr.aps.intra_lod_search_range, -1,
    "Intra LoD nearest neighbor search range\n"
    " -1: Full-range")

  ("interLodSearchRange",
    params_attr.aps.inter_lod_search_range, -1,
    "Inter LoD nearest neighbor search range\n"
    " -1: Full-range")

  // NB: the underlying variable is in STV order.
  //     Conversion happens during argument sanitization.
  ("lod_neigh_bias",
    params_attr.aps.lodNeighBias, {1, 1, 1},
    "Attribute's (x,y,z) component intra prediction weights")

  ("lodDecimator",
    params_attr.aps.lod_decimation_type, LodDecimationMethod::kNone,
    "LoD decimation method:\n"
    " 0: none\n"
    " 1: periodic subsampling using lodSamplingPeriod\n"
    " 2: centroid subsampling using lodSamplingPeriod")

  // NOTE(review): the two concatenated help strings below join as
  // "...in directattribute prediction" (missing space); left unchanged.
  ("max_num_direct_predictors",
    params_attr.aps.max_num_direct_predictors, 3,
    "Maximum number of nearest neighbour candidates used in direct"
    "attribute prediction")

  ("direct_avg_predictor_disabled_flag",
    params_attr.aps.direct_avg_predictor_disabled_flag, false,
    "Disable average predictor")

  ("predWeightBlending",
    params_attr.aps.pred_weight_blending_enabled_flag, false,
    "Blend prediction weights according to neigbour distances. "
    "Applies to transformType=0 only")

  // NB: this parameter actually represents the number of refinement layers
  ("levelOfDetailCount",
    params_attr.aps.num_detail_levels_minus1, 1,
    "Attribute's number of levels of detail")

  ("dist2",
    params_attr.aps.dist2, 0,
    "Initial squared distance used in LoD generation")

  ("dist2PercentileEstimate",
    params_attr.encoder.dist2PercentileEstimate, 0.85f,
    "Percentile for dist2 estimation during nearest neighbour search")

  ("positionQuantizationScaleAdjustsDist2",
    params.positionQuantizationScaleAdjustsDist2, false,
    "Scale dist2 values by squared positionQuantizationScale")

  ("lodSamplingPeriod",
    params_attr.aps.lodSamplingPeriod, {4},
    "List of per LoD sampling periods used in LoD generation")

  ("intraLodPredictionSkipLayers",
    params_attr.aps.intra_lod_prediction_skip_layers, -1,
    "Number of finest detail levels that skip intra prediction\n"
    " -1: skip all (disables intra pred)")

  ("interComponentPredictionEnabled",
    params_attr.aps.inter_component_prediction_enabled_flag, false,
    "Use primary attribute component to predict values of subsequent "
    "components")

  ("lastComponentPredictionEnabled",
    params_attr.aps.last_component_prediction_enabled_flag, true,
    "Use second attribute component to predict value of the final component")

  ("canonical_point_order_flag",
    params_attr.aps.canonical_point_order_flag, false,
    "Enable skipping morton sort in case of number of LoD equal to 1")

  ("spherical_coord_flag",
    params_attr.aps.spherical_coord_flag, false,
    "Code attributes in spherical domain")

  ("attrSphericalMaxLog2",
    params.encoder.attrSphericalMaxLog2, 0,
    "Override spherical coordinate normalisation factor")

  ("aps_scalable_enable_flag",
    params_attr.aps.scalable_lifting_enabled_flag, false,
    "Enable scalable attritube coding")

  ("max_neigh_range",
    // NB: this is adjusted by minus 1 after the arguments are parsed
    params_attr.aps.max_neigh_range_minus1, 5,
    "maximum nearest neighbour range for scalable lifting")

  ("qp",
    // NB: this is adjusted with minus 4 after the arguments are parsed
    params_attr.aps.init_qp_minus4, 4,
    "Attribute's luma quantisation parameter")

  ("qpChromaOffset",
    params_attr.aps.aps_chroma_qp_offset, 0,
    "Attribute's chroma quantisation parameter offset (relative to luma)")

  ("aps_slice_qp_deltas_present_flag",
    params_attr.aps.aps_slice_qp_deltas_present_flag, false,
    "Enable signalling of per-slice QP values")

  ("qpLayerOffsetsLuma",
    params_attr.encoder.abh.attr_layer_qp_delta_luma, {},
    "Attribute's per layer luma QP offsets")

  ("qpLayerOffsetsChroma",
    params_attr.encoder.abh.attr_layer_qp_delta_chroma, {},
    "Attribute's per layer chroma QP offsets")

  ("quantNeighWeight",
    params_attr.aps.quant_neigh_weight, {16, 8, 4},
    "Factors used to derive quantization weights (transformType=1)")

  // This section is just dedicated to attribute recolouring (encoder only).
  // parameters are common to all attributes.
  (po::Section("Recolouring"))

  ("recolourSearchRange", params.encoder.recolour.searchRange, 1, "")

  ("recolourNumNeighboursFwd", params.encoder.recolour.numNeighboursFwd, 8, "")

  ("recolourNumNeighboursBwd", params.encoder.recolour.numNeighboursBwd, 1, "")

  ("recolourUseDistWeightedAvgFwd",
    params.encoder.recolour.useDistWeightedAvgFwd, true, "")

  ("recolourUseDistWeightedAvgBwd",
    params.encoder.recolour.useDistWeightedAvgBwd, true, "")

  ("recolourSkipAvgIfIdenticalSourcePointPresentFwd",
    params.encoder.recolour.skipAvgIfIdenticalSourcePointPresentFwd, true, "")

  ("recolourSkipAvgIfIdenticalSourcePointPresentBwd",
    params.encoder.recolour.skipAvgIfIdenticalSourcePointPresentBwd, false, "")

  ("recolourDistOffsetFwd", params.encoder.recolour.distOffsetFwd, 4., "")

  ("recolourDistOffsetBwd", params.encoder.recolour.distOffsetBwd, 4., "")

  ("recolourMaxGeometryDist2Fwd",
    params.encoder.recolour.maxGeometryDist2Fwd, 1000., "")

  ("recolourMaxGeometryDist2Bwd",
    params.encoder.recolour.maxGeometryDist2Bwd, 1000., "")

  ("recolourMaxAttributeDist2Fwd",
    params.encoder.recolour.maxAttributeDist2Fwd, 1000., "")

  ("recolourMaxAttributeDist2Bwd",
    params.encoder.recolour.maxAttributeDist2Bwd, 1000., "")

  ;
  /* clang-format on */

  po::setDefaults(opts);
  po::ErrorReporter err;
  const list<const char*>& argv_unhandled =
    po::scanArgv(opts, argc, (const char**)argv, err);

  // anything the parser did not consume is only a warning, not an error
  for (const auto arg : argv_unhandled) {
    err.warn() << "Unhandled argument ignored: " << arg << "\n";
  }

  if (argc == 1 || print_help) {
    po::doHelp(std::cout, opts, 78);
    return false;
  }

  // set default output units (this works for the decoder too)
  if (params.outputUnitLength <= 0.)
    params.outputUnitLength = params.encoder.srcUnitLength;
  params.encoder.outputFpBits = params.outputFpBits;
  params.decoder.outputFpBits = params.outputFpBits;

  // sanitization applies to the encoder configuration only
  if (!params.isDecoder)
    sanitizeEncoderOpts(params, err);

  // check required arguments are specified
  if (!params.isDecoder && params.uncompressedDataPath.empty())
    err.error() << "uncompressedDataPath not set\n";

  if (params.isDecoder && params.reconstructedDataPath.empty())
    err.error() << "reconstructedDataPath not set\n";

  if (params.compressedStreamPath.empty())
    err.error() << "compressedStreamPath not set\n";

  // report the current configuration (only in the absence of errors so
  // that errors/warnings are more obvious and in the same place).
  if (err.is_errored)
    return false;

  // Dump the complete derived configuration
  cout << "+ Effective configuration parameters\n";

  po::dumpCfg(cout, opts, "General", 4);
  if (params.isDecoder) {
    po::dumpCfg(cout, opts, "Decoder", 4);
  } else {
    po::dumpCfg(cout, opts, "Coordinate system scaling", 4);
    po::dumpCfg(cout, opts, "Encoder", 4);
    po::dumpCfg(cout, opts, "Geometry", 4);
    po::dumpCfg(cout, opts, "Recolouring", 4);

    for (const auto& it : params.encoder.attributeIdxMap) {
      // NB: when dumping the config, opts references params_attr
      params_attr.desc = params.encoder.sps.attributeSets[it.second];
      params_attr.aps = params.encoder.aps[it.second];
      params_attr.encoder = params.encoder.attr[it.second];
      cout << "    " << it.first << "\n";
      po::dumpCfg(cout, opts, "Attributes", 8);
    }
  }

  cout << endl;

  return true;
}

//----------------------------------------------------------------------------

// NB: the body of this function continues beyond the end of this chunk.
void
sanitizeEncoderOpts(
  Parameters& params, df::program_options_lite::ErrorReporter& err)
{
  // Input scaling affects the definition of the source unit length.
  // eg, if the unit length of the source is 1m, scaling by 1000 generates
  // a cloud with unit length 1mm.
params.encoder.srcUnitLength /= params.inputScale; // global scale factor must be positive if (params.encoder.codedGeomScale > params.encoder.seqGeomScale) { err.warn() << "codingScale must be <= sequenceScale, adjusting\n"; params.encoder.codedGeomScale = params.encoder.seqGeomScale; } // fix the representation of various options params.encoder.gbh.geom_stream_cnt_minus1--; params.encoder.gps.geom_idcm_rate_minus1--; params.encoder.gps.geom_angular_azimuth_speed_minus1--; params.encoder.gps.neighbour_avail_boundary_log2_minus1 = std::max(0, params.encoder.gps.neighbour_avail_boundary_log2_minus1 - 1); for (auto& attr_sps : params.encoder.sps.attributeSets) { attr_sps.params.attr_scale_minus1--; } for (auto& attr_aps : params.encoder.aps) { attr_aps.init_qp_minus4 -= 4; attr_aps.num_pred_nearest_neighbours_minus1--; attr_aps.max_neigh_range_minus1--; } // Config options are absolute, but signalling is relative params.encoder.gbh.geom_qp_offset_intvl_log2_delta -= params.encoder.gps.geom_qp_offset_intvl_log2; // If idcm rate is configured as 0, disable idcm // NB: if user has requested less contrained idcm, warn if (params.encoder.gps.geom_idcm_rate_minus1 < 0) { if (params.encoder.gps.inferred_direct_coding_mode == 1) params.encoder.gps.inferred_direct_coding_mode = 0; } if (params.encoder.gps.geom_idcm_rate_minus1 < 31) { if (params.encoder.gps.inferred_direct_coding_mode > 1) { params.encoder.gps.geom_idcm_rate_minus1 = 31; err.warn() << "ignoring planarModeIdcmUse < 32: " "contradicts inferredDirectCodingMode > 1\n"; } } // convert coordinate systems if the coding order is different from xyz convertXyzToStv(&params.encoder.sps); convertXyzToStv(params.encoder.sps, &params.encoder.gps); for (auto& aps : params.encoder.aps) convertXyzToStv(params.encoder.sps, &aps); // Trisoup is enabled when a node size is specified // sanity: don't enable if only node size is 0. 
// todo(df): this needs to take into account slices where it is disabled if (params.encoder.trisoupNodeSizesLog2.size() == 1) if (params.encoder.trisoupNodeSizesLog2[0] < 2) params.encoder.trisoupNodeSizesLog2.clear(); for (auto trisoupNodeSizeLog2 : params.encoder.trisoupNodeSizesLog2) if (trisoupNodeSizeLog2 < 2) err.error() << "Trisoup node size must be greater than 1\n"; params.encoder.gps.trisoup_enabled_flag = !params.encoder.trisoupNodeSizesLog2.empty(); // Certain coding modes are not available when trisoup is enabled. // Disable them, and warn if set (they may be set as defaults). if (params.encoder.gps.trisoup_enabled_flag) { if (!params.encoder.gps.geom_unique_points_flag) err.warn() << "TriSoup geometry does not preserve duplicated points\n"; if (params.encoder.gps.inferred_direct_coding_mode) err.warn() << "TriSoup geometry is incompatable with IDCM\n"; params.encoder.gps.geom_unique_points_flag = true; params.encoder.gps.inferred_direct_coding_mode = 0; } // tweak qtbt generation when trisoup is /isn't enabled params.encoder.geom.qtbt.trisoupEnabled = params.encoder.gps.trisoup_enabled_flag; // Planar coding mode is not available for bytewise coding if (!params.encoder.gps.bitwise_occupancy_coding_flag) { if (params.encoder.gps.geom_planar_mode_enabled_flag) err.warn() << "Bytewise geometry coding does not support planar mode\n"; params.encoder.gps.geom_planar_mode_enabled_flag = false; } // support disabling attribute coding (simplifies configuration) if (params.disableAttributeCoding) { params.encoder.attributeIdxMap.clear(); params.encoder.sps.attributeSets.clear(); params.encoder.aps.clear(); } // fixup any per-attribute settings for (const auto& it : params.encoder.attributeIdxMap) { auto& attr_sps = params.encoder.sps.attributeSets[it.second]; auto& attr_aps = params.encoder.aps[it.second]; auto& attr_enc = params.encoder.attr[it.second]; // default values for attribute attr_sps.attr_instance_id = 0; auto& attrMeta = attr_sps.params; 
attrMeta.cicp_colour_primaries_idx = 2; attrMeta.cicp_transfer_characteristics_idx = 2; attrMeta.cicp_video_full_range_flag = true; attrMeta.cicpParametersPresent = false; attrMeta.attr_frac_bits = 0; attrMeta.scalingParametersPresent = false; // Enable scaling if a paramter has been set // - pre/post scaling is only currently supported for reflectance attrMeta.scalingParametersPresent = attrMeta.attr_offset || attrMeta.attr_scale_minus1 || attrMeta.attr_frac_bits; // todo(df): remove this hack when scaling is generalised if (it.first != "reflectance" && attrMeta.scalingParametersPresent) { err.warn() << it.first << ": scaling not supported, disabling\n"; attrMeta.scalingParametersPresent = 0; } if (it.first == "reflectance") { // Avoid wasting bits signalling chroma quant step size for reflectance attr_aps.aps_chroma_qp_offset = 0; attr_enc.abh.attr_layer_qp_delta_chroma.clear(); // There is no matrix for reflectace attrMeta.cicp_matrix_coefficients_idx = ColourMatrix::kUnspecified; attr_sps.attr_num_dimensions_minus1 = 0; attr_sps.attributeLabel = KnownAttributeLabel::kReflectance; } if (it.first == "color") { attr_sps.attr_num_dimensions_minus1 = 2; attr_sps.attributeLabel = KnownAttributeLabel::kColour; attrMeta.cicpParametersPresent = true; } // Assume that YCgCo is actually YCgCoR for now // This requires an extra bit to represent chroma (luma will have a // reduced range) if (attrMeta.cicp_matrix_coefficients_idx == ColourMatrix::kYCgCo) attr_sps.bitdepth++; // Extend the default attribute value to the correct width if present if (!attrMeta.attr_default_value.empty()) attrMeta.attr_default_value.resize( attr_sps.attr_num_dimensions_minus1 + 1, attrMeta.attr_default_value.back()); // In order to simplify specification of dist2 values, which are // depending on the scale of the coded point cloud, the following // adjust the dist2 values according to PQS. The user need only // specify the unquantised PQS value. 
if (params.positionQuantizationScaleAdjustsDist2) { auto delta = log2(params.encoder.codedGeomScale); attr_aps.dist2 = std::max(0, int32_t(std::round(attr_aps.dist2 + delta))); } // derive samplingPeriod values based on initial value if ( !attr_aps.lodParametersPresent() || (attr_aps.lod_decimation_type == LodDecimationMethod::kNone)) { attr_aps.lodSamplingPeriod.clear(); } else if (!attr_aps.lodSamplingPeriod.empty()) { auto i = attr_aps.lodSamplingPeriod.size(); attr_aps.lodSamplingPeriod.resize(attr_aps.num_detail_levels_minus1); // add any extra values as required for (; i < attr_aps.num_detail_levels_minus1; i++) attr_aps.lodSamplingPeriod[i] = attr_aps.lodSamplingPeriod[i - 1]; } if (attr_aps.attr_encoding == AttributeEncoding::kLiftingTransform) { attr_aps.adaptive_prediction_threshold = 0; attr_aps.intra_lod_prediction_skip_layers = -1; } // For RAHT, ensure that the unused lod count = 0 (prevents mishaps) if (attr_aps.attr_encoding == AttributeEncoding::kRAHTransform) { attr_aps.num_detail_levels_minus1 = 0; attr_aps.adaptive_prediction_threshold = 0; } if (!params.encoder.gps.geom_angular_mode_enabled_flag) { if (attr_aps.spherical_coord_flag) err.warn() << it.first << ".spherical_coord_flag=1 requires angularEnabled=1, " "disabling\n"; attr_aps.spherical_coord_flag = false; } } // convert floating point values of Lasers' Theta and H to fixed point if (params.encoder.gps.geom_angular_mode_enabled_flag) { if (params.encoder.numLasers == 0) err.error() << "numLasers must be at least 1\n"; for (auto val : params.encoder.lasersTheta) { int one = 1 << 18; params.encoder.gps.angularTheta.push_back(round(val * one)); } for (auto val : params.encoder.lasersZ) { int one = 1 << 3; auto scale = params.encoder.codedGeomScale; if (params.encoder.gps.predgeom_enabled_flag) scale = params.encoder.codedGeomScale / params.encoder.seqGeomScale; params.encoder.gps.angularZ.push_back(round(val * scale * one)); } if (params.encoder.gps.angularTheta.size() != 
params.encoder.numLasers) err.error() << "lasersZ.size() != numLasers\n"; if (params.encoder.gps.angularZ.size() != params.encoder.numLasers) err.error() << "lasersTheta.size() != numLasers\n"; if ( params.encoder.gps.angularNumPhiPerTurn.size() != params.encoder.numLasers) err.error() << "lasersNumPhiPerTurn.size() != numLasers\n"; if (params.encoder.gps.qtbt_enabled_flag) { params.encoder.geom.qtbt.angularMaxNodeMinDimLog2ToSplitV = std::max<int>(0, 8 + log2(params.encoder.codedGeomScale)); params.encoder.geom.qtbt.angularMaxDiffToSplitZ = std::max<int>(0, 1 + log2(params.encoder.codedGeomScale)); } if (params.encoder.gps.predgeom_enabled_flag) { auto& gps = params.encoder.gps; int maxSpeed = 1 << (gps.geom_angular_azimuth_scale_log2_minus11 + 12); if (params.encoder.gps.geom_angular_azimuth_speed_minus1 + 1 > maxSpeed) err.error() << "positionAzimuthSpeed > max (" << maxSpeed << ")\n"; } } // tweak qtbt when angular is / isn't enabled params.encoder.geom.qtbt.angularTweakEnabled = params.encoder.gps.geom_angular_mode_enabled_flag; if (!params.encoder.geom.qtbt.angularTweakEnabled) { // NB: these aren't used in this condition params.encoder.geom.qtbt.angularMaxNodeMinDimLog2ToSplitV = 0; params.encoder.geom.qtbt.angularMaxDiffToSplitZ = 0; } // sanity checks if (params.encoder.gps.geom_qp_multiplier_log2 & ~3) err.error() << "positionQpMultiplierLog2 must be in the range 0..3\n"; if (!params.encoder.gps.geom_angular_mode_enabled_flag) { if (params.encoder.gps.planar_buffer_disabled_flag) { params.encoder.gps.planar_buffer_disabled_flag = 0; err.warn() << "ignoring planarBufferDisabled without angularEnabled\n"; } } // The following featues depend upon the occupancy atlas if (!params.encoder.gps.neighbour_avail_boundary_log2_minus1) { if (params.encoder.gps.adjacent_child_contextualization_enabled_flag) err.warn() << "ignoring adjacentChildContextualization when" " neighbourAvailBoundaryLog2=0\n"; if (params.encoder.gps.intra_pred_max_node_size_log2) err.warn() << 
"ignoring intra_pred_max_node_size_log2 when" " neighbourAvailBoundaryLog2=0\n"; params.encoder.gps.adjacent_child_contextualization_enabled_flag = 0; params.encoder.gps.intra_pred_max_node_size_log2 = 0; } if ( params.encoder.partition.sliceMaxPoints < params.encoder.partition.sliceMinPoints) err.error() << "sliceMaxPoints must be greater than or equal to sliceMinPoints\n"; for (const auto& it : params.encoder.attributeIdxMap) { const auto& attr_sps = params.encoder.sps.attributeSets[it.second]; const auto& attr_aps = params.encoder.aps[it.second]; auto& attr_enc = params.encoder.attr[it.second]; if (it.first == "color") { if ( attr_enc.abh.attr_layer_qp_delta_luma.size() != attr_enc.abh.attr_layer_qp_delta_chroma.size()) { err.error() << it.first << ".qpLayerOffsetsLuma length != .qpLayerOffsetsChroma\n"; } } if (attr_sps.bitdepth > 16) err.error() << it.first << ".bitdepth must be less than 17\n"; if (attr_aps.lodParametersPresent()) { int lod = attr_aps.num_detail_levels_minus1; if (lod > 255 || lod < 0) { err.error() << it.first << ".levelOfDetailCount must be in the range [0,255]\n"; } // if zero, values are derived automatically if (attr_aps.dist2 < 0 || attr_aps.dist2 > 20) { err.error() << it.first << ".dist2 must be in the range [0,20]\n"; } if (lod > 0 && attr_aps.canonical_point_order_flag) { err.error() << it.first << "when levelOfDetailCount > 0, " "canonicalPointOrder must be 0\n"; } if ( attr_aps.attr_encoding == AttributeEncoding::kPredictingTransform && lod == 0 && attr_aps.intra_lod_prediction_skip_layers != 0) { err.error() << "when transformType == 0 (Pred) and levelOfDetailCount == 0, " "intraLodPredictionSkipLayers must be 0\n"; } if ( (attr_aps.lod_decimation_type != LodDecimationMethod::kNone) && attr_aps.lodSamplingPeriod.empty()) { err.error() << it.first << ".lodSamplingPeriod must contain at least one entry\n"; } for (auto samplingPeriod : attr_aps.lodSamplingPeriod) { if (samplingPeriod < 2) err.error() << it.first << 
".lodSamplingPeriod values must be > 1\n"; } if (attr_aps.adaptive_prediction_threshold < 0) { err.error() << it.first << ".adaptivePredictionThreshold must be positive\n"; } if ( attr_aps.num_pred_nearest_neighbours_minus1 >= kAttributePredictionMaxNeighbourCount) { err.error() << it.first << ".numberOfNearestNeighborsInPrediction must be <= " << kAttributePredictionMaxNeighbourCount << "\n"; } if (attr_aps.scalable_lifting_enabled_flag) { if (attr_aps.lod_decimation_type != LodDecimationMethod::kNone) { err.error() << it.first << ".lod_decimation_type must be 0\n"; } if (params.encoder.gps.trisoup_enabled_flag) { err.error() << it.first << " trisoup_enabled_flag must be disabled\n"; } if (params.encoder.gps.geom_qp_multiplier_log2 != 3) err.error() << it.first << " positionQpMultiplierLog2 must be 3\n"; } } if (attr_aps.init_qp_minus4 < 0 || attr_aps.init_qp_minus4 + 4 > 51) err.error() << it.first << ".qp must be in the range [4,51]\n"; if (std::abs(attr_aps.aps_chroma_qp_offset) > 51 - 4) { err.error() << it.first << ".qpChromaOffset must be in the range [-47,47]\n"; } } } //============================================================================ SequenceEncoder::SequenceEncoder(Parameters* params) : SequenceCodec(params) { // determine the naming (ordering) of ply properties _plyAttrNames.position = axisOrderToPropertyNames(params->encoder.sps.geometry_axis_order); // NB: this is the raw origin before the encoder tweaks it _angularOrigin = params->encoder.gps.gpsAngularOrigin; } //---------------------------------------------------------------------------- int SequenceEncoder::compress(Stopwatch* clock) { bytestreamFile.open(params->compressedStreamPath, ios::binary); if (!bytestreamFile.is_open()) { return -1; } const int lastFrameNum = params->firstFrameNum + params->frameCount; for (frameNum = params->firstFrameNum; frameNum < lastFrameNum; frameNum++) { if (compressOneFrame(clock)) return -1; } std::cout << "Total bitstream size " << 
bytestreamFile.tellp() << " B\n"; bytestreamFile.close(); return 0; } //---------------------------------------------------------------------------- int SequenceEncoder::compressOneFrame(Stopwatch* clock) { std::string srcName{expandNum(params->uncompressedDataPath, frameNum)}; PCCPointSet3 pointCloud; if ( !ply::read(srcName, _plyAttrNames, params->inputScale, pointCloud) || pointCloud.getPointCount() == 0) { cout << "Error: can't open input file!" << endl; return -1; } // Some evaluations wish to scan the points in azimuth order to simulate // real-time acquisition (since the input has lost its original order). // NB: because this is trying to emulate the input order, binning is disabled if (params->sortInputByAzimuth) sortByAzimuth( pointCloud, 0, pointCloud.getPointCount(), 0., _angularOrigin); // Sanitise the input point cloud // todo(df): remove the following with generic handling of properties bool codeColour = params->encoder.attributeIdxMap.count("color"); if (!codeColour) pointCloud.removeColors(); assert(codeColour == pointCloud.hasColors()); bool codeReflectance = params->encoder.attributeIdxMap.count("reflectance"); if (!codeReflectance) pointCloud.removeReflectances(); assert(codeReflectance == pointCloud.hasReflectances()); clock->start(); if (params->convertColourspace) convertFromGbr(params->encoder.sps.attributeSets, pointCloud); scaleAttributesForInput(params->encoder.sps.attributeSets, pointCloud); // The reconstructed point cloud CloudFrame recon; auto* reconPtr = params->reconstructedDataPath.empty() ? nullptr : &recon; auto bytestreamLenFrameStart = bytestreamFile.tellp(); int ret = encoder.compress(pointCloud, &params->encoder, this, reconPtr); if (ret) { cout << "Error: can't compress point cloud!" 
<< endl; return -1; } auto bytestreamLenFrameEnd = bytestreamFile.tellp(); int frameLen = bytestreamLenFrameEnd - bytestreamLenFrameStart; std::cout << "Total frame size " << frameLen << " B" << std::endl; clock->stop(); if (reconPtr) writeOutputFrame(params->reconstructedDataPath, {}, recon, recon.cloud); return 0; } //---------------------------------------------------------------------------- void SequenceEncoder::onOutputBuffer(const PayloadBuffer& buf) { writeTlv(buf, bytestreamFile); } //---------------------------------------------------------------------------- void SequenceEncoder::onPostRecolour(const PCCPointSet3& cloud) { if (params->postRecolorPath.empty()) { return; } // todo(df): don't allocate if conversion is not required PCCPointSet3 tmpCloud(cloud); CloudFrame frame; frame.setParametersFrom(params->encoder.sps, params->encoder.outputFpBits); frame.cloud = cloud; frame.frameNum = frameNum - params->firstFrameNum; writeOutputFrame(params->postRecolorPath, {}, frame, tmpCloud); } //============================================================================ SequenceDecoder::SequenceDecoder(Parameters* params) : SequenceCodec(params), decoder(params->decoder) {} //---------------------------------------------------------------------------- int SequenceDecoder::decompress(Stopwatch* clock) { ifstream fin(params->compressedStreamPath, ios::binary); if (!fin.is_open()) { return -1; } this->clock = clock; clock->start(); PayloadBuffer buf; while (true) { PayloadBuffer* buf_ptr = &buf; readTlv(fin, &buf); // at end of file (or other error), flush decoder if (!fin) buf_ptr = nullptr; if (decoder.decompress(buf_ptr, this)) { cout << "Error: can't decompress point cloud!" 
<< endl; return -1; } if (!buf_ptr) break; } fin.clear(); fin.seekg(0, ios_base::end); std::cout << "Total bitstream size " << fin.tellg() << " B" << std::endl; clock->stop(); return 0; } //---------------------------------------------------------------------------- void SequenceDecoder::onOutputCloud(const CloudFrame& frame) { clock->stop(); // copy the point cloud in order to modify it according to the output options PCCPointSet3 pointCloud(frame.cloud); writeOutputFrame( params->reconstructedDataPath, params->preInvScalePath, frame, pointCloud); clock->start(); } //============================================================================ double SequenceCodec::outputScale(const CloudFrame& frame) const { switch (params->outputSystem) { case OutputSystem::kConformance: return 1.; case OutputSystem::kExternal: // The scaling converts from the frame's unit length to configured output. // In terms of specification this is the external coordinate system. return frame.outputUnitLength / params->outputUnitLength; } } //---------------------------------------------------------------------------- Vec3<double> SequenceCodec::outputOrigin(const CloudFrame& frame) const { switch (params->outputSystem) { case OutputSystem::kConformance: return 0.; case OutputSystem::kExternal: return frame.outputOrigin * outputScale(frame); } } //---------------------------------------------------------------------------- void SequenceCodec::writeOutputFrame( const std::string& postInvScalePath, const std::string& preInvScalePath, const CloudFrame& frame, PCCPointSet3& cloud) { if (postInvScalePath.empty() && preInvScalePath.empty()) return; scaleAttributesForOutput(frame.attrDesc, cloud); if (params->convertColourspace) convertToGbr(frame.attrDesc, cloud); // the order of the property names must be determined from the sps ply::PropertyNameMap attrNames; attrNames.position = axisOrderToPropertyNames(frame.geometry_axis_order); // offset frame number int frameNum = frame.frameNum + 
params->firstFrameNum; // Dump the decoded colour using the pre inverse scaled geometry if (!preInvScalePath.empty()) { std::string filename{expandNum(preInvScalePath, frameNum)}; ply::write(cloud, attrNames, 1.0, 0.0, filename, !params->outputBinaryPly); } auto plyScale = outputScale(frame) / (1 << frame.outputFpBits); auto plyOrigin = outputOrigin(frame); std::string decName{expandNum(postInvScalePath, frameNum)}; if (!ply::write( cloud, attrNames, plyScale, plyOrigin, decName, !params->outputBinaryPly)) { cout << "Error: can't open output file!" << endl; } } //============================================================================ const AttributeDescription* findColourAttrDesc(const std::vector<AttributeDescription>& attrDescs) { // todo(df): don't assume that there is only one colour attribute in the sps for (const auto& desc : attrDescs) { if (desc.attributeLabel == KnownAttributeLabel::kColour) return &desc; } return nullptr; } //---------------------------------------------------------------------------- void convertToGbr( const std::vector<AttributeDescription>& attrDescs, PCCPointSet3& cloud) { const AttributeDescription* attrDesc = findColourAttrDesc(attrDescs); if (!attrDesc) return; switch (attrDesc->params.cicp_matrix_coefficients_idx) { case ColourMatrix::kBt709: convertYCbCrBt709ToGbr(cloud); break; case ColourMatrix::kYCgCo: // todo(df): select YCgCoR vs YCgCo // NB: bitdepth is the transformed bitdepth, not the source convertYCgCoRToGbr(attrDesc->bitdepth - 1, cloud); break; default: break; } } //---------------------------------------------------------------------------- void convertFromGbr( const std::vector<AttributeDescription>& attrDescs, PCCPointSet3& cloud) { const AttributeDescription* attrDesc = findColourAttrDesc(attrDescs); if (!attrDesc) return; switch (attrDesc->params.cicp_matrix_coefficients_idx) { case ColourMatrix::kBt709: convertGbrToYCbCrBt709(cloud); break; case ColourMatrix::kYCgCo: // todo(df): select YCgCoR vs YCgCo // 
NB: bitdepth is the transformed bitdepth, not the source convertGbrToYCgCoR(attrDesc->bitdepth - 1, cloud); break; default: break; } } //============================================================================ const AttributeDescription* findReflAttrDesc(const std::vector<AttributeDescription>& attrDescs) { // todo(df): don't assume that there is only one in the sps for (const auto& desc : attrDescs) { if (desc.attributeLabel == KnownAttributeLabel::kReflectance) return &desc; } return nullptr; } //---------------------------------------------------------------------------- struct AttrFwdScaler { template<typename T> T operator()(const AttributeParameters& params, T val) const { int scale = params.attr_scale_minus1 + 1; return ((val - params.attr_offset) << params.attr_frac_bits) / scale; } }; //---------------------------------------------------------------------------- struct AttrInvScaler { template<typename T> T operator()(const AttributeParameters& params, T val) const { int scale = params.attr_scale_minus1 + 1; return ((val * scale) >> params.attr_frac_bits) + params.attr_offset; } }; //---------------------------------------------------------------------------- template<typename Op> void scaleAttributes( const std::vector<AttributeDescription>& attrDescs, PCCPointSet3& cloud, Op scaler) { // todo(df): extend this to other attributes const AttributeDescription* attrDesc = findReflAttrDesc(attrDescs); if (!attrDesc || !attrDesc->params.scalingParametersPresent) return; auto& params = attrDesc->params; // Parameters present, but nothing to do bool unityScale = !params.attr_scale_minus1 && !params.attr_frac_bits; if (unityScale && !params.attr_offset) return; const auto pointCount = cloud.getPointCount(); for (size_t i = 0; i < pointCount; ++i) { auto& val = cloud.getReflectance(i); val = scaler(params, val); } } //---------------------------------------------------------------------------- void SequenceCodec::scaleAttributesForInput( const 
std::vector<AttributeDescription>& attrDescs, PCCPointSet3& cloud) { scaleAttributes(attrDescs, cloud, AttrFwdScaler()); } //---------------------------------------------------------------------------- void SequenceCodec::scaleAttributesForOutput( const std::vector<AttributeDescription>& attrDescs, PCCPointSet3& cloud) { scaleAttributes(attrDescs, cloud, AttrInvScaler()); } //============================================================================
66,350
31.382138
80
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/TMC3.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef TMC3_h #define TMC3_h #define _CRT_SECURE_NO_WARNINGS #include "TMC3Config.h" #include "pcc_chrono.h" struct Parameters; typedef pcc::chrono::Stopwatch<pcc::chrono::utime_inc_children_clock> Stopwatch; bool ParseParameters(int argc, char* argv[], Parameters& params); int Compress(Parameters& params, Stopwatch&); int Decompress(Parameters& params, Stopwatch&); #endif /* TMC3_h */
2,224
39.454545
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/attribute_raw.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2021, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include "hls.h" #include "PayloadBuffer.h" #include "PCCPointSet.h" namespace pcc { //============================================================================ class AttrRawEncoder { public: static void encode( const SequenceParameterSet& sps, const AttributeDescription& desc, const AttributeParameterSet& attr_aps, AttributeBrickHeader& abh, PCCPointSet3& cloud, PayloadBuffer* payload); }; //============================================================================ class AttrRawDecoder { public: static void decode( const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const char* payload, size_t payloadLen, PCCPointSet3& cloud); }; //============================================================================ } // namespace pcc
2,685
35.794521
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/attribute_raw_decoder.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2021, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "attribute_raw.h" #include "BitReader.h" #include "io_hls.h" namespace pcc { //============================================================================ void AttrRawDecoder::decode( const AttributeDescription& desc, const AttributeParameterSet& aps, const AttributeBrickHeader& abh, const char* payload, size_t payloadLen, PCCPointSet3& cloud) { auto bs = makeBitReader(payload, payload + payloadLen); // todo(df): update PCCPointSet3 to support variable length attributes int valueBits = desc.bitdepth; // todo(df): update to correctly map attribute types if (desc.attr_num_dimensions_minus1 == 0) { for (size_t i = 0; i < cloud.getPointCount(); i++) { if (aps.raw_attr_variable_len_flag) { int raw_attr_component_length; bs.readUn(8, &raw_attr_component_length); valueBits = 8 * raw_attr_component_length; } bs.readUn(valueBits, &cloud.getReflectance(i)); } } else if (desc.attr_num_dimensions_minus1 == 2) { for (size_t i = 0; i < cloud.getPointCount(); i++) { auto& value = cloud.getColor(i); for (int c = 0; c < 3; c++) { if (aps.raw_attr_variable_len_flag) { int raw_attr_component_length; bs.readUn(8, &raw_attr_component_length); valueBits = 8 * raw_attr_component_length; } bs.readUn(valueBits, &value[c]); } } } else { assert( desc.attr_num_dimensions_minus1 == 0 || desc.attr_num_dimensions_minus1 == 2); } bs.byteAlign(); } //============================================================================ } /* namespace pcc */
3,454
36.150538
78
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/attribute_raw_encoder.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2021, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "attribute_raw.h" #include "BitWriter.h" #include "io_hls.h" namespace pcc { //============================================================================ void AttrRawEncoder::encode( const SequenceParameterSet& sps, const AttributeDescription& desc, const AttributeParameterSet& attr_aps, AttributeBrickHeader& abh, PCCPointSet3& cloud, PayloadBuffer* payload) { write(sps, attr_aps, abh, payload); auto bs = makeBitWriter(std::back_inserter(*payload)); // todo(df): add support for variable length coding assert(!attr_aps.raw_attr_variable_len_flag); int valueBits = desc.bitdepth; // todo(df): update to correctly map attribute types if (desc.attr_num_dimensions_minus1 == 0) { for (size_t i = 0; i < cloud.getPointCount(); i++) { bs.writeUn(valueBits, cloud.getReflectance(i)); } } else if (desc.attr_num_dimensions_minus1 == 2) { for (size_t i = 0; i < cloud.getPointCount(); i++) { auto& value = cloud.getColor(i); for (int c = 0; c < 3; c++) bs.writeUn(valueBits, value[c]); } } else { assert( desc.attr_num_dimensions_minus1 == 0 || desc.attr_num_dimensions_minus1 == 2); } bs.byteAlign(); } //============================================================================ } /* namespace pcc */
3,132
36.297619
78
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/colourspace.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <cstdint> #include "PCCMath.h" namespace pcc { //============================================================================ template<template<typename> class T, typename Tv> T<Tv> transformGbrToYCbCrBt709(T<Tv>& gbr) { const Tv g = gbr[0]; const Tv b = gbr[1]; const Tv r = gbr[2]; const double y = PCCClip(std::round(0.212600 * r + 0.715200 * g + 0.072200 * b), 0., 255.); const double u = PCCClip( std::round(-0.114572 * r - 0.385428 * g + 0.5 * b + 128.0), 0., 255.); const double v = PCCClip( std::round(0.5 * r - 0.454153 * g - 0.045847 * b + 128.0), 0., 255.); return {Tv(y), Tv(u), Tv(v)}; } //============================================================================ template<template<typename> class T, typename Tv> T<Tv> transformYCbCrBt709ToGbr(T<Tv>& ycbcr) { const double y1 = ycbcr[0]; const double u1 = ycbcr[1] - 128.0; const double v1 = ycbcr[2] - 128.0; const double r = PCCClip(round(y1 /*- 0.00000 * u1*/ + 1.57480 * v1), 0.0, 255.0); const double g = PCCClip(round(y1 - 0.18733 * u1 - 0.46813 * v1), 0.0, 255.0); const double b = PCCClip(round(y1 + 1.85563 * u1 /*+ 0.00000 * v1*/), 0.0, 255.0); return {Tv(g), Tv(b), Tv(r)}; } //============================================================================ template<template<typename> class T, typename Tv> T<Tv> transformGbrToYCgCoR(int bitDepth, T<Tv>& gbr) { int g = gbr[0]; int b = gbr[1]; int r = gbr[2]; int co = r - b; int t = b + (co >> 1); int cg = g - t; int y = t + (cg >> 1); int offset = 1 << bitDepth; // NB: YCgCoR needs extra 1-bit for chroma return {Tv(y), Tv(cg + offset), Tv(co + offset)}; } //============================================================================ template<template<typename> class T, typename Tv> T<Tv> transformYCgCoRToGbr(int bitDepth, T<Tv>& ycgco) { int offset = 1 << bitDepth; int y0 = ycgco[0]; int cg = ycgco[1] - offset; int co = ycgco[2] - offset; int t = y0 - (cg >> 1); int g = cg + t; int b = t - (co >> 1); int r = co + b; int maxVal = (1 << 
bitDepth) - 1; g = PCCClip(g, 0, maxVal); b = PCCClip(b, 0, maxVal); r = PCCClip(r, 0, maxVal); return {Tv(g), Tv(b), Tv(r)}; } //============================================================================ } // namespace pcc
4,166
31.302326
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/constants.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <cstdint> namespace pcc { //============================================================================ const uint32_t kAttributePredictionMaxNeighbourCount = 3; const uint32_t kAttributeResidualAlphabetSize = 255; const uint32_t kFixedPointWeightShift = 8; const uint32_t kFixedPointAttributeShift = 8; //============================================================================ } // namespace pcc
2,257
42.423077
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/coordinate_conversion.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2020, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "coordinate_conversion.h" #include "geometry_octree.h" namespace pcc { //============================================================================ Box3<int> convertXyzToRpl( Vec3<int> laserOrigin, const int* laserThetaList, int numTheta, const Vec3<int>* begin, const Vec3<int>* end, Vec3<int>* dst) { Box3<int> bbox(INT32_MAX, INT32_MIN); for (auto it = begin; it != end; it++, dst++) { auto pos = *it - laserOrigin; auto laser = findLaser(pos, laserThetaList, numTheta); int64_t xLaser = pos[0] << 8; int64_t yLaser = pos[1] << 8; (*dst)[0] = isqrt(xLaser * xLaser + yLaser * yLaser) >> 8; (*dst)[1] = (iatan2(yLaser, xLaser) + 3294199) >> 8; (*dst)[2] = laser; bbox.insert(*dst); } return bbox; } //---------------------------------------------------------------------------- Vec3<int> normalisedAxesWeights(Box3<int>& bbox, int forcedMaxLog2) { auto width = bbox.max - bbox.min + 1; auto maxWidth = width.max(); // Otherwise morton code would overflow assert(maxWidth < 1 << (21 + 8)); bool underflow = false; if (forcedMaxLog2 > 0) { for (int k = 0; k < 3; k++) if (width[k] > 1 << (forcedMaxLog2 + 8)) { std::cerr << "Warning: normalizedAxesWeight[" << k << "] underflow\n"; underflow = true; } while (maxWidth > 1 << (forcedMaxLog2 + 8)) ++forcedMaxLog2; if (underflow) std::cerr << "Using " << forcedMaxLog2 << " scaling instead\n"; maxWidth = 1 << forcedMaxLog2; } maxWidth = std::min(1 << 21, maxWidth); Vec3<int> axesWeight; for (int k = 0; k < 3; k++) axesWeight[k] = (maxWidth << 8) / width[k]; return axesWeight; } //---------------------------------------------------------------------------- void offsetAndScale( const Vec3<int>& minPos, const Vec3<int>& axisWeight, Vec3<int>* begin, Vec3<int>* end) { for (auto it = begin; it != end; it++) *it = times((*it - minPos), axisWeight) + (1 << 7) >> 8; } //============================================================================ } // namespace pcc
3,917
30.853659
78
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/coordinate_conversion.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2020, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include "PCCMath.h" namespace pcc { //============================================================================ // Converts points in the range [begin, end) from cartesian co-ordinates to // pseudo spherical coordinates (r, phi, laser), storing the results in dst. Box3<int> convertXyzToRpl( Vec3<int> laserOrigin, const int* laserThetaList, int numTheta, const Vec3<int>* begin, const Vec3<int>* end, Vec3<int>* dst); // Determines axes weights to normalise the axes of a bounding box. // forcedMaxLog2 > 0 allows a normalisation factor to be specified. Vec3<int> normalisedAxesWeights(Box3<int>& bbox, int forcedMaxLog2); // Offsets and weights points in the range [begin, end). void offsetAndScale( const Vec3<int>& minPos, const Vec3<int>& axisWeight, Vec3<int>* begin, Vec3<int>* end); //============================================================================ } // namespace pcc
2,754
40.119403
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/decoder.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "PCCTMC3Decoder.h" #include <algorithm> #include <cassert> #include <string> #include "AttributeCommon.h" #include "PayloadBuffer.h" #include "PCCPointSet.h" #include "coordinate_conversion.h" #include "geometry.h" #include "geometry_octree.h" #include "geometry_predictive.h" #include "hls.h" #include "io_hls.h" #include "io_tlv.h" #include "pcc_chrono.h" #include "osspecific.h" namespace pcc { //============================================================================ PCCTMC3Decoder3::PCCTMC3Decoder3(const DecoderParams& params) : _params(params) { init(); } //---------------------------------------------------------------------------- void PCCTMC3Decoder3::init() { _firstSliceInFrame = true; _outputInitialized = false; _suppressOutput = 1; _sps = nullptr; _gps = nullptr; _spss.clear(); _gpss.clear(); _apss.clear(); _ctxtMemOctreeGeom.reset(new GeometryOctreeContexts); _ctxtMemPredGeom.reset(new PredGeomContexts); } //---------------------------------------------------------------------------- PCCTMC3Decoder3::~PCCTMC3Decoder3() = default; //============================================================================ static bool payloadStartsNewSlice(PayloadType type) { return type == PayloadType::kGeometryBrick || type == PayloadType::kFrameBoundaryMarker; } //============================================================================ bool PCCTMC3Decoder3::dectectFrameBoundary(const PayloadBuffer* buf) { // This may be from either geometry brick or attr param inventory int frameCtrLsb; switch (buf->type) { case PayloadType::kFrameBoundaryMarker: { // the frame boundary data marker explcitly indicates a boundary // However, this implementation doesn't flush the output, rather // this happens naturally when the frame boundary is detected by // a change in frameCtr. 
auto fbm = parseFrameBoundaryMarker(*buf); frameCtrLsb = fbm.fbdu_frame_ctr_lsb; break; } case PayloadType::kGeometryBrick: { activateParameterSets(parseGbhIds(*buf)); auto gbh = parseGbh(*_sps, *_gps, *buf, nullptr, nullptr); frameCtrLsb = gbh.frame_ctr_lsb; break; } case PayloadType::kGeneralizedAttrParamInventory: { auto apih = parseAttrParamInventoryHdr(*buf); activateParameterSets(apih); // todo(conf): check lsb_bits is same as sps frameCtrLsb = apih.attr_param_frame_ctr_lsb; break; } // other data units don't indicate a boundary default: return false; } auto bdry = _frameCtr.isDifferentFrame(frameCtrLsb, _sps->frame_ctr_bits); _frameCtr.update(frameCtrLsb, _sps->frame_ctr_bits); return bdry; } //============================================================================ void PCCTMC3Decoder3::outputCurrentCloud(PCCTMC3Decoder3::Callbacks* callback) { if (_suppressOutput) return; std::swap(_outCloud.cloud, _accumCloud); // Apply global scaling to output for integer conformance // todo: add other output scaling modes // NB: if accumCloud is reused for future inter-prediction, global scaling // must be applied to a copy. 
scaleGeometry(_outCloud.cloud, _sps->globalScale, _outCloud.outputFpBits); callback->onOutputCloud(_outCloud); std::swap(_outCloud.cloud, _accumCloud); _accumCloud.clear(); } //============================================================================ void PCCTMC3Decoder3::startFrame() { _outputInitialized = true; _firstSliceInFrame = true; _outCloud.frameNum = _frameCtr; // the following could be set once when the SPS is discovered _outCloud.setParametersFrom(*_sps, _params.outputFpBits); } //============================================================================ int PCCTMC3Decoder3::decompress( const PayloadBuffer* buf, PCCTMC3Decoder3::Callbacks* callback) { // Starting a new geometry brick/slice/tile, transfer any // finished points to the output accumulator if (!buf || payloadStartsNewSlice(buf->type)) { if (size_t numPoints = _currentPointCloud.getPointCount()) { for (size_t i = 0; i < numPoints; i++) for (int k = 0; k < 3; k++) _currentPointCloud[i][k] += _sliceOrigin[k]; _accumCloud.append(_currentPointCloud); } } if (!buf) { // flush decoder, output pending cloud if any outputCurrentCloud(callback); return 0; } // process a frame boundary // - this may update FrameCtr // - this will activate the sps for GeometryBrick and AttrParamInventory // - after outputing the current frame, the output must be reinitialized if (dectectFrameBoundary(buf)) { outputCurrentCloud(callback); _outputInitialized = false; } // process the buffer switch (buf->type) { case PayloadType::kSequenceParameterSet: { auto sps = parseSps(*buf); convertXyzToStv(&sps); storeSps(std::move(sps)); return 0; } case PayloadType::kGeometryParameterSet: { auto gps = parseGps(*buf); // HACK: assume that an SPS has been received prior to the GPS. // This is not required, and parsing of the GPS is independent of the SPS. 
// todo(df): move GPS fixup to activation process _sps = &_spss.cbegin()->second; convertXyzToStv(*_sps, &gps); storeGps(std::move(gps)); return 0; } case PayloadType::kAttributeParameterSet: { auto aps = parseAps(*buf); // HACK: assume that an SPS has been received prior to the APS. // This is not required, and parsing of the APS is independent of the SPS. // todo(df): move APS fixup to activation process _sps = &_spss.cbegin()->second; convertXyzToStv(*_sps, &aps); storeAps(std::move(aps)); return 0; } case PayloadType::kFrameBoundaryMarker: if (!_outputInitialized) startFrame(); return 0; case PayloadType::kGeometryBrick: if (!_outputInitialized) startFrame(); // avoid accidents with stale attribute decoder on next slice _attrDecoder.reset(); // Avoid dropping an actual frame _suppressOutput = false; return decodeGeometryBrick(*buf); case PayloadType::kAttributeBrick: decodeAttributeBrick(*buf); return 0; case PayloadType::kConstantAttribute: decodeConstantAttribute(*buf); return 0; case PayloadType::kTileInventory: // NB: the tile inventory is decoded in xyz order. It may need // conversion if it is used (it currently isn't). 
storeTileInventory(parseTileInventory(*buf)); return 0; case PayloadType::kGeneralizedAttrParamInventory: { if (!_outputInitialized) startFrame(); auto hdr = parseAttrParamInventoryHdr(*buf); assert(hdr.attr_param_sps_attr_idx < int(_sps->attributeSets.size())); auto& attrDesc = _outCloud.attrDesc[hdr.attr_param_sps_attr_idx]; parseAttrParamInventory(attrDesc, *buf, attrDesc.params); return 0; } case PayloadType::kUserData: parseUserData(*buf); return 0; } // todo(df): error, unhandled payload type return 1; } //-------------------------------------------------------------------------- void PCCTMC3Decoder3::storeSps(SequenceParameterSet&& sps) { // todo(df): handle replacement semantics _spss.emplace(std::make_pair(sps.sps_seq_parameter_set_id, sps)); } //-------------------------------------------------------------------------- void PCCTMC3Decoder3::storeGps(GeometryParameterSet&& gps) { // todo(df): handle replacement semantics _gpss.emplace(std::make_pair(gps.gps_geom_parameter_set_id, gps)); } //-------------------------------------------------------------------------- void PCCTMC3Decoder3::storeAps(AttributeParameterSet&& aps) { // todo(df): handle replacement semantics _apss.emplace(std::make_pair(aps.aps_attr_parameter_set_id, aps)); } //-------------------------------------------------------------------------- void PCCTMC3Decoder3::storeTileInventory(TileInventory&& inventory) { // todo(df): handle replacement semantics _tileInventory = inventory; } //========================================================================== void PCCTMC3Decoder3::activateParameterSets(const GeometryBrickHeader& gbh) { // HACK: assume activation of the first SPS and GPS // todo(df): parse brick header here for propper sps & gps activation // -- this is currently inconsistent between trisoup and octree assert(!_spss.empty()); assert(!_gpss.empty()); _sps = &_spss.cbegin()->second; _gps = &_gpss.cbegin()->second; } 
//-------------------------------------------------------------------------- void PCCTMC3Decoder3::activateParameterSets(const AttributeParamInventoryHdr& hdr) { // HACK: assume activation of the first SPS and GPS // todo(df): parse brick header here for propper sps & gps activation // -- this is currently inconsistent between trisoup and octree assert(!_spss.empty()); assert(!_gpss.empty()); _sps = &_spss.cbegin()->second; _gps = &_gpss.cbegin()->second; } //========================================================================== // Initialise the point cloud storage and decode a single geometry slice. int PCCTMC3Decoder3::decodeGeometryBrick(const PayloadBuffer& buf) { assert(buf.type == PayloadType::kGeometryBrick); std::cout << "positions bitstream size " << buf.size() << " B\n"; // todo(df): replace with attribute mapping bool hasColour = std::any_of( _sps->attributeSets.begin(), _sps->attributeSets.end(), [](const AttributeDescription& desc) { return desc.attributeLabel == KnownAttributeLabel::kColour; }); bool hasReflectance = std::any_of( _sps->attributeSets.begin(), _sps->attributeSets.end(), [](const AttributeDescription& desc) { return desc.attributeLabel == KnownAttributeLabel::kReflectance; }); _currentPointCloud.clear(); _currentPointCloud.addRemoveAttributes(hasColour, hasReflectance); pcc::chrono::Stopwatch<pcc::chrono::utime_inc_children_clock> clock_user; clock_user.start(); int gbhSize, gbfSize; _gbh = parseGbh(*_sps, *_gps, buf, &gbhSize, &gbfSize); _prevSliceId = _sliceId; _sliceId = _gbh.geom_slice_id; _sliceOrigin = _gbh.geomBoxOrigin; // sanity check for loss detection if (_gbh.entropy_continuation_flag) { assert(!_firstSliceInFrame); assert(_gbh.prev_slice_id == _prevSliceId); } else { // forget (reset) all saved context state at boundary _ctxtMemOctreeGeom->reset(); _ctxtMemPredGeom->reset(); for (auto& ctxtMem : _ctxtMemAttrs) ctxtMem.reset(); } // set default attribute values (in case an attribute data unit is lost) // NB: it is a 
requirement that geom_num_points_minus1 is correct _currentPointCloud.resize(_gbh.footer.geom_num_points_minus1 + 1); if (hasColour) { auto it = std::find_if( _outCloud.attrDesc.cbegin(), _outCloud.attrDesc.cend(), [](const AttributeDescription& desc) { return desc.attributeLabel == KnownAttributeLabel::kColour; }); Vec3<attr_t> defAttrVal = 1 << (it->bitdepth - 1); if (!it->params.attr_default_value.empty()) for (int k = 0; k < 3; k++) defAttrVal[k] = it->params.attr_default_value[k]; for (int i = 0; i < _currentPointCloud.getPointCount(); i++) _currentPointCloud.setColor(i, defAttrVal); } if (hasReflectance) { auto it = std::find_if( _outCloud.attrDesc.cbegin(), _outCloud.attrDesc.cend(), [](const AttributeDescription& desc) { return desc.attributeLabel == KnownAttributeLabel::kReflectance; }); attr_t defAttrVal = 1 << (it->bitdepth - 1); if (!it->params.attr_default_value.empty()) defAttrVal = it->params.attr_default_value[0]; for (int i = 0; i < _currentPointCloud.getPointCount(); i++) _currentPointCloud.setReflectance(i, defAttrVal); } // Calculate a tree level at which to stop // It should result in at most max points being decoded if (_params.decodeMaxPoints && _gps->octree_point_count_list_present_flag) { if (_params.decodeMaxPoints > _gbh.footer.geom_num_points_minus1) _params.minGeomNodeSizeLog2 = 0; else { auto it = std::lower_bound( std::next(_gbh.footer.octree_lvl_num_points_minus1.begin()), _gbh.footer.octree_lvl_num_points_minus1.end(), _params.decodeMaxPoints); _params.minGeomNodeSizeLog2 = std::distance(it, _gbh.footer.octree_lvl_num_points_minus1.end()) + 1; } } EntropyDecoder aec; aec.setBuffer(buf.size() - gbhSize - gbfSize, buf.data() + gbhSize); aec.enableBypassStream(_sps->cabac_bypass_stream_enabled_flag); aec.start(); if (_gps->predgeom_enabled_flag) decodePredictiveGeometry( *_gps, _gbh, _currentPointCloud, &_posSph, *_ctxtMemPredGeom, aec); else if (!_gps->trisoup_enabled_flag) { if (!_params.minGeomNodeSizeLog2) { decodeGeometryOctree( 
*_gps, _gbh, _currentPointCloud, *_ctxtMemOctreeGeom, aec); } else { decodeGeometryOctreeScalable( *_gps, _gbh, _params.minGeomNodeSizeLog2, _currentPointCloud, *_ctxtMemOctreeGeom, aec); } } else { decodeGeometryTrisoup( *_gps, _gbh, _currentPointCloud, *_ctxtMemOctreeGeom, aec); } // At least the first slice's geometry has been decoded _firstSliceInFrame = false; clock_user.stop(); auto total_user = std::chrono::duration_cast<std::chrono::milliseconds>(clock_user.count()); std::cout << "positions processing time (user): " << total_user.count() / 1000.0 << " s\n"; std::cout << std::endl; return 0; } //-------------------------------------------------------------------------- void PCCTMC3Decoder3::decodeAttributeBrick(const PayloadBuffer& buf) { assert(buf.type == PayloadType::kAttributeBrick); // todo(df): replace assertions with error handling assert(_sps); assert(_gps); // verify that this corresponds to the correct geometry slice AttributeBrickHeader abh = parseAbhIds(buf); assert(abh.attr_geom_slice_id == _sliceId); // todo(df): validate that sps activation is not changed via the APS const auto it_attr_aps = _apss.find(abh.attr_attr_parameter_set_id); assert(it_attr_aps != _apss.cend()); const auto& attr_aps = it_attr_aps->second; assert(abh.attr_sps_attr_idx < _sps->attributeSets.size()); const auto& attr_sps = _sps->attributeSets[abh.attr_sps_attr_idx]; const auto& label = attr_sps.attributeLabel; // sanity check for loss detection if (_gbh.entropy_continuation_flag) assert(_gbh.prev_slice_id == _ctxtMemAttrSliceIds[abh.attr_sps_attr_idx]); // Ensure context arrays are allocated context arrays // todo(df): move this to sps activation _ctxtMemAttrSliceIds.resize(_sps->attributeSets.size()); _ctxtMemAttrs.resize(_sps->attributeSets.size()); // In order to determinet hat the attribute decoder is reusable, the abh // must be inspected. 
int abhSize; abh = parseAbh(*_sps, attr_aps, buf, &abhSize); pcc::chrono::Stopwatch<pcc::chrono::utime_inc_children_clock> clock_user; // replace the attribute decoder if not compatible if (!_attrDecoder || !_attrDecoder->isReusable(attr_aps, abh)) _attrDecoder = makeAttributeDecoder(); clock_user.start(); // Convert cartesian positions to spherical for use in attribute coding. // NB: this retains the original cartesian positions to restore afterwards std::vector<pcc::point_t> altPositions; if (attr_aps.spherical_coord_flag) { // If predgeom was used, re-use the internal positions rather than // calculating afresh. Box3<int> bboxRpl; if (_gps->predgeom_enabled_flag) { altPositions = _posSph; bboxRpl = Box3<int>(altPositions.begin(), altPositions.end()); } else { altPositions.resize(_currentPointCloud.getPointCount()); auto laserOrigin = _gbh.geomAngularOrigin(*_gps); bboxRpl = convertXyzToRpl( laserOrigin, _gps->angularTheta.data(), _gps->angularTheta.size(), &_currentPointCloud[0], &_currentPointCloud[0] + _currentPointCloud.getPointCount(), altPositions.data()); } offsetAndScale( bboxRpl.min, attr_aps.attr_coord_scale, altPositions.data(), altPositions.data() + altPositions.size()); _currentPointCloud.swapPoints(altPositions); } auto& ctxtMemAttr = _ctxtMemAttrs.at(abh.attr_sps_attr_idx); _attrDecoder->decode( *_sps, attr_sps, attr_aps, abh, _gbh.footer.geom_num_points_minus1, _params.minGeomNodeSizeLog2, buf.data() + abhSize, buf.size() - abhSize, ctxtMemAttr, _currentPointCloud); if (attr_aps.spherical_coord_flag) _currentPointCloud.swapPoints(altPositions); // Note the current sliceID for loss detection _ctxtMemAttrSliceIds[abh.attr_sps_attr_idx] = _sliceId; clock_user.stop(); std::cout << label << "s bitstream size " << buf.size() << " B\n"; auto total_user = std::chrono::duration_cast<std::chrono::milliseconds>(clock_user.count()); std::cout << label << "s processing time (user): " << total_user.count() / 1000.0 << " s\n"; std::cout << std::endl; } 
//-------------------------------------------------------------------------- void PCCTMC3Decoder3::decodeConstantAttribute(const PayloadBuffer& buf) { assert(buf.type == PayloadType::kConstantAttribute); // todo(df): replace assertions with error handling assert(_sps); assert(_gps); ConstantAttributeDataUnit cadu = parseConstantAttribute(*_sps, buf); // verify that this corresponds to the correct geometry slice assert(cadu.constattr_geom_slice_id == _sliceId); assert(cadu.constattr_sps_attr_idx < _sps->attributeSets.size()); const auto& attrDesc = _sps->attributeSets[cadu.constattr_sps_attr_idx]; const auto& label = attrDesc.attributeLabel; // todo(df): replace with proper attribute mapping if (label == KnownAttributeLabel::kColour) { Vec3<attr_t> defAttrVal; for (int k = 0; k < 3; k++) defAttrVal[k] = attrDesc.params.attr_default_value[k]; for (int i = 0; i < _currentPointCloud.getPointCount(); i++) _currentPointCloud.setColor(i, defAttrVal); } if (label == KnownAttributeLabel::kReflectance) { attr_t defAttrVal = attrDesc.params.attr_default_value[0]; for (int i = 0; i < _currentPointCloud.getPointCount(); i++) _currentPointCloud.setReflectance(i, defAttrVal); } } //============================================================================ } // namespace pcc
20,173
32.018003
79
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/encoder.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "PCCTMC3Encoder.h" #include <cassert> #include <limits> #include <set> #include <stdexcept> #include "Attribute.h" #include "AttributeCommon.h" #include "coordinate_conversion.h" #include "geometry_params.h" #include "hls.h" #include "pointset_processing.h" #include "geometry.h" #include "geometry_octree.h" #include "geometry_predictive.h" #include "io_hls.h" #include "osspecific.h" #include "partitioning.h" #include "pcc_chrono.h" #include "ply.h" namespace pcc { //============================================================================ PCCPointSet3 getPartition(const PCCPointSet3& src, const std::vector<int32_t>& indexes); PCCPointSet3 getPartition( const PCCPointSet3& src, const SrcMappedPointSet& map, const std::vector<int32_t>& indexes); //============================================================================ PCCTMC3Encoder3::PCCTMC3Encoder3() : _frameCounter(-1) { _ctxtMemOctreeGeom.reset(new GeometryOctreeContexts); _ctxtMemPredGeom.reset(new PredGeomContexts); } //---------------------------------------------------------------------------- PCCTMC3Encoder3::~PCCTMC3Encoder3() = default; //============================================================================ int PCCTMC3Encoder3::compress( const PCCPointSet3& inputPointCloud, EncoderParams* params, PCCTMC3Encoder3::Callbacks* callback, CloudFrame* reconCloud) { // start of frame _frameCounter++; if (_frameCounter == 0) { // Angular predictive geometry coding needs to determine spherical // positions. 
To avoid quantization of the input disturbing this: // - sequence scaling is replaced by decimation of the input // - any user-specified global scaling is honoured _inputDecimationScale = 1.; if (params->gps.predgeom_enabled_flag) { _inputDecimationScale = params->codedGeomScale; params->codedGeomScale /= params->seqGeomScale; params->seqGeomScale = 1.; } deriveParameterSets(params); fixupParameterSets(params); _srcToCodingScale = params->codedGeomScale; // Determine input bounding box (for SPS metadata) if not manually set Box3<int> bbox; if (params->autoSeqBbox) bbox = inputPointCloud.computeBoundingBox(); else { bbox.min = params->sps.seqBoundingBoxOrigin; bbox.max = bbox.min + params->sps.seqBoundingBoxSize - 1; } // Note whether the bounding box size is defined // todo(df): set upper limit using level bool bboxSizeDefined = params->sps.seqBoundingBoxSize > 0; if (!bboxSizeDefined) params->sps.seqBoundingBoxSize = (1 << 21) - 1; // Then scale the bounding box to match the reconstructed output for (int k = 0; k < 3; k++) { auto min_k = bbox.min[k]; auto max_k = bbox.max[k]; // the sps bounding box is in terms of the conformance scale // not the source scale. // NB: plus one to convert to range min_k = std::round(min_k * params->seqGeomScale); max_k = std::round(max_k * params->seqGeomScale); params->sps.seqBoundingBoxOrigin[k] = min_k; params->sps.seqBoundingBoxSize[k] = max_k - min_k + 1; // Compensate the sequence origin such that source point (0,0,0) coded // as P_c is reconstructed as (0,0,0): // 0 = P_c * globalScale + seqOrigin auto gs = Rational(params->sps.globalScale); int rem = params->sps.seqBoundingBoxOrigin[k] % gs.numerator; rem += rem < 0 ? 
gs.numerator : 0; params->sps.seqBoundingBoxOrigin[k] -= rem; params->sps.seqBoundingBoxSize[k] += rem; // Convert the origin to coding coordinate system _originInCodingCoords[k] = params->sps.seqBoundingBoxOrigin[k]; _originInCodingCoords[k] /= double(gs); } // Determine the number of bits to signal the bounding box params->sps.sps_bounding_box_offset_bits = numBits(params->sps.seqBoundingBoxOrigin.abs().max()); params->sps.sps_bounding_box_size_bits = bboxSizeDefined ? numBits(params->sps.seqBoundingBoxSize.abs().max()) : 0; // Determine the lidar head position in coding coordinate system params->gps.gpsAngularOrigin *= _srcToCodingScale; params->gps.gpsAngularOrigin -= _originInCodingCoords; // determine the scale factors based on a characteristic of the // acquisition system if (params->gps.geom_angular_mode_enabled_flag) { auto gs = Rational(params->sps.globalScale); int maxX = (params->sps.seqBoundingBoxSize[0] - 1) / double(gs); int maxY = (params->sps.seqBoundingBoxSize[1] - 1) / double(gs); auto& origin = params->gps.gpsAngularOrigin; int rx = std::max(std::abs(origin[0]), std::abs(maxX - origin[0])); int ry = std::max(std::abs(origin[1]), std::abs(maxY - origin[1])); int r = std::max(rx, ry); int twoPi = 25735; int maxLaserIdx = params->gps.numLasers() - 1; if (params->gps.predgeom_enabled_flag) { auto& gps = params->gps; twoPi = 1 << (gps.geom_angular_azimuth_scale_log2_minus11 + 12); r >>= params->gps.geom_angular_radius_inv_scale_log2; } // todo(df): handle the single laser case better Box3<int> sphBox{0, {r, twoPi, maxLaserIdx}}; int refScale = params->gps.azimuth_scaling_enabled_flag ? 
params->attrSphericalMaxLog2 : 0; auto attr_coord_scale = normalisedAxesWeights(sphBox, refScale); for (auto& aps : params->aps) if (aps.spherical_coord_flag) aps.attr_coord_scale = attr_coord_scale; } // Allocate storage for attribute contexts _ctxtMemAttrs.resize(params->sps.attributeSets.size()); } // placeholder to "activate" the parameter sets _sps = &params->sps; _gps = &params->gps; _aps.clear(); for (const auto& aps : params->aps) { _aps.push_back(&aps); } // initial geometry IDs _tileId = 0; _sliceId = 0; _sliceOrigin = Vec3<int>{0}; _firstSliceInFrame = true; // Configure output coud if (reconCloud) { reconCloud->setParametersFrom(*_sps, params->outputFpBits); reconCloud->frameNum = _frameCounter; } // Partition the input point cloud into tiles // - quantize the input point cloud (without duplicate point removal) // - inverse quantize the cloud above to get the initial-sized cloud // - if tile partitioning is enabled,partitioning function produces // vectors tileMaps which map tileIDs to point indexes. // Compute the tile metadata for each partition. // - if not, regard the whole input cloud as a single tile to facilitate // slice partitioning subsequent // todo(df): PartitionSet partitions; SrcMappedPointSet quantizedInput = quantization(inputPointCloud); // write out all parameter sets prior to encoding callback->onOutputBuffer(write(*_sps)); callback->onOutputBuffer(write(*_sps, *_gps)); for (const auto aps : _aps) { callback->onOutputBuffer(write(*_sps, *aps)); } std::vector<std::vector<int32_t>> tileMaps; if (params->partition.tileSize) { tileMaps = tilePartition(params->partition, quantizedInput.cloud); // To tag the slice with the tile id there must be sufficient bits. // todo(df): determine sps parameter from the paritioning? 
assert(numBits(tileMaps.size() - 1) <= _sps->slice_tag_bits); // Default is to use implicit tile ids (ie list index) partitions.tileInventory.tile_id_bits = 0; // all tileOrigins are relative to the sequence bounding box partitions.tileInventory.origin = _sps->seqBoundingBoxOrigin; // Get the bounding box of current tile and write it into tileInventory partitions.tileInventory.tiles.resize(tileMaps.size()); // Convert tile bounding boxes to sequence coordinate system. // A position in the box must remain in the box after conversion // irrispective of how the decoder outputs positions (fractional | integer) // => truncate origin (eg, rounding 12.5 to 13 would not allow all // decoders to find that point). // => use next integer for upper coordinate. double gs = Rational(_sps->globalScale); for (int t = 0; t < tileMaps.size(); t++) { Box3<int32_t> bbox = quantizedInput.cloud.computeBoundingBox( tileMaps[t].begin(), tileMaps[t].end()); auto& tileIvt = partitions.tileInventory.tiles[t]; tileIvt.tile_id = t; for (int k = 0; k < 3; k++) { auto origin = std::trunc(bbox.min[k] * gs); auto size = std::ceil(bbox.max[k] * gs) - origin + 1; tileIvt.tileOrigin[k] = origin; tileIvt.tileSize[k] = size; } } } else { tileMaps.emplace_back(); auto& tile = tileMaps.back(); for (int i = 0; i < quantizedInput.cloud.getPointCount(); i++) tile.push_back(i); } if (partitions.tileInventory.tiles.size() > 1) { auto& inventory = partitions.tileInventory; assert(inventory.tiles.size() == tileMaps.size()); std::cout << "Tile number: " << tileMaps.size() << std::endl; inventory.ti_seq_parameter_set_id = _sps->sps_seq_parameter_set_id; inventory.ti_origin_bits_minus1 = numBits(inventory.origin.abs().max()) - 1; // The inventory comes into force on the first frame inventory.ti_frame_ctr_bits = _sps->frame_ctr_bits; inventory.ti_frame_ctr = _frameCounter & ((1 << _sps->frame_ctr_bits) - 1); // Determine the number of bits for encoding tile sizes int maxValOrigin = 1; int maxValSize = 1; for (const 
auto& entry : inventory.tiles) { maxValOrigin = std::max(maxValOrigin, entry.tileOrigin.max()); maxValSize = std::max(maxValSize, entry.tileSize.max() - 1); } inventory.tile_origin_bits_minus1 = numBits(maxValOrigin) - 1; inventory.tile_size_bits_minus1 = numBits(maxValSize) - 1; callback->onOutputBuffer(write(*_sps, partitions.tileInventory)); } // Partition the input point cloud // - get the partitial cloud of each tile // - partitioning function produces a list of point indexes, origin and // optional tile metadata for each partition. // - encode any tile metadata // NB: the partitioning method is required to ensure that the output // slices conform to any codec limits. // todo(df): consider requiring partitioning function to sort the input // points and provide ranges rather than a set of indicies. do { for (int t = 0; t < tileMaps.size(); t++) { const auto& tile = tileMaps[t]; auto tile_id = partitions.tileInventory.tiles.empty() ? 0 : partitions.tileInventory.tiles[t].tile_id; // Get the point cloud of current tile and compute their bounding boxes PCCPointSet3 tileCloud = getPartition(quantizedInput.cloud, tile); Box3<int32_t> bbox = tileCloud.computeBoundingBox(); // Move the tile cloud to coodinate origin // for the convenience of slice partitioning for (int i = 0; i < tileCloud.getPointCount(); i++) tileCloud[i] -= bbox.min; // don't partition if partitioning would result in a single slice. auto partitionMethod = params->partition.method; if (tileCloud.getPointCount() < params->partition.sliceMaxPoints) partitionMethod = PartitionMethod::kNone; // use the largest trisoup node size as a partitioning boundary for // consistency between slices with different trisoup node sizes. 
int partitionBoundaryLog2 = 0; if (!params->trisoupNodeSizesLog2.empty()) partitionBoundaryLog2 = *std::max_element( params->trisoupNodeSizesLog2.begin(), params->trisoupNodeSizesLog2.end()); //Slice partition of current tile std::vector<Partition> curSlices; switch (partitionMethod) { case PartitionMethod::kNone: curSlices = partitionNone(params->partition, tileCloud, tile_id); break; case PartitionMethod::kUniformGeom: curSlices = partitionByUniformGeom( params->partition, tileCloud, tile_id, partitionBoundaryLog2); break; case PartitionMethod::kUniformSquare: curSlices = partitionByUniformSquare( params->partition, tileCloud, tile_id, partitionBoundaryLog2); break; case PartitionMethod::kOctreeUniform: curSlices = partitionByOctreeDepth(params->partition, tileCloud, tile_id); break; case PartitionMethod::kNpoints: curSlices = partitionByNpts(params->partition, tileCloud, tile_id); break; } // Map slice indexes to tile indexes(the original indexes) for (int i = 0; i < curSlices.size(); i++) { for (int p = 0; p < curSlices[i].pointIndexes.size(); p++) { curSlices[i].pointIndexes[p] = tile[curSlices[i].pointIndexes[p]]; } } partitions.slices.insert( partitions.slices.end(), curSlices.begin(), curSlices.end()); } std::cout << "Slice number: " << partitions.slices.size() << std::endl; } while (0); // Encode each partition: // - create a pointset comprising just the partitioned points // - compress for (const auto& partition : partitions.slices) { // create partitioned point set PCCPointSet3 sliceCloud = getPartition(quantizedInput.cloud, partition.pointIndexes); PCCPointSet3 sliceSrcCloud = getPartition(inputPointCloud, quantizedInput, partition.pointIndexes); _sliceId = partition.sliceId; _tileId = partition.tileId; _sliceOrigin = sliceCloud.computeBoundingBox().min; compressPartition(sliceCloud, sliceSrcCloud, params, callback, reconCloud); } // Apply global scaling to reconstructed point cloud if (reconCloud) scaleGeometry( reconCloud->cloud, _sps->globalScale, 
reconCloud->outputFpBits); return 0; } //---------------------------------------------------------------------------- void PCCTMC3Encoder3::deriveParameterSets(EncoderParams* params) { // fixup extGeomScale in the case that we're coding metres if (params->sps.seq_geom_scale_unit_flag == ScaleUnit::kMetre) params->extGeomScale = 0; if (params->extGeomScale == 0.) params->extGeomScale = params->srcUnitLength; // Derive the sps scale factor: The sequence scale is normalised to an // external geometry scale of 1. // - Ie, if the user specifies that extGeomScale=2 (1 seq point is equal // to 2 external points), seq_geom_scale is halved. // // - In cases where the sequence scale is in metres, the external system // is defined to have a unit length of 1 metre, and srcUnitLength must // be used to define the sequence scale. // - The user may define the relationship to the external coordinate system. // // NB: seq_geom_scale is the reciprocal of unit length params->sps.seqGeomScale = params->seqGeomScale / params->extGeomScale; // Global scaling converts from the coded scale to the sequence scale // NB: globalScale is constrained, eg 1.1 is not representable // todo: consider adjusting seqGeomScale to make a valid globalScale // todo: consider adjusting codedGeomScale to make a valid globalScale params->sps.globalScale = Rational(params->seqGeomScale / params->codedGeomScale); } //---------------------------------------------------------------------------- void PCCTMC3Encoder3::fixupParameterSets(EncoderParams* params) { // fixup parameter set IDs params->sps.sps_seq_parameter_set_id = 0; params->gps.gps_seq_parameter_set_id = 0; params->gps.gps_geom_parameter_set_id = 0; for (int i = 0; i < params->aps.size(); i++) { params->aps[i].aps_seq_parameter_set_id = 0; params->aps[i].aps_attr_parameter_set_id = i; } // development level / header params->sps.profile.main_profile_compatibility_flag = 0; params->sps.profile.reserved_profile_compatibility_21bits = 0; params->sps.level 
= 0; // constraints params->sps.profile.unique_point_positions_constraint_flag = false; params->sps.profile.slice_reordering_constraint_flag = params->sps.entropy_continuation_enabled_flag; // use one bit to indicate frame boundaries params->sps.frame_ctr_bits = 1; // number of bits for slice tag (tileid) if tiles partitioning enabled // NB: the limit of 64 tiles is arbritrary params->sps.slice_tag_bits = params->partition.tileSize > 0 ? 6 : 0; // slice origin parameters used by this encoder implementation params->gps.geom_box_log2_scale_present_flag = true; params->gps.gps_geom_box_log2_scale = 0; // don't code per-slice angular origin params->gps.geom_slice_angular_origin_present_flag = false; // derive the idcm qp offset from cli params->gps.geom_idcm_qp_offset = params->idcmQp - params->gps.geom_base_qp; // Feature dependencies if (!params->gps.neighbour_avail_boundary_log2_minus1) { params->gps.adjacent_child_contextualization_enabled_flag = 0; params->gps.intra_pred_max_node_size_log2 = 0; } if (params->gps.predgeom_enabled_flag) params->gps.geom_planar_mode_enabled_flag = false; // fixup attribute parameters for (auto it : params->attributeIdxMap) { auto& attr_sps = params->sps.attributeSets[it.second]; auto& attr_aps = params->aps[it.second]; auto& attr_enc = params->attr[it.second]; // this encoder does not (yet) support variable length attributes // todo(df): add variable length attribute support attr_aps.raw_attr_variable_len_flag = 0; // sanitise any intra prediction skipping if (attr_aps.attr_encoding != AttributeEncoding::kPredictingTransform) attr_aps.intra_lod_prediction_skip_layers = attr_aps.kSkipAllLayers; if (attr_aps.intra_lod_prediction_skip_layers < 0) attr_aps.intra_lod_prediction_skip_layers = attr_aps.kSkipAllLayers; // avoid signalling overly large values attr_aps.intra_lod_prediction_skip_layers = std::min( attr_aps.intra_lod_prediction_skip_layers, attr_aps.maxNumDetailLevels() + 1); // dist2 is refined in the slice header // - the 
encoder always writes them unless syntatically prohibited: attr_aps.aps_slice_dist2_deltas_present_flag = attr_aps.lodParametersPresent() && !attr_aps.scalable_lifting_enabled_flag && attr_aps.num_detail_levels_minus1 && attr_aps.lod_decimation_type != LodDecimationMethod::kPeriodic; // disable dist2 estimation when decimating with centroid sampler if (attr_aps.lod_decimation_type == LodDecimationMethod::kCentroid) attr_aps.aps_slice_dist2_deltas_present_flag = false; // If the lod search ranges are negative, use a full-range search // todo(df): lookup level limit if (attr_aps.inter_lod_search_range < 0) attr_aps.inter_lod_search_range = 1100000; if (attr_aps.intra_lod_search_range < 0) attr_aps.intra_lod_search_range = 1100000; // If all intra prediction layers are skipped, don't signal a search range if ( attr_aps.intra_lod_prediction_skip_layers > attr_aps.maxNumDetailLevels()) attr_aps.intra_lod_search_range = 0; // If there are no refinement layers, don't signal an inter search range if (attr_aps.maxNumDetailLevels() == 1) attr_aps.inter_lod_search_range = 0; // the encoder options may not specify sufficient offsets for the number // of layers used by the sytax: extend with last value as appropriate int numLayers = std::max( attr_enc.abh.attr_layer_qp_delta_luma.size(), attr_enc.abh.attr_layer_qp_delta_chroma.size()); int lastDeltaLuma = 0; if (!attr_enc.abh.attr_layer_qp_delta_luma.empty()) lastDeltaLuma = attr_enc.abh.attr_layer_qp_delta_luma.back(); int lastDeltaChroma = 0; if (!attr_enc.abh.attr_layer_qp_delta_chroma.empty()) lastDeltaChroma = attr_enc.abh.attr_layer_qp_delta_chroma.back(); attr_enc.abh.attr_layer_qp_delta_luma.resize(numLayers, lastDeltaLuma); attr_enc.abh.attr_layer_qp_delta_chroma.resize(numLayers, lastDeltaChroma); } } //---------------------------------------------------------------------------- void PCCTMC3Encoder3::compressPartition( const PCCPointSet3& inputPointCloud, const PCCPointSet3& originPartCloud, EncoderParams* params, 
PCCTMC3Encoder3::Callbacks* callback, CloudFrame* reconCloud) { // geometry compression consists of the following stages: // - prefilter/quantize geometry (non-normative) // - encode geometry (single slice, id = 0) // - recolour pointCloud.clear(); pointCloud = inputPointCloud; // Offset the point cloud to account for (preset) _sliceOrigin. // The new maximum bounds of the offset cloud Vec3<int> maxBound{0}; const size_t pointCount = pointCloud.getPointCount(); for (size_t i = 0; i < pointCount; ++i) { const point_t point = (pointCloud[i] -= _sliceOrigin); for (int k = 0; k < 3; ++k) { const int k_coord = int(point[k]); assert(k_coord >= 0); if (maxBound[k] < k_coord) maxBound[k] = k_coord; } } // todo(df): don't update maxBound if something is forcing the value? // NB: size is max - min + 1 _sliceBoxWhd = maxBound + 1; // apply a custom trisoup node size params->gbh.trisoup_node_size_log2_minus2 = 0; if (_gps->trisoup_enabled_flag) { int idx = std::min(_sliceId, int(params->trisoupNodeSizesLog2.size()) - 1); params->gbh.trisoup_node_size_log2_minus2 = params->trisoupNodeSizesLog2[idx] - 2; } // geometry encoding if (1) { PayloadBuffer payload(PayloadType::kGeometryBrick); pcc::chrono::Stopwatch<pcc::chrono::utime_inc_children_clock> clock_user; clock_user.start(); encodeGeometryBrick(params, &payload); clock_user.stop(); double bpp = double(8 * payload.size()) / inputPointCloud.getPointCount(); std::cout << "positions bitstream size " << payload.size() << " B (" << bpp << " bpp)\n"; auto total_user = std::chrono::duration_cast<std::chrono::milliseconds>( clock_user.count()); std::cout << "positions processing time (user): " << total_user.count() / 1000.0 << " s" << std::endl; callback->onOutputBuffer(payload); } // verify that the per-level slice constraint has been met // todo(df): avoid hard coded value here (should be level dependent) if (params->enforceLevelLimits) if (pointCloud.getPointCount() > 1100000) throw std::runtime_error( std::string("level slice 
point count limit (1100000) exceeded: ") + std::to_string(pointCloud.getPointCount())); // recolouring // NB: recolouring is required if points are added / removed if (_gps->geom_unique_points_flag || _gps->trisoup_enabled_flag) { for (const auto& attr_sps : _sps->attributeSets) { recolour( attr_sps, params->recolour, originPartCloud, _srcToCodingScale, _originInCodingCoords + _sliceOrigin, &pointCloud); } } // dump recoloured point cloud // todo(df): this needs to work with partitioned clouds callback->onPostRecolour(pointCloud); // attributeCoding auto attrEncoder = makeAttributeEncoder(); // for each attribute for (const auto& it : params->attributeIdxMap) { int attrIdx = it.second; const auto& attr_sps = _sps->attributeSets[attrIdx]; const auto& attr_aps = *_aps[attrIdx]; const auto& attr_enc = params->attr[attrIdx]; const auto& label = attr_sps.attributeLabel; PayloadBuffer payload(PayloadType::kAttributeBrick); pcc::chrono::Stopwatch<pcc::chrono::utime_inc_children_clock> clock_user; clock_user.start(); // todo(df): move elsewhere? AttributeBrickHeader abh; abh.attr_attr_parameter_set_id = attr_aps.aps_attr_parameter_set_id; abh.attr_sps_attr_idx = attrIdx; abh.attr_geom_slice_id = _sliceId; abh.attr_qp_delta_luma = 0; abh.attr_qp_delta_chroma = 0; abh.attr_layer_qp_delta_luma = attr_enc.abh.attr_layer_qp_delta_luma; abh.attr_layer_qp_delta_chroma = attr_enc.abh.attr_layer_qp_delta_chroma; // NB: regionQpOrigin/regionQpSize use the STV axes, not XYZ. if (false) { abh.qpRegions.emplace_back(); auto& region = abh.qpRegions.back(); region.regionOrigin = 0; region.regionSize = 0; region.attr_region_qp_offset = {0, 0}; abh.attr_region_bits_minus1 = -1 + numBits( std::max(region.regionOrigin.max(), region.regionSize.max())); } // Number of regions is constrained to at most 1. assert(abh.qpRegions.size() <= 1); // Convert cartesian positions to spherical for use in attribute coding. 
// NB: this retains the original cartesian positions to restore afterwards std::vector<pcc::point_t> altPositions; if (attr_aps.spherical_coord_flag) { // If predgeom was used, re-use the internal positions rather than // calculating afresh. Box3<int> bboxRpl; if (_gps->predgeom_enabled_flag) { altPositions = _posSph; bboxRpl = Box3<int>(altPositions.begin(), altPositions.end()); } else { altPositions.resize(pointCloud.getPointCount()); auto laserOrigin = _gbh.geomAngularOrigin(*_gps); bboxRpl = convertXyzToRpl( laserOrigin, _gps->angularTheta.data(), _gps->angularTheta.size(), &pointCloud[0], &pointCloud[0] + pointCloud.getPointCount(), altPositions.data()); } offsetAndScale( bboxRpl.min, attr_aps.attr_coord_scale, altPositions.data(), altPositions.data() + altPositions.size()); pointCloud.swapPoints(altPositions); } // calculate dist2 for this slice abh.attr_dist2_delta = 0; if (attr_aps.aps_slice_dist2_deltas_present_flag) { // todo(df): this could be set in the sps and refined only if necessary auto dist2 = estimateDist2(pointCloud, 100, 128, attr_enc.dist2PercentileEstimate); abh.attr_dist2_delta = dist2 - attr_aps.dist2; } // replace the attribute encoder if not compatible if (!attrEncoder->isReusable(attr_aps, abh)) attrEncoder = makeAttributeEncoder(); auto& ctxtMemAttr = _ctxtMemAttrs.at(abh.attr_sps_attr_idx); attrEncoder->encode( *_sps, attr_sps, attr_aps, abh, ctxtMemAttr, pointCloud, &payload); if (attr_aps.spherical_coord_flag) pointCloud.swapPoints(altPositions); clock_user.stop(); int coded_size = int(payload.size()); double bpp = double(8 * coded_size) / inputPointCloud.getPointCount(); std::cout << label << "s bitstream size " << coded_size << " B (" << bpp << " bpp)\n"; auto time_user = std::chrono::duration_cast<std::chrono::milliseconds>( clock_user.count()); std::cout << label << "s processing time (user): " << time_user.count() / 1000.0 << " s" << std::endl; callback->onOutputBuffer(payload); } // Note the current slice id for loss detection 
with entropy continuation _prevSliceId = _sliceId; // prevent re-use of this sliceId: the next slice (geometry + attributes) // should be distinguishable from the current slice. _sliceId++; _firstSliceInFrame = false; if (reconCloud) appendSlice(reconCloud->cloud); } //---------------------------------------------------------------------------- void PCCTMC3Encoder3::encodeGeometryBrick( const EncoderParams* params, PayloadBuffer* buf) { GeometryBrickHeader gbh; gbh.geom_geom_parameter_set_id = _gps->gps_geom_parameter_set_id; gbh.geom_slice_id = _sliceId; gbh.prev_slice_id = _prevSliceId; // NB: slice_tag could be set to some other (external) meaningful value gbh.slice_tag = std::max(0, _tileId); gbh.frame_ctr_lsb = _frameCounter & ((1 << _sps->frame_ctr_bits) - 1); gbh.geomBoxOrigin = _sliceOrigin; gbh.gbhAngularOrigin = _gps->gpsAngularOrigin - _sliceOrigin; gbh.geom_box_origin_bits_minus1 = numBits(gbh.geomBoxOrigin.max()) - 1; gbh.geom_box_log2_scale = 0; gbh.geom_slice_qp_offset = params->gbh.geom_slice_qp_offset; gbh.geom_stream_cnt_minus1 = params->gbh.geom_stream_cnt_minus1; gbh.trisoup_node_size_log2_minus2 = params->gbh.trisoup_node_size_log2_minus2; gbh.geom_qp_offset_intvl_log2_delta = params->gbh.geom_qp_offset_intvl_log2_delta; // Entropy continuation is not permitted in the first slice of a frame gbh.entropy_continuation_flag = false; if (_sps->entropy_continuation_enabled_flag) gbh.entropy_continuation_flag = !_firstSliceInFrame; // inform the geometry coder what the root node size is for (int k = 0; k < 3; k++) { // NB: A minimum whd of 2 means there is always at least 1 tree level gbh.rootNodeSizeLog2[k] = ceillog2(std::max(2, _sliceBoxWhd[k])); // The root node size cannot be smaller than the trisoup node size // since this is how the root node size is defined at the decoder. // NB: the following isn't strictly necessary, but avoids accidents // involving the qtbt derivation. 
gbh.rootNodeSizeLog2[k] = std::max(gbh.trisoupNodeSizeLog2(*_gps), gbh.rootNodeSizeLog2[k]); } gbh.maxRootNodeDimLog2 = gbh.rootNodeSizeLog2.max(); // use a cubic node if qtbt is disabled if (!_gps->predgeom_enabled_flag && !_gps->qtbt_enabled_flag) gbh.rootNodeSizeLog2 = gbh.maxRootNodeDimLog2; // todo(df): remove estimate when arithmetic codec is replaced int maxAcBufLen = int(pointCloud.getPointCount()) * 3 * 4 + 1024; // allocate entropy streams std::vector<std::unique_ptr<EntropyEncoder>> arithmeticEncoders; for (int i = 0; i < 1 + gbh.geom_stream_cnt_minus1; i++) { arithmeticEncoders.emplace_back(new EntropyEncoder(maxAcBufLen, nullptr)); auto& aec = arithmeticEncoders.back(); aec->enableBypassStream(_sps->cabac_bypass_stream_enabled_flag); aec->start(); } // forget (reset) all saved context state at boundary if (!gbh.entropy_continuation_flag) { _ctxtMemOctreeGeom->reset(); _ctxtMemPredGeom->reset(); for (auto& ctxtMem : _ctxtMemAttrs) ctxtMem.reset(); } if (_gps->predgeom_enabled_flag) encodePredictiveGeometry( params->predGeom, *_gps, gbh, pointCloud, &_posSph, *_ctxtMemPredGeom, arithmeticEncoders[0].get()); else if (!_gps->trisoup_enabled_flag) encodeGeometryOctree( params->geom, *_gps, gbh, pointCloud, *_ctxtMemOctreeGeom, arithmeticEncoders); else { // limit the number of points to the slice limit // todo(df): this should be derived from the level gbh.footer.geom_num_points_minus1 = params->partition.sliceMaxPoints - 1; encodeGeometryTrisoup( params->geom, *_gps, gbh, pointCloud, *_ctxtMemOctreeGeom, arithmeticEncoders); } // signal the actual number of points coded gbh.footer.geom_num_points_minus1 = pointCloud.getPointCount() - 1; // assemble data unit // - record the position of each aec buffer for chunk concatenation std::vector<std::pair<size_t, size_t>> aecStreams; write(*_sps, *_gps, gbh, buf); for (auto& arithmeticEncoder : arithmeticEncoders) { auto aecLen = arithmeticEncoder->stop(); auto aecBuf = arithmeticEncoder->buffer(); 
aecStreams.emplace_back(buf->size(), aecLen); buf->insert(buf->end(), aecBuf, aecBuf + aecLen); } // This process is performed here from the last chunk to the first. It // is also possible to implement this in a forwards direction too. if (_sps->cabac_bypass_stream_enabled_flag) { aecStreams.pop_back(); for (auto i = aecStreams.size() - 1; i + 1; i--) { auto& stream = aecStreams[i]; auto* ptr = reinterpret_cast<uint8_t*>(buf->data()); auto* chunkA = ptr + stream.first + (stream.second & ~0xff); auto* chunkB = ptr + stream.first + stream.second; auto* end = ptr + buf->size(); ChunkStreamBuilder::spliceChunkStreams(chunkA, chunkB, end); } } // append the footer write(*_gps, gbh, gbh.footer, buf); // Cache gbh for later reference _gbh = gbh; } //---------------------------------------------------------------------------- void PCCTMC3Encoder3::appendSlice(PCCPointSet3& accumCloud) { // offset current point cloud to be in coding coordinate system size_t numPoints = pointCloud.getPointCount(); for (size_t i = 0; i < numPoints; i++) for (int k = 0; k < 3; k++) pointCloud[i][k] += _sliceOrigin[k]; accumCloud.append(pointCloud); } //---------------------------------------------------------------------------- // translates and scales inputPointCloud, storing the result in // this->pointCloud for use by the encoding process. SrcMappedPointSet PCCTMC3Encoder3::quantization(const PCCPointSet3& src) { // Currently the sequence bounding box size must be set assert(_sps->seqBoundingBoxSize != Vec3<int>{0}); // Clamp all points to [clampBox.min, clampBox.max] after translation // and quantisation. Box3<int32_t> clampBox(0, std::numeric_limits<int32_t>::max()); // When using predictive geometry, sub-sample the point cloud and let // the predictive geometry coder quantise internally. if (_inputDecimationScale != 1.) 
return samplePositionsUniq( _inputDecimationScale, _srcToCodingScale, _originInCodingCoords, src); if (_gps->geom_unique_points_flag) return quantizePositionsUniq( _srcToCodingScale, _originInCodingCoords, clampBox, src); SrcMappedPointSet dst; quantizePositions( _srcToCodingScale, _originInCodingCoords, clampBox, src, &dst.cloud); return dst; } //---------------------------------------------------------------------------- // get the partial point cloud according to required point indexes PCCPointSet3 getPartition(const PCCPointSet3& src, const std::vector<int32_t>& indexes) { PCCPointSet3 dst; dst.addRemoveAttributes(src); int partitionSize = indexes.size(); dst.resize(partitionSize); for (int i = 0; i < partitionSize; i++) { int inputIdx = indexes[i]; dst[i] = src[inputIdx]; if (src.hasColors()) dst.setColor(i, src.getColor(inputIdx)); if (src.hasReflectances()) dst.setReflectance(i, src.getReflectance(inputIdx)); if (src.hasLaserAngles()) dst.setLaserAngle(i, src.getLaserAngle(inputIdx)); } return dst; } //---------------------------------------------------------------------------- // get the partial point cloud according to required point indexes PCCPointSet3 getPartition( const PCCPointSet3& src, const SrcMappedPointSet& map, const std::vector<int32_t>& indexes) { // Without the list, do nothing if (map.idxToSrcIdx.empty()) return {}; // work out the destination size. 
// loop over each linked list until an element points to itself int size = 0; for (int idx : indexes) { int prevIdx, srcIdx = map.idxToSrcIdx[idx]; do { size++; prevIdx = srcIdx; srcIdx = map.srcIdxDupList[srcIdx]; } while (srcIdx != prevIdx); } PCCPointSet3 dst; dst.addRemoveAttributes(src); dst.resize(size); int dstIdx = 0; for (int idx : indexes) { int prevIdx, srcIdx = map.idxToSrcIdx[idx]; do { dst[dstIdx] = src[srcIdx]; if (src.hasColors()) dst.setColor(dstIdx, src.getColor(srcIdx)); if (src.hasReflectances()) dst.setReflectance(dstIdx, src.getReflectance(srcIdx)); if (src.hasLaserAngles()) dst.setLaserAngle(dstIdx, src.getLaserAngle(srcIdx)); dstIdx++; prevIdx = srcIdx; srcIdx = map.srcIdxDupList[srcIdx]; } while (srcIdx != prevIdx); } return dst; } //============================================================================ } // namespace pcc
37,287
35.6647
79
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/entropy.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include "entropydirac.h" #include "entropyutils.h" namespace pcc { using EntropyEncoder = EntropyEncoderWrapper<dirac::ArithmeticEncoder>; using EntropyDecoder = EntropyDecoderWrapper<dirac::ArithmeticDecoder>; } // namespace pcc
2,072
45.066667
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/entropychunk.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <algorithm> #include <cassert> #include <cstddef> #include <cstdint> #include <stdexcept> namespace pcc { //============================================================================= // This multiplexer takes two input streams (one bytewise and one bitwise) // and assembles them into chunks within an output buffer class ChunkStreamBuilder { public: ChunkStreamBuilder(const ChunkStreamBuilder&) = delete; ChunkStreamBuilder(ChunkStreamBuilder&&) = delete; ChunkStreamBuilder& operator=(const ChunkStreamBuilder&) = delete; ChunkStreamBuilder& operator=(ChunkStreamBuilder&&) = delete; ChunkStreamBuilder() : _chunkBase(nullptr), _outputSizeRemaining(0) {} ChunkStreamBuilder(uint8_t* buf, size_t size) { reset(buf, size); } void reset(uint8_t* buf = nullptr, size_t size = 0); size_t size() const; void writeAecByte(uint8_t byte); void writeBypassBit(bool bit); void flush(); // Splice two chunk streams together. // Chunks chunkA and chunkB must be adjacent in memory. // // \param chunkA a pointer to the last chunk of the first stream // \param chunkB a pointer to the first chunk of the second stream // \param end pointer to one-past-the-end of the buffer containing A & B static void spliceChunkStreams(uint8_t* chunkA, uint8_t* chunkB, uint8_t* end); private: void reserveChunkByte(); void finaliseChunk(); void startNextChunk(); private: static const int kChunkSize = 256; // number of bytes remaining in the output buffer size_t _outputSizeRemaining; // number of bytes written to the output size_t _outputLength{0}; // start of the curernt chunk; uint8_t* _chunkBase; // number of bytes available in the current chunk int _chunkBytesRemaining; uint8_t* _aecPtr; uint8_t* _bypassPtr; int _bypassBitIdx; int _bypassByteAllocCounter; }; //============================================================================= inline void ChunkStreamBuilder::reset(uint8_t* buf, size_t size) { _outputLength = 0; if (!buf) return; // allocate the first chunk, (fixup the 
start address first) _chunkBase = buf - kChunkSize; _outputSizeRemaining = size; startNextChunk(); } //----------------------------------------------------------------------------- inline size_t ChunkStreamBuilder::size() const { return _outputLength; } //----------------------------------------------------------------------------- inline void ChunkStreamBuilder::writeAecByte(uint8_t byte) { reserveChunkByte(); *_aecPtr++ = byte; } //----------------------------------------------------------------------------- inline void ChunkStreamBuilder::writeBypassBit(bool bit) { if (_bypassByteAllocCounter < 1) { reserveChunkByte(); _bypassByteAllocCounter += 8; } _bypassByteAllocCounter--; if (--_bypassBitIdx < 0) { _bypassPtr--; _bypassBitIdx = 7; } *_bypassPtr = (*_bypassPtr << 1) | bit; } //----------------------------------------------------------------------------- inline void ChunkStreamBuilder::flush() { if (!_chunkBase) return; // if nothing has been written to the chunk, remove it if (_chunkBytesRemaining == kChunkSize - 1) { _outputLength -= kChunkSize; return; } finaliseChunk(); // if it isn't a full chunk, truncate _outputLength -= _chunkBytesRemaining; } //----------------------------------------------------------------------------- // Ensures that there is space to write a byte in the chunk. If not, // the current chunk is finalised and starts the next. 
inline void ChunkStreamBuilder::reserveChunkByte() { if (--_chunkBytesRemaining >= 0) return; // _chunkBytesRemaning is negative: set to zero (since there are none left) _chunkBytesRemaining = 0; finaliseChunk(); startNextChunk(); // reserve a byte from the chunk _chunkBytesRemaining--; } //----------------------------------------------------------------------------- inline void ChunkStreamBuilder::finaliseChunk() { int chunk_num_ae_bytes = _aecPtr - _chunkBase - 1; int bypassLen = kChunkSize - _chunkBytesRemaining - chunk_num_ae_bytes - 1; if (bypassLen) { // the number of padding bits (less the symtax element size) int chunk_bypass_num_flushed_bits = _bypassBitIdx - 3; // first, add padding bits to current partial byte *_bypassPtr <<= _bypassBitIdx; // there may be an extra byte at the end if last byte occupancy > 5 if (chunk_bypass_num_flushed_bits < 0) { *--_bypassPtr = 0; chunk_bypass_num_flushed_bits += 8; } *_bypassPtr |= uint8_t(chunk_bypass_num_flushed_bits); if (_chunkBytesRemaining) std::move( _bypassPtr, _chunkBase + kChunkSize, _chunkBase + chunk_num_ae_bytes + 1); } // write out the length of the aec data _chunkBase[0] = uint8_t(chunk_num_ae_bytes); } //----------------------------------------------------------------------------- inline void ChunkStreamBuilder::startNextChunk() { // start a new chunk if (_outputSizeRemaining < kChunkSize) throw std::runtime_error("Chunk buffer overflow"); // NB: reserves one byte for the aec length _chunkBytesRemaining = kChunkSize - 1; _chunkBase += kChunkSize; _aecPtr = _chunkBase + 1; _bypassPtr = _chunkBase + kChunkSize - 1; _bypassBitIdx = 8; _bypassByteAllocCounter = -3; _outputSizeRemaining -= kChunkSize; _outputLength += kChunkSize; } //============================================================================= class ChunkStreamReader { public: ChunkStreamReader(const ChunkStreamReader&) = delete; ChunkStreamReader(ChunkStreamReader&&) = delete; ChunkStreamReader& operator=(const ChunkStreamReader&) = 
delete; ChunkStreamReader& operator=(ChunkStreamReader&&) = delete; ChunkStreamReader() : _end(nullptr) , _aecBytesRemaining(0) , _aecNextChunk(nullptr) , _bypassNextChunk(nullptr) , _bypassAccumBitsRemaining(0) , _bypassBitsRemaining(0) {} ChunkStreamReader(uint8_t* buf, size_t size) { reset(buf, size); } void reset(const uint8_t* buf, size_t len); // Flush the current chunk and realign with the next stream in the input. void nextStream(); uint8_t readAecByte(); bool readBypassBit(); private: static const int kChunkSize = 256; // the limit of the buffer. Used for error checking const uint8_t* _end; // state for the aec substream int _aecBytesRemaining; const uint8_t* _aecByte; const uint8_t* _aecNextChunk; // state for the bypass substream const uint8_t* _bypassNextChunk; const uint8_t* _bypassByte; int _bypassAccumBitsRemaining; int _bypassBitsRemaining; uint8_t _bypassAccum; }; //============================================================================= inline void ChunkStreamReader::reset(const uint8_t* buf, size_t size) { _end = buf + size; _aecBytesRemaining = 0; _aecByte = nullptr; _aecNextChunk = buf; _bypassNextChunk = buf; _bypassByte = nullptr; _bypassAccumBitsRemaining = 0; _bypassBitsRemaining = 0; } //----------------------------------------------------------------------------- inline uint8_t ChunkStreamReader::readAecByte() { if (_aecBytesRemaining-- > 0) return *_aecByte++; const uint8_t* ptr = _aecNextChunk; int chunk_num_ae_bytes = 0; while (ptr < _end && !(chunk_num_ae_bytes = *ptr)) ptr += kChunkSize; if (ptr + chunk_num_ae_bytes >= _end) return 0xff; //throw std::runtime_error("aec buffer exceeded"); _aecNextChunk = ptr + kChunkSize; _aecByte = ptr + 1; _aecBytesRemaining = chunk_num_ae_bytes; _aecBytesRemaining--; return *_aecByte++; } //----------------------------------------------------------------------------- inline bool ChunkStreamReader::readBypassBit() { // extract bit from accumulator if (_bypassAccumBitsRemaining-- > 0) { int bit 
= !!(_bypassAccum & 0x80); _bypassAccum <<= 1; return bit; } // try to refil accumulator _bypassBitsRemaining -= 8; if (_bypassBitsRemaining > 0) { _bypassAccum = *_bypassByte--; _bypassAccumBitsRemaining = std::min(_bypassBitsRemaining, 8); return readBypassBit(); } // at end of current chunk, find next with bypass data const uint8_t* ptr = _bypassNextChunk; int chunk_num_ae_bytes = 0; while (ptr < _end && (chunk_num_ae_bytes = *ptr) == kChunkSize - 1) ptr += kChunkSize; // the last chunk may be truncated int chunkSize = kChunkSize; chunkSize = std::max(0, std::min(int(_end - ptr), chunkSize)); if (ptr + chunkSize - 1 >= _end) throw std::runtime_error("bypass buffer exceeded"); int chunk_bypass_num_flushed_bits = ptr[chunk_num_ae_bytes + 1] & 0x7; _bypassNextChunk = ptr + kChunkSize; _bypassByte = ptr + chunkSize - 1; _bypassAccum = *_bypassByte--; _bypassBitsRemaining = 8 * (chunkSize - chunk_num_ae_bytes) - chunk_bypass_num_flushed_bits - 11; _bypassAccumBitsRemaining = std::min(_bypassBitsRemaining, 8); return readBypassBit(); } //----------------------------------------------------------------------------- inline void ChunkStreamReader::nextStream() { // In the current figure, stream A is being parsed: // <----Stream A---->|<-Stream B ... // |--------|yyybbbxx|bbbbb|----- // Where, x is bypass data, y is aec data, and b is data from stream B. // // When switching to stream B, the the 'b' bytes from stream B that // appear in the last chunk of A must be realigned to B (ie, xx is removed). // The current chunk is the chunk containing the last AEC byte read // NB: it is guaranteed that there is at least one AEC byte in the last // chunk of A (since the AEC data is flushed after the bypass). 
assert(_bypassNextChunk <= _aecNextChunk); auto chunk = const_cast<uint8_t*>(_aecNextChunk) - kChunkSize; auto chunkAecLen = *chunk; // If there is no bypass data in the final aec chunk of A, everything is // already aligned: // |--------|yyybbbbb|bbbbb|----- if (_bypassNextChunk < _aecNextChunk) { auto next = chunk + 1 + chunkAecLen; reset(next, _end - next); return; } // Consume the end of the bypass stream. The last byte contains syntax elmt // chunk_bypass_num_flushed_bits. If more than five bits of the last byte // have been read, the last byte is the next byte. if (_bypassAccumBitsRemaining < 3) _bypassByte--; _bypassAccumBitsRemaining = 0; // |yyybbbxx| // chunk ^ | | | // chunkBp ^ | | // _bypassByte ^ | // chunkEnd ^ auto chunkEnd = std::min(chunk + kChunkSize, const_cast<uint8_t*>(_end)); auto chunkBp = chunk + chunkAecLen + 1; auto padLen = _bypassByte - chunkBp + 1; std::move_backward(chunkBp, const_cast<uint8_t*>(_bypassByte) + 1, chunkEnd); auto next = chunkEnd - padLen; reset(next, _end - next); } //============================================================================= // Since the start of the bypass data in an entropy chunk is aligned to the // end of the chunk (its written backwards), when a truncated chunk stream (ie // not multiple of 256 bytes) is concatenated with another stream, the // position of the bypass data is unknowable without a pointer. To avoid // this, the bypass data in the last chunk is moved to its expected location. // <-Stream A---->|<-Stream B ... 
// |--------|yyyxx|bbbbbbbb|----- // ^ expected end of A (xx) // Move xx to expected location: // |--------|yyybbbxx|bbbbb|----- inline void ChunkStreamBuilder::spliceChunkStreams( uint8_t* chunkA, uint8_t* chunkB, uint8_t* end) { auto chunkLen = chunkB - chunkA; // If the last chunk isn't truncated, there is nothing to do if (chunkLen == kChunkSize) return; // --------|yyyxx|bbbbbbbb|----- // chunkA ^ | // chunkB ^ // chunkAbp ^ // Save the bypass data in the last chunk of A int chunkAecLen = uint8_t(*chunkA); auto* chunkAbp = chunkA + 1 + chunkAecLen; auto chunkAbpLen = chunkB - chunkAbp; if (!chunkAbpLen) return; uint8_t tmpBuf[256]; std::copy_n(chunkAbp, chunkAbpLen, tmpBuf); // the amount by which to pad A with data from B // NB: this takes into account that B, at the end of the stream, is not // large enough to fill A. auto expectedChunkLen = std::min(ptrdiff_t(256), end - chunkA); auto padLen = expectedChunkLen - chunkLen; // Move initial part of stream B backwards, // Copy the saved bypass data to correct location std::move(chunkB, chunkB + padLen, chunkAbp); std::copy_n(tmpBuf, chunkAbpLen, chunkAbp + padLen); } //============================================================================= } // namespace pcc
14,508
29.164241
79
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/entropydirac.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "entropydirac.h" #include "PCCMisc.h" #include <algorithm> namespace pcc { namespace dirac { //========================================================================= void SchroMAryContext::set_alphabet(int numsyms) { // initialise all contexts with p = 0.5 this->numsyms = numsyms; this->probabilities = std::vector<uint16_t>(numsyms, 0x8000); } //========================================================================= void ArithmeticEncoder::encode(int sym, SchroMAryContext& model) { int ctxidx = 0; while (sym-- > 0) { schro_arith_encode_bit(&impl, &model.probabilities[ctxidx++], 1); } // todo(df): this should be truncated unary coded schro_arith_encode_bit(&impl, &model.probabilities[ctxidx], 0); } //========================================================================= int ArithmeticDecoder::decode(SchroMAryContext& model) { int ctxidx = 0; int sym = 0; while (schro_arith_decode_bit(&impl, &model.probabilities[ctxidx++])) { sym++; } return sym; } //========================================================================== } // namespace dirac } // namespace pcc
3,026
34.611765
78
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/entropydirac.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include "dependencies/schroedinger/schroarith.h" #include "entropychunk.h" #include <algorithm> #include <assert.h> #include <stdlib.h> #include <algorithm> #include <memory> #include <vector> namespace pcc { namespace dirac { //========================================================================== struct SchroContext { uint16_t probability = 0x8000; // p=0.5 template<class... Args> void reset(Args...) { probability = 0x8000; } }; //-------------------------------------------------------------------------- // The approximate (7 bit) probability of a symbol being 1 or 0 according // to a context model. inline int approxSymbolProbability(int bit, SchroContext& model) { int p = std::max(1, model.probability >> 9); return bit ? 128 - p : p; } //========================================================================== struct SchroContextFixed { //uint16_t probability = 0x8000; // p=0.5 }; //========================================================================== // context definition that automatically binarises an m-ary symbol struct SchroMAryContext { SchroMAryContext() = default; SchroMAryContext(int numsyms) { set_alphabet(numsyms); } void set_alphabet(int numsyms); int numsyms; std::vector<uint16_t> probabilities; }; //========================================================================== struct InvalidContext { void set_alphabet(int) {} }; //========================================================================== class ArithmeticEncoder { public: ArithmeticEncoder() = default; ArithmeticEncoder(size_t bufferSize, std::nullptr_t) { setBuffer(bufferSize, nullptr); } //------------------------------------------------------------------------ void setBuffer(size_t size, uint8_t* buffer) { _bufSize = size; if (buffer) _buf = _bufWr = buffer; else { allocatedBuffer.reset(new uint8_t[size]); _buf = _bufWr = allocatedBuffer.get(); } } //------------------------------------------------------------------------ void enableBypassStream(bool 
cabac_bypass_stream_enabled_flag) { _cabac_bypass_stream_enabled_flag = cabac_bypass_stream_enabled_flag; } //------------------------------------------------------------------------ void start() { if (!_cabac_bypass_stream_enabled_flag) schro_arith_encode_init(&impl, &writeByteCallback, this); else { _chunkStream.reset(_bufWr, _bufSize); schro_arith_encode_init(&impl, &writeChunkCallback, &_chunkStream); } } //------------------------------------------------------------------------ size_t stop() { schro_arith_flush(&impl); if (_cabac_bypass_stream_enabled_flag) { _chunkStream.flush(); return _chunkStream.size(); } return _bufWr - _buf; } //------------------------------------------------------------------------ const char* buffer() { return reinterpret_cast<char*>(_buf); } //------------------------------------------------------------------------ void encode(int bit, SchroContextFixed&) { encode(bit); } //------------------------------------------------------------------------ void encode(int bit) { if (!_cabac_bypass_stream_enabled_flag) { uint16_t probability = 0x8000; // p=0.5 schro_arith_encode_bit(&impl, &probability, bit); return; } _chunkStream.writeBypassBit(bit); } //------------------------------------------------------------------------ void encode(int data, SchroMAryContext& model); //------------------------------------------------------------------------ void encode(int data, InvalidContext& model) { assert(0); } void encode(int bit, SchroContext& model) { schro_arith_encode_bit(&impl, &model.probability, bit); } //------------------------------------------------------------------------ private: static void writeByteCallback(uint8_t byte, void* thisptr) { auto _this = reinterpret_cast<ArithmeticEncoder*>(thisptr); if (_this->_bufSize == 0) throw std::runtime_error("Aec stream overflow"); _this->_bufSize--; *_this->_bufWr++ = byte; } //------------------------------------------------------------------------ static void writeChunkCallback(uint8_t byte, 
void* thisptr) { auto _this = reinterpret_cast<ChunkStreamBuilder*>(thisptr); _this->writeAecByte(byte); } //------------------------------------------------------------------------ private: ::SchroArith impl; uint8_t* _buf; uint8_t* _bufWr; size_t _bufSize; std::unique_ptr<uint8_t[]> allocatedBuffer; // Controls entropy coding method for bypass bins bool _cabac_bypass_stream_enabled_flag = false; ChunkStreamBuilder _chunkStream; }; //========================================================================== class ArithmeticDecoder { public: void setBuffer(size_t size, const char* buffer) { _buffer = reinterpret_cast<const uint8_t*>(buffer); _bufferLen = size; } //------------------------------------------------------------------------ void enableBypassStream(bool cabac_bypass_stream_enabled_flag) { _cabac_bypass_stream_enabled_flag = cabac_bypass_stream_enabled_flag; } //------------------------------------------------------------------------ void start() { if (_cabac_bypass_stream_enabled_flag) { _chunkReader.reset(_buffer, _bufferLen); schro_arith_decode_init(&impl, &readChunkCallback, &_chunkReader); } else { schro_arith_decode_init(&impl, &readByteCallback, this); } } //------------------------------------------------------------------------ void stop() { schro_arith_decode_flush(&impl); } //------------------------------------------------------------------------ // Terminate the arithmetic decoder, and reinitialise to start decoding // the next entropy stream. 
void flushAndRestart() { stop(); if (_cabac_bypass_stream_enabled_flag) { _chunkReader.nextStream(); schro_arith_decode_init(&impl, &readChunkCallback, &_chunkReader); } else { schro_arith_decode_init(&impl, &readByteCallback, this); } } //------------------------------------------------------------------------ int decode(SchroContextFixed&) { return decode(); } //------------------------------------------------------------------------ int decode() { if (!_cabac_bypass_stream_enabled_flag) { uint16_t probability = 0x8000; // p=0.5 return schro_arith_decode_bit(&impl, &probability); } return _chunkReader.readBypassBit(); } //------------------------------------------------------------------------ int decode(SchroMAryContext& model); //------------------------------------------------------------------------ int decode(InvalidContext& model) { assert(0); return 0; } //------------------------------------------------------------------------ int decode(SchroContext& model) { return schro_arith_decode_bit(&impl, &model.probability); } //------------------------------------------------------------------------ private: static uint8_t readByteCallback(void* thisptr) { auto _this = reinterpret_cast<ArithmeticDecoder*>(thisptr); if (!_this->_bufferLen) return 0xff; _this->_bufferLen--; return *_this->_buffer++; } //------------------------------------------------------------------------ static uint8_t readChunkCallback(void* thisptr) { auto _this = reinterpret_cast<ChunkStreamReader*>(thisptr); return _this->readAecByte(); } //------------------------------------------------------------------------ private: ::SchroArith impl; // the user supplied buffer. 
const uint8_t* _buffer; // the length of the user supplied buffer size_t _bufferLen; // Controls entropy coding method for bypass bins bool _cabac_bypass_stream_enabled_flag = false; // Parser for chunked bypass stream representation ChunkStreamReader _chunkReader; }; //========================================================================== } // namespace dirac using StaticBitModel = dirac::SchroContextFixed; using StaticMAryModel = dirac::InvalidContext; using AdaptiveBitModel = dirac::SchroContext; using AdaptiveBitModelFast = dirac::SchroContext; using AdaptiveMAryModel = dirac::SchroMAryContext; //============================================================================ } // namespace pcc
10,757
29.304225
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/entropyutils.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <algorithm> #include <stddef.h> namespace pcc { //============================================================================ // :: Entropy codec interface (Encoder) // // The base class must implement the following methods: // - void setBuffer(size_t size, const uint8_t *buffer); // - void start(); // - size_t stop(); // - void encode(int symbol, StaticBitModel&); // - void encode(int symbol, StaticMAryModel&); // - void encode(int symbol, AdaptiveBitModel&); // - void encode(int symbol, AdaptiveBitModelFast&); // - void encode(int symbol, AdaptiveMAryModel&); template<class Base> class EntropyEncoderWrapper : protected Base { public: using Base::Base; using Base::buffer; using Base::enableBypassStream; using Base::encode; using Base::setBuffer; using Base::start; using Base::stop; //-------------------------------------------------------------------------- // :: encoding / common binarisation methods void encodeExpGolomb(unsigned int symbol, int k, AdaptiveBitModel& bModel1); template<size_t NumPrefixCtx, size_t NumSuffixCtx> void encodeExpGolomb( unsigned int symbol, int k, AdaptiveBitModel (&ctxPrefix)[NumPrefixCtx], AdaptiveBitModel (&ctxSuffix)[NumSuffixCtx]); }; //============================================================================ // :: Entropy codec interface (Decoder) // // The base class must implement the following methods: // - void setBuffer(size_t size, const uint8_t *buffer); // - void start(); // - void stop(); // - int decode(StaticBitModel&); // - int decode(StaticMAryModel&); // - int decode(AdaptiveBitModel&); // - int decode(AdaptiveBitModelFast&); // - int decode(AdaptiveMAryModel&); template<class Base> class EntropyDecoderWrapper : protected Base { public: EntropyDecoderWrapper() : Base() {} using Base::decode; using Base::enableBypassStream; using Base::flushAndRestart; using Base::setBuffer; using Base::start; using Base::stop; //-------------------------------------------------------------------------- 
// :: encoding / common binarisation methods unsigned int decodeExpGolomb(int k, AdaptiveBitModel& bModel1); template<size_t NumPrefixCtx, size_t NumSuffixCtx> unsigned int decodeExpGolomb( int k, AdaptiveBitModel (&ctxPrefix)[NumPrefixCtx], AdaptiveBitModel (&ctxSuffix)[NumSuffixCtx]); }; //============================================================================ // :: Various binarisation forms inline unsigned long IntToUInt(long value) { return (value < 0) ? static_cast<unsigned long>(-1 - (2 * value)) : static_cast<unsigned long>(2 * value); } //---------------------------------------------------------------------------- inline long UIntToInt(unsigned long uiValue) { return (uiValue & 1) ? -(static_cast<long>((uiValue + 1) >> 1)) : (static_cast<long>(uiValue >> 1)); } //---------------------------------------------------------------------------- template<class Base> inline void EntropyEncoderWrapper<Base>::encodeExpGolomb( unsigned int symbol, int k, AdaptiveBitModel& ctxPrefix) { while (1) { if (symbol >= (1u << k)) { encode(1, ctxPrefix); symbol -= (1u << k); k++; } else { encode(0, ctxPrefix); while (k--) encode((symbol >> k) & 1); break; } } } //---------------------------------------------------------------------------- template<class Base> template<size_t NumPrefixCtx, size_t NumSuffixCtx> inline void EntropyEncoderWrapper<Base>::encodeExpGolomb( unsigned int symbol, int k, AdaptiveBitModel (&ctxPrefix)[NumPrefixCtx], AdaptiveBitModel (&ctxSuffix)[NumSuffixCtx]) { constexpr int maxPrefixIdx = NumPrefixCtx - 1; constexpr int maxSuffixIdx = NumSuffixCtx - 1; const int k0 = k; while (symbol >= (1u << k)) { encode(1, ctxPrefix[std::min(maxPrefixIdx, k - k0)]); symbol -= 1u << k; k++; } encode(0, ctxPrefix[std::min(maxPrefixIdx, k - k0)]); while (k--) encode((symbol >> k) & 1, ctxSuffix[std::min(maxSuffixIdx, k)]); } //---------------------------------------------------------------------------- template<class Base> inline unsigned int 
EntropyDecoderWrapper<Base>::decodeExpGolomb( int k, AdaptiveBitModel& ctxPrefix) { unsigned int l; int symbol = 0; int binary_symbol = 0; do { l = decode(ctxPrefix); if (l == 1) { symbol += (1 << k); k++; } } while (l != 0); while (k--) //next binary part if (decode() == 1) { binary_symbol |= (1 << k); } return static_cast<unsigned int>(symbol + binary_symbol); } //---------------------------------------------------------------------------- template<class Base> template<size_t NumPrefixCtx, size_t NumSuffixCtx> inline unsigned int EntropyDecoderWrapper<Base>::decodeExpGolomb( int k, AdaptiveBitModel (&ctxPrefix)[NumPrefixCtx], AdaptiveBitModel (&ctxSuffix)[NumSuffixCtx]) { constexpr int maxPrefixIdx = NumPrefixCtx - 1; constexpr int maxSuffixIdx = NumSuffixCtx - 1; const int k0 = k; unsigned int l; int symbol = 0; int binary_symbol = 0; do { l = decode(ctxPrefix[std::min(maxPrefixIdx, k - k0)]); if (l == 1) { symbol += (1 << k); k++; } } while (l != 0); while (k--) binary_symbol |= decode(ctxSuffix[std::min(maxSuffixIdx, k)]) << k; return static_cast<unsigned int>(symbol + binary_symbol); } //============================================================================ } // namespace pcc
7,386
29.651452
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/frame.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2021, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "frame.h" #include "PCCMisc.h" namespace pcc { //============================================================================ void CloudFrame::setParametersFrom( const SequenceParameterSet& sps, int fixedPointBits) { // How many bits should be preserved during global scaling: // -1: all // n: n bits, limited to all if (fixedPointBits) { int gsFracBits = ilog2(uint32_t(Rational(sps.globalScale).denominator)); if (fixedPointBits < 0) fixedPointBits = gsFracBits; else fixedPointBits = std::min(fixedPointBits, gsFracBits); } this->geometry_axis_order = sps.geometry_axis_order; this->outputFpBits = fixedPointBits; this->outputOrigin = sps.seqBoundingBoxOrigin; this->outputUnitLength = reciprocal(sps.seqGeomScale); this->outputUnit = sps.seq_geom_scale_unit_flag; this->attrDesc = sps.attributeSets; } //============================================================================ void scaleGeometry( PCCPointSet3& cloud, const SequenceParameterSet::GlobalScale& globalScale, int fixedPointFracBits) { // Conversion to rational simplifies the globalScale expression. Rational gs = globalScale; // NB: by definition, gs.denominator is a power of two. int gsDenominatorLog2 = ilog2(uint32_t(gs.denominator)); // appliy fixed-point scaling to numerator, removing common factor gs.numerator <<= std::max(fixedPointFracBits - gsDenominatorLog2, 0); gsDenominatorLog2 = std::max(gsDenominatorLog2 - fixedPointFracBits, 0); gs.denominator = 1 << gsDenominatorLog2; // Nothing to do if scale factor is 1. if (gs.numerator == gs.denominator) return; // The scaling here is equivalent to the fixed-point conformance output size_t numPoints = cloud.getPointCount(); for (size_t i = 0; i < numPoints; i++) { auto& pos = cloud[i]; pos = (pos * gs.numerator + (gs.denominator >> 1)) >> gsDenominatorLog2; } } //============================================================================ } // namespace pcc
3,842
37.049505
78
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/frame.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2021, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <vector> #include "PCCMath.h" #include "PCCPointSet.h" #include "hls.h" namespace pcc { //============================================================================ // Represents a frame in the encoder or decoder. struct CloudFrame { // The value of the decoder's reconstructed FrameCtr for this frame. int frameNum; // Defines the ordering of the position components (eg, xyz vs zyx) AxisOrder geometry_axis_order; // The length of the output cloud's unit vector. // The units of outputUnitLength is given by outputUnit. // // When outputUnit is ScaleUnit::kMetres, outputUnitLength is // measured in metres. // // When outputUnit is ScaleUnit::kDimensionless, outputUnitLength is // measured in units of an external coordinate system. double outputUnitLength; // The unit of the output cloud's unit vector ScaleUnit outputUnit; // The origin of the output cloud // NB: this respects geometry_axis_order. Vec3<int> outputOrigin; // Number of fractional bits in representaiton of cloud.positions. int outputFpBits; // Descriptions of each attribute. Attribute parameters in the description // are only applicable to this frame -- they may change in a subsequent frame std::vector<AttributeDescription> attrDesc; // The output point cloud. The coordinate system is defined by the // other parameters in this structure. // NB: Point positions respect geometry_axis_order. PCCPointSet3 cloud; // Determines parameters according to the sps. void setParametersFrom(const SequenceParameterSet& sps, int fixedPointBits); }; //============================================================================ // Scale point cloud geometry by global scale factor void scaleGeometry( PCCPointSet3& cloud, const SequenceParameterSet::GlobalScale& globalScale, int fixedPointFracBits); //============================================================================ } // namespace pcc
3,800
37.01
79
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/framectr.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2021, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ namespace pcc { //============================================================================ // A free-running frame counter, synchronised by updates class FrameCtr { public: operator int() const { return _frameCtr; } // Update the frame counter using the current value of frame_ctr_lsb. void update(int frame_ctr_lsb, int frame_ctr_lsb_bits); // Query whether frame_ctr_lsb does not match the current frame counter. bool isDifferentFrame(int frame_ctr_lsb, int frame_ctr_lsb_bits) const { return frame_ctr_lsb != (_frameCtr & ((1 << frame_ctr_lsb_bits) - 1)); } private: // The reconstructed frame counter value. int _frameCtr = 0; }; //---------------------------------------------------------------------------- inline void FrameCtr::update(int frame_ctr_lsb, int frame_ctr_lsb_bits) { int window = (1 << frame_ctr_lsb_bits) >> 1; int curLsb = unsigned(_frameCtr) & ((1 << frame_ctr_lsb_bits) - 1); int curMsb = unsigned(_frameCtr) >> frame_ctr_lsb_bits; if (frame_ctr_lsb < curLsb && curLsb - frame_ctr_lsb >= window) curMsb++; else if (frame_ctr_lsb > curLsb && frame_ctr_lsb - curLsb > window) curMsb--; _frameCtr = (curMsb << frame_ctr_lsb_bits) + frame_ctr_lsb; } //============================================================================ } // namespace pcc
3,146
38.835443
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/geometry.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <memory> #include <vector> #include "PCCPointSet.h" #include "geometry_params.h" #include "entropy.h" #include "hls.h" #include "partitioning.h" namespace pcc { //============================================================================ struct GeometryOctreeContexts; struct PredGeomContexts; //============================================================================ void encodeGeometryOctree( const OctreeEncOpts& opt, const GeometryParameterSet& gps, GeometryBrickHeader& gbh, PCCPointSet3& pointCloud, GeometryOctreeContexts& ctxtMem, std::vector<std::unique_ptr<EntropyEncoder>>& arithmeticEncoder); void decodeGeometryOctree( const GeometryParameterSet& gps, const GeometryBrickHeader& gbh, PCCPointSet3& pointCloud, GeometryOctreeContexts& ctxtMem, EntropyDecoder& arithmeticDecoder); void decodeGeometryOctreeScalable( const GeometryParameterSet& gps, const GeometryBrickHeader& gbh, int minGeomNodeSizeLog2, PCCPointSet3& pointCloud, GeometryOctreeContexts& ctxtMem, EntropyDecoder& arithmeticDecoder); //---------------------------------------------------------------------------- void encodeGeometryTrisoup( const OctreeEncOpts& opt, const GeometryParameterSet& gps, GeometryBrickHeader& gbh, PCCPointSet3& pointCloud, GeometryOctreeContexts& ctxtMem, std::vector<std::unique_ptr<EntropyEncoder>>& arithmeticEncoder); void decodeGeometryTrisoup( const GeometryParameterSet& gps, const GeometryBrickHeader& gbh, PCCPointSet3& pointCloud, GeometryOctreeContexts& ctxtMem, EntropyDecoder& arithmeticDecoder); //---------------------------------------------------------------------------- void encodePredictiveGeometry( const PredGeomEncOpts& opt, const GeometryParameterSet& gps, GeometryBrickHeader& gbh, PCCPointSet3& pointCloud, std::vector<Vec3<int32_t>>* reconPosSph, PredGeomContexts& ctxtMem, EntropyEncoder* arithmeticEncoder); void decodePredictiveGeometry( const GeometryParameterSet& gps, const GeometryBrickHeader& gbh, PCCPointSet3& pointCloud, 
std::vector<Vec3<int32_t>>* reconPosSph, PredGeomContexts& ctxtMem, EntropyDecoder& arithmeticDecoder); //============================================================================ } // namespace pcc
4,122
33.940678
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/geometry_intra_pred.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "geometry_intra_pred.h" namespace pcc { //============================================================================ // Mapping of neighbour influence to affected (childIdx + 1). 
// NB: zero indicates no influence. static const int8_t kNeighToChildIdx[26][4] = { {1, 0, 0, 0}, {1, 2, 0, 0}, {2, 0, 0, 0}, {1, 3, 0, 0}, {1, 2, 3, 4}, {2, 4, 0, 0}, {3, 0, 0, 0}, {3, 4, 0, 0}, {4, 0, 0, 0}, {1, 5, 0, 0}, {1, 2, 5, 6}, {2, 6, 0, 0}, {1, 3, 5, 7}, {2, 4, 6, 8}, {3, 7, 0, 0}, {3, 4, 7, 8}, {4, 8, 0, 0}, {5, 0, 0, 0}, {5, 6, 0, 0}, {6, 0, 0, 0}, {5, 7, 0, 0}, {5, 6, 7, 8}, {6, 8, 0, 0}, {7, 0, 0, 0}, {7, 8, 0, 0}, {8, 0, 0, 0}}; //============================================================================ void predictGeometryOccupancyIntra( const MortonMap3D& occupancyAtlas, Vec3<int32_t> pos, const int atlasShift, int* occupancyIsPredicted, int* occupancyPrediction) { uint32_t mask = occupancyAtlas.cubeSize() - 1; int32_t x = pos[0] & mask; int32_t y = pos[1] & mask; int32_t z = pos[2] & mask; int score[8] = {0, 0, 0, 0, 0, 0, 0, 0}; const int8_t* kNeigh = &kNeighToChildIdx[0][0]; int numOccupied = 0; const int shiftX = (atlasShift & 4 ? 1 : 0); const int shiftY = (atlasShift & 2 ? 1 : 0); const int shiftZ = (atlasShift & 1 ? 1 : 0); for (int dx = -1; dx <= 1; dx++) { for (int dy = -1; dy <= 1; dy++) { for (int dz = -1; dz <= 1; dz++) { if (dz == 0 && dy == 0 && dx == 0) continue; // todo(df): remove unnecessary checks bool occupied = occupancyAtlas.getWithCheck( x + dx, y + dy, z + dz, shiftX, shiftY, shiftZ); if (occupied) { numOccupied++; for (int i = 0; i < 4; i++) { if (!kNeigh[i]) break; score[kNeigh[i] - 1]++; } } // next pattern kNeigh += 4; } } } int th0 = 2; int th1 = numOccupied < 14 ? 4 : 5; int occIsPredicted = 0; int occPrediction = 0; for (int i = 0; i < 8; i++) { if (score[i] <= th0) occIsPredicted |= 1 << i; else if (score[i] >= th1) { occIsPredicted |= 1 << i; occPrediction |= 1 << i; } } *occupancyIsPredicted = occIsPredicted; *occupancyPrediction = occPrediction; } //============================================================================ } // namespace pcc
4,219
34.166667
78
cpp
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/geometry_intra_pred.h
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <cstdint> #include "OctreeNeighMap.h" #include "PCCMath.h" namespace pcc { //============================================================================ void predictGeometryOccupancyIntra( const MortonMap3D& occupancyAtlas, Vec3<int32_t> pos, const int atlasShift, int* occupacyIsPredIntra, int* occupacyPredIntra); //============================================================================ } // namespace pcc
2,280
39.017544
78
h
mpeg-pcc-tmc13
mpeg-pcc-tmc13-master/tmc3/geometry_octree.cpp
/* The copyright in this software is being made available under the BSD * Licence, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such * rights are granted under this licence. * * Copyright (c) 2017-2018, ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the ISO/IEC nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "geometry_octree.h" #include <algorithm> #include <iterator> #include "PCCMisc.h" #include "geometry_params.h" #include "quantization.h" #include "tables.h" namespace pcc { //============================================================================ Vec3<int> oneQtBtDecision( const QtBtParameters& qtbt, Vec3<int> nodeSizeLog2, int maxNumQtbtBeforeOt, int minDepthQtbt) { int maxNodeMinDimLog2ToSplitZ = qtbt.angularMaxNodeMinDimLog2ToSplitV; int maxDiffToSplitZ = qtbt.angularMaxDiffToSplitZ; int nodeMinDimLog2 = nodeSizeLog2.min(); if (maxNumQtbtBeforeOt || nodeMinDimLog2 == minDepthQtbt) { int nodeMaxDimLog2 = nodeSizeLog2.max(); for (int k = 0; k < 3; k++) { if (nodeSizeLog2[k] == nodeMaxDimLog2) nodeSizeLog2[k]--; } } else if ( qtbt.angularTweakEnabled && minDepthQtbt >= 0 && nodeSizeLog2[2] <= maxNodeMinDimLog2ToSplitZ && (maxNodeMinDimLog2ToSplitZ + maxDiffToSplitZ > 0)) { // do not split z int nodeXYMaxDimLog2 = std::max(nodeSizeLog2[0], nodeSizeLog2[1]); for (int k = 0; k < 2; k++) { if (nodeSizeLog2[k] == nodeXYMaxDimLog2) nodeSizeLog2[k]--; } if ( (nodeMinDimLog2 <= maxNodeMinDimLog2ToSplitZ && nodeSizeLog2[2] >= nodeXYMaxDimLog2 + maxDiffToSplitZ) || (nodeXYMaxDimLog2 >= maxNodeMinDimLog2ToSplitZ + maxDiffToSplitZ && nodeSizeLog2[2] >= nodeXYMaxDimLog2)) nodeSizeLog2[2]--; } else // octree partition nodeSizeLog2 = nodeSizeLog2 - 1; return nodeSizeLog2; } //--------------------------------------------------------------------------- void updateQtBtParameters( const Vec3<int>& nodeSizeLog2, bool trisoup_enabled_flag, int* maxNumQtbtBeforeOt, int* minSizeQtbt) { int nodeMinDimLog2 = nodeSizeLog2.min(); int nodeMaxDimLog2 = nodeSizeLog2.max(); // max number of qtbt partitions before ot is bounded by difference between // max and min node size if (*maxNumQtbtBeforeOt > (nodeMaxDimLog2 - nodeMinDimLog2)) *maxNumQtbtBeforeOt = nodeMaxDimLog2 - nodeMinDimLog2; // min depth of qtbt partition is bounded by min node size if (*minSizeQtbt > nodeMinDimLog2) 
*minSizeQtbt = nodeMinDimLog2; // if all dimensions have same size, min depth of qtbt should be 0 if (nodeMaxDimLog2 == nodeMinDimLog2) { *minSizeQtbt = 0; } // if trisoup is enabled, perform qtbt first before ot if (trisoup_enabled_flag) { *maxNumQtbtBeforeOt = nodeMaxDimLog2 - nodeMinDimLog2; *minSizeQtbt = 0; } } //--------------------------------------------------------------------------- std::vector<Vec3<int>> mkQtBtNodeSizeList( const GeometryParameterSet& gps, const QtBtParameters& qtbt, const GeometryBrickHeader& gbh) { std::vector<Vec3<int>> nodeSizeLog2List; // size of the current node (each dimension can vary due to qtbt) Vec3<int> nodeSizeLog2 = gbh.rootNodeSizeLog2; nodeSizeLog2List.push_back(nodeSizeLog2); // update qtbt parameters int maxNumQtbtBeforeOt = qtbt.maxNumQtBtBeforeOt; int minSizeQtbt = qtbt.minQtbtSizeLog2; updateQtBtParameters( nodeSizeLog2, qtbt.trisoupEnabled, &maxNumQtbtBeforeOt, &minSizeQtbt); while (!isLeafNode(nodeSizeLog2)) { if (!gps.qtbt_enabled_flag) nodeSizeLog2 -= 1; else nodeSizeLog2 = oneQtBtDecision(qtbt, nodeSizeLog2, maxNumQtbtBeforeOt, minSizeQtbt); nodeSizeLog2List.push_back(nodeSizeLog2); if (maxNumQtbtBeforeOt) maxNumQtbtBeforeOt--; // if all dimensions have same size, then use octree for remaining nodes if ( nodeSizeLog2[0] == minSizeQtbt && nodeSizeLog2[0] == nodeSizeLog2[1] && nodeSizeLog2[1] == nodeSizeLog2[2]) minSizeQtbt = -1; } return nodeSizeLog2List; } //------------------------------------------------------------------------- // map the @occupancy pattern bits to take into account symmetries in the // neighbour configuration @neighPattern. 
// uint8_t mapGeometryOccupancy(uint8_t occupancy, uint8_t neighPattern) { switch (kOccMapRotateZIdFromPatternXY[neighPattern & 15]) { case 1: occupancy = kOccMapRotateZ090[occupancy]; break; case 2: occupancy = kOccMapRotateZ180[occupancy]; break; case 3: occupancy = kOccMapRotateZ270[occupancy]; break; } bool flag_ud = (neighPattern & 16) && !(neighPattern & 32); if (flag_ud) { occupancy = kOccMapMirrorXY[occupancy]; } if (kOccMapRotateYIdFromPattern[neighPattern]) { occupancy = kOccMapRotateY270[occupancy]; } switch (kOccMapRotateXIdFromPattern[neighPattern]) { case 1: occupancy = kOccMapRotateX090[occupancy]; break; case 2: occupancy = kOccMapRotateX270Y180[occupancy]; break; case 3: occupancy = kOccMapRotateX090Y180[occupancy]; break; } return occupancy; } //------------------------------------------------------------------------- // map the @occupancy pattern bits to take into account symmetries in the // neighbour configuration @neighPattern. // uint8_t mapGeometryOccupancyInv(uint8_t occupancy, uint8_t neighPattern) { switch (kOccMapRotateXIdFromPattern[neighPattern]) { case 1: occupancy = kOccMapRotateX270[occupancy]; break; case 2: occupancy = kOccMapRotateX270Y180[occupancy]; break; case 3: occupancy = kOccMapRotateX090Y180[occupancy]; break; } if (kOccMapRotateYIdFromPattern[neighPattern]) { occupancy = kOccMapRotateY090[occupancy]; } bool flag_ud = (neighPattern & 16) && !(neighPattern & 32); if (flag_ud) { occupancy = kOccMapMirrorXY[occupancy]; } switch (kOccMapRotateZIdFromPatternXY[neighPattern & 15]) { case 1: occupancy = kOccMapRotateZ270[occupancy]; break; case 2: occupancy = kOccMapRotateZ180[occupancy]; break; case 3: occupancy = kOccMapRotateZ090[occupancy]; break; } return occupancy; } //============================================================================ // Derive the neighbour pattern for the three siblings of a node // from the parent's occupancy byte. // // @param pos index of the node in the occupancy scan order. 
// @param occupancy  occupancy byte of the parent node
//
// @returns the six-neighbour pattern.

int
neighPatternFromOccupancy(int pos, int occupancy)
{
  /* The following maps the three neighbours of a child at position pos
   * to form a six-neighbour pattern from occupancy:
   *    pos | occupancy | neighpat
   *    xyz |  76543210 |  udfblr
   *    000 |  ...r.fu. |  1.2..4
   *    001 |  ..r.f..d |  .03..5
   *    010 |  .r..u..b |  3..0.6
   *    011 |  r....db. |  .2.1.7
   *    100 |  .fu....l |  5.6.0.
   *    101 |  f..d..l. |  .47.1.
   *    110 |  u..b.l.. |  7..42.
   *    111 |  .db.l... |  .6.53.
   */
  // XORing pos with 4/2/1 selects the sibling adjacent along x/y/z; the
  // shift amount places the bit on the correct side of each axis pair.
  int neighPat = 0;
  neighPat |= ((occupancy >> (pos ^ 4)) & 1) << (0 + ((pos >> 2) & 1));   // x
  neighPat |= ((occupancy >> (pos ^ 2)) & 1) << (2 + ((~pos >> 1) & 1));  // y
  neighPat |= ((occupancy >> (pos ^ 1)) & 1) << (4 + ((~pos >> 0) & 1));  // z
  return neighPat;
}

//============================================================================
// Copy construction: the context table lives behind a unique_ptr, so the
// pointee must be cloned, not the pointer.  Delegates to the default
// constructor to allocate the map and set up the b[] row pointers.

CtxMapOctreeOccupancy::CtxMapOctreeOccupancy(const CtxMapOctreeOccupancy& rhs)
  : CtxMapOctreeOccupancy()
{
  *this->map = *rhs.map;
}

//----------------------------------------------------------------------------

CtxMapOctreeOccupancy::CtxMapOctreeOccupancy(CtxMapOctreeOccupancy&& rhs)
{
  std::swap(this->map, rhs.map);
  std::swap(this->b, rhs.b);
}

//----------------------------------------------------------------------------

CtxMapOctreeOccupancy&
CtxMapOctreeOccupancy::operator=(const CtxMapOctreeOccupancy& rhs)
{
  // deep copy of the context table; b[] row pointers keep pointing into
  // this object's own map
  *this->map = *rhs.map;
  return *this;
}

//----------------------------------------------------------------------------

CtxMapOctreeOccupancy&
CtxMapOctreeOccupancy::operator=(CtxMapOctreeOccupancy&& rhs)
{
  std::swap(this->map, rhs.map);
  std::swap(this->b, rhs.b);
  return *this;
}

//----------------------------------------------------------------------------
// Allocate the context table, expose its eight rows through b[], and
// initialise every context entry to the mid-scale value 127.

CtxMapOctreeOccupancy::CtxMapOctreeOccupancy()
{
  map.reset(new CtxIdxMap);
  b[0] = map->b0;
  b[1] = map->b1;
  b[2] = map->b2;
  b[3] = map->b3;
  b[4] = map->b4;
  b[5] = map->b5;
  b[6] = map->b6;
  b[7] = map->b7;
  using std::begin;
  using std::end;
  std::fill(begin(map->b0), end(map->b0), 127);
  std::fill(begin(map->b1), end(map->b1), 127);
  std::fill(begin(map->b2), end(map->b2), 127);
  std::fill(begin(map->b3), end(map->b3), 127);
  std::fill(begin(map->b4), end(map->b4), 127);
  std::fill(begin(map->b5), end(map->b5), 127);
  std::fill(begin(map->b6), end(map->b6), 127);
  std::fill(begin(map->b7), end(map->b7), 127);
}

//============================================================================
// Build a 32-bit enable mask for inferred direct coding mode (IDCM):
// bit i is set whenever the accumulated rate (geom_idcm_rate_minus1 + 1
// per step, modulo 32) crosses 32 at step i, spacing the enabled bits
// according to the configured rate.

uint32_t
mkIdcmEnableMask(const GeometryParameterSet& gps)
{
  if (!gps.inferred_direct_coding_mode)
    return 0;

  // intense IDCM requires idcm to be enabled all the time
  if (gps.inferred_direct_coding_mode != 1)
    return 0xffffffff;

  // if planar is disabled, there is no control over the rate
  if (!gps.geom_planar_mode_enabled_flag)
    return 0xffffffff;

  int mask = 0, acc = 0;
  for (int i = 0; i < 32; i++) {
    acc += gps.geom_idcm_rate_minus1 + 1;
    mask |= (acc >= 32) << i;
    acc &= 0x1f;
  }

  return mask;
}

//============================================================================
// determine if a 222 block is planar
//
// For each axis, plane0/plane1 record whether the low/high half of the
// occupancy byte (split normal to that axis) is occupied; the node is
// planar along an axis when exactly one of the two halves is occupied.

void
setPlanesFromOccupancy(int occupancy, OctreeNodePlanar& planar)
{
  uint8_t plane0 = 0;
  plane0 |= !!(occupancy & 0x0f) << 0;
  plane0 |= !!(occupancy & 0x33) << 1;
  plane0 |= !!(occupancy & 0x55) << 2;

  uint8_t plane1 = 0;
  plane1 |= !!(occupancy & 0xf0) << 0;
  plane1 |= !!(occupancy & 0xcc) << 1;
  plane1 |= !!(occupancy & 0xaa) << 2;

  // Only planar if a single plane normal to an axis is occupied
  planar.planarMode = plane0 ^ plane1;
  planar.planePosBits = planar.planarMode & plane1;
}

//============================================================================
// :: Default planar buffer methods

OctreePlanarBuffer::OctreePlanarBuffer() = default;
OctreePlanarBuffer::OctreePlanarBuffer(OctreePlanarBuffer&& rhs) = default;
OctreePlanarBuffer::~OctreePlanarBuffer() = default;

OctreePlanarBuffer&
OctreePlanarBuffer::operator=(OctreePlanarBuffer&& rhs) = default;
//----------------------------------------------------------------------------
// :: Copying the planar buffer

OctreePlanarBuffer::OctreePlanarBuffer(const OctreePlanarBuffer& rhs)
{
  *this = rhs;
}

//----------------------------------------------------------------------------

OctreePlanarBuffer&
OctreePlanarBuffer::operator=(const OctreePlanarBuffer& rhs)
{
  _buf = rhs._buf;
  _col = rhs._col;
  // Adjust the column offsets to the new base address: _col was copied
  // from rhs and still points into rhs._buf's storage.
  auto oldBase = _col[0];
  auto newBase = reinterpret_cast<Row*>(&_buf.front());
  for (auto& ptr : _col)
    ptr = ptr - oldBase + newBase;
  return *this;
}

//----------------------------------------------------------------------------
// :: Planar buffer management
//
// Resize the backing store to hold the requested number of rows per axis
// (each clamped to maskC + 1) and rebuild the three column base pointers.

void
OctreePlanarBuffer::resize(Vec3<int> numBufferRows)
{
  if (maskC < numBufferRows[0])
    numBufferRows[0] = maskC + 1;
  if (maskC < numBufferRows[1])
    numBufferRows[1] = maskC + 1;
  if (maskC < numBufferRows[2])
    numBufferRows[2] = maskC + 1;

  // NB: based upon the expected max buffer size of 3*14k, just allocate the
  // maximum buffer size.
  int size = numBufferRows[0] + numBufferRows[1] + numBufferRows[2];
  _buf.clear();
  _buf.reserve(rowSize * 3 * (maskC + 1));
  // entries start as {0, -2} sentinels
  _buf.resize(rowSize * size, Elmt{0, -2});

  // NB: the flat backing buffer is cast with a row stride for access
  _col[0] = reinterpret_cast<Row*>(&_buf.front());
  _col[1] = _col[0] + numBufferRows[0];
  _col[2] = _col[1] + numBufferRows[1];
}

//----------------------------------------------------------------------------

void
OctreePlanarBuffer::clear()
{
  _buf.clear();
  _col = {nullptr, nullptr, nullptr};
}

//============================================================================
// initialize planes for planar pred

OctreePlanarState::OctreePlanarState(const GeometryParameterSet& gps)
{
  _planarBufferEnabled =
    gps.geom_planar_mode_enabled_flag && !gps.planar_buffer_disabled_flag;
  // NOTE(review): thresholds are scaled by 16 here — presumably to match
  // the fixed-point scale of _rate; confirm against updateRate().
  _rateThreshold[0] = gps.geom_planar_threshold0 << 4;
  _rateThreshold[1] = gps.geom_planar_threshold1 << 4;
  _rateThreshold[2] = gps.geom_planar_threshold2 << 4;
}

// Size the planar buffer for the given per-axis depths: one row per
// possible node coordinate (2^depth) along each axis.
void
OctreePlanarState::initPlanes(const Vec3<int>& depthXyz)
{
  if (!_planarBufferEnabled)
    return;

  Vec3<int> numBufferRows = {
    1 << depthXyz[0], 1 << depthXyz[1], 1 << depthXyz[2]};
  _planarBuffer.resize(numBufferRows);
}

//============================================================================
// update the plane rate depending on the occupancy

void
OctreePlanarState::updateRate(int occupancy, int numSiblings)
{
  // planar along an axis <=> at most one of the two occupancy-byte halves
  // (split normal to that axis) is occupied
  bool isPlanarX = !((occupancy & 0xf0) && (occupancy & 0x0f));
  bool isPlanarY = !((occupancy & 0xcc) && (occupancy & 0x33));
  bool isPlanarZ = !((occupancy & 0x55) && (occupancy & 0xaa));

  // decaying average (factor 255/256) of the observed planar rate,
  // with +128 for rounding
  _rate[0] = (255 * _rate[0] + (isPlanarX ? 256 * 8 : 0) + 128) >> 8;
  _rate[1] = (255 * _rate[1] + (isPlanarY ? 256 * 8 : 0) + 128) >> 8;
  _rate[2] = (255 * _rate[2] + (isPlanarZ ?
256 * 8 : 0) + 128) >> 8;

  // steady state of this average is 1024 * (mean sibling count)
  _localDensity = (255 * _localDensity + 1024 * numSiblings) >> 8;
}

//============================================================================
// planar eligibility
//
// Fill eligible[] per axis.  Nodes in dense regions (average of three or
// more occupied children per parent) are never eligible; otherwise the
// axes are ranked by observed planar rate and the dominant axis is tested
// against _rateThreshold[0], the second against [1], the last against [2].

void
OctreePlanarState::isEligible(bool eligible[3])
{
  eligible[0] = false;
  eligible[1] = false;
  eligible[2] = false;
  if (_localDensity >= 3 * 1024) {
    return;
  }

  if (_rate[0] >= _rate[1] && _rate[0] >= _rate[2]) {
    // planar x dominates
    eligible[0] = _rate[0] >= _rateThreshold[0];
    if (_rate[1] >= _rate[2]) {
      eligible[1] = _rate[1] >= _rateThreshold[1];
      eligible[2] = _rate[2] >= _rateThreshold[2];
    } else {
      eligible[2] = _rate[2] >= _rateThreshold[1];
      eligible[1] = _rate[1] >= _rateThreshold[2];
    }
  } else if (_rate[1] >= _rate[0] && _rate[1] >= _rate[2]) {
    // planar y dominates
    eligible[1] = _rate[1] >= _rateThreshold[0];
    if (_rate[0] >= _rate[2]) {
      eligible[0] = _rate[0] >= _rateThreshold[1];
      eligible[2] = _rate[2] >= _rateThreshold[2];
    } else {
      eligible[2] = _rate[2] >= _rateThreshold[1];
      eligible[0] = _rate[0] >= _rateThreshold[2];
    }
  } else if (_rate[2] >= _rate[0] && _rate[2] >= _rate[1]) {
    // planar z dominates
    eligible[2] = _rate[2] >= _rateThreshold[0];
    if (_rate[0] >= _rate[1]) {
      eligible[0] = _rate[0] >= _rateThreshold[1];
      eligible[1] = _rate[1] >= _rateThreshold[2];
    } else {
      eligible[1] = _rate[1] >= _rateThreshold[1];
      eligible[0] = _rate[0] >= _rateThreshold[2];
    }
  }
}

//----------------------------------------------------------------------------

OctreePlanarState::OctreePlanarState(const OctreePlanarState& rhs)
{
  *this = rhs;
}

//----------------------------------------------------------------------------

OctreePlanarState::OctreePlanarState(OctreePlanarState&& rhs)
{
  *this = std::move(rhs);
}

//----------------------------------------------------------------------------

OctreePlanarState&
OctreePlanarState::operator=(const OctreePlanarState& rhs)
{
  _planarBuffer = rhs._planarBuffer;
  _rate = rhs._rate;
  _localDensity = rhs._localDensity;
  _rateThreshold =
rhs._rateThreshold; return *this; } //---------------------------------------------------------------------------- OctreePlanarState& OctreePlanarState::operator=(OctreePlanarState&& rhs) { _planarBuffer = std::move(rhs._planarBuffer); _rate = std::move(rhs._rateThreshold); _localDensity = std::move(rhs._localDensity); _rateThreshold = std::move(rhs._rateThreshold); return *this; } //============================================================================ // directional mask depending on the planarity int maskPlanarX(const OctreeNodePlanar& planar) { if ((planar.planarMode & 1) == 0) return 0; return (planar.planePosBits & 1) ? 0x0f : 0xf0; } //---------------------------------------------------------------------------- int maskPlanarY(const OctreeNodePlanar& planar) { if ((planar.planarMode & 2) == 0) return 0; return (planar.planePosBits & 2) ? 0x33 : 0xcc; } //---------------------------------------------------------------------------- int maskPlanarZ(const OctreeNodePlanar& planar) { if ((planar.planarMode & 4) == 0) return 0; return (planar.planePosBits & 4) ? 0x55 : 0xaa; } //---------------------------------------------------------------------------- // three direction mask void maskPlanar(OctreeNodePlanar& planar, int mask[3], int codedAxes) { for (int k = 0; k <= 2; k++) { // QTBT does not split in this direction // => infer the mask low for occupancy bit coding if (!(codedAxes & (4 >> k))) { planar.planePosBits &= ~(1 << k); planar.planarMode |= 1 << k; } } mask[0] = maskPlanarX(planar); mask[1] = maskPlanarY(planar); mask[2] = maskPlanarZ(planar); } //---------------------------------------------------------------------------- // determine angular context for planar integer implementation. 
// Determine the angular (theta) coding context for |node| using an
// integer-only implementation.
//
// Returns -1 when the node is not eligible for angular coding, otherwise
// a context index: bit0 is the sign of the laser/node theta delta, and
// +2 flags that the laser angle falls outside the node's vertical extent.
// Side effects: may refine node.laserIndex, and writes an azimuthal
// context into *contextAnglePhiX or *contextAnglePhiY.
int
determineContextAngleForPlanar(
  PCCOctree3Node& node,
  const Vec3<int>& nodeSizeLog2,
  const Vec3<int>& angularOrigin,
  const int* zLaser,
  const int* thetaLaser,
  const int numLasers,
  int deltaAngle,
  const AzimuthalPhiZi& phiZi,
  int* phiBuffer,
  int* contextAnglePhiX,
  int* contextAnglePhiY,
  Vec3<uint32_t> quantMasks)
{
  Vec3<int> nodePos = node.pos << nodeSizeLog2;
  Vec3<int> midNode = (1 << nodeSizeLog2) >> 1;
  Vec3<int> nodeSize = 1 << nodeSizeLog2;

  // undo geometry quantisation when the node carries a qp
  if (node.qp) {
    OctreeAngPosScaler quant(node.qp, quantMasks);
    nodePos = quant.scaleNs(nodePos);
    midNode = quant.scaleNs(midNode);
    nodeSize = quant.scaleNs(nodeSize);
  }

  // eligibility
  auto nodePosLidar = nodePos - angularOrigin;
  uint64_t xLidar = std::abs(((nodePosLidar[0] + midNode[0]) << 8) - 128);
  uint64_t yLidar = std::abs(((nodePosLidar[1] + midNode[1]) << 8) - 128);

  // L1 approximation of the radius; reject when the inter-laser angular
  // spacing is too small relative to the node's vertical size
  uint64_t rL1 = (xLidar + yLidar) >> 1;
  uint64_t deltaAngleR = deltaAngle * rL1;
  if (numLasers > 1 && deltaAngleR <= uint64_t(midNode[2]) << 26)
    return -1;

  // determine inverse of r  (1/sqrt(r2) = irsqrt(r2))
  uint64_t r2 = xLidar * xLidar + yLidar * yLidar;
  uint64_t rInv = irsqrt(r2);

  // determine non-corrected theta
  int64_t zLidar = ((nodePosLidar[2] + midNode[2]) << 1) - 1;
  int64_t theta = zLidar * rInv;
  // arithmetic shift toward zero for negative values
  int theta32 = theta >= 0 ?
theta >> 15 : -((-theta) >> 15);

  // determine laser: reuse the cached node.laserIndex unless it is unset
  // (255) or the node is small enough to warrant a refined search
  int laserIndex = int(node.laserIndex);
  if (numLasers == 1)
    laserIndex = 0;
  else if (laserIndex == 255 || deltaAngleR <= uint64_t(midNode[2]) << 28) {
    // nearest laser angle by binary search over thetaLaser[1..numLasers-2]
    auto end = thetaLaser + numLasers - 1;
    auto it = std::upper_bound(thetaLaser + 1, end, theta32);
    if (theta32 - *std::prev(it) <= *it - theta32)
      --it;

    laserIndex = std::distance(thetaLaser, it);
    node.laserIndex = uint8_t(laserIndex);
  }

  // -- PHI --
  //angles
  int posx = nodePosLidar[0];
  int posy = nodePosLidar[1];
  int phiNode = iatan2(posy + midNode[1], posx + midNode[0]);
  int phiNode0 = iatan2(posy, posx);

  // find predictor (0x80000000 marks an empty buffer slot)
  int predPhi = phiBuffer[laserIndex];
  if (predPhi == 0x80000000)
    predPhi = phiNode;

  // use predictor
  if (predPhi != 0x80000000) {
    // elementary shift predictor: snap the prediction to the nearest
    // multiple of the laser's azimuthal step
    int Nshift =
      ((predPhi - phiNode) * phiZi.invDelta(laserIndex) + (1 << 29)) >> 30;
    predPhi -= phiZi.delta(laserIndex) * Nshift;

    // ctx azimutal x or y
    int angleL = phiNode0 - predPhi;
    int angleR = phiNode - predPhi;
    int contextAnglePhi =
      (angleL >= 0 && angleR >= 0) || (angleL < 0 && angleR < 0) ? 2 : 0;
    angleL = std::abs(angleL);
    angleR = std::abs(angleR);
    if (angleL > angleR) {
      contextAnglePhi++;
      std::swap(angleL, angleR);
    }
    if (angleR > (angleL << 2))
      contextAnglePhi += 4;

    // route the context to the axis closest to the node's azimuth
    if (std::abs(posx) <= std::abs(posy))
      *contextAnglePhiX = contextAnglePhi;
    else
      *contextAnglePhiY = contextAnglePhi;
  }

  // -- THETA --
  int thetaLaserDelta = thetaLaser[laserIndex] - theta32;
  // correct for the laser head's vertical offset zLaser
  int64_t hr = zLaser[laserIndex] * rInv;
  thetaLaserDelta += hr >= 0 ? -(hr >> 17) : ((-hr) >> 17);

  // angular half-extent of the node in the same fixed-point scale
  int64_t zShift = (rInv * nodeSize[2]) >> 20;
  int thetaLaserDeltaBot = thetaLaserDelta + zShift;
  int thetaLaserDeltaTop = thetaLaserDelta - zShift;
  int contextAngle = thetaLaserDelta >= 0 ?
0 : 1; if (thetaLaserDeltaTop >= 0) contextAngle += 2; else if (thetaLaserDeltaBot < 0) contextAngle += 2; return contextAngle; } //============================================================================ int findLaser(pcc::point_t point, const int* thetaList, const int numTheta) { if (numTheta == 1) return 0; int64_t xLidar = int64_t(point[0]) << 8; int64_t yLidar = int64_t(point[1]) << 8; int64_t rInv = irsqrt(xLidar * xLidar + yLidar * yLidar); int theta32 = (point[2] * rInv) >> 14; auto end = thetaList + numTheta - 1; auto it = std::upper_bound(thetaList + 1, end, theta32); if (theta32 - *std::prev(it) <= *it - theta32) --it; return std::distance(thetaList, it); } //============================================================================ } // namespace pcc
23,050
29.612218
78
cpp