repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
Comp2Comp | Comp2Comp-master/comp2comp/io/io_utils.py | """
@author: louisblankemeier
"""
import os
import nibabel as nib
def find_dicom_files(input_path):
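"""Return the input path itself if it is a file, otherwise all .dcm/.dicom
files found recursively under the directory."""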
dicom_series = []
if not os.path.isdir(input_path):
dicom_series = [str(os.path.abspath(input_path))]
else:
for root, _, files in os.walk(input_path):
for file in files:
if file.endswith(".dcm") or file.endswith(".dicom"):
dicom_series.append(os.path.join(root, file))
return dicom_series
def get_dicom_paths_and_num(path):
"""
Get all paths under a path that contain only dicom files.
Args:
path (str): Path to search.
Returns:
list: List of (path, number of DICOM files) tuples.
"""
dicom_paths = []
for root, _, files in os.walk(path):
if len(files) > 0:
if all(file.endswith(".dcm") or file.endswith(".dicom") for file in files):
dicom_paths.append((root, len(files)))
if len(dicom_paths) == 0:
raise ValueError("No scans were found in:\n" + path)
return dicom_paths
def get_dicom_or_nifti_paths_and_num(path):
"""Get all paths under a path that contain only dicom files or a nifti file.
Args:
path (str): Path to search.
Returns:
list: List of (path, number of slices) tuples.
"""
if path.endswith(".nii") or path.endswith(".nii.gz"):
return [(path, 1)]
dicom_nifti_paths = []
for root, _, files in os.walk(path):
if len(files) > 0:
if all(file.endswith(".dcm") or file.endswith(".dicom") for file in files):
dicom_nifti_paths.append((root, len(files)))
else:
for file in files:
if file.endswith(".nii") or file.endswith(".nii.gz"):
num_slices = nib.load(os.path.join(root, file)).shape[2]
dicom_nifti_paths.append((os.path.join(root, file), num_slices))
return dicom_nifti_paths
| 1,883 | 28.904762 | 88 | py |
Comp2Comp | Comp2Comp-master/comp2comp/aortic_calcium/visualization.py | import os
import numpy as np
from comp2comp.inference_class_base import InferenceClass
class AorticCalciumVisualizer(InferenceClass):
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline):
self.output_dir = inference_pipeline.output_dir
self.output_dir_images_organs = os.path.join(self.output_dir, "images/")
inference_pipeline.output_dir_images_organs = self.output_dir_images_organs
if not os.path.exists(self.output_dir_images_organs):
os.makedirs(self.output_dir_images_organs)
return {}
class AorticCalciumPrinter(InferenceClass):
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline):
metrics = inference_pipeline.metrics
inference_pipeline.csv_output_dir = os.path.join(inference_pipeline.output_dir, "metrics")
os.makedirs(inference_pipeline.csv_output_dir, exist_ok=True)
with open(
os.path.join(inference_pipeline.csv_output_dir, "aortic_calcification.csv"), "w"
) as f:
f.write("Volume (cm^3),Mean HU,Median HU,Max HU\n")
for vol, mean, median, max_hu in zip(
metrics["volume"], metrics["mean_hu"], metrics["median_hu"], metrics["max_hu"]
):
f.write("{},{:.1f},{:.1f},{:.1f}\n".format(vol, mean, median, max_hu))
with open(
os.path.join(inference_pipeline.csv_output_dir, "aortic_calcification_total.csv"), "w"
) as f:
f.write("Total number,{}\n".format(metrics["num_calc"]))
f.write("Total volume (cm^3),{}\n".format(metrics["volume_total"]))
distance = 25
print("\n")
if metrics["num_calc"] == 0:
print("No aortic calcifications were found.")
else:
print("Statistics on aortic calcifications:")
print("{:<{}}{}".format("Total number:", distance, metrics["num_calc"]))
print("{:<{}}{:.3f}".format("Total volume (cm³):", distance, metrics["volume_total"]))
print(
"{:<{}}{:.1f}+/-{:.1f}".format(
"Mean HU:", distance, np.mean(metrics["mean_hu"]), np.std(metrics["mean_hu"])
)
)
print(
"{:<{}}{:.1f}+/-{:.1f}".format(
"Median HU:",
distance,
np.mean(metrics["median_hu"]),
np.std(metrics["median_hu"]),
)
)
print(
"{:<{}}{:.1f}+/-{:.1f}".format(
"Max HU:", distance, np.mean(metrics["max_hu"]), np.std(metrics["max_hu"])
)
)
print(
"{:<{}}{:.3f}+/-{:.3f}".format(
"Mean volume (cm³):",
distance,
np.mean(metrics["volume"]),
np.std(metrics["volume"]),
)
)
print(
"{:<{}}{:.3f}".format(
"Median volume (cm³):", distance, np.median(metrics["volume"])
)
)
print("{:<{}}{:.3f}".format("Max volume (cm³):", distance, np.max(metrics["volume"])))
print("{:<{}}{:.3f}".format("Min volume (cm³):", distance, np.min(metrics["volume"])))
print("\n")
return {}
| 3,395 | 34.747368 | 98 | py |
Comp2Comp | Comp2Comp-master/comp2comp/aortic_calcium/aortic_calcium.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 20 20:36:05 2023
@author: maltejensen
"""
import os
import time
from pathlib import Path
from typing import Union
import numpy as np
from scipy import ndimage
from totalsegmentator.libs import (
download_pretrained_weights,
nostdout,
setup_nnunet,
)
from comp2comp.inference_class_base import InferenceClass
class AortaSegmentation(InferenceClass):
"""Aorta segmentation."""
def __init__(self):
super().__init__()
# self.input_path = input_path
def __call__(self, inference_pipeline):
# inference_pipeline.dicom_series_path = self.input_path
self.output_dir = inference_pipeline.output_dir
self.output_dir_segmentations = os.path.join(self.output_dir, "segmentations/")
inference_pipeline.output_dir_segmentations = os.path.join(
self.output_dir, "segmentations/"
)
if not os.path.exists(self.output_dir_segmentations):
os.makedirs(self.output_dir_segmentations)
self.model_dir = inference_pipeline.model_dir
mv, seg = self.aorta_seg(
os.path.join(self.output_dir_segmentations, "converted_dcm.nii.gz"),
self.output_dir_segmentations + "organs.nii.gz",
inference_pipeline.model_dir,
)
# the medical volume is already set by the spine segmentation model
# the toCanonical method looks for "segmentation", so it's overridden
inference_pipeline.spine_segmentation = inference_pipeline.segmentation
inference_pipeline.segmentation = seg
return {}
def aorta_seg(self, input_path: Union[str, Path], output_path: Union[str, Path], model_dir):
"""Run organ segmentation.
Args:
input_path (Union[str, Path]): Input path.
output_path (Union[str, Path]): Output path.
"""
print("Segmenting aorta...")
st = time.time()
os.environ["SCRATCH"] = self.model_dir
# Setup nnunet
model = "3d_fullres"
folds = [0]
trainer = "nnUNetTrainerV2_ep4000_nomirror"
crop_path = None
task_id = [251]
setup_nnunet()
download_pretrained_weights(task_id[0])
from totalsegmentator.nnunet import nnUNet_predict_image
with nostdout():
seg, mvs = nnUNet_predict_image(
input_path,
output_path,
task_id,
model=model,
folds=folds,
trainer=trainer,
tta=False,
multilabel_image=True,
resample=1.5,
crop=None,
crop_path=crop_path,
task_name="total",
nora_tag="None",
preview=False,
nr_threads_resampling=1,
nr_threads_saving=6,
quiet=False,
verbose=True,
test=0,
)
end = time.time()
# Log total time for aorta segmentation
print(f"Total time for aorta segmentation: {end-st:.2f}s.")
return seg, mvs
class AorticCalciumSegmentation(InferenceClass):
"""Segmentaiton of aortic calcium"""
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline):
ct = inference_pipeline.medical_volume.get_fdata()
aorta_mask = inference_pipeline.segmentation.get_fdata() == 7
spine_mask = inference_pipeline.spine_segmentation.get_fdata() > 0
inference_pipeline.calc_mask = self.detectCalcifications(
ct, aorta_mask, exclude_mask=spine_mask, remove_size=3
)
self.output_dir = inference_pipeline.output_dir
self.output_dir_images_organs = os.path.join(self.output_dir, "images/")
inference_pipeline.output_dir_images_organs = self.output_dir_images_organs
if not os.path.exists(self.output_dir_images_organs):
os.makedirs(self.output_dir_images_organs)
# np.save(os.path.join(self.output_dir_images_organs, 'ct.npy'), ct)
# np.save(os.path.join(self.output_dir_images_organs, "aorta_mask.npy"), aorta_mask)
# np.save(os.path.join(self.output_dir_images_organs, "spine_mask.npy"), spine_mask)
# np.save(
# os.path.join(self.output_dir_images_organs, "calcium_mask.npy"),
# inference_pipeline.calc_mask,
# )
# np.save(
# os.path.join(self.output_dir_images_organs, "ct_scan.npy"),
# inference_pipeline.medical_volume.get_fdata(),
# )
return {}
def detectCalcifications(
self,
ct,
aorta_mask,
exclude_mask=None,
return_dilated_mask=False,
dilation=(3, 1),
dilation_iteration=4,
return_dilated_exclude=False,
dilation_exclude_mask=(3, 1),
dilation_iteration_exclude=3,
show_time=False,
num_std=3,
remove_size=None,
verbose=False,
exclude_center_aorta=True,
return_eroded_aorta=False,
aorta_erode_iteration=6,
):
"""
Function that takes in a CT image and an aorta segmentation (and optionally volumes to
use for exclusion of structures), and returns a mask of the segmented calcifications
(and optionally other volumes). The calcium threshold is adaptive and uses the median
of the CT values inside the aorta together with the point one standard deviation below
the median, as this is more stable. num_std is multiplied by the distance between the
median and the one-standard-deviation mark, and can be used to control the threshold.
Args:
ct (array): CT image.
aorta_mask (array): Mask of the aorta.
exclude_mask (array, optional):
Mask for structures to exclude e.g. spine. Defaults to None.
return_dilated_mask (bool, optional):
Return the dilated aorta mask. Defaults to False.
dilation (list, optional):
Structuring element for aorta dilation. Defaults to (3,1).
dilation_iteration (int, optional):
Number of iterations for the structuring element. Defaults to 4.
return_dilated_exclude (bool, optional):
Return the dilated exclusion mask. Defaults to False.
dilation_exclude_mask (list, optional):
Structuring element for the exclusion mask. Defaults to (3,1).
dilation_iteration_exclude (int, optional):
Number of iterations for the structuring element. Defaults to 3.
show_time (bool, optional):
Show time for each operation. Defaults to False.
num_std (float, optional):
How many standard deviations out the threshold will be set at. Defaults to 3.
remove_size (int, optional):
Remove foci under a certain size. Warning: quite slow. Defaults to None.
verbose (bool, optional):
Give verbose feedback on operations. Defaults to False.
exclude_center_aorta (bool, optional):
Use eroded aorta to exclude center of the aorta. Defaults to True.
return_eroded_aorta (bool, optional):
Return the eroded center aorta. Defaults to False.
aorta_erode_iteration (int, optional):
Number of iterations for the structuring element. Defaults to 6.
Returns:
results: array if only the mask is returned, or dict if other volumes are also returned.
"""
def slicedDilationOrErosion(input_mask, struct, num_iteration, operation):
"""
Perform the dilation or erosion on the smallest in-plane crop that
contains the segmentation, for speed.
"""
margin = 2 if num_iteration is None else num_iteration + 1
x_idx = np.where(input_mask.sum(axis=(1, 2)))[0]
x_start, x_end = x_idx[0] - margin, x_idx[-1] + margin
y_idx = np.where(input_mask.sum(axis=(0, 2)))[0]
y_start, y_end = y_idx[0] - margin, y_idx[-1] + margin
if operation == "dilate":
mask_slice = ndimage.binary_dilation(
input_mask[x_start:x_end, y_start:y_end, :], structure=struct
).astype(np.int8)
elif operation == "erode":
mask_slice = ndimage.binary_erosion(
input_mask[x_start:x_end, y_start:y_end, :], structure=struct
).astype(np.int8)
output_mask = input_mask.copy()
output_mask[x_start:x_end, y_start:y_end, :] = mask_slice
return output_mask
# remove parts that are not the abdominal aorta
labelled_aorta, num_classes = ndimage.label(aorta_mask)
if num_classes > 1:
if verbose:
print("Removing {} parts".format(num_classes - 1))
aorta_vols = []
for i in range(1, num_classes + 1):
aorta_vols.append((labelled_aorta == i).sum())
biggest_idx = np.argmax(aorta_vols) + 1
aorta_mask[labelled_aorta != biggest_idx] = 0
# Get aortic CT point to set adaptive threshold
aorta_ct_points = ct[aorta_mask == 1]
# the 0.158 quantile corresponds to one standard deviation below the median for a normal distribution
quant = 0.158
quantile_median_dist = np.median(aorta_ct_points) - np.quantile(aorta_ct_points, q=quant)
calc_thres = np.median(aorta_ct_points) + quantile_median_dist * num_std
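# e.g. (hypothetical values) a median of 60 HU and a 15.8th-percentile of 30 HU
# give quantile_median_dist = 30, so num_std = 3 puts the threshold at
# 60 + 3 * 30 = 150 HU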
t0 = time.time()
if dilation is not None:
struct = ndimage.generate_binary_structure(*dilation)
if dilation_iteration is not None:
struct = ndimage.iterate_structure(struct, dilation_iteration)
aorta_dilated = slicedDilationOrErosion(
aorta_mask, struct=struct, num_iteration=dilation_iteration, operation="dilate"
)
if show_time:
print("dilation mask time: {:.2f}".format(time.time() - t0))
t0 = time.time()
calc_mask = np.logical_and(aorta_dilated == 1, ct >= calc_thres)
if show_time:
print("find calc time: {:.2f}".format(time.time() - t0))
if exclude_center_aorta:
t0 = time.time()
struct = ndimage.generate_binary_structure(3, 1)
struct = ndimage.iterate_structure(struct, aorta_erode_iteration)
aorta_eroded = slicedDilationOrErosion(
aorta_mask, struct=struct, num_iteration=aorta_erode_iteration, operation="erode"
)
calc_mask = calc_mask * (aorta_eroded == 0)
if show_time:
print("exclude center aorta time: {:.2f} sec".format(time.time() - t0))
t0 = time.time()
if exclude_mask is not None:
if dilation_exclude_mask is not None:
struct_exclude = ndimage.generate_binary_structure(*dilation_exclude_mask)
if dilation_iteration_exclude is not None:
struct_exclude = ndimage.iterate_structure(
struct_exclude, dilation_iteration_exclude
)
exclude_mask = slicedDilationOrErosion(
exclude_mask,
struct=struct_exclude,
num_iteration=dilation_iteration_exclude,
operation="dilate",
)
if show_time:
print("exclude dilation time: {:.2f}".format(time.time() - t0))
t0 = time.time()
calc_mask = calc_mask * (exclude_mask == 0)
if show_time:
print("exclude time: {:.2f}".format(time.time() - t0))
if remove_size is not None:
t0 = time.time()
labels, num_features = ndimage.label(calc_mask)
counter = 0
for n in range(1, num_features + 1):
idx_tmp = labels == n
if idx_tmp.sum() <= remove_size:
calc_mask[idx_tmp] = 0
counter += 1
if show_time:
print("Size exclusion time: {:.1f} sec".format(time.time() - t0))
if verbose:
print("Excluded {} foci under {}".format(counter, remove_size))
if not any([return_dilated_mask, return_dilated_exclude, return_eroded_aorta]):
return calc_mask.astype(np.int8)
else:
results = {}
results["calc_mask"] = calc_mask.astype(np.int8)
if return_dilated_mask:
results["dilated_mask"] = aorta_dilated
if return_dilated_exclude:
results["dilated_exclude"] = exclude_mask
if return_eroded_aorta:
results["aorta_eroded"] = aorta_eroded
results["threshold"] = calc_thres
return results
class AorticCalciumMetrics(InferenceClass):
"""Calculate metrics for the aortic calcifications"""
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline):
calc_mask = inference_pipeline.calc_mask
inference_pipeline.pix_dims = inference_pipeline.medical_volume.header["pixdim"][1:4]
# pixel dimensions are in mm; divide by 10 to convert to cm
inference_pipeline.vol_per_pixel = np.prod(inference_pipeline.pix_dims / 10)
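# e.g. a pixdim of (0.7, 0.7, 3.0) mm yields 0.07 * 0.07 * 0.3 = 0.00147 cm^3 per voxel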
# count statistics for individual calcifications
labelled_calc, num_lesions = ndimage.label(calc_mask)
metrics = {
"volume": [],
"mean_hu": [],
"median_hu": [],
"max_hu": [],
}
ct = inference_pipeline.medical_volume.get_fdata()
for i in range(1, num_lesions + 1):
tmp_mask = labelled_calc == i
tmp_ct_vals = ct[tmp_mask]
metrics["volume"].append(len(tmp_ct_vals) * inference_pipeline.vol_per_pixel)
metrics["mean_hu"].append(np.mean(tmp_ct_vals))
metrics["median_hu"].append(np.median(tmp_ct_vals))
metrics["max_hu"].append(np.max(tmp_ct_vals))
# Total volume of calcifications
calc_vol = np.sum(metrics["volume"])
metrics["volume_total"] = calc_vol
metrics["num_calc"] = len(metrics["volume"])
inference_pipeline.metrics = metrics
return {}
| 14,459 | 35.515152 | 100 | py |
Comp2Comp | Comp2Comp-master/comp2comp/models/models.py | import enum
import os
from pathlib import Path
from typing import Dict, Sequence
import wget
from keras.models import load_model
class Models(enum.Enum):
ABCT_V_0_0_1 = (
1,
"abCT_v0.0.1",
{"muscle": 0, "imat": 1, "vat": 2, "sat": 3},
False,
("soft", "bone", "custom"),
)
STANFORD_V_0_0_1 = (
2,
"stanford_v0.0.1",
# ("background", "muscle", "bone", "vat", "sat", "imat"),
# Category name mapped to channel index
{"muscle": 1, "vat": 3, "sat": 4, "imat": 5},
True,
("soft", "bone", "custom"),
)
TS_SPINE_FULL = (
3,
"ts_spine_full",
# Category name mapped to channel index
{
"L5": 18,
"L4": 19,
"L3": 20,
"L2": 21,
"L1": 22,
"T12": 23,
"T11": 24,
"T10": 25,
"T9": 26,
"T8": 27,
"T7": 28,
"T6": 29,
"T5": 30,
"T4": 31,
"T3": 32,
"T2": 33,
"T1": 34,
"C7": 35,
"C6": 36,
"C5": 37,
"C4": 38,
"C3": 39,
"C2": 40,
"C1": 41,
},
False,
(),
)
TS_SPINE = (
4,
"ts_spine",
# Category name mapped to channel index
{"L5": 18, "L4": 19, "L3": 20, "L2": 21, "L1": 22, "T12": 23},
False,
(),
)
STANFORD_SPINE_V_0_0_1 = (
5,
"stanford_spine_v0.0.1",
# Category name mapped to channel index
{"L5": 24, "L4": 23, "L3": 22, "L2": 21, "L1": 20, "T12": 19},
False,
(),
)
TS_HIP = (
6,
"ts_hip",
# Category name mapped to channel index
{"femur_left": 88, "femur_right": 89},
False,
(),
)
def __new__(
cls,
value: int,
model_name: str,
categories: Dict[str, int],
use_softmax: bool,
windows: Sequence[str],
):
obj = object.__new__(cls)
obj._value_ = value
obj.model_name = model_name
obj.categories = categories
obj.use_softmax = use_softmax
obj.windows = windows
return obj
def load_model(self, model_dir):
"""Load the model from the models directory.
Args:
model_dir (str): Directory containing the model weights.
Returns:
keras.models.Model: Model.
"""
try:
filename = Models.find_model_weights(self.model_name, model_dir)
except Exception:
print("Downloading muscle/fat model from hugging face")
Path(model_dir).mkdir(parents=True, exist_ok=True)
wget.download(
f"https://huggingface.co/stanfordmimi/stanford_abct_v0.0.1/resolve/main/{self.model_name}.h5",
out=os.path.join(model_dir, f"{self.model_name}.h5"),
)
filename = Models.find_model_weights(self.model_name, model_dir)
print("")
print("Loading muscle/fat model from {}".format(filename))
return load_model(filename)
@staticmethod
def model_from_name(model_name):
"""Get the model enum from the model name.
Args:
model_name (str): Model name.
Returns:
Models: Model enum.
"""
for model in Models:
if model.model_name == model_name:
return model
return None
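# Example usage (hypothetical path):
#   model = Models.model_from_name("ts_spine")
#   if model is not None:
#       keras_model = model.load_model("/path/to/model_dir")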
@staticmethod
def find_model_weights(file_name, model_dir):
for root, _, files in os.walk(model_dir):
for file in files:
if file.startswith(file_name):
filename = os.path.join(root, file)
return filename
| 3,821 | 24.651007 | 110 | py |
Comp2Comp | Comp2Comp-master/comp2comp/contrast_phase/contrast_inf.py | import argparse
import os
import pickle
import sys
import nibabel as nib
import numpy as np
import scipy
import SimpleITK as sitk
from scipy import ndimage as ndi
def loadNiiToArray(path):
NiImg = nib.load(path)
array = np.array(NiImg.dataobj)
return array
def loadNiiWithSitk(path):
reader = sitk.ImageFileReader()
reader.SetImageIO("NiftiImageIO")
reader.SetFileName(path)
image = reader.Execute()
array = sitk.GetArrayFromImage(image)
return array
def loadNiiImageWithSitk(path):
reader = sitk.ImageFileReader()
reader.SetImageIO("NiftiImageIO")
reader.SetFileName(path)
image = reader.Execute()
# invert the image to be compatible with Nibabel
image = sitk.Flip(image, [False, True, False])
return image
def keep_masked_values(arr, mask):
# Get the indices of the non-zero elements in the mask
mask_indices = np.nonzero(mask)
# Use the indices to select the corresponding elements from the array
masked_values = arr[mask_indices]
# Return the selected elements as a new array
return masked_values
def get_stats(arr):
# # Get the indices of the non-zero elements in the array
# nonzero_indices = np.nonzero(arr)
# # Use the indices to get the non-zero elements of the array
# nonzero_elements = arr[nonzero_indices]
nonzero_elements = arr
# Calculate the stats for the non-zero elements
max_val = np.max(nonzero_elements)
min_val = np.min(nonzero_elements)
mean_val = np.mean(nonzero_elements)
median_val = np.median(nonzero_elements)
std_val = np.std(nonzero_elements)
variance_val = np.var(nonzero_elements)
return max_val, min_val, mean_val, median_val, std_val, variance_val
def getMaskAnteriorAtrium(mask):
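# For each axial slice, fill everything anterior to the first row containing
# the atrium mask; used later to exclude thoracic structures from the
# aorta and IVC masks.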
erasePreAtriumMask = mask.copy()
for sliceNum in range(mask.shape[-1]):
mask2D = mask[:, :, sliceNum]
itemindex = np.where(mask2D == 1)
if itemindex[0].size > 0:
row = itemindex[0][0]
erasePreAtriumMask[:, :, sliceNum][:row, :] = 1
return erasePreAtriumMask
"""
Function from
https://stackoverflow.com/questions/46310603/how-to-compute-convex-hull-image-volume-in-3d-numpy-arrays/46314485#46314485
"""
def fill_hull(image):
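# Computes the convex hull of all nonzero voxels and returns a binary volume
# with the hull filled in.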
points = np.transpose(np.where(image))
hull = scipy.spatial.ConvexHull(points)
deln = scipy.spatial.Delaunay(points[hull.vertices])
idx = np.stack(np.indices(image.shape), axis=-1)
out_idx = np.nonzero(deln.find_simplex(idx) + 1)
out_img = np.zeros(image.shape)
out_img[out_idx] = 1
return out_img
def getClassBinaryMask(TSOutArray, classNum):
binaryMask = np.zeros(TSOutArray.shape)
binaryMask[TSOutArray == classNum] = 1
return binaryMask
def loadNiftis(TSNiftiPath, imageNiftiPath):
TSArray = loadNiiToArray(TSNiftiPath)
scanArray = loadNiiToArray(imageNiftiPath)
return TSArray, scanArray
# function to select one slice from 3D volume of SimpleITK image
def selectSlice(scanImage, zslice):
size = list(scanImage.GetSize())
size[2] = 0
index = [0, 0, zslice]
Extractor = sitk.ExtractImageFilter()
Extractor.SetSize(size)
Extractor.SetIndex(index)
sliceImage = Extractor.Execute(scanImage)
return sliceImage
# function to apply windowing
def windowing(sliceImage, center=400, width=400):
windowMinimum = center - (width / 2)
windowMaximum = center + (width / 2)
img_255 = sitk.Cast(
sitk.IntensityWindowing(
sliceImage,
windowMinimum=-windowMinimum,
windowMaximum=windowMaximum,
outputMinimum=0.0,
outputMaximum=255.0,
),
sitk.sitkUInt8,
)
return img_255
def selectSampleSlice(kidneyLMask, adRMask, scanImage):
# Get the middle slice of the kidney mask from where there is the first 1 value to the last 1 value
middleSlice = np.where(kidneyLMask.sum(axis=(0, 1)) > 0)[0][0] + int(
(
np.where(kidneyLMask.sum(axis=(0, 1)) > 0)[0][-1]
- np.where(kidneyLMask.sum(axis=(0, 1)) > 0)[0][0]
)
/ 2
)
# print("Middle slice: ", middleSlice)
# make middleSlice int
middleSlice = int(middleSlice)
# select one slice using simple itk
sliceImageK = selectSlice(scanImage, middleSlice)
# Get the middle slice of the adrenal mask from where there is the first 1 value to the last 1 value
middleSlice = np.where(adRMask.sum(axis=(0, 1)) > 0)[0][0] + int(
(
np.where(adRMask.sum(axis=(0, 1)) > 0)[0][-1]
- np.where(adRMask.sum(axis=(0, 1)) > 0)[0][0]
)
/ 2
)
# print("Middle slice: ", middleSlice)
# make middleSlice int
middleSlice = int(middleSlice)
# select one slice using simple itk
sliceImageA = selectSlice(scanImage, middleSlice)
sliceImageK = windowing(sliceImageK)
sliceImageA = windowing(sliceImageA)
return sliceImageK, sliceImageA
def getFeatures(TSArray, scanArray):
aortaMask = getClassBinaryMask(TSArray, 7)
IVCMask = getClassBinaryMask(TSArray, 8)
portalMask = getClassBinaryMask(TSArray, 9)
atriumMask = getClassBinaryMask(TSArray, 45)
kidneyLMask = getClassBinaryMask(TSArray, 3)
kidneyRMask = getClassBinaryMask(TSArray, 2)
adRMask = getClassBinaryMask(TSArray, 11)
# Remove thoracic aorta and IVC from the aorta and IVC masks
anteriorAtriumMask = getMaskAnteriorAtrium(atriumMask)
aortaMask = aortaMask * (anteriorAtriumMask == 0)
IVCMask = IVCMask * (anteriorAtriumMask == 0)
# Erode vessels to get only the center of the vessels
struct2 = np.ones((3, 3, 3))
aortaMaskEroded = ndi.binary_erosion(aortaMask, structure=struct2).astype(aortaMask.dtype)
IVCMaskEroded = ndi.binary_erosion(IVCMask, structure=struct2).astype(IVCMask.dtype)
struct3 = np.ones((1, 1, 1))
portalMaskEroded = ndi.binary_erosion(portalMask, structure=struct3).astype(portalMask.dtype)
# If portalMaskEroded has less then 500 values, use the original portalMask
if np.count_nonzero(portalMaskEroded) < 500:
portalMaskEroded = portalMask
# Get masked values from scan
aortaArray = keep_masked_values(scanArray, aortaMaskEroded)
IVCArray = keep_masked_values(scanArray, IVCMaskEroded)
portalArray = keep_masked_values(scanArray, portalMaskEroded)
kidneyLArray = keep_masked_values(scanArray, kidneyLMask)
kidneyRArray = keep_masked_values(scanArray, kidneyRMask)
"""Put this on a separate function and return only the pelvis arrays"""
# process the Renal Pelvis masks from the Kidney masks
# create the convex hull of the Left Kidney
kidneyLHull = fill_hull(kidneyLMask)
# exclude the Left Kidney mask from the Left Convex Hull
kidneyLHull = kidneyLHull * (kidneyLMask == 0)
# erode the kidneyHull to remove the edges
struct = np.ones((3, 3, 3))
kidneyLHull = ndi.binary_erosion(kidneyLHull, structure=struct).astype(kidneyLHull.dtype)
# keep the values of the scanArray that are in the Left Convex Hull
pelvisLArray = keep_masked_values(scanArray, kidneyLHull)
# create the convex hull of the Right Kidney
kidneyRHull = fill_hull(kidneyRMask)
# exclude the Right Kidney mask from the Right Convex Hull
kidneyRHull = kidneyRHull * (kidneyRMask == 0)
# erode the kidneyHull to remove the edges
struct = np.ones((3, 3, 3))
kidneyRHull = ndi.binary_erosion(kidneyRHull, structure=struct).astype(kidneyRHull.dtype)
# keep the values of the scanArray that are in the Right Convex Hull
pelvisRArray = keep_masked_values(scanArray, kidneyRHull)
# Get the stats
# Get the stats for the aortaArray
(
aorta_max_val,
aorta_min_val,
aorta_mean_val,
aorta_median_val,
aorta_std_val,
aorta_variance_val,
) = get_stats(aortaArray)
# Get the stats for the IVCArray
(
IVC_max_val,
IVC_min_val,
IVC_mean_val,
IVC_median_val,
IVC_std_val,
IVC_variance_val,
) = get_stats(IVCArray)
# Get the stats for the portalArray
(
portal_max_val,
portal_min_val,
portal_mean_val,
portal_median_val,
portal_std_val,
portal_variance_val,
) = get_stats(portalArray)
# Get the stats for the kidneyLArray and kidneyRArray
(
kidneyL_max_val,
kidneyL_min_val,
kidneyL_mean_val,
kidneyL_median_val,
kidneyL_std_val,
kidneyL_variance_val,
) = get_stats(kidneyLArray)
(
kidneyR_max_val,
kidneyR_min_val,
kidneyR_mean_val,
kidneyR_median_val,
kidneyR_std_val,
kidneyR_variance_val,
) = get_stats(kidneyRArray)
(
pelvisL_max_val,
pelvisL_min_val,
pelvisL_mean_val,
pelvisL_median_val,
pelvisL_std_val,
pelvisL_variance_val,
) = get_stats(pelvisLArray)
(
pelvisR_max_val,
pelvisR_min_val,
pelvisR_mean_val,
pelvisR_median_val,
pelvisR_std_val,
pelvisR_variance_val,
) = get_stats(pelvisRArray)
# create three new columns for the decision tree
# aorta - porta, Max min and mean columns
aorta_porta_max = aorta_max_val - portal_max_val
aorta_porta_min = aorta_min_val - portal_min_val
aorta_porta_mean = aorta_mean_val - portal_mean_val
# aorta - IVC, Max min and mean columns
aorta_IVC_max = aorta_max_val - IVC_max_val
aorta_IVC_min = aorta_min_val - IVC_min_val
aorta_IVC_mean = aorta_mean_val - IVC_mean_val
# Save stats in CSV:
# Create a list to store the stats
stats = []
# Add the stats for the aortaArray to the list
stats.extend(
[
aorta_max_val,
aorta_min_val,
aorta_mean_val,
aorta_median_val,
aorta_std_val,
aorta_variance_val,
]
)
# Add the stats for the IVCArray to the list
stats.extend(
[IVC_max_val, IVC_min_val, IVC_mean_val, IVC_median_val, IVC_std_val, IVC_variance_val]
)
# Add the stats for the portalArray to the list
stats.extend(
[
portal_max_val,
portal_min_val,
portal_mean_val,
portal_median_val,
portal_std_val,
portal_variance_val,
]
)
# Add the stats for the kidneyLArray and kidneyRArray to the list
stats.extend(
[
kidneyL_max_val,
kidneyL_min_val,
kidneyL_mean_val,
kidneyL_median_val,
kidneyL_std_val,
kidneyL_variance_val,
]
)
stats.extend(
[
kidneyR_max_val,
kidneyR_min_val,
kidneyR_mean_val,
kidneyR_median_val,
kidneyR_std_val,
kidneyR_variance_val,
]
)
# Add the stats for the kidneyLHull and kidneyRHull to the list
stats.extend(
[
pelvisL_max_val,
pelvisL_min_val,
pelvisL_mean_val,
pelvisL_median_val,
pelvisL_std_val,
pelvisL_variance_val,
]
)
stats.extend(
[
pelvisR_max_val,
pelvisR_min_val,
pelvisR_mean_val,
pelvisR_median_val,
pelvisR_std_val,
pelvisR_variance_val,
]
)
stats.extend(
[
aorta_porta_max,
aorta_porta_min,
aorta_porta_mean,
aorta_IVC_max,
aorta_IVC_min,
aorta_IVC_mean,
]
)
return stats, kidneyLMask, adRMask
def loadModel():
c2cPath = os.path.dirname(sys.path[0])
filename = os.path.join(c2cPath, "comp2comp", "contrast_phase", "xgboost.pkl")
with open(filename, "rb") as f:
model = pickle.load(f)
return model
def predict_phase(TS_path, scan_path, outputPath=None, save_sample=False):
TS_array, image_array = loadNiftis(TS_path, scan_path)
model = loadModel()
# TS_array, image_array = loadNiftis(TS_output_nifti_path, image_nifti_path)
featureArray, kidneyLMask, adRMask = getFeatures(TS_array, image_array)
y_pred = model.predict([featureArray])
if y_pred == 0:
pred_phase = "non-contrast"
elif y_pred == 1:
pred_phase = "arterial"
elif y_pred == 2:
pred_phase = "venous"
elif y_pred == 3:
pred_phase = "delayed"
output_path_metrics = os.path.join(outputPath, "metrics")
if not os.path.exists(output_path_metrics):
os.makedirs(output_path_metrics)
outputTxt = os.path.join(output_path_metrics, "phase_prediction.txt")
with open(outputTxt, "w") as text_file:
text_file.write(pred_phase)
print(pred_phase)
output_path_images = os.path.join(outputPath, "images")
if not os.path.exists(output_path_images):
os.makedirs(output_path_images)
scanImage = loadNiiImageWithSitk(scan_path)
sliceImageK, sliceImageA = selectSampleSlice(kidneyLMask, adRMask, scanImage)
outJpgK = os.path.join(output_path_images, "sampleSliceKidney.png")
sitk.WriteImage(sliceImageK, outJpgK)
outJpgA = os.path.join(output_path_images, "sampleSliceAdrenal.png")
sitk.WriteImage(sliceImageA, outJpgA)
if __name__ == "__main__":
# parse arguments optional
parser = argparse.ArgumentParser()
parser.add_argument("--TS_path", type=str, required=True, help="Input image")
parser.add_argument("--scan_path", type=str, required=True, help="Input image")
parser.add_argument(
"--output_dir", type=str, required=False, help="Output .txt prediction", default=None
)
parser.add_argument(
"--save_sample", type=bool, required=False, help="Save jpeg sample ", default=False
)
args = parser.parse_args()
predict_phase(args.TS_path, args.scan_path, args.output_dir, args.save_sample)
| 13,957 | 30.436937 | 121 | py |
Comp2Comp | Comp2Comp-master/comp2comp/contrast_phase/contrast_phase.py | import os
from pathlib import Path
from time import time
from typing import Union
from totalsegmentator.libs import (
download_pretrained_weights,
nostdout,
setup_nnunet,
)
from comp2comp.contrast_phase.contrast_inf import predict_phase
from comp2comp.inference_class_base import InferenceClass
class ContrastPhaseDetection(InferenceClass):
"""Contrast Phase Detection."""
def __init__(self, input_path):
super().__init__()
self.input_path = input_path
def __call__(self, inference_pipeline):
self.output_dir = inference_pipeline.output_dir
self.output_dir_segmentations = os.path.join(self.output_dir, "segmentations/")
if not os.path.exists(self.output_dir_segmentations):
os.makedirs(self.output_dir_segmentations)
self.model_dir = inference_pipeline.model_dir
seg, img = self.run_segmentation(
os.path.join(self.output_dir_segmentations, "converted_dcm.nii.gz"),
self.output_dir_segmentations + "s01.nii.gz",
inference_pipeline.model_dir,
)
# segArray, imgArray = self.convertNibToNumpy(seg, img)
imgNiftiPath = os.path.join(self.output_dir_segmentations, "converted_dcm.nii.gz")
segNiftPath = os.path.join(self.output_dir_segmentations, "s01.nii.gz")
predict_phase(segNiftPath, imgNiftiPath, outputPath=self.output_dir)
return {}
def run_segmentation(
self, input_path: Union[str, Path], output_path: Union[str, Path], model_dir
):
"""Run segmentation.
Args:
input_path (Union[str, Path]): Input path.
output_path (Union[str, Path]): Output path.
"""
print("Segmenting...")
st = time()
os.environ["SCRATCH"] = self.model_dir
# Setup nnunet
model = "3d_fullres"
folds = [0]
trainer = "nnUNetTrainerV2_ep4000_nomirror"
crop_path = None
task_id = [251]
setup_nnunet()
for task_id in [251]:
download_pretrained_weights(task_id)
from totalsegmentator.nnunet import nnUNet_predict_image
with nostdout():
img, seg = nnUNet_predict_image(
input_path,
output_path,
task_id,
model=model,
folds=folds,
trainer=trainer,
tta=False,
multilabel_image=True,
resample=1.5,
crop=None,
crop_path=crop_path,
task_name="total",
nora_tag=None,
preview=False,
nr_threads_resampling=1,
nr_threads_saving=6,
quiet=False,
verbose=False,
test=0,
)
end = time()
# Log total time for segmentation
print(f"Total time for segmentation: {end-st:.2f}s.")
return seg, img
def convertNibToNumpy(self, TSNib, ImageNib):
"""Convert nifti to numpy array.
Args:
TSNib (nibabel.nifti1.Nifti1Image): TotalSegmentator output.
ImageNib (nibabel.nifti1.Nifti1Image): Input image.
Returns:
numpy.ndarray: TotalSegmentator output.
numpy.ndarray: Input image.
"""
TS_array = TSNib.get_fdata()
img_array = ImageNib.get_fdata()
return TS_array, img_array
| 3,465 | 28.87931 | 90 | py |
Comp2Comp | Comp2Comp-master/comp2comp/metrics/metrics.py | from abc import ABC, abstractmethod
from typing import Callable, Sequence, Union
import numpy as np
def flatten_non_category_dims(
xs: Union[np.ndarray, Sequence[np.ndarray]], category_dim: int = None
):
"""Flattens all non-category dimensions into a single dimension.
Args:
xs (ndarrays): Sequence of ndarrays with the same category dimension.
category_dim: The dimension/axis corresponding to different categories.
i.e. `C`. If `None`, behaves like `np.flatten(x)`.
Returns:
ndarray: Shape (C, -1) if `category_dim` specified else shape (-1,)
"""
single_item = isinstance(xs, np.ndarray)
if single_item:
xs = [xs]
if category_dim is not None:
dims = (xs[0].shape[category_dim], -1)
xs = (np.moveaxis(x, category_dim, 0).reshape(dims) for x in xs)
else:
xs = (x.flatten() for x in xs)
if single_item:
return list(xs)[0]
else:
return xs
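# Example: flatten_non_category_dims(np.zeros((4, 8, 8, 3)), category_dim=-1)
# returns an array of shape (3, 256).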
class Metric(Callable, ABC):
"""Interface for new metrics.
A metric should be implemented as a callable with explicitly defined
arguments. In other words, metrics should not have `**kwargs` or `**args`
options in the `__call__` method.
While not explicitly constrained to the return type, metrics typically
return float value(s). The number of values returned corresponds to the
number of categories.
* metrics should have different name() for different functionality.
* `category_dim` duck type if metric can process multiple categories at
once.
To compute metrics:
.. code-block:: python
metric = Metric()
results = metric(...)
"""
def __init__(self, units: str = ""):
self.units = units
def name(self):
return type(self).__name__
def display_name(self):
"""Name to use for pretty printing and display purposes."""
name = self.name()
return "{} {}".format(name, self.units) if self.units else name
@abstractmethod
def __call__(self, *args, **kwargs):
pass
class HounsfieldUnits(Metric):
FULL_NAME = "Hounsfield Unit"
def __init__(self, units="hu"):
super().__init__(units)
def __call__(self, mask, x, category_dim: int = None):
mask = mask.astype(bool)
if category_dim is None:
return np.mean(x[mask])
assert category_dim == -1
num_classes = mask.shape[-1]
return np.array([np.mean(x[mask[..., c]]) for c in range(num_classes)])
def name(self):
return self.FULL_NAME
class CrossSectionalArea(Metric):
def __call__(self, mask, spacing=None, category_dim: int = None):
pixel_area = np.prod(spacing) if spacing else 1
mask = mask.astype(bool)
mask = flatten_non_category_dims(mask, category_dim)
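# pixel_area is in mm^2; dividing by 100 converts the area to cm^2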
return pixel_area * np.count_nonzero(mask, -1) / 100.0
def name(self):
if self.units:
return "Cross-sectional Area ({})".format(self.units)
else:
return "Cross-sectional Area"
def manifest_to_map(manifest, model_type):
"""Converts a manifest to a map of metric name to metric instance.
Args:
manifest (dict): A dictionary of metric name to metric instance.
Returns:
dict: A dictionary of metric name to metric instance.
"""
# TODO: hacky. Update this
figure_text_key = {}
for manifest_dict in manifest:
try:
key = manifest_dict["Level"]
except KeyError:
key = ".".join((manifest_dict["File"].split("/")[-1]).split(".")[:-1])
muscle_hu = f"{manifest_dict['Hounsfield Unit (muscle)']:.2f}"
muscle_area = f"{manifest_dict['Cross-sectional Area (cm^2) (muscle)']:.2f}"
vat_hu = f"{manifest_dict['Hounsfield Unit (vat)']:.2f}"
vat_area = f"{manifest_dict['Cross-sectional Area (cm^2) (vat)']:.2f}"
sat_hu = f"{manifest_dict['Hounsfield Unit (sat)']:.2f}"
sat_area = f"{manifest_dict['Cross-sectional Area (cm^2) (sat)']:.2f}"
imat_hu = f"{manifest_dict['Hounsfield Unit (imat)']:.2f}"
imat_area = f"{manifest_dict['Cross-sectional Area (cm^2) (imat)']:.2f}"
if model_type.model_name == "abCT_v0.0.1":
figure_text_key[key] = [
muscle_hu,
muscle_area,
imat_hu,
imat_area,
vat_hu,
vat_area,
sat_hu,
sat_area,
]
else:
figure_text_key[key] = [
muscle_hu,
muscle_area,
vat_hu,
vat_area,
sat_hu,
sat_area,
imat_hu,
imat_area,
]
return figure_text_key
| 4,820 | 29.707006 | 84 | py |
Comp2Comp | Comp2Comp-master/comp2comp/muscle_adipose_tissue/data.py | import math
from typing import List, Sequence
import keras.utils as k_utils
import numpy as np
import pydicom
from keras.utils.data_utils import OrderedEnqueuer
from tqdm import tqdm
def parse_windows(windows):
"""Parse windows provided by the user.
These windows can either be strings corresponding to popular windowing
thresholds for CT or tuples of (lower, upper) bounds.
Args:
windows (list): List of strings or tuples.
Returns:
tuple: Tuple of tuples of (lower, upper) bounds.
"""
windowing = {
"soft": (400, 50),
"bone": (1800, 400),
"liver": (150, 30),
"spine": (250, 50),
"custom": (500, 50),
}
vals = []
for w in windows:
if isinstance(w, Sequence) and len(w) == 2:
assert_msg = "Expected tuple of (lower, upper) bound"
assert len(w) == 2, assert_msg
assert isinstance(w[0], (float, int)), assert_msg
assert isinstance(w[1], (float, int)), assert_msg
assert w[0] < w[1], assert_msg
vals.append(w)
continue
if w not in windowing:
raise KeyError("Window {} not found".format(w))
window_width = windowing[w][0]
window_level = windowing[w][1]
upper = window_level + window_width / 2
lower = window_level - window_width / 2
vals.append((lower, upper))
return tuple(vals)
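# Example: parse_windows(["soft", (100, 200)]) returns ((-150.0, 250.0), (100, 200)).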
def _window(xs, bounds):
"""Apply windowing to an array of CT images.
Args:
xs (ndarray): NxHxW
bounds (Sequence[tuple]): Sequence of (lower, upper) bounds, one window per tuple.
Returns:
ndarray: Windowed images.
"""
imgs = []
for lb, ub in bounds:
imgs.append(np.clip(xs, a_min=lb, a_max=ub))
if len(imgs) == 1:
return imgs[0]
elif xs.shape[-1] == 1:
return np.concatenate(imgs, axis=-1)
else:
return np.stack(imgs, axis=-1)
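# With a single window the input keeps its shape; with multiple windows the
# windowed copies are stacked along a trailing channel axis.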
class Dataset(k_utils.Sequence):
def __init__(self, files: List[str], batch_size: int = 16, windows=None):
self._files = files
self._batch_size = batch_size
self.windows = windows
def __len__(self):
return math.ceil(len(self._files) / self._batch_size)
def __getitem__(self, idx):
files = self._files[idx * self._batch_size : (idx + 1) * self._batch_size]
dcms = [pydicom.dcmread(f, force=True) for f in files]
xs = [(x.pixel_array + int(x.RescaleIntercept)).astype("float32") for x in dcms]
params = [{"spacing": header.PixelSpacing, "image": x} for header, x in zip(dcms, xs)]
# Preprocess xs via windowing.
xs = np.stack(xs, axis=0)
if self.windows:
xs = _window(xs, parse_windows(self.windows))
else:
xs = xs[..., np.newaxis]
return xs, params
def _swap_muscle_imap(xs, ys, muscle_idx: int, imat_idx: int, threshold=-30.0):
"""
If pixel labeled as muscle but has HU < threshold, change label to imat.
Args:
xs (ndarray): NxHxWxC
ys (ndarray): NxHxWxC
muscle_idx (int): Index of the muscle label.
imat_idx (int): Index of the imat label.
threshold (float): Threshold for HU value.
Returns:
ndarray: Segmentation mask with swapped labels.
"""
labels = ys.copy()
muscle_mask = (labels[..., muscle_idx] > 0.5).astype(int)
imat_mask = labels[..., imat_idx]
imat_mask[muscle_mask.astype(bool) & (xs < threshold)] = 1
muscle_mask[xs < threshold] = 0
labels[..., muscle_idx] = muscle_mask
labels[..., imat_idx] = imat_mask
return labels
def postprocess(xs: np.ndarray, ys: np.ndarray):
"""Built-in post-processing.
TODO: Make this configurable.
Args:
xs (ndarray): NxHxW
ys (ndarray): NxHxWxC
Returns:
ndarray: Post-processed labels.
"""
# Add another channel full of zeros to ys
ys = np.concatenate([ys, np.zeros_like(ys[..., :1])], axis=-1)
# If muscle hu is < -30, assume it is imat.
"""
if "muscle" in categories and "imat" in categories:
ys = _swap_muscle_imap(
xs,
ys,
muscle_idx=categories["muscle"],
imat_idx=categories["imat"],
)
"""
return ys
def predict(
model,
dataset: Dataset,
batch_size: int = 16,
num_workers: int = 1,
max_queue_size: int = 10,
use_multiprocessing: bool = False,
):
"""Predict segmentation masks for a dataset.
Args:
model (keras.Model): Model to use for prediction.
dataset (Dataset): Dataset to predict on.
batch_size (int): Batch size.
num_workers (int): Number of workers.
max_queue_size (int): Maximum queue size.
use_multiprocessing (bool): Use multiprocessing.
Returns:
List: List of segmentation masks.
"""
if num_workers > 0:
enqueuer = OrderedEnqueuer(dataset, use_multiprocessing=use_multiprocessing, shuffle=False)
enqueuer.start(workers=num_workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
else:
output_generator = iter(dataset)
num_batches = len(dataset)
xs = []
ys = []
params = []
for _ in tqdm(range(num_batches)):
x, p_dicts = next(output_generator)
y = model.predict(x, batch_size=batch_size)
image = np.stack([out["image"] for out in p_dicts], axis=0)
y = postprocess(image, y)
params.extend(p_dicts)
xs.extend([x[i, ...] for i in range(len(x))])
ys.extend([y[i, ...] for i in range(len(y))])
return xs, ys, params
| 5,857 | 26.763033 | 99 | py |
Comp2Comp | Comp2Comp-master/comp2comp/muscle_adipose_tissue/muscle_adipose_tissue.py | import os
from time import perf_counter
from typing import List
import cv2
import h5py
import numpy as np
import pandas as pd
from keras import backend as K
from tqdm import tqdm
from comp2comp.inference_class_base import InferenceClass
from comp2comp.metrics.metrics import CrossSectionalArea, HounsfieldUnits
from comp2comp.models.models import Models
from comp2comp.muscle_adipose_tissue.data import Dataset, predict
class MuscleAdiposeTissueSegmentation(InferenceClass):
"""Muscle adipose tissue segmentation class."""
def __init__(self, batch_size: int, model_name: str, model_dir: str = None):
super().__init__()
self.batch_size = batch_size
self.model_name = model_name
self.model_type = Models.model_from_name(model_name)
def forward_pass_2d(self, files):
dataset = Dataset(files, windows=self.model_type.windows)
num_workers = 1
print("Computing segmentation masks using {}...".format(self.model_name))
start_time = perf_counter()
_, preds, results = predict(
self.model,
dataset,
num_workers=num_workers,
use_multiprocessing=num_workers > 1,
batch_size=self.batch_size,
)
K.clear_session()
print(
f"Completed {len(files)} segmentations in {(perf_counter() - start_time):.2f} seconds."
)
for i in range(len(results)):
results[i]["preds"] = preds[i]
return results
def __call__(self, inference_pipeline):
inference_pipeline.muscle_adipose_tissue_model_type = self.model_type
inference_pipeline.muscle_adipose_tissue_model_name = self.model_name
dicom_file_paths = inference_pipeline.dicom_file_paths
# if dicom_file_names not an attribute of inference_pipeline, add it
if not hasattr(inference_pipeline, "dicom_file_names"):
inference_pipeline.dicom_file_names = [
dicom_file_path.stem for dicom_file_path in dicom_file_paths
]
self.model = self.model_type.load_model(inference_pipeline.model_dir)
results = self.forward_pass_2d(dicom_file_paths)
images = []
for result in results:
images.append(result["image"])
preds = []
for result in results:
preds.append(result["preds"])
spacings = []
for result in results:
spacings.append(result["spacing"])
return {"images": images, "preds": preds, "spacings": spacings}
class MuscleAdiposeTissuePostProcessing(InferenceClass):
"""Post-process muscle and adipose tissue segmentation."""
def __init__(self):
super().__init__()
def preds_to_mask(self, preds):
"""Convert model predictions to a mask.
Args:
preds (np.ndarray): Model predictions.
Returns:
np.ndarray: Mask.
"""
if self.use_softmax:
# softmax
labels = np.zeros_like(preds, dtype=np.uint8)
l_argmax = np.argmax(preds, axis=-1)
for c in range(labels.shape[-1]):
labels[l_argmax == c, c] = 1
return labels.astype(bool)
else:
# sigmoid
return preds >= 0.5
def __call__(self, inference_pipeline, images, preds, spacings):
"""Post-process muscle and adipose tissue segmentation."""
self.model_type = inference_pipeline.muscle_adipose_tissue_model_type
self.use_softmax = self.model_type.use_softmax
self.model_name = inference_pipeline.muscle_adipose_tissue_model_name
return self.post_process(images, preds, spacings)
def remove_small_objects(self, mask, min_size=10):
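"""Remove connected components smaller than min_size pixels from a binary mask."""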
mask = mask.astype(np.uint8)
components, output, stats, centroids = cv2.connectedComponentsWithStats(
mask, connectivity=8
)
sizes = stats[1:, -1]
mask = np.zeros((output.shape))
for i in range(0, components - 1):
if sizes[i] >= min_size:
mask[output == i + 1] = 1
return mask
def post_process(
self,
images,
preds,
spacings,
):
categories = self.model_type.categories
start_time = perf_counter()
masks = [self.preds_to_mask(p) for p in preds]
for i, _ in enumerate(masks):
# Keep only channels from the model_type categories dict
masks[i] = masks[i][..., [categories[cat] for cat in categories]]
masks = self.fill_holes(masks)
cats = list(categories.keys())
file_idx = 0
for mask, image in tqdm(zip(masks, images), total=len(masks)):
muscle_mask = mask[..., cats.index("muscle")]
imat_mask = mask[..., cats.index("imat")]
imat_mask = (
np.logical_and((image * muscle_mask) <= -30, (image * muscle_mask) >= -190)
).astype(int)
imat_mask = self.remove_small_objects(imat_mask)
mask[..., cats.index("imat")] += imat_mask
mask[..., cats.index("muscle")][imat_mask == 1] = 0
masks[file_idx] = mask
images[file_idx] = image
file_idx += 1
print(f"Completed post-processing in {(perf_counter() - start_time):.2f} seconds.")
return {"images": images, "masks": masks, "spacings": spacings}
# function that fills in holes in a segmentation mask
def _fill_holes(self, mask: np.ndarray, mask_id: int):
"""Fill in holes in a segmentation mask.
Args:
mask (ndarray): NxHxW
mask_id (int): Label of the mask.
Returns:
ndarray: Filled mask.
"""
int_mask = ((1 - mask) > 0.5).astype(np.int8)
components, output, stats, _ = cv2.connectedComponentsWithStats(int_mask, connectivity=8)
sizes = stats[1:, -1]
components = components - 1
# Larger threshold for SAT
# TODO make this configurable / parameter
if mask_id == 2:
min_size = 200
else:
# min_size = 50 # Smaller threshold for everything else
min_size = 20
img_out = np.ones_like(mask)
for i in range(0, components):
if sizes[i] > min_size:
img_out[output == i + 1] = 0
return img_out
def fill_holes(self, ys: List):
"""Take an array of size NxHxWxC and for each channel fill in holes.
Args:
ys (list): List of segmentation masks.
"""
segs = []
for n in range(len(ys)):
ys_out = [self._fill_holes(ys[n][..., i], i) for i in range(ys[n].shape[-1])]
segs.append(np.stack(ys_out, axis=2).astype(float))
return segs
class MuscleAdiposeTissueComputeMetrics(InferenceClass):
"""Compute muscle and adipose tissue metrics."""
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline, images, masks, spacings):
"""Compute muscle and adipose tissue metrics."""
self.model_type = inference_pipeline.muscle_adipose_tissue_model_type
self.model_name = inference_pipeline.muscle_adipose_tissue_model_name
metrics = self.compute_metrics_all(images, masks, spacings)
return metrics
def compute_metrics_all(self, images, masks, spacings):
"""Compute metrics for all images and masks.
Args:
images (List[np.ndarray]): Images.
masks (List[np.ndarray]): Masks.
Returns:
Dict: Results.
"""
results = []
for image, mask, spacing in zip(images, masks, spacings):
results.append(self.compute_metrics(image, mask, spacing))
return {"images": images, "results": results}
def compute_metrics(self, x, mask, spacing):
"""Compute results for a given segmentation."""
categories = self.model_type.categories
hu = HounsfieldUnits()
csa_units = "cm^2" if spacing else ""
csa = CrossSectionalArea(csa_units)
hu_vals = hu(mask, x, category_dim=-1)
csa_vals = csa(mask=mask, spacing=spacing, category_dim=-1)
assert mask.shape[-1] == len(
categories
), "{} categories found in mask, " "but only {} categories specified".format(
mask.shape[-1], len(categories)
)
results = {
cat: {
"mask": mask[..., idx],
hu.name(): hu_vals[idx],
csa.name(): csa_vals[idx],
}
for idx, cat in enumerate(categories.keys())
}
return results
class MuscleAdiposeTissueH5Saver(InferenceClass):
"""Save results to an HDF5 file."""
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline, results):
"""Save results to an HDF5 file."""
self.model_type = inference_pipeline.muscle_adipose_tissue_model_type
self.model_name = inference_pipeline.muscle_adipose_tissue_model_name
self.output_dir = inference_pipeline.output_dir
self.h5_output_dir = os.path.join(self.output_dir, "segmentations")
os.makedirs(self.h5_output_dir, exist_ok=True)
self.dicom_file_paths = inference_pipeline.dicom_file_paths
self.dicom_file_names = inference_pipeline.dicom_file_names
self.save_results(results)
return {"results": results}
def save_results(self, results):
"""Save results to an HDF5 file."""
categories = self.model_type.categories
cats = list(categories.keys())
for i, result in enumerate(results):
file_name = self.dicom_file_names[i]
with h5py.File(os.path.join(self.h5_output_dir, file_name + ".h5"), "w") as f:
for cat in cats:
mask = result[cat]["mask"]
f.create_dataset(name=cat, data=np.array(mask, dtype=np.uint8))
class MuscleAdiposeTissueMetricsSaver(InferenceClass):
"""Save metrics to a CSV file."""
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline, results):
"""Save metrics to a CSV file."""
self.model_type = inference_pipeline.muscle_adipose_tissue_model_type
self.model_name = inference_pipeline.muscle_adipose_tissue_model_name
self.output_dir = inference_pipeline.output_dir
self.csv_output_dir = os.path.join(self.output_dir, "metrics")
os.makedirs(self.csv_output_dir, exist_ok=True)
self.dicom_file_paths = inference_pipeline.dicom_file_paths
self.dicom_file_names = inference_pipeline.dicom_file_names
self.save_results(results)
return {}
def save_results(self, results):
"""Save results to a CSV file."""
categories = self.model_type.categories
cats = list(categories.keys())
df = pd.DataFrame(
columns=[
"File Name",
"File Path",
"Muscle HU",
"Muscle CSA (cm^2)",
"IMAT HU",
"IMAT CSA (cm^2)",
"SAT HU",
"SAT CSA (cm^2)",
"VAT HU",
"VAT CSA (cm^2)",
]
)
for i, result in enumerate(results):
row = []
row.append(self.dicom_file_names[i])
row.append(self.dicom_file_paths[i])
# iterate in the order of the columns defined above
for cat in ["muscle", "imat", "sat", "vat"]:
row.append(result[cat]["Hounsfield Unit"])
row.append(result[cat]["Cross-sectional Area (cm^2)"])
df.loc[i] = row
df.to_csv(
os.path.join(self.csv_output_dir, "muscle_adipose_tissue_metrics.csv"), index=False
)
| 11,794 | 34.42042 | 99 | py |
Comp2Comp | Comp2Comp-master/comp2comp/muscle_adipose_tissue/muscle_adipose_tissue_visualization.py | """
@author: louisblankemeier
"""
import os
from pathlib import Path
import numpy as np
from comp2comp.inference_class_base import InferenceClass
from comp2comp.visualization.detectron_visualizer import Visualizer
class MuscleAdiposeTissueVisualizer(InferenceClass):
def __init__(self):
super().__init__()
self._spine_colors = {
"L5": [255, 0, 0],
"L4": [0, 255, 0],
"L3": [255, 255, 0],
"L2": [255, 128, 0],
"L1": [0, 255, 255],
"T12": [255, 0, 255],
}
self._muscle_fat_colors = {
"muscle": [255, 136, 133],
"imat": [154, 135, 224],
"vat": [140, 197, 135],
"sat": [246, 190, 129],
}
self._SPINE_TEXT_OFFSET_FROM_TOP = 10.0
self._SPINE_TEXT_OFFSET_FROM_RIGHT = 63.0
self._SPINE_TEXT_VERTICAL_SPACING = 14.0
self._MUSCLE_FAT_TEXT_HORIZONTAL_SPACING = 40.0
self._MUSCLE_FAT_TEXT_VERTICAL_SPACING = 14.0
self._MUSCLE_FAT_TEXT_OFFSET_FROM_TOP = 22.0
self._MUSCLE_FAT_TEXT_OFFSET_FROM_RIGHT = 181.0
def __call__(self, inference_pipeline, images, results):
self.output_dir = inference_pipeline.output_dir
self.dicom_file_names = inference_pipeline.dicom_file_names
# if spine is an attribute of the inference pipeline, use it
if not hasattr(inference_pipeline, "spine"):
spine = False
else:
spine = True
for i, (image, result) in enumerate(zip(images, results)):
# now, result is a dict with keys for each tissue
dicom_file_name = self.dicom_file_names[i]
self.save_binary_segmentation_overlay(image, result, dicom_file_name, spine)
# pass along for next class in pipeline
return {"results": results}
def save_binary_segmentation_overlay(self, image, result, dicom_file_name, spine):
file_name = dicom_file_name + ".png"
img_in = image
assert img_in.shape == (512, 512), "Image shape is not 512 x 512"
img_in = np.clip(img_in, -300, 1800)
img_in = self.normalize_img(img_in) * 255.0
# Create the folder to save the images
images_base_path = Path(self.output_dir) / "images"
images_base_path.mkdir(exist_ok=True)
text_start_vertical_offset = self._MUSCLE_FAT_TEXT_OFFSET_FROM_TOP
img_in = img_in.reshape((img_in.shape[0], img_in.shape[1], 1))
img_rgb = np.tile(img_in, (1, 1, 3))
vis = Visualizer(img_rgb)
vis.draw_text(
text="Density (HU)",
position=(
img_in.shape[1] - self._MUSCLE_FAT_TEXT_OFFSET_FROM_RIGHT - 63,
text_start_vertical_offset,
),
color=[1, 1, 1],
font_size=9,
horizontal_alignment="left",
)
vis.draw_text(
text="Area (CM²)",
position=(
img_in.shape[1] - self._MUSCLE_FAT_TEXT_OFFSET_FROM_RIGHT - 63,
text_start_vertical_offset + self._MUSCLE_FAT_TEXT_VERTICAL_SPACING,
),
color=[1, 1, 1],
font_size=9,
horizontal_alignment="left",
)
if spine:
spine_color = np.array(self._spine_colors[dicom_file_name]) / 255.0
vis.draw_box(
box_coord=(1, 1, img_in.shape[0] - 1, img_in.shape[1] - 1),
alpha=1,
edge_color=spine_color,
)
# draw the level T12 - L5 in the upper left corner
if dicom_file_name == "T12":
position = (40, 15)
else:
position = (30, 15)
vis.draw_text(text=dicom_file_name, position=position, color=spine_color, font_size=24)
for idx, tissue in enumerate(result.keys()):
alpha_val = 0.9
color = np.array(self._muscle_fat_colors[tissue]) / 255.0
edge_color = color
mask = result[tissue]["mask"]
vis.draw_binary_mask(
mask, color=color, edge_color=edge_color, alpha=alpha_val, area_threshold=0
)
hu_val = round(result[tissue]["Hounsfield Unit"])
area_val = round(result[tissue]["Cross-sectional Area (cm^2)"])
vis.draw_text(
text=tissue,
position=(
mask.shape[1]
- self._MUSCLE_FAT_TEXT_OFFSET_FROM_RIGHT
+ self._MUSCLE_FAT_TEXT_HORIZONTAL_SPACING * (idx + 1),
text_start_vertical_offset - self._MUSCLE_FAT_TEXT_VERTICAL_SPACING,
),
color=color,
font_size=9,
horizontal_alignment="center",
)
vis.draw_text(
text=hu_val,
position=(
mask.shape[1]
- self._MUSCLE_FAT_TEXT_OFFSET_FROM_RIGHT
+ self._MUSCLE_FAT_TEXT_HORIZONTAL_SPACING * (idx + 1),
text_start_vertical_offset,
),
color=color,
font_size=9,
horizontal_alignment="center",
)
vis.draw_text(
text=area_val,
position=(
mask.shape[1]
- self._MUSCLE_FAT_TEXT_OFFSET_FROM_RIGHT
+ self._MUSCLE_FAT_TEXT_HORIZONTAL_SPACING * (idx + 1),
text_start_vertical_offset + self._MUSCLE_FAT_TEXT_VERTICAL_SPACING,
),
color=color,
font_size=9,
horizontal_alignment="center",
)
vis_obj = vis.get_output()
vis_obj.save(os.path.join(images_base_path, file_name))
def normalize_img(self, img: np.ndarray) -> np.ndarray:
"""Normalize the image.
Args:
img (np.ndarray): Input image.
Returns:
np.ndarray: Normalized image.
"""
return (img - img.min()) / (img.max() - img.min())
| 6,119 | 33.772727 | 99 | py |
Comp2Comp | Comp2Comp-master/comp2comp/visualization/dicom.py | import os
from pathlib import Path
import numpy as np
import pydicom
from PIL import Image
from pydicom.dataset import Dataset, FileMetaDataset
from pydicom.uid import ExplicitVRLittleEndian
def to_dicom(input, output_path, plane="axial"):
"""Converts a png image to a dicom image. Written with assistance from ChatGPT."""
if isinstance(input, str) or isinstance(input, Path):
png_path = input
dicom_path = os.path.join(output_path, os.path.basename(png_path).replace(".png", ".dcm"))
image = Image.open(png_path)
image_array = np.array(image)
image_array = image_array[:, :, :3]
else:
image_array = input
dicom_path = output_path
meta = FileMetaDataset()
meta.MediaStorageSOPClassUID = "1.2.840.10008.5.1.4.1.1.7"
meta.MediaStorageSOPInstanceUID = pydicom.uid.generate_uid()
meta.TransferSyntaxUID = ExplicitVRLittleEndian
meta.ImplementationClassUID = pydicom.uid.PYDICOM_IMPLEMENTATION_UID
ds = Dataset()
ds.file_meta = meta
ds.is_little_endian = True
ds.is_implicit_VR = False
ds.SOPClassUID = "1.2.840.10008.5.1.4.1.1.7"
ds.SOPInstanceUID = pydicom.uid.generate_uid()
ds.PatientName = "John Doe"
ds.PatientID = "123456"
ds.Modality = "OT"
ds.SeriesInstanceUID = pydicom.uid.generate_uid()
ds.StudyInstanceUID = pydicom.uid.generate_uid()
ds.FrameOfReferenceUID = pydicom.uid.generate_uid()
ds.BitsAllocated = 8
ds.BitsStored = 8
ds.HighBit = 7
ds.PhotometricInterpretation = "RGB"
ds.PixelRepresentation = 0
ds.Rows = image_array.shape[0]
ds.Columns = image_array.shape[1]
ds.SamplesPerPixel = 3
ds.PlanarConfiguration = 0
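    # ImageOrientationPatient stores the direction cosines of the first row and
    # first column of the image with respect to the patient (LPS) axes.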
if plane.lower() == "axial":
ds.ImageOrientationPatient = [1, 0, 0, 0, 1, 0]
elif plane.lower() == "sagittal":
ds.ImageOrientationPatient = [0, 1, 0, 0, 0, -1]
elif plane.lower() == "coronal":
ds.ImageOrientationPatient = [1, 0, 0, 0, 0, -1]
else:
raise ValueError("Invalid plane value. Must be 'axial', 'sagittal', or 'coronal'.")
ds.PixelData = image_array.tobytes()
    pydicom.dcmwrite(dicom_path, ds, write_like_original=False)
# Example usage
if __name__ == "__main__":
png_path = "../../figures/spine_example.png"
output_path = "./"
plane = "sagittal"
to_dicom(png_path, output_path, plane)
| 2,389 | 33.142857 | 98 | py |
Comp2Comp | Comp2Comp-master/comp2comp/visualization/linear_planar_reformation.py | """
@author: louisblankemeier
"""
import numpy as np
def linear_planar_reformation(
    medical_volume: np.ndarray, segmentation: np.ndarray, centroids, dimension="axial"
):
    """Reformat a volume and its segmentation along a piecewise-linear path:
    plane indices are linearly interpolated between successive centroids so the
    extracted 2D image follows the labeled structure in the given dimension."""
if dimension == "sagittal" or dimension == "coronal":
centroids = sorted(centroids, key=lambda x: x[2])
elif dimension == "axial":
centroids = sorted(centroids, key=lambda x: x[0])
centroids = [(int(x[0]), int(x[1]), int(x[2])) for x in centroids]
sagittal_centroids = [centroids[i][0] for i in range(0, len(centroids))]
coronal_centroids = [centroids[i][1] for i in range(0, len(centroids))]
axial_centroids = [centroids[i][2] for i in range(0, len(centroids))]
sagittal_vals, coronal_vals, axial_vals = [], [], []
if dimension == "sagittal":
sagittal_vals = [sagittal_centroids[0]] * axial_centroids[0]
if dimension == "coronal":
coronal_vals = [coronal_centroids[0]] * axial_centroids[0]
if dimension == "axial":
axial_vals = [axial_centroids[0]] * sagittal_centroids[0]
for i in range(1, len(axial_centroids)):
if dimension == "sagittal" or dimension == "coronal":
num = axial_centroids[i] - axial_centroids[i - 1]
elif dimension == "axial":
num = sagittal_centroids[i] - sagittal_centroids[i - 1]
if dimension == "sagittal":
interp = list(np.linspace(sagittal_centroids[i - 1], sagittal_centroids[i], num=num))
sagittal_vals.extend(interp)
if dimension == "coronal":
interp = list(np.linspace(coronal_centroids[i - 1], coronal_centroids[i], num=num))
coronal_vals.extend(interp)
if dimension == "axial":
interp = list(np.linspace(axial_centroids[i - 1], axial_centroids[i], num=num))
axial_vals.extend(interp)
if dimension == "sagittal":
sagittal_vals.extend(
[sagittal_centroids[-1]] * (medical_volume.shape[2] - len(sagittal_vals))
)
sagittal_vals = np.array(sagittal_vals)
sagittal_vals = sagittal_vals.astype(int)
if dimension == "coronal":
coronal_vals.extend([coronal_centroids[-1]] * (medical_volume.shape[2] - len(coronal_vals)))
coronal_vals = np.array(coronal_vals)
coronal_vals = coronal_vals.astype(int)
if dimension == "axial":
axial_vals.extend([axial_centroids[-1]] * (medical_volume.shape[0] - len(axial_vals)))
axial_vals = np.array(axial_vals)
axial_vals = axial_vals.astype(int)
if dimension == "sagittal":
sagittal_image = medical_volume[sagittal_vals, :, range(len(sagittal_vals))]
sagittal_label = segmentation[sagittal_vals, :, range(len(sagittal_vals))]
if dimension == "coronal":
coronal_image = medical_volume[:, coronal_vals, range(len(coronal_vals))]
coronal_label = segmentation[:, coronal_vals, range(len(coronal_vals))]
if dimension == "axial":
axial_image = medical_volume[range(len(axial_vals)), :, axial_vals]
axial_label = segmentation[range(len(axial_vals)), :, axial_vals]
if dimension == "sagittal":
return sagittal_image, sagittal_label
if dimension == "coronal":
return coronal_image, coronal_label
if dimension == "axial":
return axial_image, axial_label
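# Minimal usage sketch, run only when this module is executed directly. The
# volume shape and centroid coordinates below are illustrative assumptions.
if __name__ == "__main__":
    vol = np.random.rand(32, 32, 16)
    seg = (vol > 0.5).astype(np.uint8)
    centroids = [(10, 12, 2), (20, 14, 12)]
    img2d, lbl2d = linear_planar_reformation(vol, seg, centroids, dimension="sagittal")
    print(img2d.shape, lbl2d.shape)  # both (16, 32)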
| 3,316 | 37.126437 | 100 | py |
Comp2Comp | Comp2Comp-master/comp2comp/visualization/detectron_visualizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
import colorsys
import logging
import math
import os
from enum import Enum, unique
from pathlib import Path
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import numpy as np
import pycocotools.mask as mask_util
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg
from comp2comp.utils.colormap import random_color
from comp2comp.visualization.dicom import to_dicom
logger = logging.getLogger(__name__)
__all__ = ["ColorMode", "VisImage", "Visualizer"]
_SMALL_OBJECT_AREA_THRESH = 1000
_LARGE_MASK_AREA_THRESH = 120000
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
"""
Enum of different color modes to use for instance visualizations.
"""
IMAGE = 0
"""
Picks a random color for every instance and overlay segmentations with low opacity.
"""
SEGMENTATION = 1
"""
Let instances of the same category have similar colors
(from metadata.thing_colors), and overlay them with
high opacity. This provides more attention on the quality of segmentation.
"""
IMAGE_BW = 2
"""
Same as IMAGE, but convert all areas without masks to gray-scale.
Only available for drawing per-instance mask predictions.
"""
class GenericMask:
"""
Attribute:
polygons (list[ndarray]): list[ndarray]: polygons for this mask.
Each ndarray has format [x, y, x, y, ...]
mask (ndarray): a binary mask
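    Example:
        GenericMask([[0, 0, 0, 10, 10, 10, 10, 0]], 32, 32).mask rasterizes a
        10x10 square polygon (format [x0, y0, x1, y1, ...]) into a binary array.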
"""
def __init__(self, mask_or_polygons, height, width):
self._mask = self._polygons = self._has_holes = None
self.height = height
self.width = width
m = mask_or_polygons
if isinstance(m, dict):
# RLEs
assert "counts" in m and "size" in m
if isinstance(m["counts"], list): # uncompressed RLEs
h, w = m["size"]
assert h == height and w == width
m = mask_util.frPyObjects(m, h, w)
self._mask = mask_util.decode(m)[:, :]
return
if isinstance(m, list): # list[ndarray]
self._polygons = [np.asarray(x).reshape(-1) for x in m]
return
if isinstance(m, np.ndarray): # assumed to be a binary mask
assert m.shape[1] != 2, m.shape
assert m.shape == (
height,
width,
), f"mask shape: {m.shape}, target dims: {height}, {width}"
self._mask = m.astype("uint8")
return
raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
@property
def mask(self):
if self._mask is None:
self._mask = self.polygons_to_mask(self._polygons)
return self._mask
@property
def polygons(self):
if self._polygons is None:
self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
return self._polygons
@property
def has_holes(self):
if self._has_holes is None:
if self._mask is not None:
self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
else:
self._has_holes = False # if original format is polygon, does not have holes
return self._has_holes
def mask_to_polygons(self, mask):
# cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
# hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
# Internal contours (holes) are placed in hierarchy-2.
# cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
        mask = np.ascontiguousarray(mask)  # some versions of cv2 do not support non-contiguous arrays
res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
hierarchy = res[-1]
if hierarchy is None: # empty mask
return [], False
has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
res = res[-2]
res = [x.flatten() for x in res]
# These coordinates from OpenCV are integers in range [0, W-1 or H-1].
# We add 0.5 to turn them into real-value coordinate space. A better solution
# would be to first +0.5 and then dilate the returned polygon by 0.5.
res = [x + 0.5 for x in res if len(x) >= 6]
return res, has_holes
def polygons_to_mask(self, polygons):
rle = mask_util.frPyObjects(polygons, self.height, self.width)
rle = mask_util.merge(rle)
return mask_util.decode(rle)[:, :]
def area(self):
return self.mask.sum()
def bbox(self):
p = mask_util.frPyObjects(self.polygons, self.height, self.width)
p = mask_util.merge(p)
bbox = mask_util.toBbox(p)
bbox[2] += bbox[0]
bbox[3] += bbox[1]
return bbox
class _PanopticPrediction:
"""
Unify different panoptic annotation/prediction formats
"""
def __init__(self, panoptic_seg, segments_info, metadata=None):
if segments_info is None:
assert metadata is not None
# If "segments_info" is None, we assume "panoptic_img" is a
# H*W int32 image storing the panoptic_id in the format of
# category_id * label_divisor + instance_id. We reserve -1 for
# VOID label.
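            # For example, with label_divisor=1000 a panoptic_id of 2003
            # encodes category_id 2 and instance_id 3.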
label_divisor = metadata.label_divisor
segments_info = []
for panoptic_label in np.unique(panoptic_seg.numpy()):
if panoptic_label == -1:
# VOID region.
continue
pred_class = panoptic_label // label_divisor
isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
segments_info.append(
{
"id": int(panoptic_label),
"category_id": int(pred_class),
"isthing": bool(isthing),
}
)
del metadata
self._seg = panoptic_seg
self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
areas = areas.numpy()
sorted_idxs = np.argsort(-areas)
self._seg_ids, self._seg_areas = (
segment_ids[sorted_idxs],
areas[sorted_idxs],
)
self._seg_ids = self._seg_ids.tolist()
for sid, area in zip(self._seg_ids, self._seg_areas):
if sid in self._sinfo:
self._sinfo[sid]["area"] = float(area)
def non_empty_mask(self):
"""
Returns:
(H, W) array, a mask for all pixels that have a prediction
"""
empty_ids = []
for id in self._seg_ids:
if id not in self._sinfo:
empty_ids.append(id)
if len(empty_ids) == 0:
return np.zeros(self._seg.shape, dtype=np.uint8)
        assert (
            len(empty_ids) == 1
        ), ">1 ids correspond to no labels. This is currently not supported"
        return (self._seg != empty_ids[0]).numpy().astype(bool)
def semantic_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or sinfo["isthing"]:
# Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
continue
            yield (self._seg == sid).numpy().astype(bool), sinfo
def instance_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or not sinfo["isthing"]:
continue
            mask = (self._seg == sid).numpy().astype(bool)
if mask.sum() > 0:
yield mask, sinfo
def _create_text_labels(classes, scores, class_names, is_crowd=None):
"""
Args:
classes (list[int] or None):
scores (list[float] or None):
class_names (list[str] or None):
is_crowd (list[bool] or None):
Returns:
list[str] or None
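        For example, classes=[0, 1], scores=[0.9, 0.5], class_names=["cat", "dog"]
        yields ["cat 90%", "dog 50%"].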
"""
labels = None
if classes is not None:
if class_names is not None and len(class_names) > 0:
labels = [class_names[i] for i in classes]
else:
labels = [str(i) for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = ["{} {:.0f}%".format(lbl, s * 100) for lbl, s in zip(labels, scores)]
if labels is not None and is_crowd is not None:
labels = [lbl + ("|crowd" if crowd else "") for lbl, crowd in zip(labels, is_crowd)]
return labels
class VisImage:
def __init__(self, img, scale=1.0):
"""
Args:
img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255].
scale (float): scale the input image
"""
self.img = img
self.scale = scale
self.width, self.height = img.shape[1], img.shape[0]
self._setup_figure(img)
def _setup_figure(self, img):
"""
Args:
Same as in :meth:`__init__()`.
Returns:
fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
"""
fig = mplfigure.Figure(frameon=False)
self.dpi = fig.get_dpi()
# add a small 1e-2 to avoid precision lost due to matplotlib's truncation
# (https://github.com/matplotlib/matplotlib/issues/15363)
fig.set_size_inches(
(self.width * self.scale + 1e-2) / self.dpi,
(self.height * self.scale + 1e-2) / self.dpi,
)
self.canvas = FigureCanvasAgg(fig)
# self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
ax.axis("off")
self.fig = fig
self.ax = ax
self.reset_image(img)
def reset_image(self, img):
"""
Args:
img: same as in __init__
"""
img = img.astype("uint8")
self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
def save(self, filepath):
"""
Args:
filepath (str): a string that contains the absolute path, including the file name, where
the visualized image will be saved.
"""
# if filepath is a png or jpg
img = self.get_image()
if filepath.endswith(".png") or filepath.endswith(".jpg"):
self.fig.savefig(filepath)
if filepath.endswith(".dcm"):
to_dicom(img, Path(filepath))
return img
def get_image(self):
"""
Returns:
ndarray:
the visualized image of shape (H, W, 3) (RGB) in uint8 type.
The shape is scaled w.r.t the input image using the given `scale` argument.
"""
canvas = self.canvas
s, (width, height) = canvas.print_to_buffer()
# buf = io.BytesIO() # works for cairo backend
# canvas.print_rgba(buf)
# width, height = self.width, self.height
# s = buf.getvalue()
buffer = np.frombuffer(s, dtype="uint8")
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
return rgb.astype("uint8")
class Visualizer:
"""
Visualizer that draws data about detection/segmentation on images.
It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
that draw primitive objects to images, as well as high-level wrappers like
`draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
that draw composite data in some pre-defined style.
Note that the exact visualization style for the high-level wrappers are subject to change.
Style such as color, opacity, label contents, visibility of labels, or even the visibility
of objects themselves (e.g. when the object is too small) may change according
to different heuristics, as long as the results still look visually reasonable.
To obtain a consistent style, you can implement custom drawing functions with the
abovementioned primitive methods instead. If you need more customized visualization
styles, you can process the data yourself following their format documented in
tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
intend to satisfy everyone's preference on drawing styles.
This visualizer focuses on high rendering quality rather than performance. It is not
designed to be used for real-time applications.
"""
# TODO implement a fast, rasterized version using OpenCV
def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
"""
Args:
img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
the height and width of the image respectively. C is the number of
color channels. The image is required to be in RGB format since that
is a requirement of the Matplotlib library. The image is also expected
to be in the range [0, 255].
metadata (Metadata): dataset metadata (e.g. class names and colors)
instance_mode (ColorMode): defines one of the pre-defined style for drawing
instances on an image.
"""
self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
# if metadata is None:
# metadata = MetadataCatalog.get("__nonexist__")
self.metadata = metadata
self.output = VisImage(self.img, scale=scale)
self.cpu_device = torch.device("cpu")
        # very small text is useless, so clamp the default font size to at least 10 // scale
self._default_font_size = max(
np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
)
self._instance_mode = instance_mode
self.keypoint_threshold = _KEYPOINT_THRESHOLD
def draw_instance_predictions(self, predictions):
"""
Draw instance-level prediction results on an image.
Args:
predictions (Instances): the output of an instance detection/segmentation
model. Following fields will be used to draw:
"pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
Returns:
output (VisImage): image object with visualizations.
"""
boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
scores = predictions.scores if predictions.has("scores") else None
classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
if predictions.has("pred_masks"):
masks = np.asarray(predictions.pred_masks)
masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
else:
masks = None
if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
colors = [
self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
]
alpha = 0.8
else:
colors = None
alpha = 0.5
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.reset_image(
self._create_grayscale_image(
(predictions.pred_masks.any(dim=0) > 0).numpy()
if predictions.has("pred_masks")
else None
)
)
alpha = 0.3
self.overlay_instances(
masks=masks,
boxes=boxes,
labels=labels,
keypoints=keypoints,
assigned_colors=colors,
alpha=alpha,
)
return self.output
def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
"""
Draw semantic segmentation predictions/labels.
Args:
sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
Each value is the integer label of the pixel.
area_threshold (int): segments with less than `area_threshold` are not drawn.
alpha (float): the larger it is, the more opaque the segmentations are.
Returns:
output (VisImage): image object with visualizations.
"""
if isinstance(sem_seg, torch.Tensor):
sem_seg = sem_seg.numpy()
labels, areas = np.unique(sem_seg, return_counts=True)
sorted_idxs = np.argsort(-areas).tolist()
labels = labels[sorted_idxs]
for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
except (AttributeError, IndexError):
mask_color = None
binary_mask = (sem_seg == label).astype(np.uint8)
text = self.metadata.stuff_classes[label]
self.draw_binary_mask(
binary_mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
return self.output
def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):
"""
Draw panoptic prediction annotations or results.
Args:
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
segment.
segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.
If it is a ``list[dict]``, each dict contains keys "id", "category_id".
If None, category id of each pixel is computed by
``pixel // metadata.label_divisor``.
area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
Returns:
output (VisImage): image object with visualizations.
"""
pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))
# draw mask for all semantic segments first i.e. "stuff"
for mask, sinfo in pred.semantic_masks():
category_idx = sinfo["category_id"]
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
except AttributeError:
mask_color = None
text = self.metadata.stuff_classes[category_idx]
self.draw_binary_mask(
mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
# draw mask for all instances second
all_instances = list(pred.instance_masks())
if len(all_instances) == 0:
return self.output
masks, sinfo = list(zip(*all_instances))
category_ids = [x["category_id"] for x in sinfo]
try:
scores = [x["score"] for x in sinfo]
except KeyError:
scores = None
labels = _create_text_labels(
category_ids,
scores,
self.metadata.thing_classes,
[x.get("iscrowd", 0) for x in sinfo],
)
try:
colors = [
self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids
]
except AttributeError:
colors = None
self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
return self.output
draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility
def overlay_instances(
self,
*,
boxes=None,
labels=None,
masks=None,
keypoints=None,
assigned_colors=None,
alpha=0.5,
):
"""
Args:
boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
or a :class:`RotatedBoxes`,
or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image,
labels (list[str]): the text to be displayed for each instance.
masks (masks-like object): Supported types are:
* :class:`detectron2.structures.PolygonMasks`,
:class:`detectron2.structures.BitMasks`.
* list[list[ndarray]]: contains the segmentation masks for all objects in one image.
The first level of the list corresponds to individual instances. The second
level to all the polygon that compose the instance, and the third level
to the polygon coordinates. The third level should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
* list[ndarray]: each ndarray is a binary mask of shape (H, W).
* list[dict]: each dict is a COCO-style RLE.
keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
where the N is the number of instances and K is the number of keypoints.
The last dimension corresponds to (x, y, visibility or score).
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = 0
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None and boxes.shape[1] == 5:
return self.overlay_rotated_instances(
boxes=boxes, labels=labels, assigned_colors=assigned_colors
)
# Display in largest to smallest order to reduce occlusion.
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if labels is not None:
# first get a box
if boxes is not None:
x0, y0, x1, y1 = boxes[i]
text_pos = (
x0,
y0,
) # if drawing boxes, put text on the box corner.
horiz_align = "left"
elif masks is not None:
# skip small mask without polygon
if len(masks[i].polygons) == 0:
continue
x0, y0, x1, y1 = masks[i].bbox()
# draw text in the center (defined by median) when box is not drawn
# median is less sensitive to outliers.
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = "center"
else:
continue # drawing the box confidence for keypoints isn't very useful.
# for small objects, draw text at the side to avoid occlusion
instance_area = (y1 - y0) * (x1 - x0)
if (
instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
or y1 - y0 < 40 * self.output.scale
):
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
* 0.5
* self._default_font_size
)
self.draw_text(
labels[i],
text_pos,
color=lighter_color,
horizontal_alignment=horiz_align,
font_size=font_size,
)
# draw keypoints
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
"""
Args:
boxes (ndarray): an Nx5 numpy array of
(x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image.
labels (list[str]): the text to be displayed for each instance.
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = len(boxes)
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
# Display in largest to smallest order to reduce occlusion.
if boxes is not None:
areas = boxes[:, 2] * boxes[:, 3]
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs]
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
colors = [assigned_colors[idx] for idx in sorted_idxs]
for i in range(num_instances):
self.draw_rotated_box_with_label(
boxes[i],
edge_color=colors[i],
label=labels[i] if labels is not None else None,
)
return self.output
def draw_and_connect_keypoints(self, keypoints):
"""
Draws keypoints of an instance and follows the rules for keypoint connections
to draw lines between appropriate keypoints. This follows color heuristics for
line color.
Args:
keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
and the last dimension corresponds to (x, y, probability).
Returns:
output (VisImage): image object with visualizations.
"""
visible = {}
keypoint_names = self.metadata.get("keypoint_names")
for idx, keypoint in enumerate(keypoints):
# draw keypoint
x, y, prob = keypoint
if prob > self.keypoint_threshold:
self.draw_circle((x, y), color=_RED)
if keypoint_names:
keypoint_name = keypoint_names[idx]
visible[keypoint_name] = (x, y)
if self.metadata.get("keypoint_connection_rules"):
for kp0, kp1, color in self.metadata.keypoint_connection_rules:
if kp0 in visible and kp1 in visible:
x0, y0 = visible[kp0]
x1, y1 = visible[kp1]
color = tuple(x / 255.0 for x in color)
self.draw_line([x0, x1], [y0, y1], color=color)
# draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
# Note that this strategy is specific to person keypoints.
# For other keypoints, it should just do nothing
try:
ls_x, ls_y = visible["left_shoulder"]
rs_x, rs_y = visible["right_shoulder"]
mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
except KeyError:
pass
else:
# draw line from nose to mid-shoulder
nose_x, nose_y = visible.get("nose", (None, None))
if nose_x is not None:
self.draw_line(
[nose_x, mid_shoulder_x],
[nose_y, mid_shoulder_y],
color=_RED,
)
try:
# draw line from mid-shoulder to mid-hip
lh_x, lh_y = visible["left_hip"]
rh_x, rh_y = visible["right_hip"]
except KeyError:
pass
else:
mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
self.draw_line(
[mid_hip_x, mid_shoulder_x],
[mid_hip_y, mid_shoulder_y],
color=_RED,
)
return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self,
text,
position,
*,
font_size=None,
color="g",
horizontal_alignment="center",
rotation=0,
):
"""
Args:
text (str): class label
position (tuple): a tuple of the x and y coordinates to place text on image.
font_size (int, optional): font of the text. If not provided, a font size
proportional to the image width is calculated and used.
color: color of the text. Refer to `matplotlib.colors` for full list
of formats that are accepted.
horizontal_alignment (str): see `matplotlib.text.Text`
rotation: rotation angle in degrees CCW
Returns:
output (VisImage): image object with text drawn.
"""
if not font_size:
font_size = self._default_font_size
# since the text background is dark, we don't want the text to be dark
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={
"facecolor": "black",
"alpha": 0.8,
"pad": 0.7,
"edgecolor": "none",
},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
rotation=rotation,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
"""
Args:
box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
are the coordinates of the image's top left corner. x1 and y1 are the
coordinates of the image's bottom right corner.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
Returns:
output (VisImage): image object with box drawn.
"""
x0, y0, x1, y1 = box_coord
width = x1 - x0
height = y1 - y0
linewidth = max(self._default_font_size / 4, 1)
self.output.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=edge_color,
linewidth=linewidth * self.output.scale,
alpha=alpha,
linestyle=line_style,
)
)
return self.output
def draw_rotated_box_with_label(
self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
):
"""
Draw a rotated box with label on its top-left corner.
Args:
rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
where cnt_x and cnt_y are the center coordinates of the box.
w and h are the width and height of the box. angle represents how
many degrees the box is rotated CCW with regard to the 0-degree box.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
label (string): label for rotated box. It will not be rendered when set to None.
Returns:
output (VisImage): image object with box drawn.
"""
cnt_x, cnt_y, w, h, angle = rotated_box
area = w * h
# use thinner lines when the box is small
linewidth = self._default_font_size / (
6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
)
theta = angle * math.pi / 180.0
c = math.cos(theta)
s = math.sin(theta)
rect = [
(-w / 2, h / 2),
(-w / 2, -h / 2),
(w / 2, -h / 2),
(w / 2, h / 2),
]
# x: left->right ; y: top->down
rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
for k in range(4):
j = (k + 1) % 4
self.draw_line(
[rotated_rect[k][0], rotated_rect[j][0]],
[rotated_rect[k][1], rotated_rect[j][1]],
color=edge_color,
linestyle="--" if k == 1 else line_style,
linewidth=linewidth,
)
if label is not None:
text_pos = rotated_rect[1] # topleft corner
height_ratio = h / np.sqrt(self.output.height * self.output.width)
label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
)
self.draw_text(
label,
text_pos,
color=label_color,
font_size=font_size,
rotation=angle,
)
return self.output
def draw_circle(self, circle_coord, color, radius=3):
"""
Args:
circle_coord (list(int) or tuple(int)): contains the x and y coordinates
of the center of the circle.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
radius (int): radius of the circle.
Returns:
output (VisImage): image object with box drawn.
"""
x, y = circle_coord
self.output.ax.add_patch(
mpl.patches.Circle(circle_coord, radius=radius, fill=False, color=color)
)
return self.output
def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
"""
Args:
x_data (list[int]): a list containing x values of all the points being drawn.
Length of list should match the length of y_data.
y_data (list[int]): a list containing y values of all the points being drawn.
Length of list should match the length of x_data.
color: color of the line. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
for a full list of formats that are accepted.
linewidth (float or None): width of the line. When it's None,
a default value will be computed and used.
Returns:
output (VisImage): image object with line drawn.
"""
if linewidth is None:
linewidth = self._default_font_size / 3
linewidth = max(linewidth, 1)
self.output.ax.add_line(
mpl.lines.Line2D(
x_data,
y_data,
linewidth=linewidth * self.output.scale,
color=color,
linestyle=linestyle,
)
)
return self.output
def draw_binary_mask(
self,
binary_mask,
color=None,
*,
edge_color=None,
text=None,
alpha=0.5,
area_threshold=10,
):
"""
Args:
binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
W is the image width. Each value in the array is either a 0 or 1 value of uint8
type.
color: color of the mask. Refer to `matplotlib.colors` for a full list of
formats that are accepted. If None, will pick a random color.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted.
            text (str): if not None, will be drawn on the object
alpha (float): blending efficient. Smaller values lead to more transparent masks.
area_threshold (float): a connected component smaller than this area will not be shown.
Returns:
output (VisImage): image object with mask drawn.
"""
if color is None:
color = random_color(rgb=True, maximum=1)
color = mplc.to_rgb(color)
has_valid_segment = False
binary_mask = binary_mask.astype("uint8") # opencv needs uint8
mask = GenericMask(binary_mask, self.output.height, self.output.width)
shape2d = (binary_mask.shape[0], binary_mask.shape[1])
if not mask.has_holes:
# draw polygons for regular masks
for segment in mask.polygons:
area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
if area < (area_threshold or 0):
continue
has_valid_segment = True
segment = segment.reshape(-1, 2)
self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
else:
# TODO: Use Path/PathPatch to draw vector graphics:
# https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
rgba = np.zeros(shape2d + (4,), dtype="float32")
rgba[:, :, :3] = color
rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
has_valid_segment = True
self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
if text is not None and has_valid_segment:
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
self._draw_text_in_mask(binary_mask, text, lighter_color)
return self.output
def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):
"""
Args:
soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].
color: color of the mask. Refer to `matplotlib.colors` for a full list of
formats that are accepted. If None, will pick a random color.
            text (str): if not None, will be drawn on the object
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with mask drawn.
"""
if color is None:
color = random_color(rgb=True, maximum=1)
color = mplc.to_rgb(color)
shape2d = (soft_mask.shape[0], soft_mask.shape[1])
rgba = np.zeros(shape2d + (4,), dtype="float32")
rgba[:, :, :3] = color
rgba[:, :, 3] = soft_mask * alpha
self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
if text is not None:
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
binary_mask = (soft_mask > 0.5).astype("uint8")
self._draw_text_in_mask(binary_mask, text, lighter_color)
return self.output
def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
"""
Args:
segment: numpy array of shape Nx2, containing all the points in the polygon.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted. If not provided, a darker shade
of the polygon color will be used instead.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with polygon drawn.
"""
        if edge_color is not None:
            # (disabled) a darker edge could instead be derived from the polygon color:
            # edge_color = self._change_color_brightness(color, brightness_factor=-0.7) if alpha > 0.8 else color
            edge_color = mplc.to_rgb(edge_color) + (1,)
polygon = mpl.patches.Polygon(
segment,
fill=True,
facecolor=mplc.to_rgb(color) + (alpha,),
edgecolor=edge_color,
linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
)
self.output.ax.add_patch(polygon)
return self.output
"""
Internal methods:
"""
def _jitter(self, color):
"""
Randomly modifies given color to produce a slightly different color than the color given.
Args:
color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
picked. The values in the list are in the [0.0, 1.0] range.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = mplc.to_rgb(color)
vec = np.random.rand(3)
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return tuple(res)
def _create_grayscale_image(self, mask=None):
"""
Create a grayscale version of the original image.
The colors in masked area, if given, will be kept.
"""
img_bw = self.img.astype("f4").mean(axis=2)
img_bw = np.stack([img_bw] * 3, axis=2)
if mask is not None:
img_bw[mask] = self.img[mask]
return img_bw
def _change_color_brightness(self, color, brightness_factor):
"""
Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
less or more saturation than the original color.
Args:
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
0 will correspond to no change, a factor in [-1.0, 0) range will result in
a darker color and a factor in (0, 1.0] range will result in a lighter color.
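                For example, brightness_factor=0.7 raises the HLS lightness of
                pure green (0, 1, 0) from 0.5 to 0.85, giving roughly (0.7, 1.0, 0.7).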
Returns:
modified_color (tuple[double]): a tuple containing the RGB values of the
modified color. Each value in the tuple is in the [0.0, 1.0] range.
"""
assert brightness_factor >= -1.0 and brightness_factor <= 1.0
color = mplc.to_rgb(color)
polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
return modified_color
def _convert_masks(self, masks_or_polygons):
"""
        Convert different formats of masks or polygons to a list of GenericMask objects.
Returns:
list[GenericMask]:
"""
m = masks_or_polygons
if isinstance(m, torch.Tensor):
m = m.numpy()
ret = []
for x in m:
if isinstance(x, GenericMask):
ret.append(x)
else:
ret.append(GenericMask(x, self.output.height, self.output.width))
return ret
def _draw_text_in_mask(self, binary_mask, text, color):
"""
Find proper places to draw text given a binary mask.
"""
# TODO sometimes drawn on wrong objects. the heuristics here can improve.
_num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
if stats[1:, -1].size == 0:
return
largest_component_id = np.argmax(stats[1:, -1]) + 1
# draw text on the largest component, as well as other very large components.
for cid in range(1, _num_cc):
if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
# median is more stable than centroid
# center = centroids[largest_component_id]
center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
self.draw_text(text, center, color=color)
def get_output(self):
"""
Returns:
output (VisImage): the image output containing the visualizations added
to the image.
"""
return self.output
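# Minimal usage sketch of the primitive drawing API, run only when this module
# is executed directly; the image, mask, and colors are illustrative only.
if __name__ == "__main__":
    demo_img = np.zeros((64, 64, 3), dtype=np.uint8)
    demo_vis = Visualizer(demo_img)
    demo_mask = np.zeros((64, 64), dtype=np.uint8)
    demo_mask[20:40, 20:40] = 1
    demo_vis.draw_binary_mask(demo_mask, color=(0, 1, 0), alpha=0.6, area_threshold=0)
    demo_vis.draw_text(text="demo", position=(32, 10), color=(1, 1, 1), font_size=9)
    print(demo_vis.get_output().get_image().shape)  # (64, 64, 3)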
| 48,577 | 38.526444 | 100 | py |
Comp2Comp | Comp2Comp-master/comp2comp/spine/spine_visualization.py | """
@author: louisblankemeier
"""
import os
from pathlib import Path
from typing import Union
import numpy as np
from comp2comp.visualization.detectron_visualizer import Visualizer
def spine_binary_segmentation_overlay(
    img_in: np.ndarray,
    mask: np.ndarray,
base_path: Union[str, Path],
file_name: str,
figure_text_key=None,
spine_hus=None,
spine=True,
model_type=None,
pixel_spacing=None,
):
"""Save binary segmentation overlay.
Args:
img_in (Union[str, Path]): Path to the input image.
mask (Union[str, Path]): Path to the mask.
base_path (Union[str, Path]): Path to the output directory.
file_name (str): Output file name.
centroids (list, optional): List of centroids. Defaults to None.
figure_text_key (dict, optional): Figure text key. Defaults to None.
spine_hus (list, optional): List of HU values. Defaults to None.
spine (bool, optional): Spine flag. Defaults to True.
model_type (Models): Model type. Defaults to None.
"""
    _COLORS = (
        np.array(
            [
                1.000, 0.000, 0.000,  # L5: red
                0.000, 1.000, 0.000,  # L4: green
                1.000, 1.000, 0.000,  # L3: yellow
                1.000, 0.500, 0.000,  # L2: orange
                0.000, 1.000, 1.000,  # L1: cyan
                1.000, 0.000, 1.000,  # T12: magenta
            ]
        )
        .astype(np.float32)
        .reshape(-1, 3)
    )
label_map = {"L5": 0, "L4": 1, "L3": 2, "L2": 3, "L1": 4, "T12": 5}
_ROI_COLOR = np.array([1.000, 0.340, 0.200])
_SPINE_TEXT_OFFSET_FROM_TOP = 10.0
_SPINE_TEXT_OFFSET_FROM_RIGHT = 63.0
_SPINE_TEXT_VERTICAL_SPACING = 14.0
img_in = np.clip(img_in, -300, 1800)
img_in = normalize_img(img_in) * 255.0
images_base_path = Path(base_path) / "images"
images_base_path.mkdir(exist_ok=True)
img_in = img_in.reshape((img_in.shape[0], img_in.shape[1], 1))
img_rgb = np.tile(img_in, (1, 1, 3))
vis = Visualizer(img_rgb)
levels = list(spine_hus.keys())
levels.reverse()
num_levels = len(levels)
# draw seg masks
for i, level in enumerate(levels):
color = _COLORS[label_map[level]]
edge_color = None
alpha_val = 0.2
vis.draw_binary_mask(
mask[:, :, i].astype(int),
color=color,
edge_color=edge_color,
alpha=alpha_val,
area_threshold=0,
)
# draw rois
    for i, _ in enumerate(levels):
        color = _ROI_COLOR
        edge_color = color
        alpha_val = 0.2
vis.draw_binary_mask(
mask[:, :, num_levels + i].astype(int),
color=color,
edge_color=edge_color,
alpha=alpha_val,
area_threshold=0,
)
# draw text and lines
for i, level in enumerate(levels):
vis.draw_text(
text=f"{level}: {round(float(spine_hus[level]))}",
position=(
mask.shape[1] - _SPINE_TEXT_OFFSET_FROM_RIGHT,
_SPINE_TEXT_VERTICAL_SPACING * i + _SPINE_TEXT_OFFSET_FROM_TOP,
),
color=_COLORS[label_map[level]],
font_size=9,
horizontal_alignment="left",
)
"""
vis.draw_line(
x_data=(0, mask.shape[1] - 1),
y_data=(
int(
inferior_superior_centers[num_levels - i - 1]
* (pixel_spacing[2] / pixel_spacing[1])
),
int(
inferior_superior_centers[num_levels - i - 1]
* (pixel_spacing[2] / pixel_spacing[1])
),
),
color=_COLORS[label_map[level]],
linestyle="dashed",
linewidth=0.25,
)
"""
vis_obj = vis.get_output()
img = vis_obj.save(os.path.join(images_base_path, file_name))
return img
def normalize_img(img: np.ndarray) -> np.ndarray:
"""Normalize the image.
Args:
img (np.ndarray): Input image.
Returns:
np.ndarray: Normalized image.
"""
return (img - img.min()) / (img.max() - img.min())
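# Minimal usage sketch, run only when this module is executed directly. The
# synthetic slice, mask channels, and HU value below are illustrative only:
# one vertebral level ("L3") followed by its ROI channel.
if __name__ == "__main__":
    demo_img = np.random.uniform(-300, 1800, (128, 128))
    demo_mask = np.zeros((128, 128, 2))
    demo_mask[40:60, 40:60, 0] = 1  # level segmentation channel
    demo_mask[45:55, 45:55, 1] = 1  # ROI channel
    spine_binary_segmentation_overlay(
        demo_img, demo_mask, ".", "demo.png", spine_hus={"L3": 150.0}
    )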
| 4,315 | 26.666667 | 79 | py |
Comp2Comp | Comp2Comp-master/comp2comp/spine/spine.py | """
@author: louisblankemeier
"""
import math
import os
import shutil
import zipfile
from pathlib import Path
from time import time
from typing import Union
import nibabel as nib
import numpy as np
import pandas as pd
import wget
from PIL import Image
from totalsegmentator.libs import (
download_pretrained_weights,
nostdout,
setup_nnunet,
)
from comp2comp.inference_class_base import InferenceClass
from comp2comp.models.models import Models
from comp2comp.spine import spine_utils
from comp2comp.visualization.dicom import to_dicom
class SpineSegmentation(InferenceClass):
"""Spine segmentation."""
def __init__(self, model_name, save=True):
super().__init__()
self.model_name = model_name
self.save_segmentations = save
def __call__(self, inference_pipeline):
# inference_pipeline.dicom_series_path = self.input_path
self.output_dir = inference_pipeline.output_dir
self.output_dir_segmentations = os.path.join(self.output_dir, "segmentations/")
if not os.path.exists(self.output_dir_segmentations):
os.makedirs(self.output_dir_segmentations)
self.model_dir = inference_pipeline.model_dir
seg, mv = self.spine_seg(
os.path.join(self.output_dir_segmentations, "converted_dcm.nii.gz"),
self.output_dir_segmentations + "spine.nii.gz",
inference_pipeline.model_dir,
)
inference_pipeline.segmentation = seg
inference_pipeline.medical_volume = mv
inference_pipeline.save_segmentations = self.save_segmentations
return {}
def setup_nnunet_c2c(self, model_dir: Union[str, Path]):
"""Adapted from TotalSegmentator."""
model_dir = Path(model_dir)
config_dir = model_dir / Path("." + self.model_name)
(config_dir / "nnunet/results/nnUNet/3d_fullres").mkdir(exist_ok=True, parents=True)
(config_dir / "nnunet/results/nnUNet/2d").mkdir(exist_ok=True, parents=True)
weights_dir = config_dir / "nnunet/results"
self.weights_dir = weights_dir
os.environ["nnUNet_raw_data_base"] = str(
weights_dir
) # not needed, just needs to be an existing directory
os.environ["nnUNet_preprocessed"] = str(
weights_dir
) # not needed, just needs to be an existing directory
os.environ["RESULTS_FOLDER"] = str(weights_dir)
def download_spine_model(self, model_dir: Union[str, Path]):
download_dir = Path(
os.path.join(
self.weights_dir,
"nnUNet/3d_fullres/Task252_Spine/nnUNetTrainerV2_ep4000_nomirror__nnUNetPlansv2.1",
)
)
fold_0_path = download_dir / "fold_0"
if not os.path.exists(fold_0_path):
download_dir.mkdir(parents=True, exist_ok=True)
wget.download(
"https://huggingface.co/louisblankemeier/spine_v1/resolve/main/fold_0.zip",
out=os.path.join(download_dir, "fold_0.zip"),
)
with zipfile.ZipFile(os.path.join(download_dir, "fold_0.zip"), "r") as zip_ref:
zip_ref.extractall(download_dir)
os.remove(os.path.join(download_dir, "fold_0.zip"))
wget.download(
"https://huggingface.co/louisblankemeier/spine_v1/resolve/main/plans.pkl",
out=os.path.join(download_dir, "plans.pkl"),
)
print("Spine model downloaded.")
else:
print("Spine model already downloaded.")
def spine_seg(self, input_path: Union[str, Path], output_path: Union[str, Path], model_dir):
"""Run spine segmentation.
Args:
input_path (Union[str, Path]): Input path.
            output_path (Union[str, Path]): Output path.
            model_dir: Directory where the model weights are stored.
        """
print("Segmenting spine...")
st = time()
os.environ["SCRATCH"] = self.model_dir
# Setup nnunet
model = "3d_fullres"
folds = [0]
trainer = "nnUNetTrainerV2_ep4000_nomirror"
crop_path = None
task_id = [252]
if self.model_name == "ts_spine":
setup_nnunet()
download_pretrained_weights(task_id[0])
elif self.model_name == "stanford_spine_v0.0.1":
self.setup_nnunet_c2c(model_dir)
self.download_spine_model(model_dir)
else:
raise ValueError("Invalid model name.")
if not self.save_segmentations:
output_path = None
from totalsegmentator.nnunet import nnUNet_predict_image
with nostdout():
img, seg = nnUNet_predict_image(
input_path,
output_path,
task_id,
model=model,
folds=folds,
trainer=trainer,
tta=False,
multilabel_image=True,
resample=1.5,
crop=None,
crop_path=crop_path,
task_name="total",
nora_tag="None",
preview=False,
nr_threads_resampling=1,
nr_threads_saving=6,
quiet=False,
verbose=False,
test=0,
)
end = time()
# Log total time for spine segmentation
print(f"Total time for spine segmentation: {end-st:.2f}s.")
if self.model_name == "stanford_spine_v0.0.1":
seg_data = seg.get_fdata()
# subtract 17 from seg values except for 0
seg_data = np.where(seg_data == 0, 0, seg_data - 17)
seg = nib.Nifti1Image(seg_data, seg.affine, seg.header)
return seg, img
class AxialCropper(InferenceClass):
"""Crop the CT image (medical_volume) and segmentation based on user-specified
lower and upper levels of the spine.
"""
def __init__(self, lower_level: str = "L5", upper_level: str = "L1", save=True):
"""
Args:
lower_level (str, optional): Lower level of the spine. Defaults to "L5".
upper_level (str, optional): Upper level of the spine. Defaults to "L1".
save (bool, optional): Save cropped image and segmentation. Defaults to True.
Raises:
ValueError: If lower_level or upper_level is not a valid spine level.
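        Example:
            AxialCropper(lower_level="L5", upper_level="L1") keeps only the
            axial slices spanning L5 through L1.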
"""
super().__init__()
self.lower_level = lower_level
self.upper_level = upper_level
ts_spine_full_model = Models.model_from_name("ts_spine_full")
categories = ts_spine_full_model.categories
try:
self.lower_level_index = categories[self.lower_level]
self.upper_level_index = categories[self.upper_level]
except KeyError:
raise ValueError("Invalid spine level.") from None
self.save = save
def __call__(self, inference_pipeline):
"""
First dim goes from L to R.
Second dim goes from P to A.
Third dim goes from I to S.
"""
segmentation = inference_pipeline.segmentation
segmentation_data = segmentation.get_fdata()
upper_level_index = np.where(segmentation_data == self.upper_level_index)[2].max()
lower_level_index = np.where(segmentation_data == self.lower_level_index)[2].min()
segmentation = segmentation.slicer[:, :, lower_level_index:upper_level_index]
inference_pipeline.segmentation = segmentation
medical_volume = inference_pipeline.medical_volume
medical_volume = medical_volume.slicer[:, :, lower_level_index:upper_level_index]
inference_pipeline.medical_volume = medical_volume
if self.save:
nib.save(
segmentation,
os.path.join(inference_pipeline.output_dir, "segmentations", "spine.nii.gz"),
)
nib.save(
medical_volume,
os.path.join(
inference_pipeline.output_dir, "segmentations", "converted_dcm.nii.gz"
),
)
return {}
class SpineComputeROIs(InferenceClass):
def __init__(self, spine_model):
super().__init__()
self.spine_model_name = spine_model
self.spine_model_type = Models.model_from_name(self.spine_model_name)
def __call__(self, inference_pipeline):
# Compute ROIs
inference_pipeline.spine_model_type = self.spine_model_type
(spine_hus, rois, centroids_3d) = spine_utils.compute_rois(
inference_pipeline.segmentation,
inference_pipeline.medical_volume,
self.spine_model_type,
)
inference_pipeline.spine_hus = spine_hus
inference_pipeline.rois = rois
inference_pipeline.centroids_3d = centroids_3d
return {}
class SpineMetricsSaver(InferenceClass):
"""Save metrics to a CSV file."""
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline):
"""Save metrics to a CSV file."""
self.spine_hus = inference_pipeline.spine_hus
self.output_dir = inference_pipeline.output_dir
self.csv_output_dir = os.path.join(self.output_dir, "metrics")
if not os.path.exists(self.csv_output_dir):
os.makedirs(self.csv_output_dir, exist_ok=True)
self.save_results()
return {}
def save_results(self):
"""Save results to a CSV file."""
df = pd.DataFrame(columns=["Level", "ROI HU"])
for i, level in enumerate(self.spine_hus):
hu = self.spine_hus[level]
row = [level, hu]
df.loc[i] = row
df.to_csv(os.path.join(self.csv_output_dir, "spine_metrics.csv"), index=False)
class SpineFindDicoms(InferenceClass):
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline):
dicom_files, names, inferior_superior_centers = spine_utils.find_spine_dicoms(
inference_pipeline.centroids_3d,
inference_pipeline.dicom_series_path,
list(inference_pipeline.rois.keys()),
)
dicom_files = [Path(d) for d in dicom_files]
inference_pipeline.dicom_file_paths = dicom_files
inference_pipeline.names = names
inference_pipeline.dicom_file_names = names
inference_pipeline.inferior_superior_centers = inferior_superior_centers
return {}
class SpineCoronalSagittalVisualizer(InferenceClass):
def __init__(self, format="png"):
super().__init__()
self.format = format
def __call__(self, inference_pipeline):
output_path = inference_pipeline.output_dir
spine_model_type = inference_pipeline.spine_model_type
img_sagittal, img_coronal = spine_utils.visualize_coronal_sagittal_spine(
inference_pipeline.segmentation.get_fdata(),
list(inference_pipeline.rois.values()),
inference_pipeline.medical_volume.get_fdata(),
list(inference_pipeline.centroids_3d.values()),
output_path,
spine_hus=inference_pipeline.spine_hus,
model_type=spine_model_type,
pixel_spacing=inference_pipeline.pixel_spacing_list,
format=self.format,
)
inference_pipeline.spine_vis_sagittal = img_sagittal
inference_pipeline.spine_vis_coronal = img_coronal
inference_pipeline.spine = True
if not inference_pipeline.save_segmentations:
shutil.rmtree(os.path.join(output_path, "segmentations"))
return {}
class SpineReport(InferenceClass):
def __init__(self, format="png"):
super().__init__()
self.format = format
def __call__(self, inference_pipeline):
sagittal_image = inference_pipeline.spine_vis_sagittal
coronal_image = inference_pipeline.spine_vis_coronal
# concatenate these numpy arrays laterally
img = np.concatenate((coronal_image, sagittal_image), axis=1)
output_path = os.path.join(inference_pipeline.output_dir, "images", "spine_report")
if self.format == "png":
im = Image.fromarray(img)
im.save(output_path + ".png")
elif self.format == "dcm":
to_dicom(img, output_path + ".dcm")
return {}
class SpineMuscleAdiposeTissueReport(InferenceClass):
"""Spine muscle adipose tissue report class."""
def __init__(self):
super().__init__()
self.image_files = [
"spine_coronal.png",
"spine_sagittal.png",
"T12.png",
"L1.png",
"L2.png",
"L3.png",
"L4.png",
"L5.png",
]
def __call__(self, inference_pipeline):
image_dir = Path(inference_pipeline.output_dir) / "images"
self.generate_panel(image_dir)
return {}
def generate_panel(self, image_dir: Union[str, Path]):
"""Generate panel.
Args:
image_dir (Union[str, Path]): Path to the image directory.
"""
image_files = [os.path.join(image_dir, path) for path in self.image_files]
# construct a list which includes only the images that exist
image_files = [path for path in image_files if os.path.exists(path)]
im_cor = Image.open(image_files[0])
im_sag = Image.open(image_files[1])
im_cor_width = int(im_cor.width / im_cor.height * 512)
num_muscle_fat_cols = math.ceil((len(image_files) - 2) / 2)
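        # Panel layout: an 8 px margin, the coronal/sagittal column, another
        # 8 px gap, then one 520 px (512 px tile + 8 px gap) column per pair of
        # axial images; the height is two 520 px rows plus a final 8 px margin.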
width = (8 + im_cor_width + 8) + ((512 + 8) * num_muscle_fat_cols)
height = 1048
new_im = Image.new("RGB", (width, height))
index = 2
for j in range(8, height, 520):
for i in range(8 + im_cor_width + 8, width, 520):
try:
im = Image.open(image_files[index])
im.thumbnail((512, 512))
new_im.paste(im, (i, j))
index += 1
im.close()
except Exception:
continue
im_cor.thumbnail((im_cor_width, 512))
new_im.paste(im_cor, (8, 8))
im_sag.thumbnail((im_cor_width, 512))
new_im.paste(im_sag, (8, 528))
new_im.save(os.path.join(image_dir, "spine_muscle_adipose_tissue_report.png"))
im_cor.close()
im_sag.close()
new_im.close()
| 14,371 | 34.574257 | 99 | py |
Comp2Comp | Comp2Comp-master/comp2comp/spine/spine_utils.py | """
@author: louisblankemeier
"""
import logging
import math
from glob import glob
from typing import Dict, List
import cv2
import numpy as np
from pydicom.filereader import dcmread
from scipy.ndimage import zoom
from comp2comp.spine import spine_visualization
def find_spine_dicoms(centroids: Dict, path: str, levels):
"""Find the dicom files corresponding to the spine T12 - L5 levels."""
vertical_positions = []
for level in centroids:
centroid = centroids[level]
vertical_positions.append(round(centroid[2]))
dicom_files = []
ipps = []
for dicom_path in glob(path + "/*.dcm"):
ipp = dcmread(dicom_path).ImagePositionPatient
ipps.append(ipp[2])
dicom_files.append(dicom_path)
dicom_files = [x for _, x in sorted(zip(ipps, dicom_files))]
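    # index the z-sorted slice list with the (rounded) centroid slice indices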
dicom_files = list(np.array(dicom_files)[vertical_positions])
return (dicom_files, levels, vertical_positions)
# Function that takes a numpy array as input, computes the
# sagittal centroid of each label, and returns a dictionary of
# the centroids keyed by vertebral level
def compute_centroids(seg: np.ndarray, spine_model_type):
"""Compute the centroids of the labels.
Args:
seg (np.ndarray): Segmentation volume.
spine_model_type (str): Model type.
Returns:
        Dict[str, int]: Mapping from vertebral level to sagittal centroid index.
"""
# take values of spine_model_type.categories dictionary
# and convert to list
centroids = {}
for level in spine_model_type.categories:
label_idx = spine_model_type.categories[level]
try:
pos = compute_centroid(seg, "sagittal", label_idx)
centroids[level] = pos
except Exception:
logging.warning(f"Label {level} not found in segmentation volume.")
return centroids
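# Minimal usage sketch (not from the original file): any object exposing a
# `categories` mapping from level name to label index can stand in for the
# spine model; the volume shape and label value below are illustrative.
def _demo_compute_centroids():
    class _StubModel:
        categories = {"L3": 1}
    seg = np.zeros((16, 16, 16), dtype=np.uint8)
    seg[6:10, :, :] = 1  # label 1 spans sagittal indices 6-9
    return compute_centroids(seg, _StubModel())  # -> {"L3": 7}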
# Function that takes a numpy array as input, as well as a dictionary of
# centroids, takes the sagittal slice (axis 0) through each centroid,
# and returns a dictionary of the slices keyed by vertebral level
def get_slices(seg: np.ndarray, centroids: Dict, spine_model_type):
"""Get the slices corresponding to the centroids.
Args:
seg (np.ndarray): Segmentation volume.
        centroids (Dict[str, int]): Mapping from level to sagittal centroid index.
spine_model_type (str): Model type.
Returns:
        Dict[str, np.ndarray]: Mapping from level to binary sagittal slice.
"""
seg = seg.astype(np.uint8)
slices = {}
for level in centroids:
label_idx = spine_model_type.categories[level]
binary_seg = (seg[centroids[level], :, :] == label_idx).astype(int)
if np.sum(binary_seg) > 200: # heuristic to make sure enough of the body is showing
slices[level] = binary_seg
return slices
# Function that takes a mask, deletes its right-most connected
# component, and returns the resulting mask
def delete_right_most_connected_component(mask: np.ndarray):
"""Delete the right most connected component corresponding to spinous processes.
Args:
mask (np.ndarray): Mask volume.
Returns:
np.ndarray: Mask volume.
"""
mask = mask.astype(np.uint8)
_, labels, _, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)
right_most_connected_component = np.argmin(centroids[1:, 1]) + 1
mask[labels == right_most_connected_component] = 0
return mask
# compute center of mass of 2d mask
def compute_center_of_mass(mask: np.ndarray):
"""Compute the center of mass of a 2D mask.
Args:
mask (np.ndarray): Mask volume.
Returns:
np.ndarray: Center of mass.
"""
mask = mask.astype(np.uint8)
_, _, _, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)
center_of_mass = np.mean(centroids[1:, :], axis=0)
return center_of_mass
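# Toy check (illustrative): the center of mass of a single square blob is
# its geometric center, returned in cv2's (column, row) convention.
def _demo_compute_center_of_mass():
    mask = np.zeros((20, 20), dtype=np.uint8)
    mask[5:10, 5:10] = 1
    return compute_center_of_mass(mask)  # -> array([7., 7.])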
# Function that takes a 3D centroid and returns a binary mask with a 3D
# ROI around the centroid
def roi_from_mask(img, centroid: np.ndarray):
"""Compute a 3D ROI from a 3D mask.
Args:
img (np.ndarray): Image volume.
centroid (np.ndarray): Centroid.
Returns:
np.ndarray: ROI volume.
"""
img_np = img.get_fdata()
pixel_spacing = img.header.get_zooms()
length_i = 5.0 / pixel_spacing[0]
length_j = 5.0 / pixel_spacing[1]
length_k = 5.0 / pixel_spacing[2]
print(
f"Computing ROI with centroid {centroid[0]:.3f}, {centroid[1]:.3f}, {centroid[2]:.3f} "
f"and pixel spacing "
f"{pixel_spacing[0]:.3f}mm, {pixel_spacing[1]:.3f}mm, {pixel_spacing[2]:.3f}mm..."
)
# cubic ROI around centroid
"""
roi[
int(centroid[0] - length) : int(centroid[0] + length),
int(centroid[1] - length) : int(centroid[1] + length),
int(centroid[2] - length) : int(centroid[2] + length),
] = 1
"""
    # ellipsoidal ROI around centroid (5 mm radius along each physical axis)
roi = np.zeros(img_np.shape)
i_lower = math.floor(centroid[0] - length_i)
j_lower = math.floor(centroid[1] - length_j)
k_lower = math.floor(centroid[2] - length_k)
i_lower_idx = 1000
j_lower_idx = 1000
k_lower_idx = 1000
i_upper_idx = 0
j_upper_idx = 0
k_upper_idx = 0
found_pixels = False
for i in range(i_lower, i_lower + 2 * math.ceil(length_i) + 1):
for j in range(j_lower, j_lower + 2 * math.ceil(length_j) + 1):
for k in range(k_lower, k_lower + 2 * math.ceil(length_k) + 1):
if (i - centroid[0]) ** 2 / length_i**2 + (
j - centroid[1]
) ** 2 / length_j**2 + (k - centroid[2]) ** 2 / length_k**2 <= 1:
roi[i, j, k] = 1
if i < i_lower_idx:
i_lower_idx = i
if j < j_lower_idx:
j_lower_idx = j
if k < k_lower_idx:
k_lower_idx = k
if i > i_upper_idx:
i_upper_idx = i
if j > j_upper_idx:
j_upper_idx = j
if k > k_upper_idx:
k_upper_idx = k
found_pixels = True
    if not found_pixels:
        raise ValueError("No pixels in ROI!")
print(
f"Number of pixels included in i, j, and k directions: {i_upper_idx - i_lower_idx + 1}, "
f"{j_upper_idx - j_lower_idx + 1}, {k_upper_idx - k_lower_idx + 1}"
)
return roi
# Function that takes a 3d image and a 3d binary mask and returns that average
# value of the image inside the mask
def mean_img_mask(img: np.ndarray, mask: np.ndarray, index: int):
"""Compute the mean of an image inside a mask.
Args:
img (np.ndarray): Image volume.
mask (np.ndarray): Mask volume.
        index (int): Level index (currently unused).
    Returns:
        float: Mean value of the image within the mask.
"""
img = img.astype(np.float32)
mask = mask.astype(np.float32)
img_masked = (img * mask)[mask > 0]
# mean = (rescale_slope * np.mean(img_masked)) + rescale_intercept
# median = (rescale_slope * np.median(img_masked)) + rescale_intercept
mean = np.mean(img_masked)
return mean
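# Toy check (illustrative): the mean is taken only over voxels where the
# mask is positive; the `index` argument is unused by the implementation.
def _demo_mean_img_mask():
    img = np.arange(8, dtype=np.float32).reshape(2, 2, 2)
    mask = np.zeros_like(img)
    mask[0, 0, :] = 1  # selects voxel values 0.0 and 1.0
    return mean_img_mask(img, mask, index=0)  # -> 0.5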
def compute_rois(seg, img, spine_model_type):
"""Compute the ROIs for the spine.
Args:
        seg (nib.Nifti1Image): Segmentation volume.
        img (nib.Nifti1Image): Image volume.
        spine_model_type (Models): Model type.
    Returns:
        spine_hus (Dict[str, float]): Mapping from level to mean HU value.
        rois (Dict[str, np.ndarray]): Mapping from level to ROI volume.
        centroids_3d (Dict[str, np.ndarray]): Mapping from level to 3D centroid.
"""
seg_np = seg.get_fdata()
centroids = compute_centroids(seg_np, spine_model_type)
slices = get_slices(seg_np, centroids, spine_model_type)
for level in slices:
slice = slices[level]
# keep only the two largest connected components
two_largest, two = keep_two_largest_connected_components(slice)
if two:
slices[level] = delete_right_most_connected_component(two_largest)
# Compute ROIs
rois = {}
spine_hus = {}
centroids_3d = {}
for i, level in enumerate(slices):
slice = slices[level]
center_of_mass = compute_center_of_mass(slice)
centroid = np.array([centroids[level], center_of_mass[1], center_of_mass[0]])
roi = roi_from_mask(img, centroid)
spine_hus[level] = mean_img_mask(img.get_fdata(), roi, i)
rois[level] = roi
centroids_3d[level] = centroid
return (spine_hus, rois, centroids_3d)
def keep_two_largest_connected_components(mask: np.ndarray):
"""Keep the two largest connected components.
Args:
mask (np.ndarray): Mask volume.
Returns:
np.ndarray: Mask volume.
"""
mask = mask.astype(np.uint8)
# sort connected components by size
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)
stats = stats[1:, 4]
sorted_indices = np.argsort(stats)[::-1]
# keep only the two largest connected components
mask = np.zeros(mask.shape)
mask[labels == sorted_indices[0] + 1] = 1
two = True
try:
mask[labels == sorted_indices[1] + 1] = 1
except Exception:
two = False
return (mask, two)
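# Toy sketch (illustrative): of three blobs, only the two largest survive,
# and the second return value reports whether a second component was found.
def _demo_keep_two_largest_connected_components():
    mask = np.zeros((32, 32), dtype=np.uint8)
    mask[2:12, 2:12] = 1    # 100-pixel blob, kept
    mask[20:28, 20:28] = 1  # 64-pixel blob, kept
    mask[15, 15] = 1        # single pixel, discarded
    kept, two = keep_two_largest_connected_components(mask)
    return int(kept.sum()), two  # -> (164, True)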
def compute_centroid(seg: np.ndarray, plane: str, label: int):
"""Compute the centroid of a label in a given plane.
Args:
seg (np.ndarray): Segmentation volume.
plane (str): Plane.
label (int): Label.
Returns:
int: Centroid.
"""
if plane == "axial":
sum_out_axes = (0, 1)
sum_axis = 2
elif plane == "sagittal":
sum_out_axes = (1, 2)
sum_axis = 0
elif plane == "coronal":
sum_out_axes = (0, 2)
sum_axis = 1
sums = np.sum(seg == label, axis=sum_out_axes)
normalized_sums = sums / np.sum(sums)
pos = int(np.sum(np.arange(0, seg.shape[sum_axis]) * normalized_sums))
return pos
def to_one_hot(label: np.ndarray, model_type, spine_hus):
"""Convert a label to one-hot encoding.
Args:
label (np.ndarray): Label volume.
model_type (Models): Model type.
Returns:
np.ndarray: One-hot encoding volume.
"""
levels = list(spine_hus.keys())
levels.reverse()
one_hot_label = np.zeros((label.shape[0], label.shape[1], len(levels)))
for i, level in enumerate(levels):
label_idx = model_type.categories[level]
one_hot_label[:, :, i] = (label == label_idx).astype(int)
return one_hot_label
def visualize_coronal_sagittal_spine(
seg: np.ndarray,
rois: List[np.ndarray],
mvs: np.ndarray,
centroids_3d: np.ndarray,
output_dir: str,
spine_hus=None,
model_type=None,
pixel_spacing=None,
format="png",
):
"""Visualize the coronal and sagittal planes of the spine.
Args:
seg (np.ndarray): Segmentation volume.
rois (List[np.ndarray]): List of ROIs.
        mvs (np.ndarray): Medical volume array.
        centroids_3d (np.ndarray): Array of 3D centroids.
output_dir (str): Output directory.
spine_hus (List[float], optional): List of HU values. Defaults to None.
model_type (Models, optional): Model type. Defaults to None.
"""
sagittal_vals, coronal_vals = curved_planar_reformation(mvs, centroids_3d)
zoom_factor = pixel_spacing[2] / pixel_spacing[1]
sagittal_image = mvs[sagittal_vals, :, range(len(sagittal_vals))]
sagittal_label = seg[sagittal_vals, :, range(len(sagittal_vals))]
sagittal_image = zoom(sagittal_image, (zoom_factor, 1), order=3)
sagittal_label = zoom(sagittal_label, (zoom_factor, 1), order=1).round()
one_hot_sag_label = to_one_hot(sagittal_label, model_type, spine_hus)
for roi in rois:
one_hot_roi_label = roi[sagittal_vals, :, range(len(sagittal_vals))]
one_hot_roi_label = zoom(one_hot_roi_label, (zoom_factor, 1), order=1).round()
one_hot_sag_label = np.concatenate(
(
one_hot_sag_label,
one_hot_roi_label.reshape(
(one_hot_roi_label.shape[0], one_hot_roi_label.shape[1], 1)
),
),
axis=2,
)
coronal_image = mvs[:, coronal_vals, range(len(coronal_vals))]
coronal_label = seg[:, coronal_vals, range(len(coronal_vals))]
coronal_image = zoom(coronal_image, (1, zoom_factor), order=3)
coronal_label = zoom(coronal_label, (1, zoom_factor), order=1).round()
# coronal_image = zoom(coronal_image, (zoom_factor, 1), order=3)
# coronal_label = zoom(coronal_label, (zoom_factor, 1), order=0).astype(int)
one_hot_cor_label = to_one_hot(coronal_label, model_type, spine_hus)
for roi in rois:
one_hot_roi_label = roi[:, coronal_vals, range(len(coronal_vals))]
one_hot_roi_label = zoom(one_hot_roi_label, (1, zoom_factor), order=1).round()
one_hot_cor_label = np.concatenate(
(
one_hot_cor_label,
one_hot_roi_label.reshape(
(one_hot_roi_label.shape[0], one_hot_roi_label.shape[1], 1)
),
),
axis=2,
)
    # flip both axes of sagittal image
sagittal_image = np.flip(sagittal_image, axis=0)
sagittal_image = np.flip(sagittal_image, axis=1)
    # flip both axes of sagittal label
one_hot_sag_label = np.flip(one_hot_sag_label, axis=0)
one_hot_sag_label = np.flip(one_hot_sag_label, axis=1)
coronal_image = np.transpose(coronal_image)
one_hot_cor_label = np.transpose(one_hot_cor_label, (1, 0, 2))
    # flip both axes of coronal image
coronal_image = np.flip(coronal_image, axis=0)
coronal_image = np.flip(coronal_image, axis=1)
    # flip both axes of coronal label
one_hot_cor_label = np.flip(one_hot_cor_label, axis=0)
one_hot_cor_label = np.flip(one_hot_cor_label, axis=1)
if format == "png":
sagittal_name = "spine_sagittal.png"
coronal_name = "spine_coronal.png"
elif format == "dcm":
sagittal_name = "spine_sagittal.dcm"
coronal_name = "spine_coronal.dcm"
else:
raise ValueError("Format must be either png or dcm")
img_sagittal = spine_visualization.spine_binary_segmentation_overlay(
sagittal_image,
one_hot_sag_label,
output_dir,
sagittal_name,
spine_hus=spine_hus,
model_type=model_type,
pixel_spacing=pixel_spacing,
)
img_coronal = spine_visualization.spine_binary_segmentation_overlay(
coronal_image,
one_hot_cor_label,
output_dir,
coronal_name,
spine_hus=spine_hus,
model_type=model_type,
pixel_spacing=pixel_spacing,
)
return img_sagittal, img_coronal
def curved_planar_reformation(mvs, centroids):
centroids = sorted(centroids, key=lambda x: x[2])
centroids = [(int(x[0]), int(x[1]), int(x[2])) for x in centroids]
sagittal_centroids = [centroids[i][0] for i in range(0, len(centroids))]
coronal_centroids = [centroids[i][1] for i in range(0, len(centroids))]
axial_centroids = [centroids[i][2] for i in range(0, len(centroids))]
sagittal_vals = [sagittal_centroids[0]] * axial_centroids[0]
coronal_vals = [coronal_centroids[0]] * axial_centroids[0]
for i in range(1, len(axial_centroids)):
num = axial_centroids[i] - axial_centroids[i - 1]
interp = list(np.linspace(sagittal_centroids[i - 1], sagittal_centroids[i], num=num))
sagittal_vals.extend(interp)
interp = list(np.linspace(coronal_centroids[i - 1], coronal_centroids[i], num=num))
coronal_vals.extend(interp)
sagittal_vals.extend([sagittal_centroids[-1]] * (mvs.shape[2] - len(sagittal_vals)))
coronal_vals.extend([coronal_centroids[-1]] * (mvs.shape[2] - len(coronal_vals)))
sagittal_vals = np.array(sagittal_vals)
coronal_vals = np.array(coronal_vals)
sagittal_vals = sagittal_vals.astype(int)
coronal_vals = coronal_vals.astype(int)
return (sagittal_vals, coronal_vals)
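# Minimal sketch (synthetic data): the reformation linearly interpolates
# sagittal/coronal indices between consecutive centroids and pads both ends,
# yielding one index pair per axial slice.
def _demo_curved_planar_reformation():
    mvs = np.zeros((8, 8, 6))
    centroids = [(2, 3, 1), (4, 5, 3), (6, 2, 5)]
    sagittal_vals, coronal_vals = curved_planar_reformation(mvs, centroids)
    return sagittal_vals, coronal_vals  # each has length mvs.shape[2]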
'''
def compare_ts_stanford_centroids(labels_path, pred_centroids):
"""Compare the centroids of the Stanford dataset with the centroids of the TS dataset.
Args:
labels_path (str): Path to the Stanford dataset labels.
"""
t12_diff = []
l1_diff = []
l2_diff = []
l3_diff = []
l4_diff = []
l5_diff = []
num_skipped = 0
labels = glob(labels_path + "/*")
for label_path in labels:
# modify label_path to give pred_path
pred_path = label_path.replace("labelsTs", "predTs_TS")
print(label_path.split("/")[-1])
label_nib = nib.load(label_path)
label = label_nib.get_fdata()
spacing = label_nib.header.get_zooms()[2]
pred_nib = nib.load(pred_path)
pred = pred_nib.get_fdata()
if True:
pred[pred == 18] = 6
pred[pred == 19] = 5
pred[pred == 20] = 4
pred[pred == 21] = 3
pred[pred == 22] = 2
pred[pred == 23] = 1
for label_idx in range(1, 7):
label_level = label == label_idx
indexes = np.array(range(label.shape[2]))
sums = np.sum(label_level, axis=(0, 1))
normalized_sums = sums / np.sum(sums)
label_centroid = np.sum(indexes * normalized_sums)
print(f"Centroid for label {label_idx}: {label_centroid}")
if False:
try:
pred_centroid = pred_centroids[6 - label_idx]
except Exception:
# Change this part
print("Something wrong with pred_centroids, skipping!")
num_skipped += 1
break
# if revert_to_original:
if True:
pred_level = pred == label_idx
sums = np.sum(pred_level, axis=(0, 1))
indices = list(range(sums.shape[0]))
groupby_input = zip(indices, list(sums))
g = groupby(groupby_input, key=lambda x: x[1] > 0.0)
m = max([list(s) for v, s in g if v > 0], key=lambda x: np.sum(list(zip(*x))[1]))
res = list(zip(*m))
indexes = list(res[0])
sums = list(res[1])
normalized_sums = sums / np.sum(sums)
pred_centroid = np.sum(indexes * normalized_sums)
print(f"Centroid for prediction {label_idx}: {pred_centroid}")
diff = np.absolute(pred_centroid - label_centroid) * spacing
if label_idx == 1:
t12_diff.append(diff)
elif label_idx == 2:
l1_diff.append(diff)
elif label_idx == 3:
l2_diff.append(diff)
elif label_idx == 4:
l3_diff.append(diff)
elif label_idx == 5:
l4_diff.append(diff)
elif label_idx == 6:
l5_diff.append(diff)
print(f"Skipped {num_skipped}")
print("The final mean differences in mm:")
print(
np.mean(t12_diff),
np.mean(l1_diff),
np.mean(l2_diff),
np.mean(l3_diff),
np.mean(l4_diff),
np.mean(l5_diff),
)
print("The final median differences in mm:")
print(
np.median(t12_diff),
np.median(l1_diff),
np.median(l2_diff),
np.median(l3_diff),
np.median(l4_diff),
np.median(l5_diff),
)
def compare_ts_stanford_roi_hus(image_path):
"""Compare the HU values of the Stanford dataset with the HU values of the TS dataset.
image_path (str): Path to the Stanford dataset images.
"""
img_paths = glob(image_path + "/*")
differences = np.zeros((40, 6))
ground_truth = np.zeros((40, 6))
for i, img_path in enumerate(img_paths):
print(f"Image number {i + 1}")
image_path_no_0000 = re.sub(r"_0000", "", img_path)
ts_seg_path = image_path_no_0000.replace("imagesTs", "predTs_TS")
stanford_seg_path = image_path_no_0000.replace("imagesTs", "labelsTs")
img = nib.load(img_path).get_fdata()
img = np.swapaxes(img, 0, 1)
ts_seg = nib.load(ts_seg_path).get_fdata()
ts_seg = np.swapaxes(ts_seg, 0, 1)
stanford_seg = nib.load(stanford_seg_path).get_fdata()
stanford_seg = np.swapaxes(stanford_seg, 0, 1)
ts_model_type = Models.model_from_name("ts_spine")
(spine_hus_ts, rois, centroids_3d) = compute_rois(ts_seg, img, 1, 0, ts_model_type)
stanford_model_type = Models.model_from_name("stanford_spine_v0.0.1")
(spine_hus_stanford, rois, centroids_3d) = compute_rois(
stanford_seg, img, 1, 0, stanford_model_type
)
difference_vals = np.abs(np.array(spine_hus_ts) - np.array(spine_hus_stanford))
print(f"Differences {difference_vals}\n")
differences[i, :] = difference_vals
ground_truth[i, :] = spine_hus_stanford
print("\n")
# compute average percent change from ground truth
percent_change = np.divide(differences, ground_truth) * 100
average_percent_change = np.mean(percent_change, axis=0)
median_percent_change = np.median(percent_change, axis=0)
# print average percent change
print("Average percent change from ground truth:")
print(average_percent_change)
print("Median percent change from ground truth:")
print(median_percent_change)
# print average difference
average_difference = np.mean(differences, axis=0)
median_difference = np.median(differences, axis=0)
print("Average difference from ground truth:")
print(average_difference)
print("Median difference from ground truth:")
print(median_difference)
def process_post_hoc(pred_path):
"""Apply post-hoc heuristics for improving Stanford spine model vertical centroid predictions.
Args:
pred_path (str): Path to the prediction.
"""
pred_nib = nib.load(pred_path)
pred = pred_nib.get_fdata()
pred_bodies = np.logical_and(pred >= 1, pred <= 6)
pred_bodies = pred_bodies.astype(np.int64)
labels_out, N = cc3d.connected_components(pred_bodies, return_N=True, connectivity=6)
stats = cc3d.statistics(labels_out)
print(stats)
labels_out_list = []
voxel_counts_list = list(stats["voxel_counts"])
for idx_lab in range(1, N + 2):
labels_out_list.append(labels_out == idx_lab)
centroids_list = list(stats["centroids"][:, 2])
labels = []
centroids = []
voxels = []
for idx, count in enumerate(voxel_counts_list):
if count > 10000:
labels.append(labels_out_list[idx])
centroids.append(centroids_list[idx])
voxels.append(count)
top_comps = [
(counts0, labels0, centroids0)
for counts0, labels0, centroids0 in sorted(zip(voxels, labels, centroids), reverse=True)
]
top_comps = top_comps[1:7]
# ====== Check whether the connected components are fusing vertebral bodies ======
revert_to_original = False
volumes = list(zip(*top_comps))[0]
if volumes[0] > 1.5 * volumes[1]:
revert_to_original = True
print("Reverting to original...")
labels = list(zip(*top_comps))[1]
centroids = list(zip(*top_comps))[2]
top_comps = zip(centroids, labels)
pred_centroids = [x for x, _ in sorted(top_comps)]
for label_idx in range(1, 7):
if not revert_to_original:
try:
pred_centroid = pred_centroids[6 - label_idx]
except:
# Change this part
print(
"Post processing failure, probably < 6 predicted bodies. Reverting to original labels."
)
revert_to_original = True
if revert_to_original:
pred_level = pred == label_idx
sums = np.sum(pred_level, axis=(0, 1))
indices = list(range(sums.shape[0]))
groupby_input = zip(indices, list(sums))
# sys.exit()
g = groupby(groupby_input, key=lambda x: x[1] > 0.0)
m = max([list(s) for v, s in g if v > 0], key=lambda x: np.sum(list(zip(*x))[1]))
# sys.exit()
# m = max([list(s) for v, s in g], key=lambda np.sum)
res = list(zip(*m))
indexes = list(res[0])
sums = list(res[1])
normalized_sums = sums / np.sum(sums)
pred_centroid = np.sum(indexes * normalized_sums)
print(f"Centroid for prediction {label_idx}: {pred_centroid}")
'''
| 24,734 | 33.887165 | 107 | py |
Comp2Comp | Comp2Comp-master/comp2comp/utils/colormap.py | # Copyright (c) Facebook, Inc. and its affiliates.
"""
An awesome colormap for really neat visualizations.
Copied from Detectron, and removed gray colors.
"""
import random
import numpy as np
__all__ = ["colormap", "random_color", "random_colors"]
# fmt: off
# RGB:
_COLORS = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32).reshape(-1, 3)
# fmt: on
def colormap(rgb=False, maximum=255):
"""
Args:
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
"""
assert maximum in [255, 1], maximum
c = _COLORS * maximum
if not rgb:
c = c[:, ::-1]
return c
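# Usage sketch (illustrative): grab the first few colors as RGB floats in
# [0, 1], e.g. for matplotlib-style plotting.
def _demo_colormap():
    colors = colormap(rgb=True, maximum=1)
    return colors[:5]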
def random_color(rgb=False, maximum=255):
"""
Args:
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a vector of 3 numbers
"""
idx = np.random.randint(0, len(_COLORS))
ret = _COLORS[idx] * maximum
if not rgb:
ret = ret[::-1]
return ret
def random_colors(N, rgb=False, maximum=255):
"""
Args:
N (int): number of unique colors needed
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a list of random_color
"""
indices = random.sample(range(len(_COLORS)), N)
ret = [_COLORS[i] * maximum for i in indices]
if not rgb:
ret = [x[::-1] for x in ret]
return ret
if __name__ == "__main__":
import cv2
size = 100
H, W = 10, 10
canvas = np.random.rand(H * size, W * size, 3).astype("float32")
for h in range(H):
for w in range(W):
idx = h * W + w
if idx >= len(_COLORS):
break
canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx]
cv2.imshow("a", canvas)
cv2.waitKey(0)
| 4,094 | 25.082803 | 87 | py |
Comp2Comp | Comp2Comp-master/comp2comp/utils/orientation.py | import nibabel as nib
from comp2comp.inference_class_base import InferenceClass
class ToCanonical(InferenceClass):
"""Convert spine segmentation to canonical orientation."""
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline):
"""
First dim goes from L to R.
Second dim goes from P to A.
Third dim goes from I to S.
"""
canonical_segmentation = nib.as_closest_canonical(inference_pipeline.segmentation)
canonical_medical_volume = nib.as_closest_canonical(inference_pipeline.medical_volume)
inference_pipeline.segmentation = canonical_segmentation
inference_pipeline.medical_volume = canonical_medical_volume
inference_pipeline.pixel_spacing_list = canonical_medical_volume.header.get_zooms()
return {}
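# Usage sketch (hypothetical stand-in pipeline): any object that carries
# nibabel images in `segmentation` and `medical_volume` can be reoriented
# in place by this step.
def _demo_to_canonical(pipeline):
    ToCanonical()(pipeline)
    return pipeline.pixel_spacing_list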
| 842 | 32.72 | 94 | py |
Comp2Comp | Comp2Comp-master/comp2comp/utils/logger.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import logging
import os
import sys
import time
from collections import Counter
from termcolor import colored
logging.captureWarnings(True)
class _ColorfulFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
self._root_name = kwargs.pop("root_name") + "."
self._abbrev_name = kwargs.pop("abbrev_name", "")
if len(self._abbrev_name):
self._abbrev_name = self._abbrev_name + "."
super(_ColorfulFormatter, self).__init__(*args, **kwargs)
def formatMessage(self, record):
record.name = record.name.replace(self._root_name, self._abbrev_name)
log = super(_ColorfulFormatter, self).formatMessage(record)
if record.levelno == logging.WARNING:
prefix = colored("WARNING", "red", attrs=["blink"])
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
prefix = colored("ERROR", "red", attrs=["blink", "underline"])
else:
return log
return prefix + " " + log
@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers # noqa
def setup_logger(
output=None,
distributed_rank=0,
*,
color=True,
name="Comp2Comp",
abbrev_name=None,
):
"""
    Initialize the Comp2Comp logger and set its verbosity level to "DEBUG".
Args:
output (str): a file name or a directory to save log. If None, will not
save log file. If ends with ".txt" or ".log", assumed to be a file
name. Otherwise, logs will be saved to `output/log.txt`.
name (str): the root module name of this logger
abbrev_name (str): an abbreviation of the module, to avoid long names in
logs. Set to "" to not log the root module in logs.
            By default, the full root module name is used.
Returns:
logging.Logger: a logger
"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.propagate = False
if abbrev_name is None:
abbrev_name = name
plain_formatter = logging.Formatter(
"[%(asctime)s] %(name)s %(levelname)s: %(message)s",
datefmt="%m/%d %H:%M:%S",
)
# stdout logging: master only
if distributed_rank == 0:
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
if color:
formatter = _ColorfulFormatter(
colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
datefmt="%m/%d %H:%M:%S",
root_name=name,
abbrev_name=str(abbrev_name),
)
else:
formatter = plain_formatter
ch.setFormatter(formatter)
logger.addHandler(ch)
# file logging: all workers
if output is not None:
if output.endswith(".txt") or output.endswith(".log"):
filename = output
else:
filename = os.path.join(output, "log.txt")
if distributed_rank > 0:
filename = filename + ".rank{}".format(distributed_rank)
os.makedirs(os.path.dirname(filename), exist_ok=True)
fh = logging.StreamHandler(_cached_log_stream(filename))
fh.setLevel(logging.DEBUG)
fh.setFormatter(plain_formatter)
logger.addHandler(fh)
return logger
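# Usage sketch (illustrative output directory): build a colored stdout
# logger that also appends to <output>/log.txt.
def _demo_setup_logger():
    logger = setup_logger(output="./outputs", name="Comp2Comp")
    logger.info("pipeline started")
    return logger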
# cache the opened file object, so that different calls to `setup_logger`
# with the same file name can safely write to the same file.
@functools.lru_cache(maxsize=None)
def _cached_log_stream(filename):
return open(filename, "a")
"""
Below are some other convenient logging methods.
They are mainly adopted from
https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py
"""
def _find_caller():
"""
Returns:
str: module name of the caller
tuple: a hashable key to be used to identify different callers
"""
frame = sys._getframe(2)
while frame:
code = frame.f_code
if os.path.join("utils", "logger.") not in code.co_filename:
mod_name = frame.f_globals["__name__"]
if mod_name == "__main__":
mod_name = "detectron2"
return mod_name, (code.co_filename, frame.f_lineno, code.co_name)
frame = frame.f_back
_LOG_COUNTER = Counter()
_LOG_TIMER = {}
def log_first_n(lvl, msg, n=1, *, name=None, key="caller"):
"""
Log only for the first n times.
Args:
lvl (int): the logging level
msg (str):
n (int):
name (str): name of the logger to use. Will use the caller's module by
default.
key (str or tuple[str]): the string(s) can be one of "caller" or
"message", which defines how to identify duplicated logs.
For example, if called with `n=1, key="caller"`, this function
will only log the first call from the same caller, regardless of
the message content.
If called with `n=1, key="message"`, this function will log the
same content only once, even if they are called from different
places.
If called with `n=1, key=("caller", "message")`, this function
will not log only if the same caller has logged the same message
before.
"""
if isinstance(key, str):
key = (key,)
assert len(key) > 0
caller_module, caller_key = _find_caller()
hash_key = ()
if "caller" in key:
hash_key = hash_key + caller_key
if "message" in key:
hash_key = hash_key + (msg,)
_LOG_COUNTER[hash_key] += 1
if _LOG_COUNTER[hash_key] <= n:
logging.getLogger(name or caller_module).log(lvl, msg)
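# Usage sketch (illustrative message): warn about a deprecated option only
# on the first call from a given call site.
def _demo_log_first_n():
    log_first_n(logging.WARNING, "option --foo is deprecated", n=1)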
def log_every_n(lvl, msg, n=1, *, name=None):
"""
Log once per n times.
Args:
lvl (int): the logging level
msg (str):
n (int):
name (str): name of the logger to use. Will use the caller's module by
default.
"""
caller_module, key = _find_caller()
_LOG_COUNTER[key] += 1
if n == 1 or _LOG_COUNTER[key] % n == 1:
logging.getLogger(name or caller_module).log(lvl, msg)
def log_every_n_seconds(lvl, msg, n=1, *, name=None):
"""
Log no more than once per n seconds.
Args:
lvl (int): the logging level
msg (str):
n (int):
name (str): name of the logger to use. Will use the caller's module by
default.
"""
caller_module, key = _find_caller()
last_logged = _LOG_TIMER.get(key, None)
current_time = time.time()
if last_logged is None or current_time - last_logged >= n:
logging.getLogger(name or caller_module).log(lvl, msg)
_LOG_TIMER[key] = current_time
| 6,831 | 31.533333 | 101 | py |
Comp2Comp | Comp2Comp-master/comp2comp/utils/run.py | import logging
import os
import re
from typing import Sequence, Union
logger = logging.getLogger(__name__)
def format_output_path(
file_path,
save_dir: str = None,
base_dirs: Sequence[str] = None,
file_name: Sequence[str] = None,
):
"""Format output path for a given file.
Args:
file_path (str): File path.
save_dir (str, optional): Save directory. Defaults to None.
base_dirs (Sequence[str], optional): Base directories. Defaults to None.
file_name (Sequence[str], optional): File name. Defaults to None.
Returns:
str: Output path.
"""
dirname = os.path.dirname(file_path) if not save_dir else save_dir
if save_dir and base_dirs:
dirname: str = os.path.dirname(file_path)
relative_dir = [
dirname.split(bdir, 1)[1] for bdir in base_dirs if dirname.startswith(bdir)
][0]
# Trim path separator from the path
relative_dir = relative_dir.lstrip(os.path.sep)
dirname = os.path.join(save_dir, relative_dir)
if file_name is not None:
return os.path.join(
dirname,
"{}.h5".format(file_name),
)
return os.path.join(
dirname,
"{}.h5".format(os.path.splitext(os.path.basename(file_path))[0]),
)
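# Illustrative example (paths are hypothetical): the scan's directory is
# re-rooted from the matching base directory into the save directory and
# the file extension is swapped for .h5.
def _demo_format_output_path():
    return format_output_path(
        "/data/scans/case1/series.nii.gz",
        save_dir="/outputs",
        base_dirs=["/data/scans"],
    )  # -> "/outputs/case1/series.nii.h5"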
# Function that returns a list of file names, excluding
# the extension, from a list of file paths
def get_file_names(files):
"""Get file names from a list of file paths.
Args:
files (list): List of file paths.
Returns:
list: List of file names.
"""
file_names = []
for file in files:
file_name = os.path.splitext(os.path.basename(file))[0]
file_names.append(file_name)
return file_names
def find_files(
root_dirs: Union[str, Sequence[str]],
max_depth: int = None,
exist_ok: bool = False,
pattern: str = None,
):
"""Recursively search for files.
To avoid recomputing experiments with results, set `exist_ok=False`.
Results will be searched for in `PREFERENCES.OUTPUT_DIR` (if non-empty).
Args:
root_dirs (`str(s)`): Root folder(s) to search.
max_depth (int, optional): Maximum depth to search.
exist_ok (bool, optional): If `True`, recompute results for
scans.
pattern (str, optional): If specified, looks for files with names
matching the pattern.
Return:
List[str]: Experiment directories to test.
"""
def _get_files(depth: int, dir_name: str):
if dir_name is None or not os.path.isdir(dir_name):
return []
if max_depth is not None and depth > max_depth:
return []
files = os.listdir(dir_name)
ret_files = []
for file in files:
possible_dir = os.path.join(dir_name, file)
if os.path.isdir(possible_dir):
subfiles = _get_files(depth + 1, possible_dir)
ret_files.extend(subfiles)
elif os.path.isfile(possible_dir):
if pattern and not re.match(pattern, possible_dir):
continue
output_path = format_output_path(possible_dir)
if not exist_ok and os.path.isfile(output_path):
logger.info(
"Skipping {} - results exist at {}".format(possible_dir, output_path)
)
continue
ret_files.append(possible_dir)
return ret_files
out_files = []
if isinstance(root_dirs, str):
root_dirs = [root_dirs]
for d in root_dirs:
out_files.extend(_get_files(0, d))
return sorted(set(out_files))
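# Illustrative call (root and pattern are hypothetical): collect DICOM
# files at most two directories deep, recomputing even if results exist.
def _demo_find_files():
    return find_files("/data/scans", max_depth=2, exist_ok=True, pattern=r".*\.dcm$")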
| 3,692 | 28.544 | 93 | py |
Comp2Comp | Comp2Comp-master/comp2comp/utils/__init__.py | 0 | 0 | 0 | py | |
Comp2Comp | Comp2Comp-master/comp2comp/utils/dl_utils.py | import subprocess
from keras import Model
# from keras.utils import multi_gpu_model
# from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
def get_available_gpus(num_gpus: int = None):
"""Get gpu ids for gpus that are >95% free.
Tensorflow does not support checking free memory on gpus.
This is a crude method that relies on `nvidia-smi` to
determine which gpus are occupied and which are free.
Args:
num_gpus: Number of requested gpus. If not specified,
ids of all available gpu(s) are returned.
Returns:
List[int]: List of gpu ids that are free. Length
will equal `num_gpus`, if specified.
"""
# Built-in tensorflow gpu id.
assert isinstance(num_gpus, (type(None), int))
if num_gpus == 0:
return [-1]
num_requested_gpus = num_gpus
try:
num_gpus = (
len(subprocess.check_output("nvidia-smi --list-gpus", shell=True).decode().split("\n"))
- 1
)
out_str = subprocess.check_output("nvidia-smi | grep MiB", shell=True).decode()
except subprocess.CalledProcessError:
return None
mem_str = [x for x in out_str.split() if "MiB" in x]
# First 2 * num_gpu elements correspond to memory for gpus
# Order: (occupied-0, total-0, occupied-1, total-1, ...)
mems = [float(x[:-3]) for x in mem_str]
gpu_percent_occupied_mem = [
mems[2 * gpu_id] / mems[2 * gpu_id + 1] for gpu_id in range(num_gpus)
]
available_gpus = [gpu_id for gpu_id, mem in enumerate(gpu_percent_occupied_mem) if mem < 0.05]
if num_requested_gpus and num_requested_gpus > len(available_gpus):
raise ValueError(
"Requested {} gpus, only {} are free".format(num_requested_gpus, len(available_gpus))
)
return available_gpus[:num_requested_gpus] if num_requested_gpus else available_gpus
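# Usage sketch (illustrative): pin the process to the first mostly-free
# GPU, if any were detected.
def _demo_select_free_gpu():
    import os
    gpu_ids = get_available_gpus()
    if gpu_ids:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_ids[0])
    return gpu_ids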
class ModelMGPU(Model):
"""Wrapper for distributing model across multiple gpus"""
def __init__(self, ser_model, gpus):
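        # NOTE: relies on `multi_gpu_model`, whose imports are commented out
        # above; one of them must be re-enabled before using this wrapper.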
pmodel = multi_gpu_model(ser_model, gpus) # noqa: F821
self.__dict__.update(pmodel.__dict__)
self._smodel = ser_model
def __getattribute__(self, attrname):
"""Override load and save methods to be used from the serial-model. The
serial-model holds references to the weights in the multi-gpu model.
"""
# return Model.__getattribute__(self, attrname)
if "load" in attrname or "save" in attrname:
return getattr(self._smodel, attrname)
return super(ModelMGPU, self).__getattribute__(attrname)
| 2,610 | 34.767123 | 99 | py |
Comp2Comp | Comp2Comp-master/comp2comp/utils/process.py | """
@author: louisblankemeier
"""
import os
import shutil
import sys
import traceback
from datetime import datetime
from pathlib import Path
from time import time
from comp2comp.io.io_utils import get_dicom_or_nifti_paths_and_num
def process_2d(args, pipeline_builder):
output_dir = Path(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../outputs",
datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
)
)
if not os.path.exists(output_dir):
output_dir.mkdir(parents=True)
model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../models")
if not os.path.exists(model_dir):
os.mkdir(model_dir)
pipeline = pipeline_builder(args)
pipeline(output_dir=output_dir, model_dir=model_dir)
def process_3d(args, pipeline_builder):
model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../models")
if not os.path.exists(model_dir):
os.mkdir(model_dir)
if args.output_path is not None:
output_path = Path(args.output_path)
else:
output_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../outputs")
if not args.overwrite_outputs:
date_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
output_path = os.path.join(output_path, date_time)
for path, num in get_dicom_or_nifti_paths_and_num(args.input_path):
try:
st = time()
if path.endswith(".nii") or path.endswith(".nii.gz"):
print("Processing: ", path)
else:
print("Processing: ", path, " with ", num, " slices")
min_slices = 30
if num < min_slices:
print(f"Number of slices is less than {min_slices}, skipping\n")
continue
print("")
try:
sys.stdout.flush()
except Exception:
pass
if path.endswith(".nii") or path.endswith(".nii.gz"):
folder_name = Path(os.path.basename(os.path.normpath(path)))
# remove .nii or .nii.gz
folder_name = os.path.normpath(
Path(str(folder_name).replace(".gz", "").replace(".nii", ""))
)
output_dir = Path(
os.path.join(
output_path,
folder_name,
)
)
else:
output_dir = Path(
os.path.join(
output_path,
Path(os.path.basename(os.path.normpath(path))),
)
)
if not os.path.exists(output_dir):
output_dir.mkdir(parents=True)
pipeline = pipeline_builder(path, args)
pipeline(output_dir=output_dir, model_dir=model_dir)
print(f"Finished processing {path} in {time() - st:.1f} seconds\n")
except Exception:
print(f"ERROR PROCESSING {path}\n")
traceback.print_exc()
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
continue
| 3,239 | 29 | 95 | py |
Comp2Comp | Comp2Comp-master/comp2comp/utils/env.py | import importlib
import importlib.util
import os
import sys
__all__ = []
# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path # noqa
def _import_file(module_name, file_path, make_importable=False):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
if make_importable:
sys.modules[module_name] = module
return module
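# Usage sketch (hypothetical module path): load a plugin module straight
# from a source file and register it for regular imports.
def _demo_import_file():
    return _import_file("my_plugin", "/tmp/my_plugin.py", make_importable=True)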
def _configure_libraries():
"""
Configurations for some libraries.
"""
# An environment option to disable `import cv2` globally,
# in case it leads to negative performance impact
disable_cv2 = int(os.environ.get("MEDSEGPY_DISABLE_CV2", False))
if disable_cv2:
sys.modules["cv2"] = None
else:
# Disable opencl in opencv since its interaction with cuda often
# has negative effects
# This envvar is supported after OpenCV 3.4.0
os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled"
try:
import cv2
if int(cv2.__version__.split(".")[0]) >= 3:
cv2.ocl.setUseOpenCL(False)
except ImportError:
pass
_ENV_SETUP_DONE = False
def setup_environment():
"""Perform environment setup work. The default setup is a no-op, but this
function allows the user to specify a Python source file or a module in
the $MEDSEGPY_ENV_MODULE environment variable, that performs
custom setup work that may be necessary to their computing environment.
"""
global _ENV_SETUP_DONE
if _ENV_SETUP_DONE:
return
_ENV_SETUP_DONE = True
_configure_libraries()
custom_module_path = os.environ.get("MEDSEGPY_ENV_MODULE")
if custom_module_path:
setup_custom_environment(custom_module_path)
else:
# The default setup is a no-op
pass
def setup_custom_environment(custom_module):
"""
Load custom environment setup by importing a Python source file or a
module, and run the setup function.
"""
if custom_module.endswith(".py"):
module = _import_file("medsegpy.utils.env.custom_module", custom_module)
else:
module = importlib.import_module(custom_module)
assert hasattr(module, "setup_environment") and callable(module.setup_environment), (
"Custom environment module defined in {} does not have the "
"required callable attribute 'setup_environment'."
).format(custom_module)
module.setup_environment()
| 2,539 | 30.358025 | 99 | py |
Comp2Comp | Comp2Comp-master/comp2comp/hip/hip_utils.py | """
@author: louisblankemeier
"""
import math
import os
import shutil
import cv2
import nibabel as nib
import numpy as np
import scipy.ndimage as ndi
from scipy.ndimage import zoom
from skimage.morphology import ball, binary_erosion
from comp2comp.hip.hip_visualization import method_visualizer
def compute_rois(medical_volume, segmentation, model, output_dir, save=False):
left_femur_mask = segmentation.get_fdata() == model.categories["femur_left"]
left_femur_mask = left_femur_mask.astype(np.uint8)
right_femur_mask = segmentation.get_fdata() == model.categories["femur_right"]
right_femur_mask = right_femur_mask.astype(np.uint8)
left_head_roi, left_head_centroid, left_head_hu = get_femural_head_roi(
left_femur_mask, medical_volume, output_dir, "left_head"
)
right_head_roi, right_head_centroid, right_head_hu = get_femural_head_roi(
right_femur_mask, medical_volume, output_dir, "right_head"
)
(
left_intertrochanter_roi,
left_intertrochanter_centroid,
left_intertrochanter_hu,
) = get_femural_head_roi(left_femur_mask, medical_volume, output_dir, "left_intertrochanter")
(
right_intertrochanter_roi,
right_intertrochanter_centroid,
right_intertrochanter_hu,
) = get_femural_head_roi(right_femur_mask, medical_volume, output_dir, "right_intertrochanter")
(left_neck_roi, left_neck_centroid, left_neck_hu,) = get_femural_neck_roi(
left_femur_mask,
medical_volume,
left_intertrochanter_roi,
left_intertrochanter_centroid,
left_head_roi,
left_head_centroid,
output_dir,
)
(right_neck_roi, right_neck_centroid, right_neck_hu,) = get_femural_neck_roi(
right_femur_mask,
medical_volume,
right_intertrochanter_roi,
right_intertrochanter_centroid,
right_head_roi,
right_head_centroid,
output_dir,
)
combined_roi = (
left_head_roi
+ (right_head_roi) # * 2)
+ (left_intertrochanter_roi) # * 3)
+ (right_intertrochanter_roi) # * 4)
+ (left_neck_roi) # * 5)
+ (right_neck_roi) # * 6)
)
if save:
# make roi directory if it doesn't exist
parent_output_dir = os.path.dirname(output_dir)
roi_output_dir = os.path.join(parent_output_dir, "rois")
if not os.path.exists(roi_output_dir):
os.makedirs(roi_output_dir)
# Convert left ROI to NIfTI
left_roi_nifti = nib.Nifti1Image(combined_roi, medical_volume.affine)
left_roi_path = os.path.join(roi_output_dir, "roi.nii.gz")
nib.save(left_roi_nifti, left_roi_path)
shutil.copy(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"tunnelvision.ipynb",
),
parent_output_dir,
)
return {
"left_head": {"roi": left_head_roi, "centroid": left_head_centroid, "hu": left_head_hu},
"right_head": {"roi": right_head_roi, "centroid": right_head_centroid, "hu": right_head_hu},
"left_intertrochanter": {
"roi": left_intertrochanter_roi,
"centroid": left_intertrochanter_centroid,
"hu": left_intertrochanter_hu,
},
"right_intertrochanter": {
"roi": right_intertrochanter_roi,
"centroid": right_intertrochanter_centroid,
"hu": right_intertrochanter_hu,
},
"left_neck": {
"roi": left_neck_roi,
"centroid": left_neck_centroid,
"hu": left_neck_hu,
},
"right_neck": {
"roi": right_neck_roi,
"centroid": right_neck_centroid,
"hu": right_neck_hu,
},
}
def get_femural_head_roi(
femur_mask, medical_volume, output_dir, anatomy, visualize_method=False, min_pixel_count=20
):
top = np.where(femur_mask.sum(axis=(0, 1)) != 0)[0].max()
top_mask = femur_mask[:, :, top]
print(f"======== Computing {anatomy} femur ROIs ========")
while True:
labeled, num_features = ndi.label(top_mask)
component_sizes = np.bincount(labeled.ravel())
valid_components = np.where(component_sizes >= min_pixel_count)[0][1:]
if len(valid_components) == 2:
break
top -= 1
if top < 0:
print("Two connected components not found in the femur mask.")
break
top_mask = femur_mask[:, :, top]
if len(valid_components) == 2:
# Find the center of mass for each connected component
center_of_mass_1 = list(ndi.center_of_mass(top_mask, labeled, valid_components[0]))
center_of_mass_2 = list(ndi.center_of_mass(top_mask, labeled, valid_components[1]))
# Assign left_center_of_mass to be the center of mass with lowest value in the first dimension
if center_of_mass_1[0] < center_of_mass_2[0]:
left_center_of_mass = center_of_mass_1
right_center_of_mass = center_of_mass_2
else:
left_center_of_mass = center_of_mass_2
right_center_of_mass = center_of_mass_1
print(f"Left center of mass: {left_center_of_mass}")
print(f"Right center of mass: {right_center_of_mass}")
if anatomy == "left_intertrochanter" or anatomy == "right_head":
center_of_mass = left_center_of_mass
elif anatomy == "right_intertrochanter" or anatomy == "left_head":
center_of_mass = right_center_of_mass
coronal_slice = femur_mask[:, round(center_of_mass[1]), :]
coronal_image = medical_volume.get_fdata()[:, round(center_of_mass[1]), :]
sagittal_slice = femur_mask[round(center_of_mass[0]), :, :]
sagittal_image = medical_volume.get_fdata()[round(center_of_mass[0]), :, :]
zooms = medical_volume.header.get_zooms()
zoom_factor = zooms[2] / zooms[1]
coronal_slice = zoom(coronal_slice, (1, zoom_factor), order=1).round()
coronal_image = zoom(coronal_image, (1, zoom_factor), order=3).round()
sagittal_image = zoom(sagittal_image, (1, zoom_factor), order=3).round()
centroid = [round(center_of_mass[0]), 0, 0]
print(f"Starting centroid: {centroid}")
for _ in range(3):
sagittal_slice = femur_mask[centroid[0], :, :]
sagittal_slice = zoom(sagittal_slice, (1, zoom_factor), order=1).round()
centroid[1], centroid[2], radius_sagittal = inscribe_sagittal(sagittal_slice, zoom_factor)
print(f"Centroid after inscribe sagittal: {centroid}")
axial_slice = femur_mask[:, :, centroid[2]]
if anatomy == "left_intertrochanter" or anatomy == "right_head":
axial_slice[round(right_center_of_mass[0]) :, :] = 0
elif anatomy == "right_intertrochanter" or anatomy == "left_head":
axial_slice[: round(left_center_of_mass[0]), :] = 0
centroid[0], centroid[1], radius_axial = inscribe_axial(axial_slice)
print(f"Centroid after inscribe axial: {centroid}")
axial_image = medical_volume.get_fdata()[:, :, round(centroid[2])]
sagittal_image = medical_volume.get_fdata()[round(centroid[0]), :, :]
sagittal_image = zoom(sagittal_image, (1, zoom_factor), order=3).round()
if visualize_method:
method_visualizer(
sagittal_image,
axial_image,
axial_slice,
sagittal_slice,
[centroid[2], centroid[1]],
radius_sagittal,
[centroid[1], centroid[0]],
radius_axial,
output_dir,
anatomy,
)
roi = compute_hip_roi(medical_volume, centroid, radius_sagittal, radius_axial)
# selem = ndi.generate_binary_structure(3, 1)
selem = ball(3)
femur_mask_eroded = binary_erosion(femur_mask, selem)
roi = roi * femur_mask_eroded
roi_eroded = roi.astype(np.uint8)
hu = get_mean_roi_hu(medical_volume, roi_eroded)
return (roi_eroded, centroid, hu)
def get_femural_neck_roi(
femur_mask,
medical_volume,
intertrochanter_roi,
intertrochanter_centroid,
head_roi,
head_centroid,
output_dir,
):
zooms = medical_volume.header.get_zooms()
direction_vector = np.array(head_centroid) - np.array(intertrochanter_centroid)
unit_direction_vector = direction_vector / np.linalg.norm(direction_vector)
z, y, x = np.where(intertrochanter_roi)
intertrochanter_points = np.column_stack((z, y, x))
t_start = np.dot(intertrochanter_points - intertrochanter_centroid, unit_direction_vector).max()
z, y, x = np.where(head_roi)
head_points = np.column_stack((z, y, x))
t_end = (
np.linalg.norm(direction_vector)
+ np.dot(head_points - head_centroid, unit_direction_vector).min()
)
z, y, x = np.indices(femur_mask.shape)
coordinates = np.stack((z, y, x), axis=-1)
distance_to_line_origin = np.dot(coordinates - intertrochanter_centroid, unit_direction_vector)
coordinates_zoomed = coordinates * zooms
intertrochanter_centroid_zoomed = np.array(intertrochanter_centroid) * zooms
unit_direction_vector_zoomed = unit_direction_vector * zooms
distance_to_line = np.linalg.norm(
np.cross(
coordinates_zoomed - intertrochanter_centroid_zoomed,
coordinates_zoomed - (intertrochanter_centroid_zoomed + unit_direction_vector_zoomed),
),
axis=-1,
) / np.linalg.norm(unit_direction_vector_zoomed)
cylinder_radius = 10
cylinder_mask = (
(distance_to_line <= cylinder_radius)
& (distance_to_line_origin >= t_start)
& (distance_to_line_origin <= t_end)
)
# selem = ndi.generate_binary_structure(3, 1)
selem = ball(3)
femur_mask_eroded = binary_erosion(femur_mask, selem)
roi = cylinder_mask * femur_mask_eroded
neck_roi = roi.astype(np.uint8)
hu = get_mean_roi_hu(medical_volume, neck_roi)
centroid = list(intertrochanter_centroid + unit_direction_vector * (t_start + t_end) / 2)
centroid = [round(x) for x in centroid]
return neck_roi, centroid, hu
def compute_hip_roi(img, centroid, radius_sagittal, radius_axial):
pixel_spacing = img.header.get_zooms()
length_i = radius_axial * 0.75 / pixel_spacing[0]
length_j = radius_axial * 0.75 / pixel_spacing[1]
length_k = radius_sagittal * 0.75 / pixel_spacing[2]
roi = np.zeros(img.get_fdata().shape, dtype=np.uint8)
i_lower = math.floor(centroid[0] - length_i)
j_lower = math.floor(centroid[1] - length_j)
k_lower = math.floor(centroid[2] - length_k)
for i in range(i_lower, i_lower + 2 * math.ceil(length_i) + 1):
for j in range(j_lower, j_lower + 2 * math.ceil(length_j) + 1):
for k in range(k_lower, k_lower + 2 * math.ceil(length_k) + 1):
if (i - centroid[0]) ** 2 / length_i**2 + (
j - centroid[1]
) ** 2 / length_j**2 + (k - centroid[2]) ** 2 / length_k**2 <= 1:
roi[i, j, k] = 1
return roi
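# Synthetic sketch (illustrative radii): with unit voxel spacing, the ROI
# is an ellipsoid of radius 0.75 * radius around the centroid.
def _demo_compute_hip_roi():
    img = nib.Nifti1Image(np.zeros((32, 32, 32)), np.eye(4))
    roi = compute_hip_roi(img, [16, 16, 16], radius_sagittal=8, radius_axial=8)
    return int(roi.sum())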
def inscribe_axial(axial_mask):
dist_map = cv2.distanceTransform(axial_mask, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
_, radius_axial, _, center_axial = cv2.minMaxLoc(dist_map)
center_axial = list(center_axial)
left_right_center = round(center_axial[1])
posterior_anterior_center = round(center_axial[0])
return left_right_center, posterior_anterior_center, radius_axial
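# Toy check (illustrative): the largest inscribed circle of a filled
# square sits at its center with radius about half the side length.
def _demo_inscribe_axial():
    mask = np.zeros((64, 64), dtype=np.uint8)
    mask[16:48, 16:48] = 1
    return inscribe_axial(mask)  # -> (~31, ~31, ~16.0)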
def inscribe_sagittal(sagittal_mask, zoom_factor):
dist_map = cv2.distanceTransform(sagittal_mask, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
_, radius_sagittal, _, center_sagittal = cv2.minMaxLoc(dist_map)
center_sagittal = list(center_sagittal)
posterior_anterior_center = round(center_sagittal[1])
inferior_superior_center = round(center_sagittal[0])
inferior_superior_center = round(inferior_superior_center / zoom_factor)
return posterior_anterior_center, inferior_superior_center, radius_sagittal
def get_mean_roi_hu(medical_volume, roi):
masked_medical_volume = medical_volume.get_fdata() * roi
return np.mean(masked_medical_volume[masked_medical_volume != 0])
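# Toy check (synthetic volume): with a constant-valued volume and an
# all-ones ROI, the mean HU equals that constant.
def _demo_get_mean_roi_hu():
    vol = nib.Nifti1Image(np.full((4, 4, 4), 100.0), np.eye(4))
    roi = np.ones((4, 4, 4), dtype=np.uint8)
    return get_mean_roi_hu(vol, roi)  # -> 100.0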
| 12,093 | 36.212308 | 102 | py |
Comp2Comp | Comp2Comp-master/comp2comp/hip/hip.py | """
@author: louisblankemeier
"""
import os
from pathlib import Path
from time import time
from typing import Union
import pandas as pd
from totalsegmentator.libs import (
download_pretrained_weights,
nostdout,
setup_nnunet,
)
from comp2comp.hip import hip_utils
from comp2comp.hip.hip_visualization import (
hip_report_visualizer,
hip_roi_visualizer,
)
from comp2comp.inference_class_base import InferenceClass
from comp2comp.models.models import Models
class HipSegmentation(InferenceClass):
"""Spine segmentation."""
def __init__(self, model_name):
super().__init__()
self.model_name = model_name
self.model = Models.model_from_name(model_name)
def __call__(self, inference_pipeline):
# inference_pipeline.dicom_series_path = self.input_path
self.output_dir = inference_pipeline.output_dir
self.output_dir_segmentations = os.path.join(self.output_dir, "segmentations/")
if not os.path.exists(self.output_dir_segmentations):
os.makedirs(self.output_dir_segmentations)
self.model_dir = inference_pipeline.model_dir
seg, mv = self.hip_seg(
os.path.join(self.output_dir_segmentations, "converted_dcm.nii.gz"),
self.output_dir_segmentations + "hip.nii.gz",
inference_pipeline.model_dir,
)
inference_pipeline.model = self.model
inference_pipeline.segmentation = seg
inference_pipeline.medical_volume = mv
return {}
def hip_seg(self, input_path: Union[str, Path], output_path: Union[str, Path], model_dir):
"""Run spine segmentation.
Args:
input_path (Union[str, Path]): Input path.
output_path (Union[str, Path]): Output path.
"""
print("Segmenting hip...")
st = time()
os.environ["SCRATCH"] = self.model_dir
# Setup nnunet
model = "3d_fullres"
folds = [0]
trainer = "nnUNetTrainerV2_ep4000_nomirror"
crop_path = None
task_id = [254]
if self.model_name == "ts_hip":
setup_nnunet()
download_pretrained_weights(task_id[0])
else:
raise ValueError("Invalid model name.")
from totalsegmentator.nnunet import nnUNet_predict_image
with nostdout():
img, seg = nnUNet_predict_image(
input_path,
output_path,
task_id,
model=model,
folds=folds,
trainer=trainer,
tta=False,
multilabel_image=True,
resample=1.5,
crop=None,
crop_path=crop_path,
task_name="total",
nora_tag=None,
preview=False,
nr_threads_resampling=1,
nr_threads_saving=6,
quiet=False,
verbose=False,
test=0,
)
end = time()
# Log total time for hip segmentation
print(f"Total time for hip segmentation: {end-st:.2f}s.")
return seg, img
class HipComputeROIs(InferenceClass):
def __init__(self, hip_model):
super().__init__()
self.hip_model_name = hip_model
self.hip_model_type = Models.model_from_name(self.hip_model_name)
def __call__(self, inference_pipeline):
segmentation = inference_pipeline.segmentation
medical_volume = inference_pipeline.medical_volume
model = inference_pipeline.model
images_folder = os.path.join(inference_pipeline.output_dir, "dev")
results_dict = hip_utils.compute_rois(medical_volume, segmentation, model, images_folder)
inference_pipeline.femur_results_dict = results_dict
return {}
class HipMetricsSaver(InferenceClass):
"""Save metrics to a CSV file."""
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline):
metrics_output_dir = os.path.join(inference_pipeline.output_dir, "metrics")
if not os.path.exists(metrics_output_dir):
os.makedirs(metrics_output_dir)
results_dict = inference_pipeline.femur_results_dict
left_head_hu = results_dict["left_head"]["hu"]
right_head_hu = results_dict["right_head"]["hu"]
left_intertrochanter_hu = results_dict["left_intertrochanter"]["hu"]
right_intertrochanter_hu = results_dict["right_intertrochanter"]["hu"]
left_neck_hu = results_dict["left_neck"]["hu"]
right_neck_hu = results_dict["right_neck"]["hu"]
# save to csv
df = pd.DataFrame(
{
"Left Head (HU)": [left_head_hu],
"Right Head (HU)": [right_head_hu],
"Left Intertrochanter (HU)": [left_intertrochanter_hu],
"Right Intertrochanter (HU)": [right_intertrochanter_hu],
"Left Neck (HU)": [left_neck_hu],
"Right Neck (HU)": [right_neck_hu],
}
)
df.to_csv(os.path.join(metrics_output_dir, "hip_metrics.csv"), index=False)
return {}
class HipVisualizer(InferenceClass):
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline):
medical_volume = inference_pipeline.medical_volume
left_head_roi = inference_pipeline.femur_results_dict["left_head"]["roi"]
left_head_centroid = inference_pipeline.femur_results_dict["left_head"]["centroid"]
left_head_hu = inference_pipeline.femur_results_dict["left_head"]["hu"]
left_intertrochanter_roi = inference_pipeline.femur_results_dict["left_intertrochanter"][
"roi"
]
left_intertrochanter_centroid = inference_pipeline.femur_results_dict[
"left_intertrochanter"
]["centroid"]
left_intertrochanter_hu = inference_pipeline.femur_results_dict["left_intertrochanter"][
"hu"
]
left_neck_roi = inference_pipeline.femur_results_dict["left_neck"]["roi"]
left_neck_centroid = inference_pipeline.femur_results_dict["left_neck"]["centroid"]
left_neck_hu = inference_pipeline.femur_results_dict["left_neck"]["hu"]
right_head_roi = inference_pipeline.femur_results_dict["right_head"]["roi"]
right_head_centroid = inference_pipeline.femur_results_dict["right_head"]["centroid"]
right_head_hu = inference_pipeline.femur_results_dict["right_head"]["hu"]
right_intertrochanter_roi = inference_pipeline.femur_results_dict["right_intertrochanter"][
"roi"
]
right_intertrochanter_centroid = inference_pipeline.femur_results_dict[
"right_intertrochanter"
]["centroid"]
right_intertrochanter_hu = inference_pipeline.femur_results_dict["right_intertrochanter"][
"hu"
]
right_neck_roi = inference_pipeline.femur_results_dict["right_neck"]["roi"]
right_neck_centroid = inference_pipeline.femur_results_dict["right_neck"]["centroid"]
right_neck_hu = inference_pipeline.femur_results_dict["right_neck"]["hu"]
output_dir = inference_pipeline.output_dir
images_output_dir = os.path.join(output_dir, "images")
if not os.path.exists(images_output_dir):
os.makedirs(images_output_dir)
hip_roi_visualizer(
medical_volume,
left_head_roi,
left_head_centroid,
left_head_hu,
images_output_dir,
"left_head",
)
hip_roi_visualizer(
medical_volume,
left_intertrochanter_roi,
left_intertrochanter_centroid,
left_intertrochanter_hu,
images_output_dir,
"left_intertrochanter",
)
hip_roi_visualizer(
medical_volume,
left_neck_roi,
left_neck_centroid,
left_neck_hu,
images_output_dir,
"left_neck",
)
hip_roi_visualizer(
medical_volume,
right_head_roi,
right_head_centroid,
right_head_hu,
images_output_dir,
"right_head",
)
hip_roi_visualizer(
medical_volume,
right_intertrochanter_roi,
right_intertrochanter_centroid,
right_intertrochanter_hu,
images_output_dir,
"right_intertrochanter",
)
hip_roi_visualizer(
medical_volume,
right_neck_roi,
right_neck_centroid,
right_neck_hu,
images_output_dir,
"right_neck",
)
hip_report_visualizer(
medical_volume.get_fdata(),
left_head_roi + right_head_roi,
[left_head_centroid, right_head_centroid],
images_output_dir,
"head",
{"Left Head HU": round(left_head_hu), "Right Head HU": round(right_head_hu)},
)
hip_report_visualizer(
medical_volume.get_fdata(),
left_intertrochanter_roi + right_intertrochanter_roi,
[left_intertrochanter_centroid, right_intertrochanter_centroid],
images_output_dir,
"intertrochanter",
{
"Left Intertrochanter HU": round(left_intertrochanter_hu),
"Right Intertrochanter HU": round(right_intertrochanter_hu),
},
)
hip_report_visualizer(
medical_volume.get_fdata(),
left_neck_roi + right_neck_roi,
[left_neck_centroid, right_neck_centroid],
images_output_dir,
"neck",
{"Left Neck HU": round(left_neck_hu), "Right Neck HU": round(right_neck_hu)},
)
return {}
| 9,838 | 33.522807 | 99 | py |
Comp2Comp | Comp2Comp-master/comp2comp/hip/hip_visualization.py | """
@author: louisblankemeier
"""
import os
import numpy as np
from scipy.ndimage import zoom
from comp2comp.visualization.detectron_visualizer import Visualizer
from comp2comp.visualization.linear_planar_reformation import (
linear_planar_reformation,
)
def method_visualizer(
sagittal_image,
axial_image,
axial_slice,
sagittal_slice,
center_sagittal,
radius_sagittal,
center_axial,
radius_axial,
output_dir,
anatomy,
):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
axial_image = np.clip(axial_image, -300, 1800)
axial_image = normalize_img(axial_image) * 255.0
sagittal_image = np.clip(sagittal_image, -300, 1800)
sagittal_image = normalize_img(sagittal_image) * 255.0
sagittal_image = sagittal_image.reshape((sagittal_image.shape[0], sagittal_image.shape[1], 1))
img_rgb = np.tile(sagittal_image, (1, 1, 3))
vis = Visualizer(img_rgb)
vis.draw_circle(circle_coord=center_sagittal, color=[0, 1, 0], radius=radius_sagittal)
vis.draw_binary_mask(sagittal_slice)
vis_obj = vis.get_output()
vis_obj.save(os.path.join(output_dir, f"{anatomy}_sagittal_method.png"))
axial_image = axial_image.reshape((axial_image.shape[0], axial_image.shape[1], 1))
img_rgb = np.tile(axial_image, (1, 1, 3))
vis = Visualizer(img_rgb)
vis.draw_circle(circle_coord=center_axial, color=[0, 1, 0], radius=radius_axial)
vis.draw_binary_mask(axial_slice)
vis_obj = vis.get_output()
vis_obj.save(os.path.join(output_dir, f"{anatomy}_axial_method.png"))
def hip_roi_visualizer(
medical_volume,
roi,
centroid,
hu,
output_dir,
anatomy,
):
zooms = medical_volume.header.get_zooms()
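    # zooms holds the (x, y, z) voxel sizes; scaling the slice axis by z/y
    # below makes the sagittal view approximately isotropic for display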
zoom_factor = zooms[2] / zooms[1]
sagittal_image = medical_volume.get_fdata()[centroid[0], :, :]
sagittal_roi = roi[centroid[0], :, :]
sagittal_image = zoom(sagittal_image, (1, zoom_factor), order=1).round()
sagittal_roi = zoom(sagittal_roi, (1, zoom_factor), order=3).round()
sagittal_image = np.flip(sagittal_image.T)
sagittal_roi = np.flip(sagittal_roi.T)
axial_image = medical_volume.get_fdata()[:, :, round(centroid[2])]
axial_roi = roi[:, :, round(centroid[2])]
axial_image = np.flip(axial_image.T)
axial_roi = np.flip(axial_roi.T)
_ROI_COLOR = np.array([1.000, 0.340, 0.200])
sagittal_image = np.clip(sagittal_image, -300, 1800)
sagittal_image = normalize_img(sagittal_image) * 255.0
sagittal_image = sagittal_image.reshape((sagittal_image.shape[0], sagittal_image.shape[1], 1))
img_rgb = np.tile(sagittal_image, (1, 1, 3))
vis = Visualizer(img_rgb)
vis.draw_binary_mask(
sagittal_roi, color=_ROI_COLOR, edge_color=_ROI_COLOR, alpha=0.0, area_threshold=0
)
vis.draw_text(
text=f"Mean HU: {round(hu)}",
position=(412, 10),
color=_ROI_COLOR,
font_size=9,
horizontal_alignment="left",
)
vis_obj = vis.get_output()
vis_obj.save(os.path.join(output_dir, f"{anatomy}_hip_roi_sagittal.png"))
"""
axial_image = np.clip(axial_image, -300, 1800)
axial_image = normalize_img(axial_image) * 255.0
axial_image = axial_image.reshape((axial_image.shape[0], axial_image.shape[1], 1))
img_rgb = np.tile(axial_image, (1, 1, 3))
vis = Visualizer(img_rgb)
vis.draw_binary_mask(
axial_roi, color=_ROI_COLOR, edge_color=_ROI_COLOR, alpha=0.0, area_threshold=0
)
vis.draw_text(
text=f"Mean HU: {round(hu)}",
position=(412, 10),
color=_ROI_COLOR,
font_size=9,
horizontal_alignment="left",
)
vis_obj = vis.get_output()
vis_obj.save(os.path.join(output_dir, f"{anatomy}_hip_roi_axial.png"))
"""
def hip_report_visualizer(medical_volume, roi, centroids, output_dir, anatomy, labels):
_ROI_COLOR = np.array([1.000, 0.340, 0.200])
image, mask = linear_planar_reformation(medical_volume, roi, centroids, dimension="axial")
# add 3rd dim to image
image = np.flip(image.T)
mask = np.flip(mask.T)
mask[mask > 1] = 1
# mask = np.expand_dims(mask, axis=2)
image = np.expand_dims(image, axis=2)
image = np.clip(image, -300, 1800)
image = normalize_img(image) * 255.0
img_rgb = np.tile(image, (1, 1, 3))
vis = Visualizer(img_rgb)
vis.draw_binary_mask(mask, color=_ROI_COLOR, edge_color=_ROI_COLOR, alpha=0.0, area_threshold=0)
pos_idx = 0
for key, value in labels.items():
vis.draw_text(
text=f"{key}: {value}",
position=(310, 10 + pos_idx * 17),
color=_ROI_COLOR,
font_size=9,
horizontal_alignment="left",
)
pos_idx += 1
vis_obj = vis.get_output()
vis_obj.save(os.path.join(output_dir, f"{anatomy}_report_axial.png"))
def normalize_img(img: np.ndarray) -> np.ndarray:
"""Normalize the image.
Args:
img (np.ndarray): Input image.
Returns:
np.ndarray: Normalized image.
"""
return (img - img.min()) / (img.max() - img.min())
| 5,065 | 31.063291 | 100 | py |
Comp2Comp | Comp2Comp-master/comp2comp/liver_spleen_pancreas/visualization_utils.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy
from matplotlib.colors import ListedColormap
from PIL import Image
def extract_axial_mid_slice(ct, mask, crop=True):
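    # pick the axial slice where the organ mask covers the most voxels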
slice_idx = np.argmax(mask.sum(axis=(0, 1)))
ct_slice_z = np.transpose(ct[:, :, slice_idx], axes=(1, 0))
mask_slice_z = np.transpose(mask[:, :, slice_idx], axes=(1, 0))
ct_slice_z = np.flip(ct_slice_z, axis=(0, 1))
mask_slice_z = np.flip(mask_slice_z, axis=(0, 1))
if crop:
ct_range_x = np.where(ct_slice_z.max(axis=0) > -200)[0][[0, -1]]
ct_slice_z = ct_slice_z[ct_range_x[0] : ct_range_x[1], ct_range_x[0] : ct_range_x[1]]
mask_slice_z = mask_slice_z[ct_range_x[0] : ct_range_x[1], ct_range_x[0] : ct_range_x[1]]
return ct_slice_z, mask_slice_z
def extract_coronal_mid_slice(ct, mask, crop=True):
# find the slice with max coherent extent of the organ
    coronal_extent = np.where(mask.sum(axis=(0, 2)))[0]
    max_extent = 0
    max_extent_idx = 0
    for idx in coronal_extent:
label, num_features = scipy.ndimage.label(mask[:, idx, :])
if num_features > 1:
continue
else:
extent = len(np.where(label.sum(axis=1))[0])
if extent > max_extent:
max_extent = extent
max_extent_idx = idx
ct_slice_y = np.transpose(ct[:, max_extent_idx, :], axes=(1, 0))
mask_slice_y = np.transpose(mask[:, max_extent_idx, :], axes=(1, 0))
ct_slice_y = np.flip(ct_slice_y, axis=1)
mask_slice_y = np.flip(mask_slice_y, axis=1)
return ct_slice_y, mask_slice_y
def save_slice(
ct_slice,
mask_slice,
path,
figsize=(12, 12),
corner_text=None,
unit_dict=None,
aspect=1,
show=False,
xy_placement=None,
class_color=1,
fontsize=14,
):
# colormap for shown segmentations
color_array = plt.get_cmap("tab10")(range(10))
color_array = np.concatenate((np.array([[0, 0, 0, 0]]), color_array[:, :]), axis=0)
map_object_seg = ListedColormap(name="segmentation_cmap", colors=color_array)
fig, axx = plt.subplots(1, figsize=figsize, frameon=False)
axx.imshow(
ct_slice,
cmap="gray",
vmin=-400,
vmax=400,
interpolation="spline36",
aspect=aspect,
origin="lower",
)
axx.imshow(
mask_slice * class_color,
cmap=map_object_seg,
vmin=0,
vmax=9,
alpha=0.2,
interpolation="nearest",
aspect=aspect,
origin="lower",
)
plt.axis("off")
axx.axes.get_xaxis().set_visible(False)
axx.axes.get_yaxis().set_visible(False)
y_size, x_size = ct_slice.shape
if corner_text is not None:
bbox_props = dict(boxstyle="round", facecolor="gray", alpha=0.5)
texts = []
for k, v in corner_text.items():
if isinstance(v, str):
texts.append("{:<9}{}".format(k + ":", v))
else:
unit = unit_dict[k] if k in unit_dict else ""
texts.append("{:<9}{:.0f} {}".format(k + ":", v, unit))
if xy_placement is None:
            # measure the text box extent, remove it, then plot again at the correct position
t = axx.text(
0.5,
0.5,
"\n".join(texts),
color="white",
transform=axx.transAxes,
fontsize=fontsize,
family="monospace",
bbox=bbox_props,
va="top",
ha="left",
)
xmin, xmax = t.get_window_extent().xmin, t.get_window_extent().xmax
xmin, xmax = axx.transAxes.inverted().transform((xmin, xmax))
xy_placement = [1 - (xmax - xmin) - (xmax - xmin) * 0.09, 0.975]
t.remove()
axx.text(
xy_placement[0],
xy_placement[1],
"\n".join(texts),
color="white",
transform=axx.transAxes,
fontsize=fontsize,
family="monospace",
bbox=bbox_props,
va="top",
ha="left",
)
if show:
plt.show()
else:
fig.savefig(path, bbox_inches="tight", pad_inches=0)
plt.close(fig)
def slicedDilationOrErosion(input_mask, num_iteration, operation):
"""
    Perform the dilation or erosion on the smallest sub-volume that
    encloses the segmentation, for speed
"""
margin = 2 if num_iteration is None else num_iteration + 1
# find the minimum volume enclosing the organ
x_idx = np.where(input_mask.sum(axis=(1, 2)))[0]
x_start, x_end = x_idx[0] - margin, x_idx[-1] + margin
y_idx = np.where(input_mask.sum(axis=(0, 2)))[0]
y_start, y_end = y_idx[0] - margin, y_idx[-1] + margin
z_idx = np.where(input_mask.sum(axis=(0, 1)))[0]
z_start, z_end = z_idx[0] - margin, z_idx[-1] + margin
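    # build a 3-D cross-shaped structuring element and grow it so that a
    # single pass applies num_iteration voxels of dilation/erosion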
struct = scipy.ndimage.generate_binary_structure(3, 1)
struct = scipy.ndimage.iterate_structure(struct, num_iteration)
if operation == "dilate":
mask_slice = scipy.ndimage.binary_dilation(
input_mask[x_start:x_end, y_start:y_end, z_start:z_end], structure=struct
).astype(np.int8)
elif operation == "erode":
mask_slice = scipy.ndimage.binary_erosion(
input_mask[x_start:x_end, y_start:y_end, z_start:z_end], structure=struct
).astype(np.int8)
output_mask = input_mask.copy()
output_mask[x_start:x_end, y_start:y_end, z_start:z_end] = mask_slice
return output_mask
def extract_organ_metrics(ct, all_masks, class_num=None, vol_per_pixel=None, erode_mask=True):
if erode_mask:
eroded_mask = slicedDilationOrErosion(
input_mask=(all_masks == class_num), num_iteration=3, operation="erode"
)
ct_organ_vals = ct[eroded_mask == 1]
else:
ct_organ_vals = ct[all_masks == class_num]
    # volume in ml: voxel count times volume-per-voxel
organ_vol = (all_masks == class_num).sum() * vol_per_pixel
organ_mean = ct_organ_vals.mean()
organ_median = np.median(ct_organ_vals)
results = {
"Organ": class_map_part_organs[class_num],
"Volume": organ_vol,
"Mean": organ_mean,
"Median": organ_median,
}
return results
def generate_slice_images(
ct, all_masks, class_nums, unit_dict, vol_per_pixel, pix_dims, root, fontsize=20, show=False
):
all_results = {}
colors = [1, 3, 4]
for i, c_num in enumerate(class_nums):
organ_name = class_map_part_organs[c_num]
axial_path = os.path.join(root, organ_name.lower() + "_axial.png")
coronal_path = os.path.join(root, organ_name.lower() + "_coronal.png")
ct_slice_z, liver_slice_z = extract_axial_mid_slice(ct, all_masks == c_num)
results = extract_organ_metrics(ct, all_masks, class_num=c_num, vol_per_pixel=vol_per_pixel)
save_slice(
ct_slice_z,
liver_slice_z,
axial_path,
figsize=(12, 12),
corner_text=results,
unit_dict=unit_dict,
class_color=colors[i],
fontsize=fontsize,
show=show,
)
ct_slice_y, liver_slice_y = extract_coronal_mid_slice(ct, all_masks == c_num)
save_slice(
ct_slice_y,
liver_slice_y,
coronal_path,
figsize=(12, 12),
aspect=pix_dims[2] / pix_dims[1],
show=show,
class_color=colors[i],
)
all_results[results["Organ"]] = results
if show:
return
return all_results
def generate_liver_spleen_pancreas_report(root, organ_names):
axial_imgs = [Image.open(os.path.join(root, organ + "_axial.png")) for organ in organ_names]
coronal_imgs = [Image.open(os.path.join(root, organ + "_coronal.png")) for organ in organ_names]
result_width = max(
sum([img.size[0] for img in axial_imgs]), sum([img.size[0] for img in coronal_imgs])
)
result_height = max([a.size[1] + c.size[1] for a, c in zip(axial_imgs, coronal_imgs)])
result = Image.new("RGB", (result_width, result_height))
total_width = 0
for a_img, c_img in zip(axial_imgs, coronal_imgs):
a_width, a_height = a_img.size
c_width, c_height = c_img.size
translate = (a_width - c_width) // 2 if a_width > c_width else 0
result.paste(im=a_img, box=(total_width, 0))
result.paste(im=c_img, box=(translate + total_width, a_height))
total_width += a_width
result.save(os.path.join(root, "liver_spleen_pancreas_report.png"))
# from https://github.com/wasserth/TotalSegmentator/blob/master/totalsegmentator/map_to_binary.py
class_map_part_organs = {
1: "Spleen",
2: "Right Kidney",
3: "Left Kidney",
4: "Gallbladder",
5: "Liver",
6: "Stomach",
7: "Aorta",
8: "Inferior vena cava",
9: "portal Vein and Splenic Vein",
10: "Pancreas",
11: "Right Adrenal Gland",
12: "Left Adrenal Gland Left",
13: "lung_upper_lobe_left",
14: "lung_lower_lobe_left",
15: "lung_upper_lobe_right",
16: "lung_middle_lobe_right",
17: "lung_lower_lobe_right",
}
| 9,235 | 28.227848 | 100 | py |
Comp2Comp | Comp2Comp-master/comp2comp/liver_spleen_pancreas/visualization.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import numpy as np
from comp2comp.inference_class_base import InferenceClass
from comp2comp.liver_spleen_pancreas.visualization_utils import (
generate_liver_spleen_pancreas_report,
generate_slice_images,
)
class LiverSpleenPancreasVisualizer(InferenceClass):
def __init__(self):
super().__init__()
self.unit_dict = {
"Volume": r"$\mathregular{cm^3}$",
"Mean": "HU",
"Median": "HU",
}
self.class_nums = [1, 5, 10]
self.organ_names = ["liver", "spleen", "pancreas"]
def __call__(self, inference_pipeline):
self.output_dir = inference_pipeline.output_dir
self.output_dir_images_organs = os.path.join(self.output_dir, "images/")
        inference_pipeline.output_dir_images_organs = self.output_dir_images_organs
if not os.path.exists(self.output_dir_images_organs):
os.makedirs(self.output_dir_images_organs)
inference_pipeline.medical_volume_arr = np.flip(
inference_pipeline.medical_volume.get_fdata(), axis=1
)
inference_pipeline.segmentation_arr = np.flip(
inference_pipeline.segmentation.get_fdata(), axis=1
)
inference_pipeline.pix_dims = inference_pipeline.medical_volume.header["pixdim"][1:4]
inference_pipeline.vol_per_pixel = np.prod(
inference_pipeline.pix_dims / 10
) # mm to cm for having ml/pixel.
self.organ_metrics = generate_slice_images(
inference_pipeline.medical_volume_arr,
inference_pipeline.segmentation_arr,
self.class_nums,
self.unit_dict,
inference_pipeline.vol_per_pixel,
inference_pipeline.pix_dims,
self.output_dir_images_organs,
fontsize=24,
)
inference_pipeline.organ_metrics = self.organ_metrics
generate_liver_spleen_pancreas_report(self.output_dir_images_organs, self.organ_names)
return {}
class LiverSpleenPancreasMetricsPrinter(InferenceClass):
def __init__(self):
super().__init__()
def __call__(self, inference_pipeline):
results = inference_pipeline.organ_metrics
organs = list(results.keys())
name_dist = max([len(o) for o in organs])
metrics = []
for k in results[list(results.keys())[0]].keys():
if k != "Organ":
metrics.append(k)
units = ["cm^3", "HU", "HU"]
header = "{:<" + str(name_dist + 4) + "}" + ("{:<" + str(15) + "}") * len(metrics)
header = header.format("Organ", *[m + "(" + u + ")" for m, u in zip(metrics, units)])
base_print = "{:<" + str(name_dist + 4) + "}" + ("{:<" + str(15) + ".0f}") * len(metrics)
print("\n")
print(header)
for organ in results.values():
line = base_print.format(*organ.values())
print(line)
print("\n")
output_dir = inference_pipeline.output_dir
self.output_dir_metrics_organs = os.path.join(output_dir, "metrics/")
if not os.path.exists(self.output_dir_metrics_organs):
os.makedirs(self.output_dir_metrics_organs)
header = ",".join(["Organ"] + [m + "(" + u + ")" for m, u in zip(metrics, units)]) + "\n"
with open(
os.path.join(self.output_dir_metrics_organs, "liver_spleen_pancreas_metrics.csv"), "w"
) as f:
f.write(header)
for organ in results.values():
line = ",".join([str(v) for v in organ.values()]) + "\n"
f.write(line)
return {}
| 3,687 | 31.069565 | 98 | py |
Comp2Comp | Comp2Comp-master/comp2comp/liver_spleen_pancreas/liver_spleen_pancreas.py | import os
from pathlib import Path
from time import time
from typing import Union
from totalsegmentator.libs import (
download_pretrained_weights,
nostdout,
setup_nnunet,
)
from comp2comp.inference_class_base import InferenceClass
class LiverSpleenPancreasSegmentation(InferenceClass):
"""Organ segmentation."""
def __init__(self):
super().__init__()
# self.input_path = input_path
def __call__(self, inference_pipeline):
# inference_pipeline.dicom_series_path = self.input_path
self.output_dir = inference_pipeline.output_dir
self.output_dir_segmentations = os.path.join(self.output_dir, "segmentations/")
if not os.path.exists(self.output_dir_segmentations):
os.makedirs(self.output_dir_segmentations)
self.model_dir = inference_pipeline.model_dir
mv, seg = self.organ_seg(
os.path.join(self.output_dir_segmentations, "converted_dcm.nii.gz"),
self.output_dir_segmentations + "organs.nii.gz",
inference_pipeline.model_dir,
)
inference_pipeline.segmentation = seg
inference_pipeline.medical_volume = mv
return {}
def organ_seg(self, input_path: Union[str, Path], output_path: Union[str, Path], model_dir):
"""Run organ segmentation.
Args:
input_path (Union[str, Path]): Input path.
output_path (Union[str, Path]): Output path.
"""
print("Segmenting organs...")
st = time()
os.environ["SCRATCH"] = self.model_dir
# Setup nnunet
model = "3d_fullres"
folds = [0]
trainer = "nnUNetTrainerV2_ep4000_nomirror"
crop_path = None
task_id = [251]
setup_nnunet()
download_pretrained_weights(task_id[0])
from totalsegmentator.nnunet import nnUNet_predict_image
with nostdout():
seg, mvs = nnUNet_predict_image(
input_path,
output_path,
task_id,
model=model,
folds=folds,
trainer=trainer,
tta=False,
multilabel_image=True,
resample=1.5,
crop=None,
crop_path=crop_path,
task_name="total",
nora_tag="None",
preview=False,
nr_threads_resampling=1,
nr_threads_saving=6,
quiet=False,
verbose=True,
test=0,
)
end = time()
        # Log total time for organ segmentation
print(f"Total time for organ segmentation: {end-st:.2f}s.")
return seg, mvs
| 2,723 | 27.673684 | 96 | py |
Comp2Comp | Comp2Comp-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'comp2comp'
copyright = '2023, StanfordMIMI'
author = 'StanfordMIMI'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
# Adapted from https://github.com/pyvoxel/pyvoxel
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
"sphinxcontrib.bibtex",
"sphinx_rtd_theme",
"sphinx.ext.githubpages",
"m2r2",
]
autosummary_generate = True
autosummary_imported_members = True
bibtex_bibfiles = ["references.bib"]
templates_path = ['_templates']
exclude_patterns = []
pygments_style = "sphinx"
html_theme = "sphinx_rtd_theme"
htmlhelp_basename = "Comp2Compdoc"
html_static_path = ["_static"]
intersphinx_mapping = {"numpy": ("https://numpy.org/doc/stable/", None)}
html_theme_options = {"navigation_depth": 2}
source_suffix = [".rst", ".md"]
todo_include_todos = True
napoleon_use_ivar = True
napoleon_google_docstring = True
html_show_sourcelink = False
| 1,598 | 26.568966 | 85 | py |
g2p | g2p-master/setup.py | #from distutils.core import setup
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'g2p_en',
packages = ['g2p_en'], # this must be the same as the name above
version = '2.0.0',
description = 'A Simple Python Module for English Grapheme To Phoneme Conversion',
long_description=long_description,
author = 'Kyubyong Park & Jongseok Kim',
author_email = 'kbpark.linguist@gmail.com',
url = 'https://github.com/Kyubyong/g2p', # use the URL to the github repo
download_url = 'https://github.com/Kyubyong/g2p/archive/1.0.0.tar.gz', # I'll explain this in a second
keywords = ['g2p','g2p_en'], # arbitrary keywords
classifiers = [],
install_requires = [
'numpy>=1.13.1',
'nltk>=3.2.4',
'inflect>=0.3.1',
'distance>=0.1.3',
],
license='Apache Software License',
include_package_data=True
)
| 1,058 | 30.147059 | 104 | py |
g2p | g2p-master/g2p_en/expand.py | # -*- coding: utf-8 -*-
#/usr/bin/python2
'''
Borrowed
from https://github.com/keithito/tacotron/blob/master/text/numbers.py
By kyubyong park. kbpark.linguist@gmail.com.
https://www.github.com/kyubyong/g2p
'''
from __future__ import print_function
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
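if __name__ == '__main__':
    # Minimal illustration of the rules above (a sanity check, not part of
    # the library API): "$250" goes through _expand_dollars, then the
    # remaining digits through _expand_number.
    print(normalize_numbers("I have $250 and 16 cats."))
    # expected: I have two hundred fifty dollars and sixteen cats.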
| 2,491 | 30.15 | 99 | py |
g2p | g2p-master/g2p_en/__init__.py | from .g2p import G2p
| 21 | 10 | 20 | py |
g2p | g2p-master/g2p_en/g2p.py | # -*- coding: utf-8 -*-
# /usr/bin/python
'''
By kyubyong park(kbpark.linguist@gmail.com) and Jongseok Kim(https://github.com/ozmig77)
https://www.github.com/kyubyong/g2p
'''
from nltk import pos_tag
from nltk.corpus import cmudict
import nltk
from nltk.tokenize import TweetTokenizer
word_tokenize = TweetTokenizer().tokenize
import numpy as np
import codecs
import re
import os
import unicodedata
from builtins import str as unicode
from .expand import normalize_numbers
try:
nltk.data.find('taggers/averaged_perceptron_tagger.zip')
except LookupError:
nltk.download('averaged_perceptron_tagger')
try:
nltk.data.find('corpora/cmudict.zip')
except LookupError:
nltk.download('cmudict')
dirname = os.path.dirname(__file__)
def construct_homograph_dictionary():
f = os.path.join(dirname,'homographs.en')
homograph2features = dict()
for line in codecs.open(f, 'r', 'utf8').read().splitlines():
if line.startswith("#"): continue # comment
headword, pron1, pron2, pos1 = line.strip().split("|")
homograph2features[headword.lower()] = (pron1.split(), pron2.split(), pos1)
return homograph2features
# def segment(text):
# '''
# Splits text into `tokens`.
# :param text: A string.
# :return: A list of tokens (string).
# '''
# print(text)
# text = re.sub('([.,?!]( |$))', r' \1', text)
# print(text)
# return text.split()
class G2p(object):
def __init__(self):
super().__init__()
self.graphemes = ["<pad>", "<unk>", "</s>"] + list("abcdefghijklmnopqrstuvwxyz")
self.phonemes = ["<pad>", "<unk>", "<s>", "</s>"] + ['AA0', 'AA1', 'AA2', 'AE0', 'AE1', 'AE2', 'AH0', 'AH1', 'AH2', 'AO0',
'AO1', 'AO2', 'AW0', 'AW1', 'AW2', 'AY0', 'AY1', 'AY2', 'B', 'CH', 'D', 'DH',
'EH0', 'EH1', 'EH2', 'ER0', 'ER1', 'ER2', 'EY0', 'EY1',
'EY2', 'F', 'G', 'HH',
'IH0', 'IH1', 'IH2', 'IY0', 'IY1', 'IY2', 'JH', 'K', 'L',
'M', 'N', 'NG', 'OW0', 'OW1',
'OW2', 'OY0', 'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH',
'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH']
self.g2idx = {g: idx for idx, g in enumerate(self.graphemes)}
self.idx2g = {idx: g for idx, g in enumerate(self.graphemes)}
self.p2idx = {p: idx for idx, p in enumerate(self.phonemes)}
self.idx2p = {idx: p for idx, p in enumerate(self.phonemes)}
self.cmu = cmudict.dict()
self.load_variables()
self.homograph2features = construct_homograph_dictionary()
def load_variables(self):
self.variables = np.load(os.path.join(dirname,'checkpoint20.npz'))
self.enc_emb = self.variables["enc_emb"] # (29, 64). (len(graphemes), emb)
self.enc_w_ih = self.variables["enc_w_ih"] # (3*128, 64)
self.enc_w_hh = self.variables["enc_w_hh"] # (3*128, 128)
self.enc_b_ih = self.variables["enc_b_ih"] # (3*128,)
self.enc_b_hh = self.variables["enc_b_hh"] # (3*128,)
self.dec_emb = self.variables["dec_emb"] # (74, 64). (len(phonemes), emb)
self.dec_w_ih = self.variables["dec_w_ih"] # (3*128, 64)
self.dec_w_hh = self.variables["dec_w_hh"] # (3*128, 128)
self.dec_b_ih = self.variables["dec_b_ih"] # (3*128,)
self.dec_b_hh = self.variables["dec_b_hh"] # (3*128,)
self.fc_w = self.variables["fc_w"] # (74, 128)
self.fc_b = self.variables["fc_b"] # (74,)
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
    def grucell(self, x, h, w_ih, w_hh, b_ih, b_hh):
        # Single GRU step. The weight matrices pack the reset (r), update (z)
        # and candidate (n) projections, so each matmul yields all three.
        rzn_ih = np.matmul(x, w_ih.T) + b_ih
        rzn_hh = np.matmul(h, w_hh.T) + b_hh
        # First two thirds are the r/z gates, the last third is the candidate.
        rz_ih, n_ih = rzn_ih[:, :rzn_ih.shape[-1] * 2 // 3], rzn_ih[:, rzn_ih.shape[-1] * 2 // 3:]
        rz_hh, n_hh = rzn_hh[:, :rzn_hh.shape[-1] * 2 // 3], rzn_hh[:, rzn_hh.shape[-1] * 2 // 3:]
        rz = self.sigmoid(rz_ih + rz_hh)
        r, z = np.split(rz, 2, -1)
        n = np.tanh(n_ih + r * n_hh)  # candidate state, gated by reset
        h = (1 - z) * n + z * h  # blend candidate and previous hidden state
        return h
def gru(self, x, steps, w_ih, w_hh, b_ih, b_hh, h0=None):
if h0 is None:
h0 = np.zeros((x.shape[0], w_hh.shape[1]), np.float32)
h = h0 # initial hidden state
outputs = np.zeros((x.shape[0], steps, w_hh.shape[1]), np.float32)
for t in range(steps):
h = self.grucell(x[:, t, :], h, w_ih, w_hh, b_ih, b_hh) # (b, h)
outputs[:, t, ::] = h
return outputs
def encode(self, word):
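        # map characters to grapheme ids (unknown chars -> <unk>), append the
        # end-of-word marker, then look up the encoder embeddings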
chars = list(word) + ["</s>"]
x = [self.g2idx.get(char, self.g2idx["<unk>"]) for char in chars]
x = np.take(self.enc_emb, np.expand_dims(x, 0), axis=0)
return x
def predict(self, word):
# encoder
enc = self.encode(word)
enc = self.gru(enc, len(word) + 1, self.enc_w_ih, self.enc_w_hh,
self.enc_b_ih, self.enc_b_hh, h0=np.zeros((1, self.enc_w_hh.shape[-1]), np.float32))
last_hidden = enc[:, -1, :]
# decoder
dec = np.take(self.dec_emb, [2], axis=0) # 2: <s>
h = last_hidden
preds = []
for i in range(20):
h = self.grucell(dec, h, self.dec_w_ih, self.dec_w_hh, self.dec_b_ih, self.dec_b_hh) # (b, h)
logits = np.matmul(h, self.fc_w.T) + self.fc_b
pred = logits.argmax()
if pred == 3: break # 3: </s>
preds.append(pred)
dec = np.take(self.dec_emb, [pred], axis=0)
preds = [self.idx2p.get(idx, "<unk>") for idx in preds]
return preds
def __call__(self, text):
# preprocessing
text = unicode(text)
text = normalize_numbers(text)
text = ''.join(char for char in unicodedata.normalize('NFD', text)
if unicodedata.category(char) != 'Mn') # Strip accents
text = text.lower()
text = re.sub("[^ a-z'.,?!\-]", "", text)
text = text.replace("i.e.", "that is")
text = text.replace("e.g.", "for example")
# tokenization
words = word_tokenize(text)
tokens = pos_tag(words) # tuples of (word, tag)
        # per-token pronunciation lookup
prons = []
for word, pos in tokens:
if re.search("[a-z]", word) is None:
pron = [word]
elif word in self.homograph2features: # Check homograph
pron1, pron2, pos1 = self.homograph2features[word]
if pos.startswith(pos1):
pron = pron1
else:
pron = pron2
elif word in self.cmu: # lookup CMU dict
pron = self.cmu[word][0]
else: # predict for oov
pron = self.predict(word)
prons.extend(pron)
prons.extend([" "])
return prons[:-1]
if __name__ == '__main__':
texts = ["I have $250 in my pocket.", # number -> spell-out
"popular pets, e.g. cats and dogs", # e.g. -> for example
"I refuse to collect the refuse around here.", # homograph
"I'm an activationist."] # newly coined word
g2p = G2p()
for text in texts:
out = g2p(text)
print(out)
| 7,595 | 37.953846 | 138 | py |
spark-jobserver | spark-jobserver-master/job-server-python/__init__.py | 0 | 0 | 0 | py | |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/setup.py | from setuptools import setup, find_packages
import os
setup(
name="spark-jobserver-python",
version=os.getenv("SJS_VERSION", "NO_ENV"),
description=("The python modules required to "
"support PySpark jobs in Spark Job Server"),
url="https://github.com/spark-jobserver/spark-jobserver",
license="Apache License 2.0",
packages=find_packages(exclude=["test*", "example*"]),
install_requires=["pyhocon", "py4j"]
)
| 456 | 31.642857 | 61 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/setup-examples.py | from setuptools import setup, find_packages
import os
setup(
name='sjs-python-examples',
version=os.getenv('SJS_VERSION', 'NO_ENV'),
description='Examples of jobs for Spark Job Server',
url='https://github.com/spark-jobserver/spark-jobserver',
license='Apache License 2.0',
packages=find_packages(exclude=['test*', 'sparkkjob*']),
install_requires=['pyhocon', 'py4j']
)
| 427 | 31.923077 | 65 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/sparkjobserver/subprocess.py | """
This module is a runnable program designed to be called from
a JVM process in order to execute a Python Spark-Job-Server job.
It should be executed using a single argument, which is the port
number of the Py4J gateway client which the JVM application should
start before calling this program as a subprocess.
The JVM gateway should include an endpoint method through which
this program can retrieve an object containing information about
the Job to be run. Since Python is not strongly typed, the endpoint
can be any type of JVM object.
The case class `spark.jobserver.python.JobEndpoint`
implements all the methods that this program expects an endpoint to have.
"""
from __future__ import print_function
import sys
import os
from importlib import import_module
from py4j.java_gateway import JavaGateway, java_import, \
GatewayClient, GatewayParameters
from pyhocon import ConfigFactory
from pyspark.context import SparkContext, SparkConf
from pyspark.sql import SQLContext, HiveContext, SparkSession
from sparkjobserver.api import ValidationProblem, JobEnvironment
import traceback
def exit_with_failure(message, exit_code=1):
"""
Terminate the process with a specific message and error code
:param message: The message to write to stderr
    :param exit_code: The exit code with which to terminate
:return: N/A, the process terminates when this method is called
"""
print(message, file=sys.stderr)
sys.exit(exit_code)
def import_class(cls):
"""
Import a python class where its identity is not known until runtime.
:param cls: The fully qualified path of the class including module
prefixes, e.g. sparkjobserver.api.SparkJob
:return: The constructor for the class, as a function which can be
called to instantiate an instance.
"""
(module_name, class_name) = cls.rsplit('.', 1)
module = import_module(module_name)
c = getattr(module, class_name)
return c
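# For example (illustrative only):
#   job_cls = import_class('sparkjobserver.api.SparkJob')
#   job = job_cls()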
if __name__ == "__main__":
port = int(sys.argv[1])
auth_token = sys.argv[2]
gateway_parameters = GatewayParameters(
port=port, auto_convert=True, auth_token=auth_token)
gateway = JavaGateway(
gateway_parameters=gateway_parameters, auto_convert=True)
entry_point = gateway.entry_point
imports = entry_point.getPy4JImports()
for i in imports:
java_import(gateway.jvm, i)
context_config =\
ConfigFactory.parse_string(entry_point.contextConfigAsHocon())
job_id = entry_point.jobId()
job_env = JobEnvironment(job_id, None, context_config)
job_config = ConfigFactory.parse_string(entry_point.jobConfigAsHocon())
job_class = import_class(entry_point.jobClass())
job = job_class()
jcontext = entry_point.context()
jspark_conf = entry_point.sparkConf()
spark_conf = SparkConf(_jconf=jspark_conf)
context_class = jcontext.contextType()
context = None
sc = None
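    # Wrap the JVM context in the matching PySpark class. For the SQL-style
    # contexts a Python SparkContext is first rebuilt around the underlying
    # JavaSparkContext before the higher-level wrapper is constructed.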
if context_class == 'org.apache.spark.api.java.JavaSparkContext':
context = SparkContext(
gateway=gateway, jsc=jcontext, conf=spark_conf)
elif context_class == 'org.apache.spark.sql.SQLContext':
jsc = gateway.jvm.org.apache.spark.api.java.JavaSparkContext(
jcontext.sparkContext())
sc = SparkContext(gateway=gateway, jsc=jsc, conf=spark_conf)
ss = SparkSession(sc, jcontext.sparkSession())
context = SQLContext(sc, ss, jcontext)
elif context_class == 'org.apache.spark.sql.hive.HiveContext':
jsc = gateway.jvm.org.apache.spark.api.java.JavaSparkContext(
jcontext.sparkContext())
sc = SparkContext(gateway=gateway, jsc=jsc, conf=spark_conf)
context = HiveContext(sc, jcontext)
elif context_class == 'org.apache.spark.sql.SparkSession':
jsc = gateway.jvm.org.apache.spark.api.java.JavaSparkContext(
jcontext.sparkContext())
sc = SparkContext(gateway=gateway, jsc=jsc, conf=spark_conf)
context = SparkSession(sc, jcontext.spark())
else:
customContext = job.build_context(gateway, jcontext, spark_conf)
if customContext is not None:
context = customContext
else:
exit_with_failure(
"Expected JavaSparkContext, SQLContext "
"or HiveContext but received %s" % repr(context_class), 2)
package_path = os.environ.get("PACKAGEPATH", None)
if package_path and sc:
try:
sc.addPyFile(package_path)
except Exception as error:
exit_with_failure(
"Error while adding Python package to Spark Context: %s\n%s" %
(repr(error), traceback.format_exc()), 5)
try:
job_data = job.validate(context, None, job_config)
except Exception as error:
exit_with_failure(
"Error while calling 'validate': %s\n%s" %
(repr(error), traceback.format_exc()), 3)
if isinstance(job_data, list) and \
isinstance(job_data[0], ValidationProblem):
entry_point.setValidationProblems([p.problem for p in job_data])
exit_with_failure("Validation problems in job, exiting")
else:
try:
result = job.run_job(context, job_env, job_data)
except Exception as error:
exit_with_failure(
"Error while calling 'run_job': %s\n%s" %
(repr(error), traceback.format_exc()), 4)
entry_point.setResult(result)
| 5,450 | 39.679104 | 78 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/sparkjobserver/api.py | """
This module defines the interfaces for Python based
Spark Job Server Jobs. Due to Python's typing, jobs
do not need to inherit from these classes but they must
implement the relevant methods described in SparkJob.
"""
class SparkJob:
"""
The primary interface for Python jobs in SparkJob server.
A job implementation must implement `validate` and `run_job`.
`build_context` only needs to be implemented if using a custom
context.
"""
def __init__(self):
"""
All python jobs should have a zero-args constructor.
:return: an instance of this job
"""
pass
def validate(self, context, runtime, config):
"""
This method is called by the job server to allow jobs to validate their
input and reject invalid job requests.
:param context: the context to be used for the job. Could be a
SparkContext, SQLContext, HiveContext etc.
May be reused across jobs.
:param runtime: the JobEnvironment containing run time information
pertaining to the job and context.
:param config: the HOCON config object passed into the job request
:return: either JobData, which is parsed from config, or a list of
validation problems.
"""
raise NotImplementedError(
"Concrete implementations should override validate")
def run_job(self, context, runtime, data):
"""
Entry point for the execution of a job
:param context: the context to be used for the job.
SparkContext, SQLContext, HiveContext etc.
May be reused across jobs
:param runtime: the JobEnvironment containing run time information
pertaining to the job and context.
:param data: the JobData returned by the validate method
:return: the job result OR a list of ValidationProblem objects.
"""
raise NotImplementedError(
"Concrete implementations should override run_job")
def build_context(self, gateway, jvmContext, sparkConf):
"""
        For custom context types, the Python job needs to implement this method
        to convert the JVM context into its Python
equivalent. For jobs designed to work with JavaSparkContext, SQLContext
and HiveContext it is not necessary to implement this method
since the subprocess can handle those out of the box.
:param gateway: The Py4J gateway object
:param jvmContext: the JVM context object to be converted
(usually wrapped) into a Python context object.
:param sparkConf: The python form of the SparkConf object
:return: Should return a python context object of the appropriate type.
Will return None if not overridden.
"""
return None
class ValidationProblem:
"""
If the validation stage of a job fails, it MUST return a list of this type
of object. This is how the main program differentiates validation problems
from valid job data.
"""
def __init__(self, problem):
"""
:param problem: A string describing the problem
:return: An instance of this class
"""
self.problem = problem
def build_problems(problems):
"""
A helper method for converting a list of string problems into instances
of the validation problem class. It is important to return a list of the
correct type since otherwise it cannot be differentiated from a list of
job data.
:param problems: a list of strings describing the problems
:return: list of ValidationProblems, one for each string in the input
"""
return [ValidationProblem(p) for p in problems]
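# Illustrative sketch (hypothetical job, not part of the public API): a
# minimal implementation of the interface above. It echoes back the value
# found under 'input.value', or reports a validation problem if it is absent.
class _ExampleEchoJob(SparkJob):
    def validate(self, context, runtime, config):
        value = config.get('input.value', None)
        if value is None:
            return build_problems(['config input.value not found'])
        return value
    def run_job(self, context, runtime, data):
        return data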
class JobEnvironment:
"""
The analog of spark.jobserver.api.JobEnvironment in the JVM job API.
"""
def __init__(self, job_id, named_objects, context_config):
"""
:param job_id: identifier for this job, as a string
:param named_objects: NamedObjects not implemented, so should be None
:param context_config: the Hocon configuration of the current context
:return: an instance of this class
"""
self.jobId = job_id
self.namedObjects = named_objects
self.contextConfig = context_config
# NamedObjects not currently supported in Python,
# but below is a skeleton for a possible interface.
class NamedObject:
def __init__(self, obj, forceComputation, storage_level):
self.obj = obj
self.forceComputation = forceComputation
self.storage_level = storage_level
class NamedObjects:
def __init__(self):
pass
def get(self, name):
"""
Gets a named object (NObj) with the given name if it already exists
and is cached. If the NObj does not exist, None is returned.
Note that a previously-known name object could 'disappear' if it hasn't
been used for a while, because for example, the SparkContext
garbage-collects old cached RDDs.
:param name: the unique name of the NObj.
The uniqueness is scoped to the current SparkContext.
:return: the NamedObject with the given name.
"""
raise NotImplementedError(
"Concrete implementations should override get")
def get_or_else_create(self, name, obj_gen):
"""
Gets a named object (NObj) with the given name, or creates it if one
doesn't already exist.
If the given NObj has already been computed by another job and cached
in memory, this method will return a reference to the cached NObj.
If the NObj has never been computed, then the generator will be called
to compute it and the result will be cached and returned to the caller.
:param name: the unique name of the NObj.
The uniqueness is scoped to the current SparkContext.
:param obj_gen: a 0-ary function which will generate the NObj
if it doesn't already exist.
:return: the NamedObject with the given name.
"""
raise NotImplementedError(
"Concrete implementations should override get_or_else_create")
def update(self, name, obj_gen):
"""
Replaces an existing named object (NObj) with a given name with
a new object. If an old named object for the given name existed,
it is un-persisted (non-blocking) and destroyed.
:param name: The unique name of the object.
:param obj_gen: a 0-ary function which will be called to generate
the object.
:return: the NamedObject with the given name.
"""
raise NotImplementedError(
"Concrete implementations should override update")
def forget(self, name):
"""
Removes the named object with the given name, if one existed, from
the cache. Has no effect if no named object with this name exists.
The persister is not (!) asked to unpersist the object;
use destroy instead if that is desired.
:param name: the unique name of the object.
The uniqueness is scoped to the current SparkContext.
:return: nothing
"""
raise NotImplementedError(
"Concrete implementations should override forget")
def destroy(self, name):
"""
Destroys the named object with the given name, if one existed. The
reference to the object is removed from the cache and the persister
is asked asynchronously to unpersist the
object if it was found in the list of named objects.
Has no effect if no named object with this name is known to the cache.
:param name: the unique name of the object.
The uniqueness is scoped to the current SparkContext.
:return: nothing
"""
raise NotImplementedError(
"Concrete implementations should override destroy")
def get_names(self):
"""
Returns the names of all named object that are managed by the named
objects implementation.
Note: this returns a snapshot of object names at one point in time.
The caller should always expect that the data returned from this
method may be stale and incorrect.
:return: a list of string names representing objects managed by
the NamedObjects implementation.
"""
raise NotImplementedError(
"Concrete implementations should override get_names")
| 8,538 | 37.463964 | 79 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/sparkjobserver/__init__.py | 0 | 0 | 0 | py | |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/test/apitests.py | import errno
import os
import unittest
from pyhocon import ConfigFactory
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext, HiveContext
from sparkjobserver.api import SparkJob, build_problems, ValidationProblem
from py4j.java_gateway import java_import
def silentremove(filename):
try:
os.remove(filename)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
class WordCountSparkJob(SparkJob):
"""
Simple example of a SparkContext job for use in tests
"""
def validate(self, context, runtime, config):
if config.get('input.strings', None):
return config.get('input.strings')
else:
return build_problems(['config input.strings not found'])
def run_job(self, context, runtime, data):
return context.parallelize(data).countByValue()
class SQLJob(SparkJob):
"""
Simple example of a Spark SQL job for use in tests.
Could be either SQLContext or HiveContext
"""
def validate(self, context, runtime, config):
problems = []
job_data = None
if not isinstance(context, SQLContext):
problems.append('Expected a SQL context')
if config.get('input.data', None):
job_data = config.get('input.data')
else:
problems.append('config input.data not found')
if len(problems) == 0:
return job_data
else:
return build_problems(problems)
def run_job(self, context, runtime, data):
rdd = context._sc.parallelize(data)
        df = context.createDataFrame(rdd, ['name', 'age', 'salary'])
df.registerTempTable('people')
query = context.sql("""
SELECT age, AVG(salary)
from people GROUP BY age ORDER BY age""")
results = query.collect()
return [(r[0], r[1]) for r in results]
class TestSJSApi(unittest.TestCase):
def setUp(self):
conf = SparkConf().setAppName('test').setMaster('local[*]')
pwd = os.path.dirname(os.path.realpath(__file__))
metastore_dir = os.path.abspath(os.path.join(pwd, '..',
'metastore_db'))
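        # clear stale Derby lock files so a leftover Hive metastore from a
        # previous run cannot block this test session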
silentremove(os.path.join(metastore_dir, "dbex.lck"))
silentremove(os.path.join(metastore_dir, "db.lck"))
self.sc = SparkContext(conf=conf)
self.jvm = self.sc._gateway.jvm
java_import(self.jvm, "org.apache.spark.sql.*")
def tearDown(self):
self.sc.stop()
def test_validation_failure(self):
job = WordCountSparkJob()
result = job.validate(self.sc, None, ConfigFactory.parse_string(""))
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertTrue(isinstance(result[0], ValidationProblem))
def test_validation_success(self):
job = WordCountSparkJob()
result = job.validate(
self.sc,
None,
ConfigFactory.parse_string('input.strings = ["a", "a", "b"]'))
self.assertEqual(result, ['a', 'a', 'b'])
def test_run_job(self):
job = WordCountSparkJob()
jobData = job.validate(
            self.sc,
            None,
ConfigFactory.parse_string('input.strings = ["a", "a", "b"]'))
result = job.run_job(self.sc, None, jobData)
self.assertEqual(result['a'], 2)
self.assertEqual(result['b'], 1)
def test_sql_job_validation_failure(self):
job = SQLJob()
result = job.validate(self.sc, None, ConfigFactory.parse_string(""))
self.assertTrue(isinstance(result, list))
self.assertEqual(2, len(result))
self.assertTrue(isinstance(result[0], ValidationProblem))
self.assertTrue(isinstance(result[1], ValidationProblem))
self.assertEqual('Expected a SQL context', result[0].problem)
self.assertEqual('config input.data not found', result[1].problem)
def test_run_sql_job(self):
job = SQLJob()
sqlContext = SQLContext(self.sc)
config = ConfigFactory.parse_string("""
input.data = [
['bob', 20, 1200],
['jon', 21, 1400],
['mary', 20, 1300],
            ['sue', 21, 1600]
]
""")
jobData = job.validate(sqlContext, None, config)
result = job.run_job(sqlContext, None, jobData)
self.assertEqual([(20, 1250), (21, 1500)], result)
# Note: The following testcase will fail if Hive/Hadoop versions
# are not compatible e.g. Hadoop 3.2.0 is not compatible with
# Hive version brought by spark 2.4.4.
def test_run_hive_job(self):
job = SQLJob()
sqlContext = HiveContext(self.sc)
config = ConfigFactory.parse_string("""
input.data = [
['bob', 20, 1200],
['jon', 21, 1400],
['mary', 20, 1300],
            ['sue', 21, 1600]
]
""")
jobData = job.validate(sqlContext, None, config)
result = job.run_job(sqlContext, None, jobData)
self.assertEqual([(20, 1250), (21, 1500)], result)
if __name__ == "__main__":
unittest.main()
| 5,202 | 33.230263 | 78 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/example_jobs/__init__.py | 0 | 0 | 0 | py | |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/example_jobs/hive_support_job/__init__.py | from sparkjobserver.api import SparkJob, build_problems
class HiveSupportJob(SparkJob):
def validate(self, context, runtime, config):
return None
def run_job(self, context, runtime, data):
query = 'CREATE TABLE IF NOT EXISTS check_support ' \
'(key INT, value STRING) USING hive'
context.sql(query)
| 350 | 28.25 | 61 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/example_jobs/sql_two_jobs/__init__.py | from sparkjobserver.api import SparkJob, build_problems
from pyspark.sql import SQLContext
class Job1(SparkJob):
def validate(self, context, runtime, config):
problems = []
job_data = None
if not isinstance(context, SQLContext):
problems.append('Expected a SQL context')
if config.get('input.data', None):
job_data = config.get('input.data')
else:
problems.append('config input.data not found')
if len(problems) == 0:
return job_data
else:
return build_problems(problems)
def run_job(self, context, runtime, data):
rdd = context._sc.parallelize(data)
# defining the temp table in terms of data written to disk,
# since basing it off a dataframe made up of parallelizing
# an in-process list within a Python job causes
# problems when a different job, constituting a different
# Python process, tries to use that dataframe.
context.createDataFrame(rdd, ['name', 'age', 'salary']).\
write.save("/tmp/people.parquet", mode='overwrite')
context.read.load('/tmp/people.parquet').\
createOrReplaceTempView('people_table')
return "done"
class Job2(SparkJob):
def validate(self, context, runtime, config):
problems = []
job_data = None
if not isinstance(context, SQLContext):
problems.append('Expected a SQL context')
if 'people_table' in context.tableNames():
job_data = ""
else:
problems.append("expect 'people_table' table to "
"have been created by earlier job")
if len(problems) == 0:
return job_data
else:
return build_problems(problems)
def run_job(self, context, runtime, data):
query = context.sql("""
SELECT age, AVG(salary)
FROM people_table GROUP BY age ORDER BY age""")
results = query.collect()
return results
| 2,036 | 34.12069 | 67 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/example_jobs/custom_context_job/__init__.py | from sparkjobserver.api import SparkJob, build_problems
from pyspark import SparkContext
class CustomContext(SparkContext):
def __init__(self, gateway, customContext, sparkConf):
self.jcustomContext = customContext
SparkContext.__init__(
self, gateway=gateway, jsc=customContext, conf=sparkConf)
def customMethod(self):
return self.jcustomContext.customMethod()
class CustomContextJob(SparkJob):
def validate(self, context, runtime, config):
if config.get('input.strings', None):
return config.get('input.strings')
else:
return build_problems(['config input.strings not found'])
def run_job(self, context, runtime, data):
count = context.parallelize(data).count()
return context.customMethod() + " " + str(count)
def build_context(self, gateway, jvmContext, sparkConf):
return CustomContext(gateway, jvmContext, sparkConf)
| 956 | 30.9 | 73 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/example_jobs/session_window/__init__.py | from sparkjobserver.api import SparkJob, build_problems
from pyspark.sql import SparkSession
class SessionWindowJob(SparkJob):
def validate(self, context, runtime, config):
problems = []
job_data = None
if not isinstance(context, SparkSession):
problems.append('Expected a SparkSession context')
if config.get('input.data', None):
job_data = config.get('input.data')
else:
problems.append('config input.data not found')
if len(problems) == 0:
return job_data
else:
return build_problems(problems)
def run_job(self, context, runtime, data):
rdd = context.sparkContext.parallelize(data)
df = context.createDataFrame(rdd, ['name', 'age', 'salary'])
df.registerTempTable('people')
# Window functions only available on
# HiveContext so differentiates from a SQLContext job
query = context.sql("""
SELECT name, age, RANK() OVER (partition by age order by name)
FROM people ORDER BY age
""")
results = query.collect()
return [(r[0], r[1], r[2]) for r in results]
| 1,176 | 34.666667 | 72 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/example_jobs/failing_job/__init__.py | from sparkjobserver.api import SparkJob, build_problems
class FailingRunJob(SparkJob):
def validate(self, context, runtime, config):
return "fine"
def run_job(self, context, runtime, data):
raise Exception("Deliberate failure")
class FailingValidateJob(SparkJob):
def validate(self, context, runtime, config):
raise Exception("Deliberate failure")
def run_job(self, context, runtime, data):
pass
| 452 | 21.65 | 55 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/example_jobs/word_count/__init__.py | from sparkjobserver.api import SparkJob, build_problems
class WordCountSparkJob(SparkJob):
def validate(self, context, runtime, config):
if config.get('input.strings', None):
return config.get('input.strings')
else:
return build_problems(['config input.strings not found'])
def run_job(self, context, runtime, data):
return context.parallelize(data).countByValue()
class FailingSparkJob(SparkJob):
"""
Simple example of a SparkContext job that fails
with an exception for use in tests
"""
def validate(self, context, runtime, config):
if config.get('input.strings', None):
return config.get('input.strings')
else:
return build_problems(['config input.strings not found'])
def run_job(self, context, runtime, data):
raise ValueError('Deliberate failure')
class WordCountSparkSessionJob(SparkJob):
def validate(self, context, runtime, config):
if config.get('input.strings', None):
return config.get('input.strings')
else:
return build_problems(['config input.strings not found'])
def run_job(self, context, runtime, data):
return context.sparkContext.parallelize(data).countByValue()
| 1,279 | 29.47619 | 69 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/example_jobs/hive_window/__init__.py | from sparkjobserver.api import SparkJob, build_problems
from pyspark.sql import HiveContext
class HiveWindowJob(SparkJob):
def validate(self, context, runtime, config):
problems = []
job_data = None
if not isinstance(context, HiveContext):
problems.append('Expected a HiveContext context')
if config.get('input.data', None):
job_data = config.get('input.data')
else:
problems.append('config input.data not found')
if len(problems) == 0:
return job_data
else:
return build_problems(problems)
def run_job(self, context, runtime, data):
rdd = context._sc.parallelize(data)
df = context.createDataFrame(rdd, ['name', 'age', 'salary'])
df.registerTempTable('people')
# Window functions only available on
# HiveContext so differentiates from a SQLContext job
query = context.sql("""
SELECT name, age, RANK() OVER (partition by age order by name)
FROM people ORDER BY age
""")
results = query.collect()
return [(r[0], r[1], r[2]) for r in results]
| 1,161 | 34.212121 | 72 | py |
spark-jobserver | spark-jobserver-master/job-server-python/src/python/example_jobs/sql_average/__init__.py | from sparkjobserver.api import SparkJob, build_problems
from pyspark.sql import SQLContext
class SQLAverageJob(SparkJob):
def validate(self, context, runtime, config):
problems = []
job_data = None
if not isinstance(context, SQLContext):
problems.append('Expected a SQL context')
if config.get('input.data', None):
job_data = config.get('input.data')
else:
problems.append('config input.data not found')
if len(problems) == 0:
return job_data
else:
return build_problems(problems)
def run_job(self, context, runtime, data):
rdd = context._sc.parallelize(data)
df = context.createDataFrame(rdd, ['name', 'age', 'salary'])
df.registerTempTable('people')
query = context.sql("SELECT age, AVG(salary) "
"from people GROUP BY age ORDER BY age")
results = query.collect()
return [(r[0], r[1]) for r in results]
| 1,009 | 33.827586 | 68 | py |
igmspec | igmspec-master/setup.py | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
#
# Standard imports
#
import glob, os
from distutils.extension import Extension
#
# setuptools' sdist command ignores MANIFEST.in
#
#from distutils.command.sdist import sdist as DistutilsSdist
from setuptools import setup
#
# DESI support code.
#
#from desiutil.setup import DesiTest, DesiVersion, get_version
#
# Begin setup
#
setup_keywords = dict()
#
# THESE SETTINGS NEED TO BE CHANGED FOR EVERY PRODUCT.
#
setup_keywords['name'] = 'igmspec'
setup_keywords['description'] = 'IGM Spectra Database'
setup_keywords['author'] = 'IGM Community'
setup_keywords['author_email'] = 'xavier@ucolick.org'
setup_keywords['license'] = 'BSD'
setup_keywords['url'] = 'https://github.com/pyigm/igmspec'
#
# END OF SETTINGS THAT NEED TO BE CHANGED.
#
setup_keywords['version'] = '0.2.dev0' #get_version(setup_keywords['name'])
#
# Use README.rst as long_description.
#
setup_keywords['long_description'] = ''
if os.path.exists('README.md'):
with open('README.md') as readme:
setup_keywords['long_description'] = readme.read()
#
# Set other keywords for the setup function. These are automated, & should
# be left alone unless you are an expert.
#
# Treat everything in bin/ except *.rst as a script to be installed.
#
if os.path.isdir('bin'):
setup_keywords['scripts'] = [fname for fname in glob.glob(os.path.join('bin', '*'))
if not os.path.basename(fname).endswith('.rst')]
setup_keywords['provides'] = [setup_keywords['name']]
setup_keywords['requires'] = ['Python (>2.7.0)']
# setup_keywords['install_requires'] = ['Python (>2.7.0)']
setup_keywords['zip_safe'] = False
setup_keywords['use_2to3'] = False
setup_keywords['packages'] = ['igmspec']
#setup_keywords['package_dir'] = {'':''}
#setup_keywords['cmdclass'] = {'version': DesiVersion, 'test': DesiTest, 'sdist': DistutilsSdist}
#setup_keywords['test_suite']='{name}.tests.{name}_test_suite.{name}_test_suite'.format(**setup_keywords)
setup_keywords['setup_requires']=['pytest-runner']
setup_keywords['tests_require']=['pytest']
# Autogenerate command-line scripts.
#
# setup_keywords['entry_points'] = {'console_scripts':['desiInstall = desiutil.install.main:main']}
#
# Add internal data directories.
#
data_files = []
# walk through the data directory, adding all files
data_generator = os.walk('igmspec/data')
for path, directories, files in data_generator:
for f in files:
data_path = '/'.join(path.split('/')[1:])
data_files.append(data_path + '/' + f)
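        # e.g. (illustrative names) path 'igmspec/data/tst' + file 'foo.dat'
        # is recorded as 'data/tst/foo.dat'; the package directory is stripped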
setup_keywords['package_data'] = {'igmspec': data_files,
'': ['*.rst', '*.txt']}
setup_keywords['include_package_data'] = True
#
# Run setup command.
#
setup(**setup_keywords)
| 2,817 | 31.022727 | 104 | py |
igmspec | igmspec-master/timing/time_indiv_spec.py | """ Test time to load spectra one by one
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import cProfile, pstats
from astropy.coordinates import SkyCoord
from igmspec.igmspec import IgmSpec
def time_coord_to_spec(survey='HD-LLS_DR1', ntrials=1000, seed=123):
""" Time the process of grabbing spectra given an input coordinate
Parameters
----------
survey : str, optional
Survey to test on
    ntrials : int, optional
      Number of random draws from the survey meta table
    seed : int, optional
      Random seed
Returns
-------
"""
# Init
igmsp = IgmSpec()
rstate = np.random.RandomState(seed)
# Grab survey
meta = igmsp.idb.grab_meta(survey)
coords = SkyCoord(ra=meta['RA'], dec=meta['DEC'], unit='deg')
rani = rstate.randint(0,len(meta),ntrials)
# Loop
for ii in rani:
coord = coords[ii]
# Grab
speclist, meta = igmsp.spec_from_coord(coord, isurvey=[survey])
# Command line execution
if __name__ == '__main__':
#cProfile.run('time_coord_to_spec(ntrials=100)')
cProfile.run('time_coord_to_spec(ntrials=100)', 'coord_to_spec.stats')
stats = pstats.Stats('coord_to_spec.stats')
stats.strip_dirs()
stats.sort_stats('cumulative')
stats.print_stats()
| 1,241 | 23.352941 | 82 | py |
igmspec | igmspec-master/igmspec/defs.py | """ Module for key definitions in the IGMspec database
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
from collections import OrderedDict
from astropy import units as u
def z_priority():
""" List of redshift priorities for setting the DB redshift
See also myers.zbest_myers
Returns
-------
zpri : list
"""
zpri = [
str('GGG'), # GGG redshifts
str('SDSS-HW'), # SDSS redshifts with Hewitt&Wild
str('BOSS_PCA'), # PCA analysis by Paris et al. 2015 on BOSS spectra
str('XQ-100'), # XQ-100 redshifts
str('BOSS_PIPE'), # BOSS Pipeline redshifts
str('2QZ'), #
str('2SLAQ'), #
str('AUS'),
str('AGES'),
str('COSMOS'),
str('FAN'),
str('MMT'),
str('PAPOVICH'),
str('GLIKMAN'),
str('MADDOX'),
str('LAMOST'),
str('MCGREER'),
str('VCV'),
str('ALLBOSS'),
str('Dall08'), # Dall'Aglio et al. 2008
str('UNKN'), # Unknown
]
return zpri
def get_cat_dict():
""" Definitions for the catalog
Returns
-------
"""
cdict = dict(match_toler=2*u.arcsec)
return cdict
def get_ssa_dict():
""" Return the survey dict
Returns
-------
"""
ssa_dict = dict(Title='BOSS DR12 Quasars', Publisher='JXP',
FluxUcd='phot.fluDens;em.wl',
FluxUnit='erg s**(-1) angstrom**(-1)',
SpecUcd='em.wl',
SpecUnit='Angstrom',
)
survey_dict = OrderedDict()
survey_dict['BOSS_DR12'] = 1
survey_dict['SDSS_DR7'] = 2
survey_dict['KODIAQ_DR1'] = 4 # O'Meara et al. 2016
survey_dict['HD-LLS_DR1'] = 8 # Prochaska et al. 2015
survey_dict['GGG'] = 16 # Worseck et al. 201X
survey_dict['HST_z2'] = 2**5 # O'Meara et al. 2011
survey_dict['XQ-100'] = 2**6 # Lopez et al. 2016
survey_dict['HDLA100'] = 2**7 # Neeleman et al. 2013
survey_dict['2QZ'] = 2**8 # Croom et al.
survey_dict['ESI_DLA'] = 2**9 # Rafelski et al. 2012, 2014
survey_dict['COS-Halos'] = 2**10 # Tumlinson et al. 2013
survey_dict['COS-Dwarfs'] = 2**11 # Bordoloi et al. 2014
survey_dict['HSTQSO'] = 2**12 # Ribaudo et al. 2011; Neeleman et al. 2016
survey_dict['MUSoDLA'] = 2**13 # Jorgensen et al. 2013
survey_dict['UVES_Dall'] = 2**14 # Dall'Aglio et al. 2008
survey_dict['UVpSM4'] = 2**15 # Cooksey et al. 2010, 2011
#
return survey_dict
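# Minimal sketch, not part of the original module: decode a combined
# flag_survey bitmask back into group names using the dict above.
def decode_flag_survey(flag):
    """e.g. decode_flag_survey(3) -> ['BOSS_DR12', 'SDSS_DR7']"""
    sdict = get_ssa_dict()
    return [name for name, bit in sdict.items() if flag & bit]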
| 2,623 | 28.483146 | 82 | py |
igmspec | igmspec-master/igmspec/__init__.py | 0 | 0 | 0 | py | |
igmspec | igmspec-master/igmspec/chk_pairs.py | """ Module to check for pairs in igmspec
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
from specdb.specdb import IgmSpec
from astropy import units as u
from astropy.coordinates import match_coordinates_sky, SkyCoord
from astropy.table import Table
def skip_gd_pair():
""" Currently up-to-date with v02
Returns
-------
stbl : Table
Table of good pairs
"""
skip = [[10.9697, 4.4073],
[15.3189, 2.0326],
[23.7452, 24.5140],
[28.4283, 20.9148],
[35.1739, 1.1985],
[40.0218, -0.6527],
[41.2998, -1.2216],
[41.9780, 0.6380],
[123.3026, 54.2806],
[126.6732, 45.7450],
[131.5022, 7.0747],
[150.3681, 50.4663],
[150.3362, 55.8989], # FOS lens
[158.2551, 47.2532],
[164.0395, 55.2669],
[170.1281, 54.7426],
[176.7206, 16.7400],
[188.5052, 6.5367],
[190.7380, 25.7174],
[193.7286, 8.7812],
[196.9841, 4.3710],
[198.7737, 47.9047],
[201.3239, 37.6164],
[211.2581, 44.8000],
[222.7320, 47.0272],
[238.3773, 22.5040],
[243.2571, 8.1350],
[253.7555, 26.0882],
[357.0800, 0.9549],
[116.9959, 43.3015],
[184.6687, 50.2621],
[166.63912, -18.35661], # FOS lens
[166.6396, -18.3567], # FOS lens
[216.9947, -1.3601],
[9.9763, -27.4229], # 2QZ pair
[341.6578, -29.4963], # 2QZ pair
]
# Table
sa = np.array(skip)
stbl = Table()
stbl['RA'] = sa[:,0]
stbl['DEC'] = sa[:,1]
# Return
return stbl
def chk_for_pairs(maindb, pair_sep=10*u.arcsec):
""" Generate new IGM_IDs for an input DB
Parameters
----------
maindb : Table
    pair_sep : Quantity, optional
    Returns
    -------
    cand_pairs : ndarray
      Indices of sources whose nearest neighbor lies within pair_sep
"""
c_main = SkyCoord(ra=maindb['RA'], dec=maindb['DEC'], unit='deg')
# Find candidate dups
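    # nthneighbor=2 returns each source's closest *other* source, since
    # nthneighbor=1 would just match each source to itself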
idx, d2d, d3d = match_coordinates_sky(c_main, c_main, nthneighbor=2)
cand_pairs = np.where(d2d < pair_sep)[0]
# Finish
print("There are {:d} potential pairs with separation theta<{:g}".format(len(cand_pairs)//2,pair_sep))
return cand_pairs
def chk_v02(pair_sep=10*u.arcsec):
""" Check v02 for pairs
Returns
-------
"""
print("checking..")
igmsp = IgmSpec()
# Grab candidate pairs
cpairs = chk_for_pairs(igmsp.qcat.cat, pair_sep=pair_sep)
# Coords
c_main = SkyCoord(ra=igmsp.qcat.cat['RA'], dec=igmsp.qcat.cat['DEC'], unit='deg')
# Skip
stbl = skip_gd_pair()
# Loop
flg_cp = np.array([False]*len(igmsp.qcat.cat))
for qq, cpair in enumerate(cpairs):
# Skip those already done
if flg_cp[cpair]:
continue
# Find the matches
sep = c_main[cpair].separation(c_main)
pairs = sep < pair_sep
flg_cp[pairs] = True
# Skip pairs with very different zem
if np.sum(pairs) == 2:
zem = igmsp.qcat.cat['zem'][pairs]
if np.abs(zem[0]-zem[1]) > 0.1:
continue
# Both BOSS?
if (igmsp.qcat.cat['flag_survey'][pairs][0] == 1.) & (
igmsp.qcat.cat['flag_survey'][pairs][1] == 1.):
continue
# Skip table?
if np.min(np.abs(igmsp.qcat.cat['RA'][pairs][0]-stbl['RA'])) < 1e-4:
continue
# XQ-100? -- These have bad coords but have been matched
if igmsp.qcat.cat['flag_survey'][pairs][1] == 64.:
pdb.set_trace()
# Print
print('qq = {:d}'.format(qq))
print(igmsp.qcat.cat[['RA','DEC','IGM_ID','zem','flag_survey']][pairs])
print(sep.to('arcsec')[pairs])
pdb.set_trace()
# All clear?
print("All clear..")
# Command line execution
if __name__ == '__main__':
import sys
if len(sys.argv) == 1: #
flg = 0
flg += 2**0 # v02
else:
        flg = int(sys.argv[1])
# SLLS Ions
if (flg % 2**1) >= 2**0:
#mk_lls_dr1(wrspec=False)
chk_v02() | 4,181 | 26.333333 | 106 | py |
igmspec | igmspec-master/igmspec/setup_package.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
return {'igmspec.tests': ['files/*']}
| 132 | 21.166667 | 63 | py |
igmspec | igmspec-master/igmspec/build_db.py | """ Module to build the hdf5 database file for IGMspec
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, warnings
import h5py
import json
import datetime
import pdb
from collections import OrderedDict
from specdb import defs
from specdb.build import utils as sdbbu
import igmspec
from igmspec.ingest import boss, hdlls, kodiaq, ggg, sdss, hst_z2, myers, twodf, xq100
from igmspec.ingest import hdla100
from igmspec.ingest import esidla
from igmspec.ingest import cos_halos
from igmspec.ingest import hst_qso
from igmspec.ingest import hst_cooksey as hst_c
from igmspec.ingest import cos_dwarfs
from igmspec.ingest import musodla
from igmspec.ingest import uves_dall
from igmspec.ingest import boss_dr14
from igmspec.ingest import esi_z6
from igmspec.ingest import kodiaq_two
from igmspec.ingest import uves_squad
from astropy.table import Table
def ver01(test=False, clobber=False, publisher='J.X. Prochaska', **kwargs):
""" Build version 1.0
Parameters
----------
test : bool, optional
Run test only
Returns
-------
"""
pdb.set_trace() # THIS VERSION IS NOW FROZEN
raise IOError("THIS VERSION IS NOW FROZEN")
version = 'v01'
# HDF5 file
outfil = igmspec.__path__[0]+'/../DB/IGMspec_DB_{:s}.hdf5'.format(version)
# Chk clobber
if os.path.isfile(outfil):
if clobber:
warnings.warn("Overwriting previous DB file {:s}".format(outfil))
else:
warnings.warn("Not overwiting previous DB file. Use clobber=True to do so")
return
# Begin
hdf = h5py.File(outfil,'w')
''' Myers QSOs '''
myers.orig_add_to_hdf(hdf)
# Main DB Table
idkey = 'IGM_ID'
maindb, tkeys = sdbbu.start_maindb(idkey)
# Group dict
group_dict = {}
# Organize for main loop
groups = get_build_groups(version)
pair_groups = ['SDSS_DR7']
meta_only = False
# Loop over the groups
for gname in groups:
# Meta
if gname == 'SDSS_DR7':
meta = groups[gname].grab_meta(hdf)
else:
meta = groups[gname].grab_meta()
# Survey flag
flag_g = sdbbu.add_to_group_dict(gname, group_dict)
# IDs
maindb = sdbbu.add_ids(maindb, meta, flag_g, tkeys, idkey,
first=(flag_g==1), close_pairs=(gname in pair_groups))
# Spectra
if not meta_only:
groups[gname].hdf5_adddata(hdf, gname, meta, idkey)
# Check for duplicates -- There is 1 pair in SDSS (i.e. 2 duplicates)
if not sdbbu.chk_for_duplicates(maindb, dup_lim=2):
raise ValueError("Failed duplicates")
# Check for junk
zpri = defs.z_priority()
# Finish
sdbbu.write_hdf(hdf, str('igmspec'), maindb, zpri,
group_dict, version, Publisher=str(publisher))
print("Wrote {:s} DB file".format(outfil))
print("Update DB info in specdb.defs.dbase_info !!")
def ver02(test=False, skip_copy=False, publisher='J.X. Prochaska', clobber=False,
version='v02', out_path=None):
""" Build version 2.X
Reads previous datasets from v1.X
Parameters
----------
test : bool, optional
Run test only
skip_copy : bool, optional
Skip copying the data from v01
Returns
-------
"""
import os
from specdb.specdb import IgmSpec
# Read v01
v01file = os.getenv('SPECDB')+'/IGMspec_DB_v01.hdf5'
#v01file_debug = igmspec.__path__[0]+'/tests/files/IGMspec_DB_v01_debug.hdf5'
print("Loading v01")
igmsp_v01 = IgmSpec(db_file=v01file)
v01hdf = igmsp_v01.hdf
maindb = igmsp_v01.cat.copy()
# Start new file
if out_path is None:
out_path = igmspec.__path__[0]+'/../DB/'
outfil = out_path + 'IGMspec_DB_{:s}.hdf5'.format(version)
# Clobber?
if not chk_clobber(outfil, clobber=clobber):
return
# Begin
hdf = h5py.File(outfil,'w')
# Copy over the old stuff
redo_groups = ['HD-LLS_DR1']
skip_groups = []#'BOSS_DR12', 'SDSS_DR7'] #warnings.warn("NEED TO PUT BACK SDSS AND BOSS!")
skip_copy = False
if (not test) and (not skip_copy):
old_groups = get_build_groups('v01')
for key in v01hdf.keys():
if key in ['catalog','quasars']+redo_groups+skip_groups:
continue
else:
#v01hdf.copy(key, hdf) # ONE STOP SHOPPING
grp = hdf.create_group(key)
# Copy spectra
v01hdf.copy(key+'/spec', hdf[key])
# Modify v01 meta and add
if key == 'BOSS_DR12':
meta = boss.add_coflag(v01hdf)
else:
meta = Table(v01hdf[key+'/meta'].value)
meta.rename_column('GRATING', 'DISPERSER')
hdf[key+'/meta'] = meta
for akey in v01hdf[key+'/meta'].attrs.keys():
hdf[key+'/meta'].attrs[akey] = v01hdf[key+'/meta'].attrs[akey]
# SSA info
old_groups[key].add_ssa(hdf, key)
skip_myers = False
if skip_myers:
warnings.warn("NEED TO INCLUDE MYERS!")
else:
myers.add_to_hdf(hdf)
# Setup groups
old_groups = get_build_groups('v01')
pair_groups = []
group_dict = igmsp_v01.qcat.group_dict
# Set/Check keys (and set idkey internally for other checks)
idkey = 'IGM_ID'
_, tkeys = sdbbu.start_maindb(idkey)
mkeys = list(maindb.keys())
for key in tkeys:
assert key in mkeys
# Loop over the old groups to update (as needed)
new_IDs = False
for gname in redo_groups:
print("Working to replace meta/spec for group: {:s}".format(gname))
# Meta
meta = old_groups[gname].grab_meta()
# Group flag
flag_g = group_dict[gname]
# IDs
if new_IDs:
pdb.set_trace() # NOT READY FOR THIS
#maindb = sdbbu.add_ids(maindb, meta, flag_g, tkeys, idkey,
# first=(flag_g==1), close_pairs=(gname in pair_groups))
else:
_, _, ids = sdbbu.set_new_ids(maindb, meta, idkey)
# Spectra
old_groups[gname].hdf5_adddata(hdf, gname, meta)
old_groups[gname].add_ssa(hdf, gname)
meta_only = False
new_groups = get_build_groups(version)
# Loop over the new groups
for gname in new_groups:
print("Working on group: {:s}".format(gname))
# Meta
meta = new_groups[gname].grab_meta()
# Survey flag
flag_g = sdbbu.add_to_group_dict(gname, group_dict, skip_for_debug=True)
# IDs
debug= False
#if gname == 'XQ-100':
# debug = True
maindb = sdbbu.add_ids(maindb, meta, flag_g, tkeys, idkey,
first=(flag_g==1), close_pairs=(gname in pair_groups),
debug=debug)
# Spectra
if not meta_only:
new_groups[gname].hdf5_adddata(hdf, gname, meta)
new_groups[gname].add_ssa(hdf, gname)
# Check for duplicates -- There is 1 pair in SDSS (i.e. 2 duplicates)
if not sdbbu.chk_for_duplicates(maindb, dup_lim=2):
raise ValueError("Failed duplicates")
# Check stacking
if not sdbbu.chk_vstack(hdf):
print("Meta data will not stack using specdb.utils.clean_vstack")
print("Proceed to write at your own risk..")
pdb.set_trace()
# Finish
zpri = v01hdf['catalog'].attrs['Z_PRIORITY']
    sdbbu.write_hdf(hdf, str('igmspec'), maindb, zpri,
group_dict, version, Publisher=str(publisher))
print("Wrote {:s} DB file".format(outfil))
print("Update DB info in specdb.defs.dbase_info !!")
def ver03(test=False, skip_copy=False, publisher='J.X. Prochaska', clobber=False,
version='v03.1', out_path=None, redo_dr14=False):
""" Build version 3.X
    Reads several previous datasets from v2.X (and v3.0 unless redo_dr14=True)
Remakes the maindb using BOSS DR14 as the main driver
v3.0 will be BOSS DR14 only to speed up generation of the rest
Parameters
----------
test : bool, optional
Run test only
skip_copy : bool, optional
      Skip copying the data from v02
Returns
-------
"""
import os
from specdb.specdb import IgmSpec
# Read v02
v02file = os.getenv('SPECDB')+'/IGMspec_DB_v02.1.hdf5'
igmsp_v02 = IgmSpec(db_file=v02file)
v02hdf = igmsp_v02.hdf
# Start new file
if out_path is None:
out_path = '/scratch/IGMSpec/'
outfil = out_path + 'IGMspec_DB_{:s}.hdf5'.format(version)
# Clobber?
if not chk_clobber(outfil, clobber=clobber):
return
# Other bits
pair_groups = ['SDSS_DR7']
# Begin
hdf = h5py.File(outfil,'w')
# Set/Check keys (and set idkey internally for other checks)
idkey = 'IGM_ID'
maindb, tkeys = sdbbu.start_maindb(idkey)
group_dict = {}
# BOSS DR14
new_groups = get_build_groups('v03')
gname = 'BOSS_DR14'
# Survey flag
flag_g = sdbbu.add_to_group_dict(gname, group_dict, skip_for_debug=True)
if not redo_dr14:
v030file = os.getenv('SPECDB')+'/IGMspec_DB_v03.0.hdf5'
igmsp_v030 = IgmSpec(db_file=v030file)
grp = hdf.create_group(gname)
# Copy spectra
igmsp_v030.hdf.copy(gname+'/spec', hdf[gname])
# Copy meta
igmsp_v030.hdf.copy(gname+'/meta', hdf[gname])
# Meta for maindb (a little risky as Meta needs to be aligned to the spectra but they should be)
meta = igmsp_v030['BOSS_DR14'].meta
meta.remove_column('IGM_ID')
maindb = sdbbu.add_ids(maindb, meta, flag_g, tkeys, idkey,
first=(flag_g==1), close_pairs=(gname in pair_groups),
debug=False)
else:
# BOSS DR14
print("Working on group: {:s}".format(gname))
# Meta
meta = new_groups[gname].grab_meta()
# IDs
maindb = sdbbu.add_ids(maindb, meta, flag_g, tkeys, idkey,
first=(flag_g==1), close_pairs=(gname in pair_groups),
debug=False)
# Spectra
new_groups[gname].hdf5_adddata(hdf, gname, meta)
new_groups[gname].add_ssa(hdf, gname)
# Pop me
new_groups.pop('BOSS_DR14')
# Loop on new v3 groups before copying in the others
for gname in new_groups.keys():
print("Working on group: {:s}".format(gname))
# Meta
meta = new_groups[gname].grab_meta()
# Survey flag
flag_g = sdbbu.add_to_group_dict(gname, group_dict, skip_for_debug=True)
# IDs
maindb = sdbbu.add_ids(maindb, meta, flag_g, tkeys, idkey,
first=(flag_g==1), close_pairs=(gname in pair_groups),
debug=False)
# Spectra
new_groups[gname].hdf5_adddata(hdf, gname, meta)
new_groups[gname].add_ssa(hdf, gname)
# Copy over all the old stuff
redo_groups = []#'HD-LLS_DR1']
skip_groups = ['BOSS_DR12']# 'SDSS_DR7'] #warnings.warn("NEED TO PUT BACK SDSS AND BOSS!")
skip_copy = False
if (not test) and (not skip_copy):
old1 = get_build_groups('v01')
old2 = get_build_groups('v02')
# Add v02 to v01 list
for key,item in old2.items():
old1[key] = item
# Loop on the combined
for key in old1.keys():
if key in ['catalog']+redo_groups+skip_groups:
continue
print("Working on: {:s}".format(key))
grp = hdf.create_group(key)
# Meta
meta = Table(v02hdf[key+'/meta'].value)
meta.remove_column('IGM_ID')
# Survey flag
flag_g = sdbbu.add_to_group_dict(key, group_dict, skip_for_debug=True)
# IDs
maindb = sdbbu.add_ids(maindb, meta, flag_g, tkeys, idkey,
first=(flag_g==1), close_pairs=(key in pair_groups),
debug=False)
# Add meta to HDF5
#meta.rename_column('GRATING', 'DISPERSER')
hdf[key+'/meta'] = meta
for akey in v02hdf[key+'/meta'].attrs.keys():
hdf[key+'/meta'].attrs[akey] = v02hdf[key+'/meta'].attrs[akey]
# SSA info
old1[key].add_ssa(hdf, key)
# Copy spectra
v02hdf.copy(key+'/spec', hdf[key])
skip_myers = False
if skip_myers:
warnings.warn("NEED TO INCLUDE MYERS!")
else:
# Copy from v02
        v02hdf.copy('quasars', hdf)
#myers.add_to_hdf(hdf)
# Setup groups
pair_groups = []
# Check for duplicates -- There is 1 pair in SDSS (i.e. 2 duplicates)
if not sdbbu.chk_for_duplicates(maindb, dup_lim=2):
raise ValueError("Failed duplicates")
# Check stacking
if not sdbbu.chk_vstack(hdf):
print("Meta data will not stack using specdb.utils.clean_vstack")
print("Proceed to write at your own risk..")
pdb.set_trace()
# Finish
zpri = v02hdf['catalog'].attrs['Z_PRIORITY']
sdbbu.write_hdf(hdf, str('igmspec'), maindb, zpri,
group_dict, version, Publisher=str(publisher))
print("Wrote {:s} DB file".format(outfil))
print("Update DB info in specdb.defs.dbase_info !!")
def chk_clobber(outfil, clobber=False):
""" Simple clobber check
    Parameters
    ----------
    outfil : str
    clobber : bool, optional
"""
# Chk clobber
if os.path.isfile(outfil):
if clobber:
warnings.warn("Overwriting previous DB file {:s}".format(outfil))
return True
else:
warnings.warn("Not overwiting previous DB file. Set clobber=True to do so")
return False
else:
return True
def get_build_groups(version):
"""
Parameters
----------
version : str
Returns
-------
build_groups : dict
"""
groups = OrderedDict()
if version == 'v01':
groups['BOSS_DR12'] = boss
groups['SDSS_DR7'] = sdss
groups['KODIAQ_DR1'] = kodiaq
groups['HD-LLS_DR1'] = hdlls
groups['GGG'] = ggg
elif version[0:3] == 'v02':
groups['HST_z2'] = hst_z2 # O'Meara et al. 2011
groups['XQ-100'] = xq100 # Lopez et al. 2016
groups['HDLA100'] = hdla100 # Neeleman et al. 2013
groups['2QZ'] = twodf # Croom et al.
groups['ESI_DLA'] = esidla # Rafelski et al. 2012, 2014
groups['COS-Halos'] = cos_halos # Tumlinson et al. 2013
groups['COS-Dwarfs'] = cos_dwarfs # Bordoloi et al. 2014
groups['HSTQSO'] = hst_qso # Ribaudo et al. 2011; Neeleman et al. 2016
groups['MUSoDLA'] = musodla # Jorgensen et al. 2013
groups['UVES_Dall'] = uves_dall # Dall'Aglio et al. 2008
groups['UVpSM4'] = hst_c # Cooksey et al. 2010, 2011
elif version == 'v03':
groups['BOSS_DR14'] = boss_dr14 # Paris et al. 2018
groups['SQUAD_DR1'] = uves_squad # Murphy et al. 2018
        groups['ESI_z6'] = esi_z6      # Eilers et al. 2018
groups['KODIAQ_DR2'] = kodiaq_two
else:
raise IOError("Not ready for this version")
# Return
return groups
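# Note: each ingest module registered above is used duck-typed by the ver*
# builders in this file, roughly as:
#   meta = module.grab_meta()              # build the group meta Table
#   module.hdf5_adddata(hdf, gname, meta)  # write spectra + meta into hdf
#   module.add_ssa(hdf, gname)             # attach SSA info to the meta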
| 15,398 | 32.045064 | 104 | py |
igmspec | igmspec-master/igmspec/scripts/__init__.py | 0 | 0 | 0 | py | |
igmspec | igmspec-master/igmspec/scripts/build_igmspec.py | #!/usr/bin/env python
"""
Run a build of the DB
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
import pdb
try: # Python 3
ustr = unicode
except NameError:
ustr = str
def parser(options=None):
import argparse
# Parse
parser = argparse.ArgumentParser(
description='Build the igmspec DB')
parser.add_argument("-v", "--version", help="DB version to generate")
parser.add_argument("-t", "--test", default=False, action='store_true', help="Test?")
parser.add_argument("--boss_hdf", help="HDF file with BOSS dataset [avoids repeating spectra ingestion]")
parser.add_argument("--sdss_hdf", help="HDF file with SDSS dataset [avoids repeating spectra ingestion]")
parser.add_argument("--clobber", default=False, action='store_true', help="Clobber existing file?")
parser.add_argument("--out_path", type=str, help="Output path for file")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args=None):
""" Run
Parameters
----------
args
Returns
-------
"""
from igmspec import build_db
import h5py
# Grab arguments
pargs = parser(options=args)
# BOSS
if pargs.boss_hdf is not None:
boss_hdf = h5py.File(pargs.boss_hdf,'r')
else:
boss_hdf = None
if pargs.sdss_hdf is not None:
if boss_hdf is not None:
if pargs.boss_hdf==pargs.sdss_hdf:
sdss_hdf = boss_hdf
else:
sdss_hdf = h5py.File(pargs.sdss_hdf,'r')
else:
sdss_hdf = h5py.File(pargs.sdss_hdf,'r')
else:
sdss_hdf = None
# Run
if pargs.version is None:
print("Building v02 of the igmspec DB")
build_db.ver02(test=pargs.test, clobber=pargs.clobber)
elif pargs.version == 'v01':
print("Building v01 of the igmspec DB")
build_db.ver01(test=pargs.test,
boss_hdf=boss_hdf, sdss_hdf=sdss_hdf, clobber=pargs.clobber)
elif pargs.version == 'v02':
print("Building v02 of the igmspec DB")
build_db.ver02(test=pargs.test, clobber=pargs.clobber)
elif pargs.version == 'v02.1':
print("Building v02.1 of the igmspec DB")
build_db.ver02(test=pargs.test, version=pargs.version, clobber=pargs.clobber,
out_path=pargs.out_path)
elif pargs.version == 'v03':
print("Building v03 of the igmspec DB")
build_db.ver03(test=pargs.test, version=pargs.version, clobber=pargs.clobber,
out_path=pargs.out_path, redo_dr14=True)
elif pargs.version == 'v03.1':
print("Building v03.1 of the igmspec DB")
build_db.ver03(test=pargs.test, version=pargs.version, clobber=pargs.clobber,
out_path=pargs.out_path, redo_dr14=False)
else:
raise IOError("Bad version number")
if __name__ == '__main__':
main()
| 3,002 | 31.290323 | 109 | py |
igmspec | igmspec-master/igmspec/tests/test_ssa.py | # Module to run tests on scripts
# TEST_UNICODE_LITERALS
import pytest
import os
from specdb.specdb import IgmSpec
from specdb import ssa as spdb_ssa
#version = 'v01'
version = 'v02'
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
def test_sdss_ssa_querydata():
if os.getenv('SPECDB') is None: # Would need to generate a new debug file for this to work..
assert True
return
igmsp = IgmSpec()#db_file=db_file)
#
ssa = spdb_ssa.SSAInterface(igmsp)
votable = ssa.querydata('0.027228,0.515341', SIZE=1e-3)
# Write
votable.to_xml('sdss_querydata.xml')
def test_chalos_ssa_querydata():
""" Mixes COS and HIRES
"""
if os.getenv('SPECDB') is None: # Would need to generate a new debug file for this to work..
assert True
return
igmsp = IgmSpec()#db_file=db_file)
#
ssa = spdb_ssa.SSAInterface(igmsp)
votable = ssa.querydata('344.4092,13.6793', SIZE=1e-3)
# Write
votable.to_xml('cos_querydata.xml')
| 1,083 | 24.209302 | 97 | py |
igmspec | igmspec-master/igmspec/tests/__init__.py | 0 | 0 | 0 | py | |
igmspec | igmspec-master/igmspec/tests/test_scripts.py | # Module to run tests on scripts
import matplotlib
matplotlib.use('agg') # For Travis
# TEST_UNICODE_LITERALS
import pytest
import os
#version = 'v01'
version = 'v02'
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
| 309 | 14.5 | 63 | py |
igmspec | igmspec-master/igmspec/ingest/uves_dall.py | """ Module to ingest UVES data from Dall'Aglio
Dall'Aglio et al. 2008, A&A, 491, 465
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os, glob
import imp
import json
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.table import Table, Column, vstack
from astropy.time import Time
from astropy import units as u
from linetools.spectra import io as lsio
from linetools import utils as ltu
from specdb.specdb import IgmSpec
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
from specdb.zem.utils import zem_from_radec
igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Grab UVES Dall'Aglio meta table
Returns
-------
"""
#
uvesdall_meta = Table.read(os.getenv('RAW_IGMSPEC')+'/UVES_Dall/uves_dall_summ.dat', format='ascii')
nspec = len(uvesdall_meta)
# DATE
#datearr = [day.split('/') for day in list(uvesdall_meta['ObsDate'])]
#ndate = ['20'+str(day[2])+'-'+str(day[0])+'-'+str(day[1]) for day in datearr]
t = Time(uvesdall_meta['OBS-DATE'], out_subfmt='date') # Fixes to YYYY-MM-DD
uvesdall_meta.add_column(Column(t.iso, name='DATE-OBS'))
# RA/DEC
coord = SkyCoord(ra=uvesdall_meta['RA'], dec=uvesdall_meta['DEC'], unit=(u.hour,u.deg))
rad = [icoord.ra.value for icoord in coord]
decd = [icoord.dec.value for icoord in coord]
uvesdall_meta.rename_column('RA', 'RA_STR')
uvesdall_meta.rename_column('DEC', 'DEC_STR')
uvesdall_meta['RA_GROUP'] = rad
uvesdall_meta['DEC_GROUP'] = decd
# Add zem
igmsp = IgmSpec()
ztbl = Table(igmsp.hdf['quasars'].value)
zem, zsource = zem_from_radec(rad, decd, ztbl)
badz = np.where(zem < 0.1)[0]
for ibadz in badz:
if uvesdall_meta['NAME'][ibadz] == 'HE2243-6031':
zem[ibadz] = 3.005
zsource[ibadz] = 'FOP13' # Fumagalli+13
elif uvesdall_meta['NAME'][ibadz] == 'HE1341-1020':
zem[ibadz] = 2.137
zsource[ibadz] = 'Dall08' # Dall'Aglio+08
elif uvesdall_meta['NAME'][ibadz] == 'Q0002-422':
zem[ibadz] = 2.769
zsource[ibadz] = 'Dall08' # Dall'Aglio+08
elif uvesdall_meta['NAME'][ibadz] == 'PKS2000-330':
zem[ibadz] = 3.786
zsource[ibadz] = 'Dall08' # Dall'Aglio+08
else:
raise ValueError("Should not be here")
uvesdall_meta['zem_GROUP'] = zem
uvesdall_meta['sig_zem'] = [0.]*nspec
uvesdall_meta['flag_zem'] = zsource
#
uvesdall_meta.add_column(Column([2000.]*nspec, name='EPOCH'))
uvesdall_meta.add_column(Column(['VLT']*nspec, name='TELESCOPE'))
uvesdall_meta.add_column(Column(['UVES']*nspec, name='INSTR'))
uvesdall_meta.add_column(Column(['BOTH']*nspec, name='DISPERSER'))
uvesdall_meta.add_column(Column([45000.]*nspec, name='R'))
uvesdall_meta['STYPE'] = str('QSO')
# Sort
uvesdall_meta.sort('RA_GROUP')
# Check
assert chk_meta(uvesdall_meta, chk_cat_only=True)
return uvesdall_meta
'''
def meta_for_build(uvesdall_meta=None):
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
if uvesdall_meta is None:
uvesdall_meta = grab_meta()
nqso = len(uvesdall_meta)
#
meta = Table()
for key in ['RA', 'DEC', 'zem', 'sig_zem', 'flag_zem']:
meta[key] = uvesdall_meta[key]
meta['STYPE'] = [str('QSO')]*nqso
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False):
""" Append UVES_Dall data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta data table for the group
chk_meta_only : bool, optional
Only check meta file; will not write
Returns
-------
"""
from specdb import defs
# Add Survey
print("Adding {:s} survey to DB".format(sname))
uvesdall_grp = hdf.create_group(sname)
# Load up
Rdicts = defs.get_res_dicts()
# Checks
if sname != 'UVES_Dall':
raise IOError("Expecting UVES_Dall!!")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 150000 # Just needs to be large enough
data = init_data(max_npix, include_co=True)
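    # init_data builds one fixed-width structured row; it is zeroed and
    # refilled for each spectrum, then copied into spec_set[jj] below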
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
wvminlist, wvmaxlist, npixlist, speclist = [], [], [], []
# Loop
maxpix = 0
for jj,row in enumerate(meta):
# Read
specfile = os.getenv('RAW_IGMSPEC')+'/UVES_Dall/{:s}_flux.dat'.format(row['NAME'])
print("UVES_Dall: Reading {:s}".format(specfile))
spec = Table.read(specfile,format='ascii.fast_no_header',guess=False)#, data_start=1)
# Parse name
fname = specfile.split('/')[-1]
# npix
npix = len(spec['col1'])
if npix > max_npix:
raise ValueError("Not enough pixels in the data... ({:d})".format(npix))
else:
maxpix = max(npix,maxpix)
# Continuum
# Some fiddling about
for key in ['wave','flux','sig','co']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec['col2']
data['sig'][0][:npix] = spec['col3']
data['wave'][0][:npix] = spec['col1']
data['co'][0][:npix] = spec['col4']
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add UVES_Dall meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2008A%26A...491..465D',
bib='dallaglio+08'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
    Title = "{:s}: Dall'Aglio et al. (2008) compilation of VLT/UVES spectra".format(dset)
ssa_dict = default_fields(Title, flux='flambda')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 7,057 | 31.525346 | 104 | py |
igmspec | igmspec-master/igmspec/ingest/musodla.py | """ Module to ingest MUSoDLA survey
Jorgensen et al. 2013
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os, json, glob, imp
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.table import Table, Column
from astropy.time import Time
from astropy import units as u
from linetools import utils as ltu
from linetools.spectra import io as lsio
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Ingest supplied meta table
Returns
-------
meta : Table
"""
# Cut down to unique QSOs
musodla_meta = Table.read(os.getenv('RAW_IGMSPEC')+'/MUSoDLA/datatab_v2.dat', format='ascii')
mdict = {1:'MagE', 2:'XSHOOTER', 3:'UVES', 4:'HIRES'}
mRdict = {'MagE':71., 'XSHOOTER':59., 'UVES':7., 'HIRES':7.}
gdict = {'MagE':'N/A', 'XSHOOTER':'ALL', 'UVES':'BOTH', 'HIRES':'RED'}
tdict = {'MagE':'Magellan', 'XSHOOTER':'VLT', 'UVES':'VLT', 'HIRES':'Keck-I'}
spfdict = {'MagE':'MagE', 'XSHOOTER':'XShooter', 'UVES':'UVES', 'HIRES':'HIRES'}
coords = []
zems = []
instrs = []
names = []
dinfos = []
dates = []
sfiles = []
Rs = []
gratings = []
telescopes = []
for row in musodla_meta:
# Coord
coord = ltu.radec_to_coord((row['RA(J2000)'],row['DEC(J2000)']))
# Instruments
insts = row['I'].split(',')
dinfo = row['date_info'].split(';')
for jj,inst in enumerate(insts):
coords.append(coord)
zems.append(row['z_em'])
instr = mdict[int(inst)]
instrs.append(instr)
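            # R = c / Delta v: 3e5 is c in km/s, so the mRdict values are
            # evidently resolution element FWHMs in km/s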
Rs.append(3e5/mRdict[instr])
gratings.append(gdict[instr])
telescopes.append(tdict[instr])
names.append(row['QSOname'])
sfiles.append(row['QSOname']+'_{:s}.ascii'.format(spfdict[instr]))
# Date
assert dinfo[jj][0] == inst
dinfos.append(str(dinfo[jj]))
dsplit = dinfo[jj].split(',')
dates.append(dsplit[-1])
# Special case
if 'J1201+0116' in row['QSOname']:
sfiles[-1] = 'J1201+0116_HIRES_1.3kms.ascii'
sfiles.append('J1201+0116_HIRES_2.6kms.ascii')
instrs.append(instr)
Rs.append(3e5/mRdict[instr])
gratings.append(gdict[instr])
telescopes.append(tdict[instr])
names.append(row['QSOname'])
dinfos.append(str(dinfo[jj]))
dates.append(dsplit[-1])
coords.append(coord)
zems.append(row['z_em'])
# Generate
meta = Table()
meta['RA_GROUP'] = [coord.ra.deg for coord in coords]
meta['DEC_GROUP'] = [coord.dec.deg for coord in coords]
meta['NAME'] = names
meta['zem_GROUP'] = zems
meta['INSTR'] = instrs
meta['DISPERSER'] = gratings
meta['DATE-INFO'] = dinfos
meta['SPEC_FILE'] = sfiles
meta['TELESCOPE'] = telescopes
meta['R'] = Rs
t = Time(dates, out_subfmt='date') # Fixes to YYYY-MM-DD
meta.add_column(Column(t.iso, name='DATE-OBS'))
#
meta['sig_zem'] = 0.
meta['flag_zem'] = str('SDSS')
meta['STYPE'] = str('QSO')
# Check
assert chk_meta(meta, chk_cat_only=True)
# Return
return meta
def hdf5_adddata(hdf, sname, musodla_meta, debug=False, chk_meta_only=False,
mk_test_file=False):
""" Append MUSoDLA data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    musodla_meta : Table
      Meta data table for the group
    chk_meta_only : bool, optional
      Only check meta file; will not write
    mk_test_file : bool, optional
      Generate the debug test file for Travis?
Returns
-------
"""
from specdb.build.utils import chk_meta
# Add Survey
print("Adding {:s} survey to DB".format(sname))
hdlls_grp = hdf.create_group(sname)
# Checks
if sname != 'MUSoDLA':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
max_npix = 230000 # Just needs to be large enough
data = init_data(max_npix, include_co=False)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
nspec = len(musodla_meta)
spec_set.resize((nspec,))
wvminlist = []
wvmaxlist = []
npixlist = []
# Loop
for jj,row in enumerate(musodla_meta):
kk = jj
# Extract
f = os.getenv('RAW_IGMSPEC')+'/MUSoDLA/data/'+row['SPEC_FILE']
try:
spec = lsio.readspec(f, masking='edges')
except:
pdb.set_trace()
# Parse name
fname = f.split('/')[-1]
# npix
head = spec.header
npix = spec.npix
if npix > max_npix:
raise ValueError("Not enough pixels in the data... ({:d})".format(npix))
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
if 'MagE' in f:
if fname in ['J2122-0014_MagE.ascii','J0011+1446_MagE.ascii']:
data['sig'][0][:npix] = spec.sig.value # Special cases..
else:
data['sig'][0][:npix] = 1./np.sqrt(spec.sig.value) # IVAR
else:
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[kk] = data
# Add columns
nmeta = len(musodla_meta)
musodla_meta.add_column(Column([2000.]*nmeta, name='EPOCH'))
musodla_meta.add_column(Column(npixlist, name='NPIX'))
musodla_meta.add_column(Column(wvminlist, name='WV_MIN'))
musodla_meta.add_column(Column(wvmaxlist, name='WV_MAX'))
musodla_meta.add_column(Column(np.arange(nmeta,dtype=int),name='GROUP_ID'))
    # Add MUSoDLA meta to hdf5
if chk_meta(musodla_meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = musodla_meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2013MNRAS.435..482J',
bib='regina+13'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: The Magellan uniform survey of damped Lya systems'.format(dset)
ssa_dict = default_fields(Title, flux='normalized')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
def chk_mage_flux():
""" At least one MagE file has a mix of fluxed and normalized spectra
And a 'normal' error array
"""
mage_files = glob.glob(os.getenv('RAW_IGMSPEC')+'/MUSoDLA/data/*MagE.ascii')
for mfile in mage_files:
spec = lsio.readspec(mfile)
nhigh = np.sum(spec.flux.value > 1e10)
print("File={:s} with nhigh={:d}".format(mfile, nhigh))
if nhigh > 1:
pdb.set_trace()
| 7,608 | 31.797414 | 97 | py |
igmspec | igmspec-master/igmspec/ingest/uves_squad.py | """ Module to ingest UVES SQUAD DR1 data
Murphy et al. 2018
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os, json, glob, imp
import datetime
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from astropy import units
from astropy.io import fits
from astropy.time import Time
from linetools import utils as ltu
from linetools.spectra import io as lsio
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
from specdb.build.utils import set_resolution
igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Generates the meta data needed for the IGMSpec build
Returns
-------
    squad_meta : Table
      Meta table for the group, including a SPEC_FILE column
"""
# Load the summary table
path = os.path.join(os.getenv('RAW_IGMSPEC'), 'UVES_SQUAD_DR1')
squad_meta = Table.read(os.path.join(path, 'DR1_quasars_master.csv'))
# Limit to those with spectra
keep = np.array([True]*len(squad_meta))
for ii in [3,4,5]:
keep = keep & np.invert(squad_meta['Spec_status'] == str(ii))
squad_meta = squad_meta[keep]
# Cut down to unique QSOs
spec_files = []
for row in squad_meta:
# SPEC_FILE
fname = row['Name_Adopt']+'.fits'
spec_files.append(fname)
nqso = len(squad_meta)
# Coord me
coord = SkyCoord(ra=squad_meta['RA_Adopt'], dec=squad_meta['Dec_Adopt'], unit=(units.hour, units.deg))
squad_meta['RA_GROUP'] = coord.ra.value
squad_meta['DEC_GROUP'] = coord.dec.value
# Rename a few
squad_meta.rename_column('zem_Adopt', 'zem_GROUP')
squad_meta['sig_zem'] = 0.
#
zsource = [None]*nqso
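    # Redshift source priority: SDSS first, then NED, then SIMBAD; masked
    # entries flag redshifts that are unavailable in the master table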
gd_sdss = squad_meta['zem_SDSS'] > 0.
for ii in np.where(gd_sdss)[0]:
zsource[ii] = str('SDSS')
gd_NED = squad_meta['zem_SDSS'].mask & (squad_meta['zem_NED'] > 0.)
for ii in np.where(gd_NED)[0]:
zsource[ii] = str('NED')
gd_SIMBAD = squad_meta['zem_SDSS'].mask & squad_meta['zem_NED'].mask
for ii in np.where(gd_SIMBAD)[0]:
zsource[ii] = str('SIMBAD')
squad_meta['flag_zem'] = zsource
#
squad_meta.rename_column('WavStart', 'WV_MIN')
squad_meta.rename_column('WavEnd', 'WV_MAX')
squad_meta['INSTR'] = 'UVES'
squad_meta['TELESCOPE'] = 'VLT'
squad_meta['DISPERSER'] = 'BOTH'
squad_meta['EPOCH'] = 2000.
squad_meta['STYPE'] = str('QSO')
squad_meta['SPEC_FILE'] = spec_files
# Check
assert chk_meta(squad_meta, chk_cat_only=True)
return squad_meta
def hdf5_adddata(hdf, sname, squad_meta, debug=False, chk_meta_only=False):
""" Append UVES SQUAD data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    squad_meta : Table
      Meta data table for the group
    chk_meta_only : bool, optional
      Only check meta file; will not write
Returns
-------
"""
from specdb import defs
# Add Survey
print("Adding {:s} survey to DB".format(sname))
hdlls_grp = hdf.create_group(sname)
# Load up
Rdicts = defs.get_res_dicts()
# Checks
if sname != 'SQUAD_DR1':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
#if mk_test_file:
# hdla100_full = hdlls_full[0:3]
max_npix = 300000 # Just needs to be large enough
data = init_data(max_npix, include_co=True)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
nspec = len(squad_meta)
spec_set.resize((nspec,))
Rlist = []
dateobslist = []
npixlist = []
gratinglist = []
# Loop
for jj,row in enumerate(squad_meta):
kk = jj
# Extract
f = os.path.join(os.getenv('RAW_IGMSPEC'),'UVES_SQUAD_DR1', 'spectra',
row['SPEC_FILE'][:-5], row['SPEC_FILE'])
spec = lsio.readspec(f)
# Parse name
fname = f.split('/')[-1]
# npix
head = spec.header
npix = spec.npix
if npix > max_npix:
raise ValueError("Not enough pixels in the data... ({:d})".format(npix))
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
data['co'][0][:npix] = spec.co.value
# Meta
hdu = fits.open(f)
# Date
exp_tbl = hdu[3].data
items = []
for item in exp_tbl['UTDate']:
items.append(item.strip())
times = Time(items, format='iso')
dateobslist.append(times.min().value[0:10])
# R
comb_tbl = hdu[1].data
Rlist.append(int(np.mean(comb_tbl['NomResolPower'][comb_tbl['NomResolPower'] > 0.])))
# npix
npixlist.append(npix)
# Done
spec_set[kk] = data
# Add columns
squad_meta['GROUP_ID'] = np.arange(nspec, dtype=int)
squad_meta['R'] = Rlist
squad_meta['NPIX'] = npixlist
squad_meta['DATE-OBS'] = dateobslist
    # Add SQUAD meta to hdf5
if chk_meta(squad_meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = squad_meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2019MNRAS.482.3458M',
bib='murphy+19'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
    Title = '{:s}: The UVES Spectral Quasar Absorption Database (SQUAD) DR1'.format(dset)
ssa_dict = default_fields(Title, flux='normalized')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 6,181 | 29.756219 | 106 | py |
igmspec | igmspec-master/igmspec/ingest/myers.py | """ Module to ingest Myers' QSOs
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os
import pdb
from astropy.table import Table
from astropy.io import fits
from astropy import units as u
from linetools import utils as ltu
from specdb.build import utils as sbu
def add_to_hdf(hdf, Z_MIN = 0.1, Z_MAX = 7.1, MATCH_TOL = 2.0*u.arcsec):
"""Generate Myers + SDSS_BOSS QSO catalog from Myers and DR12 files
This routine reads in the SDSS/BOSS specObj and PhotPosPlate files
    and pulls out all QSOs. This is then matched and merged with
    the Myers QSO compilation with duplicates removed. It adds this
    catalog to the 'quasars' field of the hdf5 object hdf.
    Requires that the environment variable RAW_IGMSPEC be set to the top
    directory where the SDSS/BOSS files and Myers files live.
Parameters
----------
hdf : hdf5 object
database to hold QSO catalog
Z_MIN : float, optional [default Z_MIN = 0.1]
minimum QSO redshift applied to the catalog
    Z_MAX : float, optional [default Z_MAX = 7.1]
        maximum QSO redshift applied to the catalog
MATCH_TOL : quantity, optional [default 2.0*u.arcsec]
matching radius between Myers and SDSS/BOSS catalogs
Returns
-------
None :
None
Examples
--------
>>> add_to_hdf(hdf)
None
Notes
-----
In the 'quasars' table added to the hdf5 object hdf, tags with the SDSS_BOSS_ prefix are
the SDSS/BOSS tags, and tags with MYERS_ are from the Myers catalog. We also add the following
tags
'SDSS_BOSS_MYERS_FLAG' = set to either SDSS_BOSS_MYERS, SDSS_BOSS_ONLY, or MYERS_ONLY
'RA', 'DEC' = our best knowledge of the coordinates (PLUG_RA, PLUG_DEC for SDSS/BOSS, otherwise Myers)
'SOURCEBIT' = Myers catalog sourcebits, with a new 2**19 sourcebit for objects in SDSS_BOSS_ONLY objects
    'ZEM' = Our best knowledge of the redshift (either MYERS_ZEM, or the SDSS/BOSS redshift for SDSS_BOSS_ONLY objects)
    'ZEM_SOURCE' = The source of the redshift following the Myers classification, with an additional SDSS_BOSS_ONLY category
"""
import json
from astropy.table import Column, hstack, vstack
from astropy.coordinates import SkyCoord, search_around_sky
## SDSS/BOSS data stuff
specfile = os.getenv('RAW_IGMSPEC') + '/SDSS_BOSS/specObj-dr12_trim.fits'
spec = Table.read(specfile)
# Read in select columns from DR12 photometry. This and the file above are aligned
posfile = os.getenv('RAW_IGMSPEC') + '/SDSS_BOSS/photoPosPlate-dr12_trim.fits'
phot = Table.read(posfile)
# Trim to QSO, Specprimary, spec2d called it a QSO, redshift flag cuts, sanity check on coords
itrim = (spec['SPECPRIMARY'] == 1) & \
[('QSO' in q) for q in spec['CLASS']] & \
(spec['ZWARNING'] < 5) & \
(spec['PLUG_RA'] >= 0.0) & (spec['PLUG_RA'] <= 360.0) & \
(np.abs(spec['PLUG_DEC']) <= 90.0)
spec = spec[itrim]
phot = phot[itrim]
sdss_boss1 = hstack([spec, phot], join_type='exact')
# Add SDSS prefix to all SDSS tags
for key in sdss_boss1.keys():
sdss_boss1.rename_column(key, 'SDSS_BOSS_' + key)
# Read in the Myers file, match it to Myers sweeps photometry
# Myers master QSO catalog
ADM_file = os.getenv('RAW_IGMSPEC') + '/Myers/GTR-ADM-QSO-master-wvcv.fits.gz'
ADM_qso = Table.read(ADM_file)
head1 = fits.open(ADM_file)[1].header
DATE = head1['DATE']
# Photometry for Myers QSO catalog. This file is not aligned with the catalog file, i.e. it is a
# superset that includes the catalog file. For that reason we need to match and tack on photometry
ADM_sweep_file = os.getenv('RAW_IGMSPEC') + '/Myers/GTR-ADM-QSO-master-sweeps-Feb5-2016.fits'
ADM_sweep = Table.read(ADM_sweep_file)
c_qso = SkyCoord(ra=ADM_qso['RA'], dec=ADM_qso['DEC'],unit ='deg')
c_swp = SkyCoord(ra=ADM_sweep['RA'], dec=ADM_sweep['DEC'], unit='deg')
## Create an aligned Table for matching photometry from sweeps
nqso = len(ADM_qso)
qso_phot = Table(np.repeat(np.zeros_like(ADM_sweep[0]), nqso))
# Rename the RA and DEC
qso_phot.rename_column('RA', 'RA_sweep')
qso_phot.rename_column('DEC', 'DEC_sweep')
# Cull out the keys which already exist in the ADM_qso Table (except
# for the RA and DEC, which we renamed)
dupe_keys = list(set(ADM_qso.keys()) & set(qso_phot.keys()))
qso_phot.remove_columns(dupe_keys)
# Match the Myers catalog to the Myers sweeps
idx, d2d, d3d = c_qso.match_to_catalog_sky(c_swp)
# Currently using 1.0" for matching, as for the SDSS objects, these will mostly be the exact
# same coordinates.
itrim = (d2d <= 1.0 * u.arcsec)
qso_phot[:][itrim] = ADM_sweep[:][idx[itrim]]
ADM_qso = hstack([ADM_qso, qso_phot], join_type='exact')
# Trim to only spectroscopic objects
ispec = spectro_myers(ADM_qso)
ADM_qso = ADM_qso[ispec]
# assign best redshifts to ZEM tag
zbest_myers(ADM_qso)
# Add MYERS prefix to all MYERS tags
for key in ADM_qso.keys():
ADM_qso.rename_column(key, 'MYERS_' + key)
    # Now we match the SDSS/BOSS and Myers catalogs to create one master QSO catalog
#
# There are three groups of objects, 1) SDSS-MYERS match, 2) SDSS only, 3) Myers only.
# Deal with each in turn.
# 1) SDSS-MYERS match. Add Myers tags to the SDSS structure
c_sdss = SkyCoord(ra=sdss_boss1['SDSS_BOSS_PLUG_RA'], dec=sdss_boss1['SDSS_BOSS_PLUG_DEC'], unit='deg')
c_myers = SkyCoord(ra=ADM_qso['MYERS_RA'], dec=ADM_qso['MYERS_DEC'], unit='deg')
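    # search_around_sky returns every cross-catalog pair within MATCH_TOL,
    # not just the single nearest neighbor for each source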
isdss, imyers, d2d, _ = search_around_sky(c_sdss, c_myers, MATCH_TOL)
sdss_myers = hstack([sdss_boss1[isdss], ADM_qso[imyers]], join_type='exact')
sdss_myers['SDSS_BOSS_MYERS_FLAG'] = 'SDSS_BOSS_MYERS'
sdss_myers['RA'] = sdss_myers['SDSS_BOSS_PLUG_RA'] # SDSS/BOSS Plug coords most accurate
sdss_myers['DEC'] = sdss_myers['SDSS_BOSS_PLUG_DEC']
sdss_myers['SOURCEBIT'] = sdss_myers['MYERS_SOURCEBIT']
sdss_myers['ZEM'] = sdss_myers['MYERS_ZEM']
sdss_myers['ZEM_SOURCE'] = sdss_myers['MYERS_ZEM_SOURCE']
# 2) SDSS only
# Find the SDSS objects that have no match in the Myers catalog
inomatch = np.ones(len(c_sdss), dtype=bool)
inomatch[isdss] = False
sdss_only = sdss_boss1[inomatch]
sdss_only['SDSS_BOSS_MYERS_FLAG'] = 'SDSS_BOSS_ONLY'
sdss_only['RA'] = sdss_only['SDSS_BOSS_PLUG_RA']
sdss_only['DEC'] = sdss_only['SDSS_BOSS_PLUG_DEC']
sdss_only['SOURCEBIT'] = 2 ** 19 # New source bit for SDSS only objects
sdss_only['ZEM'] = sdss_only['SDSS_BOSS_Z']
sdss_only['ZEM_SOURCE'] = 'SDSS_BOSS_ONLY'
# 3) Myers only
# Find the Myers objects that have no match in SDSS/BOSS
inomatch = np.ones(len(c_myers), dtype=bool)
inomatch[imyers] = False
myers_only = ADM_qso[inomatch]
myers_only['SDSS_BOSS_MYERS_FLAG'] = 'MYERS_ONLY'
myers_only['RA'] = myers_only['MYERS_RA']
myers_only['DEC'] = myers_only['MYERS_DEC']
myers_only['SOURCEBIT'] = myers_only['MYERS_SOURCEBIT']
myers_only['ZEM'] = myers_only['MYERS_ZEM']
myers_only['ZEM_SOURCE'] = myers_only['MYERS_ZEM_SOURCE']
sdss_myers_out = vstack([sdss_myers, sdss_only, myers_only])
# Cut down
ztrim = (sdss_myers_out['ZEM'] >= Z_MIN) & (sdss_myers_out['ZEM'] <= Z_MAX)
coordtrim = (sdss_myers_out['RA'] >= 0.0) & (sdss_myers_out['RA'] <= 360.0) & (np.abs(
sdss_myers_out['DEC']) <= 90.0)
keep = ztrim & coordtrim
sdss_myers_out = sdss_myers_out[keep]
# Clean out unicode
sbu.clean_table_for_hdf(sdss_myers_out)
hdf['quasars'] = sdss_myers_out
hdf['quasars'].attrs['MYERS_DATE'] = DATE
# Myers dict
mdict = myers_dict()
hdf['quasars'].attrs['MYERS_DICT'] = json.dumps(ltu.jsonify(mdict))
return None
def myers_dict():
""" Generate a dict for coding Myers sources
Returns
-------
mdict : dict
"""
source = ['SDSS', # (Schneider et al. with Hewett and Wild redshifts)
'2QZ', #
'2SLAQ', #
'AUS', #
'AGES', #
'COSMOS', #
'FAN', #
'BOSS', # (Paris et al. through DR12+SEQUELS)
'MMT', #
'KDE', # (Photometric; Richards et al.)
'XDQSOZ', # (Photometric; Bovy et al.)
'PAPOVICH', #
'GLIKMAN', #
'MADDOX', #
'LAMOST', #
'VHS', # (Photometric; calculated using the Vista Hemisphere Survey IR-data)
'MCGREER', #
'VCV', #
'ALLBOSS', #
]
mdict = {}
for kk,key in enumerate(source):
mdict[key] = 2**kk
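    # e.g. mdict['SDSS'] == 2**0 and mdict['BOSS'] == 2**7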
# Return
return mdict
def zbest_myers(ADM_qso):
""" Assign best redshift within the Myers catalog
Parameters
----------
ADM_qso : Table
Myers catalog without ZEM, ZEM_SOURCE columns
Returns
-------
Nothing; fills Myers catalog with ZEM, ZEM_SOURCE columns
0 SDSS (Schneider et al. with Hewett and Wild redshifts)
1 2QZ
2 2SLAQ
3 AUS
4 AGES
5 COSMOS
6 FAN
7 BOSS (Paris et al. through DR12+SEQUELS)
8 MMT
9 KDE (Photometric; Richards et al.)
10 XDQSOZ (Photometric; Bovy et al.)
11 PAPOVICH
12 GLIKMAN
13 MADDOX
14 LAMOST
15 VHS (Photometric; calculated using the Vista Hemisphere Survey IR-data)
16 MCGREER
17 VCV
18 ALLBOSS
"""
#nmyers = len(ADM_qso)
#zstr = replicate(create_struct('ZEM', 0.0, 'ZEM_SOURCE', ''), nmyers)
#myers = struct_addtags(a, zstr)
#; Bits for Myers survey SOURCEBIT in order of redshift precedenece
#; HW , BOSS , all the rest
myers_pref = [0, 7, 1, 2, 3, 4, 5, 6, 8, 11, 12, 13, 14, 16, 17, 18]
myers_binary = [2**ipref for ipref in myers_pref]
#myers_binary = [2**0, 2**7, 2**1, 2**2, 2**3, 2**4, 2**5, 2**6, 2**8, 2**11,
# 2**12, 2**13, 2**14, 2**16, 2**17, 2**18]
myers_source = ['SDSS-HW', 'BOSS_PCA', '2QZ', '2SLAQ', 'AUS', 'AGES', 'COSMOS', 'FAN', 'MMT', 'PAPOVICH',
'GLIKMAN', 'MADDOX', 'LAMOST', 'MCGREER', 'VCV', 'ALLBOSS']
myers_source = [str(msrc) for msrc in myers_source] # For hdf5
#; Above gives top priority to HW, and second priority to BOSS
# Assign the best redshift to Myers targets
zem = []
zem_source = []
for row in ADM_qso:
try:
indx = min(np.where(row['SOURCEBIT'] & myers_binary)[0])
except ValueError:
indx = 0
# Fill
zem.append(row['ZBEST'][myers_pref[indx]])
zem_source.append(myers_source[indx])
# Add to Table
ADM_qso['ZEM'] = zem
ADM_qso['ZEM_SOURCE'] = zem_source
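# Worked example (illustrative): SOURCEBIT = 2**1 + 2**7 (2QZ + BOSS) matches
# myers_binary at indices 1 (BOSS) and 2 (2QZ); min() picks index 1, so
# ZEM = ZBEST[7] and ZEM_SOURCE = 'BOSS_PCA', i.e. BOSS outranks 2QZ.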
def spectro_myers(ADM_qso):
""" Returns indices of objects in the Myers catalog which are real spectroscopic QSOs.
Parameters
----------
ADM_qso : Table
Myers catalog without ZEM, ZEM_SOURCE columns
Returns
-------
Aligned array of booleans with True indicating a spectroscopic QSO.
0 SDSS (Schneider et al. with Hewett and Wild redshifts)
1 2QZ
2 2SLAQ
3 AUS
4 AGES
5 COSMOS
6 FAN
7 BOSS (Paris et al. through DR12+SEQUELS)
8 MMT
9 KDE (Photometric; Richards et al.)
10 XDQSOZ (Photometric; Bovy et al.)
11 PAPOVICH
12 GLIKMAN
13 MADDOX
14 LAMOST
15 VHS (Photometric; calculated using the Vista Hemisphere Survey IR-data)
16 MCGREER
17 VCV
18 ALLBOSS
    Notes from discussion with Myers (the quote below explains why we now
    exclude anything with only bit 18 set):
--------
I've also added, as bit 2L^18, a list of everything that
was visually inspected for BOSS. The redshifts for these objects
aren't necessarily correct, but if this bit is set for an
object and the object does not have a redshift corresponding
to bit 2L^0 or bit 2L^7 then this is not a quasar, as it was
visually inspected and not ultimately
included in a quasar catalog.
"""
ispec = ((ADM_qso['SOURCEBIT'] & 2**0) != False) | \
((ADM_qso['SOURCEBIT'] & 2**1) != False) | \
((ADM_qso['SOURCEBIT'] & 2**2) != False) | \
((ADM_qso['SOURCEBIT'] & 2**3) != False) | \
((ADM_qso['SOURCEBIT'] & 2**4) != False) | \
((ADM_qso['SOURCEBIT'] & 2**5) != False) | \
((ADM_qso['SOURCEBIT'] & 2**6) != False) | \
((ADM_qso['SOURCEBIT'] & 2**7) != False) | \
((ADM_qso['SOURCEBIT'] & 2**8) != False) | \
((ADM_qso['SOURCEBIT'] & 2**11) != False) | \
((ADM_qso['SOURCEBIT'] & 2**12) != False) | \
((ADM_qso['SOURCEBIT'] & 2**13) != False) | \
((ADM_qso['SOURCEBIT'] & 2**14) != False) | \
((ADM_qso['SOURCEBIT'] & 2**16) != False) | \
((ADM_qso['SOURCEBIT'] & 2**17) != False) & \
(ADM_qso['SOURCEBIT'] != 2**18)
return ispec
def orig_add_to_hdf(hdf):
""" Add Myers catalog to hdf file
Parameters
----------
hdf : HDF5 file
"""
print("Adding Myers catalog")
# Load
ADM_qso, date = load()
# Redshifts
zbest_myers(ADM_qso)
# Cut down
ztrim = (ADM_qso['ZEM'] >= 0.1) & (ADM_qso['ZEM'] <= 7.0)
coordtrim = (ADM_qso['RA'] >= 0.0) & (ADM_qso['RA'] <= 360.0) & (np.abs(
ADM_qso['DEC']) <= 90.0)
keep = ztrim & coordtrim
ADM_qso = ADM_qso[keep]
# Add
hdf['quasars'] = ADM_qso
hdf['quasars'].attrs['DATE'] = date
#
return
def load():
""" Load catalog
Parameters
----------
Returns
-------
cat : Table
date : str
DATE of creation
"""
ADM_file = os.getenv('RAW_IGMSPEC')+'/Myers/GTR-ADM-QSO-master-wvcv.fits.gz'
ADM_qso = Table.read(ADM_file)
# Grab header for DATE
head1 = fits.open(ADM_file)[1].header
# Return
return ADM_qso, head1['DATE']
| 13,950 | 34.589286 | 114 | py |
igmspec | igmspec-master/igmspec/ingest/boss_dr14.py | """ Module to ingest SDSS III (aka BOSS) data products
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, json
import pdb
import datetime
from pkg_resources import resource_filename
from astropy.table import Table, Column, vstack
from astropy.time import Time
from astropy.io import fits
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from linetools import utils as ltu
from linetools.spectra import io as lsio
from specdb.build.utils import chk_for_duplicates
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
def grab_meta(test=False):
""" Grab BOSS meta Table
Returns
-------
    boss_dr14 : Table
"""
# Paris et al.
qsos_dr14 = Table.read(os.getenv('RAW_IGMSPEC')+'/BOSS_DR14/DR14Q_v4_4.fits.gz')
# Add original ID to coordinate with spectra and remove it later
qsos_dr14['ORIG_ID'] = np.arange(len(qsos_dr14))
# Cut on BOSS (do not include DR7 files)
boss_dr14 = qsos_dr14[qsos_dr14['SPECTRO'] == 'BOSS']
# Cut out bad ones
bad_meta = Table.read(resource_filename('igmspec', 'ingest/files/bad_dr14.fits'))
keep = np.array([True]*len(boss_dr14))
for row in bad_meta:
bad_qso = np.where((boss_dr14['PLATE'] == row['PLATE']) & (
boss_dr14['FIBERID'] == row['FIBERID']))[0]
keep[bad_qso] = False
boss_dr14 = boss_dr14[keep]
if test:
boss_dr14 = boss_dr14[:100]
'''
# Cut out Plate 7840
plate7840 = Table.read(resource_filename('igmspec', 'ingest/files/bossdr14_plate7840_matched.ascii'), format='ascii')
not_7840 = boss_dr14['PLATE'] != 7840
for ii in range(len(plate7840)):
isplit = plate7840['sfile'][ii].split('-')
idx = np.where((boss_dr14['PLATE'] == 7840) & (boss_dr14['FIBERID'] == int(isplit[-1][0:4])))[0]
assert len(idx) == 1
not_7840[idx[0]] = True
boss_dr14 = boss_dr14[not_7840]
# Cut out others
for plate in [7879, 3678, 7513, 4869, 7306]:
ptbl = Table.read(resource_filename('igmspec', 'ingest/files/bossdr14_plate{:d}_matched.ascii'.format(plate)),
format='ascii.csv')
not_in = boss_dr14['PLATE'] != plate
for ii in range(len(ptbl)):
idx = np.where((boss_dr14['PLATE'] == ptbl['plate'][ii]) & (boss_dr14['FIBERID'] == ptbl['fiber'][ii]))[0]
not_in[idx[0]] = True
boss_dr14 = boss_dr14[not_in]
'''
# Proceed
boss_dr14['CAT'] = str('DR14')
# Cut on redshift?
#gd = np.any([boss_dr14['Z_PIPE'] > 0., boss_dr14['Z_PCA'] > 0.],axis=0) # CUTS Z_VI
#boss_dr12 = boss_dr12[gd]
#
#
nboss = len(boss_dr14)
# DATE-OBS
t = Time(list(boss_dr14['MJD'].data), format='mjd', out_subfmt='date') # Fixes to YYYY-MM-DD
boss_dr14.add_column(Column(t.iso, name='DATE-OBS'))
# Add columns
boss_dr14.add_column(Column(['BOSS']*nboss, name='INSTR'))
boss_dr14.add_column(Column(['BOTH']*nboss, name='DISPERSER'))
#http://www.sdss.org/instruments/boss_spectrograph/
boss_dr14.add_column(Column([2100.]*nboss, name='R')) # RESOLUTION
boss_dr14.add_column(Column(['SDSS 2.5-M']*nboss, name='TELESCOPE'))
# Redshift logic
boss_dr14['zem_GROUP'] = boss_dr14['Z']
boss_dr14['sig_zem'] = boss_dr14['Z_PIPE_ERR']
boss_dr14['flag_zem'] = boss_dr14['SOURCE_Z']
'''
# Fix bad redshifts
bad_pca = boss_meta['Z_PCA'] < 0.
boss_meta['zem_GROUP'][bad_pca] = boss_meta['Z_PIPE'][bad_pca]
boss_meta['sig_zem'][bad_pca] = boss_meta['ERR_ZPIPE'][bad_pca]
boss_meta['flag_zem'][bad_pca] = str('BOSS_PIPE')
'''
# Rename RA/DEC
boss_dr14.rename_column('RA', 'RA_GROUP')
boss_dr14.rename_column('DEC', 'DEC_GROUP')
# STYPE
boss_dr14['STYPE'] = str('QSO')
# Check
assert chk_meta(boss_dr14, chk_cat_only=True)
# Return
return boss_dr14
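
# A minimal sketch (illustrative only) of the MJD -> DATE-OBS conversion
# used in grab_meta() above; the MJD value is made up (55181 = 2009-12-16).
def _demo_mjd_to_dateobs():
    from astropy.time import Time
    t = Time([55181], format='mjd', out_subfmt='date')
    assert t.iso[0] == '2009-12-16'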
'''
def meta_for_build():
""" Load the meta info
DR12 quasars : https://data.sdss.org/datamodel/files/BOSS_QSO/DR12Q/DR12Q.html
Returns
-------
"""
boss_meta = grab_meta()
# Cut down to unique
c_main = SkyCoord(ra=boss_meta['RA_SPEC'], dec=boss_meta['DEC_SPEC'], unit='deg')
idx, d2d, d3d = match_coordinates_sky(c_main, c_main, nthneighbor=2)
dups = np.where(d2d < 2*u.arcsec)[0]
flgs = np.array([True]*len(boss_meta))
#
for ii in dups:
if boss_meta[ii]['CAT'] == 'SUPBD':
flgs[ii] = False
boss_meta = boss_meta[flgs]
if not chk_for_duplicates(boss_meta):
raise ValueError("DUPLICATES IN BOSS")
#
meta = Table()
for key in ['RA', 'DEC', 'zem', 'sig_zem', 'flag_zem']:
meta[key] = boss_meta[key]
meta['STYPE'] = [str('QSO')]*len(meta)
# Return
return meta
'''
def get_specfil(idx, meta, KG=False, hiz=False):
"""Grab the BOSS file name + path
KG : bool, optional
Grab MFR continuum generated by KG
"""
pnm = '{0:04d}'.format(meta['PLATE'][idx])
fnm = '{0:04d}'.format(meta['FIBERID'][idx])
mjd = str(meta['MJD'][idx])
    # Generate file name
path = os.getenv('RAW_IGMSPEC')+'/BOSS_DR14/'
oid = meta['ORIG_ID'][idx]
if pnm == '6190':
path += 'dr12_quasar_PlATE_6190/'
elif oid < 50000:
path += 'dr14_quasar_0-50000/'
elif oid < 100000:
path += 'dr14_quasar_50000-100000/'
elif oid < 150000:
path += 'dr14_quasar_100000-150000/'
elif oid < 200000:
path += 'dr14_quasar_150000-200000/'
elif oid < 250000:
path += 'dr14_quasar_200000-250000/'
elif oid < 300000:
path += 'dr14_quasar_250000-300000/'
elif oid < 350000:
path += 'dr14_quasar_300000-350000/'
elif oid < 400000:
path += 'dr14_quasar_350000-400000/'
elif oid < 450000:
path += 'dr14_quasar_400000-450000/'
elif oid < 500000:
path += 'dr14_quasar_450000-500000/'
elif oid < 600000:
path += 'dr14_quasar_500000-526356/'
    else:
        pdb.set_trace()
        raise ValueError("ORIG_ID {:d} is outside the expected DR14 range".format(oid))
specfil = path+'spec-{:s}-{:s}-{:s}.fits'.format(pnm, mjd, fnm)
# Finish
return specfil
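
# A minimal sketch (illustrative only) of how get_specfil() buckets spectra
# by ORIG_ID; the row values are hypothetical and $RAW_IGMSPEC must be set.
def _demo_get_specfil():
    from astropy.table import Table
    meta = Table()
    meta['PLATE'] = [3586]
    meta['FIBERID'] = [16]
    meta['MJD'] = [55181]
    meta['ORIG_ID'] = [12]
    specfil = get_specfil(0, meta)
    assert specfil.endswith('dr14_quasar_0-50000/spec-3586-55181-0016.fits')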
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False, boss_hdf=None, **kwargs):
""" Add BOSS data to the DB
Parameters
----------
hdf : hdf5 pointer
sname : str
Survey name
chk_meta_only : bool, optional
Only check meta file; will not write
boss_hdf : str, optional
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
if boss_hdf is not None:
print("Using previously generated {:s} dataset...".format(sname))
boss_hdf.copy(sname, hdf)
return
boss_grp = hdf.create_group(sname)
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 4660 # Just needs to be large enough
data = init_data(max_npix, include_co=False)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
wvminlist = []
wvmaxlist = []
speclist = []
npixlist = []
# Loop
maxpix = 0
bad_spec = np.array([False]*len(meta))
for jj in range(len(meta)):
# Generate full file
full_file = get_specfil(jj, meta)
if full_file == 'None':
continue
# Read
try:
spec = lsio.readspec(full_file, masking='edges')
except:
print("Failed on full_file: {:s}, {:d}".format(full_file, jj))
bad_spec[jj] = True
continue
# npix
npix = spec.npix
if npix < 10:
print("Not enough pixels in file: {:s}, {:d}".format(full_file, jj))
bad_spec[jj] = True
continue
'''
        # Kludge for highest redshift systems
if npix < 10:
full_file = get_specfil(jj, meta, hiz=True)
try:
spec = lsio.readspec(full_file)
except:
print("Missing: {:s}".format(full_file))
npix = spec.npix
elif npix > max_npix:
raise ValueError("Not enough pixels in the data... ({:d})".format(npix))
else:
maxpix = max(npix,maxpix)
'''
maxpix = max(npix,maxpix)
# Parse name
fname = full_file.split('/')[-1]
# Fill
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
'''
# GZ Continuum -- packed in with spectrum, generated by my IDL script
try:
co = spec.co.value
except AttributeError:
co = np.zeros_like(spec.flux.value)
# KG Continuum
KG_file = get_specfil(row, KG=True)
if os.path.isfile(KG_file) and (npix>1): # Latter is for junk in GZ file. Needs fixing
hduKG = fits.open(KG_file)
KGtbl = hduKG[1].data
wvKG = 10.**KGtbl['LOGLAM']
try:
assert (wvKG[0]-spec.wavelength[0].value) < 1e-5
except:
pdb.set_trace()
gdpix = np.where(wvKG < (1+row['zem_GROUP'])*1200.)[0]
co[gdpix] = KGtbl['CONT'][gdpix]
data['co'][0][:npix] = co
'''
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
if chk_meta_only:
continue
# Only way to set the dataset correctly
spec_set[jj] = data
# Deal with null spec -- Should only be done once, saved and then ready to go
if np.any(bad_spec):
bad_meta = meta[bad_spec]
pdb.set_trace()
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
meta.add_column(Column([2000.]*len(meta), name='EPOCH'))
    # Add BOSS meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
pdb.set_trace()
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2015ApJS..219...12A',
bib='boss_qso_dr12'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
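
# A minimal, self-contained sketch (illustrative only) of the resizable
# HDF5 dataset pattern used in hdf5_adddata() above: create a chunked,
# gzip-compressed dataset from one record, resize it, then assign record
# by record.  The dtype fields are simplified stand-ins for init_data().
def _demo_resizable_spec_set():
    import h5py
    import numpy as np
    demo = np.zeros((1,), dtype=[('wave', 'f8', (10,)), ('flux', 'f4', (10,))])
    with h5py.File('demo.hdf5', 'w', driver='core', backing_store=False) as fh:
        spec_set = fh.create_dataset('spec', data=demo, chunks=True,
                                     maxshape=(None,), compression='gzip')
        spec_set.resize((3,))
        for jj in range(3):
            demo['flux'][0][:] = jj
            spec_set[jj] = demo
        assert fh['spec'].shape == (3,)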
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: BOSS DR14 Quasars'.format(dset)
ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 11,371 | 31.772334 | 121 | py |
igmspec | igmspec-master/igmspec/ingest/hdlls.py | """ Module to ingest HD-LLS Survey data
Prochaska et al. 2015
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os, json, glob, imp
import datetime
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from astropy.io import fits
from linetools import utils as ltu
from linetools.spectra import io as lsio
from specdb.build.utils import chk_meta, set_resolution, init_data
igms_path = imp.find_module('igmspec')[1]
def grab_meta_mike():
""" Grab MIKE meta Table
Returns
-------
"""
mike_file = igms_path+'/data/meta/HD-LLS_DR1_MIKE.ascii'
mike_meta = Table.read(mike_file, format='ascii', delimiter='&',
guess=False, comment='#')
# RA/DEC, DATE
ra = []
dec = []
dateobs = []
for row in mike_meta:
# Fix DEC
if '--' in row['sDEC']:
row['sDEC'] = row['sDEC'].replace('--','-')
# Get RA/DEC
coord = ltu.radec_to_coord((row['sRA'],row['sDEC']))
ra.append(coord.ra.value)
dec.append(coord.dec.value)
# DATE
dvals = row['DATE'].split(' ')
dateobs.append(str('{:s}-{:s}-{:s}'.format(dvals[2],dvals[1],dvals[0])))
mike_meta.add_column(Column(ra, name='RA_GROUP'))
mike_meta.add_column(Column(dec, name='DEC_GROUP'))
mike_meta.add_column(Column(dateobs, name='DATE-OBS'))
#
return mike_meta
def grab_meta():
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
# Read
hdlls_meta = Table.read(os.getenv('RAW_IGMSPEC')+'/HD-LLS_DR1/HD-LLS_DR1.fits')
# Rename
hdlls_meta.rename_column('RA', 'RA_GROUP')
hdlls_meta.rename_column('DEC', 'DEC_GROUP')
hdlls_meta.rename_column('Z_QSO', 'zem_GROUP')
# Kludgy Table judo
hdlls_full = hdlls_meta[0:1]
spec_files = []
    # Loop to build the full survey catalog
for kk,row in enumerate(hdlls_meta):
for spec_file in row['SPEC_FILES']:
if spec_file == 'NULL':
continue
# Add to full table
hdlls_full.add_row(row)
spec_files.append(spec_file)
# Build
hdlls_full = hdlls_full[1:]
hdlls_full.remove_column('SPEC_FILES')
hdlls_full.add_column(Column(np.array(spec_files).astype(str),name='SPEC_FILE'))
# Cut on unique SPEC_FILEs
uni, uni_idx = np.unique(np.array(spec_files).astype(str), return_index=True)
# REMOVE ONE FILE (A DUPLICATE) BY HAND
mt = uni != 'HD-LLS_J130756.73+042215.5_MIKE.fits'
uni_idx = uni_idx[mt]
#
hdlls_full = hdlls_full[uni_idx]
#
nspec = len(hdlls_full)
hdlls_full['sig_zem'] = [0.]*nspec
hdlls_full['flag_zem'] = [str('UNKN')]*nspec
hdlls_full['STYPE'] = [str('QSO')]*nspec
assert chk_meta(hdlls_full, chk_cat_only=True)
# Return
return hdlls_full
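
# A minimal sketch (illustrative only, hypothetical table) of the "Table
# judo" in grab_meta() above: each source row carrying several SPEC_FILES
# entries is expanded into one row per spectrum, using a dummy seed row
# that is trimmed afterwards.
def _demo_expand_spec_files():
    import numpy as np
    from astropy.table import Table, Column
    tbl = Table()
    tbl['NAME'] = ['QSO1', 'QSO2']
    tbl['SPEC_FILES'] = [['a.fits', 'NULL'], ['b.fits', 'c.fits']]
    full = tbl[0:1]
    spec_files = []
    for row in tbl:
        for spec_file in row['SPEC_FILES']:
            if spec_file == 'NULL':
                continue
            full.add_row(row)
            spec_files.append(spec_file)
    full = full[1:]
    full.remove_column('SPEC_FILES')
    full.add_column(Column(np.array(spec_files).astype(str), name='SPEC_FILE'))
    assert len(full) == 3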
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False,
mk_test_file=False):
""" Append HD-LLS data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    chk_meta_only : bool, optional
      Only check meta file; will not write
    mk_test_file : bool, optional
      Generate the debug test file for Travis CI?
Returns
-------
"""
from specdb import defs
# Add Survey
print("Adding {:s} survey to DB".format(sname))
hdlls_grp = hdf.create_group(sname)
# Load up
Rdicts = defs.get_res_dicts()
mike_meta = grab_meta_mike()
mike_coord = SkyCoord(ra=mike_meta['RA_GROUP'], dec=mike_meta['DEC_GROUP'], unit='deg')
# Checks
if sname != 'HD-LLS_DR1':
raise IOError("Not expecting this survey..")
full_coord = SkyCoord(ra=meta['RA_GROUP'], dec=meta['DEC_GROUP'], unit='deg')
# Build spectra (and parse for meta)
if mk_test_file:
meta = meta[0:3]
nspec = len(meta)
max_npix = 210000 # Just needs to be large enough
data = init_data(max_npix, include_co=False)
# Init
full_idx = np.zeros(len(meta), dtype=int)
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
Rlist = []
wvminlist = []
wvmaxlist = []
dateobslist = []
npixlist = []
instrlist = []
gratinglist = []
telelist = []
# Loop
members = glob.glob(os.getenv('RAW_IGMSPEC')+'/{:s}/*fits'.format(sname))
kk = -1
for jj,member in enumerate(members):
if 'HD-LLS_DR1.fits' in member:
continue
kk += 1
# Extract
f = member
hdu = fits.open(f)
# Parse name
fname = f.split('/')[-1]
mt = np.where(meta['SPEC_FILE'] == fname)[0]
if mk_test_file and (jj>=3):
continue
if len(mt) != 1:
pdb.set_trace()
raise ValueError("HD-LLS: No match to spectral file?!")
else:
print('loading {:s}'.format(fname))
full_idx[kk] = mt[0]
# npix
head = hdu[0].header
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
# Double check
if kk == 0:
assert hdu[1].name == 'ERROR'
assert hdu[2].name == 'WAVELENGTH'
# Write
spec = lsio.readspec(f) # Handles dummy pixels in ESI
npix = spec.npix
        if npix > max_npix:
            raise ValueError("Spectrum has more pixels than max_npix ({:d})".format(npix))
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
#data['flux'][0][:npix] = hdu[0].data
#data['sig'][0][:npix] = hdu[1].data
#data['wave'][0][:npix] = hdu[2].data
# Meta
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
if 'HIRES' in fname:
instrlist.append('HIRES')
telelist.append('Keck-I')
gratinglist.append('BOTH')
try:
Rlist.append(set_resolution(head))
except ValueError:
# A few by hand (pulled from Table 1)
if 'J073149' in fname:
Rlist.append(Rdicts['HIRES']['C5'])
tval = datetime.datetime.strptime('2006-01-04', '%Y-%m-%d')
elif 'J081435' in fname:
Rlist.append(Rdicts['HIRES']['C1'])
tval = datetime.datetime.strptime('2006-12-26', '%Y-%m-%d') # 2008 too
elif 'J095309' in fname:
Rlist.append(Rdicts['HIRES']['C1'])
tval = datetime.datetime.strptime('2005-03-18', '%Y-%m-%d')
elif 'J113418' in fname:
Rlist.append(Rdicts['HIRES']['C5'])
tval = datetime.datetime.strptime('2006-01-05', '%Y-%m-%d')
elif 'J135706' in fname:
Rlist.append(Rdicts['HIRES']['C5'])
tval = datetime.datetime.strptime('2007-04-28', '%Y-%m-%d')
elif 'J155556.9' in fname:
Rlist.append(Rdicts['HIRES']['C5'])
tval = datetime.datetime.strptime('2005-04-15', '%Y-%m-%d')
elif 'J212329' in fname:
Rlist.append(Rdicts['HIRES']['E3'])
tval = datetime.datetime.strptime('2006-08-20', '%Y-%m-%d')
else:
pdb.set_trace()
else:
tval = datetime.datetime.strptime(head['DATE-OBS'], '%Y-%m-%d')
dateobslist.append(datetime.datetime.strftime(tval,'%Y-%m-%d'))
elif 'ESI' in fname:
instrlist.append('ESI')
telelist.append('Keck-II')
gratinglist.append('ECH')
try:
Rlist.append(set_resolution(head))
except ValueError:
print("Using R=6,000 for ESI")
Rlist.append(6000.)
try:
tval = datetime.datetime.strptime(head['DATE'], '%Y-%m-%d')
except KeyError:
if ('J223438.5' in fname) or ('J231543' in fname):
tval = datetime.datetime.strptime('2004-09-11', '%Y-%m-%d')
else:
pdb.set_trace()
dateobslist.append(datetime.datetime.strftime(tval,'%Y-%m-%d'))
elif 'MIKE' in fname: # APPROXIMATE
if 'MIKEr' in fname:
instrlist.append('MIKEr')
gratinglist.append('RED')
elif 'MIKEb' in fname:
instrlist.append('MIKEb')
gratinglist.append('BLUE')
else:
instrlist.append('MIKE')
gratinglist.append('BOTH')
telelist.append('Magellan')
sep = full_coord[mt[0]].separation(mike_coord)
imin = np.argmin(sep)
if sep[imin] > 1.*u.arcsec:
pdb.set_trace()
raise ValueError("Bad separation in MIKE")
# R and Date
Rlist.append(25000. / mike_meta['Slit'][imin])
tval = datetime.datetime.strptime(mike_meta['DATE-OBS'][imin], '%Y-%b-%d')
dateobslist.append(datetime.datetime.strftime(tval,'%Y-%m-%d'))
elif 'MAGE' in fname: # APPROXIMATE
instrlist.append('MagE')
if 'Clay' in head['TELESCOP']:
telelist.append('Magellan/Clay')
else:
telelist.append('Magellan/Baade')
gratinglist.append('N/A')
Rlist.append(set_resolution(head))
dateobslist.append(head['DATE-OBS'])
        else:  # Unrecognized instrument
            raise ValueError("Unexpected instrument in file: {:s}".format(fname))
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[kk] = data
# Add columns
meta = meta[full_idx]
nmeta = len(meta)
meta.add_column(Column([2000.]*nmeta, name='EPOCH'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column([str(date) for date in dateobslist], name='DATE-OBS'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(Rlist, name='R'))
meta.add_column(Column(np.arange(nmeta,dtype=int),name='GROUP_ID'))
meta.add_column(Column(gratinglist, name='GRATING'))
meta.add_column(Column(instrlist, name='INSTR'))
meta.add_column(Column(telelist, name='TELESCOPE'))
# v02
meta.rename_column('GRATING', 'DISPERSER')
# Add HDLLS meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2015ApJS..221....2P',
bib='prochaska+15'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: Keck+Magellan HD-LLS DR1'.format(dset)
ssa_dict = default_fields(Title, flux='normalized')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 11,684 | 34.195783 | 91 | py |
igmspec | igmspec-master/igmspec/ingest/sdss.py | """ Module to ingest SDSS II (aka SDSS) data products
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, json
import pdb
import datetime
from astropy.table import Table, Column
from astropy.time import Time
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from linetools.spectra import io as lsio
from linetools import utils as ltu
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
def get_specfil(row, dr7=False):
"""Parse the SDSS spectrum file
Requires a link to the database Class
"""
if dr7:
path = os.getenv('RAW_IGMSPEC')+'/SDSS/Schneider/'
else:
path = os.getenv('RAW_IGMSPEC')+'/SDSS/spectro_DR7/1d_26/'
# Generate file name (DR4 is different)
pnm = '{0:04d}'.format(row['PLATE'])
#fnm = '{0:03d}'.format(row['FIBERID'])
fnm = '{0:03d}'.format(row['FIBER'])
#mjd = str(row['MJD'])
mjd = str(row['SMJD'])
if dr7:
sfil = path+'spSpec-'
else:
sfil = path+pnm+'/1d/'+'spSpec-'
# Finish
specfil = sfil+mjd+'-'+pnm+'-'+fnm+'.fit.gz' # Is usually gzipped
return specfil
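
# A minimal sketch (illustrative only) of the file name get_specfil()
# produces for a hypothetical row; requires $RAW_IGMSPEC to be set.
def _demo_sdss_specfil():
    row = {'PLATE': 266, 'FIBER': 3, 'SMJD': 51630}
    assert get_specfil(row, dr7=True).endswith('spSpec-51630-0266-003.fit.gz')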
def grab_meta(hdf, old=False):
""" Grab SDSS meta Table
Returns
-------
meta
"""
from specdb.zem.utils import zem_from_radec
#sdss_meta = Table.read(os.getenv('RAW_IGMSPEC')+'/SDSS/SDSS_DR7_qso.fits.gz')
sdss_meta = Table.read(os.getenv('RAW_IGMSPEC')+'/SDSS/dr7qso.fit.gz')
nspec = len(sdss_meta)
# DATE
#t = Time(list(sdss_meta['MJD'].data), format='mjd', out_subfmt='date') # Fixes to YYYY-MM-DD
t = Time(list(sdss_meta['SMJD'].data), format='mjd', out_subfmt='date') # Fixes to YYYY-MM-DD
sdss_meta.add_column(Column(t.iso, name='DATE-OBS'))
# Add a few columns
sdss_meta.add_column(Column([2000.]*nspec, name='EPOCH'))
sdss_meta.add_column(Column([2000.]*nspec, name='R'))
sdss_meta.add_column(Column(['SDSS']*nspec, name='INSTR'))
sdss_meta.add_column(Column(['BOTH']*nspec, name='GRATING'))
sdss_meta.add_column(Column(['SDSS 2.5-M']*nspec, name='TELESCOPE'))
# Rename
if old:
# Some of these were corrected by QPQ
sdss_meta.rename_column('RAOBJ', 'RA')
sdss_meta.rename_column('DECOBJ', 'DEC')
sdss_meta.rename_column('Z_ERR', 'sig_zem')
pdb.set_trace()
else:
sdss_meta.rename_column('z', 'zem_GROUP')
sdss_meta['sig_zem'] = 0.
sdss_meta['flag_zem'] = str(' ')
# Fix zem
zem, zsource = zem_from_radec(sdss_meta['RA'], sdss_meta['DEC'], hdf['quasars'].value, toler=1.0*u.arcsec)
gdz = zem > 0.
sdss_meta['zem_GROUP'][gdz] = zem[gdz]
sdss_meta['flag_zem'] = zsource
sdss_meta['flag_zem'][~gdz] = str('SDSS-DR7')
# Sort
sdss_meta.sort('RA')
# Rename
sdss_meta.rename_column('RA', 'RA_GROUP')
sdss_meta.rename_column('DEC', 'DEC_GROUP')
# Add
sdss_meta['STYPE'] = [str('QSO')]*nspec
# Check
assert chk_meta(sdss_meta, chk_cat_only=True)
# Return
return sdss_meta
'''
def meta_for_build(old=False):
""" Load the meta info
old : bool, optional
JXP made DR7 -- Should add some aspect of the official list..
Am worried about the coordinates some..
Returns
-------
"""
sdss_meta = grab_meta()
# Cut down to unique sources
coord = SkyCoord(ra=sdss_meta['RA'], dec=sdss_meta['DEC'], unit='deg')
idx, d2d, d3d = match_coordinates_sky(coord, coord, nthneighbor=2)
dups = np.where(d2d < 0.5*u.arcsec)[0]
keep = np.array([True]*len(sdss_meta))
for idup in dups:
dcoord = SkyCoord(ra=sdss_meta['RA'][idup], dec=sdss_meta['DEC'][idup], unit='deg')
sep = dcoord.separation(coord)
isep = np.where(sep < 0.5*u.arcsec)[0]
keep[isep] = False
keep[np.min(isep)] = True # Only keep 1
sdss_meta = sdss_meta[keep]
# Cut one more (pair of QSOs)
if old:
bad_dup_c = SkyCoord(ra=193.96678*u.deg, dec=37.099741*u.deg)
coord = SkyCoord(ra=sdss_meta['RA'], dec=sdss_meta['DEC'], unit='deg')
sep = bad_dup_c.separation(coord)
assert np.sum(sep < 2*u.arcsec) == 2
badi = np.argmin(bad_dup_c.separation(coord))
keep = np.array([True]*len(sdss_meta))
keep[badi] = False
sdss_meta = sdss_meta[keep]
#
nqso = len(sdss_meta)
meta = Table()
for key in ['RA', 'DEC', 'zem', 'sig_zem', 'flag_zem']:
meta[key] = sdss_meta[key]
meta['STYPE'] = [str('QSO')]*nqso
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False, sdss_hdf=None, **kwargs):
""" Add SDSS data to the DB
Parameters
----------
hdf : hdf5 pointer
sname : str
Survey name
chk_meta_only : bool, optional
Only check meta file; will not write
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
if sdss_hdf is not None:
print("Using previously generated {:s} dataset...".format(sname))
sdss_hdf.copy(sname, hdf)
return
sdss_grp = hdf.create_group(sname)
# Load up
# Checks
if sname != 'SDSS_DR7':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 4000 # Just needs to be large enough
data = init_data(max_npix, include_co=True)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
# Read Zhu continua, wave file
cfile = os.getenv('RAW_IGMSPEC')+'/SDSS/ALLQSO_SPEC_106_continuum_nointerp.fits'
zhu_conti = Table.read(cfile)
wvfile = cfile.replace('continuum','wave')
zhu_wave = Table.read(wvfile)
#
wvminlist = []
wvmaxlist = []
npixlist = []
speclist = []
# Loop
maxpix = 0
for jj,row in enumerate(meta):
full_file = get_specfil(row)
if not os.path.isfile(full_file):
full_file = get_specfil(row, dr7=True)
# Extract
#print("SDSS: Reading {:s}".format(full_file))
# Parse name
fname = full_file.split('/')[-1]
# Generate full file
spec = lsio.readspec(full_file)
# npix
npix = spec.npix
        if npix > max_npix:
            raise ValueError("Spectrum has more pixels than max_npix ({:d})".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig','co']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Continuum
mtc = (zhu_conti['PLATE'] == row['PLATE']) & (zhu_conti['FIBER']==row['FIBER'])
mtw = (zhu_wave['PLATE'] == row['PLATE']) & (zhu_wave['FIBER']==row['FIBER'])
if np.sum(mtc) == 1:
imin = np.argmin(np.abs(zhu_wave['WAVE'][0][:,np.where(mtw)[1]]-spec.wavelength[0].value))
data['co'][0][:npix] = zhu_conti['CONTINUUM'][0][imin:npix+imin,np.where(mtc)[1]].flatten()
elif np.sum(mtc) > 1:
print("Multiple continua for plate={:d}, row={:d}. Taking the first".format(row['PLATE'], row['FIBER']))
imin = np.argmin(np.abs(zhu_wave['WAVE'][0][:,np.where(mtw)[1][0]]-spec.wavelength[0].value))
data['co'][0][:npix] = zhu_conti['CONTINUUM'][0][imin:npix+imin,np.where(mtc)[1][0]].flatten()
elif np.sum(mtc) == 0:
print("No SDSS continuum for plate={:d}, row={:d}".format(row['PLATE'], row['FIBER']))
#from xastropy.xutils import xdebug as xdb
#xdb.set_trace()
#xdb.xplot(data['wave'][0], data['flux'][0], data['co'][0])
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add SDSS meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2010AJ....139.2360S',
bib='sdss_qso_dr7'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: SDSS DR7 Quasars'.format(dset)
ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 9,499 | 33.050179 | 117 | py |
igmspec | igmspec-master/igmspec/ingest/esi_z6.py | """ Module to ingest Keck/ESI z~6 quasar survey data
Worseck et al. 2014
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os
import json
from astropy.table import Table, Column, vstack
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import units as u
from linetools.spectra import io as lsio
from linetools import utils as ltu
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
path = os.getenv('RAW_IGMSPEC')+'/ESI_z6/'
def grab_meta():
""" Grab GGG meta Table
Returns
-------
"""
# This table has units in it!
meta = Table.read(os.getenv('RAW_IGMSPEC')+'/ESI_z6/overview_data_igmspec.txt', format='ascii', delimiter='\t')
nqso = len(meta)
# Rename
meta.rename_column('RAdeg', 'RA_GROUP')
meta.rename_column('DECdeg', 'DEC_GROUP')
meta.rename_column('z', 'zem_GROUP')
meta.rename_column('instrument', 'INSTR')
meta.rename_column('telescope', 'TELESCOPE')
meta.rename_column('date', 'DATE-OBS')
#
# Add zem
meta['sig_zem'] = 0.
meta['flag_zem'] = str('ESI_z6')
meta.add_column(Column([2000.]*nqso, name='EPOCH'))
#
meta['STYPE'] = str('QSO')
meta['DISPERSER'] = str('ECH')
# Check
assert chk_meta(meta, chk_cat_only=True)
# Return
return meta
def read_spec(row):
    """ Read an ESI_z6 spectrum, building the file name from the coordinates """
    # Filename
coord = SkyCoord(ra=row['RA_GROUP'], dec=row['DEC_GROUP'], unit='deg')
filename = 'J{:s}{:s}.txt'.format(coord.ra.to_string(unit=u.hour,sep='',pad=True)[0:4],
coord.dec.to_string(sep='',pad=True,alwayssign=True)[0:5])
# Read
spec = lsio.readspec(path+filename)
# Return
return filename, spec
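
# A minimal sketch (illustrative only) of the coordinate-based file naming
# in read_spec(), for a hypothetical source at RA=150.0, DEC=+2.25 deg.
def _demo_esi_filename():
    from astropy.coordinates import SkyCoord
    from astropy import units as u
    coord = SkyCoord(ra=150.0, dec=2.25, unit='deg')
    name = 'J{:s}{:s}.txt'.format(coord.ra.to_string(unit=u.hour, sep='', pad=True)[0:4],
                                  coord.dec.to_string(sep='', pad=True, alwayssign=True)[0:5])
    assert name == 'J1000+0215.txt'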
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False):
""" Append GGG data to the h5 file
Parameters
----------
hdf : hdf5 pointer
sname : str
Survey name
meta : Table
chk_meta_only : bool, optional
Only check meta file; will not write
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
_ = hdf.create_group(sname)
# Load up
if sname != 'ESI_z6':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 30000 # Just needs to be large enough
# Init
data = init_data(max_npix, include_co=True)
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
Rlist = []
wvminlist = []
wvmaxlist = []
npixlist = []
speclist = []
gratinglist = []
telelist = []
dateobslist = []
instrlist = []
# Loop
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
full_file, spec = read_spec(row)
# Parse name
fname = full_file.split('/')[-1]
# npix
npix = spec.npix
        if npix > max_npix:
            raise ValueError("Spectrum has more pixels than max_npix ({:d})".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
data['co'][0][:npix] = spec.co.value
# Meta
#head = spec.header
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
#telelist.append(head['OBSERVAT'])
#instrlist.append(head['INSTRUME'])
#tval = Time(head['DATE'], format='isot', out_subfmt='date')
#dateobslist.append(tval.iso)
npixlist.append(npix)
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(speclist, name='SPEC_FILE'))
#meta.add_column(Column(telelist, name='TELESCOPE'))
#meta.add_column(Column(instrlist, name='INSTR'))
#meta.add_column(Column(dateobslist, name='DATE-OBS'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
#meta.add_column(Column(Rlist, name='R'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add ESI_z6 meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2014MNRAS.445.1745W',
bib='worseck+14')]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
    Title = '{:s}: Keck/ESI spectra of z~6 quasars'.format(dset)
ssa_dict = default_fields(Title, flux='flambda')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 5,455 | 28.491892 | 115 | py |
igmspec | igmspec-master/igmspec/ingest/hst_qso.py | """ Module to ingest HST quasar (HSTQSO) survey data
Ribaudo et al. 2011; Neeleman et al. 2016
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import warnings
import os, json, glob, imp
import datetime
from astropy.table import Table, Column, vstack
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from astropy.io import fits
from linetools.spectra import io as lsio
from linetools import utils as ltu
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
#igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Grab HSTQSO meta Table
Returns
-------
"""
summ_file = os.getenv('RAW_IGMSPEC')+'/HSTQSO/hstqso.lst'
hstqso_meta = Table.read(summ_file, format='ascii')
spec_files = [str(ii) for ii in hstqso_meta['SPEC_FILE'].data]
nspec = len(hstqso_meta)
# RA/DEC
radec_file = os.getenv('RAW_IGMSPEC')+'/HSTQSO/all_qso_table.txt'
radec = Table.read(radec_file, format='ascii')
# DATE-OBS
date_files = glob.glob(os.getenv('RAW_IGMSPEC')+'/HSTQSO/date_obs*')
for ss,date_file in enumerate(date_files):
if ss == 0:
tab_date = Table.read(date_file, format='ascii')
else:
tab_date = vstack([tab_date, Table.read(date_file, format='ascii')])
# RA/DEC, DATE
hstqso_meta.add_column(Column(['2000-01-01']*nspec, name='DATE-OBS'))
for jj,row in enumerate(hstqso_meta):
if row['INST'] == 'COS':
spec_files[jj] = str(row['QSO_ALT_NAME']+'_hsla.fits')
continue
# DATE
spec = row['SPEC_FILE'].split('.')[0]
mt1 = np.where(tab_date['SPEC'] == spec)[0]
if len(mt1) == 0:
print("NO DATE MATCH for {:s}!".format(spec))
pdb.set_trace()
else:
mt1 = mt1[0] # TAKING THE FIRST ONE
joe_date = tab_date['DATE-OBS'][mt1].split('-')
hstqso_meta[jj]['DATE-OBS'] = '{:s}-{:02d}-{:02d}'.format(joe_date[0], int(joe_date[1]), int(joe_date[2]))
if int(joe_date[1]) > 12:
pdb.set_trace()
# RA/DEC
if row['INST'] != 'FOS':
continue
mt = np.where(radec['File_ID'] == row['QSO_ALT_NAME'])[0]
if len(mt) == 0:
mt = np.where(radec['File_ID'] == row['QSO_NAME'])[0]
if len(mt) == 0:
print("NO RA/DEC MATCH!")
pdb.set_trace()
else:
mt = mt[0]
else:
mt = mt[0]
hstqso_meta[jj]['RA'] = radec['RA'][mt]
hstqso_meta[jj]['DEC'] = radec['DEC'][mt]
# Deal with Dups (mainly bad FOS coords)
coord = SkyCoord(ra=hstqso_meta['RA'], dec=hstqso_meta['DEC'], unit='deg')
idx, d2d, d3d = match_coordinates_sky(coord, coord, nthneighbor=2)
dups = np.where(d2d < 2.0*u.arcsec)[0] # Closest lens is ~2"
flag_dup = np.array([False]*len(hstqso_meta))
for idup in dups:
if flag_dup[idup]:
continue
dcoord = SkyCoord(ra=hstqso_meta['RA'][idup], dec=hstqso_meta['DEC'][idup], unit='deg')
sep = dcoord.separation(coord)
isep = np.where(sep < 2.0*u.arcsec)[0]
# Search for COS first
icos = np.where(hstqso_meta['INST'][isep] == 'COS')[0]
if len(icos) > 0:
hstqso_meta['RA'][isep] = hstqso_meta['RA'][isep[icos[0]]]
hstqso_meta['DEC'][isep] = hstqso_meta['DEC'][isep[icos[0]]]
flag_dup[isep] = True
else: # STIS
istis = np.where(hstqso_meta['INST'][isep] == 'STIS')[0]
if len(istis) > 0:
hstqso_meta['RA'][isep] = hstqso_meta['RA'][isep[istis[0]]]
hstqso_meta['DEC'][isep] = hstqso_meta['DEC'][isep[istis[0]]]
flag_dup[isep] = True
else: # FOS only -- taking first value
hstqso_meta['RA'][isep] = hstqso_meta['RA'][isep[0]]
hstqso_meta['DEC'][isep] = hstqso_meta['DEC'][isep[0]]
# REPLACE
hstqso_meta.rename_column('SPEC_FILE', 'ORIG_SPEC_FILE')
hstqso_meta['SPEC_FILE'] = spec_files
# RENAME
hstqso_meta.rename_column('GRATE', 'DISPERSER')
hstqso_meta.rename_column('QSO_ZEM', 'zem_GROUP')
hstqso_meta.rename_column('INST', 'INSTR')
hstqso_meta['STYPE'] = str('QSO')
hstqso_meta.rename_column('RA', 'RA_GROUP')
hstqso_meta.rename_column('DEC', 'DEC_GROUP')
# ADD
hstqso_meta.add_column(Column(['HST']*nspec, name='TELESCOPE'))
hstqso_meta['sig_zem'] = 0.
hstqso_meta['flag_zem'] = str('UNKWN')
# Check
assert chk_meta(hstqso_meta, chk_cat_only=True)
# Return
return hstqso_meta
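
# A minimal sketch (illustrative only, made-up coordinates) of the duplicate
# search in grab_meta() above: nthneighbor=2 returns each source's nearest
# *other* source, so small separations flag coordinate-level duplicates.
def _demo_find_dups():
    import numpy as np
    from astropy import units as u
    from astropy.coordinates import SkyCoord, match_coordinates_sky
    coord = SkyCoord(ra=[10.0, 10.0001, 50.0], dec=[0., 0., 0.], unit='deg')
    idx, d2d, d3d = match_coordinates_sky(coord, coord, nthneighbor=2)
    dups = np.where(d2d < 2.0*u.arcsec)[0]
    assert list(dups) == [0, 1]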
'''
def meta_for_build():
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
# Meta
hstqso_meta = grab_meta()
# Cut down to unique sources
coord = SkyCoord(ra=hstqso_meta['RA'], dec=hstqso_meta['DEC'], unit='deg')
idx, d2d, d3d = match_coordinates_sky(coord, coord, nthneighbor=2)
dups = np.where(d2d < 1.5*u.arcsec)[0] # Closest lens is ~2"
keep = np.array([True]*len(hstqso_meta))
for idup in dups:
dcoord = SkyCoord(ra=hstqso_meta['RA'][idup], dec=hstqso_meta['DEC'][idup], unit='deg')
sep = dcoord.separation(coord)
isep = np.where(sep < 1.5*u.arcsec)[0]
keep[isep] = False
keep[np.min(isep)] = True # Only keep 1
hstqso_meta = hstqso_meta[keep]
nqso = len(hstqso_meta)
#
meta = Table()
meta['RA'] = hstqso_meta['RA']
meta['DEC'] = hstqso_meta['DEC']
meta['zem'] = hstqso_meta['zem']
meta['sig_zem'] = [0.]*nqso
meta['flag_zem'] = [str('UNKWN')]*nqso
meta['STYPE'] = [str('QSO')]*nqso
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False,
mk_test_file=False):
""" Append HSTQSO data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    chk_meta_only : bool, optional
      Only check meta file; will not write
    mk_test_file : bool, optional
      Generate the debug test file for Travis CI?
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
hstz2_grp = hdf.create_group(sname)
# Checks
if sname != 'HSTQSO':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 80000 # Just needs to be large enough
data = init_data(max_npix, include_co=False)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
wvminlist = []
wvmaxlist = []
npixlist = []
Rlist = []
# Loop
#path = os.getenv('RAW_IGMSPEC')+'/KODIAQ_data_20150421/'
path = os.getenv('RAW_IGMSPEC')+'/HSTQSO/'
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
full_file = path+row['SPEC_FILE']+'.gz'
# Extract
print("HSTQSO: Reading {:s}".format(full_file))
hduf = fits.open(full_file)
head0 = hduf[0].header
spec = lsio.readspec(full_file, masking='edges')
# Parse name
fname = full_file.split('/')[-1]
# npix
npix = spec.npix
        if npix > max_npix:
            raise ValueError("Spectrum has more pixels than max_npix ({:d})".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
if 'FOS-L' in fname:
Rlist.append(300.)
elif 'FOS-H' in fname:
Rlist.append(14000.)
elif 'STIS' in fname:
if row['DISPERSER'] == 'G230L':
Rlist.append(700.)
elif row['DISPERSER'] == 'G140L':
Rlist.append(1200.)
else:
raise ValueError("Bad STIS grating")
elif 'hsla' in fname: # COS
Rlist.append(18000.)
row['DATE-OBS'] = hduf[1].data['DATEOBS'][0][0]
else:
pdb.set_trace()
raise ValueError("Missing instrument!")
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
if chk_meta_only:
continue
# Only way to set the dataset correctly
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column([2000.]*nspec, name='EPOCH'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(Rlist, name='R'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add HSTQSO meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2011ApJ...736...42R',
bib='ribuado11'),
dict(url='http://adsabs.harvard.edu/abs/2016ApJ...818..113N',
bib='neeleman16'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: HST UV spectra for surveying LLS and DLAs'.format(dset)
ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 10,146 | 33.869416 | 114 | py |
igmspec | igmspec-master/igmspec/ingest/hst_z2.py | """ Module to ingest the HST z~2 quasar survey data
O'Meara et al. 2011
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import warnings
import os, json
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from astropy.io import fits
from linetools.spectra import io as lsio
from linetools import utils as ltu
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
#igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Grab KODIAQ meta Table
Returns
-------
"""
hstz2_meta = Table.read(os.getenv('RAW_IGMSPEC')+'/HST_z2/hst_z2.ascii', format='ascii')
nspec = len(hstz2_meta)
# RA/DEC, DATE
ra = []
dec = []
for row in hstz2_meta:
        # Get RA/DEC
coord = ltu.radec_to_coord((row['ra'],row['dec']))
ra.append(coord.ra.value)
dec.append(coord.dec.value)
hstz2_meta.add_column(Column(ra, name='RA_GROUP'))
hstz2_meta.add_column(Column(dec, name='DEC_GROUP'))
# z
hstz2_meta.rename_column('zem', 'zem_GROUP')
hstz2_meta['sig_zem'] = [0.]*nspec
hstz2_meta['flag_zem'] = [str('SDSS_PIPE')]*nspec
hstz2_meta['STYPE'] = [str('QSO')]*nspec
#
hstz2_meta.rename_column('obsdate','DATE-OBS')
hstz2_meta.rename_column('tel','TELESCOPE')
hstz2_meta.rename_column('inst','INSTR')
hstz2_meta.rename_column('grating','DISPERSER')
hstz2_meta.rename_column('resolution','R')
# Check
assert chk_meta(hstz2_meta, chk_cat_only=True)
# Return
return hstz2_meta
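
# A minimal sketch (illustrative only) of the coordinate parsing used in
# grab_meta() above; radec_to_coord also accepts (ra, dec) tuples in
# decimal degrees (hypothetical values).
def _demo_radec_parse():
    from linetools import utils as ltu
    coord = ltu.radec_to_coord((150.0, 2.25))
    assert abs(coord.ra.value - 150.0) < 1e-8
    assert abs(coord.dec.value - 2.25) < 1e-8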
'''
def meta_for_build():
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
# Cut down to unique QSOs
hstz2_meta = grab_meta()
names = np.array([name[0:26] for name in hstz2_meta['qso']])
uni, uni_idx = np.unique(names, return_index=True)
hstz2_meta = hstz2_meta[uni_idx]
nqso = len(hstz2_meta)
#
meta = Table()
meta['RA'] = hstz2_meta['RA']
meta['DEC'] = hstz2_meta['DEC']
meta['zem'] = hstz2_meta['zem']
meta['sig_zem'] = [0.]*nqso
meta['flag_zem'] = [str('SDSS_PIPE')]*nqso
meta['STYPE'] = [str('QSO')]*nqso
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False,
mk_test_file=False):
""" Append HST_z2 data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    chk_meta_only : bool, optional
      Only check meta file; will not write
    mk_test_file : bool, optional
      Generate the debug test file for Travis CI?
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
hstz2_grp = hdf.create_group(sname)
# Checks
if sname != 'HST_z2':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 300 # Just needs to be large enough
data =init_data(max_npix)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
wvminlist = []
wvmaxlist = []
npixlist = []
speclist = []
# Loop
#path = os.getenv('RAW_IGMSPEC')+'/KODIAQ_data_20150421/'
path = os.getenv('RAW_IGMSPEC')+'/HST_z2/'
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
if row['INSTR'] == 'ACS':
full_file = path+row['qso']+'.fits.gz'
elif row['INSTR'] == 'WFC3':
coord = ltu.radec_to_coord((row['RA_GROUP'],row['DEC_GROUP']))
full_file = path+'/J{:s}{:s}_wfc3.fits.gz'.format(coord.ra.to_string(unit=u.hour,sep='',precision=2,pad=True),
coord.dec.to_string(sep='',pad=True,alwayssign=True,precision=1))
# Extract
print("HST_z2: Reading {:s}".format(full_file))
hduf = fits.open(full_file)
#head = hduf[0].header
spec = lsio.readspec(full_file)
# Parse name
fname = full_file.split('/')[-1]
# npix
npix = spec.npix
        if npix > max_npix:
            raise ValueError("Spectrum has more pixels than max_npix ({:d})".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
if chk_meta_only:
continue
# Only way to set the dataset correctly
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column([2000.]*nspec, name='EPOCH'))
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add HST_z2 meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2011ApJS..195...16O',
bib='omeara11')
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: HST Grism survey of z~2.5 Quasars'.format(dset)
ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 6,368 | 29.768116 | 122 | py |
igmspec | igmspec-master/igmspec/ingest/hdla100.py | """ Module to ingest HIRES DLA 100 Survey data
Neeleman et al. 2013
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os, json, glob, imp
import datetime
from astropy.table import Table, Column
from astropy import units as u
from astropy.time import Time
from linetools import utils as ltu
from linetools.spectra import io as lsio
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
from specdb.build.utils import set_resolution
igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
# Load DLA
from pyigm.surveys.dlasurvey import DLASurvey
hdla100 = DLASurvey.neeleman13_tree()
# Cut down to unique QSOs
spec_files = []
names = []
ra = []
dec = []
coords = hdla100.coord
cnt = 0
for coord in coords:
# Load
names.append('J{:s}{:s}'.format(coord.ra.to_string(unit=u.hour, sep='', pad=True, precision=2),
coord.dec.to_string(sep='', pad=True, precision=1)))
# RA/DEC
ra.append(coord.ra.value)
dec.append(coord.dec.value)
# SPEC_FILE
fname = hdla100._abs_sys[cnt]._datdict['hi res file'].split('/')[-1]
spec_files.append(fname)
cnt += 1
uni, uni_idx = np.unique(names, return_index=True)
nqso = len(uni_idx)
#
meta = Table()
meta['RA_GROUP'] = np.array(ra)[uni_idx]
meta['DEC_GROUP'] = np.array(dec)[uni_idx]
meta['zem_GROUP'] = hdla100.zem[uni_idx]
meta['sig_zem'] = [0.]*nqso
meta['flag_zem'] = [str('UNKN')]*nqso
meta['STYPE'] = [str('QSO')]*nqso
meta['SPEC_FILE'] = np.array(spec_files)[uni_idx]
# Check
assert chk_meta(meta, chk_cat_only=True)
return meta
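
# A minimal sketch (illustrative only, made-up names) of the uniqueness cut
# in grab_meta() above: np.unique with return_index=True keeps the first
# occurrence of each repeated QSO name.
def _demo_unique_names():
    import numpy as np
    names = np.array(['J0001+0001', 'J0002+0002', 'J0001+0001'])
    uni, uni_idx = np.unique(names, return_index=True)
    assert list(uni_idx) == [0, 1]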
def hdf5_adddata(hdf, sname, hdla100_meta, debug=False, chk_meta_only=False,
mk_test_file=False):
""" Append HDLA100 data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    chk_meta_only : bool, optional
      Only check meta file; will not write
    mk_test_file : bool, optional
      Generate the debug test file for Travis CI?
Returns
-------
"""
from specdb import defs
# Add Survey
print("Adding {:s} survey to DB".format(sname))
hdlls_grp = hdf.create_group(sname)
# Load up
Rdicts = defs.get_res_dicts()
# Checks
if sname != 'HDLA100':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
#if mk_test_file:
# hdla100_full = hdlls_full[0:3]
max_npix = 192000 # Just needs to be large enough
data = init_data(max_npix, include_co=False)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
nspec = len(hdla100_meta)
spec_set.resize((nspec,))
Rlist = []
wvminlist = []
wvmaxlist = []
dateobslist = []
npixlist = []
gratinglist = []
# Loop
for jj,row in enumerate(hdla100_meta):
kk = jj
# Extract
f = os.getenv('RAW_IGMSPEC')+'/HDLA100/'+row['SPEC_FILE']
spec = lsio.readspec(f)
# Parse name
fname = f.split('/')[-1]
# npix
head = spec.header
npix = spec.npix
        if npix > max_npix:
            raise ValueError("Spectrum has more pixels than max_npix ({:d})".format(npix))
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
try:
Rlist.append(set_resolution(head))
except ValueError:
raise ValueError("Header is required for {:s}".format(fname))
else:
if '/' in head['DATE-OBS']:
spl = head['DATE-OBS'].split('/')
t = Time(datetime.datetime(int(spl[2])+1900, int(spl[1]), int(spl[0])), format='datetime')
else:
t = Time(head['DATE-OBS'], format='isot', out_subfmt='date')
dateobslist.append(t.iso)
# Grating
try:
gratinglist.append(head['XDISPERS'])
except KeyError:
try:
yr = t.value.year
except AttributeError:
yr = int(t.value[0:4])
if yr <= 1997:
gratinglist.append('RED')
else:
pdb.set_trace()
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[kk] = data
# Add columns
nmeta = len(hdla100_meta)
hdla100_meta.add_column(Column([2000.]*nmeta, name='EPOCH'))
hdla100_meta.add_column(Column(npixlist, name='NPIX'))
hdla100_meta.add_column(Column([str(date) for date in dateobslist], name='DATE-OBS'))
hdla100_meta.add_column(Column(wvminlist, name='WV_MIN'))
hdla100_meta.add_column(Column(wvmaxlist, name='WV_MAX'))
hdla100_meta.add_column(Column(Rlist, name='R'))
hdla100_meta.add_column(Column(np.arange(nmeta,dtype=int),name='GROUP_ID'))
hdla100_meta.add_column(Column(gratinglist, name='DISPERSER'))
hdla100_meta['INSTR'] = ['HIRES']*nspec
hdla100_meta['TELESCOPE'] = ['Keck-I']*nspec
#hdla100_meta.rename_column('Z_QSO', 'zem')
    # Add HDLA100 meta to hdf5
if chk_meta(hdla100_meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = hdla100_meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2013ApJ...769...54N',
bib='neeleman+13'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: The Keck/HIRES Survey of 100 Damped Lya Systems'.format(dset)
ssa_dict = default_fields(Title, flux='normalized')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 6,680 | 30.663507 | 106 | py |
igmspec | igmspec-master/igmspec/ingest/utils.py | """ Module to for ingest utilities
"""
from __future__ import print_function, absolute_import, division, unicode_literals
| 124 | 19.833333 | 82 | py |
igmspec | igmspec-master/igmspec/ingest/kodiaq_two.py | """ Module to ingest KODIAQ DR2 Survey data
O'Meara et al. 2017
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os, json
import datetime
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from astropy.io import fits
from linetools import utils as ltu
from linetools.spectra import io as lsio
from specdb.build.utils import chk_meta
from specdb.build.utils import set_resolution
from specdb.build.utils import init_data
def grab_meta():
""" Grab KODIAQ meta Table
Returns
-------
"""
kodiaq_file = os.getenv('RAW_IGMSPEC')+'/KODIAQ2/KODIAQ_DR2_summary.ascii'
kodiaq_meta = Table.read(kodiaq_file, format='ascii', comment='#')
    # Verify DR2
dr2 = kodiaq_meta['kodrelease'] == 2
kodiaq_meta = kodiaq_meta[dr2]
# RA/DEC, DATE
ra = []
dec = []
dateobs = []
for row in kodiaq_meta:
        # Get RA/DEC
coord = ltu.radec_to_coord((row['sRA'],row['sDEC']))
ra.append(coord.ra.value)
dec.append(coord.dec.value)
# DATE
dvals = row['pi_date'].split('_')
tymd = str('{:s}-{:s}-{:02d}'.format(dvals[-1],dvals[1][0:3],int(dvals[2])))
tval = datetime.datetime.strptime(tymd, '%Y-%b-%d')
dateobs.append(datetime.datetime.strftime(tval,'%Y-%m-%d'))
kodiaq_meta.add_column(Column(ra, name='RA_GROUP'))
kodiaq_meta.add_column(Column(dec, name='DEC_GROUP'))
kodiaq_meta.add_column(Column(dateobs, name='DATE-OBS'))
#
kodiaq_meta['INSTR'] = 'HIRES'
kodiaq_meta['TELESCOPE'] = 'Keck-I'
kodiaq_meta['STYPE'] = str('QSO')
# z
kodiaq_meta.rename_column('zem', 'zem_GROUP')
kodiaq_meta['sig_zem'] = 0.
kodiaq_meta['flag_zem'] = str('SDSS-SIMBAD')
#
assert chk_meta(kodiaq_meta, chk_cat_only=True)
return kodiaq_meta
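
# A minimal sketch (illustrative only) of the pi_date parsing in grab_meta()
# above, for a hypothetical entry 'smith_Jul_8_2008':
def _demo_pi_date():
    import datetime
    dvals = 'smith_Jul_8_2008'.split('_')
    tymd = '{:s}-{:s}-{:02d}'.format(dvals[-1], dvals[1][0:3], int(dvals[2]))
    tval = datetime.datetime.strptime(tymd, '%Y-%b-%d')
    assert datetime.datetime.strftime(tval, '%Y-%m-%d') == '2008-07-08'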
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False):
""" Append KODIAQ data to the h5 file
Parameters
----------
hdf : hdf5 pointer
sname : str
Survey name
chk_meta_only : bool, optional
Only check meta file; will not write
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
kodiaq_grp = hdf.create_group(sname)
# Load up
# Checks
if sname != 'KODIAQ_DR2':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 60000 # Just needs to be large enough
# Init
data = init_data(max_npix, include_co=False)
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
# Lists
Rlist = []
wvminlist = []
wvmaxlist = []
gratinglist = []
npixlist = []
speclist = []
# Loop
path = os.getenv('RAW_IGMSPEC')+'/KODIAQ2/Data/'
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
full_file = path+row['qso']+'/'+row['pi_date']+'/'+row['spec_prefix']+'_f.fits'
# Extract
print("KODIAQ: Reading {:s}".format(full_file))
hduf = fits.open(full_file)
head = hduf[0].header
spec = lsio.readspec(full_file)
# Parse name
fname = full_file.split('/')[-1]
# npix
npix = spec.npix
        if npix > max_npix:
            raise ValueError("Spectrum has more pixels than max_npix ({:d})".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
if 'XDISPERS' in head.keys():
if head['XDISPERS'].strip() == 'UV':
gratinglist.append('BLUE')
else:
gratinglist.append('RED')
        else: # Original, early data
gratinglist.append('RED')
npixlist.append(npix)
try:
Rlist.append(set_resolution(head))
except ValueError:
pdb.set_trace()
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column([2000.]*nspec, name='EPOCH'))
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(Rlist, name='R'))
meta.add_column(Column(gratinglist, name='DISPERSER'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add KODIAQ meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2017AJ....154..114O',
bib='kodiaq')
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
    Title = '{:s}: Keck/HIRES KODIAQ DR2'.format(dset)
ssa_dict = default_fields(Title, flux='normalized')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 6,004 | 29.637755 | 87 | py |
igmspec | igmspec-master/igmspec/ingest/xq100.py | """ Module to ingest XQ-100 Survey data
Lopez et al. 2016
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os, glob
import imp
import json
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.table import Table, Column, vstack
from astropy.time import Time
from astropy.io import fits
from astropy import units as u
from linetools.spectra import io as lsio
from linetools import utils as ltu
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Grab XQ-100 meta Table
Returns
-------
"""
from specdb.specdb import IgmSpec
igmsp = IgmSpec()
#
xq100_table = Table.read(os.getenv('RAW_IGMSPEC')+'/XQ-100/XQ100_v1_2.fits.gz')
nqso = len(xq100_table)
# ESO meta
eso_tbl = Table.read(os.getenv('RAW_IGMSPEC')+'/XQ-100/metadata_eso_XQ100.csv', format='ascii.csv')
ar_files = eso_tbl['ARCFILE'].data
# Spectral files
spec_files = glob.glob(os.getenv('RAW_IGMSPEC')+'/XQ-100/ADP.*')
# Dummy column
xq100_coords = SkyCoord(ra=xq100_table['RA'], dec=xq100_table['DEC'], unit='deg')
matches = []
sv_spec_files = []
sv_orig_files = []
sv_rescale_files = []
for spec_file in spec_files:
if 'ADP.2016-07-15T08:22:40.682.fits' in spec_file:
print("XQ-100: Skipping summary file")
continue
# ESO file
ssfile = spec_file[spec_file.rfind('/')+1:-5]
eso_mt = np.where(ar_files == ssfile)[0]
try:
ofile = eso_tbl['ORIGFILE'][eso_mt][0]
except IndexError:
print("XQ-100: File {:s} not really in XQ100!".format(spec_file))
continue
if ('_1' in ofile) or ('_2' in ofile) or ('_3' in ofile) or ('_4' in ofile):
print("XQ-100: Skipping additional file: {:s}".format(ofile))
continue
# Match
hdu = fits.open(spec_file)
head0 = hdu[0].header
if head0['DISPELEM'] == 'UVB,VIS,NIR':
print("XQ-100: Skipping merged spectrum file")
if 'rescale' not in ofile:
print('no rescale')
pdb.set_trace()
continue
try:
coord = SkyCoord(ra=head0['RA'], dec=head0['DEC'], unit='deg')
except KeyError:
pdb.set_trace()
sep = coord.separation(xq100_coords)
imt = np.argmin(sep)
if sep[imt] > 0.1*u.arcsec:
pdb.set_trace()
raise ValueError("Bad offset")
# Save
matches.append(imt)
sv_spec_files.append(spec_file)
sv_orig_files.append(ofile)
# Finish up
xq100_meta = xq100_table[np.array(matches)]
nspec = len(xq100_meta)
# Add spec_files
xq100_meta['SPEC_FILE'] = sv_spec_files
xq100_meta['ORIG_FILE'] = sv_orig_files
# Add zem
xq100_meta['zem_GROUP'] = xq100_meta['Z_QSO']
xq100_meta['sig_zem'] = xq100_meta['ERR_ZQSO']
xq100_meta['flag_zem'] = [str('XQ-100')]*nspec
# Rename
xq100_meta.rename_column('RA','RA_GROUP')
xq100_meta.rename_column('DEC','DEC_GROUP')
# Match to Myers
myers = Table(igmsp.hdf['quasars'].value)
myers_coord = SkyCoord(ra=myers['RA'], dec=myers['DEC'], unit='deg')
xq100_coord = SkyCoord(ra=xq100_meta['RA_GROUP'], dec=xq100_meta['DEC_GROUP'], unit='deg')
idx, d2d, _ = match_coordinates_sky(xq100_coord, myers_coord, nthneighbor=1)
xq100_meta['RA_GROUP'] = myers_coord.ra.value[idx]
xq100_meta['DEC_GROUP'] = myers_coord.dec.value[idx]
# One bad one (Taking RA/DEC from Simbad)
bad_c = d2d.to('arcsec') > 20*u.arcsec
xq100_meta['RA_GROUP'][bad_c] = 215.2823
xq100_meta['DEC_GROUP'][bad_c] = -6.73232
# DATE-OBS
meanmjd = []
for row in xq100_meta:
gdm = row['MJD_OBS'] > 0.
meanmjd.append(np.mean(row['MJD_OBS'][gdm]))
t = Time(meanmjd, format='mjd', out_subfmt='date') # Fixes to YYYY-MM-DD
xq100_meta.add_column(Column(t.iso, name='DATE-OBS'))
#
xq100_meta.add_column(Column([2000.]*nspec, name='EPOCH'))
xq100_meta['STYPE'] = str('QSO')
# Sort
xq100_meta.sort('RA_GROUP')
# Check
assert chk_meta(xq100_meta, chk_cat_only=True)
#
return xq100_meta
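
# Illustrative wiring of this module's entry points (the file name and open
# mode below are assumptions for the sketch, not pipeline conventions):
#
#   import h5py
#   meta = grab_meta()
#   with h5py.File('xq100_group.hdf5', 'w') as hdf:
#       hdf5_adddata(hdf, 'XQ-100', meta)
#       add_ssa(hdf, 'XQ-100')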
'''
def meta_for_build(xq100_meta=None):
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
if xq100_meta is None:
xq100_meta = grab_meta()
# Cut down to unique QSOs
names = np.array([name[0:20] for name in xq100_meta['OBJ_NAME']])
uni, uni_idx = np.unique(names, return_index=True)
xq100_meta = xq100_meta[uni_idx]
nqso = len(xq100_meta)
#
meta = Table()
for key in ['RA', 'DEC', 'zem', 'sig_zem', 'flag_zem']:
meta[key] = xq100_meta[key]
meta['STYPE'] = [str('QSO')]*nqso
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False):
""" Append XQ-100 data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta table for the group, as returned by grab_meta()
chk_meta_only : bool, optional
Only check meta file; will not write
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
xq100_grp = hdf.create_group(sname)
if len(meta) != 300:
pdb.set_trace()
# Checks
if sname != 'XQ-100':
raise IOError("Expecting XQ-100!!")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 20000 # Just needs to be large enough
data = init_data(max_npix, include_co=True)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
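    # The lines above are the resizable-dataset idiom used throughout these
    # ingest modules: init_data() returns a single-row numpy record array
    # (wave/flux/sig[/co], each a max_npix float vector), maxshape=(None,)
    # leaves the row axis extendable, and resize((nspec,)) pre-allocates one
    # row per spectrum so the loop below can assign spec_set[jj] = data.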
Rlist = []
wvminlist = []
wvmaxlist = []
npixlist = []
speclist = []
gratinglist = []
telelist = []
instrlist = []
# Loop
maxpix = 0
for jj,row in enumerate(meta):
#
print("XQ-100: Reading {:s}".format(row['SPEC_FILE']))
spec = lsio.readspec(row['SPEC_FILE'])
# Parse name
fname = row['SPEC_FILE'].split('/')[-1]
# npix
npix = spec.npix
if npix > max_npix:
            raise ValueError("max_npix is too small for this spectrum... ({:d} pixels)".format(npix))
else:
maxpix = max(npix,maxpix)
# Continuum
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.to('AA').value
data['co'][0][:npix] = spec.co.value
# Meta
head = spec.header
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
telelist.append(head['TELESCOP'])
instrlist.append(head['INSTRUME'])
gratinglist.append(head['DISPELEM'])
npixlist.append(npix)
if gratinglist[-1] == 'NIR': # From Lopez+16
Rlist.append(4350.)
elif gratinglist[-1] == 'VIS':
Rlist.append(7450.)
elif gratinglist[-1] == 'UVB':
Rlist.append(5300.)
else:
pdb.set_trace()
            raise ValueError("Unexpected DISPELEM value: {:s}".format(gratinglist[-1]))
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(gratinglist, name='DISPERSER'))
meta.add_column(Column(telelist, name='TELESCOPE'))
meta.add_column(Column(instrlist, name='INSTR'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(Rlist, name='R'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add XQ-100 meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2016arXiv160708776L',
bib='lopez+16')]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: The XQ-100 Survey of 100 z>3 quasars with VLT/XShooter'.format(dset)
ssa_dict = default_fields(Title, flux='flambda')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 9,012 | 30.848057 | 103 | py |
igmspec | igmspec-master/igmspec/ingest/cos_dwarfs.py | """ Module to ingest COS-Dwarfs
Bordoloi et al. 2014
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import warnings
import os, json, glob, imp
from astropy.table import Table, Column, vstack
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from astropy.time import Time
from linetools.spectra import io as lsio
from linetools import utils as ltu
from pyigm.cgm.cos_halos import COSDwarfs
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
#igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Grab COS-Dwarfs meta table
    Returns
    -------
    cdwarfs_meta : Table
    """
from time import strptime
cosdwarfs = COSDwarfs()
cosdwarfs.load_sys(tfile=cosdwarfs.cdir+'/cos-dwarfs_systems.v1.1.tar.gz', chk_lowz=False)
visit_file = os.getenv('RAW_IGMSPEC')+'/COS-Dwarfs/HST_Observing_Dates.dat'
cd_visits = Table.read(visit_file,format='ascii')
# Coord
lst = [getattr(cgm_abs.igm_sys, 'coord') for cgm_abs in cosdwarfs.cgm_abs]
ra = [coord.ra.value for coord in lst]
dec = [coord.dec.value for coord in lst]
# Short names
shrt_names = [name.split('_')[0] for name in cosdwarfs.name]
cdwarfs_meta = Table()
cdwarfs_meta['RA'] = ra
cdwarfs_meta['DEC'] = dec
# RA/DEC, DATE
datet = []
for kk,row in enumerate(cdwarfs_meta):
#
name = shrt_names[kk]
mtv = np.where(cd_visits['QSO'] == name)[0]
if len(mtv) != 1:
pdb.set_trace()
else:
chv = cd_visits['Start_UT'][mtv].data[0]
icmma = chv.find(',')
datet.append('{:s}-{:02d}-{:02d}'.format(
chv[icmma+1:icmma+5], strptime(chv[:3],'%b').tm_mon,
int(chv[3:icmma])))
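            # e.g. a Start_UT like 'Nov12,2011' slices to year='2011',
            # month=11, day=12 -> '2011-11-12' (the exact raw format is
            # assumed from the slicing above).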
cdwarfs_meta.add_column(Column(datet, name='DATE-OBS'))
# Others
cdwarfs_meta.add_column(Column(['G130M/G160M']*len(cdwarfs_meta), name='DISPERSER'))
cdwarfs_meta.add_column(Column([20000.]*len(cdwarfs_meta), name='R'))
cdwarfs_meta.add_column(Column([2000.]*len(cdwarfs_meta), name='EPOCH'))
cdwarfs_meta['INSTR'] = 'COS' # Deals with padding
cdwarfs_meta['TELESCOPE'] = 'HST'
cdwarfs_meta['zem_GROUP'] = cosdwarfs.zem
cdwarfs_meta['sig_zem'] = 0. # Need to add
cdwarfs_meta['flag_zem'] = 'SDSS'
cdwarfs_meta['STYPE'] = str('QSO')
# Rename
cdwarfs_meta.rename_column('RA', 'RA_GROUP')
cdwarfs_meta.rename_column('DEC', 'DEC_GROUP')
# Check
assert chk_meta(cdwarfs_meta, chk_cat_only=True)
# Done
return cdwarfs_meta
'''
def meta_for_build():
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
cdwarfs_meta = grab_meta()
#
meta = Table()
for key in ['RA', 'DEC', 'zem', 'sig_zem', 'flag_zem']:
meta[key] = cdwarfs_meta[key]
meta['STYPE'] = str('QSO')
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False,
mk_test_file=False):
""" Append COS-Dwarfs data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta table for the group, as returned by grab_meta()
chk_meta_only : bool, optional
Only check meta file; will not write
mk_test_file : bool, optional
Generate the debug test file for Travis??
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
cdwarfs_grp = hdf.create_group(sname)
# Checks
if sname != 'COS-Dwarfs':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 20000 # Just needs to be large enough
data = init_data(max_npix, include_co=False)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
wvminlist = []
wvmaxlist = []
npixlist = []
speclist = []
# Loop
path = os.getenv('RAW_IGMSPEC')+'/COS-Dwarfs/'
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
coord = ltu.radec_to_coord((row['RA_GROUP'],row['DEC_GROUP']))
full_file = path+'/J{:s}{:s}_nbin3_coadd.fits.gz'.format(coord.ra.to_string(unit=u.hour,sep='',pad=True)[0:4],
coord.dec.to_string(sep='',pad=True,alwayssign=True)[0:5])
if 'J1051-0051' in full_file:
full_file = path+'/PG1049-005_nbin3_coadd.fits.gz'
if 'J1204+2754' in full_file:
full_file = path+'/PG1202+281_nbin3_coadd.fits.gz'
# Parse name
fname = full_file.split('/')[-1]
# Extract
print("COS-Dwarfs: Reading {:s}".format(full_file))
spec = lsio.readspec(full_file)
# npix
npix = spec.npix
if npix > max_npix:
            raise ValueError("max_npix is too small for this spectrum... ({:d} pixels)".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
if chk_meta_only:
continue
# Only way to set the dataset correctly
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(np.arange(nspec,dtype=int), name='GROUP_ID'))
    # Add COS-Dwarfs meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2014ApJ...796..136B',
bib='bordoloi+14'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: HST/COS Spectra from the COS-Dwarfs Survey'.format(dset)
ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 6,939 | 30.402715 | 118 | py |
igmspec | igmspec-master/igmspec/ingest/kodiaq.py | """ Module to ingest KODIAQ Survey data
O'Meara et al. 2016
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os, json
import imp
import datetime
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from astropy.io import fits
from linetools import utils as ltu
from linetools.spectra import io as lsio
from specdb.build.utils import chk_meta
from specdb.build.utils import set_resolution
from specdb.build.utils import init_data
igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Grab KODIAQ meta Table
    Returns
    -------
    kodiaq_meta : Table
    """
kodiaq_file = igms_path+'/data/meta/KODIAQ_DR1_summary.ascii'
kodiaq_meta = Table.read(kodiaq_file, format='ascii', comment='#')
nspec = len(kodiaq_meta)
# Verify DR1
for row in kodiaq_meta:
assert row['kodrelease'] == 1
# RA/DEC, DATE
ra = []
dec = []
dateobs = []
for row in kodiaq_meta:
# Fix DEC
# Get RA/DEC
coord = ltu.radec_to_coord((row['sRA'],row['sDEC']))
ra.append(coord.ra.value)
dec.append(coord.dec.value)
# DATE
dvals = row['pi_date'].split('_')
tymd = str('{:s}-{:s}-{:02d}'.format(dvals[-1],dvals[1][0:3],int(dvals[2])))
tval = datetime.datetime.strptime(tymd, '%Y-%b-%d')
dateobs.append(datetime.datetime.strftime(tval,'%Y-%m-%d'))
kodiaq_meta.add_column(Column(ra, name='RA_GROUP'))
kodiaq_meta.add_column(Column(dec, name='DEC_GROUP'))
kodiaq_meta.add_column(Column(dateobs, name='DATE-OBS'))
#
kodiaq_meta.add_column(Column(['HIRES']*nspec, name='INSTR'))
kodiaq_meta.add_column(Column(['Keck-I']*nspec, name='TELESCOPE'))
kodiaq_meta['STYPE'] = [str('QSO')]*nspec
# z
kodiaq_meta.rename_column('zem', 'zem_GROUP')
kodiaq_meta['sig_zem'] = [0.]*nspec
kodiaq_meta['flag_zem'] = [str('SIMBAD')]*nspec
#
assert chk_meta(kodiaq_meta, chk_cat_only=True)
return kodiaq_meta
'''
def meta_for_build():
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
kodiaq_meta = grab_meta()
# Cut down to unique QSOs
names = np.array([name[0:26] for name in kodiaq_meta['qso']])
uni, uni_idx = np.unique(names, return_index=True)
kodiaq_meta = kodiaq_meta[uni_idx]
nqso = len(kodiaq_meta)
#
meta = Table()
meta['RA'] = kodiaq_meta['RA']
meta['DEC'] = kodiaq_meta['DEC']
meta['zem'] = kodiaq_meta['zem']
meta['sig_zem'] = [0.]*nqso
meta['flag_zem'] = [str('SIMBAD')]*nqso
meta['STYPE'] = [str('QSO')]*nqso
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False):
""" Append KODIAQ data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta table for the group, as returned by grab_meta()
chk_meta_only : bool, optional
Only check meta file; will not write
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
kodiaq_grp = hdf.create_group(sname)
# Load up
# Checks
if sname != 'KODIAQ_DR1':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 200000 # Just needs to be large enough
# Init
data = init_data(max_npix, include_co=False)
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
# Lists
Rlist = []
wvminlist = []
wvmaxlist = []
gratinglist = []
npixlist = []
speclist = []
# Loop
#path = os.getenv('RAW_IGMSPEC')+'/KODIAQ_data_20150421/'
path = os.getenv('RAW_IGMSPEC')+'/KODIAQ_data_20160618/' # BZERO FIXED
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
full_file = path+row['qso']+'/'+row['pi_date']+'/'+row['spec_prefix']+'_f.fits'
# Extract
print("KODIAQ: Reading {:s}".format(full_file))
hduf = fits.open(full_file)
head = hduf[0].header
spec = lsio.readspec(full_file)
# Parse name
fname = full_file.split('/')[-1]
# npix
npix = spec.npix
if npix > max_npix:
            raise ValueError("max_npix is too small for this spectrum... ({:d} pixels)".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
if head['XDISPERS'].strip() == 'UV':
gratinglist.append('BLUE')
else:
gratinglist.append('RED')
npixlist.append(npix)
try:
Rlist.append(set_resolution(head))
except ValueError:
pdb.set_trace()
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column([2000.]*nspec, name='EPOCH'))
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(Rlist, name='R'))
meta.add_column(Column(gratinglist, name='GRATING'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add KODIAQ meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2015AJ....150..111O',
bib='kodiaq2')
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: Keck/HIRES KODIAQ DR1'.format(dset)
ssa_dict = default_fields(Title, flux='normalized')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 6,761 | 29.597285 | 87 | py |
igmspec | igmspec-master/igmspec/ingest/twodf.py | """ Module to ingest 2dF/6dF quasars
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, json
import pdb
from astropy.table import Table, Column
from astropy.io import fits
from astropy.time import Time
from linetools.spectra import io as lsio
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools import utils as ltu
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
def get_specfil(row):
"""Parse the 2QZ spectrum file
Requires a link to the database Class
"""
path = os.getenv('RAW_IGMSPEC')+'/2QZ/2df/fits/'
# RA/DEC folder
path += 'ra{:02d}_{:02d}/'.format(row['RAh00'], row['RAh00']+1)
# File name
sfil = path+row['Name']
if row['ispec'] == 1:
sfil += 'a'
elif row['ispec'] == 2:
sfil += 'b'
else:
raise ValueError("Bad ispec value")
# Finish
specfil = sfil+'.fits.gz'
return specfil
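
# Illustrative (made-up values): a row with RAh00=10, Name='J100013.2+012345'
# and ispec=1 resolves to
#   $RAW_IGMSPEC/2QZ/2df/fits/ra10_11/J100013.2+012345a.fits.gz
# where the trailing 'a'/'b' selects which of the two observations carried
# the QSO identification.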
def grab_meta():
""" Grab GGG meta Table
Catalog -- http://www.2dfquasar.org/Spec_Cat/catalogue.html
Returns
-------
meta
"""
catfil = os.getenv('RAW_IGMSPEC')+'/2QZ/2QZ393524355693.out'
tdf_meta = Table.read(catfil, format='ascii')
# Rename columns
clms = ['Name', 'RAh00', 'RAm00', 'RAs00', 'DECd00', 'DECm00', 'DECs00',
'ID','cat_name', 'Sector', 'RAh50', 'RAm50', 'RAs50', 'DECd50', 'DECm50', 'DECs50',
'UKST', 'XAPM','YAPM','RA50','DEC50','bj','u-b','b-r','Nobs',
'z1','q1','ID1','date1','fld1','fiber1','SN1',
'z2','q2','ID2','date2','fld2','fiber2','SN2',
'zprev','rflux','Xray','EBV','comm1','comm2']
for ii in range(1,46):
tdf_meta.rename_column('col{:d}'.format(ii), clms[ii-1])
# Cut down to QSOs and take only 1 spectrum
ispec = []
zspec = []
datespec = []
for row in tdf_meta:
if 'QSO' in row['ID1']:
ispec.append(1)
zspec.append(row['z1'])
sdate = str(row['date1'])
datespec.append('{:s}-{:s}-{:s}'.format(sdate[0:4],sdate[4:6], sdate[6:8]))
elif 'QSO' in row['ID2']:
ispec.append(2)
zspec.append(row['z2'])
sdate = str(row['date2'])
datespec.append('{:s}-{:s}-{:s}'.format(sdate[0:4],sdate[4:6], sdate[6:8]))
else:
ispec.append(0)
zspec.append(-1.)
datespec.append('')
tdf_meta['ispec'] = ispec
tdf_meta['zem_GROUP'] = zspec
tdf_meta['DATE'] = datespec
cut = tdf_meta['ispec'] > 0
tdf_meta = tdf_meta[cut]
nspec = len(tdf_meta)
# DATE
t = Time(list(tdf_meta['DATE'].data), format='iso', out_subfmt='date') # Fixes to YYYY-MM-DD
tdf_meta.add_column(Column(t.iso, name='DATE-OBS'))
# Add a few columns
tdf_meta.add_column(Column([2000.]*nspec, name='EPOCH'))
# Resolution
# 2df 8.6A FWHM
# R=580 at 5000A
# 6df??
tdf_meta.add_column(Column([580.]*nspec, name='R'))
#
tdf_meta.add_column(Column([str('2dF')]*nspec, name='INSTR'))
tdf_meta.add_column(Column([str('300B')]*nspec, name='DISPERSER'))
tdf_meta.add_column(Column([str('UKST')]*nspec, name='TELESCOPE'))
# Rename
rad = (tdf_meta['RAh00']*3600 + tdf_meta['RAm00']*60 + tdf_meta['RAs00'])*360./86400.
decd = np.abs(tdf_meta['DECd00']) + tdf_meta['DECm00']/60 + tdf_meta['DECs00']/3600.
# Yup the following is necessary
neg = [False]*len(rad)
for jj,row in enumerate(tdf_meta):
if '-' in row['Name']:
neg[jj] = True
#if '-00' in row['Name']:
# print('jj={:d}'.format(jj))
neg = np.array(neg)
decd[neg] = -1.*decd[neg]
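    # Needed because DECd00 is 0 for declinations between 0 and -1 deg, so
    # np.abs() plus this name-based sign flip is the only safe way to
    # recover the sign (e.g. a '-00 30 00' declination).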
tdf_meta['RA_GROUP'] = rad
tdf_meta['DEC_GROUP'] = decd
tdf_meta['sig_zem'] = [0.]*nspec
tdf_meta['flag_zem'] = str('2QZ')
tdf_meta['STYPE'] = str('QSO')
# Require a spectrum exist
gdm = np.array([True]*len(tdf_meta))
for jj,row in enumerate(tdf_meta):
full_file = get_specfil(row)
if not os.path.isfile(full_file):
print("{:s} has no spectrum. Not including".format(full_file))
gdm[jj] = False
continue
tdf_meta = tdf_meta[gdm]
# Sort
tdf_meta.sort('RA_GROUP')
# Check
assert chk_meta(tdf_meta, chk_cat_only=True)
# Return
return tdf_meta
'''
def meta_for_build():
""" Load the meta info
JXP made DR7 -- Should add some aspect of the official list..
Am worried about the coordinates some..
Returns
-------
"""
tdf_meta = grab_meta()
nqso = len(tdf_meta)
#
#
meta = Table()
for key in ['RA', 'DEC', 'zem', 'sig_zem']:
meta[key] = tdf_meta[key]
meta['flag_zem'] = [str('2QZ')]*nqso # QPQ too
meta['STYPE'] = [str('QSO')]*nqso # QPQ too
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False):
""" Add 2QZ data to the DB
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta table for the group, as returned by grab_meta()
chk_meta_only : bool, optional
Only check meta file; will not write
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
tqz_grp = hdf.create_group(sname)
# Checks
if sname != '2QZ':
raise IOError("Not expecting this survey..")
# Add zem
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 4000 # Just needs to be large enough
data = init_data(max_npix, include_co=False)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
wvminlist = []
wvmaxlist = []
npixlist = []
speclist = []
# Loop
maxpix = 0
for jj,row in enumerate(meta):
full_file = get_specfil(row)
# Parse name
fname = full_file.split('/')[-1]
# Read
hdu = fits.open(full_file)
head0 = hdu[0].header
wave = lsio.setwave(head0)
flux = hdu[0].data
var = hdu[2].data
sig = np.zeros_like(flux)
gd = var > 0.
if np.sum(gd) == 0:
print("{:s} has a bad var array. Not including".format(fname))
pdb.set_trace()
continue
sig[gd] = np.sqrt(var[gd])
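        # Pixels with non-positive variance keep sig=0, which downstream
        # linetools masking treats as bad data.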
# npix
spec = XSpectrum1D.from_tuple((wave,flux,sig))
npix = spec.npix
spec.meta['headers'][0] = head0
if npix > max_npix:
            raise ValueError("max_npix is too small for this spectrum... ({:d} pixels)".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add 2QZ meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
#
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2004MNRAS.349.1397C',
bib='2QZ')
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: The 2QZ Quasar Survey'.format(dset)
ssa_dict = default_fields(Title, flux='flambda')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 8,483 | 29.517986 | 97 | py |
igmspec | igmspec-master/igmspec/ingest/esidla.py | """ Module to ingest High z ESI DLA
Rafelski et al. 2012, 2014
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os, glob
import imp
import json
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.table import Table, Column, vstack
from astropy.time import Time
from astropy.io import fits
from astropy import units as u
from linetools.spectra import io as lsio
from linetools import utils as ltu
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Grab High-z ESI meta Table
    Returns
    -------
    esidla_meta : Table
    """
#
esidla_meta = Table.read(os.getenv('RAW_IGMSPEC')+'/HighzESIDLA/ascii_highz_rafelski.list', format='ascii')
nspec = len(esidla_meta)
# DATE
datearr = [day.split('/') for day in list(esidla_meta['ObsDate'])]
ndate = ['20'+str(day[2])+'-'+str(day[0])+'-'+str(day[1]) for day in datearr]
t = Time(ndate, out_subfmt='date') # Fixes to YYYY-MM-DD
esidla_meta.add_column(Column(t.iso, name='DATE-OBS'))
# Add zem
esidla_meta['sig_zem'] = [0.]*nspec
esidla_meta['flag_zem'] = [str('SDSS')]*nspec
#
esidla_meta.add_column(Column([2000.]*nspec, name='EPOCH'))
esidla_meta.add_column(Column(['KeckII']*nspec, name='TELESCOPE'))
esidla_meta.add_column(Column(['ESI']*nspec, name='INSTR'))
esidla_meta.add_column(Column(['ECH']*nspec, name='DISPERSER'))
# Rename
esidla_meta.rename_column('RA', 'RA_GROUP')
esidla_meta.rename_column('DEC', 'DEC_GROUP')
esidla_meta.rename_column('zem', 'zem_GROUP')
esidla_meta['STYPE'] = str('QSO')
# Sort
esidla_meta.sort('RA_GROUP')
# Check
assert chk_meta(esidla_meta, chk_cat_only=True)
#
return esidla_meta
'''
def meta_for_build(esidla_meta=None):
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
if esidla_meta is None:
esidla_meta = grab_meta()
nqso = len(esidla_meta)
#
meta = Table()
for key in ['RA', 'DEC', 'zem', 'sig_zem', 'flag_zem']:
meta[key] = esidla_meta[key]
meta['STYPE'] = [str('QSO')]*nqso
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False):
""" Append ESI data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta table for the group, as returned by grab_meta()
chk_meta_only : bool, optional
Only check meta file; will not write
Returns
-------
"""
from specdb import defs
# Add Survey
print("Adding {:s} survey to DB".format(sname))
esidla_grp = hdf.create_group(sname)
# Load up
Rdicts = defs.get_res_dicts()
# Checks
if sname != 'ESI_DLA':
raise IOError("Expecting ESI_DLA!!")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 50000 # Just needs to be large enough
data = init_data(max_npix, include_co=False)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
Rlist = []
wvminlist = []
wvmaxlist = []
npixlist = []
speclist = []
# Loop
maxpix = 0
for jj,row in enumerate(meta):
#
specfile = os.getenv('RAW_IGMSPEC')+'/HighzESIDLA/{:s}a_xF.fits'.format(
row['Name'])
print("ESI_DLA: Reading {:s}".format(specfile))
spec = lsio.readspec(specfile)
# Parse name
fname = specfile.split('/')[-1]
# npix
npix = spec.npix
if npix > max_npix:
            raise ValueError("max_npix is too small for this spectrum... ({:d} pixels)".format(npix))
else:
maxpix = max(npix,maxpix)
# Continuum
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.to('AA').value
#data['co'][0][:npix] = spec.co.value
# Meta
head = spec.header
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
try:
Rlist.append(Rdicts['ESI'][head['SLMSKNAM']])
except KeyError:
if row['Slit'] == 0.75:
Rlist.append(Rdicts['ESI']['0.75_arcsec'])
elif row['Slit'] == 0.5:
Rlist.append(Rdicts['ESI']['0.50_arcsec'])
else:
pdb.set_trace()
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(Rlist, name='R'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add ESI_DLA meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2012ApJ...755...89R',
bib='rafelski+12'),
dict(url='http://adsabs.harvard.edu/abs/2014ApJ...782L..29R',
bib='rafelski+14'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: The Keck/ESI Survey for high-z DLAs'.format(dset)
ssa_dict = default_fields(Title, flux='flambda')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 6,300 | 28.862559 | 111 | py |
igmspec | igmspec-master/igmspec/ingest/hst_cooksey.py | """ Module to ingest HST+FUSE AGN spectra
Cooksey et al. 2010
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import warnings
import os, json
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from astropy.io.fits import Header
from astropy.io import fits
from astropy.time import Time
from linetools.spectra import io as lsio
from linetools import utils as ltu
#from igmspec.ingest import utils as iiu
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
from specdb import defs
#igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Grab HST/FUSE Cooksey meta Table
    Returns
    -------
    hstc_meta : Table
    """
hstc_file = os.getenv('RAW_IGMSPEC')+'/HST_Cooksey/HSTQSO_pre-SM4.lst'
hstc_meta = Table.read(hstc_file, format='ascii')
# Cutting those without proper header (for now)
#badf = ['PKS2005-489lif1a.fits', 'NGC7469lif2a.fits', 'NGC7469sic2b.fits',
# 'NGC7469lif2b.fits', 'NGC7469sic2a.fits',
# 'AKN564lif1a.fits', 'AKN564lif1b.fits', 'AKN564lif2a.fits', 'AKN564lif2b.fits',
# 'AKN564sic1a.fits', 'AKN564sic1b.fits', 'AKN564sic2a.fits', 'AKN564sic2b.fits']
badf = []
gdm = np.array([True]*len(hstc_meta))
for ibadf in badf:
mt = np.where(hstc_meta['SPEC_FILE'] == ibadf)[0]
gdm[mt] = False
for jj, row in enumerate(hstc_meta): # Skip continua
if '_c.fits' in row['SPEC_FILE']:
gdm[jj] = False
if '_E.fits' in row['SPEC_FILE']:
gdm[jj] = False
#if row['INSTR'] == 'GHRS':
# gdm[jj] = False
hstc_meta = hstc_meta[gdm]
gdf = hstc_meta['INSTR'] == 'FUSE'
#hstc_meta = hstc_meta[gdf]
hstc_meta['TELESCOPE'] = 'FUSE'
    hstc_meta['TELESCOPE'][~gdf] = 'HST'  # index the column, not the table; a row-masked Table is a copy
#
hstc_meta.add_column(Column([2000.]*len(hstc_meta), name='EPOCH'))
hstc_meta['sig_zem'] = 0.
hstc_meta['flag_zem'] = str('UNKWN')
hstc_meta['STYPE'] = str('QSO')
# RENAME
hstc_meta.rename_column('RA', 'RA_GROUP')
hstc_meta.rename_column('DEC', 'DEC_GROUP')
hstc_meta.rename_column('zem', 'zem_GROUP')
# Check
assert chk_meta(hstc_meta, chk_cat_only=True)
return hstc_meta
'''
def meta_for_build():
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
# Cut down to unique QSOs
hstc_meta = grab_meta()
names = hstc_meta['QSO'].data
uni, uni_idx = np.unique(names, return_index=True)
hstc_meta = hstc_meta[uni_idx]
nqso = len(hstc_meta)
#
meta = Table()
meta['RA'] = hstc_meta['RA']
meta['DEC'] = hstc_meta['DEC']
meta['zem'] = hstc_meta['zem']
meta['sig_zem'] = [0.]*nqso
meta['flag_zem'] = [str('UNKWN')]*nqso
meta['STYPE'] = [str('QSO')]*nqso
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False,
mk_test_file=False):
""" Append HST/FUSE data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta table for the group, as returned by grab_meta()
chk_meta_only : bool, optional
Only check meta file; will not write
mk_test_file : bool, optional
Generate the debug test file for Travis??
Returns
-------
"""
Rdicts = defs.get_res_dicts()
# Add Survey
print("Adding {:s} survey to DB".format(sname))
hstc_grp = hdf.create_group(sname)
# Checks
if sname != 'UVpSM4':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 40000 # Just needs to be large enough
# Init
data = init_data(max_npix, include_co=True)
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
Rlist = []
wvminlist = []
wvmaxlist = []
npixlist = []
gratinglist = []
datelist = []
badf = []
badstis = []
badghrs = []
# Loop
path = os.getenv('RAW_IGMSPEC')+'/HST_Cooksey/'
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
full_file = path+'{:s}/{:s}/{:s}'.format(
row['QSO'],row['INSTR'],row['SPEC_FILE'])
# Extract
if row['INSTR'] == 'FUSE':
hext = 1
else:
hext = 0
print("HST_Cooksey: Reading {:s}".format(full_file))
try:
spec = lsio.readspec(full_file, head_exten=hext, masking='edges')
except: # BAD HEADER
hdu = fits.open(full_file)
head1 = hdu[1].header
hdu[1].verify('fix')
tbl = Table(hdu[1].data)
spec = lsio.readspec(tbl, masking='edges')
spec.meta['headers'][spec.select] = head1
# Continuum
cfile = full_file.replace('.fits', '_c.fits')
if os.path.isfile(cfile):
# Watch that mask!
gdp = ~spec.data['flux'][spec.select].mask
spec.data['co'][spec.select][gdp] = (fits.open(cfile)[0].data)[gdp]
# npix
npix = spec.npix
if npix > max_npix:
            raise ValueError("max_npix is too small for this spectrum... ({:d} pixels)".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig', 'co']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
if spec.co_is_set:
try:
data['co'][0][:npix] = spec.co.value
except ValueError:
pdb.set_trace()
# Meta
datet = None
if row['INSTR'] == 'FUSE':
if 'HISTORY' in spec.header.keys():
ncards = len(spec.header['HISTORY'])
flg_H = True
else:
flg_H = False
hdu = fits.open(full_file)
head0 = hdu[0].header
ncards = len(head0)
# Is this a good one?
if 'APER_ACT' in head0:
pass
else: # Need to fight harder for the header
# Look for untrim
untrim = full_file+'.untrim'
if not os.path.isfile(untrim):
pdb.set_trace()
# Read
hduu = fits.open(untrim)
if 'PKS2005' in untrim: # One extra kludge..
head0 = hduu[1].header
flg_H = True
ncards = len(head0['HISTORY'])
else:
head0 = hduu[0].header
ncards = len(head0)
spec.meta['headers'][spec.select] = head0
# Read from history
for ss in range(ncards):
if flg_H:
try:
card = Header.fromstring(spec.header['HISTORY'][ss])
except:
pdb.set_trace()
try:
ckey = list(card.keys())[0]
except IndexError:
continue
else:
card0 = card[0]
else:
ckey, card0 = list(spec.header.keys())[ss], spec.header[ss]
# Parse
if ckey == 'APERTURE':
aper = card0
elif ckey == 'DETECTOR':
det = card0
elif ckey == 'APER_ACT': # Extracted aperture
ext_ap = card0
elif ckey == 'DATE': # Extracted aperture
datet = card0
gratinglist.append(ext_ap+det)
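            # FUSE coadds stash the original header cards in HISTORY; each
            # entry is re-parsed with Header.fromstring() to recover
            # APERTURE/DETECTOR/APER_ACT/DATE, and the disperser tag is the
            # extracted aperture + detector.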
elif row['INSTR'] == 'STIS':
try:
datet = spec.header['DATE']
except KeyError: # handful of kludged coadds
if 'HISTORY' not in spec.header.keys():
# Grab from the other extension, e.g. PKS0405
hdu = fits.open(full_file)
head1 = hdu[1].header
spec.meta['headers'][0] = head1
for ihist in spec.header['HISTORY']:
if 'TDATEOBS' in ihist:
idash = ihist.find('-')
datet = ihist[idash-4:idash+6]
# Grating from name
i0 = full_file.rfind('_')
i1 = full_file.rfind('.fits')
gratinglist.append(full_file[i0+1:i1])
if datet is None:
pdb.set_trace()
else:
gratinglist.append(spec.header['OPT_ELEM'])
elif row['INSTR'] == 'GHRS':
# Date
try:
tmp = spec.header['DATE-OBS']
except KeyError:
# Pull header from parallel file
iM = full_file.find('M_1')
if iM <= 0:
iM = full_file.find('L_1')
ofile = full_file[:iM+1]+'_F.fits'
if not os.path.isfile(ofile):
if 'NGC4151' in ofile: # Kludge
ofile = ofile.replace('G160M', 'G160Mmd')
elif 'PKS2155-304_GHRS_G140L' in ofile: # Kludge
ofile = ofile.replace('G140L', 'G140Llo')
elif 'PKS2155-304_GHRS_G160M' in ofile: # Kludge
ofile = ofile.replace('G160M', 'G160Mmd')
else:
pdb.set_trace()
hdu = fits.open(ofile)
head0 = hdu[0].header
                spec.meta['headers'][spec.select] = head0
                tmp = head0['DATE-OBS']  # re-read the date from the recovered header
# Reformat
prs = tmp.split('/')
if prs[2][0] == '9':
yr = '19'+prs[2]
else:
yr = '20'+prs[2]
datet = yr+'-'+prs[1]+'-{:02d}'.format(int(prs[0]))
# Grating
gratinglist.append(spec.header['GRATING'])
else:
pdb.set_trace()
if datet is None:
try:
datet = spec.header['DATE-OBS']
except KeyError:
print("Missing Header for file: {:s}".format(full_file))
badf.append(full_file)
datet = '9999-9-9'
t = Time(datet, format='isot', out_subfmt='date') # Fixes to YYYY-MM-DD
datelist.append(t.iso)
try:
Rlist.append(Rdicts[row['INSTR']][gratinglist[-1]])
except KeyError:
print(gratinglist[-1])
pdb.set_trace()
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
if chk_meta_only:
continue
# Only way to set the dataset correctly
spec_set[jj] = data
#
if (len(badstis)) > 0:
raise ValueError("Somehow have a bad STIS header..")
if len(badf) > 0:
print("We still have bad FUSE headers")
pdb.set_trace()
if len(badghrs) > 0:
print("We still have bad GHRS headers")
pdb.set_trace()
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(Rlist, name='R'))
meta.add_column(Column(gratinglist, name='DISPERSER'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(datelist, name='DATE-OBS'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add UVpSM4 meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
pdb.set_trace()
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2010ApJ...708..868C',
bib='cooksey10')
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: HST and FUSE spectra of AGN and Quasars by Cooksey et al. (2010)'.format(dset)
ssa_dict = default_fields(Title, flux='flambda')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 12,809 | 33.904632 | 97 | py |
igmspec | igmspec-master/igmspec/ingest/__init__.py | 0 | 0 | 0 | py | |
igmspec | igmspec-master/igmspec/ingest/ggg.py | """ Module to ingest GGG Survey data
Worseck et al. 2014
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import os
import imp
import json
from astropy.table import Table, Column, vstack
from astropy.time import Time
from linetools.spectra import io as lsio
from linetools import utils as ltu
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Grab GGG meta Table
    Returns
    -------
    ggg_meta : Table
    """
# This table has units in it!
meta = Table.read(os.getenv('RAW_IGMSPEC')+'/GGG/GGG_catalog.fits.gz')
nqso = len(meta)
# Turn off RA/DEC units
for key in ['RA', 'DEC']:
meta[key].unit = None
meta.rename_column('RA', 'RA_GROUP')
meta.rename_column('DEC', 'DEC_GROUP')
#
# Add zem
meta['zem_GROUP'] = meta['z_gmos']
meta['sig_zem'] = meta['zerror_gmos']
meta['flag_zem'] = [str('GGG')]*nqso
meta.add_column(Column([2000.]*nqso, name='EPOCH'))
#
meta['STYPE'] = [str('QSO')]*nqso
# Double up for the two gratings
ggg_meta = vstack([meta,meta])
# Check
assert chk_meta(ggg_meta, chk_cat_only=True)
# Return
return ggg_meta
'''
def meta_for_build():
""" Generates the meta data needed for the IGMSpec build
Returns
-------
meta : Table
"""
ggg_meta = grab_meta()
# Cut down to unique QSOs
names = np.array([name[0:26] for name in ggg_meta['SDSSJ']])
uni, uni_idx = np.unique(names, return_index=True)
ggg_meta = ggg_meta[uni_idx]
nqso = len(ggg_meta)
#
meta = Table()
for key in ['RA', 'DEC', 'zem', 'sig_zem', 'flag_zem']:
meta[key] = ggg_meta[key]
meta['STYPE'] = [str('QSO')]*nqso
# Return
return meta
'''
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False):
""" Append GGG data to the h5 file
Parameters
----------
hdf : hdf5 pointer
sname : str
Survey name
meta : Table
chk_meta_only : bool, optional
Only check meta file; will not write
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
ggg_grp = hdf.create_group(sname)
# Load up
if sname != 'GGG':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 1600 # Just needs to be large enough
# Init
data = init_data(max_npix, include_co=False)
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
Rlist = []
wvminlist = []
wvmaxlist = []
npixlist = []
speclist = []
gratinglist = []
telelist = []
dateobslist = []
instrlist = []
# Loop
path = os.getenv('RAW_IGMSPEC')+'/GGG/'
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
if jj >= nspec//2:
full_file = path+row['name']+'_R400.fits.gz'
gratinglist.append('R400')
else:
full_file = path+row['name']+'_B600.fits.gz'
gratinglist.append('B600')
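        # grab_meta() vstacked the catalog with itself, so rows in the first
        # half (jj < nspec//2) are B600 spectra and the second half R400.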
# Extract
print("GGG: Reading {:s}".format(full_file))
spec = lsio.readspec(full_file)
# Parse name
fname = full_file.split('/')[-1]
# npix
npix = spec.npix
if npix > max_npix:
            raise ValueError("max_npix is too small for this spectrum... ({:d} pixels)".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
head = spec.header
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
telelist.append(head['OBSERVAT'])
instrlist.append(head['INSTRUME'])
tval = Time(head['DATE'], format='isot', out_subfmt='date')
dateobslist.append(tval.iso)
npixlist.append(npix)
if 'R400' in fname:
Rlist.append(833.)
else:
Rlist.append(940.)
# Only way to set the dataset correctly
if chk_meta_only:
continue
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(gratinglist, name='GRATING'))
meta.add_column(Column(telelist, name='TELESCOPE'))
meta.add_column(Column(instrlist, name='INSTR'))
meta.add_column(Column(dateobslist, name='DATE-OBS'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(Rlist, name='R'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
    # Add GGG meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2014MNRAS.445.1745W',
bib='worseck+14')]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: Giant Gemini GMOS Survey of z>4 quasars'.format(dset)
ssa_dict = default_fields(Title, flux='flambda')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 5,990 | 27.802885 | 84 | py |
igmspec | igmspec-master/igmspec/ingest/boss.py | """ Module to ingest SDSS III (aka BOSS) data products
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, json
import pdb
import datetime
from astropy.table import Table, Column, vstack
from astropy.time import Time
from astropy.io import fits
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from linetools import utils as ltu
from linetools.spectra import io as lsio
from specdb.build.utils import chk_for_duplicates
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
def grab_meta():
""" Grab BOSS meta Table
Returns
-------
boss_meta : Table
"""
#http://www.sdss.org/dr12/algorithms/boss-dr12-quasar-catalog/
boss_dr12 = Table.read(os.getenv('RAW_IGMSPEC')+'/BOSS/DR12Q.fits.gz')
boss_dr12['CAT'] = ['DR12Q']*len(boss_dr12)
gd = np.any([boss_dr12['Z_PIPE'] > 0., boss_dr12['Z_PCA'] > 0.],axis=0) # CUTS Z_VI
boss_dr12 = boss_dr12[gd]
#
boss_sup = Table.read(os.getenv('RAW_IGMSPEC')+'/BOSS/DR12Q_sup.fits.gz')
boss_sup['CAT'] = ['SUPGD']*len(boss_sup)
boss_supbad = Table.read(os.getenv('RAW_IGMSPEC')+'/BOSS/DR12Q_supbad.fits.gz')
boss_supbad['CAT'] = ['SUPBD']*len(boss_supbad)
# Collate
boss_meta = vstack([boss_dr12, boss_sup, boss_supbad], join_type='outer')
#
nboss = len(boss_meta)
# DATE-OBS
t = Time(list(boss_meta['MJD'].data), format='mjd', out_subfmt='date') # Fixes to YYYY-MM-DD
boss_meta.add_column(Column(t.iso, name='DATE-OBS'))
# Add columns
boss_meta.add_column(Column(['BOSS']*nboss, name='INSTR'))
boss_meta.add_column(Column(['BOTH']*nboss, name='GRATING'))
#http://www.sdss.org/instruments/boss_spectrograph/
boss_meta.add_column(Column([2100.]*nboss, name='R')) # RESOLUTION
boss_meta.add_column(Column(['SDSS 2.5-M']*nboss, name='TELESCOPE'))
# Redshift logic
boss_meta['zem_GROUP'] = boss_meta['Z_PCA']
boss_meta['sig_zem'] = boss_meta['ERR_ZPCA']
boss_meta['flag_zem'] = [str('BOSS_PCA ')]*nboss
# Fix bad redshifts
bad_pca = boss_meta['Z_PCA'] < 0.
boss_meta['zem_GROUP'][bad_pca] = boss_meta['Z_PIPE'][bad_pca]
boss_meta['sig_zem'][bad_pca] = boss_meta['ERR_ZPIPE'][bad_pca]
boss_meta['flag_zem'][bad_pca] = str('BOSS_PIPE')
# Rename RA/DEC
boss_meta.rename_column('RA', 'RA_GROUP')
boss_meta.rename_column('DEC', 'DEC_GROUP')
# STYPE
boss_meta['STYPE'] = [str('QSO')]*len(boss_meta)
# Check
assert chk_meta(boss_meta, chk_cat_only=True)
# Return
return boss_meta
'''
def meta_for_build():
""" Load the meta info
DR12 quasars : https://data.sdss.org/datamodel/files/BOSS_QSO/DR12Q/DR12Q.html
Returns
-------
"""
boss_meta = grab_meta()
# Cut down to unique
c_main = SkyCoord(ra=boss_meta['RA_SPEC'], dec=boss_meta['DEC_SPEC'], unit='deg')
idx, d2d, d3d = match_coordinates_sky(c_main, c_main, nthneighbor=2)
dups = np.where(d2d < 2*u.arcsec)[0]
flgs = np.array([True]*len(boss_meta))
#
for ii in dups:
if boss_meta[ii]['CAT'] == 'SUPBD':
flgs[ii] = False
boss_meta = boss_meta[flgs]
if not chk_for_duplicates(boss_meta):
raise ValueError("DUPLICATES IN BOSS")
#
meta = Table()
for key in ['RA', 'DEC', 'zem', 'sig_zem', 'flag_zem']:
meta[key] = boss_meta[key]
meta['STYPE'] = [str('QSO')]*len(meta)
# Return
return meta
'''
def get_specfil(row, KG=False, hiz=False):
"""Grab the BOSS file name + path
KG : bool, optional
Grab MFR continuum generated by KG
"""
pnm = '{0:04d}'.format(row['PLATE'])
fnm = '{0:04d}'.format(row['FIBERID'])
mjd = str(row['MJD'])
# KG?
if KG:
path = os.getenv('RAW_IGMSPEC')+'/BOSS/BOSSLyaDR12_spectra_v1.0/{:s}/'.format(pnm)
specfil = path+'speclya-{:04d}-{:d}-{:04d}.fits.gz'.format(row['PLATE'], row['MJD'], row['FIBERID'])
return specfil
# Generate file name (DR4 is different)
path = os.getenv('RAW_IGMSPEC')+'/BOSS/'
#
if hiz:
path += 'hiz/'
elif row['CAT'] == 'SUPGD':
path += 'Sup12/'
elif row['CAT'] == 'SUPBD':
path += 'SupBad/'
#specfil = path+'spec-{:04d}-{:d}-{:04d}.fits.gz'.format(row['PLATE'], row['MJD'], row['FIBERID'])
elif row['CAT'] == 'DR12Q':
path += 'DR12Q/'
else:
        raise ValueError("Unrecognized CAT value: {:s}".format(row['CAT']))
specfil = path+'spec-{:04d}-{:d}-{:04d}.fits.gz'.format(row['PLATE'], row['MJD'], row['FIBERID'])
# Finish
return specfil
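
# Illustrative (made-up identifiers): PLATE=3586, MJD=55181, FIBERID=16 in
# DR12Q resolves to $RAW_IGMSPEC/BOSS/DR12Q/spec-3586-55181-0016.fits.gz,
# while KG=True points at the matching BOSSLyaDR12 'speclya' continuum file.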
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False, boss_hdf=None, **kwargs):
""" Add BOSS data to the DB
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta table for the group, as returned by grab_meta()
chk_meta_only : bool, optional
Only check meta file; will not write
    boss_hdf : h5py.File, optional
      Previously generated BOSS group to copy over wholesale
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
if boss_hdf is not None:
print("Using previously generated {:s} dataset...".format(sname))
boss_hdf.copy(sname, hdf)
return
boss_grp = hdf.create_group(sname)
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 4650 # Just needs to be large enough
data = init_data(max_npix, include_co=True)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
wvminlist = []
wvmaxlist = []
speclist = []
npixlist = []
# Loop
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
full_file = get_specfil(row)
if full_file == 'None':
continue
# Read
spec = lsio.readspec(full_file)
# npix
npix = spec.npix
        # Kludge for the highest-redshift systems
if npix < 10:
full_file = get_specfil(row, hiz=True)
try:
spec = lsio.readspec(full_file)
except:
print("Missing: {:s}".format(full_file))
npix = spec.npix
elif npix > max_npix:
            raise ValueError("max_npix is too small for this spectrum... ({:d} pixels)".format(npix))
else:
maxpix = max(npix,maxpix)
# Parse name
fname = full_file.split('/')[-1]
# Fill
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# GZ Continuum -- packed in with spectrum, generated by my IDL script
try:
co = spec.co.value
except AttributeError:
co = np.zeros_like(spec.flux.value)
# KG Continuum
KG_file = get_specfil(row, KG=True)
if os.path.isfile(KG_file) and (npix>1): # Latter is for junk in GZ file. Needs fixing
hduKG = fits.open(KG_file)
KGtbl = hduKG[1].data
wvKG = 10.**KGtbl['LOGLAM']
try:
assert (wvKG[0]-spec.wavelength[0].value) < 1e-5
except:
pdb.set_trace()
gdpix = np.where(wvKG < (1+row['zem_GROUP'])*1200.)[0]
co[gdpix] = KGtbl['CONT'][gdpix]
data['co'][0][:npix] = co
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
if chk_meta_only:
continue
# Only way to set the dataset correctly
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
meta.add_column(Column([2000.]*len(meta), name='EPOCH'))
    # Add BOSS meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
pdb.set_trace()
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2015ApJS..219...12A',
bib='boss_qso_dr12'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_coflag(hdf):
""" Generate a continuum flag to add to the meta table
Parameters
----------
hdf
Returns
-------
new_meta : Table
New column 'flag_co' which is a bitwise flag
1 : GZ
2 : KG
"""
dir = os.getenv('RAW_IGMSPEC')
# Load meta
meta = Table(hdf['BOSS_DR12']['meta'].value)
nspec = len(meta)
flg_co = np.zeros(nspec).astype(int)
# Loop on chunks of 10000 spectra
chunk = 10000
nsub = 0
while(nsub < nspec):
idx = np.arange(nsub,min(nsub+chunk, nspec)).astype(int)
# Grab spectra
msk = np.array([False]*nspec)
msk[idx] = True
data = hdf['BOSS_DR12']['spec'][msk]
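        # h5py accepts a boolean selection array directly; working in
        # 10000-row chunks keeps memory bounded instead of loading every
        # BOSS spectrum at once.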
# Add GZ flag
zem = meta['zem_GROUP'][idx]
# Avoid KG region
wvlya = 1215.67*(1+zem)
wvarr = np.outer(wvlya, np.ones(data['wave'].shape[1]))
gdwv = wvarr > data['wave']
# Check co
gdco = data['co'] > 0.
gdarr = gdwv & gdco
# Finish
gd_GZ = np.sum(gdarr,axis=1) > 0
flg_co[idx[gd_GZ]] += 1
# KG -- Loop on rows
plates = meta['PLATE'][idx].data
fibers = meta['FIBERID'][idx].data
mjds = meta['MJD'][idx].data
for ii in range(len(idx)):
pnm = '{0:04d}'.format(plates[ii])
path = dir+'/BOSS/BOSSLyaDR12_spectra_v1.0/{:s}/'.format(pnm)
specfil = path+'speclya-{:04d}-{:d}-{:04d}.fits.gz'.format(plates[ii], mjds[ii], fibers[ii])
if os.path.isfile(specfil):
flg_co[idx[ii]] += 2
nsub = min(nsub+chunk, nspec)
# Finish
meta['flag_co'] = flg_co
return meta
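
# Illustrative decode of the bitwise flag (a sketch, not pipeline code):
#
#   new_meta = add_coflag(hdf)
#   has_gz = (new_meta['flag_co'] & 1) > 0   # GZ continuum present
#   has_kg = (new_meta['flag_co'] & 2) > 0   # KG continuum present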
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: BOSS DR12 Quasars'.format(dset)
ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
| 10,801 | 31.14881 | 108 | py |
igmspec | igmspec-master/igmspec/ingest/cos_halos.py | """ Module to ingest COS-Halos
Tumlinson et al. 2013
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import warnings
import os, json, glob, imp
from astropy.table import Table, Column, vstack
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from astropy.time import Time
from linetools.spectra import io as lsio
from linetools import utils as ltu
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
#igms_path = imp.find_module('igmspec')[1]
def grab_meta():
""" Grab COS-Halos meta table
    Returns
    -------
    chalos_meta : Table
    """
from time import strptime
from specdb.zem.utils import zem_from_radec
from specdb.specdb import IgmSpec
from specdb.defs import get_res_dicts
Rdicts = get_res_dicts()
igmsp = IgmSpec(db_file=os.getenv('SPECDB')+'/IGMspec_DB_v01.hdf5', skip_test=True)
summ_file = os.getenv('RAW_IGMSPEC')+'/COS-Halos/cos_halos_obs.ascii'
chalos_meta = Table.read(summ_file, format='ascii')
# RA/DEC, DATE
# Visits from this page: http://www.stsci.edu/cgi-bin/get-visit-status?id=11598&markupFormat=html
visit_file = os.getenv('RAW_IGMSPEC')+'/COS-Halos/cos_halos_visits.ascii'
ch_visits = Table.read(visit_file,format='ascii')
ra = []
dec = []
datet = []
for row in chalos_meta:
coord = ltu.radec_to_coord(row['QSO'])
ra.append(coord.ra.value)
dec.append(coord.dec.value)
#
visit = row['Visit']
mtv = np.where(ch_visits['Visit'] == visit)[0]
if len(mtv) != 1:
pdb.set_trace()
else:
chv = ch_visits['Start_UT'][mtv].data[0]
icmma = chv.find(',')
datet.append('{:s}-{:02d}-{:02d}'.format(
chv[icmma+1:icmma+5], strptime(chv[:3],'%b').tm_mon,
int(chv[3:icmma])))
chalos_meta.add_column(Column(ra, name='RA'))
chalos_meta.add_column(Column(dec, name='DEC'))
chalos_meta.add_column(Column(datet, name='DATE-OBS'))
# Others
chalos_meta.add_column(Column([' ']*len(chalos_meta), name='TELESCOPE')) # Padding
chalos_meta.add_column(Column([' ']*len(chalos_meta), name='INSTR')) # Padding for HIRES
chalos_meta.add_column(Column(['G130M/G160M']*len(chalos_meta), name='DISPERSER'))
chalos_meta.add_column(Column([20000.]*len(chalos_meta), name='R'))
chalos_meta.add_column(Column([2000.]*len(chalos_meta), name='EPOCH'))
chalos_meta['INSTR'] = 'COS' # Deals with padding
chalos_meta['TELESCOPE'] = 'HST'
# Myers for zem
zem, zsource = zem_from_radec(chalos_meta['RA'], chalos_meta['DEC'], Table(igmsp.hdf['quasars'].value))
badz = zem <= 0.
if np.sum(badz) > 0:
raise ValueError("Bad zem in COS-Halos")
chalos_meta['zem'] = zem
chalos_meta['sig_zem'] = 0. # Need to add
chalos_meta['flag_zem'] = zsource
# HIRES
hires_files = glob.glob(os.getenv('RAW_IGMSPEC')+'/COS-Halos/HIRES/J*f.fits.gz')
hires_tab = chalos_meta[0:0]
subnm = np.array([row['QSO'][4:9] for row in chalos_meta])
signs = np.array([row['QSO'][14] for row in chalos_meta])
for ifile in hires_files:
print(ifile)
fname = ifile.split('/')[-1]
mt = np.where((subnm == fname[0:5]) & (signs == fname[5]))[0]
if len(mt) != 1:
pdb.set_trace()
# Add row
hires_tab.add_row(chalos_meta[mt[0]])
hires_tab[-1]['INSTR'] = 'HIRES'
hires_tab[-1]['TELESCOPE'] = 'Keck I'
hires_tab[-1]['DISPERSER'] = 'Red'
hires_tab[-1]['R'] = Rdicts['HIRES']['C1']
# Combine
chalos_meta = vstack([chalos_meta, hires_tab])
chalos_meta['STYPE'] = str('QSO')
# Rename
chalos_meta.rename_column('RA', 'RA_GROUP')
chalos_meta.rename_column('DEC', 'DEC_GROUP')
chalos_meta.rename_column('zem', 'zem_GROUP')
# Check
assert chk_meta(chalos_meta, chk_cat_only=True)
# Done
return chalos_meta
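# Illustrative sketch only (not part of the original module): the intended
# call pattern for grab_meta(). It assumes the RAW_IGMSPEC and SPECDB
# environment variables point at local copies of the raw survey data and the
# specdb database file; the paths below are hypothetical.
#
#   import os
#   os.environ['RAW_IGMSPEC'] = '/data/raw_igmspec'   # hypothetical path
#   os.environ['SPECDB'] = '/data/specdb'             # hypothetical path
#   meta = grab_meta()
#   print(len(meta), meta['RA_GROUP'][0], meta['DEC_GROUP'][0])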
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False,
mk_test_file=False):
""" Append COS-Halos data to the h5 file
Parameters
----------
hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta table for the survey, as returned by grab_meta()
    debug : bool, optional
    chk_meta_only : bool, optional
      Only check meta file; will not write
    mk_test_file : bool, optional
      Generate the debug test file for Travis
Returns
-------
"""
# Add Survey
print("Adding {:s} survey to DB".format(sname))
chalos_grp = hdf.create_group(sname)
# Load up
# Checks
if sname != 'COS-Halos':
raise IOError("Not expecting this survey..")
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 160000 # Just needs to be large enough
data = init_data(max_npix, include_co=False)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
wvminlist = []
wvmaxlist = []
npixlist = []
speclist = []
# Loop
path = os.getenv('RAW_IGMSPEC')+'/COS-Halos/'
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
coord = ltu.radec_to_coord((row['RA_GROUP'],row['DEC_GROUP']))
if row['INSTR'].strip() == 'COS':
full_file = path+'/J{:s}{:s}_nbin3_coadd.fits.gz'.format(coord.ra.to_string(unit=u.hour,sep='',pad=True)[0:4],
coord.dec.to_string(sep='',pad=True,alwayssign=True)[0:5])
else: # HIRES
full_file = path+'/HIRES/J{:s}{:s}_f.fits.gz'.format(coord.ra.to_string(unit=u.hour,sep='',pad=True)[0:4], coord.dec.to_string(sep='',pad=True,alwayssign=True)[0:5])
# Extract
print("COS-Halos: Reading {:s}".format(full_file))
spec = lsio.readspec(full_file)
# Parse name
fname = full_file.split('/')[-1]
# npix
npix = spec.npix
if npix > max_npix:
raise ValueError("Not enough pixels in the data... ({:d})".format(npix))
else:
maxpix = max(npix,maxpix)
# Some fiddling about
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
if chk_meta_only:
continue
# Only way to set the dataset correctly
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(np.arange(nspec,dtype=int), name='GROUP_ID'))
    # Add COS-Halos meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2013ApJ...777...59T',
bib='tumlinson+13'),
dict(url='http://adsabs.harvard.edu/abs/2013ApJS..204...17W',
bib='werk+13')
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: Quasar Spectra from the COS-Halos Survey'.format(dset)
ssa_dict = default_fields(Title, flux='flambda', fxcalib='ABSOLUTE')
hdf[dset]['meta'].attrs['SSA_COS'] = json.dumps(ltu.jsonify(ssa_dict))
# HIRES
ssa_dict = default_fields(Title, flux='normalized')
hdf[dset]['meta'].attrs['SSA_HIRES'] = json.dumps(ltu.jsonify(ssa_dict))
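# Illustrative sketch only: a guess at the end-to-end ingestion flow for this
# module (the real specdb build scripts drive these calls); the output
# filename is hypothetical.
#
#   import h5py
#   meta = grab_meta()
#   with h5py.File('IGMspec_DB_example.hdf5', 'w') as hdf:
#       hdf5_adddata(hdf, 'COS-Halos', meta)
#       add_ssa(hdf, 'COS-Halos')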
| 8,296 | 34.762931 | 177 | py |
igmspec | igmspec-master/igmspec/ingest/tests/test_ingest.py | # Module to run tests on ingest scripts
import os
import pytest
from igmspec.ingest.hdlls import grab_meta
#def data_path(filename):
# data_dir = os.path.join(os.path.dirname(__file__), 'files')
# return os.path.join(data_dir, filename)
def test_hdlls():
if os.getenv('RAW_IGMSPEC') is None:
assert True
return
#
meta = grab_meta()
assert len(meta) == 145
| 400 | 18.095238 | 64 | py |
igmspec | igmspec-master/igmspec/ingest/tests/__init__.py | 0 | 0 | 0 | py | |
igmspec | igmspec-master/papers/v02_release/py/igmspec_v02_tabs.py | # Module for Tables for the igmspec v02 paper
# Imports
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, sys
import json, yaml
import pdb
from astropy.table import Table
#from astropy import units as u
from specdb.specdb import IgmSpec
# Local
#sys.path.append(os.path.abspath("../Analysis/py"))
#import lls_sample as lls_s
#def mktab_all_lines -- LateX table listing all measured transitions
# Summary table of the NHI models
def mktab_datasets(outfil='tab_datasets.tex'):
""" Generate a Table summarizing the datasets
Parameters
----------
    outfil : str
      Name of the output LaTeX file
Returns
-------
"""
# Setup [confirm v02 eventually]
igmsp = IgmSpec()
groups = igmsp.groups
groups.sort()
# Open
tbfil = open(outfil, 'w')
# Header
tbfil.write('\\clearpage\n')
tbfil.write('\\begin{table}[ht]\n')
tbfil.write('\\caption{{\\it igmspec} DATA GROUPS \\label{tab:datasets}}\n')
#tbfil.write('\\tabletypesize{\\tiny}\n')
tbfil.write('\\begin{tabular}{lcccccc}\n')
tbfil.write('Group & $N_{\\rm source}^a$ \n')
tbfil.write('& $N_{\\rm spec}^b$ & $\\lambda_{\\rm min}$ (\\AA) \n')
tbfil.write('& $\\lambda_{\\rm max}$ (\\AA) & $R^c$ & Flux$^d$\\\\ \n')
#tbfil.write('& References & Website \n')
#tbfil.write('} \n')
tbfil.write('\\hline \n')
# Looping on systems
restrict = False
for survey in groups:
if survey == 'quasars':
continue
# Restrict
#if survey != 'HD-LLS_DR1':
# continue
if restrict:
if survey in ['BOSS_DR12', 'SDSS_DR7']:
continue
print("Working on survey={:s}".format(survey))
# Setup
meta = Table(igmsp.hdf[survey]['meta'].value)
try:
ssa = json.loads(igmsp.hdf[survey+'/meta'].attrs['SSA'])
except KeyError:
if survey not in ['COS-Halos']:
pdb.set_trace()
fluxc = 'MIXED'
else:
fluxc = ssa['FluxCalib']
# Survey
msurvey = survey.replace('_','\\_')
tbfil.write(msurvey)
# N sources
uniq = np.unique(meta['IGM_ID'])
tbfil.write('& {:d}'.format(len(uniq)))
# N spectra
tbfil.write('& {:d}'.format(len(meta)))
# Wave min
sig = igmsp.hdf[survey]['spec']['sig']
gds = sig > 0.
gdwv = igmsp.hdf[survey]['spec']['wave'][gds]
tbfil.write('& {:d}'.format(int(np.round(np.min(gdwv)))))
# Wave max
tbfil.write('& {:d}'.format(int(np.round(np.max(gdwv)))))
# R
tbfil.write('& {:d}'.format(int(np.round(np.median(meta['R'])))))
# Flux
tbfil.write('& {:s}'.format(fluxc))
# Write
tbfil.write('\\\\ \n')
# End
tbfil.write('\\hline \n')
#tbfil.write('\\enddata \n')
#tbfil.write('\\tablecomments{This table is available as a YAML file at ')
#tbfil.write('http://blah')
#tbfil.write('} \n')
tbfil.write('\\multicolumn{6}{l}{{$^a$}{Number of unique sources in the dataset. }} \\\\ \n')
tbfil.write('\\multicolumn{6}{l}{{$^b$}{Number of unique spectra in the dataset. }} \\\\ \n')
tbfil.write('\\multicolumn{6}{l}{{$^c$}{Characteristic FWHM resolution of the spectra. }} \\\\ \n')
tbfil.write('\\multicolumn{6}{l}{{$^d$}{Indicates whether the data are fluxed (absolute or relative) or normalized. The COS-Halos spectra include both fluxed (COS) and normalized (HIRES) spectra.}} \\\\ \n')
#tbfil.write('\\tablenotetext{a}{Number of positive detections constraining the model.}')
# End
tbfil.write('\\end{tabular} \n')
tbfil.write('\\end{table} \n')
tbfil.close()
#### ########################## #########################
#### ########################## #########################
#### ########################## #########################
# Command line execution
if __name__ == '__main__':
flg_tab = 0
flg_tab += 2**0 # Datasets
#flg_tab += 2**1 # Ionization models
#flg_tab += 2**2 # Edits to COS-Halos
# NHI fits
if (flg_tab % 2**1) >= 2**0:
mktab_datasets()
| 4,190 | 27.317568 | 211 | py |
igmspec | igmspec-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# igmspec documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 13 13:39:35 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../igmspec'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Napoleon settings
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'igmspec'
copyright = u'2016, Prochaska, and Associates'
author = u'Prochaska, and Associates'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinx_rtd_theme'
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'globaltoc.html', 'relations.html', 'sourcelink.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'igmspecdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'igmspec.tex', u'igmspec Documentation',
u'Prochaska, and Associates', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'igmspec', u'igmspec Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'igmspec', u'igmspec Documentation',
     author, 'igmspec', 'A database of publicly available IGM spectra.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 10,057 | 31.031847 | 80 | py |
neural-splines | neural-splines-main/fit-grid.py | import argparse
import numpy as np
import point_cloud_utils as pcu
import torch
import tqdm
from scipy.ndimage import binary_erosion
from skimage.measure import marching_cubes
from neural_splines import load_point_cloud, point_cloud_bounding_box, fit_model_to_pointcloud, eval_model_on_grid, \
voxel_chunks, points_in_bbox, affine_transform_pointcloud, get_weights
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument("input_point_cloud", type=str, help="Path to the input point cloud to reconstruct.")
argparser.add_argument("num_nystrom_samples", type=int, default=-1,
help="Number of Nyström samples to use for kernel ridge regression. "
"If negative, don't use Nyström sampling."
"This is the number of basis centers to use to represent the final function. "
"If this value is too small, the reconstruction can miss details in the input. "
"Values between 10-100 times sqrt(N) (where N = number of input points) are "
"generally good depending on the complexity of the input shape.")
argparser.add_argument("grid_size", type=int,
help="When reconstructing the mesh, use this many voxels along the longest side of the "
"bounding box.")
argparser.add_argument("cells_per_axis", type=int,
help="Number of cells per axis to split the input along")
argparser.add_argument("--trim", type=float, default=-1.0,
help="If set to a positive value, trim vertices of the reconstructed mesh whose nearest "
"point in the input is greater than this value. The units of this argument are voxels "
"(where the grid_size determines the size of a voxel) Default is -1.0.")
argparser.add_argument("--overlap", type=float, default=0.25,
help="By how much should each grid cell overlap as a fraction of the bounding "
"box diagonal. Default is 0.25")
argparser.add_argument("--weight-type", type=str, default='trilinear',
help="How to interpolate predictions in overlapping cells. Must be one of 'trilinear' "
"or 'none', where 'trilinear' interpolates using a partition of unity defined using a"
"bicubic spline and 'none' does not interpolate overlapping cells. "
"Default is 'trilinear'.")
argparser.add_argument("--min-pts-per-cell", type=int, default=0,
help="Ignore cells with fewer points than this value. Default is zero.")
argparser.add_argument("--eps", type=float, default=0.05,
help="Perturbation amount for finite differencing in voxel units. i.e. we perturb points by "
"eps times the diagonal length of a voxel "
"(where the grid_size determines the size of a voxel). "
"To approximate the gradient of the function, we sample points +/- eps "
"along the normal direction.")
argparser.add_argument("--scale", type=float, default=1.1,
help="Reconstruct the surface in a bounding box whose diameter is --scale times bigger than"
" the diameter of the bounding box of the input points. Defaults is 1.1.")
argparser.add_argument("--regularization", type=float, default=1e-10,
help="Regularization penalty for kernel ridge regression. Default is 1e-10.")
argparser.add_argument("--nystrom-mode", type=str, default="blue-noise",
help="How to generate nystrom samples. Default is 'k-means'. Must be one of "
"(1) 'random': choose Nyström samples at random from the input, "
"(2) 'blue-noise': downsample the input with blue noise to get Nyström samples, or "
"(3) 'k-means': use k-means clustering to generate Nyström samples. "
"Default is 'blue-noise'")
argparser.add_argument("--voxel-downsample-threshold", type=int, default=150_000,
help="If the number of input points is greater than this value, downsample it by "
"averaging points and normals within voxels on a grid. The size of the voxel grid is "
"determined via the --grid-size argument. Default is 150_000."
"NOTE: This can massively speed up reconstruction for very large point clouds and "
"generally won't throw away any details.")
argparser.add_argument("--kernel", type=str, default="neural-spline",
help="Which kernel to use. Must be one of 'neural-spline', 'spherical-laplace', or "
"'linear-angle'. Default is 'neural-spline'."
"NOTE: The spherical laplace is a good approximation to the neural tangent kernel"
"(see https://arxiv.org/pdf/2007.01580.pdf for details)")
argparser.add_argument("--seed", type=int, default=-1, help="Random number generator seed to use.")
argparser.add_argument("--out", type=str, default="recon.ply", help="Path to file to save reconstructed mesh in.")
argparser.add_argument("--save-grid", action="store_true",
help="If set, save the function evaluated on a voxel grid to {out}.grid.npy "
"where out is the value of the --out argument.")
argparser.add_argument("--save-points", action="store_true",
help="If set, save the tripled input points, their occupancies, and the Nyström samples "
"to an npz file named {out}.pts.npz where out is the value of the --out argument.")
argparser.add_argument("--cg-max-iters", type=int, default=20,
help="Maximum number of conjugate gradient iterations. Default is 20.")
argparser.add_argument("--cg-stop-thresh", type=float, default=1e-5,
help="Stop threshold for the conjugate gradient algorithm. Default is 1e-5.")
argparser.add_argument("--dtype", type=str, default="float64",
help="Scalar type of the data. Must be one of 'float32' or 'float64'. "
"Warning: float32 may not work very well for complicated inputs.")
argparser.add_argument("--outer-layer-variance", type=float, default=0.001,
help="Variance of the outer layer of the neural network from which the neural "
"spline kernel arises from. Default is 0.001.")
argparser.add_argument("--use-abs-units", action="store_true",
help="If set, then use absolute units instead of voxel units for --eps and --trim.")
argparser.add_argument("--verbose", action="store_true", help="Spam your terminal with debug information")
args = argparser.parse_args()
if args.dtype == "float64":
dtype = torch.float64
elif args.dtype == "float32":
dtype = torch.float32
else:
raise ValueError(f"invalid --dtype argument. Must be one of 'float32' or 'float64' but got {args.dtype}")
if args.seed > 0:
seed = args.seed
else:
seed = np.random.randint(2 ** 32 - 1)
torch.manual_seed(seed)
np.random.seed(seed)
print("Using random seed", seed)
x, n = load_point_cloud(args.input_point_cloud, dtype=dtype)
scaled_bbox = point_cloud_bounding_box(x, args.scale)
out_grid_size = torch.round(scaled_bbox[1] / scaled_bbox[1].max() * args.grid_size).to(torch.int32)
voxel_size = scaled_bbox[1] / out_grid_size # size of one voxel
# Downsample points to grid resolution if there are enough points
if x.shape[0] > args.voxel_downsample_threshold:
print("Downsampling input point cloud to voxel resolution.")
x, n, _ = pcu.downsample_point_cloud_voxel_grid(voxel_size, x.numpy(), n.numpy(),
min_bound=scaled_bbox[0],
max_bound=scaled_bbox[0] + scaled_bbox[1])
x, n = torch.from_numpy(x), torch.from_numpy(n)
# Voxel grid to store the output
out_grid = torch.zeros(*out_grid_size, dtype=torch.float32)
out_mask = torch.zeros(*out_grid_size, dtype=torch.bool)
print(f"Fitting {x.shape[0]} points using {args.cells_per_axis ** 3} cells")
# Iterate over each grid cell
tqdm_bar = tqdm.tqdm(total=args.cells_per_axis ** 3)
current_num_points = 0 # The number of points in this cell (used to log to the tqdm bar)
for cell_idx, cell_vmin, cell_vmax in voxel_chunks(out_grid_size, args.cells_per_axis):
tqdm_bar.set_postfix({"Cell": str(cell_idx), "Num Points": current_num_points})
# Bounding box of the cell in world coordinates
cell_vox_size = cell_vmax - cell_vmin
cell_bbox = scaled_bbox[0] + cell_vmin * voxel_size, cell_vox_size * voxel_size
# If there are no points in this region, then skip it
mask_cell = points_in_bbox(x, cell_bbox)
if mask_cell.sum() <= max(args.min_pts_per_cell, 0):
tqdm_bar.update(1)
continue
# Amount of voxels by which to pad each cell in each direction
cell_pad_vox = torch.round(0.5 * args.overlap * out_grid_size.to(torch.float64) / args.cells_per_axis)
# Minimum and maximum voxel indices of the padded cell
cell_pvmin = torch.maximum(cell_vmin - cell_pad_vox, torch.zeros(3).to(cell_vmin)).to(torch.int32)
cell_pvmax = torch.minimum(cell_vmax + cell_pad_vox, torch.tensor(out_grid.shape).to(cell_vmin)).to(torch.int32)
# Bounding box and point mask for the padded cell
cell_pad_amount = cell_pad_vox * voxel_size
padded_cell_bbox = cell_bbox[0] - cell_pad_amount, cell_bbox[1] + 2.0 * cell_pad_amount
mask_padded_cell = points_in_bbox(x, padded_cell_bbox)
# Center the cell so it lies in [-0.5, 0.5]^3
tx = -padded_cell_bbox[0] - 0.5 * padded_cell_bbox[1], 1.0 / torch.max(padded_cell_bbox[1])
x_cell = x[mask_padded_cell].clone()
n_cell = n[mask_padded_cell].clone()
x_cell = affine_transform_pointcloud(x_cell, tx)
current_num_points = x_cell.shape[0]
tqdm_bar.set_postfix({"Cell": str(cell_idx), "Num Points": current_num_points})
# Cell trilinear blending weights, and index range for which voxels to reconstruct
weights, idxmin, idxmax = get_weights(cell_vmin, cell_vmax, cell_pvmin, cell_pvmax, args.weight_type)
# Finite differencing epsilon in world units
if args.use_abs_units:
eps_world_coords = args.eps
else:
eps_world_coords = args.eps * torch.norm(voxel_size).item()
# Fit the model and evaluate it on the subset of voxels corresponding to this cell
cell_model, _ = fit_model_to_pointcloud(x_cell, n_cell,
num_ny=args.num_nystrom_samples, eps=eps_world_coords,
kernel=args.kernel, reg=args.regularization, ny_mode=args.nystrom_mode,
cg_max_iters=args.cg_max_iters, cg_stop_thresh=args.cg_stop_thresh,
outer_layer_variance=args.outer_layer_variance,
verbosity_level=7 if not args.verbose else 0,
normalize=False)
cell_recon = eval_model_on_grid(cell_model, scaled_bbox, tx, out_grid_size,
cell_vox_min=idxmin, cell_vox_max=idxmax, print_message=False)
w_cell_recon = weights * cell_recon
out_grid[idxmin[0]:idxmax[0], idxmin[1]:idxmax[1], idxmin[2]:idxmax[2]] += w_cell_recon
out_mask[cell_vmin[0]:cell_vmax[0], cell_vmin[1]:cell_vmax[1], cell_vmin[2]:cell_vmax[2]] = True
tqdm_bar.update(1)
out_grid[torch.logical_not(out_mask)] = 1.0
if args.save_grid:
np.savez(args.out + ".grid", grid=out_grid.detach().cpu().numpy(), mask=out_mask.detach().cpu().numpy(),
bbox=[b.numpy() for b in scaled_bbox])
# Erode the mask so we don't get weird boundaries
    eroded_mask = binary_erosion(out_mask.numpy().astype(bool), np.ones([3, 3, 3]).astype(bool))
v, f, n, c = marching_cubes(out_grid.numpy(), level=0.0, mask=eroded_mask, spacing=voxel_size,
gradient_direction='ascent')
v += scaled_bbox[0].numpy() + 0.5 * voxel_size.numpy()
# Possibly trim regions which don't contain samples
if args.trim > 0.0:
# Trim distance in world coordinates
if args.use_abs_units:
trim_dist_world = args.trim
else:
trim_dist_world = args.trim * torch.norm(voxel_size).item()
nn_dist, _ = pcu.k_nearest_neighbors(v, x.numpy(), k=2)
nn_dist = nn_dist[:, 1]
f_mask = np.stack([nn_dist[f[:, i]] < trim_dist_world for i in range(f.shape[1])], axis=-1)
f_mask = np.all(f_mask, axis=-1)
f = f[f_mask]
pcu.save_mesh_vfn(args.out, v, f, n)
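# Example invocation (illustrative only; the input file and parameter values
# are hypothetical and should be tuned to your data):
#
#   python fit-grid.py bunny.ply 10000 256 4 --overlap 0.25 --trim 5.0 \
#       --out bunny_recon.ply
#
# This fits up to 10000 Nystrom centers per occupied cell on a 4x4x4 grid of
# cells, reconstructs on a grid with 256 voxels along the longest side, and
# trims faces farther than 5 voxels from the input samples.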
if __name__ == "__main__":
main()
| 13,776 | 60.231111 | 120 | py |
neural-splines | neural-splines-main/trim-surface.py | import argparse
import numpy as np
import point_cloud_utils as pcu
import torch
from neural_splines.geometry import point_cloud_bounding_box
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument("input_points", type=str)
argparser.add_argument("mesh", type=str)
argparser.add_argument("grid_size", type=int,
help="When trimming the mesh, use this many voxels along the longest side of the "
"bounding box. This is used to determine the size of a voxel and "
"hence the units of distance to use. You should set this to the save value you used in "
"fit.py or fit-grid.py")
argparser.add_argument("trim_distance", type=float,
help="Trim vertices of the reconstructed mesh whose nearest "
"point in the input is greater than this value. The units of this argument are voxels "
"(where the cells_per_axis determines the size of a voxel) Default is -1.0.")
argparser.add_argument("--scale", type=float, default=1.1,
help="Pad the bounding box of the input point cloud by a factor if --scale. "
"i.e. the the diameter of the padded bounding box is --scale times bigger than the "
"diameter of the bounding box of the input points. Defaults is 1.1.")
argparser.add_argument("--out", type=str, default="trimmed.ply", help="Path to file to save trim mesh to.")
argparser.add_argument("--use-abs-units", action="store_true",
help="If set, then use absolute units instead of voxel units for the trim distance.")
args = argparser.parse_args()
print(f"Loading input point cloud {args.input_points}")
p = pcu.load_mesh_v(args.input_points)
scaled_bbox = point_cloud_bounding_box(torch.from_numpy(p), args.scale)
out_grid_size = np.round(scaled_bbox[1].numpy() / scaled_bbox[1].max().item() * args.grid_size).astype(np.int32)
voxel_size = scaled_bbox[1] / out_grid_size # size of one voxel
print(f"Loading reconstructed mesh {args.mesh}")
v, f, n = pcu.load_mesh_vfn(args.mesh)
print("Trimming mesh...")
# Trim distance in world coordinates
if args.use_abs_units:
trim_dist_world = args.trim_distance
else:
trim_dist_world = args.trim_distance * torch.norm(voxel_size).item()
nn_dist, _ = pcu.k_nearest_neighbors(v, p, k=2)
nn_dist = nn_dist[:, 1]
f_mask = np.stack([nn_dist[f[:, i]] < trim_dist_world for i in range(f.shape[1])], axis=-1)
f_mask = np.all(f_mask, axis=-1)
f = f[f_mask]
print("Saving trimmed mesh...")
pcu.save_mesh_vfn(args.out, v, f, n)
print("Done!")
if __name__ == "__main__":
main()
| 2,884 | 47.083333 | 120 | py |
neural-splines | neural-splines-main/fit.py | import argparse
import numpy as np
import point_cloud_utils as pcu
import torch
from skimage.measure import marching_cubes
from neural_splines import load_point_cloud, fit_model_to_pointcloud, eval_model_on_grid, point_cloud_bounding_box
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument("input_point_cloud", type=str, help="Path to the input point cloud to reconstruct.")
argparser.add_argument("num_nystrom_samples", type=int, default=-1,
help="Number of Nyström samples to use for kernel ridge regression. "
"If negative, don't use Nyström sampling."
"This is the number of basis centers to use to represent the final function. "
"If this value is too small, the reconstruction can miss details in the input. "
"Values between 10-100 times sqrt(N) (where N = number of input points) are "
"generally good depending on the complexity of the input shape.")
argparser.add_argument("grid_size", type=int,
help="When reconstructing the mesh, use this many voxels along the longest side of the "
"bounding box. Default is 128.")
argparser.add_argument("--trim", type=float, default=-1.0,
help="If set to a positive value, trim vertices of the reconstructed mesh whose nearest "
"point in the input is greater than this value. The units of this argument are voxels "
"(where the grid_size determines the size of a voxel) Default is -1.0.")
argparser.add_argument("--eps", type=float, default=0.05,
help="Perturbation amount for finite differencing in voxel units. i.e. we perturb points by "
"eps times the diagonal length of a voxel "
"(where the grid_size determines the size of a voxel). "
"To approximate the gradient of the function, we sample points +/- eps "
"along the normal direction.")
argparser.add_argument("--scale", type=float, default=1.1,
help="Reconstruct the surface in a bounding box whose diameter is --scale times bigger than"
" the diameter of the bounding box of the input points. Defaults is 1.1.")
argparser.add_argument("--regularization", type=float, default=1e-10,
help="Regularization penalty for kernel ridge regression. Default is 1e-10.")
argparser.add_argument("--nystrom-mode", type=str, default="blue-noise",
help="How to generate nystrom samples. Default is 'k-means'. Must be one of "
"(1) 'random': choose Nyström samples at random from the input, "
"(2) 'blue-noise': downsample the input with blue noise to get Nyström samples, or "
"(3) 'k-means': use k-means clustering to generate Nyström samples. "
"Default is 'blue-noise'")
argparser.add_argument("--voxel-downsample-threshold", type=int, default=150_000,
help="If the number of input points is greater than this value, downsample it by "
"averaging points and normals within voxels on a grid. The size of the voxel grid is "
"determined via the --grid-size argument. Default is 150_000."
"NOTE: This can massively speed up reconstruction for very large point clouds and "
"generally won't throw away any details.")
argparser.add_argument("--kernel", type=str, default="neural-spline",
help="Which kernel to use. Must be one of 'neural-spline', 'spherical-laplace', or "
"'linear-angle'. Default is 'neural-spline'."
"NOTE: The spherical laplace is a good approximation to the neural tangent kernel"
"(see https://arxiv.org/pdf/2007.01580.pdf for details)")
argparser.add_argument("--seed", type=int, default=-1, help="Random number generator seed to use.")
argparser.add_argument("--out", type=str, default="recon.ply", help="Path to file to save reconstructed mesh in.")
argparser.add_argument("--save-grid", action="store_true",
help="If set, save the function evaluated on a voxel grid to {out}.grid.npy "
"where out is the value of the --out argument.")
argparser.add_argument("--save-points", action="store_true",
help="If set, save the tripled input points, their occupancies, and the Nyström samples "
"to an npz file named {out}.pts.npz where out is the value of the --out argument.")
argparser.add_argument("--cg-max-iters", type=int, default=20,
help="Maximum number of conjugate gradient iterations. Default is 20.")
argparser.add_argument("--cg-stop-thresh", type=float, default=1e-5,
help="Stop threshold for the conjugate gradient algorithm. Default is 1e-5.")
argparser.add_argument("--dtype", type=str, default="float64",
help="Scalar type of the data. Must be one of 'float32' or 'float64'. "
"Warning: float32 may not work very well for complicated inputs.")
argparser.add_argument("--outer-layer-variance", type=float, default=0.001,
help="Variance of the outer layer of the neural network from which the neural "
"spline kernel arises from. Default is 0.001.")
argparser.add_argument("--use-abs-units", action="store_true",
help="If set, then use absolute units instead of voxel units for --eps and --trim.")
argparser.add_argument("--verbose", action="store_true", help="Spam your terminal with debug information")
args = argparser.parse_args()
if args.dtype == "float64":
dtype = torch.float64
elif args.dtype == "float32":
dtype = torch.float32
else:
raise ValueError(f"invalid --dtype argument. Must be one of 'float32' or 'float64' but got {args.dtype}")
if args.seed > 0:
seed = args.seed
else:
seed = np.random.randint(2 ** 32 - 1)
torch.manual_seed(seed)
np.random.seed(seed)
print("Using random seed", seed)
x, n = load_point_cloud(args.input_point_cloud, dtype=dtype)
scaled_bbox = point_cloud_bounding_box(x, args.scale)
out_grid_size = torch.round(scaled_bbox[1] / scaled_bbox[1].max() * args.grid_size).to(torch.int32)
voxel_size = scaled_bbox[1] / out_grid_size # size of one voxel
# Downsample points to grid resolution if there are enough points
if x.shape[0] > args.voxel_downsample_threshold:
print("Downsampling input point cloud to voxel resolution.")
x, n, _ = pcu.downsample_point_cloud_voxel_grid(voxel_size, x.numpy(), n.numpy(),
min_bound=scaled_bbox[0],
max_bound=scaled_bbox[0] + scaled_bbox[1])
x, n = torch.from_numpy(x), torch.from_numpy(n)
# Finite differencing epsilon in world units
if args.use_abs_units:
eps_world_coords = args.eps
else:
eps_world_coords = args.eps * torch.norm(voxel_size).item()
model, tx = fit_model_to_pointcloud(x, n, num_ny=args.num_nystrom_samples, eps=eps_world_coords,
kernel=args.kernel, reg=args.regularization, ny_mode=args.nystrom_mode,
cg_max_iters=args.cg_max_iters, cg_stop_thresh=args.cg_stop_thresh,
outer_layer_variance=args.outer_layer_variance)
recon = eval_model_on_grid(model, scaled_bbox, tx, out_grid_size)
v, f, n, _ = marching_cubes(recon.numpy(), level=0.0, spacing=voxel_size)
v += scaled_bbox[0].numpy() + 0.5 * voxel_size.numpy()
# Possibly trim regions which don't contain samples
if args.trim > 0.0:
# Trim distance in world coordinates
if args.use_abs_units:
trim_dist_world = args.trim
else:
trim_dist_world = args.trim * torch.norm(voxel_size).item()
nn_dist, _ = pcu.k_nearest_neighbors(v, x.numpy(), k=2)
nn_dist = nn_dist[:, 1]
f_mask = np.stack([nn_dist[f[:, i]] < trim_dist_world for i in range(f.shape[1])], axis=-1)
f_mask = np.all(f_mask, axis=-1)
f = f[f_mask]
pcu.save_mesh_vfn(args.out, v.astype(np.float32), f.astype(np.int32), n.astype(np.float32))
if args.save_grid:
np.savez(args.out + ".grid", grid=recon.detach().cpu().numpy(), bbox=[b.numpy() for b in scaled_bbox])
if args.save_points:
x_ny = model.ny_points_[:, :3] if model.ny_points_ is not None else None
np.savez(args.out + ".pts",
x=x.detach().cpu().numpy(),
n=n.detach().cpu().numpy(),
eps=args.eps,
x_ny=x_ny.detach().cpu().numpy())
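# Example invocation (illustrative only; the input file and parameter values
# are hypothetical):
#
#   python fit.py bunny.ply 15000 256 --trim 5.0 --out bunny_recon.ply
#
# Per the argument help above, num_nystrom_samples around 10-100 * sqrt(N)
# for N input points is a reasonable starting point; increase it if the
# reconstruction misses fine details.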
if __name__ == "__main__":
main()
| 9,506 | 60.733766 | 120 | py |
neural-splines | neural-splines-main/neural_splines/falkon_kernels.py | import functools
from abc import ABC
from typing import Optional
import cupy as cp
import numpy as np
import torch
from falkon.kernels import Kernel, KeopsKernelMixin
from falkon.options import FalkonOptions
from falkon.sparse.sparse_tensor import SparseTensor
from torch.utils.dlpack import to_dlpack
def _extract_float(d):
if isinstance(d, torch.Tensor):
try:
# tensor.item() works if tensor is a scalar, otherwise it throws
# a value error.
return d.item()
except ValueError:
raise ValueError("Item is not a scalar")
else:
try:
return float(d)
except TypeError:
raise TypeError("Item must be a scalar or a tensor.")
class NeuralSplineKernel(Kernel, KeopsKernelMixin, ABC):
kernel_type = "angle"
def __init__(self, variance: float = 1.0, opt: Optional[FalkonOptions] = None):
super().__init__("NeuralSpline", self.kernel_type, opt)
self.debug = opt.debug if opt is not None else False
self.variance = _extract_float(variance)
def extra_mem(self):
return {
# We transpose X2 in _apply
'nd': 0,
'md': 1,
# Norm results in prepare
'm': 0,
'n': 0,
# We do a copy in _apply
'nm': 1,
}
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt):
if self.debug:
print(f"NeuralSpline._keops_mmv_impl(X1={X1.shape}, X2={X2.shape}, v, kernel, out, opt)")
theta = 'two * Atan2(Norm2(Norm2(Y) * X - Norm2(X) * Y), Norm2(Norm2(Y) * X + Norm2(X) * Y))'
norm_xy = '(Norm2(X) * Norm2(Y))'
j01 = f'({norm_xy} * (Sin({theta}) + (one + variance) * (pi - {theta}) * Cos({theta})))'
formula = f'({j01} / pi) * v'
aliases = [
'X = Vi(%d)' % (X1.shape[1]),
'Y = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'pi = Pm(1)',
'variance = Pm(1)',
'one = Pm(1)',
'two = Pm(1)'
]
other_vars = [torch.tensor([np.pi]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([self.variance]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([1.0]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([2.0]).to(dtype=X1.dtype, device=X1.device)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def _decide_mmv_impl(self, X1, X2, v, opt):
if self.keops_can_handle_mmv(X1, X2, v, opt):
return self._keops_mmv_impl
else:
return super()._decide_mmv_impl(X1, X2, v, opt)
def _decide_dmmv_impl(self, X1, X2, v, w, opt):
if self.keops_can_handle_dmmv(X1, X2, v, w, opt):
return functools.partial(self.keops_dmmv_helper, mmv_fn=self._keops_mmv_impl)
else:
return super()._decide_dmmv_impl(X1, X2, v, w, opt)
def _prepare(self, X1, X2, **kwargs):
if self.debug:
print(f"NeuralSpline._prepare(X1={X1.shape}, X2={X2.shape}, *kwargs)")
return []
def _prepare_sparse(self, X1: SparseTensor, X2: SparseTensor):
raise NotImplementedError("NeuralSpline does not implement sparse prepare")
def _apply(self, X1: torch.Tensor, X2: torch.Tensor, out: torch.Tensor):
if self.debug:
print(f"NeuralSpline._apply(X1={X1.shape}, X2={X2.shape}, out={out.shape})")
kernel_code = r'''
#define PI (DTYPE) (3.1415926535897932384626433832795028841971693993751058209749445923078164062)
#define ONE (DTYPE) (1.0)
extern "C" __global__
void stable_kernel(const DTYPE* x1, const DTYPE* x2, DTYPE* out, const double variance,
const int N, int M, int D) {
const int I = (blockIdx.x * blockDim.x) + threadIdx.x;
const int J = (blockIdx.y * blockDim.y) + threadIdx.y;
if (I >= N || J >= M) {
return;
}
DTYPE norm_x = (DTYPE) 0.0; //normf(D, &x1[I*D]);
DTYPE norm_y = (DTYPE) 0.0; //normf(D, &x2[J*D]);
#pragma unroll
for (int k = 0; k < D; k += 1) {
norm_x = fma(x1[I * D + k], x1[I * D + k], norm_x);
norm_y = fma(x2[J * D + k], x2[J * D + k], norm_y);
}
norm_x = sqrt(norm_x);
norm_y = sqrt(norm_y);
DTYPE arg1 = (DTYPE) 0.0;
DTYPE arg2 = (DTYPE) 0.0;
#pragma unroll
for (int k = 0; k < D; k += 1) {
DTYPE x1_ik = x1[I * D + k];
DTYPE x2_jk = x2[J * D + k];
DTYPE a1 = norm_y * x1_ik - norm_x * x2_jk;
DTYPE a2 = norm_y * x1_ik + norm_x * x2_jk;
arg1 = fma(a1, a1, arg1);
arg2 = fma(a2, a2, arg2);
}
arg1 = sqrt(arg1);
arg2 = sqrt(arg2);
DTYPE angle = 2.0 * atan2(arg1, arg2);
DTYPE norm_xy = norm_x * norm_y;
DTYPE cos_angle = cos(angle);
DTYPE sin_angle = sin(angle);
DTYPE opv = ONE + (DTYPE)(variance);
DTYPE K = norm_xy * (sin_angle + opv * (PI - angle) * cos_angle) / PI;
out[I * M + J] = K;
}
'''
assert X1.dtype == X2.dtype == out.dtype, "X1, X2, and out don't have the same dtype"
assert X1.device == X2.device == out.device, "X1, X2, and out are not on the same device"
assert out.device.index is not None, "None device index"
if X1.dtype == torch.float32:
str_dtype = "float"
cupy_dtype = cp.float32
elif X1.dtype == torch.float64:
str_dtype = "double"
cupy_dtype = cp.float64
else:
raise ValueError("Invalid dtype must be float32 or float64")
kernel_code = kernel_code.replace("DTYPE", str_dtype)
kernel = cp.RawKernel(kernel_code, 'stable_kernel')
# The .contiguous should be a no-op in both these cases, but add them in for good measure
X1 = X1.contiguous()
X2 = X2.T.contiguous()
# Convert X1 and X2 to CuPy arrays.
x1cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X1))
x2cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X2))
with cp.cuda.Device(out.device.index):
outcp = cp.zeros((out.shape[0], out.shape[1]), dtype=cupy_dtype)
# Run the CUDA kernel to build the matrix K
pt_dim = int(X1.shape[1])
dims = int(X1.shape[0]), int(X2.shape[0])
threads_per_block = (16, 16) # TODO: Maybe hardcoding this is bad
blocks_per_grid = tuple((dims[i] + threads_per_block[i] - 1) // threads_per_block[i] for i in range(2))
kernel(blocks_per_grid, threads_per_block, (x1cp, x2cp, outcp, self.variance, dims[0], dims[1], pt_dim))
cp.cuda.stream.get_current_stream().synchronize() # Need to synchronize so we can copy to PyTorch
# print("COPYING CUPY OUT TO PYTORCH")
# print("OUT CUPY\n", outcp)
# Copy the kernel back into the output PyTorch tensor
outcp_dlpack = outcp.toDlpack()
out_dlpack = torch.utils.dlpack.from_dlpack(outcp_dlpack)
out.copy_(out_dlpack)
# print("OUT PYTORCH\n", out)
# rand_idx_i, rand_idx_j = np.random.randint(X1.shape[0]), np.random.randint(X2.shape[0])
# xi, xj = X1[rand_idx_i].detach().cpu().numpy(), X2[rand_idx_j].detach().cpu().numpy()
# nxi, nxj = np.linalg.norm(xi), np.linalg.norm(xj)
# angle1, angle2 = np.linalg.norm(nxj * xi - nxi * xj), np.linalg.norm(nxj * xi + nxi * xj)
# angle = 2.0 * np.arctan2(angle1, angle2)
# kij = nxi * nxj * (np.sin(angle) + (1.0 + self.variance) * (np.pi - angle) * np.cos(angle)) / np.pi
# print(np.abs(kij - out[rand_idx_i, rand_idx_j].item()))
def _apply_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor):
raise NotImplementedError("NeuralSpline does not implement sparse apply")
def _finalize(self, A: torch.Tensor, d):
if self.debug:
print(f"NeuralSpline._finalize(A={A.shape}, d)")
return A
def __str__(self):
return f"NeuralSplineKernel()"
def __repr__(self):
return self.__str__()
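def _neural_spline_kernel_reference(x, y, variance=1.0): # pragma: no cover
    """Plain-NumPy reference for a single entry of the kernel above; a sketch
    for sanity-checking values, not used by Falkon. x and y are 1D numpy
    vectors of equal length."""
    nx, ny = np.linalg.norm(x), np.linalg.norm(y)
    # Numerically stable angle between x and y (same form as the CUDA kernel)
    theta = 2.0 * np.arctan2(np.linalg.norm(ny * x - nx * y),
                             np.linalg.norm(ny * x + nx * y))
    return nx * ny * (np.sin(theta) +
                      (1.0 + variance) * (np.pi - theta) * np.cos(theta)) / np.pi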
class LaplaceKernelSphere(Kernel, KeopsKernelMixin, ABC):
kernel_type = "angle"
def __init__(self, alpha, gamma, opt: Optional[FalkonOptions] = None):
super().__init__("LaplaceKernelSphere", self.kernel_type, opt)
self.debug = opt.debug if opt is not None else False
self.alpha = _extract_float(alpha)
self.gamma = _extract_float(gamma)
def extra_mem(self):
return {
# We transpose X2 in _apply
'nd': 0,
'md': 1,
# Norm results in prepare
'm': 0,
'n': 0,
# We do a copy in _apply
'nm': 1,
}
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt):
if self.debug:
print("LaplaceKernelSphere._keops_mmv_impl(X1, X2, v, kernel, out, opt)")
theta = 'two * Atan2(Norm2(Norm2(Y) * X - Norm2(X) * Y), Norm2(Norm2(Y) * X + Norm2(X) * Y))'
norm_xy = '(Norm2(X) * Norm2(Y))'
j01 = f'({norm_xy} * (Exp(alpha * Powf(one - Cos({theta}), gamma))))'
formula = f'({j01}) * v'
aliases = [
'X = Vi(%d)' % (X1.shape[1]),
'Y = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'alpha = Pm(1)',
'gamma = Pm(1)',
'one = Pm(1)',
'two = Pm(1)',
]
other_vars = [torch.tensor([self.alpha]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([self.gamma]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([1.0]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([2.0]).to(dtype=X1.dtype, device=X1.device)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def _decide_mmv_impl(self, X1, X2, v, opt):
if self.keops_can_handle_mmv(X1, X2, v, opt):
return self._keops_mmv_impl
else:
return super()._decide_mmv_impl(X1, X2, v, opt)
def _decide_dmmv_impl(self, X1, X2, v, w, opt):
if self.keops_can_handle_dmmv(X1, X2, v, w, opt):
return functools.partial(self.keops_dmmv_helper, mmv_fn=self._keops_mmv_impl)
else:
return super()._decide_dmmv_impl(X1, X2, v, w, opt)
def _prepare(self, X1, X2, **kwargs):
if self.debug:
print("LaplaceKernelSphere._prepare(X1, X2, *kwargs)")
return []
def _prepare_sparse(self, X1: SparseTensor, X2: SparseTensor):
raise NotImplementedError("LaplaceKernelSphere does not implement sparse prepare")
def _apply(self, X1: torch.Tensor, X2: torch.Tensor, out: torch.Tensor):
if self.debug:
print("LaplaceKernelSphere._apply(X1, X2, out)")
kernel_code = r'''
#define PI (DTYPE) (3.1415926535897932384626433832795028841971693993751058209749445923078164062)
#define ONE (DTYPE) (1.0)
extern "C" __global__
void stable_kernel(const DTYPE* x1, const DTYPE* x2, DTYPE* out, const double alpha, double gamma,
const int N, int M, int D) {
const int I = (blockIdx.x * blockDim.x) + threadIdx.x;
const int J = (blockIdx.y * blockDim.y) + threadIdx.y;
if (I >= N || J >= M) {
return;
}
DTYPE norm_x = (DTYPE) 0.0; //normf(D, &x1[I*D]);
DTYPE norm_y = (DTYPE) 0.0; //normf(D, &x2[J*D]);
#pragma unroll
for (int k = 0; k < D; k += 1) {
norm_x = fma(x1[I * D + k], x1[I * D + k], norm_x);
norm_y = fma(x2[J * D + k], x2[J * D + k], norm_y);
}
norm_x = sqrt(norm_x);
norm_y = sqrt(norm_y);
DTYPE arg1 = (DTYPE) 0.0;
DTYPE arg2 = (DTYPE) 0.0;
#pragma unroll
for (int k = 0; k < D; k += 1) {
DTYPE x1_ik = x1[I * D + k];
DTYPE x2_jk = x2[J * D + k];
DTYPE a1 = norm_y * x1_ik - norm_x * x2_jk;
DTYPE a2 = norm_y * x1_ik + norm_x * x2_jk;
arg1 = fma(a1, a1, arg1);
arg2 = fma(a2, a2, arg2);
}
arg1 = sqrt(arg1);
arg2 = sqrt(arg2);
DTYPE angle = 2.0 * atan2(arg1, arg2);
DTYPE norm_xy = norm_x * norm_y;
DTYPE cos_angle = cos(angle);
DTYPE K = norm_xy * exp((DTYPE) alpha * pow(ONE - cos_angle, (DTYPE) gamma));
out[I * M + J] = K;
}
'''
assert X1.dtype == X2.dtype == out.dtype, "X1, X2, and out don't have the same dtype"
assert X1.device == X2.device == out.device, "X1, X2, and out are not on the same device"
assert out.device.index is not None, "None device index"
if X1.dtype == torch.float32:
str_dtype = "float"
cupy_dtype = cp.float32
elif X1.dtype == torch.float64:
str_dtype = "double"
cupy_dtype = cp.float64
else:
raise ValueError("Invalid dtype must be float32 or float64")
kernel_code = kernel_code.replace("DTYPE", str_dtype)
kernel = cp.RawKernel(kernel_code, 'stable_kernel')
# The .contiguous should be a no-op in both these cases, but add them in for good measure
X1 = X1.contiguous()
X2 = X2.T.contiguous()
# Convert X1 and X2 to CuPy arrays.
x1cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X1))
x2cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X2))
with cp.cuda.Device(out.device.index):
outcp = cp.zeros((out.shape[0], out.shape[1]), dtype=cupy_dtype)
# Run the CUDA kernel to build the matrix K
pt_dim = int(X1.shape[1])
dims = int(X1.shape[0]), int(X2.shape[0])
threads_per_block = (16, 16) # TODO: Maybe hardcoding this is bad
blocks_per_grid = tuple((dims[i] + threads_per_block[i] - 1) // threads_per_block[i] for i in range(2))
kernel(blocks_per_grid, threads_per_block,
(x1cp, x2cp, outcp, self.alpha, self.gamma, dims[0], dims[1], pt_dim))
cp.cuda.stream.get_current_stream().synchronize() # Need to synchronize so we can copy to PyTorch
# Copy the kernel back into the output PyTorch tensor
outcp_dlpack = outcp.toDlpack()
out_dlpack = torch.utils.dlpack.from_dlpack(outcp_dlpack)
out.copy_(out_dlpack)
def _apply_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor):
raise NotImplementedError("LaplaceKernelSphere does not implement sparse apply")
def _finalize(self, A, d):
if self.debug:
print("LaplaceKernelSphere._finalize(A, d)")
return A
def __str__(self):
return f"LaplaceKernelSphere(alpha={self.alpha})"
def __repr__(self):
return self.__str__()
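def _laplace_sphere_kernel_reference(x, y, alpha, gamma): # pragma: no cover
    """Plain-NumPy reference for one entry of the spherical Laplace kernel
    above: |x||y| * exp(alpha * (1 - cos(theta))**gamma). A sketch for
    sanity-checking the KeOps/CuPy implementations, not used by Falkon."""
    nx, ny = np.linalg.norm(x), np.linalg.norm(y)
    theta = 2.0 * np.arctan2(np.linalg.norm(ny * x - nx * y),
                             np.linalg.norm(ny * x + nx * y))
    return nx * ny * np.exp(alpha * (1.0 - np.cos(theta)) ** gamma)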
class LinearAngleKernel(Kernel, KeopsKernelMixin, ABC):
kernel_type = "angle"
def __init__(self, multiply_norm=False, opt: Optional[FalkonOptions] = None):
super().__init__("LinearAngleKernel", self.kernel_type, opt)
self.debug = opt.debug if opt is not None else False
self.multiply_norm = multiply_norm
def extra_mem(self):
return {
# We transpose X2 in _apply
'nd': 0,
'md': 1,
# Norm results in prepare
'm': 0,
'n': 0,
# We do a copy in _apply
'nm': 1,
}
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt):
if self.debug:
print(f"LinearAngleKernel._keops_mmv_impl(X1={X1.shape}, X2={X2.shape}, v, kernel, out, opt)")
theta = 'two * Atan2(Norm2(Norm2(Y) * X - Norm2(X) * Y), Norm2(Norm2(Y) * X + Norm2(X) * Y))'
if self.multiply_norm:
norm_xy = '(Norm2(X) * Norm2(Y))'
j01 = f'({norm_xy} * (pi - {theta}))'
else:
j01 = f'(pi - {theta})'
formula = f'({j01} / pi) * v'
aliases = [
'X = Vi(%d)' % (X1.shape[1]),
'Y = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'pi = Pm(1)',
'two = Pm(1)'
]
other_vars = [torch.tensor([np.pi]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([2.0]).to(dtype=X1.dtype, device=X1.device)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def _decide_mmv_impl(self, X1, X2, v, opt):
if self.keops_can_handle_mmv(X1, X2, v, opt):
return self._keops_mmv_impl
else:
return super()._decide_mmv_impl(X1, X2, v, opt)
def _decide_dmmv_impl(self, X1, X2, v, w, opt):
if self.keops_can_handle_dmmv(X1, X2, v, w, opt):
return functools.partial(self.keops_dmmv_helper, mmv_fn=self._keops_mmv_impl)
else:
return super()._decide_dmmv_impl(X1, X2, v, w, opt)
def _prepare(self, X1, X2, **kwargs):
if self.debug:
print(f"LinearAngleKernel._prepare(X1={X1.shape}, X2={X2.shape}, *kwargs)")
return []
def _prepare_sparse(self, X1: SparseTensor, X2: SparseTensor):
raise NotImplementedError("LinearAngleKernel does not implement sparse prepare")
def _apply(self, X1: torch.Tensor, X2: torch.Tensor, out: torch.Tensor):
if self.debug:
print(f"LinearAngleKernel._apply(X1={X1.shape}, X2={X2.shape}, out={out.shape})")
kernel_code = r'''
#define PI (DTYPE) (3.1415926535897932384626433832795028841971693993751058209749445923078164062)
#define ONE (DTYPE) (1.0)
__MUL_NORM_DEFINE__
extern "C" __global__
void stable_kernel(const DTYPE* x1, const DTYPE* x2, DTYPE* out,
const int N, int M, int D) {
const int I = (blockIdx.x * blockDim.x) + threadIdx.x;
const int J = (blockIdx.y * blockDim.y) + threadIdx.y;
if (I >= N || J >= M) {
return;
}
DTYPE norm_x = (DTYPE) 0.0; //normf(D, &x1[I*D]);
DTYPE norm_y = (DTYPE) 0.0; //normf(D, &x2[J*D]);
#pragma unroll
for (int k = 0; k < D; k += 1) {
norm_x = fma(x1[I * D + k], x1[I * D + k], norm_x);
norm_y = fma(x2[J * D + k], x2[J * D + k], norm_y);
}
norm_x = sqrt(norm_x);
norm_y = sqrt(norm_y);
DTYPE arg1 = (DTYPE) 0.0;
DTYPE arg2 = (DTYPE) 0.0;
#pragma unroll
for (int k = 0; k < D; k += 1) {
DTYPE x1_ik = x1[I * D + k];
DTYPE x2_jk = x2[J * D + k];
DTYPE a1 = norm_y * x1_ik - norm_x * x2_jk;
DTYPE a2 = norm_y * x1_ik + norm_x * x2_jk;
arg1 = fma(a1, a1, arg1);
arg2 = fma(a2, a2, arg2);
}
arg1 = sqrt(arg1);
arg2 = sqrt(arg2);
DTYPE angle = 2.0 * atan2(arg1, arg2);
#ifdef MULTIPLY_NORM
DTYPE norm_xy = norm_x * norm_y;
DTYPE K = norm_xy * (PI - angle) / PI;
#else
DTYPE K = (PI - angle) / PI;
#endif
out[I * M + J] = K;
}
'''
assert X1.dtype == X2.dtype == out.dtype, "X1, X2, and out don't have the same dtype"
assert X1.device == X2.device == out.device, "X1, X2, and out are not on the same device"
assert out.device.index is not None, "None device index"
if X1.dtype == torch.float32:
str_dtype = "float"
cupy_dtype = cp.float32
elif X1.dtype == torch.float64:
str_dtype = "double"
cupy_dtype = cp.float64
else:
raise ValueError("Invalid dtype must be float32 or float64")
kernel_code = kernel_code.replace("DTYPE", str_dtype)
if self.multiply_norm:
kernel_code = kernel_code.replace("__MUL_NORM_DEFINE__", "#define MULTIPLY_NORM\n")
else:
kernel_code = kernel_code.replace("__MUL_NORM_DEFINE__", "\n")
kernel = cp.RawKernel(kernel_code, 'stable_kernel')
# The .contiguous should be a no-op in both these cases, but add them in for good measure
X1 = X1.contiguous()
X2 = X2.T.contiguous()
# Convert X1 and X2 to CuPy arrays.
x1cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X1))
x2cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X2))
with cp.cuda.Device(out.device.index):
outcp = cp.zeros((out.shape[0], out.shape[1]), dtype=cupy_dtype)
# Run the CUDA kernel to build the matrix K
pt_dim = int(X1.shape[1])
dims = int(X1.shape[0]), int(X2.shape[0])
threads_per_block = (16, 16) # TODO: Maybe hardcoding this is bad
blocks_per_grid = tuple((dims[i] + threads_per_block[i] - 1) // threads_per_block[i] for i in range(2))
kernel(blocks_per_grid, threads_per_block, (x1cp, x2cp, outcp, dims[0], dims[1], pt_dim))
cp.cuda.stream.get_current_stream().synchronize() # Need to synchronize so we can copy to PyTorch
# print("COPYING CUPY OUT TO PYTORCH")
# print("OUT CUPY\n", outcp)
# Copy the kernel back into the output PyTorch tensor
outcp_dlpack = outcp.toDlpack()
out_dlpack = torch.utils.dlpack.from_dlpack(outcp_dlpack)
out.copy_(out_dlpack)
# print("OUT PYTORCH\n", out)
# rand_idx_i, rand_idx_j = np.random.randint(X1.shape[0]), np.random.randint(X2.shape[0])
# xi, xj = X1[rand_idx_i].detach().cpu().numpy(), X2[rand_idx_j].detach().cpu().numpy()
# nxi, nxj = np.linalg.norm(xi), np.linalg.norm(xj)
# angle1, angle2 = np.linalg.norm(nxj * xi - nxi * xj), np.linalg.norm(nxj * xi + nxi * xj)
# angle = 2.0 * np.arctan2(angle1, angle2)
# kij = nxi * nxj * (np.sin(angle) + (1.0 + self.variance) * (np.pi - angle) * np.cos(angle)) / np.pi
# print(np.abs(kij - out[rand_idx_i, rand_idx_j].item()))
def _apply_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor):
raise NotImplementedError("LinearAngleKernel does not implement sparse apply")
def _finalize(self, A: torch.Tensor, d):
if self.debug:
print(f"LinearAngleKernel._finalize(A={A.shape}, d)")
return A
def __str__(self):
return f"NeuralSplineKernel()"
def __repr__(self):
return self.__str__()
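# Added usage note (not in the original file): elsewhere in this repo the kernel
# is constructed as LinearAngleKernel(opt=falkon_opts) and passed to
# falkon.Falkon(kernel=...); the __MUL_NORM_DEFINE__ substitution above toggles
# whether the output is scaled by the product of the input norms, driven by the
# kernel's multiply_norm attribute.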
| 22,944 | 39.183888 | 112 | py |
neural-splines | neural-splines-main/neural_splines/kmeans.py | import pykeops.torch as keops
import torch
def kmeans(x, k, num_iters=10):
"""
Implements Lloyd's algorithm for the Euclidean metric.
:param x: A tensor representing a set of N points of dimension D (shape [N, D])
:param k: The number of centroids to compute
:param num_iters: The number of K means iterations to do
:return: cl, c where cl are cluster labels for each input point (shape [N]) and c are the
cluster centroids (shape [K, D])
"""
N, D = x.shape # Number of samples, dimension of the ambient space
# Simplistic initialization for the centroids
perm = torch.randperm(N)[:k]
c = x[perm, :].clone()
cl = None
x_i = keops.LazyTensor(x.view(N, 1, D)) # (N, 1, D) samples
c_j = keops.LazyTensor(c.view(1, k, D)) # (1, K, D) centroids
# K-means loop:
# - x is the (N, D) point cloud,
# - cl is the (N,) vector of class labels
# - c is the (K, D) cloud of cluster centroids
for i in range(num_iters):
# E step: assign points to the closest cluster -------------------------
D_ij = ((x_i - c_j) ** 2).sum(-1) # (N, K) symbolic squared distances
cl = D_ij.argmin(dim=1).long().view(-1) # Points -> Nearest cluster
# M step: update the centroids to the normalized cluster average: ------
# Compute the sum of points per cluster:
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
# Divide by the number of points per cluster:
Ncl = torch.bincount(cl, minlength=k).type_as(c).view(k, 1)
c /= Ncl # in-place division to compute the average
return cl, c
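# A minimal smoke test added for illustration (not part of the original file).
# pykeops compiles its kernels on first use; its CPU backend also works, so a
# GPU is not strictly assumed here.
if __name__ == "__main__":
    pts = torch.randn(10_000, 3)
    labels, centroids = kmeans(pts, k=64, num_iters=10)
    assert labels.shape == (10_000,) and centroids.shape == (64, 3)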
| 1,649 | 35.666667 | 93 | py |
neural-splines | neural-splines-main/neural_splines/geometry.py | import torch
import numpy as np
from scipy.interpolate import RegularGridInterpolator
def normalize_pointcloud_transform(x):
"""
    Compute an affine transformation that normalizes the point cloud x to lie in [-0.5, 0.5]^3
:param x: A point cloud represented as a tensor of shape [N, 3]
:return: An affine transformation represented as a tuple (t, s) where t is a translation and s is scale
"""
min_x, max_x = x.min(0)[0], x.max(0)[0]
bbox_size = max_x - min_x
translate = -(min_x + 0.5 * bbox_size)
scale = 1.0 / torch.max(bbox_size)
return translate, scale
def affine_transform_pointcloud(x, tx):
"""
Apply the affine transform tx to the point cloud x
:param x: A pytorch tensor of shape [N, 3]
:param tx: An affine transformation represented as a tuple (t, s) where t is a translation and s is scale
:return: The transformed point cloud
"""
translate, scale = tx
return scale * (x + translate)
def affine_transform_bounding_box(bbox, tx):
"""
Apply the affine transform tx to the bounding box bbox
    :param bbox: A bounding box represented as 2 3D vectors (origin, size)
:param tx: An affine transformation represented as a tuple (t, s) where t is a translation and s is scale
:return: The transformed point bounding box
"""
translate, scale = tx
return scale * (bbox[0] + translate), scale * bbox[1]
def points_in_bbox(x, bbox):
"""
    Compute a mask indicating which points in x lie in the bounding box bbox
    :param x: A point cloud represented as a tensor of shape [N, 3]
    :param bbox: A bounding box represented as 2 3D vectors (origin, size)
:return: A mask of shape [N] where True values correspond to points in x which lie inside bbox
"""
mask = torch.logical_and(x > bbox[0], x <= bbox[0] + bbox[1])
mask = torch.min(mask, axis=-1)[0].to(torch.bool)
return mask
def point_cloud_bounding_box(x, scale=1.0):
"""
Get the axis-aligned bounding box for a point cloud (possibly scaled by some factor)
:param x: A point cloud represented as an [N, 3]-shaped tensor
:param scale: A scale factor by which to scale the bounding box diagonal
:return: The (possibly scaled) axis-aligned bounding box for a point cloud represented as a pair (origin, size)
"""
bb_min = x.min(0)[0]
bb_size = x.max(0)[0] - bb_min
return scale_bounding_box_diameter((bb_min, bb_size), scale)
def scale_bounding_box_diameter(bbox, scale):
"""
Scale the diagonal of the bounding box bbox while maintaining its center position
:param bbox: A bounding box represented as a pair (origin, size)
:param scale: A scale factor by which to scale the input bounding box's diagonal
:return: The (possibly scaled) axis-aligned bounding box for a point cloud represented as a pair (origin, size)
"""
bb_min, bb_size = bbox
bb_diameter = torch.norm(bb_size)
bb_unit_dir = bb_size / bb_diameter
scaled_bb_size = bb_size * scale
scaled_bb_diameter = torch.norm(scaled_bb_size)
scaled_bb_min = bb_min - 0.5 * (scaled_bb_diameter - bb_diameter) * bb_unit_dir
return scaled_bb_min, scaled_bb_size
def triple_points_along_normals(x, n, eps, homogeneous=False):
"""
    Convert a point cloud equipped with normals into a point cloud with points perturbed along those normals.
    Each point X with normal N in the input gets converted to 3 points:
    (X, X - eps*N, X + eps*N) which have occupancy values (0, -eps, eps)
:param x: The input points of shape [N, 3]
:param n: The input normals of shape [N, 3]
:param eps: The amount to perturb points about each normal
:param homogeneous: If true, return the points in homogeneous coordinates
:return: A pair, (X, O) consisting of the new point cloud X and point occupancies O
"""
x_in = x - n * eps
x_out = x + n * eps
x_triples = torch.cat([x, x_in, x_out], dim=0)
occ_triples = torch.cat([torch.zeros(x.shape[0]),
-torch.ones(x.shape[0]),
torch.ones(x.shape[0])]).to(x) * eps
if homogeneous:
        x_triples = torch.cat([x_triples, torch.ones(x_triples.shape[0], 1, dtype=x_triples.dtype, device=x_triples.device)], dim=-1)
return x_triples, occ_triples
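# Added note: with eps = 0.01, for example, a cloud of N oriented points becomes
# 3N regression samples whose target occupancies are 0 on the surface, -0.01 on
# the inside offsets, and +0.01 on the outside offsets along the normals.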
def voxel_chunks(grid_size, cells_per_axis):
"""
Iterator over ranges which partition a voxel grid into non-overlapping chunks.
:param grid_size: Size of the voxel grid to split into chunks
:param cells_per_axis: Number of cells along each axis
    :return: Each iteration yields a triple ((i, j, k), vmin, vmax) where (i, j, k) is the integer index of the
             chunk, vmin is the minimum voxel index of the chunk, and vmax is the maximum index. i.e. if vox is a
             voxel grid with shape grid_size, then vox[vmin:vmax] are the voxels in the current chunk
"""
if np.isscalar(cells_per_axis):
cells_per_axis = torch.tensor([cells_per_axis] * len(grid_size)).to(torch.int32)
current_vox_min = torch.tensor([0.0, 0.0, 0.0]).to(torch.float64)
current_vox_max = torch.tensor([0.0, 0.0, 0.0]).to(torch.float64)
cell_size_float = grid_size.to(torch.float64) / cells_per_axis
for c_i in range(cells_per_axis[0]):
current_vox_min[0] = current_vox_max[0]
current_vox_max[0] = cell_size_float[0] + current_vox_max[0]
current_vox_min[1:] = 0
current_vox_max[1:] = 0
for c_j in range(cells_per_axis[1]):
current_vox_min[1] = current_vox_max[1]
current_vox_max[1] = cell_size_float[1] + current_vox_max[1]
current_vox_min[2:] = 0
current_vox_max[2:] = 0
for c_k in range(cells_per_axis[2]):
current_vox_min[2] = current_vox_max[2]
current_vox_max[2] = cell_size_float[2] + current_vox_max[2]
vox_min = torch.round(current_vox_min).to(torch.int32)
vox_max = torch.round(current_vox_max).to(torch.int32)
yield (c_i, c_j, c_k), vox_min, vox_max
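# Illustrative usage, added here (the grid size, `vox`, and `process` are
# hypothetical): split a 128^3 voxel grid into 2x2x2 non-overlapping chunks.
#
#     grid_size = torch.tensor([128, 128, 128])
#     for (i, j, k), vmin, vmax in voxel_chunks(grid_size, 2):
#         process(vox[vmin[0]:vmax[0], vmin[1]:vmax[1], vmin[2]:vmax[2]])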
def cell_weights_trilinear(vmin, vmax, pvmin, pvmax):
"""
Returns a voxel grid of weights used to blend two adjacent cells together which overlap by some amount of voxels.
:param vmin: The minimum voxel indices for the cell
:param vmax: The maximum voxel indices for the cell
:param pvmin: The minimum voxel index for the padded cell
:param pvmax: The maximum voxel index for the padded cell
    :return: A triple (weights, pvmin, pvmax) where weights is a (pvmax - pvmin)-shaped voxel grid of trilinear
             weights used to interpolate neighboring cells, covering the index range [pvmin, pvmax) of the output grid
"""
dmin = vmin - pvmin
dmax = pvmax - vmax
x, y, z = [np.unique(np.array([pvmin[i], pvmin[i] + 2.0 * dmin[i], pvmax[i] - 2.0 * dmax[i], pvmax[i]]))
for i in range(3)]
vals = np.zeros([x.shape[0], y.shape[0], z.shape[0]])
xyz = (x, y, z)
one_idxs = []
for dim in range(3):
if xyz[dim].shape[0] == 2:
one_idxs.append([0, 1])
elif xyz[dim].shape[0] == 3:
if vmin[dim] == pvmin[dim]:
one_idxs.append([0, 1])
else:
one_idxs.append([1, 2])
else:
one_idxs.append([1, 2])
for i in one_idxs[0]:
for j in one_idxs[1]:
for k in one_idxs[2]:
vals[i, j, k] = 1.0
f_w = RegularGridInterpolator((x, y, z), vals)
psize = (pvmax - pvmin).numpy()
pmin = (pvmin + 0.5).numpy()
pmax = (pvmax - 0.5).numpy()
pts = np.stack([np.ravel(a) for a in
np.mgrid[pmin[0]:pmax[0]:psize[0] * 1j,
pmin[1]:pmax[1]:psize[1] * 1j,
pmin[2]:pmax[2]:psize[2] * 1j]], axis=-1)
return torch.from_numpy(f_w(pts).reshape(psize)), pvmin, pvmax
| 7,727 | 39.673684 | 117 | py |
neural-splines | neural-splines-main/neural_splines/__init__.py | import time
import warnings
import point_cloud_utils as pcu
import falkon
from falkon.utils.tensor_helpers import create_same_stride
from .falkon_kernels import NeuralSplineKernel, LaplaceKernelSphere, LinearAngleKernel
from .geometry import *
from .kmeans import kmeans
_VERBOSITY_LEVEL_DEBUG = 0
_VERBOSITY_LEVEL_INFO = 1
_VERBOSITY_LEVEL_SILENT = 5
class FixedIndexSelector(falkon.center_selection.CenterSelector):
def __init__(self, idx, random_gen=None):
super().__init__(random_gen)
self.idx = idx
def select(self, X, Y, M):
Xc = create_same_stride((M, X.shape[1]), other=X, dtype=X.dtype, device=X.device,
pin_memory=False)
        th_idx = torch.from_numpy(self.idx.astype(np.int64)).to(X.device)
torch.index_select(X, dim=0, index=th_idx, out=Xc)
if Y is not None:
Yc = create_same_stride((M, Y.shape[1]), other=Y, dtype=Y.dtype, device=Y.device,
pin_memory=False)
            th_idx = torch.from_numpy(self.idx.astype(np.int64)).to(Y.device)
torch.index_select(Y, dim=0, index=th_idx, out=Yc)
return Xc, Yc
return Xc
def _generate_nystrom_samples(x, num_samples, sampling_method, verbosity_level=1):
if x.shape[1] != 3:
raise ValueError(f"Invalid shape for x, must be [N, 3] but got {x.shape}")
if x.shape[0] < num_samples:
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print(f"Requested more Nyström samples ({num_samples}) than points ({x.shape[0]}) using all points.")
center_selector = 'uniform'
x_ny = None
ny_count = min(num_samples, x.shape[0])
elif sampling_method == 'random':
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Using Nyström samples chosen uniformly at random from the input.")
center_selector = 'uniform'
x_ny = None
ny_count = min(num_samples, x.shape[0])
elif sampling_method == 'blue-noise':
blue_noise_seed = np.random.randint(2 ** 31 - 1)
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print(f"Generating {num_samples} blue noise Nyström samples for {x.shape[0]} points.")
        # Allow generating +/- 5% of the requested samples so the algorithm converges quickly
sample_num_tolerance = 0.05
ny_idx = pcu.downsample_point_cloud_poisson_disk(x.numpy(), num_samples, random_seed=blue_noise_seed,
sample_num_tolerance=sample_num_tolerance)
x_ny = x[ny_idx]
x_ny = torch.cat([x_ny, torch.ones(x_ny.shape[0], 1).to(x_ny)], dim=-1)
ny_count = x_ny.shape[0]
center_selector = FixedIndexSelector(idx=ny_idx)
elif sampling_method == 'k-means':
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Generating k-means Nyström samples.")
_, x_ny = kmeans(x.contiguous(), num_samples)
x_ny = torch.cat([x_ny, torch.ones(x_ny.shape[0], 1).to(x_ny)], dim=-1)
ny_count = x_ny.shape[0]
center_selector = falkon.center_selection.FixedSelector(centers=x_ny, y_centers=None)
else:
raise ValueError(f"Invalid value {sampling_method} for --nystrom-mode. "
f"Must be one of 'random', 'blue-noise' or 'k-means'")
return x_ny, center_selector, ny_count
def _run_falkon_fit(x, y, penalty, num_ny, center_selector, kernel_type="neural-spline",
maxiters=20, stop_thresh=1e-7, variance=1.0, falkon_opts=None, verbosity_level=1):
if falkon_opts is None:
falkon_opts = falkon.FalkonOptions()
# Always use cuda for everything
falkon_opts.min_cuda_pc_size_64 = 1
falkon_opts.min_cuda_pc_size_32 = 1
falkon_opts.min_cuda_iter_size_64 = 1
falkon_opts.min_cuda_iter_size_32 = 1
falkon_opts.use_cpu = False
falkon_opts.cg_tolerance = stop_thresh
falkon_opts.debug = verbosity_level <= _VERBOSITY_LEVEL_DEBUG
falkon_opts.cg_print_when_done = verbosity_level <= _VERBOSITY_LEVEL_INFO
elif verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Overiding default FALKON settings with custom options")
if kernel_type == "neural-spline":
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Using Neural Spline Kernel")
kernel = NeuralSplineKernel(variance=variance, opt=falkon_opts)
elif kernel_type == "spherical-laplace":
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Using Spherical Laplace Kernel")
kernel = LaplaceKernelSphere(alpha=-0.5, gamma=0.5, opt=falkon_opts)
elif kernel_type == "linear-angle":
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Using Linear Angle Kernel")
kernel = LinearAngleKernel(opt=falkon_opts)
else:
raise ValueError(f"Invalid kernel_type {kernel_type}, expected one of 'neural-spline' or 'spherical-laplace'")
fit_start_time = time.time()
model = falkon.Falkon(kernel=kernel, penalty=penalty, M=num_ny, options=falkon_opts, maxiter=maxiters,
center_selection=center_selector)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
model.fit(x, y)
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print(f"Fit model in {time.time() - fit_start_time} seconds")
return model
def get_weights(vmin, vmax, pvmin, pvmax, weight_type):
"""
Get the per-voxel partition of unity weights for a cell when reconstructing on a grid of cells
:param vmin: Minimum voxel index for this cell
:param vmax: Maximum voxel index for this cell
:param pvmin: Minimum voxel index for the padded cell
:param pvmax: Maximum voxel index for the padded cell
:param weight_type: What kind of partition-of-unity to use
:return: A triple (weights, idxmin, idxmax) where weights is a (idxmax-idxmin)-shaped voxel grid and
idxmin and idxmax are 3-tensors indicating the index range in the output voxel grid which the
weights should correspond to
"""
if weight_type == 'trilinear':
return cell_weights_trilinear(vmin, vmax, pvmin, pvmax)
elif weight_type == 'none':
return 1.0, vmin, vmax
else:
raise ValueError("Invalid weight_type, must be one of 'trilinear' or 'none'")
def load_point_cloud(filename, min_norm_normal=1e-5, dtype=torch.float64):
"""
Load a point cloud with normals, filtering out points whose normal has a magnitude below the given threshold.
:param filename: Path to a PLY file
:param min_norm_normal: The minimum norm of a normal below which we discard a point
:param dtype: The output dtype of the tensors returned
:return: A pair v, n, where v is a an [N, 3]-shaped tensor of points, n is a [N, 3]-shaped tensor of unit normals
"""
v, _, n = pcu.load_mesh_vfn(filename, dtype=np.float64)
v, idx, _ = pcu.deduplicate_point_cloud(v, 1e-15, return_index=True) # Deduplicate point cloud when loading it
n = n[idx]
# Some meshes have non unit normals, so build a binary mask of points whose normal has a reasonable magnitude
# We use this mask to remove bad vertices
mask = np.linalg.norm(n, axis=-1) > min_norm_normal
# Keep the good points and normals
x = v[mask].astype(np.float64)
n = n[mask].astype(np.float64)
n /= np.linalg.norm(n, axis=-1, keepdims=True)
return torch.from_numpy(x).to(dtype), torch.from_numpy(n).to(dtype)
def fit_model_to_pointcloud(x, n, num_ny, eps, kernel='neural-spline',
reg=1e-7, ny_mode='blue-noise',
cg_stop_thresh=1e-5, cg_max_iters=20,
outer_layer_variance=1.0,
verbosity_level=1, custom_falkon_opts=None,
normalize=True):
"""
Fit a kernel to the point cloud with points x and normals n.
:param x: A tensor of 3D points with shape [N, 3]
:param n: A tensor of unit normals with shape [N, 3]
:param num_ny: The number of Nystrom samples to use. If negative, don't use Nyström sampling.
:param ny_mode: How to generate nystrom samples. Must be one of (1) 'random', (2) 'blue-noise', or (3) 'k-means'.
:param eps: Finite differencing coefficient used to approximate the gradient by perturbing points by this
amount about their normals
:param kernel: Which kernel to use. Must be one of 'neural-spline', 'spherical-laplace', or 'linear-angle'.
:param reg: Amount of regularization to apply when solving the kernel ridge regression
:param cg_stop_thresh: Stop threshold for the conjugate gradient solver
:param cg_max_iters: Maximum number of conjugate gradient iterations
    :param outer_layer_variance: Variance of the outer layer used by the Neural Spline kernel (forwarded to NeuralSplineKernel)
:param verbosity_level: How much should this function spam your terminal. 0 = debug, 1 = info, >5 = silent
:param custom_falkon_opts: Object of type falkon.FalkonOptions object used to override the default solver settings
:param normalize: If set, then normalize the point cloud to have zero mean
:return: A pair (model, tx) where model is a fitted neural spline model class (with the same API as scikit-learn)
and tx is an affine transformation which converts world space samples to model coordinates.
You *must* apply this transformation to points before evaluating the model.
This transformation is represented as a tuple (t, s) where t is a translation and s is scale.
"""
x, y = triple_points_along_normals(x, n, eps, homogeneous=False)
if normalize:
tx = normalize_pointcloud_transform(x)
else:
tx = 0.0, 1.0
x = affine_transform_pointcloud(x, tx)
x_ny, center_selector, ny_count = _generate_nystrom_samples(x, num_ny, ny_mode, verbosity_level=verbosity_level)
x = torch.cat([x, torch.ones(x.shape[0], 1).to(x)], dim=-1)
model = _run_falkon_fit(x, y, reg, ny_count, center_selector,
maxiters=cg_max_iters, stop_thresh=cg_stop_thresh,
kernel_type=kernel, variance=outer_layer_variance,
verbosity_level=verbosity_level, falkon_opts=custom_falkon_opts)
return model, tx
def eval_model_on_grid(model, bbox, tx, voxel_grid_size, cell_vox_min=None, cell_vox_max=None, print_message=True):
"""
Evaluate the trained model (output of fit_model_to_pointcloud) on a voxel grid.
:param model: The trained model returned from fit_model_to_pointcloud
:param bbox: The bounding box defining the region of space on which to evaluate the model
(represented as the pair (origin, size))
:param tx: An affine transformation which transforms points in world coordinates to model
coordinates before evaluating the model (the second return value of fit_model_to_grid).
The transformation is represented as a tuple (t, s) where t is a translation and s is scale.
:param voxel_grid_size: The size of the voxel grid on which to reconstruct
:param cell_vox_min: If not None, reconstruct on the subset of the voxel grid starting at these indices.
:param cell_vox_max: If not None, reconstruct on the subset of the voxel grid ending at these indices.
:param print_message: If true, print status messages to stdout.
:return: A tensor representing the model evaluated on a grid.
"""
bbox_origin, bbox_size = bbox
voxel_size = bbox_size / voxel_grid_size # size of a single voxel cell
if cell_vox_min is None:
cell_vox_min = torch.tensor([0, 0, 0], dtype=torch.int32)
if cell_vox_max is None:
cell_vox_max = voxel_grid_size
if print_message:
print(f"Evaluating model on grid of size {[_.item() for _ in (cell_vox_max - cell_vox_min)]}.")
eval_start_time = time.time()
xmin = bbox_origin + (cell_vox_min + 0.5) * voxel_size
xmax = bbox_origin + (cell_vox_max - 0.5) * voxel_size
xmin = affine_transform_pointcloud(xmin.unsqueeze(0), tx).squeeze()
xmax = affine_transform_pointcloud(xmax.unsqueeze(0), tx).squeeze()
xmin, xmax = xmin.numpy(), xmax.numpy()
cell_vox_size = (cell_vox_max - cell_vox_min).numpy()
xgrid = np.stack([_.ravel() for _ in np.mgrid[xmin[0]:xmax[0]:cell_vox_size[0] * 1j,
xmin[1]:xmax[1]:cell_vox_size[1] * 1j,
xmin[2]:xmax[2]:cell_vox_size[2] * 1j]], axis=-1)
xgrid = torch.from_numpy(xgrid).to(model.alpha_.dtype)
xgrid = torch.cat([xgrid, torch.ones(xgrid.shape[0], 1).to(xgrid)], dim=-1).to(model.alpha_.dtype)
    ygrid = model.predict(xgrid).reshape(tuple(cell_vox_size.astype(np.int64))).detach().cpu()
if print_message:
print(f"Evaluated model in {time.time() - eval_start_time}s.")
return ygrid
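# End-to-end sketch, added for illustration; "bunny.ply" and the parameter
# values are hypothetical, but every call below exists in this module:
#
#     x, n = load_point_cloud("bunny.ply")
#     model, tx = fit_model_to_pointcloud(x, n, num_ny=1024, eps=0.01)
#     bbox = point_cloud_bounding_box(x, scale=1.1)
#     grid = eval_model_on_grid(model, bbox, tx, torch.tensor([128, 128, 128]))
#     # grid is a [128, 128, 128] scalar field whose zero level set approximates the surface.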
| 12,994 | 46.600733 | 118 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/misc.py | # to be determined...
from .utilities.optimized_routines import setmin
| 71 | 23 | 48 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/grids.py | # ADD asset_grid in a minute!
from .utilities.discretize import agrid, asset_grid, markov_rouwenhorst, markov_tauchen | 117 | 58 | 87 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/estimation.py | """Functions for calculating the log likelihood of a model from its impulse responses"""
import numpy as np
import scipy.linalg as linalg
from numba import njit
'''Part 1: compute covariances at all lags and log likelihood'''
def all_covariances(M, sigmas):
"""Use Fast Fourier Transform to compute covariance function between O vars up to T-1 lags.
    See equation (108) in appendix B.5 of the paper for details.
Parameters
----------
M : array (T*O*Z), stacked impulse responses of nO variables to nZ shocks (MA(T-1) representation)
sigmas : array (Z), standard deviations of shocks
Returns
----------
Sigma : array (T*O*O), covariance function between O variables for 0, ..., T-1 lags
"""
T = M.shape[0]
dft = np.fft.rfftn(M, s=(2 * T - 2,), axes=(0,))
total = (dft.conjugate() * sigmas**2) @ dft.swapaxes(1, 2)
return np.fft.irfftn(total, s=(2 * T - 2,), axes=(0,))[:T]
def log_likelihood(Y, Sigma, sigma_measurement=None):
"""Given second moments, compute log-likelihood of data Y.
Parameters
----------
Y : array (Tobs*O)
stacked data for O observables over Tobs periods
Sigma : array (T*O*O)
        covariance between observables in the model for 0, ..., T-1 lags (e.g. from all_covariances)
sigma_measurement : [optional] array (O)
std of measurement error for each observable, assumed zero if not provided
Returns
----------
L : scalar, log-likelihood
"""
Tobs, nO = Y.shape
if sigma_measurement is None:
sigma_measurement = np.zeros(nO)
V = build_full_covariance_matrix(Sigma, sigma_measurement, Tobs)
y = Y.ravel()
return log_likelihood_formula(y, V)
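# Hedged example (added; all shapes and values are hypothetical). Score Tobs
# periods of data on O observables against a white-noise model with Z shocks:
#
#     T, O, Z, Tobs = 300, 3, 2, 200
#     M = np.zeros((T, O, Z)); M[0] = np.eye(O)[:, :Z]  # MA(0): shocks load once
#     Sigma = all_covariances(M, sigmas=np.ones(Z))
#     Y = np.random.randn(Tobs, O)
#     ll = log_likelihood(Y, Sigma, sigma_measurement=0.1 * np.ones(O))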
'''Part 2: helper functions'''
def log_likelihood_formula(y, V):
"""Implements multivariate normal log-likelihood formula using Cholesky with data vector y and variance V.
Calculates -log det(V)/2 - y'V^(-1)y/2
"""
V_factored = linalg.cho_factor(V)
quadratic_form = np.dot(y, linalg.cho_solve(V_factored, y))
log_determinant = 2*np.sum(np.log(np.diag(V_factored[0])))
return -(log_determinant + quadratic_form) / 2
@njit
def build_full_covariance_matrix(Sigma, sigma_measurement, Tobs):
"""Takes in T*O*O array Sigma with covariances at each lag t,
assembles them into (Tobs*O)*(Tobs*O) matrix of covariances, including measurement errors.
"""
    T, O, _ = Sigma.shape
V = np.empty((Tobs, O, Tobs, O))
for t1 in range(Tobs):
for t2 in range(Tobs):
if abs(t1-t2) >= T:
V[t1, :, t2, :] = np.zeros((O, O))
else:
if t1 < t2:
V[t1, : , t2, :] = Sigma[t2-t1, :, :]
elif t1 > t2:
V[t1, : , t2, :] = Sigma[t1-t2, :, :].T
else:
# want exactly symmetric
V[t1, :, t2, :] = (np.diag(sigma_measurement**2) + (Sigma[0, :, :]+Sigma[0, :, :].T)/2)
return V.reshape((Tobs*O, Tobs*O))
| 3,056 | 34.137931 | 110 | py |