repo_name
stringlengths 6
97
| path
stringlengths 3
341
| text
stringlengths 8
1.02M
|
|---|---|---|
ksweta/BeamIt-Server
|
beamit/resources/signin.py
|
from beamit.resources.base import Resource
class SigninRequest(Resource):
    """Resource for a sign-in request carrying the user's credentials."""

    MEDIA_TYPE = 'application/vnd.beamit.signin.request+json'

    def __init__(self, email, password):
        self.email = email
        self.password = password

    def __repr__(self):
        # SECURITY FIX: never echo the plaintext password -- reprs end up in
        # logs and tracebacks. The field is masked; use to_dict() for wire data.
        return "<SigninRequest email: {}, password: {}>".format(
            self.email,
            '***',
        )

    def to_dict(self):
        """Serialize to a plain dict suitable for JSON encoding."""
        return dict(email=self.email, password=self.password)

    @classmethod
    def from_dict(cls, dct):
        """Build a SigninRequest from a decoded JSON dict (missing keys -> None)."""
        return cls(
            email=dct.get("email"),
            password=dct.get("password"),
        )
class SigninResponse(Resource):
    """Resource for a successful sign-in response (the new session's user id)."""

    MEDIA_TYPE = 'application/vnd.beamit.signin.response+json'

    def __init__(self, user_id):
        self.user_id = user_id

    def __repr__(self):
        return "<SigninResponse user_id: {}>".format(self.user_id)

    def to_dict(self):
        """Serialize to a plain dict suitable for JSON encoding."""
        return {"user_id": self.user_id}

    @classmethod
    def from_dict(cls, dct):
        """Build a SigninResponse from a decoded JSON dict (missing key -> None)."""
        return cls(user_id=dct.get("user_id"))
|
ksweta/BeamIt-Server
|
beamit/controllers/landing_page.py
|
"""
Landing page controller var miscellaneous pages.
"""
from flask import render_template
def create_landing_page_routes(app):
@app.route('/')
def root():
return 'Hello from BeamIt!!'
@app.route('/android/download')
def android_download():
return render_template('androidAppDownload.html')
|
jerry871002/CSE201-project
|
doc/EcoModelForTransport.py
|
<gh_stars>1-10
import numpy as np
import random

# --- City-level parameters --------------------------------------------------
# NOTE(review): units are undocumented; wages/prices appear to be monthly
# currency amounts -- confirm against the accompanying report.
airQuality = 1
averageWage = 1600
revenue = 0
# One entry per transport mode; the exact mode order (car/moto/bike/bus?) is
# not stated in this file -- TODO confirm with the model authors.
alpha = [0, 0, 0, 0]            # preference weight per mode
pourcentage = [40, 20, 20, 20]  # affordability ratio per mode (recomputed below)
quantity = [0, 0, 0, 0]         # accumulated choice counts per mode
alpha[0] = 10.0
alpha[1] = 10.0
alpha[2] = 2.0
alpha[3] = 1.0

# --- Per-mode object (vehicle) parameters -----------------------------------
lifetime = [15, 10, 20, 10]             # service life in years
pricetotal = [30000, 50000, 5000, 600]  # purchase price (redrawn each agent)
capacity = [3, 1, 1, 15]                # passengers carried per vehicle
pricepermonth = [0, 0, 0, 0]            # derived: amortised monthly price

# Monte-Carlo over 10000 agents: each draws a wage and per-mode preferences,
# then picks the affordable mode with the highest preference weight.
for n in range(10000):
    choice = [0, 0, 0, 0]
    # Agent's monthly income, floored at 700.
    revenue = max(np.random.normal(averageWage, 1500), 700)
    # Raw (unnormalised) preference weights, each floored at 0.
    alpha[0] = max(np.random.normal(2000, 500), 0)
    alpha[1] = max(np.random.normal(3000, 1000), 0)
    # Mode 2's appeal scales with air quality.
    alpha[2] = max(np.random.normal(1000, 800), 0) * np.sqrt(airQuality)
    alpha[3] = max(np.random.normal(1000, 100), 0)
    # Purchase prices (mode 3 is fixed-price).
    pricetotal[0] = max(np.random.normal(40000, 50), 0)
    pricetotal[1] = max(np.random.normal(200000, 100), 0)
    pricetotal[2] = max(np.random.normal(5000, 30), 0)
    pricetotal[3] = 4300
    sumAlp = sum(alpha)
    for i in range(len(alpha)):
        # Normalise the weights (the 0.01 factor keeps them small) and
        # amortise the purchase price over the vehicle's lifetime.
        alpha[i] = 0.01 * alpha[i] / sumAlp
        pricepermonth[i] = pricetotal[i] / (12 * lifetime[i])
    for i in range(len(alpha)):
        # NOTE(review): alpha was already normalised above, so the extra
        # division by sum(alpha) re-normalises -- presumably intentional,
        # but worth verifying against the model specification.
        pourcentage[i] = alpha[i] * (revenue / pricepermonth[i]) / sum(alpha)
        if pourcentage[i] > 1:
            choice[i] = alpha[i]   # affordable: candidate with weight alpha[i]
        else:
            choice[i] = 0          # unaffordable: excluded
    # The agent picks the affordable mode with the largest weight.
    quantity[choice.index(max(choice))] += 1

# Convert raw per-mode counts into vehicle counts via seat capacity.
for i in range(len(alpha)):
    quantity[i] /= capacity[i]
print(quantity)
|
spelcha/assemblyline-service-apkaye
|
test/test_apkaye.py
|
import os
import json
import pytest
import shutil
# Getting absolute paths, names and regexes
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(TEST_DIR)
SERVICE_CONFIG_NAME = "service_manifest.yml"
SERVICE_CONFIG_PATH = os.path.join(ROOT_DIR, SERVICE_CONFIG_NAME)
TEMP_SERVICE_CONFIG_PATH = os.path.join("/tmp", SERVICE_CONFIG_NAME)

# Samples that we will be sending to the service.
# Mirrors the task dict Assemblyline hands to a service; the sha256 doubles
# as the filename of the staged fixture under test/samples.
sample1 = dict(
    sid=1,
    metadata={},
    service_name='apkaye',
    service_config={},
    fileinfo=dict(
        magic='ASCII text, with no line terminators',
        md5='1f09ecbd362fa0dfff88d4788e6f5df0',
        mime='text/plain',
        sha1='a649bf201cde05724e48f2d397a615b201be34fb',
        sha256='dadc624d4454e10293dbd1b701b9ee9f99ef83b4cd07b695111d37eb95abcff8',
        size=19,
        type='unknown',
    ),
    filename='dadc624d4454e10293dbd1b701b9ee9f99ef83b4cd07b695111d37eb95abcff8',
    min_classification='TLP:WHITE',
    max_files=501,  # TODO: get the actual value
    ttl=3600,
)
@pytest.fixture
def class_instance():
    """Yield a fresh APKaye instance.

    Stages service_manifest.yml into /tmp (where the service expects it) for
    the duration of the test and removes it afterwards, pass or fail.
    """
    staged_manifest = TEMP_SERVICE_CONFIG_PATH
    try:
        shutil.copyfile(SERVICE_CONFIG_PATH, staged_manifest)
        from apkaye.apkaye import APKaye
        yield APKaye()
    finally:
        # Always clean up the staged manifest.
        os.remove(staged_manifest)
class TestAPKaye:
    """Integration-style tests for the APKaye service.

    Sample files from test/samples are staged into /tmp for the lifetime of
    the test class, matching the paths the service reads them from.
    """

    @classmethod
    def setup_class(cls):
        # Placing the samples in the tmp directory
        samples_path = os.path.join(TEST_DIR, "samples")
        for sample in os.listdir(samples_path):
            sample_path = os.path.join(samples_path, sample)
            shutil.copyfile(sample_path, os.path.join("/tmp", sample))

    @classmethod
    def teardown_class(cls):
        # Cleaning up the tmp directory
        samples_path = os.path.join(TEST_DIR, "samples")
        for sample in os.listdir(samples_path):
            os.remove(os.path.join("/tmp", sample))

    @staticmethod
    def test_init(class_instance):
        # Tool locations are baked into the service image.
        assert class_instance.apktool == "/opt/al_support/apktool.jar"
        assert class_instance.dex2jar == "/opt/al_support/dex2jar-2.0/d2j-dex2jar.sh"
        assert class_instance.aapt == "/opt/al_support/aapt2/aapt2"

    @staticmethod
    def test_start():
        # TODO: somehow check if error was logged in service.log
        # service.start()
        pass

    @staticmethod
    def test_get_tool_version(class_instance):
        assert class_instance.get_tool_version() == "APKTOOL: 2.4.0 - D2J: 2.0 - AAPT2: 3.5.1-5435860"

    @staticmethod
    @pytest.mark.parametrize("sample", [
        sample1
    ])
    def test_execute(sample, class_instance):
        # Imports required to execute the sample
        from assemblyline_v4_service.common.task import Task
        from assemblyline.odm.messages.task import Task as ServiceTask
        from assemblyline_v4_service.common.request import ServiceRequest

        # Creating the required objects for execution.
        # BUGFIX: use the parametrized `sample` rather than the module-level
        # `sample1`, so additional parametrize cases actually take effect.
        service_task = ServiceTask(sample)
        task = Task(service_task)
        class_instance._task = task
        service_request = ServiceRequest(task)

        # Actually executing the sample
        task.service_config = {"resubmit_apk_as_jar": False}
        class_instance.execute(service_request)

        # Get the result of execute() from the test method
        test_result = task.get_service_result()

        # Get the assumed "correct" result of the sample.
        # (The redundant f.close() inside the `with` block was removed.)
        correct_result_path = os.path.join(TEST_DIR, "results", task.file_name + ".json")
        with open(correct_result_path, "r") as f:
            correct_result = json.loads(f.read())

        # Assert that the appropriate sections of the dict are equal,
        # avoiding the date fields in the response.
        test_result_response = test_result.pop("response")
        correct_result_response = correct_result.pop("response")
        assert test_result == correct_result

        # Comparing everything in the response except for the date
        test_result_response["milestones"].pop("service_completed")
        correct_result_response["milestones"].pop("service_completed")
        assert test_result_response == correct_result_response

    @staticmethod
    @pytest.mark.parametrize("apktool_out_dir,result", [
        ("", None)
    ])
    def test_validate_certs(apktool_out_dir, result, class_instance):
        # Smoke test: must not raise on empty input.
        class_instance.validate_certs(apktool_out_dir=apktool_out_dir, result=result)

    @staticmethod
    @pytest.mark.parametrize("apktool_out_dir,result", [
        ("", None)
    ])
    def test_find_scripts_and_exes(apktool_out_dir, result, class_instance):
        class_instance.find_scripts_and_exes(apktool_out_dir=apktool_out_dir, result=result)

    # CONSISTENCY FIX: was an instance method (took `self`) unlike every
    # sibling test; made @staticmethod to match the rest of the class.
    @staticmethod
    @pytest.mark.parametrize("apktool_out_dir,result", [
        ("", None)
    ])
    def test_find_network_indicators(apktool_out_dir, result, class_instance):
        class_instance.find_network_indicators(apktool_out_dir=apktool_out_dir, result=result)

    @staticmethod
    @pytest.mark.parametrize("apktool_out_dir,result", [
        ("", None)
    ])
    def test_analyse_apktool_output(apktool_out_dir, result, class_instance):
        class_instance.analyse_apktool_output(apktool_out_dir=apktool_out_dir, result=result)

    @staticmethod
    @pytest.mark.parametrize("apk,target_dir,work_dir,result", [
        ("", "", "", None)
    ])
    def test_run_apktool(apk, target_dir, work_dir, result, class_instance):
        class_instance.run_apktool(apk=apk, target_dir=target_dir, work_dir=work_dir, result=result)

    @staticmethod
    @pytest.mark.parametrize("apk,target", [
        ("", "")
    ])
    def test_get_dex(apk, target, class_instance):
        class_instance.get_dex(apk=apk, target=target)

    @staticmethod
    @pytest.mark.parametrize("apk_file,target,result,val", [
        ("", "", None, None)
    ])
    def test_resubmit_dex2jar_output(apk_file, target, result, val, class_instance):
        class_instance.resubmit_dex2jar_output(apk_file=apk_file, target=target, result=result, request=val)

    @staticmethod
    @pytest.mark.parametrize("args", [
        []
    ])
    def test_run_appt(args, class_instance):
        class_instance.run_appt(args=args)

    @staticmethod
    @pytest.mark.parametrize("apk_file,result", [
        ("", None)
    ])
    def test_run_badging_analysis(apk_file, result, class_instance):
        class_instance.run_badging_analysis(apk_file=apk_file, result=result)

    @staticmethod
    @pytest.mark.parametrize("apk_file,result", [
        ("", None)
    ])
    def test_run_strings_analysis(apk_file, result, class_instance):
        class_instance.run_strings_analysis(apk_file=apk_file, result=result)
|
vanttec/vanttec_uuv
|
lib/choose_side/scripts/auto_nav_position.py
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import rospy
from std_msgs.msg import Float32MultiArray, Int32, String
from geometry_msgs.msg import Pose, PoseStamped
from vanttec_uuv.msg import GuidanceWaypoints
from usv_perception.msg import obj_detected, obj_detected_list
from nav_msgs.msg import Path
# Class Definition
# Class Definition
class AutoNav:
    """Mission node that steers the UUV through a gate of three detected buoys.

    NOTE(review): despite some USV-style topic names, this subscribes to the
    UUV simulation pose topic -- confirm topic remapping before deployment.
    """

    def __init__(self):
        # Latest NED pose fed by ins_pose_callback.
        self.ned_x = 0
        self.ned_y = 0
        self.yaw = 0
        # Detected objects; replaced below by a hard-coded test set.
        self.objects_list = []
        self.activated = True
        # Mission state machine (-1 = waiting for detections; see main()).
        self.state = -1
        # Distance from the vehicle to the chosen gate centre.
        self.distance = 0
        self.InitTime = rospy.Time.now().secs
        self.offset = .55  # camera to ins offset
        # Current target waypoint in the NED frame.
        self.target_x = 0
        self.target_y = 0
        # Gate orientation expressed in the NED frame.
        self.ned_alpha = 0
        # Which half of the gate to traverse ('left' or anything else = right).
        self.choose_side = 'left'
        # Stand-off distance in front of the gate (metres, presumably).
        self.distance_away = 5
        self.waypoints = GuidanceWaypoints()
        self.uuv_path = Path()
        # Waypoint test instead of perception node
        # ROS Subscribers
        rospy.Subscriber("/uuv_simulation/dynamic_model/pose", Pose, self.ins_pose_callback)
        '''
        rospy.Subscriber("/usv_perception/yolo_zed/objects_detected", obj_detected_list, self.objs_callback)
        '''
        # ROS Publishers
        self.uuv_waypoints = rospy.Publisher("/uuv_guidance/guidance_controller/waypoints", GuidanceWaypoints, queue_size=10)
        self.uuv_path_pub = rospy.Publisher("/uuv_planning/motion_planning/desired_path", Path, queue_size=10)
        self.status_pub = rospy.Publisher("/mission/status", Int32, queue_size=10)
        self.test = rospy.Publisher("/mission/state", Int32, queue_size=10)
        # Waypoint test instead of perception node: hard-coded buoy positions
        # standing in for real detections (body frame, metres).
        self.objects_list = [
            {
                'X': 7,
                'Y': -4,
                'Z': 0
            },
            {
                'X': 7,
                'Y': 0,
                'Z': 0
            },
            {
                'X': 7,
                'Y': 4,
                'Z': 0
            }
        ]
def ins_pose_callback(self,pose):
self.ned_x = pose.position.x
self.ned_y = pose.position.y
self.ned_z = pose.position.z
self.yaw = pose.orientation.z
'''
def objs_callback(self,data):
self.objects_list = []
for i in range(data.len):
if str(data.objects[i].clase) == 'bouy':
self.objects_list.append({'X' : data.objects[i].X + self.offset,
'Y' : data.objects[i].Y,
'color' : data.objects[i].color,
'class' : data.objects[i].clase})
'''
def center_point(self):
'''
@name: center_point
@brief: Returns two waypoints as desired positions. The first waypoint is
between the middle of the gate and it right or left post, and the second a distance to the front
@param: --
@return: --
'''
x_list = []
y_list = []
distance_list = []
for i in range(len(self.objects_list)):
x_list.append(self.objects_list[i]['X'])
y_list.append(self.objects_list[i]['Y'])
distance_list.append(math.pow(x_list[i]**2 + y_list[i]**2, 0.5))
ind_g1 = np.argsort(distance_list)[0]
ind_g2 = np.argsort(distance_list)[1]
ind_g2 = np.argsort(distance_list)[2]
x1 = x_list[ind_g1]
y1 = -1*y_list[ind_g1]
x2 = x_list[ind_g2]
y2 = -1*y_list[ind_g2]
x3 = x_list[ind_g2]
y3 = -1*y_list[ind_g2]
if (self.choose_side == 'left'):
xc = min([x1,x2]) + abs(x1 - x2)/2 - self.distance_away
yc = min([y1,y2]) + abs(y1 - y2)/2
if y1 < y2:
yl = y1
xl = x1
yr = y2
xr = x2
else:
yl = y2
xl = x2
yr = y1
xr = x1
else:
xc = min([x2,x3]) + abs(x2 - x3)/2 - self.distance_away
yc = min([y2,y3]) + abs(y2 - y3)/2
if y2 < y3:
yl = y2
xl = x2
yr = y3
xr = x3
else:
yl = y3
xl = x3
yr = y2
xr = x2
yd = yl - yr
xd = xl - xr
alpha = math.atan2(yd,xd) + math.pi/2
if (abs(alpha) > (math.pi)):
alpha = (alpha/abs(alpha))*(abs(alpha) - 2*math.pi)
self.ned_alpha = alpha + self.yaw
if (abs(self.ned_alpha) > (math.pi)):
self.ned_alpha = (self.ned_alpha/abs(self.ned_alpha))*(abs(self.ned_alpha) - 2*math.pi)
xm, ym = self.gate_to_body(3,0,alpha,xc,yc)
self.target_x, self.target_y = self.body_to_ned(xm, ym)
#path_array = Float32MultiArray()
#path_array.layout.data_offset = 5
#path_array.data = [xc, yc, xm, ym, 2]
#self.desired(path_array)
self.waypoints.guidance_law = 1
self.waypoints.waypoint_list_length = 2
self.waypoints.waypoint_list_x = [xc, xm]
self.waypoints.waypoint_list_y = [yc, ym]
self.waypoints.waypoint_list_z = [0,0]
self.desired(self.waypoints)
def calculate_distance_to_sub(self):
'''
@name: calculate_distance_to_sub
@brief: Returns the distance from the UUV to the next gate
@param: --
@return: --
'''
x_list = []
y_list = []
distance_list = []
for i in range(len(self.objects_list)):
x_list.append(self.objects_list[i]['X'])
y_list.append(self.objects_list[i]['Y'])
distance_list.append(math.pow(x_list[i]**2 + y_list[i]**2, 0.5))
ind_g1 = np.argsort(distance_list)[0]
ind_g2 = np.argsort(distance_list)[1]
x1 = x_list[ind_g1]
y1 = -1*y_list[ind_g1]
x2 = x_list[ind_g2]
y2 = -1*y_list[ind_g2]
x3 = x_list[ind_g2]
y3 = -1*y_list[ind_g2]
if (self.choose_side == 'left'):
xc = min([x1,x2]) + abs(x1 - x2)/2
yc = min([y1,y2]) + abs(y1 - y2)/2
if y1 < y2:
yl = y1
xl = x1
yr = y2
xr = x2
else:
yl = y2
xl = x2
yr = y1
xr = x1
else:
xc = min([x2,x3]) + abs(x2 - x3)/2
yc = min([y2,y3]) + abs(y2 - y3)/2
if y2 < y3:
yl = y2
xl = x2
yr = y3
xr = x3
else:
yl = y3
xl = x3
yr = y2
xr = x2
self.distance = math.pow(xc*xc + yc*yc, 0.5)
def farther(self):
'''
@name: farther
@brief: Returns a waypoint farther to the front of the vehicle in the NED
reference frame to avoid perturbations.
@param: --
@return: --
'''
self.target_x, self.target_y = self.gate_to_ned(10, 0,
self.ned_alpha,
self.target_x,
self.target_y)
#path_array = Float32MultiArray()
#path_array.layout.data_offset = 3
#path_array.data = [self.target_x, self.target_y, 0]
#self.desired(data)
self.waypoints.guidance_law = 1
self.waypoints.waypoint_list_length = 1
self.waypoints.waypoint_list_x = {self.target_x}
self.waypoints.waypoint_list_y = { self.target_y}
self.waypoints.waypoint_list_z = {0}
self.desired(self.waypoints)
def gate_to_body(self, gate_x2, gate_y2, alpha, body_x1, body_y1):
'''
@name: gate_to_body
@brief: Coordinate transformation between gate and body reference frames.
@param: gate_x2: target x coordinate in gate reference frame
gate_y2: target y coordinate in gate reference frame
alpha: angle between gate and body reference frames
body_x1: gate x coordinate in body reference frame
body_y1: gate y coordinate in body reference frame
@return: body_x2: target x coordinate in body reference frame
body_y2: target y coordinate in body reference frame
'''
p = np.array([[gate_x2],[gate_y2]])
J = self.rotation_matrix(alpha)
n = J.dot(p)
body_x2 = n[0] + body_x1
body_y2 = n[1] + body_y1
return (body_x2, body_y2)
def body_to_ned(self, x2, y2):
'''
@name: body_to_ned
@brief: Coordinate transformation between body and NED reference frames.
@param: x2: target x coordinate in body reference frame
y2: target y coordinate in body reference frame
@return: ned_x2: target x coordinate in ned reference frame
ned_y2: target y coordinate in ned reference frame
'''
p = np.array([x2, y2])
J = self.rotation_matrix(self.yaw)
n = J.dot(p)
ned_x2 = n[0] + self.ned_x
ned_y2 = n[1] + self.ned_y
return (ned_x2, ned_y2)
def gate_to_ned(self, gate_x2, gate_y2, alpha, ned_x1, ned_y1):
'''
@name: gate_to_ned
@brief: Coordinate transformation between gate and NED reference frames.
@param: gate_x2: target x coordinate in gate reference frame
gate_y2: target y coordinate in gate reference frame
alpha: angle between gate and ned reference frames
body_x1: gate x coordinate in ned reference frame
body_y1: gate y coordinate in ned reference frame
@return: body_x2: target x coordinate in ned reference frame
body_y2: target y coordinate in ned reference frame
'''
p = np.array([[gate_x2],[gate_y2]])
J = self.rotation_matrix(alpha)
n = J.dot(p)
ned_x2 = n[0] + ned_x1
ned_y2 = n[1] + ned_y1
return (ned_x2, ned_y2)
def rotation_matrix(self, angle):
'''
@name: rotation_matrix
@brief: Transformation matrix template.
@param: angle: angle of rotation
@return: J: transformation matrix
'''
J = np.array([[math.cos(angle), -1*math.sin(angle)],
[math.sin(angle), math.cos(angle)]])
return (J)
def desired(self, path):
self.uuv_waypoints.publish(path)
self.uuv_path.header.stamp = rospy.Time.now()
self.uuv_path.header.frame_id = "world"
del self.uuv_path.poses[:]
for index in range(path.waypoint_list_length):
pose = PoseStamped()
pose.header.stamp = rospy.Time.now()
pose.header.frame_id = "world"
pose.pose.position.x = path.waypoint_list_x[index]
pose.pose.position.y = path.waypoint_list_y[index]
pose.pose.position.z = path.waypoint_list_z[index]
self.uuv_path.poses.append(pose)
self.uuv_path_pub.publish(self.uuv_path)
def main():
    """Entry point: run the buoy-gate state machine at 20 Hz until shutdown.

    States: -1 wait for >=3 detections, 0 approach the gate, 1 push the
    target farther, 2 re-centre on the gate, 3 mission complete.
    """
    rospy.init_node("auto_nav_position", anonymous=False)
    rate = rospy.Rate(20)
    autoNav = AutoNav()
    autoNav.distance = 4
    last_detection = []
    while not rospy.is_shutdown() and autoNav.activated:
        rospy.loginfo("AutoNav is activated")
        # rospy.loginfo(autoNav.objects_list)
        rospy.loginfo(last_detection)
        if autoNav.objects_list != last_detection:
            rospy.loginfo("Last detection not activated")
            if autoNav.state == -1:
                rospy.loginfo("AutoNav.state == -1")
                # Block until at least three buoys are detected.
                while (not rospy.is_shutdown()) and (len(autoNav.objects_list) < 3):
                    autoNav.test.publish(autoNav.state)
                    rospy.loginfo("AutoNav.state in -1")
                    rate.sleep()
                autoNav.state = 0
                # last_detection = autoNav.objects_list
            if autoNav.state == 0:
                rospy.loginfo("AutoNav.state == 0")
                autoNav.test.publish(autoNav.state)
                if len(autoNav.objects_list) >= 3:
                    rospy.loginfo("AutoNav.objects_list) >= 3")
                    autoNav.calculate_distance_to_sub()
                    if (len(autoNav.objects_list) >= 3) and (autoNav.distance >= 2):
                        rospy.loginfo("AutoNav.objects_list) >= 3 and (autoNav.distance >= 2)")
                        autoNav.center_point()
                else:
                    rospy.loginfo("No autoNav.objects_list")
                    initTime = rospy.Time.now().secs
                    # Wait up to 2 s for detections before advancing to state 1.
                    # NOTE(review): this loop only sleeps when the timeout
                    # fires, so it spins at full speed until then -- confirm
                    # the missing per-iteration rate.sleep() is intentional.
                    while ((not rospy.is_shutdown()) and
                           (len(autoNav.objects_list) < 3 or autoNav.distance < 2)):
                        rospy.loginfo("not rospy.is_shutdown() and (len(autoNav.objects_list) < 3 or autoNav.distance < 2)")
                        if rospy.Time.now().secs - initTime > 2:
                            rospy.loginfo("rospy.Time.now().secs - initTime > 2")
                            autoNav.state = 1
                            rate.sleep()
                            break
                    # last_detection = autoNav.objects_list
            if autoNav.state == 1:
                rospy.loginfo("AutoNav.state == 1")
                autoNav.test.publish(autoNav.state)
                if len(autoNav.objects_list) >= 3:
                    autoNav.state = 2
                else:
                    initTime = rospy.Time.now().secs
                    # After a 1 s timeout, push the target waypoint farther.
                    while ((not rospy.is_shutdown()) and
                           (len(autoNav.objects_list) < 3)):
                        if rospy.Time.now().secs - initTime > 1:
                            autoNav.farther()
                            rate.sleep()
                            break
                    # last_detection = autoNav.objects_list
        if autoNav.objects_list != last_detection:
            rospy.loginfo("autoNav.objects_list != last_detection:")
            if autoNav.state == 2:
                rospy.loginfo("AutoNav.state == 2")
                autoNav.test.publish(autoNav.state)
                if len(autoNav.objects_list) >= 3:
                    autoNav.calculate_distance_to_sub()
                    if len(autoNav.objects_list) >= 3 and autoNav.distance >= 2:
                        autoNav.center_point()
                else:
                    initTime = rospy.Time.now().secs
                    # Wait up to 2 s before declaring the mission finished.
                    while ((not rospy.is_shutdown()) and
                           (len(autoNav.objects_list) < 3 or autoNav.distance < 2)):
                        if rospy.Time.now().secs - initTime > 2:
                            autoNav.state = 3
                            rate.sleep()
                            break
                    # last_detection = autoNav.objects_list
            elif autoNav.state == 3:
                # Mission complete: report state and success status.
                autoNav.test.publish(autoNav.state)
                time.sleep(1)
                autoNav.status_pub.publish(1)
        rate.sleep()
    rospy.spin()


if __name__ == "__main__":
    try:
        main()
    except rospy.ROSInterruptException:
        pass
|
Owen-Liuyuxuan/LIGA-Stereo
|
tools/eval_utils/eval_utils.py
|
import pickle
import time
import numpy as np
import torch
import tqdm
from liga.models import load_data_to_gpu
from liga.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
    """Accumulate one batch's recall and depth-error stats.

    Mutates *metric* (running totals across batches) and *disp_dict* (strings
    for the progress bar) in place, using the per-batch values in *ret_dict*.
    """
    thresh_list = cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST
    for cur_thresh in thresh_list:
        roi_key = 'recall_roi_%s' % str(cur_thresh)
        rcnn_key = 'recall_rcnn_%s' % str(cur_thresh)
        metric[roi_key] += ret_dict.get('roi_%s' % str(cur_thresh), 0)
        metric[rcnn_key] += ret_dict.get('rcnn_%s' % str(cur_thresh), 0)
    metric['gt_num'] += ret_dict.get('gt', 0)
    metric['num'] += 1
    min_thresh = thresh_list[0]
    disp_dict['recall_%s' % str(min_thresh)] = '(%d, %d) / %d' % (
        metric['recall_roi_%s' % str(min_thresh)],
        metric['recall_rcnn_%s' % str(min_thresh)],
        metric['gt_num'],
    )
    # depth evaluation for stereo detection
    for key, value in ret_dict.items():
        if not key.startswith('depth_error_'):
            continue
        if key.endswith('perbox'):
            # Per-box entries are lists of dicts; keep them all.
            metric.setdefault(key, []).extend(value)
        else:
            # Scalar errors are summed; the median-style ones are displayed
            # as a running mean over the number of batches.
            metric[key] = metric.get(key, 0.) + value
            if key in ('depth_error_fg_median', 'depth_error_median'):
                disp_dict[key] = '%.3f' % (metric[key] / metric['num'])
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Run one full evaluation epoch and return a dict of summary metrics.

    Args:
        cfg: global config; reads MODEL.POST_PROCESSING.RECALL_THRESH_LIST
            and LOCAL_RANK.
        model: detector to evaluate (wrapped in DDP when dist_test is True).
        dataloader: evaluation dataloader.
        epoch_id: label used for logging only.
        logger: logger instance.
        dist_test: multi-GPU evaluation; per-rank results are merged.
        save_to_file: also dump per-frame predictions under result_dir.
        result_dir: output directory (pathlib.Path-like); must not be None.

    Returns:
        dict of recall / depth-error / dataset-evaluation metrics
        (empty dict on non-zero ranks).
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / 'final_result' / 'data'
    final_2d_output_dir = result_dir / 'final_result' / 'data2d'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
        final_2d_output_dir.mkdir(parents=True, exist_ok=True)
    # Running totals filled in by statistics_info for every batch.
    metric = {
        'num': 0,
        'gt_num': 0,
        # 'depth_error_mean': 0.,
        # 'depth_error_median': 0.,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(cur_thresh)] = 0
        metric['recall_rcnn_%s' % str(cur_thresh)] = 0
    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []      # per-frame 3-D detections
    det_annos_2d = []   # per-frame 2-D detections
    iou_results = []
    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[local_rank],
            broadcast_buffers=False
        )
    model.eval()
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    for i, batch_dict in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            pred_dicts, ret_dict = model(batch_dict)
        disp_dict = {}
        statistics_info(cfg, ret_dict, metric, disp_dict)
        if 'gt_boxes' in batch_dict and 'iou_results' in pred_dicts[0]:
            iou_results.extend([x['iou_results'] for x in pred_dicts])
        # 2-D / 3-D annos are only produced when the model emitted the
        # corresponding score fields.
        annos_2d = dataset.generate_prediction_dicts(
            batch_dict, pred_dicts, class_names,
            output_path=final_2d_output_dir if save_to_file else None,
            mode_2d=True
        ) if 'pred_scores_2d' in pred_dicts[0] else None
        annos = dataset.generate_prediction_dicts(
            batch_dict, pred_dicts, class_names,
            output_path=final_output_dir if save_to_file else None
        ) if 'pred_scores' in pred_dicts[0] else None
        if annos_2d is not None:
            det_annos_2d += annos_2d
        if annos is not None:
            det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    if dist_test:
        # Gather results from all ranks; metric becomes a list of per-rank dicts.
        rank, world_size = common_utils.get_dist_info()
        iou_results = common_utils.merge_results_dist(iou_results, len(dataset), tmpdir=result_dir / 'tmpdir')
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
        det_annos_2d = common_utils.merge_results_dist(det_annos_2d, len(dataset), tmpdir=result_dir / 'tmpdir')
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)
    if cfg.LOCAL_RANK != 0:
        # Only rank 0 computes and reports the final metrics.
        return {}
    ret_dict = {}
    if dist_test:
        # Sum every rank's counters into metric[0].
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
        logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
        ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
        ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
    for k in metric:
        if k.startswith('depth_error_'):
            if not k.endswith('perbox'):
                # Scalar depth errors were summed per batch; report the mean.
                metric[k] /= metric['num']
                logger.info('%s: %f' % (k, metric[k]))
                ret_dict['depth_error/%s' % (k)] = metric[k]
            else:
                # Per-box entries are dicts; average each err_* field.
                for kk in metric[k][0]:
                    if kk.startswith("err_"):
                        values = [item[kk] for item in metric[k]]
                        mean_value = np.mean(values)
                        logger.info('%s: %f' % (k + "_" + kk, mean_value))
                        ret_dict['%s' % (k + "_" + kk)] = mean_value
                # copy iou into metric[k]
                if not iou_results:
                    continue
                for x in metric[k]:
                    x['iou'] = iou_results[x['image_idx']][x['idx']]
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects(%d samples): %.3f'
                % (len(det_annos), total_pred_objects / max(1, len(det_annos))))
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    with open(result_dir / 'metric_result.pkl', 'wb') as f:
        pickle.dump(metric, f)
    # NOTE(review): batch_dict here is the *last* batch from the loop; its
    # keys are used only to decide whether ground truth exists at all.
    if det_annos and 'gt_boxes' in batch_dict:
        logger.info('---- 3d box evaluation ---- ')
        result_str, result_dict = dataset.evaluation(
            det_annos, class_names,
            eval_metric='3d',
            output_path=final_output_dir
        )
        logger.info(result_str)
        ret_dict.update(result_dict)
    if det_annos_2d and 'gt_boxes_2d' in batch_dict:
        logger.info('---- 2d box evaluation ---- ')
        result_str, _ = dataset.evaluation(
            det_annos_2d, class_names,
            eval_metric='2d',
            output_path=final_2d_output_dir
        )
        logger.info(result_str)
    else:
        logger.info(f"no 2d eval: {'gt_boxes_2d' in batch_dict} / {det_annos_2d}")
    logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict


if __name__ == '__main__':
    pass
|
Owen-Liuyuxuan/LIGA-Stereo
|
tools/train_utils/optimization/__init__.py
|
from functools import partial
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
from .learning_schedules_fastai import CosineWarmupLR, OneCycle
class FusedOptimizer(optim.Optimizer):
    """Composite optimizer: one sub-optimizer per distinct 'optimizer' tag.

    Each entry of *all_params* is a param-group dict carrying an extra
    'optimizer' key ('adam' / 'adamw' / 'sgd'); groups with the same tag are
    handed to one torch optimizer, and zero_grad/step fan out to all of them.
    """

    def __init__(self, all_params, lr=None, weight_decay=None, momentum=None):
        self.optimizers = []
        for op in set(x['optimizer'] for x in all_params):
            groups = [x for x in all_params if x['optimizer'] == op]
            if op == 'adam':
                sub = optim.Adam(groups, lr=lr, weight_decay=weight_decay)
            elif op == 'adamw':
                sub = optim.AdamW(groups, lr=lr, weight_decay=weight_decay)
            elif op == 'sgd':
                sub = optim.SGD(
                    groups, lr=lr, weight_decay=weight_decay,
                    momentum=momentum
                )
            else:
                raise ValueError('wrong ops type')
            self.optimizers.append(sub)
        # Register all groups on the wrapper too, so schedulers that walk
        # param_groups see a conventional torch Optimizer.
        super(FusedOptimizer, self).__init__(
            all_params, dict(lr=lr, weight_decay=weight_decay))

    def zero_grad(self):
        for sub in self.optimizers:
            sub.zero_grad()

    def step(self):
        for sub in self.optimizers:
            sub.step()
def build_optimizer(model, optim_cfg):
    """Build the optimizer described by *optim_cfg* for *model*.

    With PER_PARAMETER_CFG set, parameters are split into groups by name
    prefix (each group may carry its own lr multiplier and optimizer tag);
    otherwise all trainable parameters form a single group. OPTIMIZER selects
    adam / adamw / sgd / fused (FusedOptimizer) / adam_onecycle (OptimWrapper).
    """
    if getattr(optim_cfg, 'PER_PARAMETER_CFG', None) is None:
        params = [x for x in model.parameters() if x.requires_grad]
    else:
        # Partition the trainable parameters by name prefix; each matched
        # parameter is removed from the pool so it lands in exactly one group.
        all_parameters = dict(model.named_parameters())
        all_parameters = {k: v for k, v in all_parameters.items() if v.requires_grad}
        params = []
        for cur_cfg in optim_cfg.PER_PARAMETER_CFG:
            cur_params = []
            for k in list(all_parameters.keys()):
                if cur_cfg.START_WITH == 'others':
                    # 'others' is a catch-all for whatever is still unmatched.
                    check_ok = True
                elif isinstance(cur_cfg.START_WITH, str):
                    check_ok = k.startswith(cur_cfg.START_WITH)
                elif isinstance(cur_cfg.START_WITH, list):
                    check_ok = any([k.startswith(start_str) for start_str in cur_cfg.START_WITH])
                else:
                    raise ValueError('wrong start_with config')
                if check_ok:
                    cur_params.append(all_parameters[k])
                    all_parameters.pop(k)
            assert len(cur_params) > 0, 'cannot find any parameter starting with {}'.format(cur_cfg.START_WITH)
            print(f"find {len(cur_params)} parameters starting with {cur_cfg.START_WITH}")
            params.append({
                "params": cur_params,
                "lr": optim_cfg.LR * cur_cfg.MUL_LR,
            })
            if 'optimizer' in cur_cfg:
                # Per-group optimizer tag, consumed by FusedOptimizer below.
                params[-1]['optimizer'] = cur_cfg.optimizer
        if len(all_parameters) > 0:
            # Leftovers are excluded from optimization -- surfaced for review.
            print(f"find {len(all_parameters)} parameters left")
            print(list(all_parameters.keys()))
    if optim_cfg.OPTIMIZER == 'adam':
        optimizer = optim.Adam(params, lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY)
    elif optim_cfg.OPTIMIZER == 'adamw':
        optimizer = optim.AdamW(params, lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY)
    elif optim_cfg.OPTIMIZER == 'sgd':
        optimizer = optim.SGD(
            params, lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY,
            momentum=optim_cfg.MOMENTUM
        )
    elif optim_cfg.OPTIMIZER == 'fused':
        optimizer = FusedOptimizer(params, lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY, momentum=optim_cfg.MOMENTUM)
    elif optim_cfg.OPTIMIZER == 'adam_onecycle':
        assert getattr(optim_cfg, 'PER_CHILD_CFG', None) is None

        # fastai-style helpers: flatten the module tree into leaf modules so
        # OptimWrapper can build its layer groups.
        def children(m: nn.Module):
            return list(m.children())

        def num_children(m: nn.Module) -> int:
            return len(children(m))

        def flatten_model(m):
            return sum(map(flatten_model, m.children()), []) if num_children(m) else [m]

        def get_layer_groups(m):
            return [nn.Sequential(*flatten_model(m))]

        optimizer_func = partial(optim.Adam, betas=(0.9, 0.99))
        optimizer = OptimWrapper.create(
            optimizer_func, 3e-3, get_layer_groups(model), wd=optim_cfg.WEIGHT_DECAY, true_wd=True, bn_wd=True
        )
    else:
        raise NotImplementedError
    return optimizer
def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg):
    """Build (lr_scheduler, lr_warmup_scheduler) for *optimizer*.

    adam_onecycle gets a OneCycle schedule; everything else gets a step-decay
    LambdaLR (decay by LR_DECAY at each DECAY_STEP_LIST boundary, floored at
    LR_CLIP), optionally preceded by a cosine warmup when LR_WARMUP is set.
    """
    decay_steps = [x * total_iters_each_epoch for x in optim_cfg.DECAY_STEP_LIST]

    def lr_lbmd(cur_epoch):
        # Multiply LR_DECAY once per boundary already passed, never dropping
        # below the LR_CLIP / LR floor.
        boundaries_passed = sum(1 for step in decay_steps if cur_epoch >= step)
        factor = optim_cfg.LR_DECAY ** boundaries_passed
        return max(factor, optim_cfg.LR_CLIP / optim_cfg.LR)

    lr_warmup_scheduler = None
    total_steps = total_iters_each_epoch * total_epochs
    if optim_cfg.OPTIMIZER == 'adam_onecycle':
        lr_scheduler = OneCycle(
            optimizer, total_steps, optim_cfg.LR, list(optim_cfg.MOMS), optim_cfg.DIV_FACTOR, optim_cfg.PCT_START
        )
    else:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
        if optim_cfg.LR_WARMUP:
            lr_warmup_scheduler = CosineWarmupLR(
                optimizer, T_max=optim_cfg.WARMUP_EPOCH * total_iters_each_epoch,
                eta_min=optim_cfg.LR / optim_cfg.DIV_FACTOR
            )
    return lr_scheduler, lr_warmup_scheduler
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/dense_heads/__init__.py
|
<filename>liga/models/dense_heads/__init__.py<gh_stars>10-100
from .anchor_head_single import AnchorHeadSingle
from .det_head import DetHead
from .anchor_head_template import AnchorHeadTemplate
from .mmdet_2d_head import MMDet2DHead

# NOTE: __all__ is (unconventionally) a dict here -- it serves as a
# name -> class registry so configs can instantiate heads by string name.
__all__ = {
    'AnchorHeadTemplate': AnchorHeadTemplate,
    'AnchorHeadSingle': AnchorHeadSingle,
    'DetHead': DetHead,
    'MMDet2DHead': MMDet2DHead,
}
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/datasets/augmentor/stereo_data_augmentor.py
|
<filename>liga/datasets/augmentor/stereo_data_augmentor.py
# data augmentor for stereo data_dict.
from functools import partial
import numpy as np
from liga.utils import common_utils, box_utils
class StereoDataAugmentor(object):
    """Builds and applies the configured augmentation pipeline for a stereo
    data_dict."""

    def __init__(self, root_path, augmentor_configs, class_names, logger=None):
        self.root_path = root_path
        self.class_names = class_names
        self.logger = logger
        # Ordered callables, each applied to a data_dict in turn.
        self.data_augmentor_queue = []
        if augmentor_configs is not None:
            # Configs may be a bare list or an object exposing
            # AUG_CONFIG_LIST / DISABLE_AUG_LIST attributes.
            aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) else augmentor_configs.AUG_CONFIG_LIST
            for cur_cfg in aug_config_list:
                if not isinstance(augmentor_configs, list):
                    # Skip augmentations explicitly disabled in the config.
                    if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
                        continue
                if cur_cfg.NAME in ["gt_sampling"]:
                    # gt_sampling is called immediately -- presumably a factory
                    # returning a configured augmentor (method not visible in
                    # this chunk; TODO confirm).
                    cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
                else:
                    # Other augmentors are bound lazily with their config.
                    cur_augmentor = partial(getattr(self, cur_cfg.NAME), config=cur_cfg)
                self.data_augmentor_queue.append(cur_augmentor)
def pre_2d_transformation(self, data_dict):
assert 'did_3d_transformation' not in data_dict
assert 'gt_boxes_no3daug' not in data_dict
assert 'points_no3daug' not in data_dict
def pre_world_transformation(self, data_dict):
data_dict['did_3d_transformation'] = True
if 'gt_boxes_no3daug' not in data_dict:
data_dict['gt_boxes_no3daug'] = data_dict['gt_boxes'].copy()
if 'points_no3daug' not in data_dict:
data_dict['points_no3daug'] = data_dict['points'].copy()
def random_crop(self, data_dict, config=None):
self.pre_2d_transformation(data_dict)
crop_rel_x = np.random.uniform(low=config.MIN_REL_X, high=config.MAX_REL_X) / 2 + 0.5
crop_rel_y = np.random.uniform(low=config.MIN_REL_Y, high=config.MAX_REL_Y) / 2 + 0.5
old_h, old_w = data_dict['left_img'].shape[:2]
crop_h, crop_w = min(config.MAX_CROP_H, old_h), min(config.MAX_CROP_W, old_w)
assert crop_h <= old_h and crop_w <= old_w and 0 <= crop_rel_x <= 1 and 0 <= crop_rel_y <= 1
x1 = int((old_w - crop_w) * crop_rel_x)
y1 = int((old_h - crop_h) * crop_rel_y)
data_dict['left_img'] = data_dict['left_img'][y1: y1 + crop_h, x1:x1 + crop_w]
data_dict['right_img'] = data_dict['right_img'][y1: y1 + crop_h, x1:x1 + crop_w]
data_dict['calib'].offset(x1, y1)
if 'image_shape' in data_dict:
data_dict['image_shape'] = data_dict['left_img'].shape[:2]
if 'gt_boxes_2d_ignored' in data_dict:
data_dict['gt_boxes_2d_ignored'] = data_dict['gt_boxes_2d_ignored'].copy()
data_dict['gt_boxes_2d_ignored'][:, [0, 2]] -= x1
data_dict['gt_boxes_2d_ignored'][:, [1, 3]] -= y1
return data_dict
def filter_truncated(self, data_dict, config=None):
assert 'gt_boxes' in data_dict, 'should not call filter_truncated in test mode'
self.pre_2d_transformation(data_dict)
# reproject bboxes into image space and do filtering by truncated ratio
area_ratio_threshold = config.AREA_RATIO_THRESH
area_2d_ratio_threshold = config.AREA_2D_RATIO_THRESH
gt_truncated_threshold = config.GT_TRUNCATED_THRESH
valid_mask = data_dict['gt_boxes_mask'][data_dict['gt_boxes_mask']]
if area_ratio_threshold is not None:
assert area_ratio_threshold >= 0.9, 'AREA_RATIO_THRESH should be >= 0.9'
image_shape = data_dict['left_img'].shape[:2]
calib = data_dict['calib']
gt_boxes_cam = box_utils.boxes3d_lidar_to_kitti_camera(data_dict['gt_boxes'][data_dict['gt_boxes_mask']], None, pseudo_lidar=True)
boxes2d_image, _ = box_utils.boxes3d_kitti_camera_to_imageboxes(gt_boxes_cam, calib, image_shape, return_neg_z_mask=True, fix_neg_z_bug=True)
truncated_ratio = 1 - box_utils.boxes3d_kitti_camera_inside_image_mask(gt_boxes_cam, calib, image_shape, reduce=False).mean(-1)
valid_mask &= truncated_ratio < area_ratio_threshold
if area_2d_ratio_threshold is not None:
assert area_2d_ratio_threshold >= 0.9, 'AREA_2D_RATIO_THRESH should be >= 0.9'
image_shape = data_dict['left_img'].shape[:2]
boxes2d_image, no_neg_z_valids = box_utils.boxes3d_kitti_camera_to_imageboxes(
box_utils.boxes3d_lidar_to_kitti_camera(data_dict['gt_boxes'][data_dict['gt_boxes_mask']], data_dict['calib'], pseudo_lidar=True),
data_dict['calib'],
return_neg_z_mask=True,
fix_neg_z_bug=True
)
boxes2d_inside = np.zeros_like(boxes2d_image)
boxes2d_inside[:, 0] = np.clip(boxes2d_image[:, 0], a_min=0, a_max=image_shape[1] - 1)
boxes2d_inside[:, 1] = np.clip(boxes2d_image[:, 1], a_min=0, a_max=image_shape[0] - 1)
boxes2d_inside[:, 2] = np.clip(boxes2d_image[:, 2], a_min=0, a_max=image_shape[1] - 1)
boxes2d_inside[:, 3] = np.clip(boxes2d_image[:, 3], a_min=0, a_max=image_shape[0] - 1)
clip_box_area = (boxes2d_inside[:, 2] - boxes2d_inside[:, 0]) * (boxes2d_inside[:, 3] - boxes2d_inside[:, 1])
full_box_area = (boxes2d_image[:, 2] - boxes2d_image[:, 0]) * (boxes2d_image[:, 3] - boxes2d_image[:, 1])
clip_ratio = 1 - clip_box_area / full_box_area
valid_mask &= clip_ratio < area_2d_ratio_threshold
if gt_truncated_threshold is not None:
gt_truncated = data_dict['gt_truncated'][data_dict['gt_boxes_mask']]
valid_mask &= gt_truncated < gt_truncated_threshold
cared_mask = data_dict['gt_boxes_mask'].copy()
if not all(valid_mask):
invalid_mask = ~valid_mask
print(config)
print('filter truncated ratio:', truncated_ratio[invalid_mask] if area_ratio_threshold is not None else 'null',
'3d boxes', data_dict['gt_boxes'][cared_mask][invalid_mask],
'flipped', data_dict['calib'].flipped,
'image idx', data_dict['image_idx'],
'frame_id', data_dict['frame_id'],
'\n')
data_dict['gt_boxes_mask'][cared_mask] = valid_mask
return data_dict
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def random_world_rotation(self, data_dict, config=None):
self.pre_world_transformation(data_dict)
rot_range = config['WORLD_ROT_ANGLE']
if not isinstance(rot_range, list):
rot_range = [-rot_range, rot_range]
gt_boxes, points, T = augmentor_utils.global_rotation(
data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range,
return_trans_mat=True
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points # points_for2d is fixed since images do not support rotation
# note that random T is the inverse transformation matrix
data_dict['random_T'] = np.matmul(data_dict.get('random_T', np.eye(4)), T)
return data_dict
def random_world_scaling(self, data_dict, config=None):
self.pre_world_transformation(data_dict)
gt_boxes, points, T = augmentor_utils.global_scaling(
data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE'],
return_trans_mat=True
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
# note that random T is the inverse transformation matrix
data_dict['random_T'] = np.matmul(data_dict.get('random_T', np.eye(4)), T)
return data_dict
def random_world_translation(self, data_dict=None, config=None):
self.pre_world_transformation(data_dict)
gt_boxes, points, T = augmentor_utils.global_translation(
data_dict['gt_boxes'], data_dict['points'], config['WORLD_TRANSLATION_RANGE'],
return_trans_mat=True
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
# note that random T is the inverse transformation matrix
data_dict['random_T'] = np.matmul(data_dict.get('random_T', np.eye(4)), T)
return data_dict
def forward(self, data_dict):
for cur_augmentor in self.data_augmentor_queue:
data_dict = cur_augmentor(data_dict=data_dict)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
)
if 'road_plane' in data_dict:
data_dict.pop('road_plane')
if 'gt_boxes_mask' in data_dict:
gt_boxes_mask = data_dict['gt_boxes_mask']
for key in ['gt_names', 'gt_boxes', 'gt_truncated', 'gt_occluded', 'gt_difficulty', 'gt_index']:
data_dict[key] = data_dict[key][gt_boxes_mask]
for key in ['gt_boxes_no3daug']:
if key in data_dict:
data_dict[key] = data_dict[key][gt_boxes_mask]
data_dict.pop('gt_boxes_mask')
return data_dict
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/backbones_3d_stereo/cost_volume.py
|
# Stereo cost volume builder.
import torch
from torch import nn
from liga.ops.build_cost_volume import build_cost_volume
class BuildCostVolume(nn.Module):
    """Builds a stereo matching cost volume from left/right feature maps.

    Only the ``concat`` volume type is implemented; when several volumes are
    configured, they are concatenated along the channel dimension.
    """

    def __init__(self, volume_cfgs):
        super(BuildCostVolume, self).__init__()
        self.volume_cfgs = volume_cfgs

    def get_dim(self, feature_channel):
        """Channel dimension of the resulting volume (2x features per concat cfg)."""
        return sum(
            feature_channel * 2
            for cfg in self.volume_cfgs
            if cfg["type"] == "concat"
        )

    def forward(self, left, right, left_raw, right_raw, shift):
        """Build every configured cost volume and merge them channel-wise."""
        volumes = []
        for cfg in self.volume_cfgs:
            if cfg["type"] != "concat":
                raise NotImplementedError
            ds = getattr(cfg, "downsample", 1)
            volumes.append(build_cost_volume(left, right, shift, ds))
        return torch.cat(volumes, dim=1) if len(volumes) > 1 else volumes[0]

    def __repr__(self):
        return self.__class__.__name__
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/datasets/augmentor/augmentor_utils.py
|
# Modified from OpenPCDet. https://github.com/open-mmlab/OpenPCDet
# Augmentation utility functions.
import numpy as np
from liga.utils import common_utils
def random_flip_along_x(gt_boxes, points):
    """Randomly mirror the scene across the x axis (y -> -y), in place.

    Args:
        gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
        points: (M, 3 + C)
    Returns:
        The (possibly flipped) gt_boxes and points.
    """
    flip = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
    if flip:
        # negate y coordinates and heading; vy (column 8) flips as well
        gt_boxes[:, 1] *= -1
        gt_boxes[:, 6] *= -1
        points[:, 1] *= -1
        if gt_boxes.shape[1] > 7:
            gt_boxes[:, 8] *= -1
    return gt_boxes, points
def random_flip_along_y(gt_boxes, points):
    """Randomly mirror the scene across the y axis (x -> -x), in place.

    Args:
        gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
        points: (M, 3 + C)
    Returns:
        The (possibly flipped) gt_boxes and points.
    """
    flip = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
    if flip:
        # negate x coordinates; heading reflects as -(theta + pi)
        gt_boxes[:, 0] *= -1
        gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi)
        points[:, 0] *= -1
        if gt_boxes.shape[1] > 7:
            gt_boxes[:, 7] *= -1
    return gt_boxes, points
def global_rotation(gt_boxes, points, rot_range, return_trans_mat=False):
    """Rotate the whole scene around the z axis by a random angle.

    Args:
        gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
        points: (M, 3 + C)
        rot_range: [min, max] rotation angle range (radians)
        return_trans_mat: also return the 4x4 INVERSE transform matrix
    Returns:
        gt_boxes, points (and the inverse transform when requested)
    """
    angle = np.random.uniform(rot_range[0], rot_range[1])
    angle_arr = np.array([angle])
    points = common_utils.rotate_points_along_z(points[np.newaxis, :, :], angle_arr)[0]
    gt_boxes[:, 0:3] = common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], angle_arr)[0]
    gt_boxes[:, 6] += angle
    if gt_boxes.shape[1] > 7:
        # velocities (vx, vy) rotate like 2D points; pad a zero z column
        vel = np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))
        gt_boxes[:, 7:9] = common_utils.rotate_points_along_z(
            vel[np.newaxis, :, :], angle_arr
        )[0][:, 0:2]
    if not return_trans_mat:
        return gt_boxes, points
    # inverse transform: rotation by -angle about z
    T = np.eye(4, dtype=np.float32)
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    T[0, 0] = cos_a
    T[1, 1] = cos_a
    T[0, 1] = sin_a
    T[1, 0] = -sin_a
    return gt_boxes, points, T
def global_scaling(gt_boxes, points, scale_range, return_trans_mat=False):
    """Scale the whole scene by a random factor drawn from ``scale_range``.

    Args:
        gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]; modified in place
        points: (M, 3 + C); xyz columns scaled in place
        scale_range: [min, max] scale factor range
        return_trans_mat: also return the 4x4 INVERSE transform matrix
    Returns:
        gt_boxes, points (and the inverse transform when requested)
    """
    if scale_range[1] - scale_range[0] < 1e-3:
        # Degenerate range: no scaling is applied.
        # BUGFIX: this branch previously returned only (gt_boxes, points)
        # even when return_trans_mat=True, which crashed callers (e.g.
        # random_world_scaling) that unpack three values; return identity.
        if return_trans_mat:
            return gt_boxes, points, np.eye(4, dtype=np.float32)
        return gt_boxes, points
    noise_scale = np.random.uniform(scale_range[0], scale_range[1])
    points[:, :3] *= noise_scale
    gt_boxes[:, :6] *= noise_scale
    if not return_trans_mat:
        return gt_boxes, points
    # inverse transform: uniform scale by 1 / noise_scale
    T = np.eye(4, dtype=np.float32)
    T[0, 0] = T[1, 1] = T[2, 2] = 1 / noise_scale
    return gt_boxes, points, T
def global_translation(gt_boxes, points, translation_range, return_trans_mat=False):
    """Translate the whole scene by a random offset (lidar coordinates).

    Args:
        gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]; shifted in place
        points: (M, 3 + C); xyz columns shifted in place
        translation_range: [x_min, y_min, z_min, x_max, y_max, z_max]
        return_trans_mat: also return the 4x4 INVERSE transform matrix
    Returns:
        gt_boxes, points (and the inverse transform when requested)
    """
    # draw one offset per axis: lows are the first three entries, highs the last three
    offsets = [
        np.random.uniform(low=translation_range[axis], high=translation_range[axis + 3])
        for axis in range(3)
    ]
    for axis, off in enumerate(offsets):
        gt_boxes[:, axis] += off
        points[:, axis] += off
    if not return_trans_mat:
        return gt_boxes, points
    # inverse transform: translate back by the negated offsets
    T = np.eye(4, dtype=np.float32)
    T[0, 3] = -offsets[0]
    T[1, 3] = -offsets[1]
    T[2, 3] = -offsets[2]
    return gt_boxes, points, T
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/backbones_3d_stereo/liga_backbone.py
|
# The backbone of our LIGA model.
# including 2D feature extraction, stereo volume construction, stereo network, stereo space -> 3D space conversion
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
from mmdet.models.builder import build_backbone, build_neck
from . import submodule
from .submodule import convbn_3d, feature_extraction_neck
from .cost_volume import BuildCostVolume
def project_pseudo_lidar_to_rectcam(pts_3d):
    """Pseudo-lidar coords (x fwd, y left, z up) -> rect camera (x right, y down, z fwd)."""
    x = pts_3d[..., 0]
    y = pts_3d[..., 1]
    z = pts_3d[..., 2]
    return torch.stack((-y, -z, x), dim=-1)
def project_rectcam_to_pseudo_lidar(pts_3d):
    """Rect camera coords (x right, y down, z fwd) -> pseudo-lidar (x fwd, y left, z up)."""
    x = pts_3d[..., 0]
    y = pts_3d[..., 1]
    z = pts_3d[..., 2]
    return torch.stack((z, -x, -y), dim=-1)
def project_rect_to_image(pts_3d_rect, P):
    """Project rect-camera 3D points to pixel coordinates via a 3x4 matrix.

    Args:
        pts_3d_rect: (n, 3) points in the rectified camera frame
        P: (3, 4) projection matrix
    Returns:
        (n, 2) pixel coordinates (u, v)
    """
    num_pts = pts_3d_rect.shape[0]
    homo = torch.cat(
        [pts_3d_rect, torch.ones((num_pts, 1), device=pts_3d_rect.device)], dim=1)
    proj = homo @ P.t()  # (n, 3) homogeneous image coordinates
    return proj[:, 0:2] / proj[:, 2:3]
def unproject_image_to_rect(pts_image, P):
    """Back-project image points (u, v, depth) into rect-camera 3D coordinates.

    Args:
        pts_image: (..., 3) pixel coordinates with depth in the last channel
        P: (3, 4) projection matrix
    Returns:
        (..., 3) 3D points in the rectified camera frame
    """
    # homogeneous pixel (u, v, 1) scaled by depth gives (u*d, v*d, d)
    uv1 = torch.cat([pts_image[..., :2], torch.ones_like(pts_image[..., 2:3])], -1)
    cam = uv1 * pts_image[..., 2:3]
    cam = torch.cat([cam, torch.ones_like(cam[..., 2:3])], -1)
    # extend P to 4x4 so it can be inverted
    P4x4 = torch.eye(4, dtype=P.dtype, device=P.device)
    P4x4[:3, :] = P
    P_inv = torch.inverse(P4x4)
    rect = torch.matmul(cam, P_inv.t())
    return rect[..., :3]
class LigaBackbone(nn.Module):
    """Stereo 3D detection backbone of LIGA.

    Pipeline: shared 2D feature extraction on both views -> plane-sweep cost
    volume -> hourglass stereo network (with depth supervision outputs) ->
    sampling the stereo volume (and optionally 2D semantic features) onto a
    regular 3D voxel grid consumed by the downstream detection heads.
    """

    def __init__(self, model_cfg, class_names, grid_size, voxel_size, point_cloud_range, boxes_gt_in_cam2_view=False, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # general config
        self.class_names = class_names
        self.GN = model_cfg.GN
        self.boxes_gt_in_cam2_view = boxes_gt_in_cam2_view
        self.fullres_stereo_feature = model_cfg.feature_neck.with_upconv
        # stereo config
        self.maxdisp = model_cfg.maxdisp
        self.downsample_disp = model_cfg.downsample_disp
        self.downsampled_depth_offset = model_cfg.downsampled_depth_offset
        self.num_hg = getattr(model_cfg, 'num_hg', 1)
        self.use_stereo_out_type = getattr(model_cfg, 'use_stereo_out_type', False)
        assert self.use_stereo_out_type in ["feature", "cost", "prob"]
        # volume construction config
        self.cat_img_feature = model_cfg.cat_img_feature
        self.img_feature_attentionbydisp = model_cfg.img_feature_attentionbydisp
        self.voxel_attentionbydisp = model_cfg.voxel_attentionbydisp
        self.rpn3d_dim = model_cfg.rpn3d_dim
        # volume config
        self.num_3dconvs = model_cfg.num_3dconvs
        self.cv_dim = model_cfg.cv_dim
        # feature extraction
        self.feature_backbone = build_backbone(model_cfg.feature_backbone)
        self.feature_neck = feature_extraction_neck(model_cfg.feature_neck)
        if getattr(model_cfg, 'sem_neck', None):
            self.sem_neck = build_neck(model_cfg.sem_neck)
        else:
            self.sem_neck = None
        # cost volume
        self.build_cost = BuildCostVolume(model_cfg.cost_volume)
        # stereo network
        CV_INPUT_DIM = self.build_cost.get_dim(
            self.feature_neck.stereo_dim[-1])
        self.dres0 = nn.Sequential(
            convbn_3d(CV_INPUT_DIM, self.cv_dim, 3, 1, 1, gn=self.GN),
            nn.ReLU(inplace=True))
        self.dres1 = nn.Sequential(
            convbn_3d(self.cv_dim, self.cv_dim, 3, 1, 1, gn=self.GN))
        self.hg_stereo = nn.ModuleList()
        for _ in range(self.num_hg):
            self.hg_stereo.append(submodule.hourglass(self.cv_dim, gn=self.GN))
        # stereo predictions
        self.pred_stereo = nn.ModuleList()
        for _ in range(self.num_hg):
            self.pred_stereo.append(self.build_depth_pred_module())
        self.dispregression = submodule.disparityregression()
        # rpn3d convs
        # NOTE: double negative — equivalent to
        # ``self.cv_dim if self.use_stereo_out_type == "feature" else 1``
        RPN3D_INPUT_DIM = self.cv_dim if not (self.use_stereo_out_type != "feature") else 1
        if self.cat_img_feature:
            RPN3D_INPUT_DIM += self.feature_neck.sem_dim[-1]
        rpn3d_convs = []
        for i in range(self.num_3dconvs):
            rpn3d_convs.append(
                nn.Sequential(
                    convbn_3d(RPN3D_INPUT_DIM if i == 0 else self.rpn3d_dim,
                              self.rpn3d_dim, 3, 1, 1, gn=self.GN),
                    nn.ReLU(inplace=True)))
        self.rpn3d_convs = nn.Sequential(*rpn3d_convs)
        self.rpn3d_pool = torch.nn.AvgPool3d((4, 1, 1), stride=(4, 1, 1))
        self.num_3d_features = self.rpn3d_dim
        # prepare tensors
        self.prepare_depth(point_cloud_range, in_camera_view=False)
        self.prepare_coordinates_3d(point_cloud_range, voxel_size, grid_size)
        self.init_params()
        feature_backbone_pretrained = getattr(model_cfg, 'feature_backbone_pretrained', None)
        if feature_backbone_pretrained:
            self.feature_backbone.init_weights(pretrained=feature_backbone_pretrained)

    def build_depth_pred_module(self):
        """3D conv head that predicts a 1-channel depth cost volume, upsampled to full resolution."""
        return nn.Sequential(
            convbn_3d(self.cv_dim, self.cv_dim, 3, 1, 1, gn=self.GN),
            nn.ReLU(inplace=True),
            nn.Conv3d(self.cv_dim, 1, 3, 1, 1, bias=False),
            nn.Upsample(scale_factor=self.downsample_disp, mode='trilinear', align_corners=True))

    def prepare_depth(self, point_cloud_range, in_camera_view=True):
        """Precompute the depth hypothesis values of the plane-sweep volume.

        Builds ``self.depth`` (full resolution) and ``self.downsampled_depth``
        (one value per downsampled plane).
        """
        if in_camera_view:
            self.CV_DEPTH_MIN = point_cloud_range[2]
            self.CV_DEPTH_MAX = point_cloud_range[5]
        else:
            self.CV_DEPTH_MIN = point_cloud_range[0]
            self.CV_DEPTH_MAX = point_cloud_range[3]
        assert self.CV_DEPTH_MIN >= 0 and self.CV_DEPTH_MAX > self.CV_DEPTH_MIN
        depth_interval = (self.CV_DEPTH_MAX - self.CV_DEPTH_MIN) / self.maxdisp
        print('stereo volume depth range: {} -> {}, interval {}'.format(self.CV_DEPTH_MIN,
                                                                        self.CV_DEPTH_MAX, depth_interval))
        # prepare downsampled depth
        self.downsampled_depth = torch.zeros(
            (self.maxdisp // self.downsample_disp), dtype=torch.float32)
        for i in range(self.maxdisp // self.downsample_disp):
            self.downsampled_depth[i] = (
                i + self.downsampled_depth_offset) * self.downsample_disp * depth_interval + self.CV_DEPTH_MIN
        # prepare depth
        self.depth = torch.zeros((self.maxdisp), dtype=torch.float32)
        for i in range(self.maxdisp):
            self.depth[i] = (
                i + 0.5) * depth_interval + self.CV_DEPTH_MIN

    def prepare_coordinates_3d(self, point_cloud_range, voxel_size, grid_size, sample_rate=(1, 1, 1)):
        """Precompute the (z, y, x, 3) grid of 3D voxel-center coordinates."""
        self.X_MIN, self.Y_MIN, self.Z_MIN = point_cloud_range[:3]
        self.X_MAX, self.Y_MAX, self.Z_MAX = point_cloud_range[3:]
        self.VOXEL_X_SIZE, self.VOXEL_Y_SIZE, self.VOXEL_Z_SIZE = voxel_size
        self.GRID_X_SIZE, self.GRID_Y_SIZE, self.GRID_Z_SIZE = grid_size.tolist()
        # optional supersampling of the grid
        self.VOXEL_X_SIZE /= sample_rate[0]
        self.VOXEL_Y_SIZE /= sample_rate[1]
        self.VOXEL_Z_SIZE /= sample_rate[2]
        self.GRID_X_SIZE *= sample_rate[0]
        self.GRID_Y_SIZE *= sample_rate[1]
        self.GRID_Z_SIZE *= sample_rate[2]
        zs = torch.linspace(self.Z_MIN + self.VOXEL_Z_SIZE / 2., self.Z_MAX - self.VOXEL_Z_SIZE / 2.,
                            self.GRID_Z_SIZE, dtype=torch.float32)
        ys = torch.linspace(self.Y_MIN + self.VOXEL_Y_SIZE / 2., self.Y_MAX - self.VOXEL_Y_SIZE / 2.,
                            self.GRID_Y_SIZE, dtype=torch.float32)
        xs = torch.linspace(self.X_MIN + self.VOXEL_X_SIZE / 2., self.X_MAX - self.VOXEL_X_SIZE / 2.,
                            self.GRID_X_SIZE, dtype=torch.float32)
        zs, ys, xs = torch.meshgrid(zs, ys, xs)
        coordinates_3d = torch.stack([xs, ys, zs], dim=-1)
        self.coordinates_3d = coordinates_3d.float()

    def init_params(self):
        """He-style init for convs, unit/zero init for norm layers and linear biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[
                    2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def pred_depth(self, depth_conv_module, cost1, img_shape):
        """Run one depth-prediction head; return (cost, softmax prob, expected depth)."""
        cost1 = depth_conv_module(cost1)
        # cost1 = F.interpolate(
        #     cost1, [self.maxdisp, *img_shape],
        #     mode='trilinear',
        #     align_corners=True)
        cost1 = torch.squeeze(cost1, 1)
        cost1_softmax = F.softmax(cost1, dim=1)
        # soft-argmax over the depth hypotheses
        pred1 = self.dispregression(cost1_softmax,
                                    depth=self.depth.cuda())
        return cost1, cost1_softmax, pred1

    def get_local_depth(self, d_prob):
        """Expected depth within the most probable local window (size 5) per pixel.

        More robust than a full soft-argmax when the distribution is multimodal.
        """
        with torch.no_grad():
            d = self.depth.cuda()[None, :, None, None]
            d_mul_p = d * d_prob
            local_window = 5
            p_local_sum = 0
            for off in range(0, local_window):
                cur_p = d_prob[:, off:off + d_prob.shape[1] - local_window + 1]
                p_local_sum += cur_p
            max_indices = p_local_sum.max(1, keepdim=True).indices
            pd_local_sum_for_max = 0
            for off in range(0, local_window):
                cur_pd = torch.gather(d_mul_p, 1, max_indices + off).squeeze(1)  # d_prob[:, off:off + d_prob.shape[1] - local_window + 1]
                pd_local_sum_for_max += cur_pd
            mean_d = pd_local_sum_for_max / torch.gather(p_local_sum, 1, max_indices).squeeze(1)
        return mean_d

    def forward(self, batch_dict):
        """Full stereo -> 3D-volume forward pass; results are written into batch_dict."""
        left = batch_dict['left_img']
        right = batch_dict['right_img']
        calib = batch_dict['calib']
        fu_mul_baseline = torch.as_tensor(
            [x.fu_mul_baseline for x in calib], dtype=torch.float32, device=left.device)
        if self.boxes_gt_in_cam2_view:
            calibs_Proj = torch.as_tensor(
                [x.K3x4 for x in calib], dtype=torch.float32, device=left.device)
        else:
            calibs_Proj = torch.as_tensor(
                [x.P2 for x in calib], dtype=torch.float32, device=left.device)
        N = batch_dict['batch_size']
        # feature extraction
        left_features = self.feature_backbone(left)
        left_features = [left] + list(left_features)
        right_features = self.feature_backbone(right)
        right_features = [right] + list(right_features)
        left_stereo_feat, left_sem_feat = self.feature_neck(left_features)
        right_stereo_feat, _ = self.feature_neck(right_features)
        if self.sem_neck is not None:
            batch_dict['sem_features'] = self.sem_neck([left_sem_feat])
        else:
            batch_dict['sem_features'] = [left_sem_feat]
        batch_dict['rpn_feature'] = left_sem_feat
        # stereo matching: build stereo volume
        downsampled_depth = self.downsampled_depth.cuda()
        # disparity = f * B / depth, scaled to the feature-map resolution
        downsampled_disp = fu_mul_baseline[:, None] / \
            downsampled_depth[None, :] / (self.downsample_disp if not self.fullres_stereo_feature else 1)
        cost_raw = self.build_cost(left_stereo_feat, right_stereo_feat,
                                   None, None, downsampled_disp)
        # stereo matching network
        cost0 = self.dres0(cost_raw)
        cost0 = self.dres1(cost0) + cost0
        if len(self.hg_stereo) > 0:
            all_costs = []
            cur_cost = cost0
            for hg_stereo_module in self.hg_stereo:
                cost_residual, _, _ = hg_stereo_module(cur_cost, None, None)
                cur_cost = cur_cost + cost_residual
                all_costs.append(cur_cost)
        else:
            all_costs = [cost0]
        assert len(all_costs) > 0, 'at least one hourglass'
        # stereo matching: outputs
        batch_dict['depth_preds'] = []
        if not self.training:
            batch_dict['depth_preds_local'] = []
        batch_dict['depth_volumes'] = []
        batch_dict['depth_samples'] = self.depth.clone().detach().cuda()
        for idx in range(len(all_costs)):
            upcost_i, cost_softmax_i, pred_i = self.pred_depth(self.pred_stereo[idx], all_costs[idx], left.shape[2:4])
            batch_dict['depth_volumes'].append(upcost_i)
            batch_dict['depth_preds'].append(pred_i)
            if not self.training:
                batch_dict['depth_preds_local'].append(self.get_local_depth(cost_softmax_i))
        # beginning of 3d detection part
        # pick which stereo output feeds the 3D detection branch
        if self.use_stereo_out_type == "feature":
            out = all_costs[-1]
        elif self.use_stereo_out_type == "prob":
            out = cost_softmax_i.unsqueeze(1)
        elif self.use_stereo_out_type == "cost":
            out = upcost_i.unsqueeze(1)
        else:
            raise ValueError('wrong self.use_stereo_out_type option')
        out_prob = cost_softmax_i
        # convert plane-sweep into 3d volume
        coordinates_3d = self.coordinates_3d.cuda()
        batch_dict['coord'] = coordinates_3d
        norm_coord_imgs = []
        coord_imgs = []
        valids2d = []
        for i in range(N):
            c3d = coordinates_3d.view(-1, 3)
            if 'random_T' in batch_dict:
                # undo the world-space augmentation before projecting
                random_T = batch_dict['random_T'][i]
                c3d = torch.matmul(c3d, random_T[:3, :3].T) + random_T[:3, 3]
            # in pseudo lidar coord
            c3d = project_pseudo_lidar_to_rectcam(c3d)
            coord_img = project_rect_to_image(
                c3d,
                calibs_Proj[i].float().cuda())
            coord_img = torch.cat(
                [coord_img, c3d[..., 2:]], dim=-1)
            coord_img = coord_img.view(*self.coordinates_3d.shape[:3], 3)
            coord_imgs.append(coord_img)
            img_shape = batch_dict['image_shape'][i]
            valid_mask_2d = (coord_img[..., 0] >= 0) & (coord_img[..., 0] <= img_shape[1]) & \
                (coord_img[..., 1] >= 0) & (coord_img[..., 1] <= img_shape[0])
            valids2d.append(valid_mask_2d)
            # TODO: crop augmentation
            crop_x1, crop_x2 = 0, left.shape[3]
            crop_y1, crop_y2 = 0, left.shape[2]
            # normalize (u, v, depth) into [-1, 1] for grid_sample
            norm_coord_img = (coord_img - torch.as_tensor([crop_x1, crop_y1, self.CV_DEPTH_MIN], device=coord_img.device)) / torch.as_tensor(
                [crop_x2 - 1 - crop_x1, crop_y2 - 1 - crop_y1, self.CV_DEPTH_MAX - self.CV_DEPTH_MIN], device=coord_img.device)
            norm_coord_img = norm_coord_img * 2. - 1.
            norm_coord_imgs.append(norm_coord_img)
        norm_coord_imgs = torch.stack(norm_coord_imgs, dim=0)
        coord_imgs = torch.stack(coord_imgs, dim=0)
        valids2d = torch.stack(valids2d, dim=0)
        batch_dict['norm_coord_imgs'] = norm_coord_imgs
        batch_dict['coord_imgs'] = coord_imgs
        valids = valids2d & (norm_coord_imgs[..., 2] >= -1.) & (norm_coord_imgs[..., 2] <= 1.)
        batch_dict['valids'] = valids
        valids = valids.float()
        # Retrieve Voxel Feature from Cost Volume Feature
        Voxel = F.grid_sample(out, norm_coord_imgs, align_corners=True)
        Voxel = Voxel * valids[:, None, :, :, :]
        if (self.voxel_attentionbydisp or
                (self.img_feature_attentionbydisp and self.cat_img_feature)):
            # weight sampled features by the (detached) depth probability
            pred_disp = F.grid_sample(out_prob.detach()[:, None],
                                      norm_coord_imgs, align_corners=True)
            pred_disp = pred_disp * valids[:, None, :, :, :]
            if self.voxel_attentionbydisp:
                Voxel = Voxel * pred_disp
        # Retrieve Voxel Feature from 2D Img Feature
        if self.cat_img_feature:
            RPN_feature = left_sem_feat
            norm_coord_imgs_2d = norm_coord_imgs.clone().detach()
            # depth coord set to 0: sample the single 2D plane for all z
            norm_coord_imgs_2d[..., 2] = 0
            Voxel_2D = F.grid_sample(RPN_feature.unsqueeze(2), norm_coord_imgs_2d, align_corners=True)
            Voxel_2D = Voxel_2D * valids2d.float()[:, None, :, :, :]
            if self.img_feature_attentionbydisp:
                Voxel_2D = Voxel_2D * pred_disp
            if Voxel is not None:
                Voxel = torch.cat([Voxel, Voxel_2D], dim=1)
            else:
                Voxel = Voxel_2D
        # (64, 190, 20, 300)
        Voxel = self.rpn3d_convs(Voxel)  # (64, 190, 20, 300)
        batch_dict['volume_features_nopool'] = Voxel
        Voxel = self.rpn3d_pool(Voxel)  # [B, C, Nz, Ny, Nx] in cam view
        batch_dict['volume_features'] = Voxel
        return batch_dict
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/dense_heads/det_head.py
|
"""A variant of anchor_head_single.
The differences are as follows:
* two more options: num_convs, GN
* apply two split convs for regression outputs and classification outputs
* when num_convs == 0, this module should be almost the same as anchor_head_single
* in conv_box/cls, the kernel size is modified to 3 instead of 1
"""
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from .anchor_head_template import AnchorHeadTemplate
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation, gn=False, groups=32):
    """2D convolution (no bias) followed by BatchNorm2d, or GroupNorm when ``gn``.

    When ``dilation > 1`` the padding is set to ``dilation`` (instead of
    ``pad``) so spatial size is preserved for 3x3 kernels.
    """
    padding = dilation if dilation > 1 else pad
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                     padding=padding, dilation=dilation, bias=False)
    norm = nn.GroupNorm(groups, out_planes) if gn else nn.BatchNorm2d(out_planes)
    return nn.Sequential(conv, norm)
class DetHead(AnchorHeadTemplate):
    """Anchor-based detection head used by LIGA.

    A variant of ``AnchorHeadSingle``: optional extra conv stacks
    (``NUM_CONVS``) with optional GroupNorm, split classification/regression
    branches, 3x3 (instead of 1x1) prediction kernels, and options
    (``xyz_for_angles`` / ``hwl_for_angles``) to share xyz / hwl regression
    outputs across the anchors of different angles.
    """

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training=True):
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,
            predict_boxes_when_training=predict_boxes_when_training
        )
        self.num_anchors_per_location = sum(self.num_anchors_per_location)
        self.num_convs = model_cfg.NUM_CONVS
        self.GN = model_cfg.GN
        self.xyz_for_angles = model_cfg.xyz_for_angles
        self.hwl_for_angles = model_cfg.hwl_for_angles
        if self.num_convs > 0:
            # separate conv towers for classification and box regression
            self.rpn3d_cls_convs = []
            self.rpn3d_bbox_convs = []
            for _ in range(self.num_convs):
                self.rpn3d_cls_convs.append(
                    nn.Sequential(
                        convbn(input_channels, input_channels, 3, 1, 1, 1, gn=self.GN),
                        nn.ReLU(inplace=True))
                )
                self.rpn3d_bbox_convs.append(
                    nn.Sequential(
                        convbn(input_channels, input_channels, 3, 1, 1, 1, gn=self.GN),
                        nn.ReLU(inplace=True))
                )
            assert len(self.rpn3d_cls_convs) == self.num_convs
            assert len(self.rpn3d_bbox_convs) == self.num_convs
            self.rpn3d_cls_convs = nn.Sequential(*self.rpn3d_cls_convs)
            self.rpn3d_bbox_convs = nn.Sequential(*self.rpn3d_bbox_convs)
        cls_feature_channels = input_channels
        cls_groups = 1
        self.conv_cls = nn.Conv2d(
            cls_feature_channels, self.num_anchors_per_location * self.num_class,
            kernel_size=3, padding=1, stride=1, groups=cls_groups
        )
        # regression output dim depends on whether xyz / hwl are shared
        # across anchor angles (shared -> one set per class instead of per anchor)
        if self.xyz_for_angles and self.hwl_for_angles:
            box_dim = self.num_anchors_per_location * self.box_coder.code_size
        elif not self.xyz_for_angles and not self.hwl_for_angles:
            box_dim = self.num_class * 6 + self.num_anchors_per_location * (self.box_coder.code_size - 6)
        else:
            box_dim = self.num_class * 3 + self.num_anchors_per_location * (self.box_coder.code_size - 3)
        self.conv_box = nn.Conv2d(
            input_channels, box_dim,
            kernel_size=3, padding=1, stride=1
        )
        self.num_angles = self.num_anchors_per_location // self.num_class
        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
            self.conv_dir_cls = nn.Conv2d(
                cls_feature_channels,
                self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
                kernel_size=1,
                groups=cls_groups
            )
        else:
            self.conv_dir_cls = None
        self.init_weights()

    def init_weights(self):
        # focal-loss style bias init: prior foreground probability pi
        pi = 0.01
        nn.init.normal_(self.conv_cls.weight, std=0.1)
        nn.init.normal_(self.conv_box.weight, std=0.02)
        nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
        # nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)

    def forward(self, data_dict):
        """Predict cls/box (and optionally direction) maps; assign targets when training."""
        # NOTE: clear forward ret dict to avoid potential bugs
        self.forward_ret_dict.clear()
        spatial_features_2d = data_dict['spatial_features_2d']
        if self.do_feature_imitation and self.training:
            # collect (stereo prediction, lidar ground-truth) feature pairs
            # for the cross-modal imitation loss
            if 'gt_boxes' in data_dict:
                self.forward_ret_dict['gt_boxes'] = data_dict['gt_boxes']
            self.forward_ret_dict['imitation_features_pairs'] = []
            imitation_conv_layers = [self.conv_imitation] if len(self.imitation_configs) == 1 else self.conv_imitation
            for cfg, imitation_conv in zip(self.imitation_configs, imitation_conv_layers):
                lidar_feature_name = cfg.lidar_feature_layer
                stereo_feature_name = cfg.stereo_feature_layer
                self.forward_ret_dict['imitation_features_pairs'].append(
                    dict(
                        config=cfg,
                        stereo_feature_name=stereo_feature_name,
                        lidar_feature_name=lidar_feature_name,
                        gt=data_dict['lidar_outputs'][lidar_feature_name],
                        pred=imitation_conv(data_dict[stereo_feature_name])
                    )
                )
            # for k in data_dict:
            #     if k in ["lidar_batch_cls_preds", "lidar_batch_box_preds"]:
            #         self.forward_ret_dict[k] = data_dict[k]
        cls_features = spatial_features_2d
        reg_features = spatial_features_2d
        if self.num_convs > 0:
            cls_features = self.rpn3d_cls_convs(cls_features)
            reg_features = self.rpn3d_bbox_convs(reg_features)
        box_preds = self.conv_box(reg_features)
        box_preds = box_preds.permute(0, 2, 3, 1).contiguous()  # [N, H, W, C]
        data_dict['reg_features'] = reg_features
        cls_preds = self.conv_cls(cls_features)
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()  # [N, H, W, C]
        if not self.xyz_for_angles or not self.hwl_for_angles:
            # expand shared xyz / hwl predictions back to one set per anchor
            # TODO: here we assume that for each class, there are only anchors with difference angles
            if self.xyz_for_angles:
                xyz_dim = self.num_anchors_per_location * 3
                xyz_shapes = (self.num_class, self.num_anchors_per_location // self.num_class, 3)
            else:
                xyz_dim = self.num_class * 3
                xyz_shapes = (self.num_class, 1, 3)
            if self.hwl_for_angles:
                hwl_dim = self.num_anchors_per_location * 3
                hwl_shapes = (self.num_class, self.num_anchors_per_location // self.num_class, 3)
            else:
                hwl_dim = self.num_class * 3
                hwl_shapes = (self.num_class, 1, 3)
            rot_dim = self.num_anchors_per_location * (self.box_coder.code_size - 6)
            rot_shapes = (self.num_class, self.num_anchors_per_location // self.num_class, (self.box_coder.code_size - 6))
            assert box_preds.shape[-1] == xyz_dim + hwl_dim + rot_dim
            xyz_preds, hwl_preds, rot_preds = torch.split(box_preds, [xyz_dim, hwl_dim, rot_dim], dim=-1)
            # anchors [Nz, Ny, Nx, N_cls*N_size=3*1, N_rot, 7]
            xyz_preds = xyz_preds.view(*xyz_preds.shape[:3], *xyz_shapes)
            hwl_preds = hwl_preds.view(*hwl_preds.shape[:3], *hwl_shapes)
            rot_preds = rot_preds.view(*rot_preds.shape[:3], *rot_shapes)
            # expand xyz and hwl
            if not self.xyz_for_angles:
                xyz_preds = xyz_preds.repeat(1, 1, 1, 1, rot_preds.shape[4] // xyz_preds.shape[4], 1)
            if not self.hwl_for_angles:
                hwl_preds = hwl_preds.repeat(1, 1, 1, 1, rot_preds.shape[4] // hwl_preds.shape[4], 1)
            box_preds = torch.cat([xyz_preds, hwl_preds, rot_preds], dim=-1)
            box_preds = box_preds.view(*box_preds.shape[:3], -1)
        self.forward_ret_dict['cls_preds'] = cls_preds
        self.forward_ret_dict['box_preds'] = box_preds
        if 'valids' in data_dict:
            self.forward_ret_dict['valids'] = data_dict['valids'].any(1)
        if self.conv_dir_cls is not None:
            dir_cls_preds = self.conv_dir_cls(cls_features)
            dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
            self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
        else:
            dir_cls_preds = None
        if self.training or 'gt_boxes' in data_dict:
            targets_dict = self.assign_targets(
                gt_boxes=data_dict['gt_boxes']
            )
            data_dict.update(targets_dict)
            data_dict['anchors'] = self.anchors
            self.forward_ret_dict.update(targets_dict)
        if not self.training or self.predict_boxes_when_training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=data_dict['batch_size'],
                cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
            )
            data_dict['batch_cls_preds'] = batch_cls_preds
            data_dict['batch_box_preds'] = batch_box_preds
            # TODO: check the code here, we add sigmoid in the generate predicted boxes, so set normalized to be True
            data_dict['cls_preds_normalized'] = False
        return data_dict
|
Owen-Liuyuxuan/LIGA-Stereo
|
tools/train.py
|
import argparse
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.distributed as dist
import torch.nn as nn
from tensorboardX import SummaryWriter
from liga.config import cfg, cfg_from_list, cfg_from_yaml_file, update_cfg_by_args, log_config_to_file
from liga.datasets import build_dataloader
from liga.models import build_network, model_fn_decorator
from liga.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_utils import train_model
# Enable cuDNN autotuning of conv algorithms; helps when input sizes are fixed per run.
torch.backends.cudnn.benchmark = True
def parse_config():
    """Parse CLI arguments and load/merge the YAML config.

    Returns:
        args: argparse.Namespace holding all command-line options
        cfg: the global config object updated from the YAML file and CLI overrides
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')

    # basic training options
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=2, help='number of workers for dataloader')
    parser.add_argument('--exp_name', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    # fixed: help text previously (and wrongly) read 'number of training epochs'
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of epochs between checkpoint saves')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')

    # loading options
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--continue_train', action='store_true', default=False)

    # distributed options
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='pytorch')
    # fixed: 'distrbuted' typo in the help text
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    # fixed: duplicated word in the help text
    parser.add_argument('--find_unused_parameters', action='store_true', default=False, help='whether to set find_unused_parameters for DDP')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')

    # config options
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER, help='set extra config keys if needed')
    parser.add_argument('--trainval', action='store_true', default=False, help='')
    parser.add_argument('--imitation', type=str, default="2d")
    args = parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    update_cfg_by_args(cfg, args)
    cfg.TAG = Path(args.cfg_file).stem
    # EXP_GROUP_PATH drops the leading 'cfgs' directory and the trailing 'xxxx.yaml'
    cfg.EXP_GROUP_PATH = '_'.join(args.cfg_file.split('/')[1:-1])
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Train a LIGA-Stereo model per the YAML config, then evaluate the last checkpoints.

    Phases: config/arg parsing -> (optional) distributed init -> logging setup ->
    dataloader/model/optimizer construction -> (optional) checkpoint resume ->
    training loop -> evaluation of the last ~10 epochs' checkpoints.
    """
    args, cfg = parse_config()
    # launcher 'none' means plain single-process training
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # --batch_size is the TOTAL batch size; split it evenly across GPUs
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        # per-rank offset keeps data shuffles decorrelated across processes
        common_utils.set_random_seed(666 + cfg.LOCAL_RANK)
    output_dir = cfg.ROOT_DIR / 'outputs' / cfg.EXP_GROUP_PATH / (cfg.TAG + '.' + args.exp_name)
    ckpt_dir = output_dir / 'ckpt'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / 'log_train.txt'
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log run environment, all CLI args and the merged config to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys(
    ) else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # snapshot repo state and config for reproducibility (rank 0 only)
        # NOTE(review): os.system with interpolated paths assumes no spaces/shell
        # metacharacters in output_dir — confirm acceptable for this project
        os.system('git diff > %s/%s' % (output_dir, 'git.diff'))
        os.system('git log > %s/%s' % (output_dir, 'git.log'))
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(
        output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    train_set, train_loader, train_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(
        cfg.CLASS_NAMES), dataset=train_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.ckpt is not None:
        # NOTE(review): to_cpu receives the torch.distributed MODULE (always truthy),
        # not the dist_train flag — looks unintended but preserved; confirm upstream
        it, start_epoch = model.load_params_with_optimizer(
            args.ckpt, to_cpu=dist, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    elif args.continue_train:
        # resume from the newest checkpoint (sorted by epoch number in the filename)
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=lambda x: int(x.split('.')[-2].split('_')[-1]))
            print("using ckpt", ckpt_list[-1])
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
        else:
            raise FileNotFoundError("no ckpt files found")
    model.train()  # set train mode BEFORE the DDP wrap so frozen-parameter setups are preserved
    if dist_train:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=args.find_unused_parameters)
    logger.info(model)
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=len(train_loader) // (args.epochs if args.merge_all_iters_to_one_epoch else 1), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('*******Start training: {} ********'.format(output_dir))
    train_model(
        model,
        optimizer,
        train_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        train_sampler=train_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        dist_train=dist_train,
        logger=logger
    )
    logger.info('*******End training: {} ********'.format(output_dir))
    logger.info('*******Start evaluation: {} ********'.format(output_dir))
    # `sampler` is unused here; repeat_eval_ckpt drives the loader directly
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    # Only evaluate the last 10 epochs
    args.start_epoch = max(cfg.OPTIMIZATION.DECAY_STEP_LIST[-1], args.epochs - 10) if len(cfg.OPTIMIZATION.DECAY_STEP_LIST) > 0 else cfg.OPTIMIZATION.NUM_EPOCHS - 10
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('*******End evaluation: {} ********'.format(output_dir))
logger.info('*******End evaluation: {} ********'.format(output_dir))
# Standard script entry point.
if __name__ == '__main__':
    main()
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/ops/build_cost_volume/__init__.py
|
<gh_stars>10-100
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from liga.ops.build_cost_volume import build_cost_volume_cuda
class _BuildCostVolume(Function):
    """Autograd wrapper around the CUDA cost-volume op.

    Correlates left/right stereo feature maps at the given disparity shifts;
    gradients flow back to both feature maps but not to `shift`/`downsample`.
    """
    @staticmethod
    def forward(ctx, left, right, shift, downsample):
        # keep `shift` for backward; `downsample` is a plain value stored on ctx
        ctx.save_for_backward(shift, )
        ctx.downsample = downsample
        # the CUDA kernel requires non-negative disparity shifts
        assert torch.all(shift >= 0.)
        output = build_cost_volume_cuda.build_cost_volume_forward(
            left, right, shift, downsample)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        shift, = ctx.saved_tensors
        grad_left, grad_right = build_cost_volume_cuda.build_cost_volume_backward(
            grad_output, shift, ctx.downsample)
        # no gradients for the shift tensor or the downsample factor
        return grad_left, grad_right, None, None
# Public functional alias for the autograd op.
build_cost_volume = _BuildCostVolume.apply
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/datasets/stereo_dataset_template.py
|
from collections import defaultdict
from pathlib import Path
import numpy as np
import torch.utils.data as torch_data
from liga.utils import common_utils, box_utils, depth_map_utils
from liga.ops.roiaware_pool3d import roiaware_pool3d_utils
from .augmentor.stereo_data_augmentor import StereoDataAugmentor
from .processor.data_processor import DataProcessor
from .processor.point_feature_encoder import PointFeatureEncoder
from liga.utils.calibration_kitti import Calibration
class StereoDatasetTemplate(torch_data.Dataset):
    """Base class for stereo 3D-detection datasets.

    Builds the shared pipeline pieces (data augmentor, point-feature encoder,
    data processor) from the dataset config and provides ``prepare_data`` /
    ``collate_batch`` used by concrete datasets. Subclasses must implement
    ``__len__`` and ``__getitem__``.
    """

    def __init__(self, dataset_cfg=None, class_names=None, training=True, root_path=None, logger=None):
        super().__init__()
        self.dataset_cfg = dataset_cfg
        self.training = training
        self.class_names = class_names
        # fixed: `self.logger = logger` was assigned twice; keep a single assignment
        self.logger = logger
        self.root_path = root_path if root_path is not None else Path(
            self.dataset_cfg.DATA_PATH)
        # lightweight construction: skip pipeline setup when no config/classes given
        if self.dataset_cfg is None or class_names is None:
            return
        self.point_cloud_range = np.array(
            self.dataset_cfg.POINT_CLOUD_RANGE, dtype=np.float32)
        self.voxel_size = self.dataset_cfg.VOXEL_SIZE
        grid_size = (
            self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(self.voxel_size)
        self.grid_size = np.round(grid_size).astype(np.int64)
        # optional second (finer/coarser) grid used by the stereo volume
        if self.dataset_cfg.get("STEREO_VOXEL_SIZE", None):
            self.stereo_voxel_size = self.dataset_cfg.STEREO_VOXEL_SIZE
            stereo_grid_size = (
                self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(self.stereo_voxel_size)
            self.stereo_grid_size = np.round(stereo_grid_size).astype(np.int64)
        if self.training:
            self.data_augmentor = StereoDataAugmentor(
                self.root_path, self.dataset_cfg.TRAIN_DATA_AUGMENTOR, self.class_names, logger=self.logger
            )
        else:
            # test-time augmentor is optional (e.g. image scaling/cropping only)
            if getattr(self.dataset_cfg, 'TEST_DATA_AUGMENTOR', None) is not None:
                self.data_augmentor = StereoDataAugmentor(
                    self.root_path, self.dataset_cfg.TEST_DATA_AUGMENTOR, self.class_names, logger=self.logger
                )
                # logger.warn('using data augmentor in test mode')
            else:
                self.data_augmentor = None
        if self.dataset_cfg.get('POINT_FEATURE_ENCODING'):
            self.point_feature_encoder = PointFeatureEncoder(
                self.dataset_cfg.POINT_FEATURE_ENCODING,
                point_cloud_range=self.point_cloud_range
            )
        else:
            self.point_feature_encoder = None
        if self.dataset_cfg.get('DATA_PROCESSOR'):
            self.data_processor = DataProcessor(
                self.dataset_cfg.DATA_PROCESSOR, point_cloud_range=self.point_cloud_range, training=self.training
            )
        else:
            self.data_processor = None
        self.total_epochs = 0
        self._merge_all_iters_to_one_epoch = False

    @property
    def mode(self):
        """Current split name ('train' or 'test') derived from the training flag."""
        return 'train' if self.training else 'test'

    def __getstate__(self):
        # loggers are not picklable across dataloader workers; drop on pickle
        d = dict(self.__dict__)
        del d['logger']
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)

    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        To support a custom dataset, implement this function to receive the predicted results from the model, and then
        transform the unified normative coordinate to your required coordinate, and optionally save them to disk.

        Args:
            batch_dict: dict of original data from the dataloader
            pred_dicts: dict of predicted results from the model
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path: if it is not None, save the results to this path
        Returns:

        """

    def merge_all_iters_to_one_epoch(self, merge=True, epochs=None):
        """Toggle treating `epochs` epochs worth of iterations as one long epoch."""
        if merge:
            self._merge_all_iters_to_one_epoch = True
            self.total_epochs = epochs
        else:
            self._merge_all_iters_to_one_epoch = False

    def __len__(self):
        raise NotImplementedError

    def __getitem__(self, index):
        """
        To support a custom dataset, implement this function to load the raw data (and labels), then transform them to
        the unified normative coordinate and call the function self.prepare_data() to process the data and send them
        to the model.

        Args:
            index:

        Returns:

        """
        raise NotImplementedError

    def prepare_data(self, data_dict):
        """
        Run augmentation, label filtering, 2D-box derivation, feature encoding and
        depth-map generation on one sample.

        Args:
            data_dict:
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
                gt_names: optional, (N), string
                ...

        Returns:
            data_dict:
                frame_id: string
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
                gt_names: optional, (N), string
                use_lead_xyz: bool
                voxels: optional (num_voxels, max_points_per_voxel, 3 + C)
                voxel_coords: optional (num_voxels, 3)
                voxel_num_points: optional (num_voxels)
                ...
        """
        if self.training:
            assert 'gt_boxes' in data_dict, 'gt_boxes should be provided for training'
            gt_boxes_mask = np.array(
                [n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)
            # TODO: in case using data augmentor, please pay attention to the coordinate
            data_dict = self.data_augmentor.forward(
                data_dict={
                    **data_dict,
                    'gt_boxes_mask': gt_boxes_mask
                }
            )
            # resample another index if augmentation left no boxes in this sample
            if self.training and len(data_dict['gt_boxes']) == 0:
                new_index = np.random.randint(self.__len__())
                return self.__getitem__(new_index)
        elif (not self.training) and self.data_augmentor:
            # only do some basic image scaling and cropping
            data_dict = self.data_augmentor.forward(data_dict)

        if data_dict.get('gt_boxes', None) is not None:
            # keep a copy of boxes before 3D augmentation for 2D-box derivation
            if 'gt_boxes_no3daug' not in data_dict:
                data_dict['gt_boxes_no3daug'] = data_dict['gt_boxes'].copy()

            # keep only annotations of the configured classes
            selected = common_utils.keep_arrays_by_name(
                data_dict['gt_names'], self.class_names)
            if len(selected) != len(data_dict['gt_names']):
                for key in ['gt_names', 'gt_boxes', 'gt_truncated', 'gt_occluded', 'gt_difficulty', 'gt_index', 'gt_boxes_no3daug']:
                    data_dict[key] = data_dict[key][selected]

            # append 1-based class index as the last box column
            gt_classes = np.array([self.class_names.index(
                n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
            data_dict['gt_boxes'] = np.concatenate(
                (data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
            data_dict['gt_boxes_no3daug'] = np.concatenate(
                (data_dict['gt_boxes_no3daug'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)

        # convert to 2d gt boxes
        image_shape = data_dict['left_img'].shape[:2]
        if 'gt_boxes' in data_dict:
            gt_boxes_no3daug = data_dict['gt_boxes_no3daug']
            gt_boxes_no3daug_cam = box_utils.boxes3d_lidar_to_kitti_camera(gt_boxes_no3daug, None, pseudo_lidar=True)
            data_dict['gt_boxes_2d'] = box_utils.boxes3d_kitti_camera_to_imageboxes(
                gt_boxes_no3daug_cam, data_dict['calib'], image_shape, fix_neg_z_bug=True)
            data_dict['gt_centers_2d'] = box_utils.boxes3d_kitti_camera_to_imagecenters(
                gt_boxes_no3daug_cam, data_dict['calib'], image_shape)

        if self.point_feature_encoder:
            data_dict = self.point_feature_encoder.forward(data_dict)
        if self.data_processor:
            data_dict = self.data_processor.forward(data_dict=data_dict)

        # generate depth gt image (from the non-3D-augmented points when available)
        rect_points = Calibration.lidar_pseudo_to_rect(data_dict.get('points_no3daug', data_dict['points'])[:, :3])
        data_dict['depth_gt_img'] = depth_map_utils.points_to_depth_map(rect_points, image_shape, data_dict['calib'])
        if 'gt_boxes' in data_dict:
            data_dict['depth_fgmask_img'] = roiaware_pool3d_utils.depth_map_in_boxes_cpu(
                data_dict['depth_gt_img'], data_dict['gt_boxes'][:, :7], data_dict['calib'], expand_distance=0., expand_ratio=1.0)

        # drop intermediate keys that must not reach the collate function
        data_dict.pop('points_no3daug', None)
        data_dict.pop('did_3d_transformation', None)
        data_dict.pop('road_plane', None)

        return data_dict

    @staticmethod
    def collate_batch(batch_list, _unused=False):
        """Merge a list of per-sample dicts into one batch dict.

        Points/voxel coords get a leading batch-index column; images are
        normalized and zero-padded to a common 32-aligned size; gt-box arrays
        are zero-padded to the max box count in the batch.
        """
        data_dict = defaultdict(list)
        for cur_sample in batch_list:
            for key, val in cur_sample.items():
                data_dict[key].append(val)
        batch_size = len(batch_list)
        ret = {}

        for key, val in data_dict.items():
            if key in ['voxels', 'voxel_num_points']:
                ret[key] = np.concatenate(val, axis=0)
            elif key in ['points', 'voxel_coords']:
                # prepend the sample index so per-point ownership survives concat
                coors = []
                for i, coor in enumerate(val):
                    coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)
                    coors.append(coor_pad)
                ret[key] = np.concatenate(coors, axis=0)
            elif key in ['left_img', 'right_img', 'depth_gt_img', 'depth_fgmask_img']:
                if key in ['depth_gt_img', 'depth_fgmask_img']:
                    val = [np.expand_dims(x, -1) for x in val]
                # pad each image up to the batch max, rounded up to a multiple of 32
                max_h = np.max([x.shape[0] for x in val])
                max_w = np.max([x.shape[1] for x in val])
                pad_h = (max_h - 1) // 32 * 32 + 32 - max_h
                pad_w = (max_w - 1) // 32 * 32 + 32 - max_w
                assert pad_h < 32 and pad_w < 32
                padded_imgs = []
                for i, img in enumerate(val):
                    if key in ['left_img', 'right_img']:
                        # ImageNet mean/std normalization
                        mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
                        std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
                        img = (img.astype(np.float32) / 255 - mean) / std
                    img = np.pad(img, ((0, pad_h), (0, pad_w),
                                       (0, 0)), mode='constant')
                    padded_imgs.append(img)
                ret[key] = np.stack(
                    padded_imgs, axis=0).transpose(0, 3, 1, 2)
            elif key in ['gt_boxes', 'gt_boxes_no3daug', 'gt_boxes_2d', 'gt_centers_2d', 'gt_boxes_2d_ignored', 'gt_boxes_camera']:
                # zero-pad to the max number of boxes in the batch
                max_gt = max([len(x) for x in val])
                batch_gt_boxes3d = np.zeros(
                    (batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)
                for k in range(batch_size):
                    batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]
                ret[key] = batch_gt_boxes3d
            elif key in ['image_idx']:  # gt_boxes_mask
                ret[key] = val
            elif key in ['gt_names', 'gt_truncated', 'gt_occluded', 'gt_difficulty', 'gt_index']:
                ret[key] = [np.array(x) for x in val]
            elif key in ['calib', 'calib_ori', 'use_lead_xyz']:
                ret[key] = val
            elif key in ['frame_id', 'image_shape', 'random_T']:
                ret[key] = np.stack(val, axis=0)
            else:
                print(key)
                raise NotImplementedError

        ret['batch_size'] = batch_size
        return ret
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/backbones_2d/map_to_bev/__init__.py
|
from .height_compression import HeightCompression
# Registry mapping config names to map-to-BEV module classes.
__all__ = {
    'HeightCompression': HeightCompression,
}
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/backbones_2d/map_to_bev/height_compression.py
|
# Convert sparse or dense 3D tensors into BEV 2D tensors by dimension rearrangement
import torch.nn as nn
class HeightCompression(nn.Module):
    """Collapse a 3D feature volume (N, C, D, H, W) into a BEV map (N, C*D, H, W).

    With SPARSE_INPUT (default True) the volume comes from a sparse conv tensor
    that is densified here; otherwise a dense 'volume_features' entry is expected.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_bev_features = model_cfg.NUM_BEV_FEATURES
        self.sparse_input = getattr(model_cfg, 'SPARSE_INPUT', True)

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                encoded_spconv_tensor: sparse tensor (sparse-input mode)
                volume_features: dense (N, C, D, H, W) tensor (dense-input mode)
        Returns:
            batch_dict with 'spatial_features' (N, C*D, H, W) and
            'spatial_features_stride' filled in.
        """
        if self.sparse_input:
            dense_volume = batch_dict['encoded_spconv_tensor'].dense()
            batch_dict['volume_features'] = dense_volume
        else:
            dense_volume = batch_dict['volume_features']
        n, c, d, h, w = dense_volume.shape
        # fold the depth axis into channels to obtain the BEV representation
        batch_dict['spatial_features'] = dense_volume.view(n, c * d, h, w)
        batch_dict['spatial_features_stride'] = (
            batch_dict['encoded_spconv_tensor_stride'] if self.sparse_input else 1
        )
        return batch_dict
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/utils/box_utils.py
|
<gh_stars>10-100
from liga.utils.calibration_kitti import Calibration
import numpy as np
import scipy
import torch
from scipy.spatial import Delaunay
from liga.ops.roiaware_pool3d import roiaware_pool3d_utils
from liga.ops.iou3d_nms.iou3d_nms_utils import boxes_iou_bev
from . import common_utils
def in_hull(p, hull):
    """
    Test which points lie inside a convex hull.

    :param p: (N, K) test points
    :param hull: (M, K) M corners of a box, or a precomputed Delaunay triangulation
    :return (N) bool mask; all False if the hull is degenerate
    """
    try:
        if not isinstance(hull, Delaunay):
            hull = Delaunay(hull)
        # find_simplex returns -1 for points outside the triangulation
        flag = hull.find_simplex(p) >= 0
    except scipy.spatial.QhullError:
        # fixed: was `scipy.spatial.qhull.QhullError` — the private `qhull` module
        # path was removed in recent SciPy; the public alias is scipy.spatial.QhullError
        print('Warning: not a hull %s' % str(hull))
        # fixed: `np.bool` alias was removed in NumPy 1.24; use the builtin bool dtype
        flag = np.zeros(p.shape[0], dtype=bool)
    return flag
def boxes_to_corners_3d(boxes3d):
    """
    Compute the 8 corner points of each 3D box (rotation about z).
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        (N, 8, 3) corners; numpy in -> numpy out, torch in -> torch out
    """
    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)

    # signed unit offsets of the 8 corners relative to the center
    corner_signs = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2
    corners = boxes3d[:, None, 3:6] * corner_signs[None, :, :]
    corners = common_utils.rotate_points_along_z(corners.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners = corners + boxes3d[:, None, 0:3]

    return corners.numpy() if is_numpy else corners
def mask_boxes_outside_range_numpy(boxes, limit_range, min_num_corners=1):
    """
    Keep boxes having at least `min_num_corners` corners inside `limit_range`.

    Args:
        boxes: (N, 7+) [x, y, z, dx, dy, dz, heading, ...], (x, y, z) is the box center
        limit_range: [minx, miny, minz, maxx, maxy, maxz]
        min_num_corners: minimum number of corners that must lie inside the range
    Returns:
        (N,) bool mask
    """
    if boxes.shape[1] > 7:
        boxes = boxes[:, 0:7]
    corners = boxes_to_corners_3d(boxes)  # (N, 8, 3)
    corner_inside = np.logical_and(corners >= limit_range[0:3], corners <= limit_range[3:6]).all(axis=2)
    return corner_inside.sum(axis=1) >= min_num_corners
def remove_points_in_boxes3d(points, boxes3d):
    """
    Drop every point that falls inside any of the given boxes.

    Args:
        points: (num_points, 3 + C)
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps
    Returns:
        remaining points, same array type as the input points
    """
    boxes3d, _ = common_utils.check_numpy_to_torch(boxes3d)
    points, points_were_numpy = common_utils.check_numpy_to_torch(points)
    membership = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3], boxes3d)
    outside_all = membership.sum(dim=0) == 0
    points = points[outside_all]
    return points.numpy() if points_were_numpy else points
def boxes3d_kitti_camera_to_lidar(boxes3d_camera, calib, pseudo_lidar=False, pseudo_cam2_view=False):
    """
    Args:
        boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
        calib: calibration object; may be None only when pseudo_lidar=True
        pseudo_lidar: use the fixed pseudo-lidar axis swap instead of the calib transform
        pseudo_cam2_view: shift from the rect (cam-0) origin to cam-2 before conversion
    Returns:
        boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    """
    xyz_camera = boxes3d_camera[:, 0:3]
    l, h, w, r = boxes3d_camera[:, 3:4], boxes3d_camera[:, 4:5], boxes3d_camera[:, 5:6], boxes3d_camera[:, 6:7]
    if not pseudo_lidar:
        assert calib is not None, "calib can only be none when pseudo_lidar is True"
        xyz_lidar = calib.rect_to_lidar(xyz_camera)
    else:
        if pseudo_cam2_view:
            xyz_camera = xyz_camera + calib.txyz
        xyz_lidar = Calibration.rect_to_lidar_pseudo(xyz_camera)
    # camera y sits on the bottom face; lift by h/2 so (x, y, z) is the true center
    xyz_lidar[:, 2] += h[:, 0] / 2
    # camera (l, h, w, ry) -> lidar (dx=l, dy=w, dz=h); heading flips sign and rotates 90°
    return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_fliplr(boxes3d, cam_view=True):
    """
    Horizontally mirror 3D boxes (camera view: negate x, mirror heading).

    Args:
        boxes3d: (N, 7) boxes with x in column 0 and heading in column 6
        cam_view: only the camera-frame variant is implemented
    Returns:
        (N, 7) mirrored boxes
    """
    if not cam_view:
        raise NotImplementedError
    mirrored_x = -boxes3d[:, 0:1]
    # mirroring about the vertical axis maps heading r -> pi - r
    mirrored_heading = np.pi - boxes3d[:, 6:7]
    return np.concatenate([mirrored_x, boxes3d[:, 1:6], mirrored_heading], axis=1)
def boxes2d_fliplr(boxes2d, image_shape):
    """
    Mirror 2D boxes horizontally within an image.

    Args:
        boxes2d: (N, 4) [x1, y1, x2, y2]
        image_shape: (H, W, ...) — only the width is used
    Returns:
        (N, 4) mirrored boxes
        NOTE(review): x-coords are flipped in place, so column 0 ends up >= column 2;
        callers appear to rely on this ordering — confirm before swapping.
    """
    width = image_shape[1]
    flipped_x1 = width - 1 - boxes2d[:, 0]
    flipped_x2 = width - 1 - boxes2d[:, 2]
    return np.stack([flipped_x1, boxes2d[:, 1], flipped_x2, boxes2d[:, 3]], axis=1)
def enlarge_box3d(boxes3d, extra_width=(0, 0, 0)):
    """
    Grow each box's size by a fixed margin (center unchanged).

    Args:
        boxes3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
        extra_width: [extra_x, extra_y, extra_z] added to (dx, dy, dz)
    Returns:
        enlarged boxes as a torch tensor
        NOTE(review): unlike sibling helpers, a numpy input is NOT converted back
        to numpy here (`is_numpy` is unused) — callers seem to rely on the torch
        return type; confirm before changing.
    """
    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
    large_boxes3d = boxes3d.clone()
    large_boxes3d[:, 3:6] += boxes3d.new_tensor(extra_width)[None, :]
    return large_boxes3d
def boxes3d_lidar_to_kitti_camera(boxes3d_lidar, calib=None, pseudo_lidar=False, pseduo_cam2_view=False):
    """
    :param boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    :param calib: calibration object; may be None only when pseudo_lidar=True
    :param pseudo_lidar: use the fixed pseudo-lidar axis swap instead of the calib transform
    :param pseduo_cam2_view: shift from cam-2 back to the rect (cam-0) origin
        (NOTE(review): parameter name typo 'pseduo' kept for interface compatibility)
    :return:
        boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
    """
    # TODO: will modify original boxes3d_lidar
    xyz_lidar = boxes3d_lidar[:, 0:3].copy()
    l, w, h, r = boxes3d_lidar[:, 3:4], boxes3d_lidar[:, 4:5], boxes3d_lidar[:, 5:6], boxes3d_lidar[:, 6:7]
    # drop from box center to the bottom face (KITTI camera y convention)
    xyz_lidar[:, 2] -= h.reshape(-1) / 2
    if not pseudo_lidar:
        assert calib is not None, "calib can only be None in pseudo_lidar mode"
        xyz_cam = calib.lidar_to_rect(xyz_lidar)
    else:
        # transform xyz from pseudo-lidar to camera view
        xyz_cam = Calibration.lidar_pseudo_to_rect(xyz_lidar)
        if pseduo_cam2_view:
            xyz_cam = xyz_cam - calib.txyz
    # xyz_cam[:, 1] += h.reshape(-1) / 2
    # inverse of the lidar->camera heading mapping used elsewhere in this module
    r = -r - np.pi / 2
    return np.concatenate([xyz_cam, l, h, w, r], axis=-1)
def boxes3d_to_corners3d_kitti_camera(boxes3d, bottom_center=True):
    """
    :param boxes3d: (N, 7) [x, y, z, l, h, w, ry] in camera coords, see the definition of ry in KITTI dataset
    :param bottom_center: whether y is on the bottom center of object
    :return: corners3d: (N, 8, 3)
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    """
    boxes_num = boxes3d.shape[0]
    l, h, w = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]
    # per-box corner offsets along x (length) and z (width); (N, 8) after .T
    x_corners = np.array([l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2], dtype=np.float32).T
    z_corners = np.array([w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.], dtype=np.float32).T
    if bottom_center:
        # KITTI convention: y is the bottom face; top corners (indices 4-7) at -h
        y_corners = np.zeros((boxes_num, 8), dtype=np.float32)
        y_corners[:, 4:8] = -h.reshape(boxes_num, 1).repeat(4, axis=1)  # (N, 8)
    else:
        y_corners = np.array([h / 2., h / 2., h / 2., h / 2., -h / 2., -h / 2., -h / 2., -h / 2.], dtype=np.float32).T

    ry = boxes3d[:, 6]
    zeros, ones = np.zeros(ry.size, dtype=np.float32), np.ones(ry.size, dtype=np.float32)
    # rotation about the camera y axis
    rot_list = np.array([[np.cos(ry), zeros, -np.sin(ry)],
                         [zeros, ones, zeros],
                         [np.sin(ry), zeros, np.cos(ry)]])  # (3, 3, N)
    R_list = np.transpose(rot_list, (2, 0, 1))  # (N, 3, 3)

    temp_corners = np.concatenate((x_corners.reshape(-1, 8, 1), y_corners.reshape(-1, 8, 1),
                                   z_corners.reshape(-1, 8, 1)), axis=2)  # (N, 8, 3)
    # corners are row vectors: corners @ R applies the rotation per box
    rotated_corners = np.matmul(temp_corners, R_list)  # (N, 8, 3)
    x_corners, y_corners, z_corners = rotated_corners[:, :, 0], rotated_corners[:, :, 1], rotated_corners[:, :, 2]

    # translate corners to each box location
    x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2]

    x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8)
    y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8)
    z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8)

    corners = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), axis=2)

    return corners.astype(np.float32)
def boxes3d_to_grid3d_kitti_camera(boxes3d, size=28, bottom_center=True, surface=False):
    """
    Sample a dense grid of points inside each box (or only on its faces).

    :param boxes3d: (N, 7) [x, y, z, l, h, w, ry] in camera coords, see the definition of ry in KITTI dataset
    :param size: number of samples per axis (size**3 grid points per box)
    :param bottom_center: whether y is on the bottom center of object
    :param surface: keep only grid points lying on a box face
    :return: grid points (N, size**3, 3), or (N, n_surface_points, 3) when surface=True
        (note: despite the variable names, these are grid samples, not just 8 corners)
    """
    l, h, w = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]
    # unit grid in [-0.5, 0.5]^3, shared by all boxes
    x_corners, y_corners, z_corners = np.meshgrid(np.linspace(-0.5, 0.5, size), np.linspace(-0.5, 0.5, size), np.linspace(-0.5, 0.5, size))
    if surface:
        # a grid point is on the surface iff one coordinate sits at +/-0.5
        surface_mask = (np.abs(x_corners) == 0.5) | (np.abs(y_corners) == 0.5) | (np.abs(z_corners) == 0.5)
        x_corners = x_corners[surface_mask]
        y_corners = y_corners[surface_mask]
        z_corners = z_corners[surface_mask]
    # scale the unit grid by each box's dimensions
    x_corners = x_corners.reshape([1, -1]) * l.reshape([-1, 1])
    y_corners = y_corners.reshape([1, -1]) * h.reshape([-1, 1])
    z_corners = z_corners.reshape([1, -1]) * w.reshape([-1, 1])
    if bottom_center:
        y_corners -= h.reshape([-1, 1]) / 2

    ry = boxes3d[:, 6]
    zeros, ones = np.zeros(ry.size, dtype=np.float32), np.ones(ry.size, dtype=np.float32)
    # rotation about the camera y axis
    rot_list = np.array([[np.cos(ry), zeros, -np.sin(ry)],
                         [zeros, ones, zeros],
                         [np.sin(ry), zeros, np.cos(ry)]])  # (3, 3, N)
    R_list = np.transpose(rot_list, (2, 0, 1))  # (N, 3, 3)

    temp_corners = np.stack([x_corners, y_corners, z_corners], axis=-1)  # (N, 8, 3)
    rotated_corners = np.matmul(temp_corners, R_list)  # (N, 8, 3)
    x_corners, y_corners, z_corners = rotated_corners[:, :, 0], rotated_corners[:, :, 1], rotated_corners[:, :, 2]

    # translate grid points to each box location
    x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2]

    x = x_loc.reshape(-1, 1) + x_corners
    y = y_loc.reshape(-1, 1) + y_corners
    z = z_loc.reshape(-1, 1) + z_corners

    corners = np.stack([x, y, z], axis=-1)

    return corners.astype(np.float32)
def torch_boxes3d_to_corners3d_kitti_lidar(boxes3d):
    """
    Compute the 8 corners of lidar-frame boxes (rotation about the z axis).
    :param boxes3d: (N, ..., 7) [x, y, z, l, w, h, rz] in lidar coords
    :return: corners3d: (N, ..., 8, 3)
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    """
    # signed half-extent template: one row per corner, columns follow (l, w, h)
    corner_signs = boxes3d.new_tensor([
        [1., 1., 1.], [1., -1., 1.], [-1., -1., 1.], [-1., 1., 1.],
        [1., 1., -1.], [1., -1., -1.], [-1., -1., -1.], [-1., 1., -1.],
    ])
    local_corners = corner_signs * (boxes3d[..., 3:6].unsqueeze(-2) / 2.)  # (..., 8, 3)
    yaw = boxes3d[..., 6]
    cos_yaw, sin_yaw = torch.cos(yaw), torch.sin(yaw)
    zero, one = torch.zeros_like(yaw), torch.ones_like(yaw)
    # row-vector rotation about z: corners @ R
    rot = torch.stack([
        torch.stack([cos_yaw, sin_yaw, zero], dim=-1),
        torch.stack([-sin_yaw, cos_yaw, zero], dim=-1),
        torch.stack([zero, zero, one], dim=-1),
    ], dim=-2)  # (..., 3, 3)
    return torch.matmul(local_corners, rot) + boxes3d[..., :3].unsqueeze(-2)
def boxes3d_kitti_camera_to_imageboxes(boxes3d, calib, image_shape=None, return_neg_z_mask=False, fix_neg_z_bug=False):
    """
    Project 3D camera-frame boxes to 2D image boxes.

    :param boxes3d: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
    :param calib: calibration object providing rect_to_img
    :param image_shape: (H, W); when given, boxes are clipped to the image
    :param return_neg_z_mask: also return a per-box "all points in front of camera" mask
    :param fix_neg_z_bug: use a dense point grid and drop behind-camera points before
        taking the min/max, avoiding bogus extents when a corner has z < 0
    :return:
        box_2d_preds: (N, 4) [x1, y1, x2, y2]
    """
    if not fix_neg_z_bug:
        # fast path: project only the 8 corners (can misbehave when corners are behind the camera)
        corners3d = boxes3d_to_corners3d_kitti_camera(boxes3d)
        pts_img, _ = calib.rect_to_img(corners3d.reshape(-1, 3))
        corners_in_image = pts_img.reshape(-1, 8, 2)

        min_uv = np.min(corners_in_image, axis=1)  # (N, 2)
        max_uv = np.max(corners_in_image, axis=1)  # (N, 2)
        boxes2d_image = np.concatenate([min_uv, max_uv], axis=1)
        if image_shape is not None:
            boxes2d_image[:, 0] = np.clip(boxes2d_image[:, 0], a_min=0, a_max=image_shape[1] - 1)
            boxes2d_image[:, 1] = np.clip(boxes2d_image[:, 1], a_min=0, a_max=image_shape[0] - 1)
            boxes2d_image[:, 2] = np.clip(boxes2d_image[:, 2], a_min=0, a_max=image_shape[1] - 1)
            boxes2d_image[:, 3] = np.clip(boxes2d_image[:, 3], a_min=0, a_max=image_shape[0] - 1)
        if not return_neg_z_mask:
            return boxes2d_image
        else:
            return boxes2d_image, np.all(corners3d[:, :, 2] >= 0.01, axis=1)
    else:
        # robust path: project a 7x7x7 grid per box and ignore points with depth <= 0
        num_boxes = boxes3d.shape[0]
        corners3d = boxes3d_to_grid3d_kitti_camera(boxes3d, size=7, surface=False)
        if num_boxes != 0:
            num_points = corners3d.shape[1]
            pts_img, pts_depth = calib.rect_to_img(corners3d.reshape(-1, 3))
            corners_in_image = pts_img.reshape(num_boxes, num_points, 2)
            depth_in_image = pts_depth.reshape(num_boxes, num_points)
            # per-box min/max over only the points in front of the camera
            min_uv = np.array([np.min(x[d > 0], axis=0) for x, d in zip(corners_in_image, depth_in_image)])  # (N, 2)
            max_uv = np.array([np.max(x[d > 0], axis=0) for x, d in zip(corners_in_image, depth_in_image)])  # (N, 2)
            boxes2d_image = np.concatenate([min_uv, max_uv], axis=1)
        else:
            boxes2d_image = np.zeros([0, 4], dtype=np.float32)
        if image_shape is not None:
            boxes2d_image[:, 0] = np.clip(boxes2d_image[:, 0], a_min=0, a_max=image_shape[1] - 1)
            boxes2d_image[:, 1] = np.clip(boxes2d_image[:, 1], a_min=0, a_max=image_shape[0] - 1)
            boxes2d_image[:, 2] = np.clip(boxes2d_image[:, 2], a_min=0, a_max=image_shape[1] - 1)
            boxes2d_image[:, 3] = np.clip(boxes2d_image[:, 3], a_min=0, a_max=image_shape[0] - 1)
        if not return_neg_z_mask:
            return boxes2d_image
        else:
            return boxes2d_image, np.all(corners3d[:, :, 2] >= 0.01, axis=1)
def boxes3d_kitti_camera_inside_image_mask(boxes3d, calib, image_shape, reduce=True):
    """Determine which 3D boxes are (at least partially) visible in the image.

    A dense grid of points is sampled on each box surface, projected into the
    image, and a box counts as visible if any sampled point lands inside the
    image with positive depth.

    Args:
        boxes3d: boxes in KITTI camera coordinates (format consumed by
            boxes3d_to_grid3d_kitti_camera — presumably (N, 7); confirm there).
        calib: calibration object providing rect_to_img().
        image_shape: (H, W) of the image.
        reduce: if True, collapse per-point visibility to one bool per box.

    Returns:
        (N,) bool array if reduce, else (N, num_points) bool array.
    """
    # Sample a 28x28 grid on each box surface and project all points at once.
    corners3d = boxes3d_to_grid3d_kitti_camera(boxes3d, size=28, surface=True)
    num_points = corners3d.shape[1]
    pts_img, pts_depth = calib.rect_to_img(corners3d.reshape(-1, 3))
    pts_img = pts_img.reshape(-1, num_points, 2)
    pts_u, pts_v = pts_img[..., 0], pts_img[..., 1]
    pts_depth = pts_depth.reshape(-1, num_points)
    # A point is valid only if it lies in front of the camera AND inside the image.
    valid_depth = pts_depth > 0
    valid_in_image = (pts_u > 0) & (pts_v > 0) & (pts_u < image_shape[1]) & (pts_v < image_shape[0])
    valid_mask = valid_depth & valid_in_image
    if reduce:
        return np.any(valid_mask, 1)
    else:
        return valid_mask
def boxes3d_kitti_camera_to_imagecenters(boxes3d, calib, image_shape=None):
    """Project each 3D box center (mean of its 8 corners) into the image plane.

    NOTE(review): `image_shape` is accepted but unused — presumably kept for
    signature parity with boxes3d_kitti_camera_to_imageboxes; confirm callers.

    Returns:
        pts_img: (N, 2) pixel coordinates of the box centers.
    """
    centers3d = boxes3d_to_corners3d_kitti_camera(boxes3d).mean(1)
    pts_img, _ = calib.rect_to_img(centers3d)
    return pts_img
def boxes_iou_normal(boxes_a, boxes_b):
    """Pairwise axis-aligned 2D IoU.

    Args:
        boxes_a: (N, 4) [x1, y1, x2, y2]
        boxes_b: (M, 4) [x1, y1, x2, y2]

    Returns:
        iou: (N, M) pairwise IoU matrix.
    """
    assert boxes_a.shape[1] == 4 and boxes_b.shape[1] == 4
    # Intersection rectangle, broadcasting N boxes against M boxes.
    lt_x = torch.max(boxes_a[:, None, 0], boxes_b[None, :, 0])
    lt_y = torch.max(boxes_a[:, None, 1], boxes_b[None, :, 1])
    rb_x = torch.min(boxes_a[:, None, 2], boxes_b[None, :, 2])
    rb_y = torch.min(boxes_a[:, None, 3], boxes_b[None, :, 3])
    intersection = torch.clamp_min(rb_x - lt_x, 0) * torch.clamp_min(rb_y - lt_y, 0)
    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    # Clamp the union so empty boxes cannot divide by zero.
    union = torch.clamp_min(area_a[:, None] + area_b[None, :] - intersection, 1e-6)
    return intersection / union
def boxes3d_lidar_to_aligned_bev_boxes(boxes3d):
    """
    Args:
        boxes3d: (N, 7 + C) [x, y, z, dx, dy, dz, heading] in lidar coordinate

    Returns:
        aligned_bev_boxes: (N, 4) [x1, y1, x2, y2] in the above lidar coordinate
    """
    # Fold heading into a half-period around 0 and take its magnitude.
    rot_angle = common_utils.limit_period(boxes3d[:, 6], offset=0.5, period=np.pi).abs()
    # Near-axis-aligned boxes (<45 deg) keep (dx, dy); boxes closer to 90 deg swap them.
    choose_dims = torch.where(rot_angle[:, None] < np.pi / 4, boxes3d[:, [3, 4]], boxes3d[:, [4, 3]])
    # Build the axis-aligned rectangle around the BEV center (x, y).
    aligned_bev_boxes = torch.cat((boxes3d[:, 0:2] - choose_dims / 2, boxes3d[:, 0:2] + choose_dims / 2), dim=1)
    return aligned_bev_boxes
def boxes3d_nearest_bev_iou(boxes_a, boxes_b):
    """Axis-aligned BEV IoU: snap each rotated box to its nearest axis-aligned
    BEV rectangle, then compute plain pairwise 2D IoU.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        (N, M) IoU matrix.
    """
    aligned_a = boxes3d_lidar_to_aligned_bev_boxes(boxes_a)
    aligned_b = boxes3d_lidar_to_aligned_bev_boxes(boxes_b)
    return boxes_iou_normal(aligned_a, aligned_b)
def boxes3d_direction_aligned_bev_iou(boxes_a, boxes_b, angle_threshold=np.pi / 4, angle_cycle=np.pi):
    """
    This function is similar to boxes3d_nearest_bev_iou.
    The dims/heading of each anchor box (boxes_a) are replaced by those of its
    nearest gt box; when the angle difference is larger than angle_threshold,
    the IoU is forced to zero.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        ious: (N, M)
    """
    # find the bev centers for boxes a and b
    center_bev_a = boxes_a[:, 0:2]
    center_bev_b = boxes_b[:, 0:2]
    # compute pairwise distance to compute the nearest gt indexes
    center_dist = torch.norm(center_bev_a[:, None, :] - center_bev_b[None, :, :], dim=-1)
    nearest_gt_ids = center_dist.argmin(1)
    # compute pairwise angle difference and limit by period pi
    angle_dist = boxes_a[:, 6][:, None] - boxes_b[:, 6][None, :]
    angle_dist = common_utils.limit_period(angle_dist, offset=0.5, period=angle_cycle)
    assert torch.all(angle_dist > -angle_cycle / 2 - 1e-4)
    assert torch.all(angle_dist < angle_cycle / 2 + 1e-4)
    angle_dist = angle_dist.abs()
    # use the nearest gt indexes to align the anchor boxes (keep the anchor
    # center, borrow dims + heading from the nearest gt), then compute the iou
    aligned_boxes_a = torch.cat([boxes_a[:, :3], boxes_b[:, 3:7][nearest_gt_ids]], dim=1)
    # aligned_boxes_bev_a = boxes3d_lidar_to_aligned_bev_boxes(aligned_boxes_a)
    # boxes_bev_b = boxes3d_lidar_to_aligned_bev_boxes(boxes_b)
    # rotated BEV IoU (boxes_iou_bev is the CUDA op defined elsewhere)
    iou = boxes_iou_bev(aligned_boxes_a, boxes_b)
    # force >angle thresh to be zero
    iou[angle_dist > angle_threshold] = 0.
    return iou
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/backbones_3d_stereo/submodule.py
|
<reponame>Owen-Liuyuxuan/LIGA-Stereo
# modified from DSGN https://github.com/Jia-Research-Lab/DSGN
# sub-modules used in LIGA backbone.
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
def convbn(in_planes,
           out_planes,
           kernel_size,
           stride,
           pad,
           dilation=1,
           gn=False,
           groups=32):
    """2D convolution (no bias) followed by BatchNorm2d, or GroupNorm when gn=True."""
    # Dilated convs pad by the dilation to preserve spatial size; otherwise use `pad`.
    padding = dilation if dilation > 1 else pad
    conv = nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=kernel_size,
                     stride=stride,
                     padding=padding,
                     dilation=dilation,
                     bias=False)
    norm = nn.GroupNorm(groups, out_planes) if gn else nn.BatchNorm2d(out_planes)
    return nn.Sequential(conv, norm)
def convbn_3d(in_planes,
              out_planes,
              kernel_size,
              stride,
              pad,
              gn=False,
              groups=32):
    """3D convolution (no bias) followed by BatchNorm3d, or GroupNorm when gn=True."""
    conv = nn.Conv3d(in_planes,
                     out_planes,
                     kernel_size=kernel_size,
                     padding=pad,
                     stride=stride,
                     bias=False)
    norm = nn.GroupNorm(groups, out_planes) if gn else nn.BatchNorm3d(out_planes)
    return nn.Sequential(conv, norm)
class BasicBlock(nn.Module):
    """Residual basic block: conv-bn-relu + conv-bn with an additive skip.

    Note there is no ReLU after the residual addition (matches the original
    PSMNet-style design used by this backbone).
    """
    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride,
                 downsample,
                 pad,
                 dilation,
                 gn=False):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Sequential(
            convbn(inplanes, planes, 3, stride, pad, dilation, gn=gn),
            nn.ReLU(inplace=True))
        self.conv2 = convbn(planes, planes, 3, 1, pad, dilation, gn=gn)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the identity path when a downsample module is provided.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        out += identity
        return out
class disparityregression(nn.Module):
    """Soft-argmax regression: expected value of `depth` under probabilities `x`."""

    def __init__(self):
        super(disparityregression, self).__init__()

    def forward(self, x, depth):
        # x: (B, D, H, W) probability volume; depth: (D,) candidate values.
        assert x.dim() == 4
        assert depth.dim() == 1
        weighted = x * depth.view(1, -1, 1, 1)
        return weighted.sum(dim=1)
class hourglass(nn.Module):
    """3D hourglass (encoder/decoder) block over a cost volume.

    Downsamples twice (conv1, conv3), then upsamples twice (conv5, conv6)
    with additive skip connections. `presqu`/`postsqu` are optional
    activations from a previous hourglass that are fused into the skip
    paths (None on the first hourglass of a stack).
    """
    def __init__(self, inplanes, gn=False):
        super(hourglass, self).__init__()
        # Encoder: stride-2 then stride-1 convs at 2x channels.
        self.conv1 = nn.Sequential(
            convbn_3d(inplanes,
                      inplanes * 2,
                      kernel_size=3,
                      stride=2,
                      pad=1,
                      gn=gn), nn.ReLU(inplace=True))
        self.conv2 = convbn_3d(inplanes * 2,
                               inplanes * 2,
                               kernel_size=3,
                               stride=1,
                               pad=1,
                               gn=gn)
        self.conv3 = nn.Sequential(
            convbn_3d(inplanes * 2,
                      inplanes * 2,
                      kernel_size=3,
                      stride=2,
                      pad=1,
                      gn=gn), nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            convbn_3d(inplanes * 2,
                      inplanes * 2,
                      kernel_size=3,
                      stride=1,
                      pad=1,
                      gn=gn), nn.ReLU(inplace=True))
        # Decoder: transposed convs (no activation here; ReLU applied after the skip add).
        self.conv5 = nn.Sequential(
            nn.ConvTranspose3d(inplanes * 2,
                               inplanes * 2,
                               kernel_size=3,
                               padding=1,
                               output_padding=1,
                               stride=2,
                               bias=False),
            nn.BatchNorm3d(inplanes *
                           2) if not gn else nn.GroupNorm(32, inplanes *
                                                          2))  # +conv2
        self.conv6 = nn.Sequential(
            nn.ConvTranspose3d(inplanes * 2,
                               inplanes,
                               kernel_size=3,
                               padding=1,
                               output_padding=1,
                               stride=2,
                               bias=False),
            nn.BatchNorm3d(inplanes)
            if not gn else nn.GroupNorm(32, inplanes))  # +x

    def forward(self, x, presqu, postsqu):
        """Returns (out, pre, post); pre/post are fed to the next hourglass."""
        out = self.conv1(x)  # in:1/4 out:1/8
        pre = self.conv2(out)  # in:1/8 out:1/8
        if postsqu is not None:
            pre = F.relu(pre + postsqu, inplace=True)
        else:
            pre = F.relu(pre, inplace=True)
        out = self.conv3(pre)  # in:1/8 out:1/16
        out = self.conv4(out)  # in:1/16 out:1/16
        if presqu is not None:
            post = F.relu(self.conv5(out) + presqu,
                          inplace=True)  # in:1/16 out:1/8
        else:
            post = F.relu(self.conv5(out) + pre, inplace=True)
        out = self.conv6(post)  # in:1/8 out:1/4
        return out, pre, post
class hourglass2d(nn.Module):
    """2D counterpart of `hourglass`: encoder/decoder over BEV/image features.

    Same wiring as the 3D version — two stride-2 downsamples, two transposed
    convs back up, with additive skips; `presqu`/`postsqu` fuse activations
    from a previous hourglass (pass None when there is none).
    """
    def __init__(self, inplanes, gn=False):
        super(hourglass2d, self).__init__()
        self.conv1 = nn.Sequential(
            convbn(inplanes,
                   inplanes * 2,
                   kernel_size=3,
                   stride=2,
                   pad=1,
                   dilation=1,
                   gn=gn), nn.ReLU(inplace=True))
        self.conv2 = convbn(inplanes * 2,
                            inplanes * 2,
                            kernel_size=3,
                            stride=1,
                            pad=1,
                            dilation=1,
                            gn=gn)
        self.conv3 = nn.Sequential(
            convbn(inplanes * 2,
                   inplanes * 2,
                   kernel_size=3,
                   stride=2,
                   pad=1,
                   dilation=1,
                   gn=gn), nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            convbn(inplanes * 2,
                   inplanes * 2,
                   kernel_size=3,
                   stride=1,
                   pad=1,
                   dilation=1,
                   gn=gn), nn.ReLU(inplace=True))
        # Decoder: no activation here; ReLU is applied after the skip addition.
        self.conv5 = nn.Sequential(
            nn.ConvTranspose2d(inplanes * 2,
                               inplanes * 2,
                               kernel_size=3,
                               padding=1,
                               output_padding=1,
                               stride=2,
                               bias=False),
            nn.BatchNorm2d(inplanes *
                           2) if not gn else nn.GroupNorm(32, inplanes *
                                                          2))  # +conv2
        self.conv6 = nn.Sequential(
            nn.ConvTranspose2d(inplanes * 2,
                               inplanes,
                               kernel_size=3,
                               padding=1,
                               output_padding=1,
                               stride=2,
                               bias=False),
            nn.BatchNorm2d(inplanes)
            if not gn else nn.GroupNorm(32, inplanes))  # +x

    def forward(self, x, presqu, postsqu):
        """Returns (out, pre, post); pre/post are fed to the next hourglass."""
        out = self.conv1(x)  # in:1/4 out:1/8
        pre = self.conv2(out)  # in:1/8 out:1/8
        if postsqu is not None:
            pre = F.relu(pre + postsqu, inplace=True)
        else:
            pre = F.relu(pre, inplace=True)
        out = self.conv3(pre)  # in:1/8 out:1/16
        out = self.conv4(out)  # in:1/16 out:1/16
        if presqu is not None:
            post = F.relu(self.conv5(out) + presqu,
                          inplace=True)  # in:1/16 out:1/8
        else:
            post = F.relu(self.conv5(out) + pre, inplace=True)
        out = self.conv6(post)  # in:1/8 out:1/4
        return out, pre, post
class upconv_module(nn.Module):
    """Top-down refinement: repeatedly 2x-upsample the coarsest feature map
    and fuse it (add + ReLU) with a lateral 3x3 projection of the next finer
    level.

    Args:
        in_channels: channel counts, coarsest first (feats[0] .. feats[-1]).
        up_channels: output channels of each refinement stage.
    """
    def __init__(self, in_channels, up_channels):
        super(upconv_module, self).__init__()
        self.num_stage = len(in_channels) - 1
        self.conv = nn.ModuleList()
        self.redir = nn.ModuleList()
        for stage_idx in range(self.num_stage):
            # Stage 0 consumes the raw coarsest map; later stages consume the
            # previous stage's output channels.
            self.conv.append(
                convbn(in_channels[0] if stage_idx == 0 else up_channels[stage_idx - 1], up_channels[stage_idx], 3, 1, 1, 1)
            )
            # Lateral projection of the finer-level feature map.
            self.redir.append(
                convbn(in_channels[stage_idx + 1], up_channels[stage_idx], 3, 1, 1, 1)
            )
        self.up = nn.Upsample(scale_factor=2, mode='bilinear')

    def forward(self, feats):
        # feats[0] is the coarsest map; feats[i + 1] is one level finer.
        x = feats[0]
        for stage_idx in range(self.num_stage):
            x = self.conv[stage_idx](x)
            redir = self.redir[stage_idx](feats[stage_idx + 1])
            x = F.relu(self.up(x) + redir)
        return x
class feature_extraction_neck(nn.Module):
    """Neck that merges multi-scale backbone features into two heads' inputs:

    - a stereo feature map (for cost-volume construction), optionally refined
      to higher resolution via `upconv_module`;
    - an optional semantic feature map (for the 2D/semantic branch) when
      cfg.cat_img_feature is set.

    A 4-branch SPP (average pooling at several window sizes) over the deepest
    feature map is concatenated with the levels from `start_level` on.
    """
    def __init__(self, cfg):
        super(feature_extraction_neck, self).__init__()
        self.cfg = cfg
        self.in_dims = cfg.in_dims
        self.with_upconv = cfg.with_upconv
        self.start_level = cfg.start_level
        self.cat_img_feature = cfg.cat_img_feature
        self.sem_dim = cfg.sem_dim
        self.stereo_dim = cfg.stereo_dim
        self.spp_dim = getattr(cfg, 'spp_dim', 32)
        # Spatial pyramid pooling branches over the deepest feature map.
        self.spp_branches = nn.ModuleList([
            nn.Sequential(
                nn.AvgPool2d(s, stride=s),
                convbn(self.in_dims[-1],
                       self.spp_dim,
                       1, 1, 0,
                       gn=cfg.GN,
                       groups=min(32, self.spp_dim)),
                nn.ReLU(inplace=True))
            for s in [(64, 64), (32, 32), (16, 16), (8, 8)]])
        # Channels after concatenating SPP outputs with levels >= start_level.
        concat_dim = self.spp_dim * len(self.spp_branches) + sum(self.in_dims[self.start_level:])
        if self.with_upconv:
            assert self.start_level == 2
            self.upconv_module = upconv_module([concat_dim, self.in_dims[1], self.in_dims[0]], [64, 32])
            stereo_dim = 32
        else:
            stereo_dim = concat_dim
            assert self.start_level >= 1
        self.lastconv = nn.Sequential(
            convbn(stereo_dim, self.stereo_dim[0], 3, 1, 1, gn=cfg.GN),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.stereo_dim[0], self.stereo_dim[1],
                      kernel_size=1,
                      padding=0,
                      stride=1,
                      bias=False))
        if self.cat_img_feature:
            # Semantic branch head, consumed by the 2D/semantic pipeline.
            self.rpnconv = nn.Sequential(
                convbn(concat_dim, self.sem_dim[0], 3, 1, 1, 1, gn=cfg.GN),
                nn.ReLU(inplace=True),
                convbn(self.sem_dim[0], self.sem_dim[1], 3, 1, 1, gn=cfg.GN),
                nn.ReLU(inplace=True)
            )

    def forward(self, feats):
        """feats: list of per-level feature maps, finest first (len == len(in_dims)).

        Returns:
            (stereo_feature, sem_feature) — sem_feature is None unless
            cfg.cat_img_feature is set.
        """
        feat_shape = tuple(feats[self.start_level].shape[2:])
        assert len(feats) == len(self.in_dims)
        spp_branches = []
        for branch_module in self.spp_branches:
            x = branch_module(feats[-1])
            # Resize every pooled branch back to the start_level resolution.
            x = F.interpolate(
                x, feat_shape,
                mode='bilinear',
                align_corners=True)
            spp_branches.append(x)
        concat_feature = torch.cat((*feats[self.start_level:], *spp_branches), 1)
        stereo_feature = concat_feature
        if self.with_upconv:
            stereo_feature = self.upconv_module([stereo_feature, feats[1], feats[0]])
        stereo_feature = self.lastconv(stereo_feature)
        if self.cat_img_feature:
            sem_feature = self.rpnconv(concat_feature)
        else:
            sem_feature = None
        return stereo_feature, sem_feature
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/backbones_3d_stereo/__init__.py
|
<reponame>Owen-Liuyuxuan/LIGA-Stereo
from .liga_backbone import LigaBackbone
__all__ = {
'LigaBackbone': LigaBackbone
}
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/utils/loss_utils.py
|
import sys
import numpy as np
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from liga.ops.iou3d_nms.iou3d_nms_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
    """
    Sigmoid focal cross entropy loss.
    """

    def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
        """
        Args:
            gamma: Weighting parameter to balance loss for hard and easy examples.
            alpha: Weighting parameter to balance loss for positive and negative examples.
        """
        super(SigmoidFocalClassificationLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma

    @staticmethod
    def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
        """Numerically stable sigmoid cross entropy, following
        tf.nn.sigmoid_cross_entropy_with_logits:
            max(x, 0) - x * z + log(1 + exp(-abs(x)))

        Args:
            input: (B, #anchors, #classes) predicted logits.
            target: (B, #anchors, #classes) one-hot targets.

        Returns:
            per-element cross entropy, no reduction.
        """
        return torch.clamp(input, min=0) - input * target + \
            torch.log1p(torch.exp(-torch.abs(input)))

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) predicted logits.
            target: (B, #anchors, #classes) one-hot targets.
            weights: (B, #anchors) anchor-wise weights.

        Returns:
            (B, #anchors, #classes) weighted focal loss, no reduction.
        """
        assert torch.all((target == 1) | (target == 0)), 'labels should be 0 or 1 in focal loss.'
        assert input.shape == target.shape
        prob = torch.sigmoid(input)
        # alpha for positives, (1 - alpha) for negatives.
        alpha_weight = self.alpha * target + (1 - self.alpha) * (1 - target)
        # pt is the probability mass on the wrong class, so easy examples
        # (small pt) are down-weighted by pt ** gamma.
        pt = (1.0 - prob) * target + prob * (1.0 - target)
        modulating = alpha_weight * torch.pow(pt, self.gamma)
        loss = modulating * self.sigmoid_cross_entropy_with_logits(input, target)
        # Broadcast anchor-wise weights over the class dimension.
        if weights.dim() == 2 or (weights.dim() == 1 and target.dim() == 2):
            weights = weights.unsqueeze(-1)
        assert weights.dim() == loss.dim()
        return loss * weights
class WeightedSmoothL1Loss(nn.Module):
    """
    Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
    https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
                  | 0.5 * x ** 2 / beta   if abs(x) < beta
     smoothl1(x) = |
                  | abs(x) - 0.5 * beta   otherwise,
    where x = input - target.
    """

    def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
        """
        Args:
            beta: L1 to L2 change point; for beta < 1e-5 a pure L1 is used.
            code_weights: optional per-code weights (moved to GPU).
        """
        super(WeightedSmoothL1Loss, self).__init__()
        self.beta = beta
        if code_weights is None:
            self.code_weights = None
        else:
            self.code_weights = torch.from_numpy(
                np.array(code_weights, dtype=np.float32)).cuda()

    @staticmethod
    def smooth_l1_loss(diff, beta):
        """Element-wise smooth-L1 of `diff` with change point `beta`."""
        abs_diff = torch.abs(diff)
        if beta < 1e-5:
            return abs_diff
        return torch.where(abs_diff < beta, 0.5 * abs_diff ** 2 / beta, abs_diff - 0.5 * beta)

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
        """
        Args:
            input: (B, #anchors, #codes) encoded predictions.
            target: (B, #anchors, #codes) regression targets (NaN entries ignored).
            weights: (B, #anchors) optional anchor-wise weights.

        Returns:
            (B, #anchors, #codes) weighted smooth-L1 loss, no reduction.
        """
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets
        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights
        loss = self.smooth_l1_loss(diff, self.beta)
        # anchor-wise weighting
        if weights is not None:
            assert weights.shape == loss.shape[:-1]
            loss = loss * weights.unsqueeze(-1)
        return loss
class WeightedL2WithSigmaLoss(nn.Module):
    """L2 loss with an optional per-element log-sigma (heteroscedastic) term."""

    def __init__(self, code_weights: list = None):
        super(WeightedL2WithSigmaLoss, self).__init__()
        if code_weights is None:
            self.code_weights = None
        else:
            self.code_weights = torch.from_numpy(
                np.array(code_weights, dtype=np.float32)).cuda()

    @staticmethod
    def l2_loss(diff, sigma=None):
        """0.5 * diff^2, or a Gaussian NLL with log-std `sigma` when given.

        NOTE: 6.28 approximates 2*pi; kept verbatim to preserve behavior.
        """
        if sigma is None:
            return 0.5 * diff ** 2
        return 0.5 * (diff / torch.exp(sigma)) ** 2 + math.log(math.sqrt(6.28)) + sigma

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None, sigma: torch.Tensor = None):
        """Returns the element-wise loss with optional code and anchor weighting;
        NaN targets are ignored (treated as equal to the prediction)."""
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets
        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights
        loss = self.l2_loss(diff, sigma=sigma)
        # anchor-wise weighting
        if weights is not None:
            assert weights.shape == loss.shape[:-1]
            loss = loss * weights.unsqueeze(-1)
        return loss
class IOU3dLoss(nn.Module):
    """Box regression loss defined as 1 - IoU3D between prediction and target.

    Uses the differentiable CUDA IoU op so gradients flow back to the
    predicted box parameters.
    """
    def __init__(self):
        super(IOU3dLoss, self).__init__()

    @staticmethod
    def iou3d_loss(x, y):
        # x, y: predicted / target boxes (presumably (N, 7) — confirm against
        # boxes_iou3d_gpu_differentiable's contract).
        iou3d = boxes_iou3d_gpu_differentiable(x, y)
        return 1 - iou3d

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
        """Returns the per-box loss; NaN targets are ignored and an empty
        input yields a zero loss that keeps the autograd graph connected."""
        input = input.contiguous()
        target = target.contiguous()
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets
        if input.size(0) > 0:
            loss = self.iou3d_loss(input, target)
        else:
            # Zero loss that still depends on input, so backward() works.
            loss = (input - target).sum(1) * 0.
        # anchor-wise weighting
        if weights is not None:
            assert weights.shape == loss.shape
            loss = loss * weights
        return loss
class WeightedL1Loss(nn.Module):
    """Code-wise weighted L1 loss with optional anchor-wise weighting."""

    def __init__(self, code_weights: list = None):
        """
        Args:
            code_weights: (#codes) float list if not None.
                Code-wise weights.
        """
        super(WeightedL1Loss, self).__init__()
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
        else:
            # Bug fix: the attribute was previously left unset in this branch,
            # so forward() raised AttributeError whenever the loss was built
            # without code_weights. All sibling losses in this file set None.
            self.code_weights = None

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
        """
        Args:
            input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
            target: (B, #anchors, #codes) float tensor.
                Regression targets (NaN entries are ignored).
            weights: (B, #anchors) float tensor if not None.

        Returns:
            loss: (B, #anchors, #codes) float tensor.
                Weighted L1 loss without reduction.
        """
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets
        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights.view(1, 1, -1)
        loss = torch.abs(diff)
        # anchor-wise weighting
        if weights is not None:
            assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
            loss = loss * weights.unsqueeze(-1)
        return loss
class WeightedCrossEntropyLoss(nn.Module):
    """
    Transform input to fit the formation of PyTorch official cross entropy loss
    with anchor-wise weighting.
    """

    def __init__(self):
        super(WeightedCrossEntropyLoss, self).__init__()

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) predicted logits.
            target: (B, #anchors, #classes) one-hot classification targets.
            weights: (B, #anchors) anchor-wise weights.

        Returns:
            (B, #anchors) weighted cross entropy loss without reduction.
        """
        logits = input.permute(0, 2, 1)    # F.cross_entropy wants (B, C, ...)
        class_ids = target.argmax(dim=-1)  # one-hot -> class indices
        return F.cross_entropy(logits, class_ids, reduction='none') * weights
class WeightedBinaryCrossEntropyLoss(nn.Module):
    """Element-wise weighted binary cross entropy on raw logits."""

    def __init__(self):
        super(WeightedBinaryCrossEntropyLoss, self).__init__()

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, ...) predicted logits.
            target: (B, ...) binary targets, same shape as input.
            weights: (B, ...) element-wise weights, same shape as input.

        Returns:
            (B, ...) weighted BCE loss without reduction.
        """
        assert input.shape == target.shape == weights.shape
        raw = F.binary_cross_entropy_with_logits(input, target, reduction='none')
        return raw * weights
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
    """Corner loss: mean smooth-L1 distance between the 8 corners of the
    predicted box and those of the GT box, flip-invariant in heading.

    Args:
        pred_bbox3d: (N, 7) float Tensor.
        gt_bbox3d: (N, 7) float Tensor.

    Returns:
        corner_loss: (N) float Tensor.
    """
    assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
    pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
    gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
    # Also compare against the GT rotated by pi, so a heading flipped by
    # 180 degrees is not over-penalized.
    gt_bbox3d_flip = gt_bbox3d.clone()
    gt_bbox3d_flip[:, 6] += np.pi
    gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
    # (N, 8): per-corner distance to the closer of the two GT orientations.
    corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
                            torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
    # (N, 8)
    corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
    return corner_loss.mean(dim=1)
class InnerProductLoss(nn.Module):
    """Alignment loss: 1 minus the mean element-wise product of input and
    target over the last dimension (zero when both are identical unit-ish
    vectors)."""

    def __init__(self, code_weights: list = None):
        super(InnerProductLoss, self).__init__()
        if code_weights is None:
            self.code_weights = None
        else:
            self.code_weights = torch.from_numpy(
                np.array(code_weights, dtype=np.float32)).cuda()

    @staticmethod
    def ip_loss(product):
        # keepdim so the output retains a trailing singleton "code" axis.
        return 1 - product.mean(dim=-1, keepdim=True)

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
        """Returns (..., 1) loss; NaN targets are ignored and anchor-wise
        `weights` (matching loss.shape[:-1]) are applied when given."""
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets
        product = input * target
        # code-wise weighting
        if self.code_weights is not None:
            product = product * self.code_weights
        loss = self.ip_loss(product)
        # anchor-wise weighting
        if weights is not None:
            assert weights.shape == loss.shape[:-1]
            loss = loss * weights.unsqueeze(-1)
        return loss
class MergeLoss(nn.Module):
    """Applies a (possibly different) regression loss to each contiguous
    slice of the code vector, e.g. separate losses for location/size/angle.

    Args:
        splits: sizes of the consecutive code slices.
        multi_losses_cfg: one config per slice; its REG_LOSS_TYPE names a loss
            class defined in this module (default: WeightedSmoothL1Loss).
        code_weights: flat per-code weights, split to match `splits`.
    """
    def __init__(self, splits, multi_losses_cfg, code_weights):
        super(MergeLoss, self).__init__()
        self.multiple_losses = nn.ModuleList()
        self.splits = splits
        # Split the flat code weights into one chunk per slice.
        code_weights = np.array(code_weights)
        code_weights = np.split(code_weights, np.cumsum(splits)[:-1], 0)
        assert isinstance(code_weights, list)
        assert len(code_weights) == len(multi_losses_cfg)
        for cw, losses_cfg in zip(code_weights, multi_losses_cfg):
            reg_loss_name = 'WeightedSmoothL1Loss' if losses_cfg.get('REG_LOSS_TYPE', None) is None \
                else losses_cfg.REG_LOSS_TYPE
            # Look the loss class up by name in this module.
            self.multiple_losses.append(
                getattr(sys.modules[__name__], reg_loss_name)(code_weights=cw)
            )

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """Returns a list of per-slice loss tensors (no reduction)."""
        inputs = torch.split(input, self.splits, -1)
        targets = torch.split(target, self.splits, -1)
        losses = []
        for input, target, reg_loss in zip(inputs, targets, self.multiple_losses):
            losses.append(reg_loss(input, target, weights))
        return losses
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/utils/calibration_kitti.py
|
import torch
import numpy as np
def get_calib_from_file(calib_file):
    """Parse a KITTI-style calibration text file.

    Each relevant line has the form ``KEY: v0 v1 ... vN``; only the first
    occurrence of each key is used.

    Args:
        calib_file: path to the calibration text file.

    Returns:
        dict with float32 arrays: 'P2' (3, 4), 'P3' (3, 4), 'R0' (3, 3) and
        'Tr_velo2cam' (3, 4).
    """
    with open(calib_file) as f:
        lines = f.readlines()

    calib_data = {}
    for key in ['P2', 'P3', 'R0_rect', 'Tr_velo_to_cam']:
        for line in lines:
            tokens = [tok for tok in line.strip().split(' ') if len(tok.strip()) > 0]
            # Bug fix: skip blank lines (KITTI calib files commonly end with
            # one); previously tokens[0] raised IndexError on an empty line.
            if not tokens:
                continue
            if tokens[0][:-1] == key:  # drop the trailing ':' from the key
                calib_data[key] = np.array(tokens[1:], dtype=np.float32)
                break

    return {'P2': calib_data['P2'].reshape(3, 4),
            'P3': calib_data['P3'].reshape(3, 4),
            'R0': calib_data['R0_rect'].reshape(3, 3),
            'Tr_velo2cam': calib_data['Tr_velo_to_cam'].reshape(3, 4)}
class Calibration(object):
    """KITTI camera calibration holder with projection helpers.

    Works with the rectified camera-2 frame ('rect') as the reference:
    P2/P3 are the 3x4 projection matrices of the left/right color cameras,
    R0 is the rectification rotation, V2C maps velodyne -> camera.
    `scale`/`offset`/`fliplr` mutate the matrices in place to track image
    augmentations.
    """
    def __init__(self, calib_file):
        # Accept either a path to a calib file or an already-parsed dict.
        if not isinstance(calib_file, dict):
            calib = get_calib_from_file(calib_file)
        else:
            calib = calib_file
        self.P2 = calib['P2']  # 3 x 4
        self.P3 = calib['P3']  # 3 x 4
        self.R0 = calib['R0']  # 3 x 3
        self.V2C = calib['Tr_velo2cam']  # 3 x 4
        self.flipped = False        # set by fliplr()
        self.offsets = [0, 0]       # accumulated (x, y) crop offsets

    @property
    def cu(self):
        # principal point x (pixels)
        return self.P2[0, 2]

    @property
    def cv(self):
        # principal point y (pixels)
        return self.P2[1, 2]

    @property
    def fu(self):
        # focal length x (pixels)
        return self.P2[0, 0]

    @property
    def fv(self):
        # focal length y (pixels)
        return self.P2[1, 1]

    @property
    def tx(self):
        # x-translation of camera 2 relative to the reference camera
        return self.P2[0, 3] / (-self.fu)

    @property
    def ty(self):
        # y-translation of camera 2 relative to the reference camera
        return self.P2[1, 3] / (-self.fv)

    @property
    def txyz(self):
        # full translation vector: K^-1 @ P2[:, 3]
        return np.matmul(np.linalg.inv(self.P2[:3, :3]), self.P2[:3, 3:4]).squeeze(-1)

    @property
    def K(self):
        # 3x3 intrinsic matrix
        return self.P2[:3, :3]

    @property
    def K3x4(self):
        # intrinsics padded with a zero translation column
        return np.concatenate([self.P2[:3, :3], np.zeros_like(self.P2[:3, 3:4])], axis=1)

    @property
    def inv_K(self):
        return np.linalg.inv(self.K)

    def global_scale(self, scale_factor):
        """Scale the translation columns (world-scale augmentation)."""
        self.P2[:, 3:4] *= scale_factor
        self.P3[:, 3:4] *= scale_factor

    def scale(self, scale_factor):
        """Scale the intrinsic rows to track an image resize."""
        self.P2[:2, :] *= scale_factor
        self.P3[:2, :] *= scale_factor

    def offset(self, offset_x, offset_y):
        """Shift the principal point to track an image crop at (offset_x, offset_y)."""
        K = self.K.copy()
        inv_K = self.inv_K
        # Factor P = K @ T, adjust K, then recompose both projections.
        T2 = np.matmul(inv_K, self.P2)
        T3 = np.matmul(inv_K, self.P3)
        K[0, 2] -= offset_x
        K[1, 2] -= offset_y
        self.P2 = np.matmul(K, T2)
        self.P3 = np.matmul(K, T3)
        self.offsets[0] += offset_x
        self.offsets[1] += offset_y

    def fliplr(self, image_width):
        # mirror using y-z plane of cam 0
        assert not self.flipped
        K = self.P2[:3, :3].copy()
        inv_K = np.linalg.inv(K)
        T2 = np.matmul(inv_K, self.P2)
        T3 = np.matmul(inv_K, self.P3)
        T2[0, 3] *= -1
        T3[0, 3] *= -1
        K[0, 2] = image_width - 1 - K[0, 2]
        # NOTE(review): P2/P3 are deliberately exchanged here — presumably
        # mirroring swaps the roles of the left/right cameras; confirm.
        self.P3 = np.matmul(K, T2)
        self.P2 = np.matmul(K, T3)
        # delete useless parameters to avoid bugs
        del self.R0, self.V2C
        self.flipped = not self.flipped

    @property
    def fu_mul_baseline(self):
        # |fu * baseline| between the two cameras (used for disparity <-> depth)
        return np.abs(self.P2[0, 3] - self.P3[0, 3])

    @staticmethod
    def cart_to_hom(pts):
        """
        :param pts: (N, 3 or 2)
        :return pts_hom: (N, 4 or 3)
        """
        pts_hom = np.hstack(
            (pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
        return pts_hom

    def rect_to_lidar(self, pts_rect):
        """
        :param pts_rect: (N, 3) points in the rectified camera frame
        :return pts_lidar: (N, 3) points in the velodyne frame
        """
        if self.flipped:
            raise NotImplementedError
        pts_rect_hom = self.cart_to_hom(pts_rect)  # (N, 4)
        # Build the 4x4 inverse of (R0 @ V2C) and apply it.
        R0_ext = np.hstack(
            (self.R0, np.zeros((3, 1), dtype=np.float32)))  # (3, 4)
        R0_ext = np.vstack(
            (R0_ext, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        R0_ext[3, 3] = 1
        V2C_ext = np.vstack(
            (self.V2C, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        V2C_ext[3, 3] = 1
        pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(
            np.dot(R0_ext, V2C_ext).T))
        return pts_lidar[:, 0:3]

    @staticmethod
    def rect_to_lidar_pseudo(pts_rect):
        """Rect -> pseudo-lidar frame via a fixed axis permutation (no extrinsics)."""
        pts_rect_hom = Calibration.cart_to_hom(pts_rect)
        T = np.array([[0, 0, 1, 0],
                      [-1, 0, 0, 0],
                      [0, -1, 0, 0],
                      [0, 0, 0, 1]], dtype=np.float32)
        pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(T))
        return pts_lidar[:, 0:3]

    def lidar_to_rect(self, pts_lidar):
        """
        :param pts_lidar: (N, 3)
        :return pts_rect: (N, 3)
        """
        if self.flipped:
            raise NotImplementedError
        pts_lidar_hom = self.cart_to_hom(pts_lidar)
        # pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
        pts_rect = np.dot(pts_lidar_hom, self.V2C.T)
        pts_rect = np.dot(pts_rect, self.R0.T)
        # pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
        return pts_rect

    @staticmethod
    def lidar_pseudo_to_rect(pts_lidar):
        """Pseudo-lidar -> rect frame (inverse of rect_to_lidar_pseudo)."""
        pts_lidar_hom = Calibration.cart_to_hom(pts_lidar)
        T = np.array([[0, 0, 1],
                      [-1, 0, 0],
                      [0, -1, 0],
                      [0, 0, 0]], dtype=np.float32)
        pts_rect = np.dot(pts_lidar_hom, T)
        return pts_rect

    def torch_lidar_pseudo_to_rect(self, pts_lidar):
        """Torch version of lidar_pseudo_to_rect (keeps gradients, runs on CUDA)."""
        pts_lidar_hom = torch.cat([pts_lidar, torch.ones_like(pts_lidar[..., -1:])], dim=-1)
        T = np.array([[0, 0, 1],
                      [-1, 0, 0],
                      [0, -1, 0],
                      [0, 0, 0]], dtype=np.float32)
        T = torch.from_numpy(T).cuda()
        pts_rect = torch.matmul(pts_lidar_hom, T)
        return pts_rect

    def rect_to_img(self, pts_rect):
        """
        :param pts_rect: (N, 3)
        :return pts_img: (N, 2), pts_rect_depth: (N,) depth in the camera frame
        """
        pts_rect_hom = self.cart_to_hom(pts_rect)
        pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
        pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T  # (N, 2)
        # Remove P2's z-translation to get depth in rect camera coords.
        pts_rect_depth = pts_2d_hom[:, 2] - \
            self.P2.T[3, 2]  # depth in rect camera coord
        return pts_img, pts_rect_depth

    def torch_rect_to_img(self, pts_rect):
        """
        :param pts_rect: (N, 3)
        :return pts_img: (N, 2)
        """
        pts_rect_hom = torch.cat([pts_rect, torch.ones_like(pts_rect[..., -1:])], dim=-1)
        pts_2d_hom = torch.matmul(pts_rect_hom, torch.from_numpy(self.P2.T).cuda())
        pts_img = pts_2d_hom[..., 0:2] / pts_rect_hom[..., 2:3]
        return pts_img

    def lidar_to_img(self, pts_lidar):
        """
        :param pts_lidar: (N, 3)
        :return pts_img: (N, 2), pts_depth: (N,)
        """
        if self.flipped:
            raise NotImplementedError
        pts_rect = self.lidar_to_rect(pts_lidar)
        pts_img, pts_depth = self.rect_to_img(pts_rect)
        return pts_img, pts_depth

    def img_to_rect(self, u, v, depth_rect):
        """Back-project pixels (u, v) with known depth into the rect frame.

        :param u: (N)
        :param v: (N)
        :param depth_rect: (N)
        :return: (N, 3) points in the rect frame
        """
        x = ((u - self.cu) * depth_rect) / self.fu + self.tx
        y = ((v - self.cv) * depth_rect) / self.fv + self.ty
        pts_rect = np.concatenate(
            (x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
        return pts_rect

    def corners3d_to_img_boxes(self, corners3d):
        """
        :param corners3d: (N, 8, 3) corners in rect coordinate
        :return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
        :return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
        """
        sample_num = corners3d.shape[0]
        corners3d_hom = np.concatenate(
            (corners3d, np.ones((sample_num, 8, 1))), axis=2)  # (N, 8, 4)
        img_pts = np.matmul(corners3d_hom, self.P2.T)  # (N, 8, 3)
        x, y = img_pts[:, :, 0] / img_pts[:, :,
                                          2], img_pts[:, :, 1] / img_pts[:, :, 2]
        # Tight axis-aligned 2D box around the 8 projected corners.
        x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
        x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
        boxes = np.concatenate(
            (x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
        boxes_corner = np.concatenate(
            (x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
        return boxes, boxes_corner
def unproject_depth_map_to_3d(depth_map, calib, image=None):
    """Back-project a depth map into camera-frame 3D points (pinhole model).

    Pixels with depth <= 0.1 are discarded. When `image` is given, the colors
    of the kept pixels are returned as well.

    Returns:
        (xyz, mask) or (xyz, color, mask).
    """
    grid_u, grid_v = np.meshgrid(np.arange(depth_map.shape[1]), np.arange(depth_map.shape[0]))
    mask = depth_map > 0.1  # drop (near-)empty depth pixels
    z = depth_map[mask]
    u = grid_u[mask]
    v = grid_v[mask]
    x = (u - calib.cu) * z / calib.fu
    y = (v - calib.cv) * z / calib.fv
    xyz = np.stack((x, y, z), axis=-1)
    if image is None:
        return xyz, mask
    return xyz, image[mask], mask
def project_points_with_mask_back_to_image(values, mask):
    """Scatter per-point `values` back into a dense image using boolean `mask`.

    `values` must be ordered like the True entries of `mask` (row-major);
    unmasked pixels are left at zero.
    """
    grid_u, grid_v = np.meshgrid(np.arange(mask.shape[1]), np.arange(mask.shape[0]))
    vs = grid_v[mask]
    us = grid_u[mask]
    out = np.zeros((mask.shape[0], mask.shape[1]) + tuple(values.shape[1:]), dtype=np.float32)
    out[vs, us] = values
    return out
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/detectors_stereo/liga.py
|
<reponame>Owen-Liuyuxuan/LIGA-Stereo
from liga.ops.iou3d_nms import iou3d_nms_utils
from .stereo_detector3d_template import StereoDetector3DTemplate
class LIGA(StereoDetector3DTemplate):
    """LIGA-Stereo detector: runs the configured module pipeline and returns
    either training losses or post-processed predictions."""
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Training: returns ({'loss': ...}, tb_dict, disp_dict).
        Eval: returns (pred_dicts, ret_dicts) after post-processing."""
        # Each module consumes and augments the shared batch_dict.
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss(batch_dict)
            ret_dict = {
                'loss': loss
            }
            return ret_dict, tb_dict, disp_dict
        else:
            pred_dicts, ret_dicts = self.post_processing(batch_dict)
            # Forward any depth-error diagnostics produced by the pipeline.
            for k in batch_dict.keys():
                if k.startswith('depth_error_'):
                    if isinstance(batch_dict[k], list):
                        ret_dicts[k] = batch_dict[k]
                    elif len(batch_dict[k].shape) == 0:
                        ret_dicts[k] = batch_dict[k].item()
            # Attach per-sample 2D predictions when a 2D head is present.
            if getattr(self, 'dense_head_2d', None) and 'boxes_2d_pred' in batch_dict:
                assert len(pred_dicts) == len(batch_dict['boxes_2d_pred'])
                for pred_dict, pred_2d_dict in zip(pred_dicts, batch_dict['boxes_2d_pred']):
                    pred_dict['pred_boxes_2d'] = pred_2d_dict['pred_boxes_2d']
                    pred_dict['pred_scores_2d'] = pred_2d_dict['pred_scores_2d']
                    pred_dict['pred_labels_2d'] = pred_2d_dict['pred_labels_2d']
            pred_dicts[0]['batch_dict'] = batch_dict
            return pred_dicts, ret_dicts

    def get_training_loss(self, batch_dict):
        """Sum RPN + depth (+ optional 2D RPN) losses; returns (loss, tb_dict, disp_dict)."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_depth, tb_dict = self.depth_loss_head.get_loss(batch_dict, tb_dict)
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            'loss_depth': loss_depth.item(),
            **tb_dict
        }
        loss = loss_rpn + loss_depth
        if getattr(self, 'dense_head_2d', None):
            loss_rpn_2d, tb_dict = self.dense_head_2d.get_loss(batch_dict, tb_dict)
            tb_dict['loss_rpn2d'] = loss_rpn_2d.item()
            loss += loss_rpn_2d
        return loss, tb_dict, disp_dict

    def get_iou_map(self, batch_dict):
        """Per-sample IoU3D matrices between box predictions and GT boxes
        (None where a sample has no GT)."""
        batch_size = batch_dict['batch_size']
        iou_map_results = []
        for index in range(batch_size):
            # batch_box_preds is either flat (with batch_index) or batched.
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]  # [N_anchors, 7]
            gt_boxes = batch_dict['gt_boxes'][index]
            if gt_boxes.shape[0] <= 0:
                iou_map_results.append(None)
            else:
                iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(
                    box_preds[:, 0:7], gt_boxes[:, 0:7])
                iou_map_results.append(iou3d_roi.detach().cpu().numpy())
        return iou_map_results
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/backbones_3d_lidar/__init__.py
|
from .spconv_backbone import VoxelBackBone8x, VoxelBackBone4x, VoxelResBackBone8x, VoxelBackBone4xNoFinalBnReLU
# Registry mapping config NAME strings to LiDAR 3D backbone classes.
__all__ = {
    'VoxelBackBone8x': VoxelBackBone8x,
    'VoxelBackBone4x': VoxelBackBone4x,
    'VoxelResBackBone8x': VoxelResBackBone8x,
    'VoxelBackBone4xNoFinalBnReLU': VoxelBackBone4xNoFinalBnReLU
}
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/backbones_2d/hg_bev_backbone.py
|
<gh_stars>10-100
# Hourglass BEV backbone (same as DSGN. https://arxiv.org/abs/2001.03398)
import torch.nn as nn
from liga.models.backbones_3d_stereo.submodule import convbn, hourglass2d
class HgBEVBackbone(nn.Module):
    """Hourglass BEV backbone (same as DSGN, https://arxiv.org/abs/2001.03398).

    A conv block followed by a 2D hourglass refines the BEV feature map
    produced by the 3D-to-BEV projection.
    """
    def __init__(self, model_cfg, input_channels):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_channels = model_cfg.num_channels
        self.GN = model_cfg.GN
        # entry conv: project input_channels -> num_channels
        # (attribute names kept as-is so existing checkpoints still load)
        self.rpn3d_conv2 = nn.Sequential(
            convbn(input_channels, self.num_channels, 3, 1, 1, 1, gn=self.GN),
            nn.ReLU(inplace=True))
        # hourglass refinement on the projected features
        self.rpn3d_conv3 = hourglass2d(self.num_channels, gn=self.GN)
        self.num_bev_features = self.num_channels
    def forward(self, data_dict):
        """Refine data_dict['spatial_features']; store pre- and post-hourglass maps."""
        bev_features = self.rpn3d_conv2(data_dict['spatial_features'])
        data_dict['spatial_features_2d_prehg'] = bev_features
        data_dict['spatial_features_2d'] = self.rpn3d_conv3(bev_features, None, None)[0]
        return data_dict
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/detectors_stereo/__init__.py
|
from .liga import LIGA
# Registry mapping config NAME strings to stereo detector classes.
__all__ = {
    'stereo_LIGA': LIGA,
}
def build_detector(model_cfg, num_class, dataset):
    """Instantiate the stereo detector registered under ``model_cfg.NAME``."""
    detector_cls = __all__[model_cfg.NAME]
    return detector_cls(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/__init__.py
|
<filename>liga/models/__init__.py
from collections import namedtuple
import numpy as np
import torch
from .detectors_lidar import build_detector as build_lidar_detector
from .detectors_stereo import build_detector as build_stereo_detector
def build_network(model_cfg, num_class, dataset):
    """Build a detector; config names starting with 'stereo' route to the stereo builder,
    everything else to the LiDAR builder."""
    if model_cfg['NAME'].startswith('stereo'):
        builder = build_stereo_detector
    else:
        builder = build_lidar_detector
    return builder(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
def load_data_to_gpu(batch_dict):
    """Move numpy arrays in ``batch_dict`` to CUDA tensors, in place.

    Non-ndarray values and metadata keys are left untouched; float arrays
    become float CUDA tensors, integer arrays become long CUDA tensors,
    bool arrays are deliberately kept as numpy (consumed on CPU).

    Raises:
        ValueError: for an ndarray whose dtype is not handled.
    """
    for key, val in batch_dict.items():
        if not isinstance(val, np.ndarray):
            continue
        # metadata consumed on the CPU side; never moved to GPU
        if key in ['frame_id', 'metadata', 'calib', 'calib_ori', 'image_shape', 'gt_names']:
            continue
        if val.dtype in [np.float32, np.float64]:
            batch_dict[key] = torch.from_numpy(val).float().cuda()
        elif val.dtype in [np.uint8, np.int32, np.int64]:
            batch_dict[key] = torch.from_numpy(val).long().cuda()
        elif val.dtype in [bool]:
            # bool masks stay as numpy by design
            pass
        else:
            # report the dtype: type(val) is always np.ndarray here and was useless
            raise ValueError(f"invalid data type {key}: {val.dtype}")
def model_fn_decorator():
    """Build the training-step closure: moves the batch to GPU, runs the model,
    reduces the loss, bumps the model's global step, and returns a ModelReturn."""
    ModelReturn = namedtuple('ModelReturn', ['loss', 'tb_dict', 'disp_dict'])
    def model_func(model, batch_dict):
        load_data_to_gpu(batch_dict)
        ret_dict, tb_dict, disp_dict = model(batch_dict)
        loss = ret_dict['loss'].mean()
        # plain model exposes update_global_step directly; DDP wraps it in .module
        stepper = model if hasattr(model, 'update_global_step') else model.module
        stepper.update_global_step()
        return ModelReturn(loss, tb_dict, disp_dict)
    return model_func
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/detectors_stereo/stereo_detector3d_template.py
|
import os
import torch
import torch.nn as nn
import torch.distributed as dist
from liga.utils import nms_utils
from liga.utils.common_utils import create_logger
from liga.ops.iou3d_nms import iou3d_nms_utils
from liga.models import backbones_2d, backbones_3d_stereo, dense_heads
from liga.models.backbones_2d import map_to_bev
from liga.models.dense_heads.depth_loss_head import DepthLossHead
class StereoDetector3DTemplate(nn.Module):
    """Base class for stereo 3D detectors.

    Builds the module pipeline (optional frozen LiDAR teacher, stereo 3D
    backbone, BEV projection, 2D backbone, heads) from the model config,
    and provides post-processing, recall bookkeeping, and checkpoint I/O.
    """
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.dataset = dataset
        self.class_names = dataset.class_names
        # persisted in checkpoints; incremented once per optimizer step
        self.register_buffer('global_step', torch.LongTensor(1).zero_())
        # build order; each build_<name> consumes/extends model_info_dict
        self.module_topology = [
            'lidar_model',
            'backbone_3d', 'map_to_bev_module',
            'backbone_2d', 'dense_head_2d', 'dense_head', 'depth_loss_head'
        ]
    def train(self, mode=True):
        """Like nn.Module.train() but keeps frozen modules (e.g. LiDAR teacher) in eval mode."""
        self.training = mode
        for module in self.children():
            if module in self.model_info_dict['fixed_module_list']:
                module.eval()
            else:
                module.train(mode)
        return self
    @property
    def mode(self):
        # string form of the training flag, used e.g. as a config key
        return 'TRAIN' if self.training else 'TEST'
    def update_global_step(self):
        self.global_step += 1
    def build_networks(self):
        """Instantiate all modules of module_topology; returns them in execution order."""
        model_info_dict = {
            'module_list': [],
            'fixed_module_list': [],
            # prefer the stereo-specific grid/voxel settings when the dataset has them
            'grid_size': getattr(self.dataset, 'stereo_grid_size', self.dataset.grid_size),
            'point_cloud_range': self.dataset.point_cloud_range,
            'voxel_size': getattr(self.dataset, 'stereo_voxel_size', self.dataset.voxel_size),
            'boxes_gt_in_cam2_view': self.dataset.boxes_gt_in_cam2_view
        }
        for module_name in self.module_topology:
            module, model_info_dict = getattr(self, 'build_%s' % module_name)(
                model_info_dict=model_info_dict
            )
            self.add_module(module_name, module)
        self.model_info_dict = model_info_dict
        return model_info_dict['module_list']
    def build_lidar_model(self, model_info_dict):
        """Optional frozen LiDAR detector (teacher); all its params have requires_grad=False."""
        from ..detectors_lidar import build_detector as build_lidar_detector
        if self.model_cfg.get('LIDAR_MODEL', None) is None:
            return None, model_info_dict
        lidar_model = build_lidar_detector(self.model_cfg.LIDAR_MODEL, self.num_class, self.dataset)
        for param in lidar_model.parameters():
            param.requires_grad_(False)
        model_info_dict['module_list'].append(lidar_model)
        # kept in eval mode by the train() override above
        model_info_dict['fixed_module_list'].append(lidar_model)
        logger = create_logger(rank=dist.get_rank() if dist.is_initialized() else 0)
        if self.model_cfg.LIDAR_MODEL.PRETRAINED_MODEL:
            lidar_model.load_params_from_file(
                filename=self.model_cfg.LIDAR_MODEL.PRETRAINED_MODEL, to_cpu=True, logger=logger)
        return lidar_model, model_info_dict
    def build_backbone_3d(self, model_info_dict):
        """Stereo 3D backbone; records its output channel count in model_info_dict."""
        if self.model_cfg.get('BACKBONE_3D', None) is None:
            return None, model_info_dict
        backbone_3d_module = backbones_3d_stereo.__all__[self.model_cfg.BACKBONE_3D.NAME](
            model_cfg=self.model_cfg.BACKBONE_3D,
            class_names=self.class_names,
            grid_size=model_info_dict['grid_size'],
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            boxes_gt_in_cam2_view=model_info_dict['boxes_gt_in_cam2_view'])
        model_info_dict['module_list'].append(backbone_3d_module)
        model_info_dict['num_3d_features'] = backbone_3d_module.num_3d_features
        return backbone_3d_module, model_info_dict
    def build_map_to_bev_module(self, model_info_dict):
        """Projection of 3D features to BEV; records num_bev_features."""
        if self.model_cfg.get('MAP_TO_BEV', None) is None:
            return None, model_info_dict
        map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
            model_cfg=self.model_cfg.MAP_TO_BEV,
            grid_size=model_info_dict['grid_size']
        )
        model_info_dict['module_list'].append(map_to_bev_module)
        model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
        return map_to_bev_module, model_info_dict
    def build_backbone_2d(self, model_info_dict):
        """2D BEV backbone; updates num_bev_features with its output width."""
        if self.model_cfg.get('BACKBONE_2D', None) is None:
            return None, model_info_dict
        backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
            model_cfg=self.model_cfg.BACKBONE_2D,
            input_channels=model_info_dict['num_bev_features']
        )
        model_info_dict['module_list'].append(backbone_2d_module)
        model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
        return backbone_2d_module, model_info_dict
    def build_dense_head_2d(self, model_info_dict):
        """Optional auxiliary 2D detection head (MMDet wrapper or native head)."""
        if self.model_cfg.get('DENSE_HEAD_2D', None) is None:
            return None, model_info_dict
        if self.model_cfg.DENSE_HEAD_2D.NAME == 'MMDet2DHead':
            dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD_2D.NAME](
                model_cfg=self.model_cfg.DENSE_HEAD_2D
            )
            model_info_dict['module_list'].append(dense_head_module)
            return dense_head_module, model_info_dict
        else:
            # NOTE(review): input_channels is hard-coded to 32 here — confirm it
            # matches the feature width the native 2D head actually receives.
            dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD_2D.NAME](
                model_cfg=self.model_cfg.DENSE_HEAD_2D,
                input_channels=32,
                num_class=self.num_class,
                class_names=self.class_names,
                grid_size=model_info_dict['grid_size'],
                point_cloud_range=model_info_dict['point_cloud_range'],
                predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
            )
            model_info_dict['module_list'].append(dense_head_module)
            return dense_head_module, model_info_dict
    def build_dense_head(self, model_info_dict):
        """Main 3D detection head operating on the BEV features."""
        if self.model_cfg.get('DENSE_HEAD', None) is None:
            return None, model_info_dict
        dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
            model_cfg=self.model_cfg.DENSE_HEAD,
            input_channels=model_info_dict['num_bev_features'],
            num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
            class_names=self.class_names,
            grid_size=model_info_dict['grid_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False) or self.model_cfg.DENSE_HEAD.get('predict_boxes_when_training', False)
        )
        model_info_dict['module_list'].append(dense_head_module)
        return dense_head_module, model_info_dict
    def build_depth_loss_head(self, model_info_dict):
        """Auxiliary head supervising the stereo depth estimate."""
        if self.model_cfg.get('DEPTH_LOSS_HEAD', None) is None:
            return None, model_info_dict
        depth_loss_head = DepthLossHead(
            model_cfg=self.model_cfg.DEPTH_LOSS_HEAD,
            point_cloud_range=model_info_dict['point_cloud_range']
        )
        model_info_dict['module_list'].append(depth_loss_head)
        return depth_loss_head, model_info_dict
    def forward(self, **kwargs):
        # concrete detectors (e.g. LIGA) implement the actual pipeline
        raise NotImplementedError
    def post_processing(self, batch_dict):
        """
        Apply NMS per sample and collect final boxes/scores/labels plus recall stats.

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (
                N1+N2+..., num_classes | 1)
                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois) 1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            (pred_dicts, recall_dict): per-sample prediction dicts and cumulative recalls.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                # flat (N_total, 7+C) layout: mask out this sample's rows
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                # batched (B, N, 7+C) layout: plain batch index
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                # multi-head case: a list of per-head score tensors
                cls_preds = [x[batch_mask]
                             for x in batch_dict['batch_cls_preds']]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                if batch_dict.get('has_class_labels', False):
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    label_preds = None
                pred_scores, pred_labels, pred_boxes = nms_utils.multi_classes_nms(
                    cls_scores=cls_preds, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH,
                    label_preds=label_preds,
                )
                final_scores = pred_scores
                # shift to 1-based class labels
                final_labels = pred_labels + 1
                final_boxes = pred_boxes
            else:
                # class-agnostic path: best class per box, then a single NMS
                cls_preds, label_preds = torch.max(cls_preds, dim=-1)
                if batch_dict.get('has_class_labels', False):
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    label_preds = label_preds + 1
                selected, selected_scores = nms_utils.class_agnostic_nms(
                    box_scores=cls_preds, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    # replace NMS scores with the raw (pre-sigmoid) maxima
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels,
            }
            recall_dict, iou_results, ioubev_results = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )
            if iou_results is not None:
                record_dict['iou_results'] = iou_results
                record_dict['ioubev_results'] = ioubev_results
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
    @staticmethod
    def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
        """Accumulate recall counts at each IoU threshold and return per-GT best IoUs."""
        if 'gt_boxes' not in data_dict:
            return recall_dict, None, None
        rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
        gt_boxes = data_dict['gt_boxes'][batch_index]
        if recall_dict.__len__() == 0:
            # lazily initialize the counters on the first sample
            recall_dict = {'gt': 0}
            for cur_thresh in thresh_list:
                recall_dict['roi_%s' % (str(cur_thresh))] = 0
                recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
        cur_gt = gt_boxes
        k = cur_gt.__len__() - 1
        # drop zero-padded GT rows at the end of the tensor
        while k > 0 and cur_gt[k].sum() == 0:
            k -= 1
        cur_gt = cur_gt[:k + 1]
        if cur_gt.shape[0] > 0:
            if box_preds.shape[0] > 0:
                iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(
                    box_preds[:, 0:7], cur_gt[:, 0:7])
                ioubev_rcnn = iou3d_nms_utils.boxes_iou_bev(
                    box_preds[:, 0:7], cur_gt[:, 0:7])
            else:
                iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
                # NOTE(review): '00' is just the integer 0 — consider shape[0]
                ioubev_rcnn = torch.zeros((0, cur_gt.shape[00]))
            if rois is not None:
                iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(
                    rois[:, 0:7], cur_gt[:, 0:7])
            for cur_thresh in thresh_list:
                if iou3d_rcnn.shape[0] == 0:
                    recall_dict['rcnn_%s' % str(cur_thresh)] += 0
                else:
                    # a GT counts as recalled when its best-matching pred exceeds the threshold
                    rcnn_recalled = (iou3d_rcnn.max(dim=0)[
                                     0] > cur_thresh).sum().item()
                    recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
                if rois is not None:
                    roi_recalled = (iou3d_roi.max(dim=0)[
                                    0] > cur_thresh).sum().item()
                    recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
            # per box iou
            if iou3d_rcnn.shape[0] == 0:
                iou_results = [0.] * cur_gt.shape[0]
                ioubev_results = [0.] * cur_gt.shape[0]
            else:
                iou_results = iou3d_rcnn.max(0).values.cpu().numpy().tolist()
                ioubev_results = ioubev_rcnn.max(0).values.cpu().numpy().tolist()
            recall_dict['gt'] += cur_gt.shape[0]
        else:
            # NOTE(review): gt_iou is never used — candidate for removal
            gt_iou = box_preds.new_zeros(box_preds.shape[0])
            iou_results = []
            ioubev_results = []
        return recall_dict, iou_results, ioubev_results
    def load_params_from_file(self, filename, logger, to_cpu=False):
        """Load matching-shape weights from a checkpoint; log weights that were skipped."""
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' %
                    (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        model_state_disk = checkpoint['model_state']
        if 'version' in checkpoint:
            logger.info('==> Checkpoint trained from version: %s' %
                        checkpoint['version'])
        update_model_state = {}
        for key, val in model_state_disk.items():
            # only take weights whose name AND shape match the current model
            if key in self.state_dict() and self.state_dict()[key].shape == model_state_disk[key].shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))
        state_dict = self.state_dict()
        state_dict.update(update_model_state)
        self.load_state_dict(state_dict)
        for key in state_dict:
            if key not in update_model_state:
                logger.info('Not updated weight %s: %s <- %s' %
                            (key, str(state_dict[key].shape), str(model_state_disk[key].shape) if key in model_state_disk else 'None'))
        logger.info('==> Done (loaded %d/%d)' %
                    (len(update_model_state), len(self.state_dict())))
    def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
        """Resume training: load model weights, optimizer state, and (it, epoch) counters."""
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' %
                    (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        epoch = checkpoint.get('epoch', -1)
        it = checkpoint.get('it', 0.0)
        self.load_state_dict(checkpoint['model_state'])
        if optimizer is not None:
            if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
                logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
                            % (filename, 'CPU' if to_cpu else 'GPU'))
                optimizer.load_state_dict(checkpoint['optimizer_state'])
            else:
                # fall back to a sibling '<name>_optim.<ext>' checkpoint file
                assert filename[-4] == '.', filename
                src_file, ext = filename[:-4], filename[-3:]
                optimizer_filename = '%s_optim.%s' % (src_file, ext)
                if os.path.exists(optimizer_filename):
                    optimizer_ckpt = torch.load(
                        optimizer_filename, map_location=loc_type)
                    optimizer.load_state_dict(
                        optimizer_ckpt['optimizer_state'])
        if 'version' in checkpoint:
            print('==> Checkpoint trained from version: %s' %
                  checkpoint['version'])
        logger.info('==> Done')
        return it, epoch
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/utils/box_coder_utils.py
|
import numpy as np
import torch
from liga.utils import box_utils, common_utils
class ResidualCoder(object):
    """Residual box coder for 3D boxes laid out as [x, y, z, dx, dy, dz, heading, ...].

    Targets are offsets of ground-truth boxes relative to anchors; sizes are
    encoded as log ratios.  With ``encode_angle_by_sincos`` the heading target
    is split into (cos, sin) deltas and ``code_size`` grows by one.
    """
    def __init__(self, code_size=7, encode_angle_by_sincos=False, div_by_diagonal=True, use_corners=False, use_tanh=False, tanh_range=3.14, **kwargs):
        super().__init__()
        self.code_size = code_size
        self.encode_angle_by_sincos = encode_angle_by_sincos
        self.div_by_diagonal = div_by_diagonal
        self.use_corners = use_corners
        self.use_tanh = use_tanh
        self.tanh_range = tanh_range
        if self.encode_angle_by_sincos:
            # heading occupies two channels (cos, sin) instead of one
            self.code_size += 1
        if self.use_corners:
            assert not encode_angle_by_sincos, "encode_angle_by_sincos should not be enabled when using corners"
    def encode_torch(self, boxes, anchors):
        """Encode ``boxes`` as residual targets w.r.t. ``anchors`` (both (..., 7+C)).

        NOTE: clamps the size channels of both inputs in place (>= 1e-5).
        In corner mode the raw boxes are returned unchanged.
        """
        if self.use_corners:
            return boxes
        anchors[..., 3:6] = torch.clamp_min(anchors[..., 3:6], min=1e-5)
        boxes[..., 3:6] = torch.clamp_min(boxes[..., 3:6], min=1e-5)
        ax, ay, az, adx, ady, adz, ar, *a_extra = torch.split(anchors, 1, dim=-1)
        gx, gy, gz, gdx, gdy, gdz, gr, *g_extra = torch.split(boxes, 1, dim=-1)
        if self.div_by_diagonal:
            # planar offsets normalized by the anchor's BEV diagonal, z by anchor height
            diag = torch.sqrt(adx ** 2 + ady ** 2)
            tx = (gx - ax) / diag
            ty = (gy - ay) / diag
            tz = (gz - az) / adz
        else:
            tx = gx - ax
            ty = gy - ay
            tz = gz - az
        tdx = torch.log(gdx / adx)
        tdy = torch.log(gdy / ady)
        tdz = torch.log(gdz / adz)
        if self.encode_angle_by_sincos:
            heading_ts = [torch.cos(gr) - torch.cos(ar), torch.sin(gr) - torch.sin(ar)]
        else:
            # wrap the heading residual back into a single period
            heading_ts = [common_utils.limit_period(gr - ar, offset=0.5, period=np.pi * 2)]
        extra_ts = [g - a for g, a in zip(g_extra, a_extra)]
        return torch.cat([tx, ty, tz, tdx, tdy, tdz, *heading_ts, *extra_ts], dim=-1)
    def process_before_loss(self, anchors, pred, targets, dim=6):
        """Prepare a (pred, target) pair for the regression loss.

        Default mode swaps the heading channel at index ``dim`` for the
        sin(a)cos(b) / cos(a)sin(b) encoding; corner mode decodes predictions
        and compares the 8 box corners (24 values per box) instead.
        """
        assert dim != -1
        if self.use_corners:
            decoded = self.decode_torch(pred, anchors)
            pred_corners = box_utils.torch_boxes3d_to_corners3d_kitti_lidar(decoded)
            target_corners = box_utils.torch_boxes3d_to_corners3d_kitti_lidar(targets)
            pred_corners = pred_corners.view(*pred_corners.shape[:-2], 24)
            target_corners = target_corners.view(*target_corners.shape[:-2], 24)
            return pred_corners, target_corners
        if self.use_tanh:
            # squash the raw heading channel into (-tanh_range/2, tanh_range/2), in place
            pred[..., -1] = torch.tanh(pred[..., -1]) * (self.tanh_range / 2)
        sin_pred_cos_tg = torch.sin(pred[..., dim:dim + 1]) * torch.cos(targets[..., dim:dim + 1])
        cos_pred_sin_tg = torch.cos(pred[..., dim:dim + 1]) * torch.sin(targets[..., dim:dim + 1])
        out_pred = torch.cat([pred[..., :dim], sin_pred_cos_tg, pred[..., dim + 1:]], dim=-1)
        out_targets = torch.cat([targets[..., :dim], cos_pred_sin_tg, targets[..., dim + 1:]], dim=-1)
        return out_pred, out_targets
    def decode_torch(self, box_encodings, anchors, decode_translation=True):
        """Invert :meth:`encode_torch`: residuals + anchors -> absolute boxes.

        With ``use_tanh`` the last channel is squashed in place first.  When
        ``decode_translation`` is False, centers are decoded relative to the
        origin instead of the anchor centers.
        """
        if self.use_tanh:
            box_encodings[..., -1] = torch.tanh(box_encodings[..., -1]) * (self.tanh_range / 2)
        ax, ay, az, adx, ady, adz, ar, *a_extra = torch.split(anchors, 1, dim=-1)
        if not decode_translation:
            ax = 0
            ay = 0
            az = 0
        if self.encode_angle_by_sincos:
            tx, ty, tz, tdx, tdy, tdz, t_cos, t_sin, *t_extra = torch.split(box_encodings, 1, dim=-1)
        else:
            tx, ty, tz, tdx, tdy, tdz, tr, *t_extra = torch.split(box_encodings, 1, dim=-1)
        if self.div_by_diagonal:
            diag = torch.sqrt(adx ** 2 + ady ** 2)
            x = tx * diag + ax
            y = ty * diag + ay
            z = tz * adz + az
        else:
            x = tx + ax
            y = ty + ay
            z = tz + az
        dx = torch.exp(tdx) * adx
        dy = torch.exp(tdy) * ady
        dz = torch.exp(tdz) * adz
        if self.encode_angle_by_sincos:
            heading = torch.atan2(t_sin + torch.sin(ar), t_cos + torch.cos(ar))
        else:
            heading = tr + ar
        extras = [t + a for t, a in zip(t_extra, a_extra)]
        return torch.cat([x, y, z, dx, dy, dz, heading, *extras], dim=-1)
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/backbones_3d_lidar/vfe/__init__.py
|
from .mean_vfe import MeanVFE
from .vfe_template import VFETemplate
# Registry mapping config NAME strings to voxel feature encoder classes.
__all__ = {
    'VFETemplate': VFETemplate,
    'MeanVFE': MeanVFE,
}
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/dense_heads/depth_loss_head.py
|
<reponame>Owen-Liuyuxuan/LIGA-Stereo
# Depth Loss Head for stereo matching supervision.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
IS_PRINT = (not dist.is_initialized()) or (dist.get_rank() == 0)
class DepthLossHead(nn.Module):
    """Depth loss head for stereo-matching supervision.

    Training: combines one or more losses (l1 / cross-entropy / gaussian /
    laplacian / hard-ce) over the predicted depth maps and cost volumes.
    Evaluation: writes depth-error statistics into batch_dict for reporting.
    """
    def __init__(self, model_cfg, point_cloud_range):
        super().__init__()
        self.model_cfg = model_cfg
        # dict: loss-type name -> weight
        self.depth_loss_type = model_cfg.LOSS_TYPE
        # per-stage weights, one per entry of batch_dict['depth_preds']
        self.loss_weights = model_cfg.WEIGHTS
        self.point_cloud_range = point_cloud_range
        # NOTE(review): range[0]/range[3] are the x-axis bounds; presumably x is
        # the viewing depth in the pseudo-lidar frame — confirm.
        self.min_depth = point_cloud_range[0]
        self.max_depth = point_cloud_range[3]
        self.forward_ret_dict = {}
    def get_loss(self, batch_dict, tb_dict=None):
        """Compute the weighted depth loss over all prediction stages.

        Returns (depth_loss, tb_dict); per-stage/per-type scalars are logged
        into tb_dict under 'loss_depth_<stage>_<type>'.
        """
        if tb_dict is None:
            tb_dict = {}
        depth_preds = batch_dict['depth_preds']
        depth_volumes = batch_dict['depth_volumes']
        depth_sample = batch_dict['depth_samples']
        # depth_fgmask_imgs = batch_dict['depth_fgmask_img'].squeeze(1)
        # gt_boxes_2d = batch_dict['gt_boxes_2d']
        gt = batch_dict['depth_gt_img'].squeeze(1)
        depth_loss = 0.
        assert len(depth_preds) == len(depth_volumes)
        assert len(depth_preds) == len(self.loss_weights)
        # supervise only pixels whose GT depth lies inside the detection range
        mask = (gt > self.min_depth) & (gt < self.max_depth)
        gt = gt[mask]
        # assumes depth_samples is a uniformly spaced 1D grid of depth bins
        depth_interval = depth_sample[1] - depth_sample[0]
        assert len(depth_preds) == len(depth_volumes)
        assert len(depth_volumes) == len(self.loss_weights)
        for i, (depth_pred, depth_cost, pred_weight) in enumerate(zip(depth_preds, depth_volumes, self.loss_weights)):
            depth_pred = depth_pred[mask]
            # (B, D, H, W) -> (B, H, W, D), then gather masked pixels -> (N, D)
            depth_cost = depth_cost.permute(0, 2, 3, 1)[mask]
            for loss_type, loss_type_weight in self.depth_loss_type.items():
                if depth_pred.shape[0] == 0:
                    # no valid GT pixels: emit a zero loss that keeps the graph connected
                    print('no gt warning')
                    loss = depth_preds[i].mean() * 0.0
                else:
                    if loss_type == "l1":
                        loss = F.smooth_l1_loss(depth_pred, gt, reduction='none')
                        loss = loss.mean()
                    elif loss_type == "purel1":
                        loss = F.l1_loss(depth_pred, gt, reduction='none')
                        loss = loss.mean()
                    elif loss_type == "ce":
                        # soft cross-entropy: linear interpolation weight per depth bin
                        depth_log_prob = F.log_softmax(depth_cost, dim=1)
                        distance = torch.abs(
                            depth_sample.cuda() - gt.unsqueeze(-1)) / depth_interval
                        probability = 1 - distance.clamp(max=1.0)
                        loss = -(probability * depth_log_prob).sum(-1)
                        loss = loss.mean()
                    elif loss_type.startswith("gaussian"):
                        # target distribution: gaussian around the GT bin, sigma in the name
                        depth_log_prob = F.log_softmax(depth_cost, dim=1)
                        distance = torch.abs(
                            depth_sample.cuda() - gt.unsqueeze(-1))
                        sigma = float(loss_type.split("_")[1])
                        if IS_PRINT:
                            print("depth loss using gaussian normalized", sigma)
                        probability = torch.exp(-0.5 * (distance ** 2) / (sigma ** 2))
                        probability /= torch.clamp(probability.sum(1, keepdim=True), min=1.0)
                        loss = -(probability * depth_log_prob).sum(-1)
                        loss = loss.mean()
                    elif loss_type.startswith("laplacian"):
                        # target distribution: laplacian around the GT bin, sigma in the name
                        depth_log_prob = F.log_softmax(depth_cost, dim=1)
                        distance = torch.abs(
                            depth_sample.cuda() - gt.unsqueeze(-1))
                        sigma = float(loss_type.split("_")[1])
                        if IS_PRINT:
                            print("depth loss using laplacian normalized", sigma)
                        probability = torch.exp(-distance / sigma)
                        probability /= torch.clamp(probability.sum(1, keepdim=True), min=1.0)
                        loss = -(probability * depth_log_prob).sum(-1)
                        loss = loss.mean()
                    elif loss_type == "hard_ce":
                        # binarized version of "ce": only bins within half an interval count
                        depth_log_prob = F.log_softmax(depth_cost, dim=1)
                        distance = torch.abs(
                            depth_sample.cuda() - gt.unsqueeze(-1)) / depth_interval
                        probability = 1 - distance.clamp(max=1.0)
                        probability[probability >= 0.5] = 1.0
                        probability[probability < 0.5] = .0
                        loss = -(probability * depth_log_prob).sum(-1)
                        loss = loss.mean()
                    else:
                        raise NotImplementedError
                tb_dict['loss_depth_{}_{}'.format(i, loss_type)] = loss.item()
                depth_loss += pred_weight * loss_type_weight * loss
        return depth_loss, tb_dict
    def forward(self, batch_dict):
        """Evaluation-time pass: compute depth-error maps and statistics.

        Only batch size 1 is supported; training mode is a no-op here
        (losses are computed via get_loss).
        """
        if batch_dict['depth_preds'][-1].shape[0] != 1:
            raise NotImplementedError
        if not self.training:
            # depth_pred = batch_dict['depth_preds'][-1]
            depth_pred_local = batch_dict['depth_preds_local'][-1]
            # depth_cost = batch_dict['depth_volumes'][0].permute(0, 2, 3, 1)
            # depth_sample = batch_dict['depth_samples']
            gt = batch_dict['depth_gt_img'].squeeze(1)
            mask = (gt > self.min_depth) & (gt < self.max_depth)
            # depth_interval = depth_sample[1] - depth_sample[0]
            assert mask.sum() > 0
            # abs error
            error_map = torch.abs(depth_pred_local - gt) * mask.float()
            batch_dict['depth_error_map'] = error_map
            # mean_error = error_map[mask].mean()
            median_error = error_map[mask].median()
            # batch_dict['depth_error_local_mean'] = mean_error
            batch_dict['depth_error_all_local_median'] = median_error
            # fraction of valid pixels whose error exceeds each threshold
            for thresh in [0.2, 0.4, 0.8, 1.6]:
                batch_dict[f"depth_error_all_local_{thresh:.1f}m"] = (error_map[mask] > thresh).float().mean()
            if 'depth_fgmask_img' in batch_dict:
                # foreground pixels: valid depth AND inside an instance mask
                fg_mask = (gt > self.min_depth) & (gt < self.max_depth) & (batch_dict['depth_fgmask_img'].squeeze(1) > 0)
                local_errs = torch.abs(depth_pred_local - gt)
                fg_local_errs = local_errs[fg_mask]
                # fg local depth errors per instance
                fg_gts = gt[fg_mask]
                batch_dict['depth_error_fg_local_statistics_perbox'] = []
                # instance ids are stored 1-based in the mask image
                fg_ids = batch_dict['depth_fgmask_img'].squeeze(1)[fg_mask].int() - 1
                if len(fg_ids) > 0:
                    for idx in range(fg_ids.min().item(), fg_ids.max().item() + 1):
                        if batch_dict['gt_index'][0][idx] < 0:
                            continue
                        # skip instances with too few pixels for a robust statistic
                        if torch.sum(fg_ids == idx) <= 5:
                            continue
                        errs_i = fg_local_errs[fg_ids == idx]
                        fg_gt_i_median = fg_gts[fg_ids == idx].median().item()
                        num_points_i = (fg_ids == idx).sum().item()
                        batch_dict['depth_error_fg_local_statistics_perbox'].append(dict(
                            distance=fg_gt_i_median,
                            err_median=errs_i.median().item(),
                            num_points=num_points_i,
                            name=batch_dict['gt_names'][0][idx],
                            truncated=batch_dict['gt_truncated'][0][idx],
                            occluded=batch_dict['gt_occluded'][0][idx],
                            difficulty=batch_dict['gt_difficulty'][0][idx],
                            index=batch_dict['gt_index'][0][idx],
                            idx=idx,
                            image_idx=batch_dict['image_idx'][0]
                        ))
                        for thresh in [0.2, 0.4, 0.8, 1.6]:
                            batch_dict['depth_error_fg_local_statistics_perbox'][-1][f"err_{thresh:.1f}m"] = (errs_i > thresh).float().mean().item()
        return batch_dict
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/backbones_2d/__init__.py
|
<reponame>Owen-Liuyuxuan/LIGA-Stereo<gh_stars>10-100
from .base_bev_backbone import BaseBEVBackbone
from .hg_bev_backbone import HgBEVBackbone
# Registry mapping config NAME strings to 2D BEV backbone classes.
__all__ = {
    'BaseBEVBackbone': BaseBEVBackbone,
    'HgBEVBackbone': HgBEVBackbone
}
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/datasets/kitti/stereo_kitti_dataset.py
|
<filename>liga/datasets/kitti/stereo_kitti_dataset.py
# Stereo KITTI Pytorch Dataset (for training Our LIGA model)
import copy
import pickle
import numpy as np
import torch
from skimage import io
from liga.utils import box_utils, calibration_kitti, common_utils, object3d_kitti, depth_map_utils
from liga.datasets.stereo_dataset_template import StereoDatasetTemplate
class StereoKittiDataset(StereoDatasetTemplate):
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        """
        Args:
            dataset_cfg: dataset configuration (easydict-style)
            class_names: list of detected class names
            training: whether this dataset instance feeds training
            root_path: KITTI root directory (contains training/testing/ImageSets)
            logger: logger instance
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.flip = self.dataset_cfg.FLIP
        self.force_flip = getattr(self.dataset_cfg, 'FORCE_FLIP', False)
        self.boxes_gt_in_cam2_view = getattr(self.dataset_cfg, 'BOXES_GT_IN_CAM2_VIEW', False)
        # Van / Person_sitting samples are only merged in during training
        self.use_van = self.dataset_cfg.USE_VAN and training
        self.use_person_sitting = self.dataset_cfg.USE_PERSON_SITTING and training
        self.cat_reflect = self.dataset_cfg.CAT_REFLECT_DIM
        logger.info('boxes_gt_in_cam2_view %s' % self.boxes_gt_in_cam2_view)
        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
        # 'test' split lives under testing/, everything else under training/
        self.root_split_path = self.root_path / \
            ('training' if self.split != 'test' else 'testing')
        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_id_list = [x.strip() for x in open(
            split_dir).readlines()] if split_dir.exists() else None
        self.kitti_infos = []
        self.include_kitti_data(self.mode)
def include_kitti_data(self, mode):
if self.logger is not None:
self.logger.info('Loading KITTI dataset')
kitti_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_infos.extend(infos)
self.kitti_infos.extend(kitti_infos)
assert len(self.sample_id_list) == len(self.kitti_infos)
if self.logger is not None:
self.logger.info('Total samples for KITTI dataset: %d' %
(len(kitti_infos)))
    def set_split(self, split):
        """Switch the dataset to a different split and recompute the sample id list."""
        # NOTE(review): re-runs the template __init__ to reset inherited state
        # before switching splits (OpenPCDet-style pattern) — confirm intent.
        super().__init__(
            dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
        )
        self.split = split
        # 'test' split lives under testing/, everything else under training/
        self.root_split_path = self.root_path / \
            ('training' if self.split != 'test' else 'testing')
        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_id_list = [x.strip() for x in open(
            split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, idx):
lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx)
assert lidar_file.exists(), f"{lidar_file} not found"
return np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
def get_image_shape(self, idx):
img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
assert img_file.exists()
return np.array(io.imread(img_file).shape[:2], dtype=np.int32)
def get_image(self, idx, image_id=2):
img_file = self.root_split_path / \
('image_%s' % image_id) / ('%s.png' % idx)
assert img_file.exists()
return io.imread(img_file).copy()
def get_label(self, idx):
label_file = self.root_split_path / 'label_2' / ('%s.txt' % idx)
assert label_file.exists()
return object3d_kitti.get_objects_from_label(label_file)
def get_calib(self, idx):
calib_file = self.root_split_path / 'calib' / ('%s.txt' % idx)
assert calib_file.exists()
return calibration_kitti.Calibration(calib_file)
def get_road_plane(self, idx):
plane_file = self.root_split_path / 'planes' / ('%s.txt' % idx)
if not plane_file.exists():
return None
with open(plane_file, 'r') as f:
lines = f.readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
# Ensure normal is always facing up, this is in the rectified camera coordinate
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
@staticmethod
def get_fov_flag(pts_rect, img_shape, calib):
"""
Args:
pts_rect:
img_shape:
calib:
Returns:
"""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(
pts_img[:, 0] > 0, pts_img[:, 0] < img_shape[1] - 1)
val_flag_2 = np.logical_and(
pts_img[:, 1] > 0, pts_img[:, 1] < img_shape[0] - 1)
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
    def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None, mode_2d=False):
        """
        Convert raw network outputs into KITTI-format annotation dicts.

        Args:
            batch_dict: batch data providing frame_id, calib(s) and image_shape
            pred_dicts: list of per-sample dicts with
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names: maps 1-based label index to class name
            output_path: when set, one KITTI label .txt file is written per frame
            mode_2d: use the 2D predictions (pred_*_2d keys) instead of the 3D ones
        Returns:
            list with one annotation dict per sample in the batch
        """
        def get_template_prediction(num_samples):
            # Zero-filled KITTI annotation skeleton for num_samples detections.
            ret_dict = {
                'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
                'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
                'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
                'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
                'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
            }
            return ret_dict
        def generate_single_sample_dict(batch_index, box_dict):
            # Convert one sample's 3D predictions into KITTI camera-frame annos.
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy()
            pred_labels = box_dict['pred_labels'].cpu().numpy()
            pred_dict = get_template_prediction(pred_scores.shape[0])
            if pred_scores.shape[0] == 0:
                return pred_dict
            # calib_ori is the unaugmented calibration; fall back to the augmented one.
            calib_aug = batch_dict['calib'][batch_index]
            calib_ori = batch_dict['calib_ori'][batch_index] if 'calib_ori' in batch_dict else calib_aug
            image_shape = batch_dict['image_shape'][batch_index]
            # NOTE: in stereo mode, the 3d boxes are predicted in pseudo lidar coordinates
            pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(
                pred_boxes, None, pseudo_lidar=True, pseduo_cam2_view=self.boxes_gt_in_cam2_view)
            # only for debug, calib.flipped should be False when testing
            if calib_aug.flipped:
                pred_boxes_camera = box_utils.boxes3d_fliplr(pred_boxes_camera, cam_view=True)
            pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
                pred_boxes_camera, calib_ori, image_shape=image_shape,
                fix_neg_z_bug=True
            )
            pred_dict['name'] = np.array(class_names)[pred_labels - 1]
            # Observation angle: box yaw corrected by its bearing from the camera.
            pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
            pred_dict['bbox'] = pred_boxes_img
            pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
            pred_dict['location'] = pred_boxes_camera[:, 0:3]
            pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
            pred_dict['score'] = pred_scores
            # pred_dict['boxes_lidar'] = pred_boxes
            return pred_dict
        def generate_single_2d_sample_dict(batch_index, box_dict):
            # Fill the template from 2D-only predictions; the 3D fields stay zero.
            def to_numpy(x):
                if isinstance(x, np.ndarray):
                    return x
                elif isinstance(x, torch.Tensor):
                    return x.cpu().numpy()
                else:
                    raise ValueError('wrong type of input')
            pred_scores_2d = to_numpy(box_dict['pred_scores_2d'])
            pred_boxes_2d = to_numpy(box_dict['pred_boxes_2d'])
            pred_labels_2d = to_numpy(box_dict['pred_labels_2d'])
            pred_dict = get_template_prediction(pred_scores_2d.shape[0])
            calib = batch_dict['calib'][batch_index]
            # calib_ori = batch_dict['calib_ori'][batch_index] if 'calib_ori' in batch_dict else calib
            if pred_scores_2d.shape[0] == 0:
                return pred_dict
            pred_dict['name'] = np.array(class_names)[pred_labels_2d - 1]
            pred_dict['bbox'] = pred_boxes_2d[:, :4]
            # Shift boxes by the calibration offsets (presumably undoing a crop) — TODO confirm.
            pred_dict['bbox'][:, [0, 2]] += calib.offsets[0]
            pred_dict['bbox'][:, [1, 3]] += calib.offsets[1]
            pred_dict['score'] = pred_scores_2d
            return pred_dict
        annos = []
        for index, box_dict in enumerate(pred_dicts):
            frame_id = batch_dict['frame_id'][index]
            if not mode_2d:
                single_pred_dict = generate_single_sample_dict(index, box_dict)
            else:
                single_pred_dict = generate_single_2d_sample_dict(index, box_dict)
            single_pred_dict['frame_id'] = frame_id
            annos.append(single_pred_dict)
            if output_path is not None:
                # Dump this frame in the official KITTI label text format.
                cur_det_file = output_path / ('%s.txt' % frame_id)
                with open(cur_det_file, 'w') as f:
                    bbox = single_pred_dict['bbox']
                    loc = single_pred_dict['location']
                    dims = single_pred_dict['dimensions']  # lhw -> hwl
                    for idx in range(len(bbox)):
                        print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.8f'
                              % (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
                                 bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
                                 dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0],
                                 loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx],
                                 single_pred_dict['score'][idx]), file=f)
        return annos
    def evaluation(self, det_annos, class_names, eval_metric='3d', **kwargs):
        """Run the official KITTI evaluation of *det_annos* against this split's GT.

        Args:
            det_annos: detection annotation dicts (see generate_prediction_dicts)
            class_names: classes to evaluate
            eval_metric: '2d' evaluates 2D boxes; anything else uses the 3D metric
        Returns:
            (result string, AP dict), or (None, {}) when the split has no GT annos.
        """
        if 'annos' not in self.kitti_infos[0].keys():
            return None, {}
        from .kitti_object_eval_python import eval as kitti_eval
        # Deep-copy so the evaluator may modify the annotations freely.
        eval_det_annos = copy.deepcopy(det_annos)
        eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.kitti_infos]
        if eval_metric == '2d':
            ap_result_str, ap_dict = kitti_eval.get_official_eval_result_2d(
                eval_gt_annos, eval_det_annos, class_names)
        else:
            ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
                eval_gt_annos, eval_det_annos, class_names)
        return ap_result_str, ap_dict
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.kitti_infos) * self.total_epochs
return len(self.kitti_infos)
    def __getitem__(self, index):
        """Build one sample: stereo images, FOV-filtered points in pseudo-lidar
        coordinates, calibration and (when available) GT boxes."""
        assert self.dataset_cfg.FOV_POINTS_ONLY
        # index = 4
        if self._merge_all_iters_to_one_epoch:
            index = index % len(self.kitti_infos)
        # Decide whether to horizontally mirror this sample (train-time
        # augmentation, or always when self.force_flip is set).
        if not self.force_flip:
            if self.flip and self.mode == 'train':
                flip_this_image = np.random.randint(2) > 0.5
            else:
                flip_this_image = False
        else:
            flip_this_image = True
        info = copy.deepcopy(self.kitti_infos[index])
        sample_idx = info['point_cloud']['lidar_idx']
        raw_points = self.get_lidar(sample_idx)
        calib = self.get_calib(sample_idx)
        calib_ori = copy.deepcopy(calib)  # keep the unflipped calibration
        pts_rect = calib.lidar_to_rect(raw_points[:, 0:3])
        reflect = raw_points[:, 3:4]
        if flip_this_image:
            calib.fliplr(info['image']['image_shape'][1])
            pts_rect[:, 0] *= -1
        img_shape = info['image']['image_shape']
        if self.dataset_cfg.FOV_POINTS_ONLY:
            # Keep only the points that project into the image.
            fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
            pts_rect = pts_rect[fov_flag]
            reflect = reflect[fov_flag]
        # load images
        left_img = self.get_image(info['image']['image_idx'], 2)
        right_img = self.get_image(info['image']['image_idx'], 3)
        if flip_this_image:
            # Mirroring swaps the roles of the left and right cameras.
            right_img, left_img = left_img[:, ::-1], right_img[:, ::-1]
        # convert camera-view points into pseudo lidar points
        # see code in calibration_kitti.py
        # right: [x] --> [-y]
        # up: [-y] --> [z]
        # front: [z] --> [x]
        if self.cat_reflect:
            input_points = np.concatenate([calib.rect_to_lidar_pseudo(pts_rect), reflect], 1)
        else:
            input_points = calib.rect_to_lidar_pseudo(pts_rect)
        input_dict = {
            'points': input_points,
            'frame_id': sample_idx,
            'calib': calib,
            'calib_ori': calib_ori,
            'left_img': left_img,
            'right_img': right_img,
            'image_shape': left_img.shape
        }
        if 'annos' in info:
            annos = info['annos']
            if self.use_van:
                # Car 14357, Van 1297
                annos['name'][annos['name'] == 'Van'] = 'Car'
            if self.use_person_sitting:
                # Ped 2207, Person_sitting 56
                annos['name'][annos['name'] == 'Person_sitting'] = 'Pedestrian'
            full_annos = annos
            ignored_annos = common_utils.collect_ignored_with_name(full_annos, name=['DontCare'])  # only bbox is useful
            annos = common_utils.drop_info_with_name(full_annos, name='DontCare')
            loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
            gt_names = annos['name']
            gt_boxes_camera = np.concatenate(
                [loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
            gt_boxes_2d_ignored = ignored_annos['bbox']
            gt_truncated = annos['truncated']
            gt_occluded = annos['occluded']
            gt_difficulty = annos['difficulty']
            gt_index = annos['index']
            image_shape = left_img.shape
            if flip_this_image:
                # Mirror GT boxes so they match the flipped images/points.
                gt_boxes_camera = box_utils.boxes3d_fliplr(gt_boxes_camera, cam_view=True)
                gt_boxes_2d_ignored = box_utils.boxes2d_fliplr(gt_boxes_2d_ignored, image_shape)
            gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(
                gt_boxes_camera, calib, pseudo_lidar=True, pseudo_cam2_view=self.boxes_gt_in_cam2_view)
            input_dict.update({
                'gt_names': gt_names,
                'gt_boxes': gt_boxes_lidar,
                'gt_boxes_2d_ignored': gt_boxes_2d_ignored,
                'gt_truncated': gt_truncated,
                'gt_occluded': gt_occluded,
                'gt_difficulty': gt_difficulty,
                'gt_index': gt_index,
                'image_idx': index
            })
            road_plane = self.get_road_plane(sample_idx)
            if road_plane is not None:
                input_dict['road_plane'] = road_plane
        data_dict = self.prepare_data(data_dict=input_dict)
        data_dict['image_shape'] = img_shape
        return data_dict
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/detectors_lidar/second_net.py
|
# Modified from OpenPCDet. https://github.com/open-mmlab/OpenPCDet
from .lidar_detector3d_template import Detector3DTemplate
class SECONDNet(Detector3DTemplate):
    """SECOND LiDAR detector assembled from the template's module list."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run every sub-module, then return either the pruned batch_dict
        (teacher mode), a loss dict (training) or post-processed predictions."""
        for module in self.module_list:
            batch_dict = module(batch_dict)
        if self.model_cfg.get('RETURN_BATCH_DICT', False):
            # Strip training-only intermediates ...
            drop = ('encoded_spconv_tensor',
                    'encoded_spconv_tensor_stride',
                    'multi_scale_3d_features',
                    'reg_features',
                    'box_cls_labels',
                    'box_reg_targets',
                    'reg_weights',
                    'anchors',
                    'cls_preds_normalized')
            # 'voxels', 'voxel_coords', 'voxel_num_points', 'voxel_features']
            for key in drop:
                batch_dict.pop(key, None)
            # ... and namespace the interesting outputs under 'lidar_outputs'.
            keep = ('spatial_features_stride',
                    'spatial_features',
                    'spatial_features_2d',
                    'volume_features',
                    'spatial_features_2d_prehg',
                    'batch_cls_preds',
                    'batch_box_preds')
            lidar_outputs = {}
            for key in keep:
                if key in batch_dict:
                    lidar_outputs[key] = batch_dict.pop(key)
            batch_dict['lidar_outputs'] = lidar_outputs
            return batch_dict
        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss()
            return {'loss': loss}, tb_dict, disp_dict
        pred_dicts, recall_dicts = self.post_processing(batch_dict)
        return pred_dicts, recall_dicts

    def get_training_loss(self):
        """Collect the RPN loss from the dense head."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {'loss_rpn': loss_rpn.item(), **tb_dict}
        return loss_rpn, tb_dict, disp_dict
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/utils/depth_map_utils.py
|
<filename>liga/utils/depth_map_utils.py
import numpy as np
def points_to_depth_map(pts_rect, img_shape, calib):
    """Rasterize rectified-camera points into a sparse (H, W) float32 depth map.

    Each point is projected with ``calib.rect_to_img`` and written at its
    rounded pixel; points falling outside the image are dropped. Unhit pixels
    stay 0; pixels hit by several points keep one of the writes (in practice
    the last one listed).
    """
    depth_map = np.zeros(img_shape, dtype=np.float32)
    pix, depth = calib.rect_to_img(pts_rect[:, :3])
    cols = np.round(pix[:, 0]).astype(np.int64)
    rows = np.round(pix[:, 1]).astype(np.int64)
    inside = (rows >= 0) & (cols >= 0) & (rows < depth_map.shape[0]) & (cols < depth_map.shape[1])
    depth_map[rows[inside], cols[inside]] = depth[inside]
    return depth_map
|
Owen-Liuyuxuan/LIGA-Stereo
|
liga/models/dense_heads/mmdet_2d_head.py
|
<filename>liga/models/dense_heads/mmdet_2d_head.py
# 2D detection head based on mmdetection.
import numpy as np
import torch
import torch.nn as nn
from mmdet.models.builder import build_head
from mmdet.core import bbox2result
class MMDet2DHead(nn.Module):
    """Wrapper around an mmdetection 2D detection head: computes the training
    loss and, at inference, per-sample 2D box predictions."""

    def __init__(self, model_cfg):
        super(MMDet2DHead, self).__init__()
        self.bbox_head = build_head(model_cfg.cfg)
        self.bbox_head.init_weights()
        # When set, GT 2D boxes are extended with the projected 3D centers.
        self.use_3d_center = model_cfg.use_3d_center
        # if getattr(model_cfg, 'load_from') is not None:
        #     from mmcv.runner.checkpoint import load_state_dict
        #     state_dict = torch.load(model_cfg.load_from, map_location='cpu')['state_dict']
        #     print('loading mmdet head from ', model_cfg.load_from)
        #     load_state_dict(self.bbox_head, {k[10:]: v for k, v in state_dict.items() if k.startswith("bbox_head.")}, strict=False, logger=None)

    def get_loss(self, data_dict, tb_dict):
        """Compute the mmdet head's training loss; per-component scalars are
        recorded in *tb_dict* under 'rpn2d_<name>'. Returns (total_loss, tb_dict)."""
        img_metas = [{
            "image": data_dict['left_img'][i],  # for debug
            "img_shape": list(data_dict['left_img'][i].shape[1:3]) + [3],
            "pad_shape": list(data_dict['left_img'][i].shape[1:3]) + [3]}
            for i in range(len(data_dict['left_img']))]
        if self.use_3d_center:
            gt_boxes_2d = torch.cat([data_dict['gt_boxes_2d'], data_dict['gt_centers_2d']], dim=-1)
        else:
            gt_boxes_2d = data_dict['gt_boxes_2d']
        gt_boxes_2d = torch.unbind(gt_boxes_2d)
        gt_boxes_3d = data_dict['gt_boxes_no3daug']
        gt_labels = torch.unbind(gt_boxes_3d[:, :, 7].long() - 1)  # a list of [N] tensors
        gt_bboxes_2d_ignore = torch.unbind(data_dict['gt_boxes_2d_ignored']) if 'gt_boxes_2d_ignored' in data_dict else None  # a list of [N, 4] tensors
        losses = self.bbox_head.forward_train(data_dict['sem_features'], img_metas, gt_boxes_2d,
                                              gt_labels, gt_bboxes_2d_ignore)
        # mmdet may return each loss as a scalar or as a list of per-level
        # tensors; reduce everything to a single scalar per component.
        for k, v in losses.items():
            if not isinstance(v, (list, tuple)) and len(v.shape) == 0:
                _sum_loss = v
            else:
                _sum_loss = sum(_loss for _loss in v)
            assert len(_sum_loss.shape) == 0
            # if k != 'loss_bbox':
            #     assert len(_sum_loss.shape) == 0
            # else:
            #     assert len(_sum_loss.shape) in [0, 1]
            #     if len(_sum_loss.shape) == 1:
            #         assert _sum_loss.shape[0] < 10
            # if len(_sum_loss.shape) == 1:
            #     for i in range(_sum_loss.shape[0]):
            #         tb_dict['rpn2d_' + k + '_' + str(i)] = _sum_loss[i].item()
            losses[k] = _sum_loss.sum()
            tb_dict['rpn2d_' + k] = losses[k].item()
        loss_sum = sum([v for _, v in losses.items()])
        return loss_sum, tb_dict

    def forward(self, data_dict):
        """Training: pass-through (loss is computed via get_loss).
        Inference: attach per-sample 2D predictions under 'boxes_2d_pred'."""
        if self.training:
            return data_dict
        else:
            img_metas = [{
                "img_shape": list(data_dict['left_img'][i].shape[1:3]) + [3],
                "pad_shape": list(data_dict['left_img'][i].shape[1:3]) + [3],
                "scale_factor": 1.0}  # TODO: scale factor from dataset
                for i in range(len(data_dict['left_img']))]
            outs = self.bbox_head(data_dict['sem_features'])
            data_dict['head_outs'] = outs
            try:
                bbox_list = self.bbox_head.get_bboxes(*outs, img_metas, rescale=False)  # TODO: rescale
                bbox_results = [
                    bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
                    for det_bboxes, det_labels in bbox_list
                ]
                data_dict['boxes_2d_pred'] = []
                for bbox_result in bbox_results:
                    # bbox_result is a per-class list of (N, 5) [x1,y1,x2,y2,score] arrays.
                    pred_dict = {}
                    pred_dict['pred_boxes_2d'] = np.concatenate([x[:, :-1] for x in bbox_result])
                    pred_dict['pred_scores_2d'] = np.concatenate([x[:, -1] for x in bbox_result])
                    pred_dict['pred_labels_2d'] = np.concatenate([[cls_id + 1] * len(x) for cls_id, x in enumerate(bbox_result)]).astype(np.int64)
                    data_dict['boxes_2d_pred'].append(pred_dict)
            except NotImplementedError:
                # Some mmdet heads do not implement get_bboxes; skip gracefully.
                print("not implemented get_bboxes, skip")
            return data_dict
|
rstudio/mlflow
|
examples/hyperparam/search_gpyopt.py
|
<gh_stars>10-100
"""
Example of hyperparameter search in MLflow using GPyOpt.
The run method will instantiate and run GPyOpt optimizer. Each parameter configuration is
evaluated in a new MLflow run invoking main entry point with selected parameters.
The runs are evaluated based on validation set loss. Test set score is calculated to verify the
results.
Several runs can be run in parallel.
"""
import math
import os
import shutil
import tempfile
import click
import GPyOpt
import mlflow
import mlflow.sklearn
import mlflow.tracking
import mlflow.projects
@click.command(help="Perform hyperparameter search with GPyOpt library."
"Optimize dl_train target.")
@click.option("--max-runs", type=click.INT, default=20,
help="Maximum number of runs to evaluate.")
@click.option("--batch-size", type=click.INT, default=8,
help="Number of runs to evaluate in a batch")
@click.option("--max-p", type=click.INT, default=8,
help="Maximum number of parallel runs.")
@click.option("--epochs", type=click.INT, default=32,
help="Number of epochs")
@click.option("--metric", type=click.STRING, default="rmse",
help="Metric to optimize on.")
@click.option("--gpy-model", type=click.STRING, default="GP_MCMC",
help="Optimizer algorhitm.")
@click.option("--gpy-acquisition", type=click.STRING, default="EI_MCMC",
help="Optimizer algorhitm.")
@click.option("--initial-design", type=click.STRING, default="random",
help="Optimizer algorhitm.")
@click.option("--seed", type=click.INT, default=97531,
help="Seed for the random generator")
@click.option("--training-experiment-id", type=click.INT, default=-1,
help="Maximum number of runs to evaluate. Inherit parent;s experiment if == -1.")
@click.argument("training_data")
def run(training_data, max_runs, batch_size, max_p, epochs, metric, gpy_model, gpy_acquisition,
initial_design, seed, training_experiment_id):
bounds = [
{'name': 'lr', 'type': 'continuous', 'domain': (1e-5, 1e-1)},
{'name': 'momentum', 'type': 'continuous', 'domain': (0.0, 1.0)},
]
# create random file to store run ids of the training tasks
tmp = tempfile.mkdtemp()
results_path = os.path.join(tmp, "results.txt")
tracking_service = mlflow.tracking.get_service()
def new_eval(nepochs,
experiment_id,
null_train_loss,
null_valid_loss,
null_test_loss,
return_all=False):
"""
Create a new eval function
:param nepochs: Number of epochs to train the model.
:experiment_id: Experiment id for the training run
:valid_null_loss: Loss of a null model on the validation dataset
:test_null_loss: Loss of a null model on the test dataset.
:return_test_loss: Return both validation and test loss if set.
:return: new eval function.
"""
def eval(params):
"""
Train Keras model with given parameters by invoking MLflow run.
Notice we store runUuid and resulting metric in a file. We will later use these to pick
the best run and to log the runUuids of the child runs as an artifact. This is a
temporary workaround until MLflow offers better mechanism of linking runs together.
:param params: Parameters to the train_keras script we optimize over:
learning_rate, drop_out_1
:return: The metric value evaluated on the validation data.
"""
lr, momentum = params[0]
p = mlflow.projects.run(
uri=".",
entry_point="train",
parameters={
"training_data": training_data,
"epochs": str(nepochs),
"learning_rate": str(lr),
"momentum": str(momentum),
"seed": str(seed)},
experiment_id=experiment_id,
block=False
)
if p.wait():
training_run = tracking_service.get_run(p.run_id)
def get_metric(metric_name):
return [m.value for m in training_run.data.metrics if m.key == metric_name][0]
# cap the loss at the loss of the null model
train_loss = min(null_valid_loss,
get_metric("train_{}".format(metric)))
valid_loss = min(null_valid_loss,
get_metric("val_{}".format(metric)))
test_loss = min(null_test_loss,
get_metric("test_{}".format(metric)))
else:
# run failed => return null loss
tracking_service.set_terminated(p.run_id, "FAILED")
train_loss = null_train_loss
valid_loss = null_valid_loss
test_loss = null_test_loss
mlflow.log_metric("train_{}".format(metric), valid_loss)
mlflow.log_metric("val_{}".format(metric), valid_loss)
mlflow.log_metric("test_{}".format(metric), test_loss)
with open(results_path, "a") as f:
f.write("{runId} {train} {val} {test}\n".format(runId=p.run_id,
train=train_loss,
val=valid_loss,
test=test_loss))
if return_all:
return train_loss, valid_loss, test_loss
else:
return valid_loss
return eval
with mlflow.start_run() as run:
experiment_id = run.info.experiment_id if training_experiment_id == -1 \
else training_experiment_id
# Evaluate null model first.
# We use null model (predict everything to the mean) as a reasonable upper bound on loss.
# We need an upper bound to handle the failed runs (e.g. return NaNs) because GPyOpt can not
# handle Infs.
# Allways including a null model in our results is also a good ML practice.
train_null_loss, valid_null_loss, test_null_loss = new_eval(0,
experiment_id,
math.inf,
math.inf,
math.inf,
True)(params=[[0, 0]])
myProblem = GPyOpt.methods.BayesianOptimization(new_eval(epochs,
experiment_id,
train_null_loss,
valid_null_loss,
test_null_loss),
bounds,
evaluator_type=
"local_penalization" if min(batch_size,
max_p) > 1
else "sequential",
batch_size=batch_size,
num_cores=max_p,
model_type=gpy_model,
acquisition_type=gpy_acquisition,
initial_design_type=initial_design,
initial_design_numdata=max_runs >> 2,
exact_feval=False)
myProblem.run_optimization(max_runs)
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
plt.switch_backend('agg')
acquisition_plot = os.path.join(tmp, "acquisition_plot.png")
convergence_plot = os.path.join(tmp, "convergence_plot.png")
myProblem.plot_acquisition(filename=acquisition_plot)
myProblem.plot_convergence(filename=convergence_plot)
if os.path.exists(convergence_plot):
mlflow.log_artifact(convergence_plot, "converegence_plot")
if os.path.exists(acquisition_plot):
mlflow.log_artifact(acquisition_plot, "acquisition_plot")
best_val_train = math.inf
best_val_valid = math.inf
best_val_test = math.inf
best_run = None
# we do not have tags yet, for now store the list of executed runs as an artifact
mlflow.log_artifact(results_path, "training_runs")
with open(results_path) as f:
for line in f.readlines():
run_id, str_val, str_val2, str_val3 = line.split(" ")
val = float(str_val2)
if val < best_val_valid:
best_val_train = float(str_val)
best_val_valid = val
best_val_test = float(str_val3)
best_run = run_id
# record which run produced the best results, store it as a param for now
best_run_path = os.path.join(os.path.join(tmp, "best_run.txt"))
with open(best_run_path, "w") as f:
f.write("{run_id} {val}\n".format(run_id=best_run, val=best_val_valid))
mlflow.log_artifact(best_run_path, "best-run")
mlflow.log_metric("train_{}".format(metric), best_val_train)
mlflow.log_metric("val_{}".format(metric), best_val_valid)
mlflow.log_metric("test_{}".format(metric), best_val_test)
shutil.rmtree(tmp)
if __name__ == '__main__':
    # CLI entry point; click handles argument parsing.
    run()
|
Shams261/IOSD-UIETKUK-HacktoberFest-Meetup-2019
|
Intermediate/bubble_sort002.py
|
<filename>Intermediate/bubble_sort002.py
def bubblesort(A):
    """Sort list *A* in place in ascending order (bubble sort, O(n^2))."""
    for i in range(len(A)):
        # Bubble the smallest remaining element down to position i.
        for k in range(len(A) - 1, i, -1):
            if A[k] < A[k - 1]:
                A[k], A[k - 1] = A[k - 1], A[k]
def swap(A, x, y):
    """Exchange the elements of *A* at indices *x* and *y* in place."""
    A[x], A[y] = A[y], A[x]
# Demo: sort a sample list and show the result.
data = [534, 689, 235, 124, 525, 216, 134, 356]
bubblesort(data)
print(data)
|
toxinu/spilleliste
|
setup.py
|
#!/usr/bin/env python2
# coding: utf-8
import os
import sys
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version():
    """Return __version__ parsed from spilleliste/__init__.py.

    Fix: the file handle is now closed via a context manager (it was
    previously left open until garbage collection).
    """
    VERSIONFILE = 'spilleliste/__init__.py'
    VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
    with open(VERSIONFILE, 'rt') as fp:
        for line in fp:
            mo = re.search(VSRE, line, re.M)
            if mo:
                return mo.group(1)
    raise RuntimeError('Unable to find version string in %s.' % (VERSIONFILE,))
# Convenience release hook: `python setup.py publish` uploads an sdist.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()
setup(
    name='spilleliste',
    version=get_version(),
    description="Spilleliste, share your beautiful playlist.",
    long_description=open('README.rst').read(),
    license=open('LICENSE').read(),
    author='toxinu',
    author_email='<EMAIL>',
    url='https://github.com/toxinu/spilleliste',
    keywords='music',
    packages=[
        'spilleliste',
        'spilleliste.externals',
        'spilleliste.connectors'],
    scripts=['scripts/spilleliste'],
    # FIX: 'requests' was listed twice in install_requires.
    install_requires=[
        'requests',
        'docopt',
        'jinja2',
        'isit'
    ],
    include_package_data=True,
    classifiers=(
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7')
)
|
toxinu/spilleliste
|
spilleliste/generator.py
|
<gh_stars>0
# -*- coding: utf-8 -*-
import os
import requests
import codecs
from spilleliste.core import Playlist
from spilleliste.core import Artist
from spilleliste.core import Album
from spilleliste.core import Track
from spilleliste.externals import googleimage
from spilleliste.externals import spotify
from spilleliste.externals import youtube
from jinja2 import Template
def run(raw_playlists, args, template_name="default", template_uri=False):
    """Render *raw_playlists* ({name: [(artist, album, title), ...]}) into a
    single HTML page (spilleliste.html in the CWD), resolving external
    Spotify/YouTube URIs and optional background images.

    NOTE(review): template_name and template_uri are currently unused; the
    template comes from --template-url or the project's GitHub raw file.
    """
    # Playlists
    playlists = []
    print('=> Building playlists...')
    print('=> Retrieving externals uri (could be long)...')
    for playlist_name, tracks in raw_playlists.items():
        playlist = Playlist(playlist_name)
        playlist_external_uri = []
        for track in tracks:
            artist = unicode(track[0])
            album = unicode(track[1])
            title = unicode(track[2])
            track_infos = {}
            provider = "unknown"
            external_uri = ""
            if not args.get('--no-spotify'):
                provider = "spotify"
                external_uri = spotify.search(artist, album, title)
            if not args.get('--no-youtube'):
                # Fall back to YouTube when Spotify found nothing; otherwise
                # collect the Spotify track id for the playlist-level URI.
                if not external_uri:
                    provider = "youtube"
                    external_uri = youtube.search(artist, album, title)
                else:
                    playlist_external_uri.append(external_uri.split(':')[-1])
            track_infos = {u'provider': provider, u'url': external_uri}
            playlist.add_track(artist, album, title, track_infos = track_infos)
        if playlist_external_uri:
            playlist.set_external_uri(playlist_external_uri)
        playlists.append(playlist)
    # Get backgrounds
    backgrounds = []
    if not args.get('--no-background'):
        print('=> Retrieving externals images (could be long)...')
        backgrounds = googleimage.search(playlists, int(args.get('--nb-background', 5)))
    # Fetch the page template (default GitHub raw file, or --template-url).
    if not args.get('--template-url'):
        page_template = requests.get('https://raw.github.com/toxinu/spilleliste/master/one_page.html').text
    elif args.get('--template-url'):
        page_template = requests.get(args.get('--template-url')).text
    template = Template(page_template)
    output_content = template.render(playlists=playlists, backgrounds=backgrounds)
    output_file_name = '%s/spilleliste.html' % os.getcwd()
    output_file = codecs.open(output_file_name, mode='w', encoding='utf-8')
    output_file.write(output_content)
    print(' :: Done (%s)' % output_file_name)
|
toxinu/spilleliste
|
spilleliste/connectors/googlemusic.py
|
# -*- coding: utf-8 -*-
try:
from gmusicapi import Mobileclient
except ImportError:
raise Exception('You need gmusicapi python package (pip install gmusicapi)')
def get_playlists(username, password):
    """Log into Google Music and return
    {playlist_name: [(artist, album, title), ...]}."""
    api = Mobileclient(debug_logging=False)
    api.login(username, password)
    playlists = {}
    for playlist in api.get_all_playlists():
        entries = api.get_shared_playlist_contents(playlist['shareToken'])
        tracks = []
        for entry in entries:
            meta = entry['track']
            tracks.append((meta['artist'], meta['album'], meta['title']))
        playlists[playlist['name']] = tracks
    return playlists
|
toxinu/spilleliste
|
spilleliste/__init__.py
|
# coding: utf-8
# Package metadata; __version__ is parsed by setup.py's get_version().
__title__ = 'spilleliste'
__version__ = '0.1.2'
__author__ = 'toxinu'
__license__ = ''
__copyright__ = 'Copyright 2013 toxinu'
|
toxinu/spilleliste
|
spilleliste/core.py
|
<reponame>toxinu/spilleliste<filename>spilleliste/core.py
# -*- coding: utf-8 -*-
class Track(object):
    """A single track: a name plus a free-form info mapping."""
    def __init__(self, name, infos):
        self.name = name
        self.infos = infos
class Album(object):
    """An album: a name, a free-form info mapping and its Track list."""

    def __init__(self, name, infos):
        self.name = name
        self.tracks = []
        self.infos = infos

    def add_track(self, track, track_infos=None):
        """Append *track* (a name) as a Track.

        Fix: the default was a shared mutable ``{}``; ``None`` avoids
        accidental state sharing between tracks.
        """
        self.tracks.append(Track(track, {} if track_infos is None else track_infos))
class Artist(object):
    """An artist with a list of Album objects."""

    def __init__(self, name, infos):
        self.name = name
        self.albums = []
        # Snapshot taken at construction (always empty here); attribute kept
        # for backward compatibility.
        self.tracks = self.get_tracks()
        self.infos = infos

    def add_track(self, album, track, album_infos=None, track_infos=None):
        """Add *track* to *album*, creating the album on first sight.

        Fix: mutable ``{}`` defaults replaced with ``None`` sentinels.
        """
        album_infos = {} if album_infos is None else album_infos
        track_infos = {} if track_infos is None else track_infos
        for existing in self.albums:
            if existing.name == album:
                existing.add_track(track, track_infos)
                return
        new_album = Album(album, album_infos)
        new_album.add_track(track, track_infos)
        self.albums.append(new_album)

    def get_tracks(self):
        """Return one track list per album.

        Fix: Album has no get_tracks(); reading ``.tracks`` directly avoids
        the AttributeError the original call would raise.
        """
        return [album.tracks for album in self.albums]
class Playlist(object):
    """A named playlist grouping Artist objects, with an optional external URI."""

    def __init__(self, name, external_uri='#'):
        self.name = name
        self.artists = []
        self.external_uri = external_uri
        # Snapshot taken at construction (always empty here); attribute kept
        # for backward compatibility.
        self.tracks = self.get_tracks()

    def add_track(self, artist, album, track, artist_infos=None, album_infos=None, track_infos=None):
        """Add *track* under *artist*/*album*, creating the artist on first sight.

        Fix: mutable ``{}`` defaults replaced with ``None`` sentinels.
        """
        artist_infos = {} if artist_infos is None else artist_infos
        album_infos = {} if album_infos is None else album_infos
        track_infos = {} if track_infos is None else track_infos
        for existing in self.artists:
            if existing.name == artist:
                existing.add_track(album, track, album_infos, track_infos)
                return
        new_artist = Artist(artist, artist_infos)
        new_artist.add_track(album, track, album_infos, track_infos)
        self.artists.append(new_artist)

    def get_tracks(self):
        """Return one entry per artist with that artist's track lists."""
        return [artist.get_tracks() for artist in self.artists]

    def get_artists(self):
        """Return a shallow copy of the artist list."""
        return list(self.artists)

    def set_external_uri(self, tracks):
        """Build a Spotify trackset URI from a non-empty list of track ids."""
        self.external_uri = 'spotify:trackset:PREFERED:%s' % ','.join(tracks)
|
toxinu/spilleliste
|
spilleliste/externals/spotify.py
|
# -*- coding: utf-8 -*-
import requests
def search(artist, album, title):
    """Query the Spotify web search API for *artist* *title* and return the
    most popular match's href, or False when nothing is found or the
    response is not valid JSON."""
    query = "%s %s" % (artist, title)
    response = requests.get('http://ws.spotify.com/search/1/track.json?q=%s' % query)
    try:
        payload = response.json()
    except ValueError:
        return False
    best = None
    if payload:
        for candidate in payload['tracks']:
            # Keep the first track seen among those with the highest popularity.
            if best is None or best['popularity'] < candidate['popularity']:
                best = candidate
    return best['href'] if best is not None else False
|
toxinu/spilleliste
|
spilleliste/externals/youtube.py
|
# -*- coding: utf-8 -*-
def search(artist, album, title):
    """Return a YouTube search-results URL for the given track metadata.

    No API call is made; the terms are simply joined with '+' into a URL.
    """
    return u"https://www.youtube.com/results?search_query={0}+{1}+{2}".format(
        artist, album, title)
|
toxinu/spilleliste
|
spilleliste/externals/googleimage.py
|
# -*- coding: utf-8 -*-
import random
import requests
def search(playlists, nb_background=5):
    """Fetch up to *nb_background* album-art image URLs via Google image search.

    Random (artist, album) pairs are sampled from the playlists; an artist
    whose lookup fails is dropped from the pool and the batch is retried.
    NOTE(review): random.sample raises ValueError if the pool shrinks below
    the remaining count — confirm intended behaviour for small libraries.
    """
    artists = []
    for playlist in playlists:
        artists += playlist.get_artists()
    background_found = 0
    backgrounds = []
    while background_found < nb_background:
        for artist in random.sample(artists, nb_background - background_found):
            search_terms = u'%s %s music' % (artist.name, random.choice(artist.albums).name)
            #print(' - %s ' % search_terms)
            r = requests.get('https://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=%s&imgsz=xxlarge' % search_terms.replace(' ', '+'))
            data = r.json()
            try:
                backgrounds.append(data['responseData']['results'][0]['url'])
                background_found += 1
            except Exception as err:
                # Drop the failing artist and restart the sampling batch.
                artists.remove(artist)
                break
    return backgrounds
|
toxinu/spilleliste
|
spilleliste/connectors/itunes.py
|
<reponame>toxinu/spilleliste
# -*- coding: utf-8 -*-
import isit
if not isit.osx:
raise Exception('Only available on OSX')
import xml.etree.ElementTree as ET
try:
from appscript import *
except ImportError:
raise Exception('You need appscript python package (pip install appscript)')
iTunes = app('iTunes')  # appscript handle to the running iTunes application
def get_library():
    """Return the main iTunes 'Library' playlist."""
    return iTunes.library_playlists['Library']
def get_folders():
    """Return all iTunes folder playlists."""
    return iTunes.folder_playlists()
def count_folder(folder):
    """Return the number of playlists contained in *folder*."""
    return folder.count(each=k.playlist)
def get_playlists():
    """Return {playlist_name: [(artist, album, title), ...]} for every user
    playlist except the built-in iTunes ones listed in *skip*."""
    skip = [
        u'Party Shuffle',
        u'90\u2019s Music',
        u'Movies',
        u'Music',
        u'Music Videos',
        u'My Top Rated',
        u'Recently Added',
        u'Recently Played',
        u'Top 25 Most Played',
        u'TV Shows',
        u'Podcasts',
        u'Genius',
    ]
    raw_playlists = [x for x in iTunes.user_playlists() if x.name() not in skip]
    playlists = {}
    for playlist in raw_playlists:
        name = playlist.name()
        tracks = []
        playlist_tracks = catalog_tracks(playlist)
        for track in playlist_tracks:
            # catalog_tracks keys are (artist, album, name) tuples.
            artist = unicode(track[0])
            album = unicode(track[1])
            track_name = unicode(track[2])
            tracks.append((artist, album, track_name))
        playlists[name] = tracks
    return playlists
def get_tracks(playlist):
    """Return the playlist's file tracks (appscript reference list)."""
    return playlist.file_tracks()
def get_parent(playlist):
    """Return the playlist's parent folder, or None when it has none.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    ``except Exception`` keeps the original behaviour for appscript errors
    while letting those propagate.
    """
    try:
        return playlist.parent()
    except Exception:
        return None
def list_names(list, out):
    """Write each element's name(), one per line, to *out*."""
    for element in list:
        out.write('%s\n' % element.name())
def check_track(track):
    """Return True when the track's backing file exists on disk, else False.

    Fixes: the original called the undefined name ``throw`` (a NameError at
    runtime) instead of ``raise``, and this module never imported ``os``.
    """
    import os  # local import: the module does not import os at top level
    try:
        path = track.location().path
        if not os.path.isfile(path):
            raise AttributeError
        return True
    except AttributeError:
        return False
def delete_missing(log):
    """Remove library tracks whose backing file is missing, logging each removal."""
    library = get_library()
    for track in get_tracks(library):
        if not check_track(track):
            log.write('- %s - %s\n' % (track.artist(), track.name()))
            library.delete(track)
def freeze_tracks(tracks):
    """Hashable identity for a track list: the frozenset of database IDs."""
    return frozenset(track.database_ID() for track in tracks)
def catalog_playlists(playlists):
    """Map each playlist's frozen track-ID set to the playlist object."""
    return {freeze_tracks(get_tracks(playlist)): playlist for playlist in playlists}
def catalog_folders(folders):
    """Map folder name to folder object."""
    return {folder.name(): folder for folder in folders}
def track_tuple(keys):
    """Tuple of *keys* with every falsy entry replaced by 'Unknown'."""
    return tuple(key if key else 'Unknown' for key in keys)
def catalog_tracks(playlist, log=None):
    """Index the playlist's tracks by (artist, album, name).

    Duplicate keys keep the first track seen; duplicates are optionally logged.
    """
    tracks = get_tracks(playlist)
    catalog = dict()
    for track in tracks:
        # Prefer the album artist, falling back to the track artist.
        artist = track.album_artist()
        if not artist:
            artist = track.artist()
        album = track.album()
        name = track.name()
        key = track_tuple([artist, album, name])
        if key in catalog:
            if log:
                log.write('? duplicate track: %s - %s - %s\n' % key)
        else:
            catalog[key] = track
    return catalog
class setdict(dict):
    """dict whose values are sets; add() creates the set on first use."""

    def add(self, key, val):
        self.setdefault(key, set()).add(val)
class setdict2(setdict):
    """Two-level variant: key1 -> setdict mapping key2 -> set of values."""

    def add(self, key1, key2, val):
        inner = self.get(key1)
        if inner is None:
            inner = setdict()
            self[key1] = inner
        inner.add(key2, val)
def file_artist(track):
    """Derive the artist from the track file's directory layout
    (.../Artist/Album/track); a 'Compilations' folder maps to 'Various'.

    Returns None when the track has no local file (location() raises
    AttributeError). Fix: this module never imported ``os``.
    """
    import os  # local import: the module does not import os at top level
    try:
        path = track.location().path
        artist = os.path.split(path)[0].split('/')[-2]
        return artist if not artist == 'Compilations' else 'Various'
    except AttributeError:
        return None
def album_artist(tracks, log, album):
    """Pick a single display artist for an album's tracks.

    Preference order: unanimous album-artist tag, unanimous track artist,
    'Various' when all tracks are flagged as compilation, a unanimous
    file-path artist, else 'Unknown'. Ambiguous cases are logged.
    """
    artists = set([x.artist() for x in tracks])
    album_artists = set([x.album_artist() for x in tracks])
    compilations = set([x.compilation() for x in tracks])
    file_artists = set([file_artist(x) for x in tracks])
    # True only when *every* track carries the compilation flag.
    compilation = compilations == set([True])
    if len(album_artists) == 1 and album_artists != set(['']):
        artist = album_artists.pop()
        if compilation:
            log.write('Album %s (%s) is compilation\n' % (album, artist))
    elif len(artists) == 1 and artists != set(['']):
        artist = artists.pop()
        if compilation:
            log.write('Album %s (%s) is compilation\n' % (album, artist))
    elif compilations == set([True]):
        artist = 'Various'
    elif len(file_artists) == 1:
        artist = file_artists.pop()
        log.write('Using file_artist for album %s (%s)\n' % (album, artist))
    else:
        artist = 'Unknown'
        log.write('Cannot determine artist for album %s\n' % album)
    return artist
def collect_albums(playlist):
    """Group a playlist's tracks into a two-level map: artist -> album -> tracks.

    Tracks whose artist cannot be derived from the file path are skipped.
    """
    artists = setdict2()
    for track in get_tracks(playlist):
        artist = file_artist(track)
        if artist:
            album = track.album()
            artists.add(artist, album, track)
    return artists
def year_string(tracks):
    """Format the distinct release years of *tracks* for display.

    '-' when no track has a year, '(y)' for one year, '(y1,y2)' for two,
    and '(first-last)' for a longer span.
    """
    distinct = sorted(set(t.year() for t in tracks if t.year()))
    if not distinct:
        return '-'
    if len(distinct) == 1:
        return '(%d)' % distinct[0]
    if len(distinct) == 2:
        return '(%d,%d)' % tuple(distinct)
    return '(%d-%d)' % (distinct[0], distinct[-1])
def title_key(pair):
    """Sort key for (title, ...) pairs: lowercase, ignoring a leading article."""
    key = pair[0].lower()
    for article in ('a ', 'the '):
        if key.startswith(article):
            return key[len(article):]
    return key
def track_key(track):
    # Sort key: disc number first, then track number within the disc.
    return (track.disc_number(), track.track_number())
def list_albums(out, log):
    """Write an artist/album listing of the whole library to *out*.

    NOTE(review): *log* is currently unused; kept, presumably for signature
    parity with the other listing helpers -- confirm before removing.
    """
    # Python 2: dict.items() returns a sortable list.
    artists = collect_albums(get_library()).items()
    artists.sort(key=title_key)
    for artist, albums in artists:
        out.write('%s\n' % artist)
        albums = albums.items()
        albums.sort(key=title_key)
        for album, tracks in albums:
            years = year_string(tracks)
            out.write('\t%s %s\n' % (album, years))
        out.write('\n')
def list_playlists(out):
    # Write every user playlist name to *out*, one per line.
    list_names(get_playlists(), out)
def list_folders(out):
    # Write every playlist-folder name to *out*, one per line.
    list_names(get_folders(), out)
def list_album_playlists(out):
    """Write the names of playlists whose track sets exactly match an album."""
    artists = collect_albums(get_library())
    catalog = catalog_playlists(get_playlists())
    for artist in artists:
        albums = artists[artist]
        for album in albums:
            # An album "is" a playlist when their track-ID fingerprints match.
            key = freeze_tracks(albums[album])
            if key in catalog:
                out.write('%s\n' % catalog[key].name())
def make(kind, name, loc=iTunes):
    # Create a new iTunes object (playlist or folder) named *name* at *loc*.
    # `k` is the appscript keyword namespace; `iTunes` is the app handle.
    return iTunes.make(new=kind, at=loc, with_properties={k.name: name})
def check_tracks(tracks, artist, album, log):
    """Sanity-check an album's disc/track numbering and log inconsistencies.

    Verifies a single non-zero disc count, one consistent non-zero track
    count per disc, and contiguous track numbers 1..track_count. The
    `error` variable labels which assertion stage failed for the log line.
    """
    try:
        error = "disc count"
        disc_count = set([x.disc_count() for x in tracks])
        assert len(disc_count) == 1
        disc_count = disc_count.pop()
        assert disc_count != 0
        # One bucket of observed counts/numbers per disc.
        track_counts = [set() for x in range(0,disc_count)]
        track_numbers = [list() for x in range(0,disc_count)]
        for track in tracks:
            n = track.disc_number()
            track_counts[n-1].add(track.track_count())
            track_numbers[n-1].append(track.track_number())
        for track_count, track_number in zip(track_counts, track_numbers):
            error = "missing disc"
            assert len(track_count) > 0
            error = "track count"
            assert len(track_count) == 1
            track_count = track_count.pop()
            assert track_count != 0
            error = "track numbers"
            track_number.sort()
            # Python 2: range() returns a list, so this equality holds.
            assert track_number == range(1,track_count+1)
    except AssertionError:
        log.write('! %s - %s (%s)\n' % (artist, album, error))
def add_tracks(tracks, playlist, log):
    """Copy *tracks* into *playlist* in disc/track order.

    NOTE(review): *log* is unused here; kept for signature parity.
    """
    tracks = list(tracks)
    tracks.sort(key=track_key)
    for track in tracks:
        iTunes.duplicate(track, to=playlist)
def delete_playlist(folder, log, index=None):
    """Delete a playlist/folder, log it, and drop it from *index* if given.

    NOTE(review): `if index` is falsy for an *empty* dict too; presumably
    `index is not None` was intended -- confirm before relying on it.
    """
    name = folder.name()
    folder.delete()
    if index:
        del index[name]
    log.write('- %s\n' % name)
def make_album_playlists(log):
    """Create/refresh one playlist per album, grouped into per-artist folders.

    An existing playlist with the same track set is kept only when its name
    and parent folder already match; otherwise it is deleted and rebuilt.
    Emptied folders are removed. Progress is logged with +/- prefixes.
    """
    artists = collect_albums(get_library())
    playlists = catalog_playlists(get_playlists())
    folders = catalog_folders(get_folders())
    for artist, albums in artists.iteritems():
        if artist not in folders:
            folder = make(k.folder_playlist, artist)
            log.write('+ %s\n' % artist)
            folders[artist] = folder
        else:
            folder = folders[artist]
        for album, tracks in albums.iteritems():
            check_tracks(tracks, artist, album, log)
            artist2 = album_artist(tracks, log, album)
            years = year_string(tracks)
            # Playlist naming convention: "<artist> <(years)> <album>".
            name = "%s %s %s" % (artist2, years, album)
            key = freeze_tracks(albums[album])
            if key in playlists:
                playlist = playlists[key]
                name2 = playlist.name()
                folder2 = get_parent(playlist)
                if folder2 and folder == folder2 and name == name2:
                    # Already up to date; keep the existing playlist.
                    continue
                delete_playlist(playlist, log)
                # count_folder() is defined elsewhere in this file; it
                # evidently reports how many playlists remain in a folder.
                if folder2 != folder and not count_folder(folder2):
                    delete_playlist(folder2, log, index=folders)
            playlist = make(k.user_playlist, name, loc=folder)
            add_tracks(tracks, playlist, log)
            log.write('+ %s\n' % name)
def dict_from_plist(plist):
    """Recursively convert a plist <dict> element into a Python dict.

    Children alternate <key> elements and value elements; nested <dict>
    and <array> values are converted recursively, everything else keeps
    its element text.
    """
    result = dict()
    for key, val in zip(plist[::2], plist[1::2]):
        assert key.tag == 'key'
        if val.tag == 'dict':
            result[key.text] = dict_from_plist(val)
        elif val.tag == 'array':
            result[key.text] = [dict_from_plist(x) for x in val]
        else:
            result[key.text] = val.text
    return result
def dict_from_xml_playlist(xmlfile):
    """Parse an iTunes library XML export into nested dicts.

    Expects the standard plist layout: a <plist> root wrapping one <dict>.
    """
    tree = ET.parse(xmlfile)
    assert tree.getroot().tag == 'plist'
    plist = tree.find('dict')
    xml = dict_from_plist(plist)
    return xml
def catalog_xml_playlists(xmlfile):
    """Extract (playlist name, [(artist, album, name), ...]) pairs from an
    iTunes library XML export.

    Playlists without a 'Playlist Items' entry (e.g. empty or smart lists)
    are skipped. Track keys are normalized via track_tuple, preferring
    'Album Artist' over 'Artist'.
    """
    xml = dict_from_xml_playlist(xmlfile)
    catalog = list()
    library = xml['Tracks']
    playlists = xml['Playlists']
    for playlist in playlists:
        if not 'Playlist Items' in playlist:
            continue
        tracks = list()
        track_IDs = [x['Track ID'] for x in playlist['Playlist Items']]
        for ID in track_IDs:
            # Track IDs index the 'Tracks' dict parsed from the same file.
            track = library[ID]
            if 'Album Artist' in track:
                artist = track['Album Artist']
            else:
                artist = track['Artist'] if 'Artist' in track else None
            album = track['Album'] if 'Album' in track else None
            name = track['Name'] if 'Name' in track else None
            key = track_tuple([artist, album, name])
            tracks.append(key)
        catalog.append((playlist['Name'], tracks))
    return catalog
def write_playlist(playlist, tracks, out):
    """Write a blank-line-framed playlist header, then one
    'artist - album - name' line per track tuple."""
    out.write('\n%s\n\n' % playlist)
    lines = ('%s - %s - %s\n' % track for track in tracks)
    for line in lines:
        out.write(line)
def list_xml_playlists(xmlfile, out):
    # Dump every playlist from an iTunes XML export to *out*.
    catalog = catalog_xml_playlists(xmlfile)
    for playlist, tracks in catalog:
        write_playlist(playlist, tracks, out)
def diff_playlists(xmlfile1, xmlfile2, out1, out2):
    """Diff the playlists of two library XML exports.

    For each playlist of file 1, writes tracks missing from file 2 to
    *out1* and tracks only in file 2 to *out2*.

    NOTE(review): playlists present only in xmlfile2 are never visited --
    confirm whether that asymmetry is intentional.
    """
    catalog1 = dict(catalog_xml_playlists(xmlfile1))
    catalog2 = dict(catalog_xml_playlists(xmlfile2))
    for playlist, tracks1 in catalog1.iteritems():
        set1 = set(tracks1)
        set2 = set(catalog2[playlist]) if playlist in catalog2 else set()
        diff1 = list(set1 - set2)
        diff2 = list(set2 - set1)
        diff1.sort()
        diff2.sort()
        write_playlist(playlist, diff1, out1)
        write_playlist(playlist, diff2, out2)
def get_args(count, usage):
    # Return exactly *count* command-line arguments, or print usage and exit.
    # (Python 2 print statement; this module is Python 2 throughout.)
    argv = sys.argv
    if len(argv) == count + 1:
        return argv[1:]
    else:
        print 'usage: %s %s' % (argv[0], usage)
        exit(0)
def open_args(count, usage, start=1):
    # Open the command-line arguments from position *start* onward as UTF-8
    # output files; print usage and exit unless exactly *count* args given.
    argv = sys.argv
    if len(argv) == count + 1:
        return [codecs.open(x, 'w', 'utf-8') for x in argv[start:]]
    else:
        print 'usage: %s %s' % (argv[0], usage)
        exit(0)
def open_arg(usage):
    # Convenience wrapper: open the single expected argument for writing.
    return open_args(1, usage)[0]
|
Sourodip-ghosh123/Fruits-360
|
ResNet50 V2/resnet50_v2_model.py
|
from keras.applications.resnet_v2 import ResNet50V2
model=ResNet50V2(include_top=True, weights=None, input_tensor=None, input_shape=(100,100,3),classes=41)
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Compiled!')
from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D
from keras.layers import Activation, Dense, Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras import backend as K
batch_size = 50
checkpointer = ModelCheckpoint(filepath = 'cnn_from_scratch_fruits.hdf5', save_best_only = True)
history = model.fit(x_train,y_train,
batch_size = 50,
epochs=15,
validation_data=(x_valid, y_vaild),
callbacks = [checkpointer],
shuffle=True
)
model.load_weights('cnn_from_scratch_fruits.hdf5')
score = model.evaluate(x_test, y_test, verbose=0)
print('\n', 'Test accuracy:', score[1])
import matplotlib.pyplot as plt
# Plot training & validation accuracy values
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
|
Sourodip-ghosh123/Fruits-360
|
CNN/cnn_model.py
|
from keras.preprocessing.image import array_to_img, img_to_array, load_img
def convert_image_to_array(files):
    """Load each image file path in *files* into a Keras image array.

    NOTE(review): images are loaded at native size; downstream layers assume
    100x100x3 inputs -- confirm the dataset images are already that size.
    """
    images_as_array=[]
    for file in files:
        # Convert to Numpy Array
        images_as_array.append(img_to_array(load_img(file)))
    return images_as_array
x_train = np.array(convert_image_to_array(x_train))
print('Training set shape : ',x_train.shape)
x_valid = np.array(convert_image_to_array(x_valid))
print('Validation set shape : ',x_valid.shape)
x_test = np.array(convert_image_to_array(x_test))
print('Test set shape : ',x_test.shape)
from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D
from keras.layers import Activation, Dense, Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras import backend as K
model=Sequential()
model.add(Conv2D(filters = 16, kernel_size = 2,input_shape=(100,100,3),padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters = 32,kernel_size = 2,activation= 'relu',padding='same'))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters = 64,kernel_size = 2,activation= 'relu',padding='same'))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters = 128,kernel_size = 2,activation= 'relu',padding='same'))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters = 256,kernel_size = 2,activation= 'relu',padding='same'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(150))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(41,activation = 'softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Compiled!')
from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D
from keras.layers import Activation, Dense, Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras import backend as K
batch_size = 50
checkpointer = ModelCheckpoint(filepath = 'cnn_from_scratch_fruits.hdf5', verbose = 1, save_best_only = True)
history = model.fit(x_train,y_train,
batch_size = 50,
epochs=15,
validation_data=(x_valid, y_vaild),
callbacks = [checkpointer],
verbose=2,shuffle=True
)
model.load_weights('cnn_from_scratch_fruits.hdf5')
score = model.evaluate(x_test, y_test, verbose=0)
print('\n', 'Test accuracy:', score[1])
import matplotlib.pyplot as plt
# Plot training & validation accuracy values
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
|
Sourodip-ghosh123/Fruits-360
|
ResNet50 V2/pre-processing.py
|
<gh_stars>0
from sklearn.datasets import load_files
import numpy as np
train_dir = '../input/fruits/Fruits_1/Training'
test_dir = '../input/fruits/Fruits_1/Test'
def load_dataset(path):
    """Load an image-folder dataset via sklearn's load_files.

    Returns (file paths, integer targets, target label names) as arrays.
    """
    data = load_files(path)
    files = np.array(data['filenames'])
    targets = np.array(data['target'])
    target_labels = np.array(data['target_names'])
    return files,targets,target_labels
x_train, y_train,target_labels = load_dataset(train_dir)
x_test, y_test,_ = load_dataset(test_dir)
print('Loading complete!')
print('Training set size : ' , x_train.shape[0])
print('Testing set size : ', x_test.shape[0])
no_of_classes = len(np.unique(y_train))
no_of_classes
print(y_train[0:10])
from keras.utils import np_utils
y_train = np_utils.to_categorical(y_train,no_of_classes)
y_test = np_utils.to_categorical(y_test,no_of_classes)
y_train[0]
x_test,x_valid = x_test[3500:],x_test[:3500]
y_test,y_vaild = y_test[3500:],y_test[:3500]
print('Vaildation X : ',x_valid.shape)
print('Vaildation y :',y_vaild.shape)
print('Test X : ',x_test.shape)
print('Test y : ',y_test.shape)
from keras.preprocessing.image import array_to_img, img_to_array, load_img
def convert_image_to_array(files):
images_as_array=[]
for file in files:
# Convert to Numpy Array
images_as_array.append(img_to_array(load_img(file)))
return images_as_array
x_train = np.array(convert_image_to_array(x_train))
print('Training set shape : ',x_train.shape)
x_valid = np.array(convert_image_to_array(x_valid))
print('Validation set shape : ',x_valid.shape)
x_test = np.array(convert_image_to_array(x_test))
print('Test set shape : ',x_test.shape)
print('1st training image shape ',x_train[0].shape)
|
icarus523/xml_naming_convention_validator
|
xml_naming_convention_validator.py
|
import xml.etree.ElementTree as ET
import re
fname = "5101000843.xml"
class xml_naming_convention_validator:
    """Parse a product XML file (module-level `fname`) and check hardware/
    software names against per-type naming conventions via keyword regexes.

    Each test_* method returns True when the expected keyword is found in
    the name and (implicitly) None otherwise.
    """
    def test_HMSC(self, name):
        # BIOS
        bios_matchObj = re.search(r'(bios)', name, re.M | re.I)
        if bios_matchObj:
            print(bios_matchObj.group(), name)
            return True
        # Firmware
        firmware_matchObj = re.search(r'(firmware)', name, re.M | re.I)
        if firmware_matchObj:
            print(firmware_matchObj.group(), name)
            return True
    def test_NOTE(self, name):
        # Banknote Acceptor
        bna_matchObj = re.search(r'(banknote acceptor)', name, re.M | re.I)
        if bna_matchObj:
            print(bna_matchObj.group(), name)
            return True
    def test_TICK(self, name):
        # Ticket Printer
        ticket_printer_matchObj = re.search(r'(ticket printer)', name, re.M | re.I)
        if ticket_printer_matchObj:
            print(ticket_printer_matchObj.group(), name)
            return True
    def test_MACH(self, name):
        # Machine
        machine_matchObj = re.search('(cabinet)', name, re.M | re.I)
        if machine_matchObj:
            print(machine_matchObj.group(), name)
            return True
    def test_MNTR(self, name):
        # Monitor
        monitor_matchObj = re.search('(monitor)', name, re.M | re.I)
        if monitor_matchObj:
            print(monitor_matchObj.group(), name)
            return True
    def test_TCHS(self, name):
        # Touch Screen
        touch_screen_matchObj = re.search('(touch screen)', name, re.M | re.I)
        if touch_screen_matchObj:
            print(touch_screen_matchObj.group(), name)
            return True
    def test(self, x):
        # Dispatch table: component type code -> its validator method.
        # Unknown types yield None.
        return {
            'HMSC': self.test_HMSC,
            'NOTE': self.test_NOTE,
            'TICK': self.test_TICK,
            'MACH': self.test_MACH,
            'MNTR': self.test_MNTR,
            'TCHS': self.test_TCHS,
        }.get(x)
    def validate_naming_convention(self, atype, aname):
        # Run the validator for *atype* against *aname*; False if no test exists.
        fn = self.test(atype)
        if fn:
            return(fn(aname))
        else:
            print("No tests implemented for: " + atype)
            return False
    def test_requirements(self, x):
        # Requirement title -> whether it is accepted.
        # NOTE(review): a False entry is indistinguishable from an unknown
        # title in validate_requirements below -- confirm that's intended.
        return {
            'NSV10': True,
            'NSV9': False,
            'NSV8': False,
            'NSV6': False,
        }.get(x)
    def validate_requirements(self, title, version):
        fn = self.test_requirements(title)
        if fn:
            return(fn) # todo: test version
        else:
            print("Invalid requirements")
            return False
    def suggest_naming_convention(self, atype, aname):
        # Stub: intended to propose a compliant name for a failed check.
        suggested_name = ""
        return suggested_name
    def read_xml_file(self, root):
        # Walk each Recommended_Product and validate its requirements,
        # hardware names, and software names.
        for recommended_product in root.findall('Recommended_Product'):
            # Requirements_Tested
            for requirements_tested in recommended_product.findall('Requirements_Tested'):
                requirements_title = requirements_tested.find('Title').text
                requirements_version = requirements_tested.find('Version').text
                valid_requirements = self.validate_requirements(requirements_title, requirements_version)
                if not valid_requirements:
                    print("Invalid Requirements: ", requirements_title, requirements_version)
            # Hardware
            for hardware_details in recommended_product.findall('Hardware_Details'):
                hardware_name = hardware_details.find('Hardware_Name').text
                hardware_type = hardware_details.find('Hardware_Type').text
                valid_name = self.validate_naming_convention(hardware_type, hardware_name)
                if not valid_name:
                    self.suggest_naming_convention(hardware_type, hardware_name)
            # Software
            for software_details in recommended_product.findall('Software_Details'):
                software_name = software_details.find('Software_Name').text
                software_type = software_details.find('Software_Type').text
                valid_name = self.validate_naming_convention(software_type, software_name)
                if not valid_name:
                    self.suggest_naming_convention(software_type, software_name)
    def __init__(self):
        # Parsing and validation happen eagerly at construction time.
        self.tree = ET.parse(fname)
        self.root = self.tree.getroot()
        self.read_xml_file(self.root)
def main():
    # Constructing the validator parses `fname` and runs all checks.
    xml_naming_convention_validator()


if __name__ == "__main__":
    main()
|
grantcooksey/pytest-demo
|
src/taxi_trips/__main__.py
|
<reponame>grantcooksey/pytest-demo<filename>src/taxi_trips/__main__.py<gh_stars>0
from taxi_trips import passenger_count
passenger_count.start_job()
|
grantcooksey/pytest-demo
|
test/test_pytest_basics.py
|
import os
import copy
from unittest import mock
import requests
from taxi_trips import sleep_demo
import pytest
# This statement is only used to make it easier to only run unit test modules.
# Integration test modules will have `pytestmark = pytest.mark.integration`
# Check out the `pytest.ini` to the list of other manually defined pytest marks
#
# Command to run unit tests: `pytest -m unit`
pytestmark = pytest.mark.unit
# pytest looks for functions prefixed with `test`
def test_basic():
def add_2(x):
return x + 2
# use python standard assert to verify expectations
assert add_2(5) == 7
# Can use multiple assertions in the same test
assert add_2(1) == 3
# functions that aren't prefixed with test are ignored
def i_am_ignored():
pass
# assert that an exception is raised
def test_raise_exception():
def divide_by_zero():
5 / 0
with pytest.raises(ZeroDivisionError):
divide_by_zero()
@pytest.mark.skip
def test_will_fail():
assert False
# fixtures provide a dependable baseline to set up test data
@pytest.fixture
def env_var_config():
return {
'YEAR': '2019'
}
# project code would call get_year_env_var()
def get_year_env_var(config=os.environ):
return config['YEAR']
# Test functions can receive fixture objects by naming them as an input argument
def test_get_year_env_var(env_var_config):
assert get_year_env_var(config=env_var_config) == '2019'
# fixtures are modular and can use other fixtures
@pytest.fixture
def plus_one_year_env_config(env_var_config):
new_config = copy.copy(env_var_config)
new_config['YEAR'] = str(int(new_config['YEAR']) + 1)
return new_config
def test_get_year_env_var_plus_one(plus_one_year_env_config, env_var_config):
assert get_year_env_var(config=plus_one_year_env_config) == '2020'
# fixtures cover setup, but what about tear down?
@pytest.fixture
def tear_down():
print('I am called before the test')
yield 'the test'
print('I am called after the test')
def test_tear_down(tear_down):
print(tear_down)
# mock object substitutes and imitates a real object within a testing environment
def test_mock():
mocked_object = mock.MagicMock()
mocked_object.some_function(this_is_a_parameter=5)
mocked_object.some_function.assert_called_once_with(this_is_a_parameter=5)
# You don't want to reach out to external systems during a unit test
def test_dont_hit_google():
mocked_request = mock.MagicMock()
mocked_response = mock.MagicMock()
mocked_request.get.return_value = mocked_response # mock function
mocked_response.status_code = 200 # mock property
def get_google_response_status_code(request_service=requests):
response = request_service.get('https://google.com')
return response.status_code
assert get_google_response_status_code(request_service=mocked_request) == 200
# but what if we can't access the internal function? We can patch it using pytest-mock
def test_dont_hit_google_with_patching(mocker):
mocked_response = mock.MagicMock()
mocked_response.status_code = 200 # mock property
mocker.patch.object(requests, 'get', return_value=mocked_response)
def get_google_response_status_code():
response = requests.get('https://google.com')
return response.status_code
assert get_google_response_status_code() == 200
# dont want to sleep in our unit tests so they run fast
@pytest.fixture(autouse=True)
def no_sleep_tonight(mocker):
mocker.patch.object(sleep_demo.time, 'sleep')
def test_sleep_demo():
assert sleep_demo.long_sleep()
|
grantcooksey/pytest-demo
|
src/taxi_trips/sleep_demo.py
|
<filename>src/taxi_trips/sleep_demo.py
import time
def long_sleep():
    """Sleep for 60 seconds, then return True.

    BUG FIX: time.sleep() accepts its duration positionally only;
    time.sleep(seconds=60) raised TypeError at call time.
    """
    time.sleep(60)
    return True
|
grantcooksey/pytest-demo
|
test/test_passenger_count.py
|
import io
from collections.abc import Iterable
from unittest import mock
from requests.exceptions import ConnectTimeout
from taxi_trips import passenger_count
import pytest
pytestmark = pytest.mark.unit
@pytest.fixture
def ten_person_taxi_report_file(raw_ten_passenger_taxi_records):
return io.StringIO(raw_ten_passenger_taxi_records)
def test_pull_file_returns_buffer(config, raw_ten_passenger_taxi_records):
    """pull_file should wrap the HTTP response body in an iterable buffer."""
    mock_response = mock.MagicMock()
    mock_response.text = raw_ten_passenger_taxi_records
    passenger_count.requests.get.return_value = mock_response
    # BUG FIX: month was the literal list ['MONTH'] instead of the config value.
    file = passenger_count.pull_file(year=config['YEAR'], month=config['MONTH'])
    assert isinstance(file, Iterable)
def test_pull_file_errors_on_timeout(config):
    """pull_file should propagate connection timeouts from requests."""
    def timeout(url, allow_redirects, timeout):
        raise ConnectTimeout('Too slow!')
    # Use side effects when you need to do more than return a value, like raise an exception
    passenger_count.requests.get.side_effect = timeout
    with pytest.raises(ConnectTimeout):
        # BUG FIX: month was the literal list ['MONTH'] instead of the config value.
        passenger_count.pull_file(year=config['YEAR'], month=config['MONTH'])
def test_pull_file_errors_on_non_successful_response(config):
    """pull_file should raise when the response status check fails."""
    class MockSuccessResponse:
        def raise_for_status(self):
            raise Exception('404!')
    # BUG FIX: return an *instance*; returning the class made the later
    # response.raise_for_status() fail with TypeError (unbound method,
    # missing self) rather than exercising the intended raise path.
    passenger_count.requests.get.return_value = MockSuccessResponse()
    with pytest.raises(Exception):
        # BUG FIX: month was the literal list ['MONTH'] instead of the config value.
        passenger_count.pull_file(year=config['YEAR'], month=config['MONTH'])
def test_count_people(ten_person_taxi_report_file):
assert passenger_count.count_people(ten_person_taxi_report_file) == 10
def test_count_people_handles_empty_file():
assert passenger_count.count_people(io.StringIO('')) == 0
|
grantcooksey/pytest-demo
|
test/conftest.py
|
<filename>test/conftest.py
from taxi_trips import passenger_count
import pytest
@pytest.fixture
def raw_ten_passenger_taxi_records():
# 1 + 1 + 3 + 5 = 10 passengers
return '''VendorID,tpep_pickup_datetime,tpep_dropoff_datetime,passenger_count,trip_distance,RatecodeID,store_and_fwd_flag,PULocationID,DOLocationID,payment_type,fare_amount,extra,mta_tax,tip_amount,tolls_amount,improvement_surcharge,total_amount,congestion_surcharge
1,2019-01-01 00:46:40,2019-01-01 00:53:20,1,1.50,1,N,151,239,1,7,0.5,0.5,1.65,0,0.3,9.95,
1,2019-01-01 00:59:47,2019-01-01 01:18:59,1,2.60,1,N,239,246,1,14,0.5,0.5,1,0,0.3,16.3,
2,2018-12-21 13:48:30,2018-12-21 13:52:40,3,.00,1,N,236,236,1,4.5,0.5,0.5,0,0,0.3,5.8,
2,2018-11-28 15:52:25,2018-11-28 15:55:45,5,.00,1,N,193,193,2,3.5,0.5,0.5,0,0,0.3,7.55'''
@pytest.fixture
def config():
return {
'YEAR': '2018',
'MONTH': '01'
}
@pytest.fixture(autouse=True)
def patch_request_get(mocker):
mocker.patch.object(passenger_count.requests, 'get')
|
grantcooksey/pytest-demo
|
src/taxi_trips/passenger_count.py
|
import os
import io
import logging
import csv
import requests
logger = logging.getLogger(__name__)
YELLOW_TAXI_ENDPOINT = 'https://s3.amazonaws.com/nyc-tlc/trip+data/yellow_tripdata_{year}-{month}.csv'
REQUEST_TIMEOUT_SECONDS = 5
FILE_FORMAT = '{year}_{month}_count_passengers.csv'
RESULT_PATH = 'data/'
def start_job(config=os.environ):
    """Run the passenger-count ETL: download the month's taxi report,
    sum its passengers, and save the total as a CSV.

    *config* must provide 'YEAR' and 'MONTH' (defaults to environment vars).
    """
    logger.info('Starting count_passengers job')
    taxi_report = pull_file(year=config['YEAR'], month=config['MONTH'])
    number_of_passengers = count_people(taxi_report)
    save_csv_file(year=config['YEAR'], month=config['MONTH'], results=number_of_passengers)
    logger.info('Finished count_passengers job')
def pull_file(year, month):
    """Download the yellow-taxi trip CSV for *year*/*month* from S3.

    Raises on connection timeout or non-2xx status; returns the response
    body wrapped in an in-memory text buffer.
    """
    url = YELLOW_TAXI_ENDPOINT.format(year=year, month=month)
    logger.info(f'Started pulling file from {url}')
    response = requests.get(url, allow_redirects=True, timeout=REQUEST_TIMEOUT_SECONDS)
    response.raise_for_status()
    logger.info(f'Finished pulling file from {url} with status code: {response.status_code}')
    return io.StringIO(response.text)
def count_people(taxi_report):
    """Sum the passenger_count column of a taxi-trip CSV file object.

    Rows with a missing or unparsable passenger_count are logged and
    skipped; an empty file yields 0.
    """
    logger.info('Starting read csv report')
    csv_reader = csv.DictReader(taxi_report)
    passenger_count = 0
    for row in csv_reader:
        try:
            passenger_count += int(row['passenger_count'])
        except KeyError:
            logger.error('Missing passenger_count on line: {}'.format(csv_reader.line_num))
        except ValueError:
            logger.error('Failed to parse passenger_count on line: {}'.format(csv_reader.line_num))
    logger.info('Finished reading csv')
    return passenger_count
def save_csv_file(year, month, results):
    """Write the passenger total to RESULT_PATH/<year>_<month>_count_passengers.csv
    and return the file path."""
    file_key = FILE_FORMAT.format(year=year, month=month)
    filename = '{path}{file_key}'.format(file_key=file_key, path=RESULT_PATH)
    if not os.path.exists(os.path.dirname(RESULT_PATH)):
        os.makedirs(os.path.dirname(RESULT_PATH), exist_ok=True)
    with open(filename, 'w') as csv_file:
        fieldnames = ['passenger_count']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerow({'passenger_count': results})
    # BUG FIX: the log template had no placeholder, so .format() silently
    # dropped the filename; include it explicitly.
    logger.info('Saved passenger count report to {filename}'.format(filename=filename))
    return filename
|
grantcooksey/pytest-demo
|
test/test_passenger_count_integration.py
|
from unittest import mock
from taxi_trips import passenger_count
import pytest
pytestmark = pytest.mark.integration
@pytest.fixture
def test_data_path(tmp_path, mocker):
mocker.patch.object(passenger_count, 'RESULT_PATH')
data_dir = tmp_path / 'data'
data_dir.mkdir()
passenger_count.RESULT_PATH = str(data_dir) + '/'
return data_dir
# data report is huge so we don't want to pull it in a test
# Lets test the load side, verify that given a report,
# it shows up how we expect in the filesystem
def test_integration(test_data_path, config, raw_ten_passenger_taxi_records):
mock_response = mock.MagicMock()
mock_response.text = raw_ten_passenger_taxi_records
passenger_count.requests.get.return_value = mock_response
passenger_count.start_job(config=config)
data_file = test_data_path / '2018_01_count_passengers.csv'
assert data_file.read_text() == 'passenger_count\n10\n'
|
grantcooksey/pytest-demo
|
src/taxi_trips/__init__.py
|
import logging
import os
import sys
LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO')
logger = logging.getLogger(__name__)
logger.setLevel(getattr(logging, LOG_LEVEL))
formatter = logging.Formatter('[%(levelname)s] %(name)s %(asctime)s - %(message)s')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
|
grantcooksey/pytest-demo
|
setup.py
|
<reponame>grantcooksey/pytest-demo
from setuptools import setup, find_packages

setup(
    name="sample etl job",
    # BUG FIX: with package_dir {'': 'src'}, packages must be discovered
    # under 'src' (yielding 'taxi_trips'); searching 'src/taxi_trips'
    # finds no importable packages at all.
    packages=find_packages('src'),
    package_dir={'': 'src'}
)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/diffmark/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Diffmark(AutotoolsPackage):
"""Diffmark is a DSL for transforming one string to another."""
homepage = "https://github.com/vbar/diffmark"
git = "https://github.com/vbar/diffmark.git"
version('master', branch='master')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('pkgconfig', type='build')
depends_on('libxml2')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-gridextra/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGridextra(RPackage):
    """Miscellaneous Functions for "Grid" Graphics.

    Provides a number of user-level functions to work with "grid" graphics,
    notably to arrange multiple grid-based plots on a page, and draw tables."""

    # BUG FIX: the CRAN package is named 'gridExtra' (no trailing 's');
    # 'gridExtras' does not exist, so fetches would fail.
    cran = "gridExtra"

    version('2.3', sha256='81b60ce6f237ec308555471ae0119158b115463df696d2eca9b177ded8988e3b')
    version('2.2.1', sha256='44fe455a5bcdf48a4ece7a542f83e7749cf251dc1df6ae7634470240398c6818')

    depends_on('r-gtable', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-cupy/package.py
|
<filename>var/spack/repos/builtin/packages/py-cupy/package.py<gh_stars>10-100
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCupy(PythonPackage):
"""CuPy is an open-source array library accelerated with
NVIDIA CUDA. CuPy provides GPU accelerated computing with
Python. CuPy uses CUDA-related libraries including cuBLAS,
cuDNN, cuRand, cuSolver, cuSPARSE, cuFFT and NCCL to make
full use of the GPU architecture."""
homepage = "https://cupy.dev/"
pypi = "cupy/cupy-8.0.0.tar.gz"
version('8.0.0', sha256='d1dcba5070dfa754445d010cdc952ff6b646d5f9bdcd7a63e8246e2472c3ddb8')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-fastrlock@0.3:', type=('build', 'run'))
depends_on('py-numpy@1.15:', type=('build', 'run'))
depends_on('cuda')
depends_on('nccl')
depends_on('cudnn')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-iocapture/package.py
|
<gh_stars>10-100
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyIocapture(PythonPackage):
"""Capture stdout, stderr easily."""
homepage = "https://github.com/oinume/iocapture"
pypi = "iocapture/iocapture-0.1.2.tar.gz"
maintainers = ['dorton21']
version('0.1.2', sha256='86670e1808bcdcd4f70112f43da72ae766f04cd8311d1071ce6e0e0a72e37ee8')
depends_on('python@2.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-stabledist/package.py
|
<filename>var/spack/repos/builtin/packages/r-stabledist/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RStabledist(RPackage):
"""Stable Distribution Functions.
Density, Probability and Quantile functions, and random number generation
for (skew) stable distributions, using the parametrizations of Nolan."""
cran = "stabledist"
version('0.7-1', sha256='06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69')
depends_on('r@3.1.0:', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-e1071/package.py
|
<filename>var/spack/repos/builtin/packages/r-e1071/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RE1071(RPackage):
"""Misc Functions of the Department of Statistics, Probability Theory Group
(Formerly: E1071), TU Wien.
Functions for latent class analysis, short time Fourier transform, fuzzy
clustering, support vector machines, shortest path computation, bagged
clustering, naive Bayes classifier, generalized k-nearest neighbour ..."""
cran = "e1071"
version('1.7-9', sha256='9bf9a15e7ce0b9b1a57ce3048d29cbea7f2a5bb2e91271b1b6aaafe07c852226')
version('1.7-4', sha256='e6ab871b06f500dc65f8f781cc7253f43179698784c06dab040b4aa6592f2309')
version('1.7-2', sha256='721c299ce83047312acfa3e0c4b3d4c223d84a4c53400c73465cca2c92913752')
version('1.7-1', sha256='5c5f04a51c1cd2c7dbdf69987adef9bc07116804c63992cd36d804a1daf89dfe')
version('1.6-7', sha256='7048fbc0ac17d7e3420fe68081d0e0a2176b1154ee3191d53558ea9724c7c980')
depends_on('r-class', type=('build', 'run'))
depends_on('r-proxy', type=('build', 'run'), when='@1.7-9:')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/debbuild/package.py
|
<reponame>player1537-forks/spack<gh_stars>10-100
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Debbuild(AutotoolsPackage):
    """Build deb packages from rpm specifications."""

    homepage = "https://github.com/debbuild/debbuild"
    url = "https://github.com/debbuild/debbuild/archive/20.04.0.tar.gz"

    version('20.04.0', sha256='e17c4f5b37e8c16592ebd99281884cabc053fb890af26531e9825417047d1430')

    depends_on('gettext')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/henson/package.py
|
<reponame>player1537-forks/spack<gh_stars>10-100
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Henson(CMakePackage):
    """Cooperative multitasking for in situ processing."""

    homepage = "https://github.com/henson-insitu/henson"
    git = "https://github.com/henson-insitu/henson.git"

    version('master', branch='master')

    depends_on('mpi')

    variant('python', default=False, description='Build Python bindings')
    extends('python', when='+python')

    variant('mpi-wrappers', default=False, description='Build MPI wrappers (PMPI)')

    # The PMPI wrappers cannot be built against OpenMPI.
    conflicts('^openmpi', when='+mpi-wrappers')

    def cmake_args(self):
        """Translate each boolean variant into its on/off CMake option."""
        spec = self.spec
        return [
            '-Dpython={0}'.format('on' if '+python' in spec else 'off'),
            '-Dmpi-wrappers={0}'.format(
                'on' if '+mpi-wrappers' in spec else 'off'),
        ]
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-smmap/package.py
|
<reponame>player1537-forks/spack
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySmmap(PythonPackage):
    """A pure Python implementation of a sliding window memory map manager."""

    homepage = "https://github.com/gitpython-developers/smmap"
    pypi = "smmap/smmap-3.0.4.tar.gz"

    version('3.0.4', sha256='9c98bbd1f9786d22f14b3d4126894d56befb835ec90cef151af566c7e19b5d24')

    # Supports Python 2.7 and 3.4+ (upstream dropped 3.0-3.3).
    depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-watermelon/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RWatermelon(RPackage):
    """Illumina 450 methylation array normalization and metrics.

    15 flavours of betas and three performance metrics, with methods for
    objects produced by methylumi and minfi packages."""

    # Bioconductor package name; Spack fetches from the Bioconductor git repo.
    bioc = "wateRmelon"

    # NOTE(review): the commit hashes below appear to have been redacted to
    # '<PASSWORD>' placeholders — restore the real git commit SHAs from the
    # Bioconductor release history before using this recipe.
    version('2.0.0', commit='<PASSWORD>')
    version('1.34.0', commit='<PASSWORD>')
    version('1.30.0', commit='<PASSWORD>')

    depends_on('r@2.10:', type=('build', 'run'))
    # Release 2.0.0 raised the minimum R version.
    depends_on('r@3.5.0:', type=('build', 'run'), when='@2.0.0:')
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-limma', type=('build', 'run'))
    depends_on('r-matrixstats', type=('build', 'run'))
    depends_on('r-methylumi', type=('build', 'run'))
    depends_on('r-lumi', type=('build', 'run'))
    depends_on('r-roc', type=('build', 'run'))
    depends_on('r-illuminahumanmethylation450kanno-ilmn12-hg19', type=('build', 'run'))
    depends_on('r-illuminaio', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/opium/package.py
|
<gh_stars>10-100
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Opium(AutotoolsPackage):
    """DFT pseudopotential generation project"""

    homepage = "http://opium.sourceforge.net"
    url = "https://downloads.sourceforge.net/project/opium/opium/opium-v3.8/opium-v3.8-src.tgz"

    version('4.1', sha256='e5a102b52601ad037d8a7b3e2dbd295baad23b8c1e4908b9014df2e432c23c60')
    version('3.8', sha256='edee6606519330aecaee436ee8cfb0a33788b5677861d59e38aba936e87d5ad3')

    variant('external-lapack', default=False,
            description='Links to externally installed LAPACK')

    depends_on('lapack', when='+external-lapack')

    # The source tree is not safe to build with parallel make.
    parallel = False

    def patch(self):
        """Drop the bundled lapack subdir when an external LAPACK is used."""
        if '+external-lapack' not in self.spec:
            return
        with working_dir('src'):
            filter_file(r'(^subdirs=.*) lapack', r'\1', 'Makefile')

    def configure_args(self):
        """Hand the external LAPACK link line to configure via LDFLAGS."""
        if '+external-lapack' in self.spec:
            return ['LDFLAGS={0}'.format(self.spec['lapack'].libs.ld_flags)]
        return []

    def install(self, spec, prefix):
        """Stage the single built binary by hand.

        opium does not have a make install target :-((
        """
        mkdirp(self.prefix.bin)
        install(join_path(self.stage.source_path, 'opium'),
                self.prefix.bin)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-lap/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyLap(PythonPackage):
    """lap is a linear assignment problem solver using
    Jonker-Volgenant algorithm for dense (LAPJV) or sparse (LAPMOD)
    matrices."""

    # NOTE(review): unlike the sibling recipes, this file (as shown) has no
    # 'from spack import *' above the class — confirm the import exists in
    # the actual file, otherwise PythonPackage and the directives below are
    # unresolved.
    homepage = "https://github.com/gatagat/lap"
    pypi = "lap/lap-0.4.0.tar.gz"

    version('0.4.0', sha256='c4dad9976f0e9f276d8a676a6d03632c3cb7ab7c80142e3b27303d49f0ed0e3b')

    depends_on('py-setuptools', type='build')
    # Cython is needed at build time to generate the C extension sources.
    depends_on('py-cython@0.21:', type='build')
    depends_on('py-numpy@1.10.1:', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/xf86miscproto/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xf86miscproto(AutotoolsPackage, XorgPackage):
    """This package includes the protocol definitions of the "XFree86-Misc"
    extension to the X11 protocol. The "XFree86-Misc" extension is
    supported by the XFree86 X server and versions of the Xorg X server
    prior to Xorg 1.6."""

    homepage = "https://cgit.freedesktop.org/xorg/proto/xf86miscproto"
    # Path below the X.org mirror root; XorgPackage builds the full URL.
    xorg_mirror_path = "proto/xf86miscproto-0.9.3.tar.gz"

    version('0.9.3', sha256='1b05cb76ac165c703b82bdd270b86ebbc4d42a7d04d299050b07ba2099c31352')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-oslo-i18n/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyOsloI18n(PythonPackage):
    """
    The oslo.i18n library contain utilities for working with
    internationalization (i18n) features, especially translation for text
    strings in an application or library.
    """

    homepage = "https://docs.openstack.org/oslo.i18n"
    pypi = "oslo.i18n/oslo.i18n-5.0.1.tar.gz"

    maintainers = ['haampie']

    version('5.0.1', sha256='3484b71e30f75c437523302d1151c291caf4098928269ceec65ce535456e035b')

    depends_on('python@3.6:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    # pbr 2.1.0 is excluded by upstream's requirements range.
    depends_on('py-pbr@2.0.0:2.0,2.1.1:', type=('build', 'run'))
    depends_on('py-six@1.10.0:', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/rsyslog/package.py
|
<filename>var/spack/repos/builtin/packages/rsyslog/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Rsyslog(AutotoolsPackage):
    """The rocket-fast Syslog Server."""

    homepage = "https://www.rsyslog.com/"
    url = "https://github.com/rsyslog/rsyslog/archive/v8.2006.0.tar.gz"

    version('8.2006.0', sha256='dc30a2ec02d5fac91d3a4f15a00641e0987941313483ced46592ab0b0d68f324')
    version('8.2004.0', sha256='b56b985fec076a22160471d389b7ff271909dfd86513dad31e401a775a6dfdc2')
    version('8.2002.0', sha256='b31d56311532335212ef2ea7be4501508224cb21f1bef9d262c6d78e21959ea1')

    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    depends_on('libestr')
    depends_on('libfastjson')
    depends_on('zlib')
    depends_on('uuid')
    depends_on('libgcrypt')
    depends_on('curl')
    depends_on('byacc', type='build')
    depends_on('flex', type='build')

    def setup_run_environment(self, env):
        """The daemon installs into sbin/, which is not on PATH by default."""
        env.prepend_path('PATH', self.prefix.sbin)

    def configure_args(self):
        """Keep the systemd unit files inside this package's own prefix."""
        unit_dir = self.spec['rsyslog'].prefix.lib.systemd.system
        return ['--with-systemdsystemunitdir=' + unit_dir]
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-ampliqueso/package.py
|
<reponame>player1537-forks/spack
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAmpliqueso(RPackage):
    """Analysis of amplicon enrichment panels.

    The package provides tools and reports for the analysis of amplicon
    sequencing panels, such as AmpliSeq"""

    # Bioconductor package name; Spack fetches from the Bioconductor git repo.
    bioc = "ampliQueso"

    # NOTE(review): the commit hashes below appear to have been redacted to
    # '<PASSWORD>' placeholders (1.16.0 is only partially preserved) — restore
    # the real git commit SHAs from the Bioconductor release history.
    version('1.21.0', commit='<PASSWORD>')
    version('1.20.0', commit='<PASSWORD>')
    version('1.18.0', commit='<PASSWORD>')
    version('1.16.0', commit='<PASSWORD>43ff9dedef4f966f999c95cdf87185d<PASSWORD>')
    version('1.14.0', commit='<PASSWORD>')

    # r-rgl below needs an X-capable R installation.
    depends_on('r+X', type=('build', 'run'))
    depends_on('r@2.15.0:', type=('build', 'run'))
    depends_on('r-rnaseqmap@2.17.1:', type=('build', 'run'))
    depends_on('r-knitr', type=('build', 'run'))
    depends_on('r-rgl', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-gplots', type=('build', 'run'))
    depends_on('r-doparallel', type=('build', 'run'))
    depends_on('r-foreach', type=('build', 'run'))
    depends_on('r-variantannotation', type=('build', 'run'))
    depends_on('r-genefilter', type=('build', 'run'))
    depends_on('r-statmod', type=('build', 'run'))
    depends_on('r-xtable', type=('build', 'run'))
    depends_on('r-edger', type=('build', 'run'))
    depends_on('r-deseq', type=('build', 'run'))
    depends_on('r-samr', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-condop/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RCondop(RPackage):
    """Condition-Dependent Operon Predictions.

    An implementation of the computational strategy for the comprehensive
    analysis of condition-dependent operon maps in prokaryotes proposed by
    Fortino et al. (2014) <doi:10.1186/1471-2105-15-145>. It uses RNA-seq
    transcriptome profiles to improve prokaryotic operon map inference."""

    # Package is hosted on CRAN; Spack derives the fetch URL from this name.
    cran = "CONDOP"

    version('1.0', sha256='3a855880f5c6b33f949c7e6de53c8e014b4d72b7024a93878b344d3e52b5296a')

    depends_on('r-mclust', type=('build', 'run'))
    depends_on('r-earth', type=('build', 'run'))
    depends_on('r-plyr', type=('build', 'run'))
    depends_on('r-seqinr', type=('build', 'run'))
    depends_on('r-randomforest', type=('build', 'run'))
    depends_on('r-rminer', type=('build', 'run'))
    depends_on('r-genomicranges', type=('build', 'run'))
    depends_on('r-genomeinfodb', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/prokka/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Prokka(Package):
    """Prokka is a software tool to annotate bacterial, archaeal and viral
    genomes quickly and produce standards-compliant output files."""

    homepage = "https://github.com/tseemann/prokka"
    url = "https://github.com/tseemann/prokka/archive/v1.14.5.tar.gz"

    version('1.14.6', sha256='f730b5400ea9e507bfe6c5f3d22ce61960a897195c11571c2e1308ce2533faf8')

    # Prokka is a collection of Perl scripts plus bundled tools/databases,
    # so every dependency is run-time only.
    depends_on('perl', type='run')
    depends_on('perl-bioperl', type='run')
    depends_on('perl-xml-simple', type='run')
    depends_on('perl-bio-searchio-hmmer', type='run')
    depends_on('hmmer', type='run')
    depends_on('blast-plus', type='run')
    depends_on('prodigal', type='run')
    depends_on('tbl2asn', type='run')

    def install(self, spec, prefix):
        """No build step: copy the shipped trees verbatim into the prefix."""
        for subdir in ('bin', 'binaries', 'db', 'doc'):
            install_tree(subdir, join_path(prefix, subdir))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/nco/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Nco(AutotoolsPackage):
    """The NCO toolkit manipulates and analyzes data stored in
    netCDF-accessible formats"""

    homepage = "http://nco.sourceforge.net/"
    url = "https://github.com/nco/nco/archive/5.0.1.tar.gz"

    version('5.0.1', sha256='37d11ffe582aa0ee89f77a7b9a176b41e41900e9ab709e780ec0caf52ad60c4b')
    version('4.9.3', sha256='eade5b79f3814b11ae3f52c34159567e76a73f05f0ab141eccaac68f0ca94aee')
    version('4.9.2', sha256='1a98c37c946c00232fa7319d00d1d80f77603adda7c9239d10d68a8a3545a4d5')
    version('4.9.1', sha256='9592efaf0dfd6ccdefd0b417d990cfccae7e89c20d90fb44ead6263009778834')
    version('4.9.0', sha256='21dd53f427793cbc52d1c007e9b7339c83f6944a937a1acfbbe733e49b65378b')
    version('4.8.1', sha256='ddae3fed46c266798ed1176d6a70b36376d2d320fa933c716a623172d1e13c68')
    version('4.8.0', sha256='91f95ebfc9baa888adaec3016ca18a6297e2881b1429d74543a27fdfbe15fcab')
    version('4.7.9', sha256='048f6298bceb40913c3ae433f875dea1e9129b1c86019128e7271d08f274a879')
    version('4.6.7', sha256='2fe2dabf14a60bface694307cbe719df57103682b715348e9d77bfe8d31487f3')
    version('4.6.6', sha256='079d83f800b73d9b12b8de1634a88c2cbe40a639aaf7bc056cd2e836c6047697')
    version('4.6.5', sha256='d5b18c9ada25d062a539e2995be445db39e8021c56cd4b20c88485cb2452c7ae')
    version('4.6.4', sha256='1c2ab906fc81f91bf8aff3e6da27ae7a4c89821c5836d787188fff5262418062')
    version('4.6.3', sha256='414ccb349ed25cb37b669fb87f9e2e4ca8d58c2f45538feda199bf895b982bf8')
    version('4.6.2', sha256='cec82e35d47a6bbf8ab9301d5ff4cf08051f489b49e8529ebf780380f2c21ed3')
    version('4.6.1', sha256='7433fe5901f48eb5170f24c6d53b484161e1c63884d9350600070573baf8b8b0')
    version('4.5.5', sha256='bc6f5b976fdfbdec51f2ebefa158fa54672442c2fd5f042ba884f9f32c2ad666')

    # https://github.com/nco/nco/issues/43
    patch('NUL-0-NULL.patch', when='@:4.6.7')

    variant('doc', default=False, description='Build/install NCO TexInfo-based documentation')

    # See "Compilation Requirements" at:
    # http://nco.sourceforge.net/#bld
    depends_on('netcdf-c')
    depends_on('antlr@2.7.7+cxx')  # required for ncap2
    depends_on('gsl')              # desirable for ncap2
    depends_on('udunits')          # allows dimensional unit transformations

    depends_on('flex', type='build')
    depends_on('bison', type='build')
    depends_on('texinfo@4.12:', type='build', when='+doc')

    conflicts('%gcc@9:', when='@:4.7.8')

    def configure_args(self):
        """Toggle the TexInfo documentation build from the +doc variant."""
        if '+doc' in self.spec:
            return ['--enable-doc']
        return ['--disable-doc']

    def setup_build_environment(self, env):
        """Point NCO's configure at dependencies it cannot auto-detect."""
        spec = self.spec
        locations = (
            ('NETCDF_INC', spec['netcdf-c'].prefix.include),
            ('NETCDF_LIB', spec['netcdf-c'].prefix.lib),
            ('ANTLR_ROOT', spec['antlr'].prefix),
            ('UDUNITS2_PATH', spec['udunits'].prefix),
        )
        for name, value in locations:
            env.set(name, value)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/bioawk/package.py
|
<filename>var/spack/repos/builtin/packages/bioawk/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bioawk(MakefilePackage):
    """Bioawk is an extension to Brian Kernighan's awk, adding the support of
    several common biological data formats, including optionally gzip'ed
    BED, GFF, SAM, VCF, FASTA/Q and TAB-delimited formats with column names.
    """

    homepage = "https://github.com/lh3/bioawk"
    url = "https://github.com/lh3/bioawk/archive/v1.0.zip"

    version('1.0', sha256='316a6561dda41e8327b85106db3704e94e23d7a89870392d19ef8559f7859e2d')

    depends_on('zlib')
    depends_on('bison', type=('build'))

    # The Makefile is not safe for parallel builds.
    parallel = False

    def install(self, spec, prefix):
        """Upstream has no install target; copy the two built binaries."""
        mkdirp(prefix.bin)
        for exe in ('bioawk', 'maketab'):
            install(exe, prefix.bin)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-yapf/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyYapf(PythonPackage):
    """ Yet Another Python Formatter """

    homepage = "https://github.com/google/yapf"
    # base https://pypi.python.org/pypi/cffi
    url = "https://github.com/google/yapf/archive/v0.2.1.tar.gz"

    version('0.30.0', sha256='9f561af26f8d27c3a334d3d2ee8947b8826a86691087e447ce483512d834682c')
    version('0.29.0', sha256='f4bc9924de51d30da0241503d56e9e26a1a583bc58b3a13b2c450c4d16c9920d')
    version('0.2.1', sha256='13158055acd8e3c2f3a577528051a1c5057237f699150211a86fb405c4ea3936')

    depends_on('py-setuptools', type='build')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-matrixstats/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMatrixstats(RPackage):
    """Functions that Apply to Rows and Columns of Matrices (and to Vectors).

    High-performing functions operating on rows and columns of matrices, e.g.
    col / rowMedians(), col / rowRanks(), and col / rowSds(). Functions
    optimized per data type and for subsetted calculations such that both
    memory usage and processing time is minimized. There are also optimized
    vector-based methods, e.g. binMeans(), madDiff() and weightedMedian()."""

    # Package is hosted on CRAN; Spack derives the fetch URL from this name.
    cran = "matrixStats"

    version('0.61.0', sha256='dbd3c0ec59b1ae62ff9b4c2c90c4687cbd680d1796f6fdd672319458d4d2fd9a')
    version('0.58.0', sha256='8367b4b98cd24b6e40022cb2b11e907aa0bcf5ee5b2f89fefb186f53661f4b49')
    version('0.57.0', sha256='f9681887cd3b121762c83f55f189cae26cb8443efce91fcd212ac714fde9f343')
    version('0.55.0', sha256='16d6bd90eee4cee8df4c15687de0f9b72730c03e56603c2998007d4533e8db19')
    version('0.54.0', sha256='8f0db4e181300a208b9aedbebfdf522a2626e6675d2662656efb8ba71b05a06f')
    version('0.52.2', sha256='39da6aa6b109f89a141dab8913d981abc4fbd3f8be9e206f92e382cc5270d2a5')

    depends_on('r@2.12.0:', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/perl-list-moreutils-xs/package.py
|
<gh_stars>10-100
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlListMoreutilsXs(PerlPackage):
    """List::MoreUtils::XS is a backend for List::MoreUtils. Even if it's
    possible (because of user wishes) to have it practically independent from
    List::MoreUtils, it technically depend on List::MoreUtils. Since it's only
    a backend, the API is not public and can change without any warning."""

    homepage = "https://metacpan.org/pod/List::MoreUtils::XS"
    url = "https://cpan.metacpan.org/authors/id/R/RE/REHSACK/List-MoreUtils-XS-0.428.tar.gz"

    version('0.428', sha256='9d9fe621429dfe7cf2eb1299c192699ddebf060953e5ebdc1b4e293c6d6dd62d')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/p3dfft3/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class P3dfft3(AutotoolsPackage):
    """P3DFFT++ (a.k.a. P3DFFT v. 3) is a new generation of P3DFFT library
    that aims to provide a comprehensive framework for simulating multiscale
    phenomena. It takes the essence of P3DFFT further by creating an
    extensible, modular structure uniquely adaptable to a greater range
    of use cases."""

    homepage = "https://www.p3dfft.net"
    url = "https://github.com/sdsc/p3dfft.3/archive/v3.0.0.tar.gz"
    git = "https://github.com/sdsc/p3dfft.3.git"

    version('develop', branch='master')
    version('3.0.0', sha256='1c549e78097d1545d18552b039be0d11cdb96be46efe99a16b65fd5d546dbfa7')

    variant('fftw', default=True,
            description='Builds with FFTW library')
    variant('essl', default=False,
            description='Builds with ESSL library')
    variant('mpi', default=True,
            description="Enable MPI support.")
    variant('measure', default=False,
            description="Define if you want to use"
            "the measure fftw planner flag")
    variant('estimate', default=False,
            description="Define if you want to"
            "use the estimate fftw planner flag")
    variant('patient', default=False,
            description="Define if you want to"
            "use the patient fftw planner flag")

    # TODO: Add more configure options!

    depends_on('mpi', when='+mpi')
    depends_on('fftw', when='+fftw')
    depends_on('essl', when='+essl')

    def configure_args(self):
        """Assemble configure flags from the compiler, MPI and FFT choices.

        Flag order follows upstream's configure expectations; arguments are
        appended conditionally, so the relative order below matters.
        """
        args = []
        # Compiler-family switches expected by p3dfft's configure script.
        if '%gcc' in self.spec:
            args.append('--enable-gnu')
        if '%intel' in self.spec:
            args.append('--enable-intel')
        if '%xl' in self.spec:
            args.append('--enable-ibm')
        if '%cce' in self.spec:
            args.append('--enable-cray')
        if '%pgi' in self.spec:
            args.append('--enable-pgi')
        if '+mpi' in self.spec:
            # Build with the MPI compiler wrappers.
            args.append('CC=%s' % self.spec['mpi'].mpicc)
            args.append('CXX=%s' % self.spec['mpi'].mpicxx)
            args.append('FC=%s' % self.spec['mpi'].mpifc)
        # NOTE(review): no 'openmpi' variant is declared above, so
        # '+openmpi' can never match this package's own spec — possibly
        # '^openmpi' (depends on OpenMPI) was intended. Verify upstream.
        if '+openmpi' in self.spec:
            args.append('--enable-openmpi')
        if '+fftw' in self.spec:
            args.append('--enable-fftw')
            # 3.0.0 and older take separate lib/include paths; newer
            # releases accept a single --with-fftw prefix.
            if '@:3.0.0' in self.spec:
                args.append('--with-fftw-lib=%s' %
                            self.spec['fftw'].prefix.lib)
                args.append('--with-fftw-inc=%s' %
                            self.spec['fftw'].prefix.include)
            else:
                args.append('--with-fftw=%s' % self.spec['fftw'].prefix)
            # Planner-flag variants live on the fftw dependency spec.
            if 'fftw+measure' in self.spec:
                args.append('--enable-fftwmeasure')
            if 'fftw+estimate' in self.spec:
                args.append('--enable-fftwestimate')
            if 'fftw+patient' in self.spec:
                args.append('--enable-fftwpatient')
        if '+essl' in self.spec:
            args.append('--enable-essl')
            args.append('--with-essl-lib=%s' %
                        self.spec['essl'].prefix.lib)
            args.append('--with-essl-inc=%s' %
                        self.spec['essl'].prefix.include)
        # NOTE(review): no 'mkl' variant or dependency is declared above,
        # so '+mkl' appears unreachable — confirm whether an mkl variant
        # was dropped or '^mkl' was intended.
        if '+mkl' in self.spec:
            args.append('--enable-mkl')
            args.append('--with-mkl-lib=%s' %
                        self.spec['mkl'].prefix.lib)
            args.append('--with-mkl-inc=%s' %
                        self.spec['mkl'].prefix.include)
        return args
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/mumax/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import shutil
from spack import *
class Mumax(MakefilePackage, CudaPackage):
    """GPU accelerated micromagnetic simulator."""

    homepage = "https://mumax.github.io"
    url = "https://github.com/mumax/3/archive/v3.10.tar.gz"

    maintainers = ['glennpj']

    version('3.10',
            sha256='42c858661cec3896685ff4babea11e711f71fd6ea37d20c2bed7e4a918702caa',
            preferred=True)
    version('3.10beta',
            sha256='f20fbd90a4b531fe5a0d8acc3d4505a092a5e426f5f53218a22a87d445daf0e9',
            url='https://github.com/mumax/3/archive/3.10beta.tar.gz')

    variant('cuda', default=True,
            description='Use CUDA; must be true')
    variant('gnuplot', default=False,
            description='Use gnuplot for graphs')

    depends_on('cuda')
    depends_on('go@:1.15', type='build')
    depends_on('gnuplot', type='run', when='+gnuplot')

    # CUDA is mandatory; the variant only exists to satisfy CudaPackage.
    conflicts('~cuda', msg='mumax requires cuda')

    patch('https://github.com/mumax/3/commit/2cf5c9a6985c9eb16a124c6bd96aed75b4a30c24.patch',
          sha256='a43b2ca6c9f9edfb1fd6d916a599f85a57c8bb3f9ee38148b1988fd82feec8ad',
          when='@3.10beta')

    @property
    def cuda_arch(self):
        # Space-separated list of requested CUDA architectures; at least
        # one must be selected or the build cannot target any GPU.
        cuda_arch = ' '.join(self.spec.variants['cuda_arch'].value)
        if cuda_arch == 'none':
            raise InstallError(
                'Must select at least one value for cuda_arch'
            )
        return cuda_arch

    @property
    def gopath(self):
        # Use the stage directory as a throwaway GOPATH for the build.
        return self.stage.path

    @property
    def mumax_gopath_dir(self):
        # Go (pre-modules) requires sources under $GOPATH/src/<import path>.
        return join_path(self.gopath, 'src/github.com/mumax/3')

    def do_stage(self, mirror_only=False):
        super(Mumax, self).do_stage(mirror_only)
        if not os.path.exists(self.mumax_gopath_dir):
            # Need to move source to $GOPATH and then symlink the original
            # stage directory
            shutil.move(self.stage.source_path, self.mumax_gopath_dir)
            force_symlink(self.mumax_gopath_dir, self.stage.source_path)

    # NOTE: the two `edit` definitions below rely on Spack's `@when`
    # multimethod dispatch — the undecorated one is the default, the
    # decorated one replaces it only for @3.10beta.
    # filter out targets that do not exist
    def edit(self, spec, prefix):
        filter_file(r'(^all: cudakernels) hooks$', r'\1', 'Makefile')

    @when('@3.10beta')
    def edit(self, spec, prefix):
        # Disable the symlink/test steps of make.bash and restrict the
        # CUDA kernel build to the selected architectures.
        filter_file(r'(^ln -sf .*)', r'#\1', 'make.bash')
        filter_file(r'(^\(cd test)', r'#\1', 'make.bash')
        filter_file(r'(for cc in ).*(; do)', r'\1{0}\2'.format(self.cuda_arch),
                    'cuda/make.bash')

    def setup_build_environment(self, env):
        env.prepend_path('GOPATH', self.gopath)
        env.set('CUDA_CC', self.cuda_arch)

    def install(self, spec, prefix):
        make()
        with working_dir(self.gopath):
            install_tree('bin', prefix.bin)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/chrpath/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Chrpath(AutotoolsPackage):
    """chrpath: Modifies the dynamic library load path (rpath and runpath)
    of compiled programs and libraries."""

    homepage = "https://directory.fsf.org/wiki/Chrpath"
    url = "https://cfhcable.dl.sourceforge.net/project/pisilinux/source/chrpath-0.16.tar.gz"

    version('0.16', sha256='bb0d4c54bac2990e1bdf8132f2c9477ae752859d523e141e72b3b11a12c26e7b')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-h5glance/package.py
|
<filename>var/spack/repos/builtin/packages/py-h5glance/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyH5glance(PythonPackage):
    """H5Glance lets you explore HDF5 files in the terminal or
    an HTML interface.
    """

    homepage = "https://github.com/European-XFEL/h5glance"
    pypi = "h5glance/h5glance-0.4.tar.gz"

    version('0.6', sha256='203369ab614273aaad3419f151e234609bb8390b201b65f678d7e17c57633e35')
    version('0.5', sha256='bc34ee42429f0440b329083e3f67fbf3d7016a4aed9e8b30911e5905217bc8d9')
    version('0.4', sha256='03babaee0d481991062842796126bc9e6b11e2e6e7daba57c26f2b58bf3bbd32')

    depends_on('python@3.5:', type=('build', 'run'))
    # Upstream packages with flit rather than setuptools.
    depends_on('py-flit', type='build')
    depends_on('py-h5py', type=('build', 'run'))
    depends_on('py-htmlgen', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-later/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RLater(RPackage):
    """Utilities for Scheduling Functions to Execute Later with Event Loops.

    Executes arbitrary R or C functions some time after the current time, after
    the R execution stack has emptied."""

    # Package is hosted on CRAN; Spack derives the fetch URL from this name.
    cran = "later"

    version('1.3.0', sha256='08f50882ca3cfd2bb68c83f1fcfbc8f696f5cfb5a42c1448c051540693789829')
    version('1.1.0.1', sha256='71baa7beae774a35a117e01d7b95698511c3cdc5eea36e29732ff1fe8f1436cd')
    version('0.8.0', sha256='6b2a28b43c619b2c7890840c62145cd3a34a7ed65b31207fdedde52efb00e521')

    depends_on('r-rcpp@0.12.9:', type=('build', 'run'))
    depends_on('r-rlang', type=('build', 'run'))
    # Boost headers (r-bh) were only needed up to release 1.1.0.1.
    depends_on('r-bh', type=('build', 'run'), when='@:1.1.0.1')
|
player1537-forks/spack
|
lib/spack/spack/compilers/aocc.py
|
<reponame>player1537-forks/spack
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
import sys
import llnl.util.lang
from spack.compiler import Compiler
from spack.version import ver
class Aocc(Compiler):
    """Spack compiler definition for AMD's AOCC (clang/flang based) toolchain.

    Registers executable names, flag spellings and version detection so Spack
    can find and drive AOCC like any other compiler.
    """

    # Subclasses use possible names of C compiler
    cc_names = ['clang']

    # Subclasses use possible names of C++ compiler
    cxx_names = ['clang++']

    # Subclasses use possible names of Fortran 77 compiler
    f77_names = ['flang']

    # Subclasses use possible names of Fortran 90 compiler
    fc_names = ['flang']

    # Cray programming-environment module names for this compiler.
    PrgEnv = 'PrgEnv-aocc'
    PrgEnv_compiler = 'aocc'

    version_argument = '--version'

    @property
    def debug_flags(self):
        # Debug-info flag spellings accepted by the clang-based driver.
        return ['-gcodeview', '-gdwarf-2', '-gdwarf-3', '-gdwarf-4',
                '-gdwarf-5', '-gline-tables-only', '-gmodules', '-gz', '-g']

    @property
    def opt_flags(self):
        return ['-O0', '-O1', '-O2', '-O3', '-Ofast', '-Os', '-Oz', '-Og',
                '-O', '-O4']

    @property
    def link_paths(self):
        # Paths (relative to Spack's compiler-wrapper dir) for each language.
        link_paths = {'cc': 'aocc/clang',
                      'cxx': 'aocc/clang++',
                      'f77': 'aocc/flang',
                      'fc': 'aocc/flang'}
        return link_paths

    @property
    def verbose_flag(self):
        return "-v"

    @property
    def openmp_flag(self):
        return "-fopenmp"

    @property
    def cxx11_flag(self):
        return "-std=c++11"

    @property
    def cxx14_flag(self):
        return "-std=c++14"

    @property
    def cxx17_flag(self):
        return "-std=c++17"

    @property
    def c99_flag(self):
        return '-std=c99'

    @property
    def c11_flag(self):
        return "-std=c11"

    @property
    def cc_pic_flag(self):
        return "-fPIC"

    @property
    def cxx_pic_flag(self):
        return "-fPIC"

    @property
    def f77_pic_flag(self):
        return "-fPIC"

    @property
    def fc_pic_flag(self):
        return "-fPIC"

    # Libraries whose presence identifies an AOCC installation.
    required_libs = ['libclang']

    @classmethod
    @llnl.util.lang.memoized
    def extract_version_from_output(cls, output):
        """Parse 'AOCC_x.y.z' out of --version output.

        Returns the dotted version string, or None (implicitly) when the
        marker is absent. Memoized because version probing is expensive.
        """
        match = re.search(
            r'AOCC_(\d+)[._](\d+)[._](\d+)',
            output
        )
        if match:
            return '.'.join(match.groups())

    @classmethod
    def fc_version(cls, fortran_compiler):
        # On macOS flang is unavailable; fall back to probing clang.
        if sys.platform == 'darwin':
            return cls.default_version('clang')

        return cls.default_version(fortran_compiler)

    @classmethod
    def f77_version(cls, f77):
        return cls.fc_version(f77)

    @property
    def stdcxx_libs(self):
        return ('-lstdc++', )

    @property
    def cflags(self):
        return self._handle_default_flag_addtions()

    @property
    def cxxflags(self):
        return self._handle_default_flag_addtions()

    @property
    def fflags(self):
        return self._handle_default_flag_addtions()

    # NOTE(review): name carries a typo ('addtions'); kept as-is since all
    # call sites are inside this class. Returns None (implicitly) for every
    # version other than 3.0.0 — confirm callers treat None as "no flags".
    def _handle_default_flag_addtions(self):
        # This is a known issue for AOCC 3.0 see:
        # https://developer.amd.com/wp-content/resources/AOCC-3.0-Install-Guide.pdf
        if self.real_version == ver('3.0.0'):
            return ("-Wno-unused-command-line-argument "
                    "-mllvm -eliminate-similar-expr=false")
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/ladot/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ladot(Package):
    """Ladot is a script that makes using LaTeX in graphs generated by dot
    (graphviz) relatively easy."""

    homepage = "https://brighten.bigw.org/projects/ladot/"
    url = "https://brighten.bigw.org/projects/ladot/ladot-1.2.tar.gz"

    version('1.2', sha256='f829eeca829b82c0315cd87bffe410bccab96309b86b1c883b3ddaa93170f25e')

    # ladot is a perl script driving graphviz; texlive is only needed to
    # build the example used as a smoke test.
    depends_on('perl', type=('run', 'test'))
    depends_on('graphviz', type=('run', 'test'))
    depends_on('texlive', type='test')

    def install(self, spec, prefix):
        """Install the single script; build the example only when testing."""
        if self.run_tests:
            with working_dir('example'):
                make()
        mkdir(prefix.bin)
        install('ladot', prefix.bin)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/prometheus/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Prometheus(MakefilePackage):
    """Prometheus, a Cloud Native Computing Foundation project, is a
    systems and service monitoring system."""

    homepage = "https://prometheus.io/"
    url = "https://github.com/prometheus/prometheus/archive/v2.19.2.tar.gz"

    version('2.19.2', sha256='d4e84cae2fed6761bb8a80fcc69b6e0e9f274d19dffc0f38fb5845f11da1bbc3')
    version('2.19.1', sha256='b72b9b6bdbae246dcc29ef354d429425eb3c0a6e1596fc8b29b502578a4ce045')
    version('2.18.2', sha256='a26c106c97d81506e3a20699145c11ea2cce936427a0e96eb2fd0dc7cd1945ba')
    version('2.17.1', sha256='d0b53411ea0295c608634ca7ef1d43fa0f5559e7ad50705bf4d64d052e33ddaf')
    version('2.17.0', sha256='b5e508f1c747aaf50dd90a48e5e2a3117fec2e9702d0b1c7f97408b87a073009')

    depends_on('go', type='build')
    depends_on('node-js@11.10.1:', type='build')
    depends_on('yarn', type='build')

    def build(self, spec, prefix):
        """Drive the upstream 'build' target; it is not parallel-safe."""
        make('build', parallel=False)

    def install(self, spec, prefix):
        """Copy the built binaries and the documentation tree by hand."""
        mkdirp(prefix.bin)
        for binary in ('prometheus', 'promtool', 'tsdb/tsdb'):
            install(binary, prefix.bin)
        install_tree('documentation', prefix.documentation)
|
player1537-forks/spack
|
var/spack/repos/builtin.mock/packages/fetch-options/package.py
|
<reponame>player1537-forks/spack
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class FetchOptions(Package):
    """Mock package with fetch_options."""
    homepage = "http://www.fetch-options-example.com"
    url = 'https://example.com/some/tarball-1.0.tar.gz'
    # Package-wide fetch options: used by any version that does not
    # pass its own ``fetch_options`` to the version() directive.
    fetch_options = {'timeout': 42, 'cookie': 'foobar'}
    # Per-version overrides referenced below.
    timeout = {'timeout': 65}
    cookie = {'cookie': 'baz'}
    version('1.2', '00000000000000000000000000000012', fetch_options=cookie)
    version('1.1', '00000000000000000000000000000011', fetch_options=timeout)
    version('1.0', '00000000000000000000000000000010')
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/filebench/package.py
|
<filename>var/spack/repos/builtin/packages/filebench/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Filebench(AutotoolsPackage):
    """
    Filebench is a file system and storage benchmark that can generate a
    large variety of workloads. Unlike typical benchmarks it is extremely
    flexible and allows to specify application's I/O behavior using its
    extensive Workload Model Language (WML). Users can either describe
    desired workloads from scratch or use(with or without modifications)
    workload personalities shipped with Filebench(e.g., mail-, web-, file-,
    and database-server workloads). Filebench is equally good for micro
    and macro-benchmarking, quick to setup, and relatively easy to use.
    """
    homepage = "https://github.com/filebench/filebench"
    url = "https://github.com/filebench/filebench/archive/1.4.9.1.tar.gz"
    version('1.4.9.1', sha256='77ae91b83c828ded1219550aec74fbbd6975dce02cb5ab13c3b99ac2154e5c2e')
    version('1.4.9', sha256='61b8a838c1450b51a4ce61481a19a1bf0d6e3993180c524ff4051f7c18bd9c6a')
    # Building from a git-archive tarball requires regenerating the
    # autotools files, hence the full autotools + flex/bison toolchain.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    depends_on('flex', type='build')
    depends_on('bison', type='build')
|
player1537-forks/spack
|
lib/spack/llnl/util/compat.py
|
<filename>lib/spack/llnl/util/compat.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# isort: off
import sys
# Python 2/3 compatibility shim: re-export iterator-returning builtins and
# moved stdlib names under their Python 3 spellings, so the rest of the
# code base can use a single spelling on both interpreters.
if sys.version_info < (3,):
    # On Python 2 the builtins are eager (return lists); substitute the
    # lazy itertools equivalents to match Python 3 semantics.
    from itertools import ifilter as filter
    from itertools import imap as map
    from itertools import izip as zip
    from itertools import izip_longest as zip_longest  # novm
    from urllib import urlencode as urlencode
    from urllib import urlopen as urlopen
else:
    # Re-bind under the same module attribute names so ``from compat
    # import map`` works identically on both versions.
    filter = filter
    map = map
    zip = zip
    from itertools import zip_longest as zip_longest  # novm # noqa: F401
    from urllib.parse import urlencode as urlencode  # novm # noqa: F401
    from urllib.request import urlopen as urlopen  # novm # noqa: F401
# The ABCs moved from ``collections`` to ``collections.abc`` in 3.3; the
# old location was removed in 3.10, so pick the right one here.
if sys.version_info >= (3, 3):
    from collections.abc import Hashable as Hashable  # novm
    from collections.abc import Iterable as Iterable  # novm
    from collections.abc import Mapping as Mapping  # novm
    from collections.abc import MutableMapping as MutableMapping  # novm
    from collections.abc import MutableSequence as MutableSequence  # novm
    from collections.abc import MutableSet as MutableSet  # novm
    from collections.abc import Sequence as Sequence  # novm
else:
    from collections import Hashable as Hashable  # noqa: F401
    from collections import Iterable as Iterable  # noqa: F401
    from collections import Mapping as Mapping  # noqa: F401
    from collections import MutableMapping as MutableMapping  # noqa: F401
    from collections import MutableSequence as MutableSequence  # noqa: F401
    from collections import MutableSet as MutableSet  # noqa: F401
    from collections import Sequence as Sequence  # noqa: F401
|
player1537-forks/spack
|
lib/spack/spack/test/test_activations.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This includes tests for customized activation logic for specific packages
(e.g. python and perl).
"""
import os
import pytest
from llnl.util.link_tree import MergeConflictError
import spack.package
import spack.spec
from spack.directory_layout import DirectoryLayout
from spack.filesystem_view import YamlFilesystemView
from spack.repo import RepoPath
def create_ext_pkg(name, prefix, extendee_spec, monkeypatch):
    """Return a concrete package object for *name* rooted at *prefix*,
    patched so that it reports *extendee_spec* as its extendee."""
    spec = spack.spec.Spec(name)
    spec._concrete = True
    spec.package.spec.prefix = prefix
    pkg = spec.package
    # extendee_spec is normally derived during concretization; override the
    # class-level property for the duration of the test.
    monkeypatch.setattr(pkg.__class__, "extendee_spec", extendee_spec)
    return pkg
def create_python_ext_pkg(name, prefix, python_spec, monkeypatch,
                          namespace=None):
    """Like :func:`create_ext_pkg`, but also tag the package with the
    python namespace it installs into (``None`` for no namespace)."""
    pkg = create_ext_pkg(name, prefix, python_spec, monkeypatch)
    pkg.py_namespace = namespace
    return pkg
def create_dir_structure(tmpdir, dir_structure):
    """Materialize a nested ``{name: children}`` mapping under *tmpdir*.

    Names ending in ``'/'`` are created as directories, other names as
    files; a truthy ``children`` mapping is created recursively inside
    the corresponding entry.
    """
    for name, children in dir_structure.items():
        is_dir = name.endswith('/')
        tmpdir.ensure(name, dir=is_dir)
        if children:
            create_dir_structure(tmpdir.join(name), children)
@pytest.fixture()
def builtin_and_mock_packages():
    """Fixture: activate a repo path with builtin repo ahead of the mock repo."""
    # These tests use mock_repo packages to test functionality of builtin
    # packages for python and perl. To test this we put the mock repo at lower
    # precedence than the builtin repo, so we test builtin.perl against
    # builtin.mock.perl-extension.
    repo_dirs = [spack.paths.packages_path, spack.paths.mock_packages_path]
    path = RepoPath(*repo_dirs)
    with spack.repo.use_repositories(path):
        yield
@pytest.fixture()
def python_and_extension_dirs(tmpdir, builtin_and_mock_packages):
    """Create on-disk prefixes for a fake python install and one python
    extension, and return them as ``(python_prefix, ext_prefix)`` strings.
    """
    # Skeleton of a python installation: interpreter + site-packages dir.
    python_dirs = {
        'bin/': {
            'python': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': None
            }
        }
    }
    python_name = 'python'
    python_prefix = tmpdir.join(python_name)
    create_dir_structure(python_prefix, python_dirs)
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    python_spec.package.spec.prefix = str(python_prefix)
    # Skeleton of an installed extension: a script plus a package dir.
    ext_dirs = {
        'bin/': {
            'py-ext-tool': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': {
                    'py-extension1/': {
                        'sample.py': None
                    }
                }
            }
        }
    }
    ext_name = 'py-extension1'
    ext_prefix = tmpdir.join(ext_name)
    create_dir_structure(ext_prefix, ext_dirs)
    # The extension ships an easy-install.pth listing its own egg and a
    # setuptools egg; activation is expected to keep only the former.
    easy_install_location = 'lib/python2.7/site-packages/easy-install.pth'
    with open(str(ext_prefix.join(easy_install_location)), 'w') as f:
        f.write("""path/to/ext1.egg
path/to/setuptools.egg""")
    return str(python_prefix), str(ext_prefix)
@pytest.fixture()
def namespace_extensions(tmpdir, builtin_and_mock_packages):
    """Create prefixes for two extensions that install into the same python
    namespace package, and return ``(ext1_prefix, ext2_prefix, namespace)``.
    """
    # Both extensions provide examplenamespace/__init__.py; only the
    # *_sample.py module distinguishes them.
    ext1_dirs = {
        'bin/': {
            'py-ext-tool1': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': {
                    'examplenamespace/': {
                        '__init__.py': None,
                        'ext1_sample.py': None
                    }
                }
            }
        }
    }
    ext2_dirs = {
        'bin/': {
            'py-ext-tool2': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': {
                    'examplenamespace/': {
                        '__init__.py': None,
                        'ext2_sample.py': None
                    }
                }
            }
        }
    }
    ext1_name = 'py-extension1'
    ext1_prefix = tmpdir.join(ext1_name)
    create_dir_structure(ext1_prefix, ext1_dirs)
    ext2_name = 'py-extension2'
    ext2_prefix = tmpdir.join(ext2_name)
    create_dir_structure(ext2_prefix, ext2_dirs)
    return str(ext1_prefix), str(ext2_prefix), 'examplenamespace'
def test_python_activation_with_files(tmpdir, python_and_extension_dirs,
                                      monkeypatch, builtin_and_mock_packages):
    """Global activation merges extension files into the python prefix and
    filters setuptools entries out of easy-install.pth."""
    python_prefix, ext_prefix = python_and_extension_dirs
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    python_spec.package.spec.prefix = python_prefix
    ext_pkg = create_python_ext_pkg(
        'py-extension1', ext_prefix, python_spec, monkeypatch)
    python_pkg = python_spec.package
    # Activate into the python package's own view (i.e. its prefix).
    python_pkg.activate(ext_pkg, python_pkg.view())
    assert os.path.exists(os.path.join(python_prefix, 'bin/py-ext-tool'))
    # The merged pth file must keep the extension's egg but drop setuptools.
    easy_install_location = 'lib/python2.7/site-packages/easy-install.pth'
    with open(os.path.join(python_prefix, easy_install_location), 'r') as f:
        easy_install_contents = f.read()
    assert 'ext1.egg' in easy_install_contents
    assert 'setuptools.egg' not in easy_install_contents
def test_python_activation_view(tmpdir, python_and_extension_dirs,
                                builtin_and_mock_packages, monkeypatch):
    """Activating into a separate view must not touch the python prefix."""
    python_prefix, ext_prefix = python_and_extension_dirs
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    python_spec.package.spec.prefix = python_prefix
    ext_pkg = create_python_ext_pkg('py-extension1', ext_prefix, python_spec,
                                    monkeypatch)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    python_pkg = python_spec.package
    python_pkg.activate(ext_pkg, view)
    # Files land in the view, not in the python installation itself.
    assert not os.path.exists(os.path.join(python_prefix, 'bin/py-ext-tool'))
    assert os.path.exists(os.path.join(view_dir, 'bin/py-ext-tool'))
def test_python_ignore_namespace_init_conflict(
        tmpdir, namespace_extensions, builtin_and_mock_packages, monkeypatch):
    """Test the view update logic in PythonPackage ignores conflicting
    instances of __init__ for packages which are in the same namespace.
    """
    ext1_prefix, ext2_prefix, py_namespace = namespace_extensions
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    # Both extensions share the same py_namespace, so their duplicate
    # __init__.py files should not be treated as a merge conflict.
    ext1_pkg = create_python_ext_pkg('py-extension1', ext1_prefix, python_spec,
                                     monkeypatch, py_namespace)
    ext2_pkg = create_python_ext_pkg('py-extension2', ext2_prefix, python_spec,
                                     monkeypatch, py_namespace)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    python_pkg = python_spec.package
    python_pkg.activate(ext1_pkg, view)
    # Normally handled by Package.do_activate, but here we activate directly
    view.extensions_layout.add_extension(python_spec, ext1_pkg.spec)
    python_pkg.activate(ext2_pkg, view)
    f1 = 'lib/python2.7/site-packages/examplenamespace/ext1_sample.py'
    f2 = 'lib/python2.7/site-packages/examplenamespace/ext2_sample.py'
    init_file = 'lib/python2.7/site-packages/examplenamespace/__init__.py'
    # Both extensions' modules and the single shared __init__ are present.
    assert os.path.exists(os.path.join(view_dir, f1))
    assert os.path.exists(os.path.join(view_dir, f2))
    assert os.path.exists(os.path.join(view_dir, init_file))
def test_python_keep_namespace_init(
        tmpdir, namespace_extensions, builtin_and_mock_packages, monkeypatch):
    """Test the view update logic in PythonPackage keeps the namespace
    __init__ file as long as one package in the namespace still
    exists.
    """
    ext1_prefix, ext2_prefix, py_namespace = namespace_extensions
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    ext1_pkg = create_python_ext_pkg('py-extension1', ext1_prefix, python_spec,
                                     monkeypatch, py_namespace)
    ext2_pkg = create_python_ext_pkg('py-extension2', ext2_prefix, python_spec,
                                     monkeypatch, py_namespace)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    python_pkg = python_spec.package
    python_pkg.activate(ext1_pkg, view)
    # Normally handled by Package.do_activate, but here we activate directly
    view.extensions_layout.add_extension(python_spec, ext1_pkg.spec)
    python_pkg.activate(ext2_pkg, view)
    view.extensions_layout.add_extension(python_spec, ext2_pkg.spec)
    f1 = 'lib/python2.7/site-packages/examplenamespace/ext1_sample.py'
    init_file = 'lib/python2.7/site-packages/examplenamespace/__init__.py'
    # Deactivating one of two namespace members removes its module but
    # must preserve the shared __init__.
    python_pkg.deactivate(ext1_pkg, view)
    view.extensions_layout.remove_extension(python_spec, ext1_pkg.spec)
    assert not os.path.exists(os.path.join(view_dir, f1))
    assert os.path.exists(os.path.join(view_dir, init_file))
    # Deactivating the last member removes the __init__ as well.
    python_pkg.deactivate(ext2_pkg, view)
    view.extensions_layout.remove_extension(python_spec, ext2_pkg.spec)
    assert not os.path.exists(os.path.join(view_dir, init_file))
def test_python_namespace_conflict(tmpdir, namespace_extensions,
                                   monkeypatch, builtin_and_mock_packages):
    """Test the view update logic in PythonPackage reports an error when two
    python extensions with different namespaces have a conflicting __init__
    file.
    """
    ext1_prefix, ext2_prefix, py_namespace = namespace_extensions
    other_namespace = py_namespace + 'other'
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    # Same on-disk __init__.py path, but the packages claim *different*
    # namespaces, so the conflict must not be silently ignored.
    ext1_pkg = create_python_ext_pkg('py-extension1', ext1_prefix, python_spec,
                                     monkeypatch, py_namespace)
    ext2_pkg = create_python_ext_pkg('py-extension2', ext2_prefix, python_spec,
                                     monkeypatch, other_namespace)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    python_pkg = python_spec.package
    python_pkg.activate(ext1_pkg, view)
    view.extensions_layout.add_extension(python_spec, ext1_pkg.spec)
    with pytest.raises(MergeConflictError):
        python_pkg.activate(ext2_pkg, view)
@pytest.fixture()
def perl_and_extension_dirs(tmpdir, builtin_and_mock_packages):
    """Create on-disk prefixes for a fake perl install and one perl
    extension, and return them as ``(perl_prefix, ext_prefix)`` strings.
    """
    # Skeleton of a perl installation; the lib path encodes the version.
    perl_dirs = {
        'bin/': {
            'perl': None
        },
        'lib/': {
            'site_perl/': {
                '5.24.1/': {
                    'x86_64-linux/': None
                }
            }
        }
    }
    perl_name = 'perl'
    perl_prefix = tmpdir.join(perl_name)
    create_dir_structure(perl_prefix, perl_dirs)
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_spec.package.spec.prefix = str(perl_prefix)
    # Skeleton of an installed perl extension mirroring the same layout.
    ext_dirs = {
        'bin/': {
            'perl-ext-tool': None
        },
        'lib/': {
            'site_perl/': {
                '5.24.1/': {
                    'x86_64-linux/': {
                        'TestExt/': {
                        }
                    }
                }
            }
        }
    }
    ext_name = 'perl-extension'
    ext_prefix = tmpdir.join(ext_name)
    create_dir_structure(ext_prefix, ext_dirs)
    return str(perl_prefix), str(ext_prefix)
def test_perl_activation(tmpdir, builtin_and_mock_packages, monkeypatch):
    """Smoke test: activating a perl extension with empty prefixes succeeds."""
    # Note the lib directory is based partly on the perl version
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_name = 'perl'
    tmpdir.ensure(perl_name, dir=True)
    perl_prefix = str(tmpdir.join(perl_name))
    # Set the prefix on the package's spec reference because that is a copy of
    # the original spec
    perl_spec.package.spec.prefix = perl_prefix
    ext_name = 'perl-extension'
    tmpdir.ensure(ext_name, dir=True)
    ext_pkg = create_ext_pkg(
        ext_name, str(tmpdir.join(ext_name)), perl_spec, monkeypatch)
    perl_pkg = perl_spec.package
    # No assertion needed: the test passes if activation does not raise.
    perl_pkg.activate(ext_pkg, perl_pkg.view())
def test_perl_activation_with_files(tmpdir, perl_and_extension_dirs,
                                    monkeypatch, builtin_and_mock_packages):
    """Global activation merges extension files into the perl prefix."""
    perl_prefix, ext_prefix = perl_and_extension_dirs
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_spec.package.spec.prefix = perl_prefix
    ext_pkg = create_ext_pkg(
        'perl-extension', ext_prefix, perl_spec, monkeypatch)
    perl_pkg = perl_spec.package
    perl_pkg.activate(ext_pkg, perl_pkg.view())
    # The extension's script must now be visible in the perl prefix.
    assert os.path.exists(os.path.join(perl_prefix, 'bin/perl-ext-tool'))
def test_perl_activation_view(tmpdir, perl_and_extension_dirs,
                              monkeypatch, builtin_and_mock_packages):
    """Activating into a separate view must not touch the perl prefix."""
    perl_prefix, ext_prefix = perl_and_extension_dirs
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_spec.package.spec.prefix = perl_prefix
    ext_pkg = create_ext_pkg(
        'perl-extension', ext_prefix, perl_spec, monkeypatch)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    perl_pkg = perl_spec.package
    perl_pkg.activate(ext_pkg, view)
    # Files land in the view, not in the perl installation itself.
    assert not os.path.exists(os.path.join(perl_prefix, 'bin/perl-ext-tool'))
    assert os.path.exists(os.path.join(view_dir, 'bin/perl-ext-tool'))
def test_is_activated_upstream_extendee(tmpdir, builtin_and_mock_packages,
                                        monkeypatch):
    """When an extendee is installed upstream, make sure that the extension
    spec is never considered to be globally activated for it.
    """
    extendee_spec = spack.spec.Spec('python')
    extendee_spec._concrete = True
    python_name = 'python'
    tmpdir.ensure(python_name, dir=True)
    python_prefix = str(tmpdir.join(python_name))
    # Set the prefix on the package's spec reference because that is a copy of
    # the original spec
    extendee_spec.package.spec.prefix = python_prefix
    # Simulate an upstream (read-only) installation of the extendee.
    monkeypatch.setattr(extendee_spec.package.__class__,
                        'installed_upstream', True)
    ext_name = 'py-extension1'
    tmpdir.ensure(ext_name, dir=True)
    ext_pkg = create_ext_pkg(
        ext_name, str(tmpdir.join(ext_name)), extendee_spec, monkeypatch)
    # The view should not be checked at all if the extendee is installed
    # upstream, so use 'None' here
    mock_view = None
    assert not ext_pkg.is_activated(mock_view)
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/jsoncpp/package.py
|
<filename>var/spack/repos/builtin/packages/jsoncpp/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Jsoncpp(CMakePackage):
    """JsonCpp is a C++ library that allows manipulating JSON values,
    including serialization and deserialization to and from strings.
    It can also preserve existing comment in unserialization/serialization
    steps, making it a convenient format to store user input files."""
    homepage = "https://github.com/open-source-parsers/jsoncpp"
    url = "https://github.com/open-source-parsers/jsoncpp/archive/1.7.3.tar.gz"
    version('1.9.4', sha256='e34a628a8142643b976c7233ef381457efad79468c67cb1ae0b83a33d7493999')
    version('1.9.3', sha256='8593c1d69e703563d94d8c12244e2e18893eeb9a8a9f8aa3d09a327aa45c8f7d')
    version('1.9.2', sha256='77a402fb577b2e0e5d0bdc1cf9c65278915cdb25171e3452c68b6da8a561f8f0')
    version('1.9.1', sha256='c7b40f5605dd972108f503f031b20186f5e5bca2b65cd4b8bd6c3e4ba8126697')
    version('1.9.0', sha256='bdd3ba9ed1f110b3eb57474d9094e90ab239b93b4803b4f9b1722c281e85a4ac')
    version('1.8.4', sha256='c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6')
    version('1.8.3', sha256='3671ba6051e0f30849942cc66d1798fdf0362d089343a83f704c09ee7156604f')
    version('1.8.2', sha256='811f5aee20df2ef0868a73a976ec6f9aab61f4ca71c66eddf38094b2b3078eef')
    version('1.8.1', sha256='858db2faf348f89fdf1062bd3e79256772e897e7f17df73e0624edf004f2f9ac')
    version('1.8.0', sha256='5deb2462cbf0c0121c9d6c9823ec72fe71417e34242e3509bc7c003d526465bc')
    version('1.7.7', sha256='087640ebcf7fbcfe8e2717a0b9528fff89c52fcf69fa2a18cc2b538008098f97')
    version('1.7.6', sha256='07cf5d4f184394ec0a9aa657dd4c13ea682c52a1ab4da2fb176cb2d5501101e8')
    version('1.7.5', sha256='4338c6cab8af8dee6cdfd54e6218bd0533785f552c6162bb083f8dd28bf8fbbe')
    version('1.7.4', sha256='10dcd0677e80727e572a1e462193e51a5fde3e023b99e144b2ee1a469835f769')
    version('1.7.3', sha256='1cfcad14054039ba97c22531888796cb9369e6353f257aacaad34fda956ada53')
    # Extends the stock CMake build_type variant with a 'Coverage' value.
    variant('build_type', default='RelWithDebInfo',
            description='The build type to build',
            values=('Debug', 'Release', 'RelWithDebInfo',
                    'MinSizeRel', 'Coverage'))
    variant('cxxstd',
            default='default',
            values=('default', '98', '11', '14', '17'),
            multi=False,
            description='Use the specified C++ standard when building.')
    depends_on('cmake@3.1:', type='build')
    # python drives the test suite, so it is only a test dependency.
    depends_on('python', type='test')
    # Ref: https://github.com/open-source-parsers/jsoncpp/pull/1023
    # Released in 1.9.2, patch does not apply cleanly across releases.
    # May apply to more compilers in the future.
    @when('@:1.9.1 %clang@10.0.0:')
    def patch(self):
        # Cast to double before comparing, to silence clang >= 10 errors.
        filter_file(
            'return d >= min && d <= max;',
            'return d >= static_cast<double>(min) && '
            'd <= static_cast<double>(max);',
            'src/lib_json/json_value.cpp')
    def cmake_args(self):
        args = ['-DBUILD_SHARED_LIBS=ON']
        # Only force a -std= level when the user picked one explicitly.
        cxxstd = self.spec.variants['cxxstd'].value
        if cxxstd != 'default':
            args.append('-DCMAKE_CXX_STANDARD={0}'.format(cxxstd))
        # Build the test suite only when `spack install --test` was used.
        if self.run_tests:
            args.append('-DJSONCPP_WITH_TESTS=ON')
        else:
            args.append('-DJSONCPP_WITH_TESTS=OFF')
        return args
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-dockerpy-creds/package.py
|
<reponame>player1537-forks/spack<filename>var/spack/repos/builtin/packages/py-dockerpy-creds/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDockerpyCreds(PythonPackage):
    """Python bindings for the docker credentials store API """
    homepage = "https://github.com/shin-/dockerpy-creds"
    url = "https://github.com/shin-/dockerpy-creds/archive/0.4.0.tar.gz"
    version('0.4.0', sha256='c76c2863c6e9a31b8f70ee5b8b0e5ac6860bfd422d930c04a387599e4272b4b9')
    version('0.3.0', sha256='3660a5e9fc7c2816ab967e4bdb4802f211e35011357ae612a601d6944721e153')
    version('0.2.3', sha256='7278a7e3c904ccea4bcc777b991a39cac9d4702bfd7d76b95ff6179500d886c4')
    version('0.2.2', sha256='bb26b8a8882b9d115a43169663cd9557d132a68147d9a1c77cb4a3ffc9897398')
    version('0.2.1', sha256='7882efd95f44b5df166b4e34c054b486dc7287932a49cd491edf406763695351')
    version('0.2.0', sha256='f2838348e1175079e3062bf0769b9fa5070c29f4d94435674b9f8a76144f4e5b')
    version('0.1.0', sha256='f7ab290cb536e7ef1c774d4eb5df86237e579a9c7a87805da39ff07bd14e0aff')
    # Supports both python 2 and 3; py-six provides the 2/3 compat layer.
    depends_on('python@2.0:2.8,3.4:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-six', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/r-assertive-code/package.py
|
<gh_stars>10-100
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAssertiveCode(RPackage):
    """Assertions to Check Properties of Code.
    A set of predicates and assertions for checking the properties of code.
    This is mainly for use by other package developers who want to include
    run-time testing features in their own packages. End-users will usually
    want to use assertive directly."""
    # CRAN package name; Spack derives homepage/url from this.
    cran = "assertive.code"
    version('0.0-3', sha256='ef80e8d1d683d776a7618e78ddccffca7f72ab4a0fcead90c670bb8f8cb90be2')
    depends_on('r@3.0.0:', type=('build', 'run'))
    depends_on('r-assertive-base@0.0-2:', type=('build', 'run'))
    depends_on('r-assertive-properties', type=('build', 'run'))
    depends_on('r-assertive-types', type=('build', 'run'))
|
player1537-forks/spack
|
var/spack/repos/builtin/packages/py-or-tools/package.py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *  # noqa: F401,F403  -- provides CMakePackage, directives, pip helpers


class PyOrTools(CMakePackage):
    """This project hosts operations research tools developed at
    Google and made available as open source under the Apache 2.0
    License."""
    homepage = "https://developers.google.com/optimization/"
    url = "https://github.com/google/or-tools/archive/v7.8.tar.gz"
    version('7.8', sha256='d93a9502b18af51902abd130ff5f23768fcf47e266e6d1f34b3586387aa2de68')
    depends_on('cmake@3.14:', type='build')
    # Python bindings are assembled into a wheel and installed with pip.
    depends_on('py-pip', type='build')
    depends_on('py-wheel', type='build')
    depends_on('py-setuptools', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-protobuf@3.12.2:', type=('build', 'run'))
    depends_on('protobuf@3.12.2:')
    depends_on('py-six@1.10:', type=('build', 'run'))
    # Pinned C++ solver/back-end libraries used by the v7.8 build.
    depends_on('gflags@2.2.2')
    depends_on('glog@0.4.0')
    # NOTE(review): exact pin combined with the ranged constraint above
    # restricts protobuf to exactly 3.12.2 -- presumably deliberate; confirm.
    depends_on('protobuf@3.12.2')
    depends_on('abseil-cpp@20200225.2')
    depends_on('cbc@2.10.5')
    depends_on('cgl@0.60.3')
    depends_on('clp@1.17.4')
    depends_on('osi@0.108.6')
    depends_on('coinutils@2.11.4')
    depends_on('swig')
    depends_on('python', type=('build', 'run'))
    depends_on('py-virtualenv', type='build')
    depends_on('scipoptsuite')
    extends('python')
    def cmake_args(self):
        """CMake options: use Spack-provided deps and build python bindings."""
        cmake_args = []
        cmake_args.append('-DBUILD_DEPS=OFF')
        cmake_args.append('-DBUILD_PYTHON=ON')
        cmake_args.append('-DBUILD_TESTING=OFF')
        return cmake_args
    def install(self, spec, prefix):
        """Install the C++ artifacts, then pip-install the python bindings."""
        with working_dir(self.build_directory):
            make("install")
        with working_dir(join_path(self.build_directory, 'python')):
            args = std_pip_args + ['--prefix=' + prefix, '.']
            pip(*args)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.