repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
gevent | gevent-master/src/gevent/tests/test__core_callback.py | import gevent
from gevent.hub import get_hub
from gevent import testing as greentest
class Test(greentest.TestCase):
def test(self):
loop = get_hub().loop
called = []
def f():
called.append(1)
x = loop.run_callback(f)
assert x, x
gevent.sleep(0)
assert called == [1], called
assert not x, (x, bool(x))
x = loop.run_callback(f)
assert x, x
x.stop()
assert not x, x
gevent.sleep(0)
assert called == [1], called
assert not x, x
if __name__ == '__main__':
greentest.main()
| 618 | 17.757576 | 39 | py |
gevent | gevent-master/src/gevent/tests/_import_wait.py | # test__import_wait.py calls this via an import statement,
# so all of this is happening with import locks held (especially on py2)
import gevent
def fn2():
return 2
# A blocking function doesn't raise LoopExit
def fn():
return gevent.wait([gevent.spawn(fn2), gevent.spawn(fn2)])
gevent.spawn(fn).get()
# Marshalling the traceback across greenlets doesn't
# raise LoopExit
def raise_name_error():
raise NameError("ThisIsExpected")
try:
gevent.spawn(raise_name_error).get()
raise AssertionError("Should fail")
except NameError as e:
x = e
| 570 | 20.148148 | 72 | py |
gevent | gevent-master/src/gevent/tests/test__socket.py | from __future__ import print_function
from __future__ import absolute_import
from gevent import monkey
# This line can be commented out so that most tests run with the
# system socket for comparison.
monkey.patch_all()
import sys
import array
import socket
import time
import unittest
from functools import wraps
import gevent
from gevent._compat import reraise
import gevent.testing as greentest
from gevent.testing import six
from gevent.testing import LARGE_TIMEOUT
from gevent.testing import support
from gevent.testing import params
from gevent.testing.sockets import tcp_listener
from gevent.testing.skipping import skipWithoutExternalNetwork
from gevent.testing.skipping import skipOnMacOnCI
# we use threading on purpose so that we can test both regular and
# gevent sockets with the same code
from threading import Thread as _Thread
from threading import Event
errno_types = int
# socket.accept/unwrap/makefile aren't found for some reason
# pylint:disable=no-member
class BaseThread(object):
terminal_exc = None
def __init__(self, target):
@wraps(target)
def errors_are_fatal(*args, **kwargs):
try:
return target(*args, **kwargs)
except: # pylint:disable=bare-except
self.terminal_exc = sys.exc_info()
raise
self.target = errors_are_fatal
class GreenletThread(BaseThread):
def __init__(self, target=None, args=()):
BaseThread.__init__(self, target)
self.glet = gevent.spawn(self.target, *args)
def join(self, *args, **kwargs):
return self.glet.join(*args, **kwargs)
def is_alive(self):
return not self.glet.ready()
if not monkey.is_module_patched('threading'):
class ThreadThread(BaseThread, _Thread):
def __init__(self, **kwargs):
target = kwargs.pop('target')
BaseThread.__init__(self, target)
_Thread.__init__(self, target=self.target, **kwargs)
self.start()
Thread = ThreadThread
else:
Thread = GreenletThread
class TestTCP(greentest.TestCase):
__timeout__ = None
TIMEOUT_ERROR = socket.timeout
long_data = ", ".join([str(x) for x in range(20000)])
if not isinstance(long_data, bytes):
long_data = long_data.encode('ascii')
def setUp(self):
super(TestTCP, self).setUp()
if '-v' in sys.argv:
printed = []
try:
from time import perf_counter as now
except ImportError:
from time import time as now
def log(*args):
if not printed:
print()
printed.append(1)
print("\t -> %0.6f" % now(), *args)
orig_cot = self._close_on_teardown
def cot(o):
log("Registering for teardown", o)
def c(o=o):
log("Closing on teardown", o)
o.close()
o = None
orig_cot(c)
return o
self._close_on_teardown = cot
else:
def log(*_args):
"Does nothing"
self.log = log
self.listener = self._close_on_teardown(self._setup_listener())
# It is important to watch the lifetimes of socket objects and
# ensure that:
# (1) they are closed; and
# (2) *before* the next test begins.
#
# For example, it's a bad bad thing to leave a greenlet running past the
# scope of the individual test method if that greenlet will close
# a socket object --- especially if that socket object might also have been
# closed explicitly.
#
# On Windows, we've seen issue with filenos getting reused while something
# still thinks they have the original fileno around. When they later
# close that fileno, a completely unrelated object is closed.
self.port = self.listener.getsockname()[1]
def _setup_listener(self):
return tcp_listener()
def create_connection(self, host=None, port=None, timeout=None,
blocking=None):
sock = self._close_on_teardown(socket.socket())
sock.connect((host or params.DEFAULT_CONNECT, port or self.port))
if timeout is not None:
sock.settimeout(timeout)
if blocking is not None:
sock.setblocking(blocking)
return sock
def _test_sendall(self, data, match_data=None, client_method='sendall',
**client_args):
# pylint:disable=too-many-locals,too-many-branches,too-many-statements
log = self.log
log("test_sendall using method", client_method)
read_data = []
accepted_event = Event()
def accept_and_read():
log("\taccepting", self.listener)
conn, _ = self.listener.accept()
try:
with conn.makefile(mode='rb') as r:
log("\taccepted on server; client conn is", conn, "file is", r)
accepted_event.set()
log("\treading")
read_data.append(r.read())
log("\tdone reading", r, "got bytes", len(read_data[0]))
del r
finally:
conn.close()
del conn
server = Thread(target=accept_and_read)
try:
log("creating client connection")
client = self.create_connection(**client_args)
# It's important to wait for the server to fully accept before
# we shutdown and close the socket. In SSL mode, the number
# and timing of data exchanges to complete the handshake and
# thus exactly when greenlet switches occur, varies by TLS version.
#
# It turns out that on < TLS1.3, we were getting lucky and the
# server was the greenlet that raced ahead and blocked in r.read()
# before the client returned from create_connection().
#
# But when TLS 1.3 was deployed (OpenSSL 1.1), the *client* was the
# one that raced ahead while the server had yet to return from
# self.listener.accept(). So the client sent the data to the socket,
# and closed, before the server could do anything, and the server,
# when it got switched to by server.join(), found its new socket
# dead.
accepted_event.wait()
log("Client got accepted event from server", client, "; sending data", len(data))
try:
x = getattr(client, client_method)(data)
log("Client sent data: result from method", x)
finally:
log("Client will unwrap and shutdown")
if hasattr(client, 'unwrap'):
# Are we dealing with an SSLSocket? If so, unwrap it
# before attempting to shut down the socket. This does the
# SSL shutdown handshake and (hopefully) stops ``accept_and_read``
# from generating ``ConnectionResetError`` on AppVeyor.
try:
client = client.unwrap()
except (ValueError, OSError):
# PyPy raises _cffi_ssl._stdssl.error.SSLSyscallError,
# which is an IOError in 2.7 and OSError in 3.7
pass
try:
# The implicit reference-based nastiness of Python 2
# sockets interferes, especially when using SSL sockets.
# The best way to get a decent FIN to the server is to shutdown
# the output. Doing that on Python 3, OTOH, is contraindicated
# except on PyPy, so this used to read ``PY2 or PYPY``. But
# it seems that a shutdown is generally good practice, and I didn't
# document what errors we saw without it. Per issue #1637
# lets do a shutdown everywhere, but only after removing any
# SSL wrapping.
client.shutdown(socket.SHUT_RDWR)
except OSError:
pass
log("Client will close")
client.close()
finally:
server.join(10)
assert not server.is_alive()
if server.terminal_exc:
reraise(*server.terminal_exc)
if match_data is None:
match_data = self.long_data
read_data = read_data[0].split(b',')
match_data = match_data.split(b',')
self.assertEqual(read_data[0], match_data[0])
self.assertEqual(len(read_data), len(match_data))
self.assertEqual(read_data, match_data)
def test_sendall_str(self):
self._test_sendall(self.long_data)
if six.PY2:
def test_sendall_unicode(self):
self._test_sendall(six.text_type(self.long_data))
@skipOnMacOnCI("Sometimes fails for no apparent reason (buffering?)")
def test_sendall_array(self):
data = array.array("B", self.long_data)
self._test_sendall(data)
def test_sendall_empty(self):
data = b''
self._test_sendall(data, data)
def test_sendall_empty_with_timeout(self):
# Issue 719
data = b''
self._test_sendall(data, data, timeout=10)
def test_sendall_nonblocking(self):
# https://github.com/benoitc/gunicorn/issues/1282
# Even if the socket is non-blocking, we make at least
# one attempt to send data. Under Py2 before this fix, we
# would incorrectly immediately raise a timeout error
data = b'hi\n'
self._test_sendall(data, data, blocking=False)
def test_empty_send(self):
# Issue 719
data = b''
self._test_sendall(data, data, client_method='send')
def test_fullduplex(self):
N = 100000
def server():
remote_client, _ = self.listener.accept()
self._close_on_teardown(remote_client)
# start reading, then, while reading, start writing. the reader should not hang forever
sender = Thread(target=remote_client.sendall,
args=((b't' * N),))
try:
result = remote_client.recv(1000)
self.assertEqual(result, b'hello world')
finally:
sender.join()
server_thread = Thread(target=server)
client = self.create_connection()
client_file = self._close_on_teardown(client.makefile())
client_reader = Thread(target=client_file.read, args=(N, ))
time.sleep(0.1)
client.sendall(b'hello world')
time.sleep(0.1)
# close() used to hang
client_file.close()
client.close()
# this tests "full duplex" bug;
server_thread.join()
client_reader.join()
def test_recv_timeout(self):
def accept():
# make sure the conn object stays alive until the end;
# premature closing triggers a ResourceWarning and
# EOF on the client.
conn, _ = self.listener.accept()
self._close_on_teardown(conn)
acceptor = Thread(target=accept)
client = self.create_connection()
try:
client.settimeout(1)
start = time.time()
with self.assertRaises(self.TIMEOUT_ERROR):
client.recv(1024)
took = time.time() - start
self.assertTimeWithinRange(took, 1 - 0.1, 1 + 0.1)
finally:
acceptor.join()
# Subclasses can disable this
_test_sendall_timeout_check_time = True
# Travis-CI container infrastructure is configured with
# large socket buffers, at least 2MB, as-of Jun 3, 2015,
# so we must be sure to send more data than that.
# In 2018, this needs to be increased *again* as a smaller value was
# still often being sent.
_test_sendall_data = b'hello' * 100000000
# This doesn't make much sense...why are we really skipping this?
@greentest.skipOnWindows("On Windows send() accepts whatever is thrown at it")
def test_sendall_timeout(self):
client_sock = []
acceptor = Thread(target=lambda: client_sock.append(self.listener.accept()))
client = self.create_connection()
time.sleep(0.1)
assert client_sock
client.settimeout(0.1)
start = time.time()
try:
with self.assertRaises(self.TIMEOUT_ERROR):
client.sendall(self._test_sendall_data)
if self._test_sendall_timeout_check_time:
took = time.time() - start
self.assertTimeWithinRange(took, 0.09, 0.2)
finally:
acceptor.join()
client.close()
client_sock[0][0].close()
def test_makefile(self):
def accept_once():
conn, _ = self.listener.accept()
fd = conn.makefile(mode='wb')
fd.write(b'hello\n')
fd.flush()
fd.close()
conn.close() # for pypy
acceptor = Thread(target=accept_once)
try:
client = self.create_connection()
# Closing the socket doesn't close the file
client_file = client.makefile(mode='rb')
client.close()
line = client_file.readline()
self.assertEqual(line, b'hello\n')
self.assertEqual(client_file.read(), b'')
client_file.close()
finally:
acceptor.join()
def test_makefile_timeout(self):
def accept_once():
conn, _ = self.listener.accept()
try:
time.sleep(0.3)
finally:
conn.close() # for pypy
acceptor = Thread(target=accept_once)
try:
client = self.create_connection()
client.settimeout(0.1)
fd = client.makefile(mode='rb')
self.assertRaises(self.TIMEOUT_ERROR, fd.readline)
client.close()
fd.close()
finally:
acceptor.join()
def test_attributes(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self.assertIs(s.family, socket.AF_INET)
self.assertEqual(s.type, socket.SOCK_DGRAM)
self.assertEqual(0, s.proto)
if hasattr(socket, 'SOCK_NONBLOCK'):
s.settimeout(1)
self.assertIs(s.family, socket.AF_INET)
s.setblocking(0)
std_socket = monkey.get_original('socket', 'socket')(socket.AF_INET, socket.SOCK_DGRAM, 0)
try:
std_socket.setblocking(0)
self.assertEqual(std_socket.type, s.type)
finally:
std_socket.close()
s.close()
def test_connect_ex_nonblocking_bad_connection(self):
# Issue 841
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.setblocking(False)
ret = s.connect_ex((greentest.DEFAULT_LOCAL_HOST_ADDR, support.find_unused_port()))
self.assertIsInstance(ret, errno_types)
finally:
s.close()
@skipWithoutExternalNetwork("Tries to resolve hostname")
def test_connect_ex_gaierror(self):
# Issue 841
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
with self.assertRaises(socket.gaierror):
s.connect_ex(('foo.bar.fizzbuzz', support.find_unused_port()))
finally:
s.close()
@skipWithoutExternalNetwork("Tries to resolve hostname")
def test_connect_ex_not_call_connect(self):
# Issue 1931
def do_it(sock):
try:
with self.assertRaises(socket.gaierror):
sock.connect_ex(('foo.bar.fizzbuzz', support.find_unused_port()))
finally:
sock.close()
# An instance attribute doesn't matter because we can't set it
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with self.assertRaises(AttributeError):
s.connect = None
s.close()
# A subclass
class S(socket.socket):
def connect(self, *args):
raise AssertionError('Should not be called')
s = S(socket.AF_INET, socket.SOCK_STREAM)
do_it(s)
def test_connect_ex_nonblocking_overflow(self):
# Issue 841
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.setblocking(False)
with self.assertRaises(OverflowError):
s.connect_ex((greentest.DEFAULT_LOCAL_HOST_ADDR, 65539))
finally:
s.close()
@unittest.skipUnless(hasattr(socket, 'SOCK_CLOEXEC'),
"Requires SOCK_CLOEXEC")
def test_connect_with_type_flags_ignored(self):
# Issue 944
# If we have SOCK_CLOEXEC or similar, we shouldn't be passing
# them through to the getaddrinfo call that connect() makes
SOCK_CLOEXEC = socket.SOCK_CLOEXEC # pylint:disable=no-member
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM | SOCK_CLOEXEC)
def accept_once():
conn, _ = self.listener.accept()
fd = conn.makefile(mode='wb')
fd.write(b'hello\n')
fd.close()
conn.close()
acceptor = Thread(target=accept_once)
try:
s.connect((params.DEFAULT_CONNECT, self.port))
fd = s.makefile(mode='rb')
self.assertEqual(fd.readline(), b'hello\n')
fd.close()
s.close()
finally:
acceptor.join()
class TestCreateConnection(greentest.TestCase):
__timeout__ = LARGE_TIMEOUT
def test_refuses(self, **conn_args):
connect_port = support.find_unused_port()
with self.assertRaisesRegex(
socket.error,
# We really expect "connection refused". It's unclear
# where/why we would get '[errno -2] name or service
# not known' but it seems some systems generate that.
# https://github.com/gevent/gevent/issues/1389 Somehow
# extremly rarely we've also seen 'address already in
# use', which makes even less sense. The manylinux
# 2010 environment produces 'errno 99 Cannot assign
# requested address', which, I guess?
# Meanwhile, the musllinux_1 environment produces
# '[Errno 99] Address not available'
'refused|not known|already in use|assign|not available'
):
socket.create_connection(
(greentest.DEFAULT_BIND_ADDR, connect_port),
timeout=30,
**conn_args
)
def test_refuses_from_port(self):
source_port = support.find_unused_port()
# Usually we don't want to bind/connect to '', but
# using it as the source is required if we don't want to hang,
# at least on some systems (OS X)
self.test_refuses(source_address=('', source_port))
@greentest.ignores_leakcheck
@skipWithoutExternalNetwork("Tries to resolve hostname")
def test_base_exception(self):
# such as a GreenletExit or a gevent.timeout.Timeout
class E(BaseException):
pass
class MockSocket(object):
created = ()
closed = False
def __init__(self, *_):
MockSocket.created += (self,)
def connect(self, _):
raise E(_)
def close(self):
self.closed = True
def mockgetaddrinfo(*_):
return [(1, 2, 3, 3, 5),]
import gevent.socket as gsocket
# Make sure we're monkey patched
self.assertEqual(gsocket.create_connection, socket.create_connection)
orig_socket = gsocket.socket
orig_getaddrinfo = gsocket.getaddrinfo
try:
gsocket.socket = MockSocket
gsocket.getaddrinfo = mockgetaddrinfo
with self.assertRaises(E):
socket.create_connection(('host', 'port'))
self.assertEqual(1, len(MockSocket.created))
self.assertTrue(MockSocket.created[0].closed)
finally:
MockSocket.created = ()
gsocket.socket = orig_socket
gsocket.getaddrinfo = orig_getaddrinfo
class TestFunctions(greentest.TestCase):
@greentest.ignores_leakcheck
# Creating new types in the function takes a cycle to cleanup.
def test_wait_timeout(self):
# Issue #635
from gevent import socket as gsocket
class io(object):
callback = None
def start(self, *_args):
gevent.sleep(10)
with self.assertRaises(gsocket.timeout):
gsocket.wait(io(), timeout=0.01) # pylint:disable=no-member
def test_signatures(self):
# https://github.com/gevent/gevent/issues/960
exclude = []
if greentest.PYPY:
# Up through at least PyPy 5.7.1, they define these as
# gethostbyname(host), whereas the official CPython argument name
# is hostname. But cpython doesn't allow calling with keyword args.
# Likewise for gethostbyaddr: PyPy uses host, cpython uses ip_address
exclude.append('gethostbyname')
exclude.append('gethostbyname_ex')
exclude.append('gethostbyaddr')
if sys.version_info[:2] < (3, 11):
# 3.11+ add ``*, all_errors=False``. We allow that on all versions,
# forcing it to a false value if the user sends a true value before
# exception groups exist.
exclude.append('create_connection')
self.assertMonkeyPatchedFuncSignatures('socket', exclude=exclude)
def test_resolve_ipv6_scope_id(self):
from gevent import _socketcommon as SC
if not SC.__socket__.has_ipv6:
self.skipTest("Needs IPv6") # pragma: no cover
if not hasattr(SC.__socket__, 'inet_pton'):
self.skipTest("Needs inet_pton") # pragma: no cover
# A valid IPv6 address, with a scope.
addr = ('2607:f8b0:4000:80e::200e', 80, 0, 9)
# Mock socket
class sock(object):
family = SC.AF_INET6 # pylint:disable=no-member
self.assertIs(addr, SC._resolve_addr(sock, addr))
class TestSocket(greentest.TestCase):
def test_shutdown_when_closed(self):
# https://github.com/gevent/gevent/issues/1089
# we once raised an AttributeError.
s = socket.socket()
s.close()
with self.assertRaises(socket.error):
s.shutdown(socket.SHUT_RDWR)
def test_can_be_weak_ref(self):
# stdlib socket can be weak reffed.
import weakref
s = socket.socket()
try:
w = weakref.ref(s)
self.assertIsNotNone(w)
finally:
s.close()
def test_has_no_dict(self):
# stdlib socket has no dict
s = socket.socket()
try:
with self.assertRaises(AttributeError):
getattr(s, '__dict__')
finally:
s.close()
if __name__ == '__main__':
greentest.main()
| 23,489 | 34.483384 | 102 | py |
gevent | gevent-master/src/gevent/tests/test__local.py | import gevent.testing as greentest
from copy import copy
# Comment the line below to see that the standard thread.local is working correct
from gevent import monkey; monkey.patch_all()
from threading import local
from threading import Thread
from zope import interface
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping # pylint:disable=deprecated-class
class ReadProperty(object):
"""A property that can be overridden"""
# A non-data descriptor
def __get__(self, inst, klass):
return 42 if inst is not None else self
class A(local):
__slots__ = ['initialized', 'obj']
path = ''
type_path = 'MyPath'
read_property = ReadProperty()
def __init__(self, obj):
super(A, self).__init__()
if not hasattr(self, 'initialized'):
self.obj = obj
self.path = ''
class Obj(object):
pass
# These next two classes have to be global to avoid the leakchecks
deleted_sentinels = []
created_sentinels = []
class Sentinel(object):
def __del__(self):
deleted_sentinels.append(id(self))
class MyLocal(local):
CLASS_PROP = 42
def __init__(self):
local.__init__(self)
self.sentinel = Sentinel()
created_sentinels.append(id(self.sentinel))
@property
def desc(self):
return self
class MyLocalSubclass(MyLocal):
pass
class WithGetattr(local):
def __getattr__(self, name):
if name == 'foo':
return 42
return super(WithGetattr, self).__getattr__(name) # pylint:disable=no-member
class LocalWithABC(local, Mapping):
def __getitem__(self, name):
return self.d[name]
def __iter__(self):
return iter(self.d)
def __len__(self):
return len(self.d)
class LocalWithStaticMethod(local):
@staticmethod
def a_staticmethod():
return 42
class LocalWithClassMethod(local):
@classmethod
def a_classmethod(cls):
return cls
class TestGeventLocal(greentest.TestCase):
# pylint:disable=attribute-defined-outside-init,blacklisted-name
def setUp(self):
del deleted_sentinels[:]
del created_sentinels[:]
tearDown = setUp
def test_create_local_subclass_init_args(self):
with self.assertRaisesRegex(TypeError,
"Initialization arguments are not supported"):
local("foo")
with self.assertRaisesRegex(TypeError,
"Initialization arguments are not supported"):
local(kw="foo")
def test_local_opts_not_subclassed(self):
l = local()
l.attr = 1
self.assertEqual(l.attr, 1)
def test_cannot_set_delete_dict(self):
l = local()
with self.assertRaises(AttributeError):
l.__dict__ = 1
with self.assertRaises(AttributeError):
del l.__dict__
def test_delete_with_no_dict(self):
l = local()
with self.assertRaises(AttributeError):
delattr(l, 'thing')
def del_local():
with self.assertRaises(AttributeError):
delattr(l, 'thing')
t = Thread(target=del_local)
t.start()
t.join()
def test_slot_and_type_attributes(self):
a = A(Obj())
a.initialized = 1
self.assertEqual(a.initialized, 1)
# The slot is shared
def demonstrate_slots_shared():
self.assertEqual(a.initialized, 1)
a.initialized = 2
greenlet = Thread(target=demonstrate_slots_shared)
greenlet.start()
greenlet.join()
self.assertEqual(a.initialized, 2)
# The slot overrides dict values
a.__dict__['initialized'] = 42 # pylint:disable=unsupported-assignment-operation
self.assertEqual(a.initialized, 2)
# Deleting the slot deletes the slot, but not the dict
del a.initialized
self.assertFalse(hasattr(a, 'initialized'))
self.assertIn('initialized', a.__dict__)
# We can delete the 'path' ivar
# and fall back to the type
del a.path
self.assertEqual(a.path, '')
with self.assertRaises(AttributeError):
del a.path
# A read property calls get
self.assertEqual(a.read_property, 42)
a.read_property = 1
self.assertEqual(a.read_property, 1)
self.assertIsInstance(A.read_property, ReadProperty)
# Type attributes can be read
self.assertEqual(a.type_path, 'MyPath')
self.assertNotIn('type_path', a.__dict__)
# and replaced in the dict
a.type_path = 'Local'
self.assertEqual(a.type_path, 'Local')
self.assertIn('type_path', a.__dict__)
def test_attribute_error(self):
# pylint:disable=attribute-defined-outside-init
a = A(Obj())
with self.assertRaises(AttributeError):
getattr(a, 'fizz_buzz')
def set_fizz_buzz():
a.fizz_buzz = 1
greenlet = Thread(target=set_fizz_buzz)
greenlet.start()
greenlet.join()
with self.assertRaises(AttributeError):
getattr(a, 'fizz_buzz')
def test_getattr_called(self):
getter = WithGetattr()
self.assertEqual(42, getter.foo)
getter.foo = 'baz'
self.assertEqual('baz', getter.foo)
def test_copy(self):
a = A(Obj())
a.path = '123'
a.obj.echo = 'test'
b = copy(a)
# Copy makes a shallow copy. Meaning that the attribute path
# has to be independent in the original and the copied object because the
# value is a string, but the attribute obj should be just reference to
# the instance of the class Obj
self.assertEqual(a.path, b.path, 'The values in the two objects must be equal')
self.assertEqual(a.obj, b.obj, 'The values must be equal')
b.path = '321'
self.assertNotEqual(a.path, b.path, 'The values in the two objects must be different')
a.obj.echo = "works"
self.assertEqual(a.obj, b.obj, 'The values must be equal')
def test_copy_no_subclass(self):
a = local()
setattr(a, 'thing', 42)
b = copy(a)
self.assertEqual(b.thing, 42)
self.assertIsNot(a.__dict__, b.__dict__)
def test_objects(self):
# Test which failed in the eventlet?!
a = A({})
a.path = '123'
b = A({'one': 2})
b.path = '123'
self.assertEqual(a.path, b.path, 'The values in the two objects must be equal')
b.path = '321'
self.assertNotEqual(a.path, b.path, 'The values in the two objects must be different')
def test_class_attr(self, kind=MyLocal):
mylocal = kind()
self.assertEqual(42, mylocal.CLASS_PROP)
mylocal.CLASS_PROP = 1
self.assertEqual(1, mylocal.CLASS_PROP)
self.assertEqual(mylocal.__dict__['CLASS_PROP'], 1) # pylint:disable=unsubscriptable-object
del mylocal.CLASS_PROP
self.assertEqual(42, mylocal.CLASS_PROP)
self.assertIs(mylocal, mylocal.desc)
def test_class_attr_subclass(self):
self.test_class_attr(kind=MyLocalSubclass)
def test_locals_collected_when_greenlet_dead_but_still_referenced(self):
# https://github.com/gevent/gevent/issues/387
import gevent
my_local = MyLocal()
my_local.sentinel = None
greentest.gc_collect_if_needed()
del created_sentinels[:]
del deleted_sentinels[:]
def demonstrate_my_local():
# Get the important parts
getattr(my_local, 'sentinel')
# Create and reference greenlets
greenlets = [Thread(target=demonstrate_my_local) for _ in range(5)]
for t in greenlets:
t.start()
gevent.sleep()
self.assertEqual(len(created_sentinels), len(greenlets))
for g in greenlets:
assert not g.is_alive()
gevent.sleep() # let the callbacks run
greentest.gc_collect_if_needed()
# The sentinels should be gone too
self.assertEqual(len(deleted_sentinels), len(greenlets))
@greentest.skipOnLibuvOnPyPyOnWin("GC makes this non-deterministic, especially on Windows")
def test_locals_collected_when_unreferenced_even_in_running_greenlet(self):
# In fact only on Windows do we see GC being an issue;
# pypy2 5.0 on macos and travis don't have a problem.
# https://github.com/gevent/gevent/issues/981
import gevent
import gc
gc.collect()
count = 1000
running_greenlet = None
def demonstrate_my_local():
for _ in range(1000):
x = MyLocal()
self.assertIsNotNone(x.sentinel)
x = None
gc.collect()
gc.collect()
self.assertEqual(count, len(created_sentinels))
# They're all dead, even though this greenlet is
# still running
self.assertEqual(count, len(deleted_sentinels))
# The links were removed as well.
self.assertFalse(running_greenlet.has_links())
running_greenlet = gevent.spawn(demonstrate_my_local)
gevent.sleep()
running_greenlet.join()
self.assertEqual(count, len(deleted_sentinels))
@greentest.ignores_leakcheck
def test_local_dicts_for_greenlet(self):
import gevent
from gevent.local import all_local_dicts_for_greenlet
class MyGreenlet(gevent.Greenlet):
results = None
id_x = None
def _run(self): # pylint:disable=method-hidden
x = local()
x.foo = 42
self.id_x = id(x)
self.results = all_local_dicts_for_greenlet(self)
g = MyGreenlet()
g.start()
g.join()
self.assertTrue(g.successful, g)
self.assertEqual(g.results,
[((local, g.id_x), {'foo': 42})])
def test_local_with_abc(self):
# an ABC (or generally any non-exact-type) in the MRO doesn't
# break things. See https://github.com/gevent/gevent/issues/1201
x = LocalWithABC()
x.d = {'a': 1}
self.assertEqual({'a': 1}, x.d)
# The ABC part works
self.assertIn('a', x.d)
self.assertEqual(['a'], list(x.keys()))
def test_local_with_staticmethod(self):
x = LocalWithStaticMethod()
self.assertEqual(42, x.a_staticmethod())
def test_local_with_classmethod(self):
x = LocalWithClassMethod()
self.assertIs(LocalWithClassMethod, x.a_classmethod())
class TestLocalInterface(greentest.TestCase):
__timeout__ = None
@greentest.ignores_leakcheck
def test_provides(self):
# https://github.com/gevent/gevent/issues/1122
# pylint:disable=inherit-non-class
class IFoo(interface.Interface):
pass
@interface.implementer(IFoo)
class Base(object):
pass
class Derived(Base, local):
pass
d = Derived()
p = list(interface.providedBy(d))
self.assertEqual([IFoo], p)
@greentest.skipOnPurePython("Needs C extension")
class TestCExt(greentest.TestCase): # pragma: no cover
def test_c_extension(self):
self.assertEqual(local.__module__,
'gevent._gevent_clocal')
@greentest.skipWithCExtensions("Needs pure-python")
class TestPure(greentest.TestCase):
def test_extension(self):
self.assertEqual(local.__module__,
'gevent.local')
if __name__ == '__main__':
greentest.main()
| 11,741 | 26.56338 | 99 | py |
gevent | gevent-master/src/gevent/tests/test___ident.py | # -*- coding: utf-8 -*-
# copyright 2018 gevent contributors. See LICENSE for details.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import gevent.testing as greentest
from gevent._ident import IdentRegistry
from gevent._compat import PYPY
class Target(object):
pass
class TestIdent(greentest.TestCase):
def setUp(self):
self.reg = IdentRegistry()
def tearDown(self):
self.reg = None
def test_basic(self):
target = Target()
self.assertEqual(0, self.reg.get_ident(target))
self.assertEqual(1, len(self.reg))
self.assertEqual(0, self.reg.get_ident(target))
self.assertEqual(1, len(self.reg))
target2 = Target()
self.assertEqual(1, self.reg.get_ident(target2))
self.assertEqual(2, len(self.reg))
self.assertEqual(1, self.reg.get_ident(target2))
self.assertEqual(2, len(self.reg))
self.assertEqual(0, self.reg.get_ident(target))
# When an object dies, we can re-use
# its id. Under PyPy we need to collect garbage first.
del target
if PYPY:
for _ in range(3):
gc.collect()
self.assertEqual(1, len(self.reg))
target3 = Target()
self.assertEqual(1, self.reg.get_ident(target2))
self.assertEqual(0, self.reg.get_ident(target3))
self.assertEqual(2, len(self.reg))
@greentest.skipOnPyPy("This would need to GC very frequently")
def test_circle(self):
keep_count = 3
keepalive = [None] * keep_count
for i in range(1000):
target = Target()
# Drop an old one.
keepalive[i % keep_count] = target
self.assertLessEqual(self.reg.get_ident(target), keep_count)
@greentest.skipOnPurePython("Needs C extension")
class TestCExt(greentest.TestCase):
def test_c_extension(self):
self.assertEqual(IdentRegistry.__module__,
'gevent._gevent_c_ident')
if __name__ == '__main__':
greentest.main()
| 2,109 | 25.049383 | 72 | py |
gevent | gevent-master/src/gevent/tests/test__issue1864.py | import sys
import unittest
from gevent import testing as greentest
class TestSubnormalFloatsAreNotDisabled(unittest.TestCase):
@greentest.skipOnCI('Some of our tests we compile with -Ofast, which breaks this.')
def test_subnormal_is_not_zero(self):
# Enabling the -Ofast compiler flag resulted in subnormal floats getting
# disabled the moment when gevent was imported. This impacted libraries
# that expect subnormal floats to be enabled.
#
# NOTE: This test is supposed to catch that. It doesn't seem to work perfectly, though.
# The test passes under Python 2 on macOS no matter whether -ffast-math is given or not;
# perhaps this is a difference in clang vs gcc? In contrast, the test on Python 2.7 always
# *fails* on GitHub actions (in both CPython 2.7 and PyPy). We're far past the EOL of
# Python 2.7 so I'm not going to spend much time investigating.
__import__('gevent')
# `sys.float_info.min` is the minimum representable positive normalized
# float, so dividing it by two gives us a positive subnormal float,
# as long as subnormals floats are not disabled.
self.assertGreater(sys.float_info.min / 2, 0.0)
if __name__ == "__main__":
unittest.main()
| 1,291 | 43.551724 | 98 | py |
gevent | gevent-master/src/gevent/tests/test__issue607.py | # A greenlet that's killed with an exception should fail.
import gevent.testing as greentest
import gevent
class ExpectedError(greentest.ExpectedException):
    # Raised deliberately by the tests below; deriving from
    # ExpectedException keeps the test framework from reporting it
    # as an unexpected failure.
    pass
def f():
    # Sleep "forever" so the greenlet is still blocked when it is killed.
    gevent.sleep(999)
class TestKillWithException(greentest.TestCase):
    """
    Killing a greenlet with a custom exception class should make it
    *fail*, while the default kill (GreenletExit) counts as success.
    """

    def test_kill_without_exception(self):
        g = gevent.spawn(f)
        g.kill()
        # The default GreenletExit is treated as a successful completion,
        # and get() returns the GreenletExit instance rather than raising.
        assert g.successful()
        assert isinstance(g.get(), gevent.GreenletExit)

    def test_kill_with_exception(self):
        # issue-607 pointed this case.
        g = gevent.spawn(f)
        with gevent.get_hub().ignoring_expected_test_error():
            # Hmm, this only needs the `with ignoring...` in
            # PURE_PYTHON mode (or PyPy).
            g.kill(ExpectedError)
        # A custom exception means the greenlet did NOT succeed:
        # get() raises it, value is None, and exception holds it.
        self.assertFalse(g.successful())
        self.assertRaises(ExpectedError, g.get)
        self.assertIsNone(g.value)
        self.assertIsInstance(g.exception, ExpectedError)

    def test_kill_with_exception_after_started(self):
        with gevent.get_hub().ignoring_expected_test_error():
            g = gevent.spawn(f)
            # join(0) yields so the greenlet actually starts running
            # before we kill it.
            g.join(0)
            g.kill(ExpectedError)
        self.assertFalse(g.successful())
        self.assertRaises(ExpectedError, g.get)
        self.assertIsNone(g.value)
        self.assertIsInstance(g.exception, ExpectedError)
if __name__ == '__main__':
greentest.main()
| 1,354 | 27.229167 | 61 | py |
gevent | gevent-master/src/gevent/tests/test__loop_callback.py | from gevent import get_hub
from gevent import testing as greentest
class Test(greentest.TestCase):
    """A callback scheduled via ``loop.run_callback`` fires exactly once."""

    def test(self):
        hits = []

        def record():
            hits.append(1)

        hub_loop = get_hub().loop
        hub_loop.run_callback(record)
        hub_loop.run()
        self.assertEqual(hits, [1])
if __name__ == '__main__':
greentest.main()
| 356 | 17.789474 | 39 | py |
gevent | gevent-master/src/gevent/tests/test__ares_host_result.py | from __future__ import print_function
import pickle
import gevent.testing as greentest
try:
from gevent.resolver.cares import ares_host_result
except ImportError: # pragma: no cover
ares_host_result = None
@greentest.skipIf(ares_host_result is None,
                  "Must be able to import ares")
class TestPickle(greentest.TestCase):
    # Issue 104: ares.ares_host_result unpickleable

    def _test(self, protocol):
        # Round-trip an ares_host_result through pickle at the given
        # protocol and check both the tuple value and the extra
        # ``family`` attribute survive.
        r = ares_host_result('family', ('arg1', 'arg2', ))
        dumped = pickle.dumps(r, protocol)
        loaded = pickle.loads(dumped)
        self.assertEqual(r, loaded)
        # pylint:disable=no-member
        self.assertEqual(r.family, loaded.family)
# Dynamically attach one test method per pickle protocol.
# NOTE: ``range`` is exclusive at the top, so we must add 1 or
# ``pickle.HIGHEST_PROTOCOL`` itself would never be exercised.
for i in range(pickle.HIGHEST_PROTOCOL + 1):
    def make_test(j):
        # Bind the protocol number now; closing over ``i`` directly
        # would late-bind and make every test use the final value.
        return lambda self: self._test(j)
    setattr(TestPickle, 'test' + str(i), make_test(i))
if __name__ == '__main__':
greentest.main()
| 908 | 26.545455 | 58 | py |
gevent | gevent-master/src/gevent/tests/test__issue6.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import sys
# This script re-executes itself: with no arguments it is the parent
# (driver) process; with the single argument 'subprocess' it is the
# monkey-patched child whose stdin/stdout handling is under test.
if not sys.argv[1:]:
    from subprocess import Popen, PIPE
    # not on Py2 pylint:disable=consider-using-with
    p = Popen([sys.executable, __file__, 'subprocess'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate(b'hello world\n')
    code = p.poll()
    assert p.poll() == 0, (out, err, code)
    # 'hello world' is 11 characters; the child must have read it all.
    assert out.strip() == b'11 chars.', (out, err, code)
    # XXX: This is seen sometimes to fail on Travis with the following value in err but a code of 0;
    # it seems load related:
    # 'Unhandled exception in thread started by \nsys.excepthook is missing\nlost sys.stderr\n'.
    # If warnings are enabled, Python 3 has started producing this:
    # '...importlib/_bootstrap.py:219: ImportWarning: can't resolve package from __spec__
    # or __package__, falling back on __name__ and __path__\n return f(*args, **kwds)\n'
    assert err == b'' or b'sys.excepthook' in err or b'Warning' in err, (out, err, code)
elif sys.argv[1:] == ['subprocess']: # pragma: no cover
    import gevent
    import gevent.monkey
    gevent.monkey.patch_all(sys=True)
    def printline():
        try:
            line = raw_input()
        except NameError:
            # Python 3: raw_input does not exist.
            line = input() # pylint:disable=bad-builtin
        print('%s chars.' % len(line))
        sys.stdout.flush()
    gevent.spawn(printline).join()
else: # pragma: no cover
    sys.exit('Invalid arguments: %r' % (sys.argv, ))
| 1,530 | 37.275 | 100 | py |
gevent | gevent-master/src/gevent/tests/monkey_package/__main__.py | from __future__ import print_function
# This file makes this directory into a runnable package.
# it exists to test 'python -m gevent.monkey monkey_package'
# Note that the __file__ may differ slightly; starting with
# Python 3.9, directly running it gets an abspath, but
# using ``runpy`` doesn't.
import os.path
print(os.path.abspath(__file__))
print(__name__)
| 363 | 35.4 | 60 | py |
gevent | gevent-master/src/gevent/tests/monkey_package/script.py | # -*- coding: utf-8 -*-
"""
Test script file, to be used directly as a file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# We need some global imports
from textwrap import dedent
def use_import():
    """Exercise the module-level ``dedent`` import and return its result."""
    sample = " text"
    return dedent(sample)
if __name__ == '__main__':
import os.path
print(os.path.abspath(__file__))
print(__name__)
print(use_import())
| 427 | 19.380952 | 48 | py |
gevent | gevent-master/src/gevent/tests/monkey_package/threadpool_monkey_patches.py | # -*- coding: utf-8 -*-
"""
This file runs ``gevent.monkey.patch_all()``.
It is intended to be used by ``python -m gevent.monkey <this file>``
to prove that monkey-patching twice doesn't have unfortunate sife effects (such as
breaking the threadpool).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from gevent import monkey
from gevent import get_hub
monkey.patch_all(thread=False, sys=True)
def thread_is_greenlet():
    # Returns whether the *original* (unpatched) thread get_ident and
    # gevent's greenlet-based get_ident agree for the calling thread.
    # This runs inside the native threadpool (see the apply() below).
    from gevent.thread import get_ident as gr_ident
    # 'thread' on Python 2, '_thread' on Python 3.
    std_thread_mod = 'thread' if bytes is str else '_thread'
    thr_ident = monkey.get_original(std_thread_mod, 'get_ident')
    return thr_ident() == gr_ident()
is_greenlet = get_hub().threadpool.apply(thread_is_greenlet)
print(is_greenlet)
print(len(sys._current_frames()))
sys.stdout.flush()
sys.stderr.flush()
| 869 | 27.064516 | 82 | py |
gevent | gevent-master/src/gevent/tests/monkey_package/threadpool_no_monkey.py | # -*- coding: utf-8 -*-
"""
This file *does not* run ``gevent.monkey.patch_all()``.
It is intended to be used by ``python -m gevent.monkey <this file>``
to prove that the threadpool and getting the original value of things
works.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from gevent import monkey
from gevent import get_hub
from gevent.thread import get_ident as gr_ident
std_thread_mod = 'thread' if bytes is str else '_thread'
thr_ident = monkey.get_original(std_thread_mod, 'get_ident')
print(thr_ident is gr_ident)
def thread_is_greenlet():
    # True when the original OS-thread ident matches gevent's
    # greenlet-based ident for the calling thread.
    return thr_ident() == gr_ident()
is_greenlet = get_hub().threadpool.apply(thread_is_greenlet)
print(is_greenlet)
print(len(sys._current_frames()))
| 787 | 24.419355 | 69 | py |
gevent | gevent-master/src/gevent/tests/monkey_package/issue302monkey.py | from __future__ import print_function
import socket
import sys
import os.path
# Fixture script: invoked with a single argument, 'patched' or 'stdlib',
# by the monkey-patching tests. Each print() line is compared by the
# driving test, so the exact output sequence matters.
if sys.argv[1] == 'patched':
    print('gevent' in repr(socket.socket))
else:
    assert sys.argv[1] == 'stdlib'
    print('gevent' not in repr(socket.socket))
print(os.path.abspath(__file__))
if sys.argv[1] == 'patched':
    # __package__ is handled differently, for some reason, and
    # runpy doesn't let us override it. When we call it, it
    # becomes ''. This appears to be against the documentation for
    # runpy, which says specifically "If the supplied path
    # directly references a script file (whether as source or as
    # precompiled byte code), then __file__ will be set to the
    # supplied path, and __spec__, __cached__, __loader__ and
    # __package__ will all be set to None."
    print(__package__ == '') # pylint:disable=compare-to-empty-string
else:
    # but the interpreter sets it to None
    print(__package__ is None)
| 935 | 33.666667 | 69 | py |
gevent | gevent-master/src/gevent/tests/monkey_package/issue1526_with_monkey.py | # -*- coding: utf-8 -*-
"""
Test for issue #1526:
- dnspython is imported first;
- monkey-patching happens early
"""
from __future__ import print_function, absolute_import
from gevent import monkey
monkey.patch_all()
# pylint:disable=import-error
import dns
assert dns
import socket
import sys
socket.getfqdn()
import gevent.resolver.dnspython
from gevent.resolver.dnspython import dns as gdns
from dns import rdtypes # NOT import dns.rdtypes
assert gevent.resolver.dnspython.dns is gdns
assert gdns is not dns, (gdns, dns, "id dns", id(dns))
assert gdns.rdtypes is not rdtypes, (gdns.rdtypes, rdtypes)
assert hasattr(dns, 'rdtypes')
print(sorted(sys.modules))
| 666 | 22 | 59 | py |
gevent | gevent-master/src/gevent/tests/monkey_package/__init__.py | # -*- coding: utf-8 -*-
"""
Make a package.
This file has no other functionality. Individual modules in this package
are used for testing, often being run with 'python -m ...' in individual
test cases (functions).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| 329 | 24.384615 | 72 | py |
gevent | gevent-master/src/gevent/tests/monkey_package/issue1526_no_monkey.py | # -*- coding: utf-8 -*-
"""
Test for issue #1526:
- dnspython is imported first;
- no monkey-patching is done.
"""
from __future__ import print_function
from __future__ import absolute_import
import dns # pylint:disable=import-error
assert dns
import gevent.socket as socket # pylint:disable=consider-using-from-import
socket.getfqdn() # create the resolver
from gevent.resolver.dnspython import dns as gdns
import dns.rdtypes # pylint:disable=import-error
assert dns is not gdns, (dns, gdns)
assert dns.rdtypes is not gdns.rdtypes
import sys
print(sorted(sys.modules))
| 573 | 25.090909 | 74 | py |
gevent | gevent-master/src/gevent/libev/watcher.py | # pylint: disable=too-many-lines, protected-access, redefined-outer-name, not-callable
# pylint: disable=no-member
from __future__ import absolute_import, print_function
import sys
from gevent.libev import _corecffi # pylint:disable=no-name-in-module,import-error
# Nothing public here
__all__ = []
ffi = _corecffi.ffi # pylint:disable=no-member
libev = _corecffi.lib # pylint:disable=no-member
if hasattr(libev, 'vfd_open'):
# Must be on windows
# pylint:disable=c-extension-no-member
assert sys.platform.startswith("win"), "vfd functions only needed on windows"
vfd_open = libev.vfd_open
vfd_free = libev.vfd_free
vfd_get = libev.vfd_get
else:
vfd_open = vfd_free = vfd_get = lambda fd: fd
#####
## NOTE on Windows:
# The C implementation does several things specially for Windows;
# a possibly incomplete list is:
#
# - the loop runs a periodic signal checker;
# - the io watcher constructor is different and it has a destructor;
# - the child watcher is not defined
#
# The CFFI implementation does none of these things, and so
# is possibly NOT FUNCTIONALLY CORRECT on Win32
#####
_NOARGS = ()
_events = [(libev.EV_READ, 'READ'),
(libev.EV_WRITE, 'WRITE'),
(libev.EV__IOFDSET, '_IOFDSET'),
(libev.EV_PERIODIC, 'PERIODIC'),
(libev.EV_SIGNAL, 'SIGNAL'),
(libev.EV_CHILD, 'CHILD'),
(libev.EV_STAT, 'STAT'),
(libev.EV_IDLE, 'IDLE'),
(libev.EV_PREPARE, 'PREPARE'),
(libev.EV_CHECK, 'CHECK'),
(libev.EV_EMBED, 'EMBED'),
(libev.EV_FORK, 'FORK'),
(libev.EV_CLEANUP, 'CLEANUP'),
(libev.EV_ASYNC, 'ASYNC'),
(libev.EV_CUSTOM, 'CUSTOM'),
(libev.EV_ERROR, 'ERROR')]
from gevent._ffi import watcher as _base
def _events_to_str(events):
    # Render a libev event bitmask (EV_READ | EV_WRITE | ...) as a
    # human-readable string, using the ``_events`` name table above.
    return _base.events_to_str(events, _events)
class watcher(_base.watcher):
    """
    Base CFFI wrapper for a libev event watcher.

    Adds libev-specific reference accounting on top of the shared
    ``_base.watcher``: a watcher created with ``ref=False`` does not
    keep the event loop's ``ev_run`` alive while it is active.
    """
    _FFI = ffi
    _LIB = libev
    _watcher_prefix = 'ev'

    # Flags is a bitfield with the following meaning:
    # 0000 -> default, referenced (when active)
    # 0010 -> ev_unref has been called
    # 0100 -> not referenced; independent of 0010
    _flags = 0

    def __init__(self, _loop, ref=True, priority=None, args=_base._NOARGS):
        # ref=False records "do not keep the loop alive" (bit 0100).
        if ref:
            self._flags = 0
        else:
            self._flags = 4
        super(watcher, self).__init__(_loop, ref=ref, priority=priority, args=args)

    def _watcher_ffi_set_priority(self, priority):
        libev.ev_set_priority(self._watcher, priority)

    def _watcher_ffi_init(self, args):
        self._watcher_init(self._watcher,
                           self._watcher_callback,
                           *args)

    def _watcher_ffi_start(self):
        self._watcher_start(self.loop._ptr, self._watcher)

    def _watcher_ffi_ref(self):
        if self._flags & 2: # we've told libev we're not referenced
            self.loop.ref()
            self._flags &= ~2

    def _watcher_ffi_unref(self):
        if self._flags & 6 == 4:
            # We're not referenced, but we haven't told libev that
            self.loop.unref()
            self._flags |= 2 # now we've told libev

    def _get_ref(self):
        return not self._flags & 4

    def _set_ref(self, value):
        if value:
            if not self._flags & 4:
                return # ref is already True
            if self._flags & 2: # ev_unref was called, undo
                self.loop.ref()
            self._flags &= ~6 # do not want unref, no outstanding unref
        else:
            if self._flags & 4:
                return # ref is already False
            self._flags |= 4 # we're not referenced
            if not self._flags & 2 and libev.ev_is_active(self._watcher):
                # we haven't told libev we're not referenced, but it thinks we're
                # active so we need to undo that
                self.loop.unref()
                self._flags |= 2 # libev knows we're not referenced

    ref = property(_get_ref, _set_ref)

    def _get_priority(self):
        return libev.ev_priority(self._watcher)

    @_base.not_while_active
    def _set_priority(self, priority):
        libev.ev_set_priority(self._watcher, priority)

    priority = property(_get_priority, _set_priority)

    def feed(self, revents, callback, *args):
        # Inject *revents* for this watcher as if libev had observed
        # them (ev_feed_event), invoking *callback* with *args*.
        self.callback = callback
        self.args = args or _NOARGS
        if self._flags & 6 == 4:
            self.loop.unref()
            self._flags |= 2
        libev.ev_feed_event(self.loop._ptr, self._watcher, revents)
        if not self._flags & 1:
            # Py_INCREF(<PyObjectPtr>self)
            self._flags |= 1

    @property
    def pending(self):
        return bool(self._watcher and libev.ev_is_pending(self._watcher))
class io(_base.IoMixin, watcher):
    """``ev_io`` watcher: watches a file descriptor for readiness."""

    EVENT_MASK = libev.EV__IOFDSET | libev.EV_READ | libev.EV_WRITE

    def _get_fd(self):
        return vfd_get(self._watcher.fd)

    @_base.not_while_active
    def _set_fd(self, fd):
        # Changing the fd requires re-initializing the underlying
        # ev_io struct; events are preserved. vfd_* is a no-op except
        # on Windows, where it maps sockets to virtual fds.
        vfd = vfd_open(fd)
        vfd_free(self._watcher.fd)
        self._watcher_init(self._watcher, self._watcher_callback, vfd, self._watcher.events)

    fd = property(_get_fd, _set_fd)

    def _get_events(self):
        return self._watcher.events

    @_base.not_while_active
    def _set_events(self, events):
        # Like _set_fd: changing events re-initializes the struct.
        self._watcher_init(self._watcher, self._watcher_callback, self._watcher.fd, events)

    events = property(_get_events, _set_events)

    @property
    def events_str(self):
        return _events_to_str(self._watcher.events)

    def _format(self):
        return ' fd=%s events=%s' % (self.fd, self.events_str)
class timer(_base.TimerMixin, watcher):
    """``ev_timer`` watcher."""

    @property
    def at(self):
        return self._watcher.at

    def again(self, callback, *args, **kw):
        # Exactly the same as start(), just with a different initializer
        # function: temporarily swap in ev_timer_again (which restarts
        # a repeating timer) as the start function used by start().
        self._watcher_start = libev.ev_timer_again
        try:
            self.start(callback, *args, **kw)
        finally:
            # Restore class-level _watcher_start for subsequent start()s.
            del self._watcher_start
class signal(_base.SignalMixin, watcher):
    # ``ev_signal`` watcher; all behavior comes from the mixin.
    pass
class idle(_base.IdleMixin, watcher):
    # ``ev_idle`` watcher; all behavior comes from the mixin.
    pass
class prepare(_base.PrepareMixin, watcher):
    # ``ev_prepare`` watcher; all behavior comes from the mixin.
    pass
class check(_base.CheckMixin, watcher):
    # ``ev_check`` watcher; all behavior comes from the mixin.
    pass
class fork(_base.ForkMixin, watcher):
    # ``ev_fork`` watcher; all behavior comes from the mixin.
    pass
class async_(_base.AsyncMixin, watcher):
    """``ev_async`` watcher: wake the loop from another context."""

    def send(self):
        # ev_async_send is documented by libev as safe to call from
        # other threads/signal contexts.
        libev.ev_async_send(self.loop._ptr, self._watcher)

    @property
    def pending(self):
        return self._watcher is not None and bool(libev.ev_async_pending(self._watcher))
# Provide BWC for those that have async
locals()['async'] = async_
class _ClosedWatcher(object):
    """
    A plain stand-in that snapshots the child-watcher fields still
    needed after the real FFI watcher is discarded.

    Instances are always falsey, like an inactive watcher.
    """
    __slots__ = ('pid', 'rpid', 'rstatus')

    def __init__(self, other):
        # Copy the values out of *other* before it goes away.
        for attr in _ClosedWatcher.__slots__:
            setattr(self, attr, getattr(other, attr))

    def __bool__(self):
        return False
    # Python 2 spelling of the boolean protocol.
    __nonzero__ = __bool__
class child(_base.ChildMixin, watcher):
    """``ev_child`` watcher: reports child-process status changes."""
    _watcher_type = 'child'

    def close(self):
        # Capture the properties we defer to our _watcher, because
        # we're about to discard it. After close(), pid/rpid/rstatus
        # remain readable from the _ClosedWatcher snapshot.
        closed_watcher = _ClosedWatcher(self._watcher)
        super(child, self).close()
        self._watcher = closed_watcher

    @property
    def pid(self):
        return self._watcher.pid

    @property
    def rpid(self):
        return self._watcher.rpid

    @rpid.setter
    def rpid(self, value):
        self._watcher.rpid = value

    @property
    def rstatus(self):
        return self._watcher.rstatus

    @rstatus.setter
    def rstatus(self, value):
        self._watcher.rstatus = value
class stat(_base.StatMixin, watcher):
    """``ev_stat`` watcher: reports attribute changes of a path."""
    _watcher_type = 'stat'

    @property
    def attr(self):
        # st_nlink == 0 is how libev signals "path does not exist";
        # report that as None rather than a bogus struct.
        if not self._watcher.attr.st_nlink:
            return
        return self._watcher.attr

    @property
    def prev(self):
        # Same convention for the previous attributes.
        if not self._watcher.prev.st_nlink:
            return
        return self._watcher.prev

    @property
    def interval(self):
        return self._watcher.interval
| 7,999 | 26.777778 | 92 | py |
gevent | gevent-master/src/gevent/libev/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Nothing public here
__all__ = []
| 169 | 20.25 | 38 | py |
gevent | gevent-master/src/gevent/libev/_corecffi_build.py | # pylint: disable=no-member
# This module is only used to create and compile the gevent._corecffi module;
# nothing should be directly imported from it except `ffi`, which should only be
# used for `ffi.compile()`; programs should import gevent._corecfffi.
# However, because we are using "out-of-line" mode, it is necessary to examine
# this file to know what functions are created and available on the generated
# module.
from __future__ import absolute_import, print_function
import sys
import os
import os.path # pylint:disable=no-name-in-module
from cffi import FFI
sys.path.append(".")
try:
import _setuplibev
import _setuputils
except ImportError:
print("This file must be imported with setup.py in the current working dir.")
raise
thisdir = os.path.dirname(os.path.abspath(__file__))
parentdir = os.path.abspath(os.path.join(thisdir, '..'))
setup_dir = os.path.abspath(os.path.join(thisdir, '..', '..', '..'))
__all__ = []
ffi = FFI()
distutils_ext = _setuplibev.build_extension()
def read_source(name):
    """Read and return the text of *name*, relative to this directory."""
    # pylint:disable=unspecified-encoding
    with open(os.path.join(thisdir, name), 'r') as f:
        return f.read()
# cdef goes to the cffi library and determines what can be used in
# Python.
_cdef = read_source('_corecffi_cdef.c')
# These defines and uses help keep the C file readable and lintable by
# C tools.
_cdef = _cdef.replace('#define GEVENT_STRUCT_DONE int', '')
_cdef = _cdef.replace("GEVENT_STRUCT_DONE _;", '...;')
_cdef = _cdef.replace('#define GEVENT_ST_NLINK_T int',
'typedef int... nlink_t;')
_cdef = _cdef.replace('GEVENT_ST_NLINK_T', 'nlink_t')
if _setuplibev.LIBEV_EMBED:
# Arrange access to the loop internals
_cdef += """
struct ev_loop {
int backend_fd;
int activecnt;
...;
};
"""
# arrange to be configured.
_setuputils.ConfiguringBuildExt.gevent_add_pre_run_action(distutils_ext.configure)
if sys.platform.startswith('win'):
# We must have the vfd_open, etc, functions on
# Windows. But on other platforms, going through
# CFFI to just return the file-descriptor is slower
# than just doing it in Python, so we check for and
# workaround their absence in corecffi.py
_cdef += """
typedef int... vfd_socket_t;
int vfd_open(vfd_socket_t);
vfd_socket_t vfd_get(int);
void vfd_free(int);
"""
# source goes to the C compiler
_source = read_source('_corecffi_source.c')
macros = list(distutils_ext.define_macros)
try:
# We need the data pointer.
macros.remove(('EV_COMMON', ''))
except ValueError:
pass
ffi.cdef(_cdef)
ffi.set_source(
'gevent.libev._corecffi',
_source,
include_dirs=distutils_ext.include_dirs + [
thisdir, # "libev.h"
parentdir, # _ffi/alloc.c
],
define_macros=macros,
undef_macros=distutils_ext.undef_macros,
libraries=distutils_ext.libraries,
)
if __name__ == '__main__':
# XXX: Note, on Windows, we would need to specify the external libraries
# that should be linked in, such as ws2_32 and (because libev_vfd.h makes
# Python.h calls) the proper Python library---at least for PyPy. I never got
# that to work though, and calling python functions is strongly discouraged
# from CFFI code.
# On macOS to make the non-embedded case work correctly, against
# our local copy of libev:
#
# 1) configure and make libev
# 2) CPPFLAGS=-Ideps/libev/ LDFLAGS=-Ldeps/libev/.libs GEVENTSETUP_EMBED_LIBEV=0 \
# python setup.py build_ext -i
# 3) export DYLD_LIBRARY_PATH=`pwd`/deps/libev/.libs
#
# The DYLD_LIBRARY_PATH is because the linker hard-codes
# /usr/local/lib/libev.4.dylib in the corecffi.so dylib, because
# that's the "install name" of the libev dylib that was built.
# Adding a -rpath to the LDFLAGS doesn't change things.
# This can be fixed with `install_name_tool`:
#
# 3) install_name_tool -change /usr/local/lib/libev.4.dylib \
# `pwd`/deps/libev/.libs/libev.4.dylib \
# src/gevent/libev/_corecffi.abi3.so
ffi.compile(verbose=True)
| 4,059 | 30.968504 | 86 | py |
gevent | gevent-master/src/gevent/libev/corecffi.py | # pylint: disable=too-many-lines, protected-access, redefined-outer-name, not-callable
# pylint: disable=no-member
from __future__ import absolute_import, print_function
import sys
# pylint: disable=undefined-all-variable
__all__ = [
'get_version',
'get_header_version',
'supported_backends',
'recommended_backends',
'embeddable_backends',
'time',
'loop',
]
from zope.interface import implementer
from gevent._interfaces import ILoop
from gevent.libev import _corecffi # pylint:disable=no-name-in-module,import-error
ffi = _corecffi.ffi # pylint:disable=no-member
libev = _corecffi.lib # pylint:disable=no-member
if hasattr(libev, 'vfd_open'):
# Must be on windows
# pylint:disable=c-extension-no-member
assert sys.platform.startswith("win"), "vfd functions only needed on windows"
vfd_open = libev.vfd_open
vfd_free = libev.vfd_free
vfd_get = libev.vfd_get
else:
vfd_open = vfd_free = vfd_get = lambda fd: fd
libev.gevent_set_ev_alloc()
#####
## NOTE on Windows:
# The C implementation does several things specially for Windows;
# a possibly incomplete list is:
#
# - the loop runs a periodic signal checker;
# - the io watcher constructor is different and it has a destructor;
# - the child watcher is not defined
#
# The CFFI implementation does none of these things, and so
# is possibly NOT FUNCTIONALLY CORRECT on Win32
#####
from gevent._ffi.loop import AbstractCallbacks
from gevent._ffi.loop import assign_standard_callbacks
class _Callbacks(AbstractCallbacks):
    """libev-specific C->Python callback glue."""
    # pylint:disable=arguments-differ,arguments-renamed

    def python_check_callback(self, *args):
        # There's a pylint bug (pylint 2.9.3, astroid 2.6.2) that causes pylint to crash
        # with an AttributeError on certain types of arguments-differ errors
        # But code in _ffi/loop depends on being able to find the watcher_ptr
        # argument is the local frame. BUT it gets invoked before the function body runs.
        # Hence the override of _find_watcher_ptr_in_traceback.
        # pylint:disable=unused-variable
        _loop, watcher_ptr, _events = args
        AbstractCallbacks.python_check_callback(self, watcher_ptr)

    def _find_watcher_ptr_in_traceback(self, tb):
        # Look for the watcher pointer either as a named local or as
        # the second element of the packed ``args`` (see above).
        if tb is not None:
            l = tb.tb_frame.f_locals
            if 'watcher_ptr' in l:
                return l['watcher_ptr']
            if 'args' in l and len(l['args']) == 3:
                return l['args'][1]
        return AbstractCallbacks._find_watcher_ptr_in_traceback(self, tb)

    def python_prepare_callback(self, _loop_ptr, watcher_ptr, _events):
        AbstractCallbacks.python_prepare_callback(self, watcher_ptr)

    def _find_loop_from_c_watcher(self, watcher_ptr):
        # The loop's Python handle is stashed on the generic watcher's
        # ``data`` field (see loop._init_and_start_check).
        loop_handle = ffi.cast('struct ev_watcher*', watcher_ptr).data
        return self.from_handle(loop_handle)
_callbacks = assign_standard_callbacks(ffi, libev, _Callbacks)
UNDEF = libev.EV_UNDEF
NONE = libev.EV_NONE
READ = libev.EV_READ
WRITE = libev.EV_WRITE
TIMER = libev.EV_TIMER
PERIODIC = libev.EV_PERIODIC
SIGNAL = libev.EV_SIGNAL
CHILD = libev.EV_CHILD
STAT = libev.EV_STAT
IDLE = libev.EV_IDLE
PREPARE = libev.EV_PREPARE
CHECK = libev.EV_CHECK
EMBED = libev.EV_EMBED
FORK = libev.EV_FORK
CLEANUP = libev.EV_CLEANUP
ASYNC = libev.EV_ASYNC
CUSTOM = libev.EV_CUSTOM
ERROR = libev.EV_ERROR
READWRITE = libev.EV_READ | libev.EV_WRITE
MINPRI = libev.EV_MINPRI
MAXPRI = libev.EV_MAXPRI
BACKEND_PORT = libev.EVBACKEND_PORT
BACKEND_KQUEUE = libev.EVBACKEND_KQUEUE
BACKEND_EPOLL = libev.EVBACKEND_EPOLL
BACKEND_POLL = libev.EVBACKEND_POLL
BACKEND_SELECT = libev.EVBACKEND_SELECT
FORKCHECK = libev.EVFLAG_FORKCHECK
NOINOTIFY = libev.EVFLAG_NOINOTIFY
SIGNALFD = libev.EVFLAG_SIGNALFD
NOSIGMASK = libev.EVFLAG_NOSIGMASK
from gevent._ffi.loop import EVENTS
GEVENT_CORE_EVENTS = EVENTS
def get_version():
    """Return the libev version linked at runtime, e.g. ``'libev-4.33'``."""
    major = libev.ev_version_major()
    minor = libev.ev_version_minor()
    return 'libev-%d.%02d' % (major, minor)
def get_header_version():
    """Return the libev version we were compiled against, e.g. ``'libev-4.33'``."""
    major = libev.EV_VERSION_MAJOR
    minor = libev.EV_VERSION_MINOR
    return 'libev-%d.%02d' % (major, minor)
# This list backends in the order they are actually tried by libev,
# as defined in loop_init. The names must be lower case.
_flags = [
# IOCP --- not supported/used.
(libev.EVBACKEND_PORT, 'port'),
(libev.EVBACKEND_KQUEUE, 'kqueue'),
(libev.EVBACKEND_IOURING, 'linux_iouring'),
(libev.EVBACKEND_LINUXAIO, "linux_aio"),
(libev.EVBACKEND_EPOLL, 'epoll'),
(libev.EVBACKEND_POLL, 'poll'),
(libev.EVBACKEND_SELECT, 'select'),
(libev.EVFLAG_NOENV, 'noenv'),
(libev.EVFLAG_FORKCHECK, 'forkcheck'),
(libev.EVFLAG_SIGNALFD, 'signalfd'),
(libev.EVFLAG_NOSIGMASK, 'nosigmask')
]
_flags_str2int = dict((string, flag) for (flag, string) in _flags)
def _flags_to_list(flags):
    """
    Decode the *flags* bitmask into a list of backend/flag names
    (from the ``_flags`` table). Any bits left over after all known
    flags are consumed are appended as a raw integer.
    """
    names = []
    remaining = flags
    for code, name in _flags:
        if remaining & code:
            names.append(name)
            remaining &= ~code
        if not remaining:
            break
    if remaining:
        names.append(remaining)
    return names
if sys.version_info[0] >= 3:
basestring = (bytes, str)
integer_types = (int,)
else:
import __builtin__ # pylint:disable=import-error
basestring = (__builtin__.basestring,)
integer_types = (int, __builtin__.long)
def _flags_to_int(flags):
    """
    Convert *flags* -- None, an int, a comma-separated string, or an
    iterable of flag names -- into the combined integer bitmask.

    Raises ValueError for an unknown flag name.
    """
    # Note, that order does not matter, libev has its own predefined order
    if not flags:
        return 0
    if isinstance(flags, integer_types):
        return flags
    result = 0
    try:
        if isinstance(flags, basestring):
            flags = flags.split(',')
        for value in flags:
            value = value.strip().lower()
            if value:
                result |= _flags_str2int[value]
    except KeyError as ex:
        raise ValueError('Invalid backend or flag: %s\nPossible values: %s' % (ex, ', '.join(sorted(_flags_str2int.keys()))))
    return result
def _str_hex(flag):
    # Render an integer flag as hex; anything else via str().
    if isinstance(flag, integer_types):
        return hex(flag)
    return str(flag)
def _check_flags(flags):
    """
    Validate that the backend bits of *flags* name a real, supported
    libev backend; raise ValueError otherwise. Non-backend bits are
    ignored (masked off).
    """
    as_list = []
    flags &= libev.EVBACKEND_MASK
    if not flags:
        return
    if not flags & libev.EVBACKEND_ALL:
        raise ValueError('Invalid value for backend: 0x%x' % flags)
    if not flags & libev.ev_supported_backends():
        as_list = [_str_hex(x) for x in _flags_to_list(flags)]
        raise ValueError('Unsupported backend: %s' % '|'.join(as_list))
def supported_backends():
    # Names of all backends this libev build supports.
    return _flags_to_list(libev.ev_supported_backends())
def recommended_backends():
    # Names of the backends libev recommends for this platform.
    return _flags_to_list(libev.ev_recommended_backends())
def embeddable_backends():
    # Names of the backends that can be embedded in another loop.
    return _flags_to_list(libev.ev_embeddable_backends())
def time():
    # libev's notion of the current time (ev_time).
    return libev.ev_time()
from gevent._ffi.loop import AbstractLoop
from gevent.libev import watcher as _watchers
_events_to_str = _watchers._events_to_str # exported
@implementer(ILoop)
class loop(AbstractLoop):
    """The libev-backed event loop (CFFI implementation)."""
    # pylint:disable=too-many-public-methods

    # libuv parameters simply won't accept anything lower than 1ms
    # (0.001s), but libev takes fractional seconds. In practice, on
    # one machine, libev can sleep for very small periods of time:
    #
    # sleep(0.00001) -> 0.000024
    # sleep(0.0001) -> 0.000156
    # sleep(0.001) -> 0.00136 (which is comparable to libuv)
    approx_timer_resolution = 0.00001

    error_handler = None

    _CHECK_POINTER = 'struct ev_check *'
    _PREPARE_POINTER = 'struct ev_prepare *'
    _TIMER_POINTER = 'struct ev_timer *'

    def __init__(self, flags=None, default=None):
        AbstractLoop.__init__(self, ffi, libev, _watchers, flags, default)
        self._default = bool(libev.ev_is_default_loop(self._ptr))

    def _init_loop(self, flags, default):
        # Create the underlying C loop (default or new) and return its
        # pointer. NOENV/FORKCHECK are always forced on.
        c_flags = _flags_to_int(flags)
        _check_flags(c_flags)
        c_flags |= libev.EVFLAG_NOENV
        c_flags |= libev.EVFLAG_FORKCHECK
        if default is None:
            default = True
        if default:
            ptr = libev.gevent_ev_default_loop(c_flags)
            if not ptr:
                raise SystemError("ev_default_loop(%s) failed" % (c_flags, ))
        else:
            ptr = libev.ev_loop_new(c_flags)
            if not ptr:
                raise SystemError("ev_loop_new(%s) failed" % (c_flags, ))
        if default or SYSERR_CALLBACK is None:
            set_syserr_cb(self._handle_syserr)
        # Mark this loop as being used.
        libev.ev_set_userdata(ptr, ptr)
        return ptr

    def _init_and_start_check(self):
        libev.ev_check_init(self._check, libev.python_check_callback)
        # The handle lets _find_loop_from_c_watcher recover this loop.
        self._check.data = self._handle_to_self
        libev.ev_check_start(self._ptr, self._check)
        self.unref()

    def _init_and_start_prepare(self):
        libev.ev_prepare_init(self._prepare, libev.python_prepare_callback)
        libev.ev_prepare_start(self._ptr, self._prepare)
        self.unref()

    def _init_callback_timer(self):
        libev.ev_timer_init(self._timer0, libev.gevent_noop, 0.0, 0.0)

    def _stop_callback_timer(self):
        libev.ev_timer_stop(self._ptr, self._timer0)

    def _start_callback_timer(self):
        libev.ev_timer_start(self._ptr, self._timer0)

    def _stop_aux_watchers(self):
        # Stop prepare/check/timer0; re-ref first because we unref'd
        # them when starting so they wouldn't keep the loop alive.
        super(loop, self)._stop_aux_watchers()
        if libev.ev_is_active(self._prepare):
            self.ref()
            libev.ev_prepare_stop(self._ptr, self._prepare)
        if libev.ev_is_active(self._check):
            self.ref()
            libev.ev_check_stop(self._ptr, self._check)
        if libev.ev_is_active(self._timer0):
            libev.ev_timer_stop(self._timer0)

    def _setup_for_run_callback(self):
        # XXX: libuv needs to start the callback timer to be sure
        # that the loop wakes up and calls this. Our C version doesn't
        # do this.
        # self._start_callback_timer()
        self.ref() # we should go through the loop now

    def destroy(self):
        if self._ptr:
            super(loop, self).destroy()
            # pylint:disable=comparison-with-callable
            if globals()["SYSERR_CALLBACK"] == self._handle_syserr:
                set_syserr_cb(None)

    def _can_destroy_loop(self, ptr):
        # Is it marked as destroyed?
        return libev.ev_userdata(ptr)

    def _destroy_loop(self, ptr):
        # Mark as destroyed.
        libev.ev_set_userdata(ptr, ffi.NULL)
        libev.ev_loop_destroy(ptr)
        # Zero the aux watcher structs so stale pointers can't be used.
        libev.gevent_zero_prepare(self._prepare)
        libev.gevent_zero_check(self._check)
        libev.gevent_zero_timer(self._timer0)
        del self._prepare
        del self._check
        del self._timer0

    @property
    def MAXPRI(self):
        return libev.EV_MAXPRI

    @property
    def MINPRI(self):
        return libev.EV_MINPRI

    def _default_handle_error(self, context, type, value, tb): # pylint:disable=unused-argument
        super(loop, self)._default_handle_error(context, type, value, tb)
        # Stop the current ev_run so the error propagates.
        libev.ev_break(self._ptr, libev.EVBREAK_ONE)

    def run(self, nowait=False, once=False):
        # Run the loop; nowait/once map to EVRUN_NOWAIT/EVRUN_ONCE.
        flags = 0
        if nowait:
            flags |= libev.EVRUN_NOWAIT
        if once:
            flags |= libev.EVRUN_ONCE
        libev.ev_run(self._ptr, flags)

    def reinit(self):
        # Tell libev we forked (ev_loop_fork).
        libev.ev_loop_fork(self._ptr)

    def ref(self):
        libev.ev_ref(self._ptr)

    def unref(self):
        libev.ev_unref(self._ptr)

    def break_(self, how=libev.EVBREAK_ONE):
        libev.ev_break(self._ptr, how)

    def verify(self):
        libev.ev_verify(self._ptr)

    def now(self):
        return libev.ev_now(self._ptr)

    def update_now(self):
        libev.ev_now_update(self._ptr)

    def __repr__(self):
        return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), self._format())

    @property
    def iteration(self):
        return libev.ev_iteration(self._ptr)

    @property
    def depth(self):
        return libev.ev_depth(self._ptr)

    @property
    def backend_int(self):
        return libev.ev_backend(self._ptr)

    @property
    def backend(self):
        # The backend's name if we know it, else the raw integer.
        backend = libev.ev_backend(self._ptr)
        for key, value in _flags:
            if key == backend:
                return value
        return backend

    @property
    def pendingcnt(self):
        return libev.ev_pending_count(self._ptr)

    def closing_fd(self, fd):
        # Feed a synthetic event for *fd*; if the pending count grew,
        # some watcher was watching that fd.
        pending_before = libev.ev_pending_count(self._ptr)
        libev.ev_feed_fd_event(self._ptr, fd, 0xFFFF)
        pending_after = libev.ev_pending_count(self._ptr)
        return pending_after > pending_before

    if sys.platform != "win32":

        def install_sigchld(self):
            libev.gevent_install_sigchld_handler()

        def reset_sigchld(self):
            libev.gevent_reset_sigchld_handler()

    def fileno(self):
        if self._ptr and LIBEV_EMBED:
            # If we don't embed, we can't access these fields,
            # the type is opaque
            fd = self._ptr.backend_fd
            if fd >= 0:
                return fd

    @property
    def activecnt(self):
        if not self._ptr:
            raise ValueError('operation on destroyed loop')
        if LIBEV_EMBED:
            return self._ptr.activecnt
        return -1
@ffi.def_extern()
def _syserr_cb(msg):
    # C-level trampoline: libev calls this on system errors; forward
    # the message and errno to the Python-level SYSERR_CALLBACK.
    try:
        msg = ffi.string(msg)
        SYSERR_CALLBACK(msg, ffi.errno)
    except:
        # If the Python callback itself fails, uninstall it so we
        # don't loop, then propagate.
        set_syserr_cb(None)
        raise # let cffi print the traceback
def set_syserr_cb(callback):
    """
    Install *callback* to receive libev system-error notifications,
    or remove the current callback when *callback* is None.

    :raises TypeError: if *callback* is neither callable nor None.
    """
    global SYSERR_CALLBACK
    if callback is None:
        libev.ev_set_syserr_cb(ffi.NULL)
        SYSERR_CALLBACK = None
    elif callable(callback):
        libev.ev_set_syserr_cb(libev._syserr_cb)
        SYSERR_CALLBACK = callback
    else:
        raise TypeError('Expected callable or None, got %r' % (callback, ))
# Currently-installed Python-level syserr callback (see set_syserr_cb);
# None when no callback is installed.
SYSERR_CALLBACK = None

# Whether libev is embedded in this build; when false the C struct is
# opaque and its fields (backend_fd, activecnt, ...) are inaccessible.
LIBEV_EMBED = libev.LIBEV_EMBED
# [dump metadata for previous file: 13,720 bytes | avg line 28.131635 | max line 125 | py]
# ==== gevent | gevent-master/src/gevent/resolver/_hostsfile.py ====
# -*- coding: utf-8 -*-
# Copyright (c) 2019 gevent contributors. See LICENSE for details.
#
# Portions of this code taken from dnspython
# https://github.com/rthalley/dnspython
#
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Private support for parsing /etc/hosts.
"""
from __future__ import absolute_import, division, print_function
import sys
import os
import re
from gevent.resolver._addresses import is_ipv4_addr
from gevent.resolver._addresses import is_ipv6_addr
from gevent._compat import iteritems
class HostsFile(object):
    """
    A class to read the contents of a hosts file (/etc/hosts).
    """

    # Matches one meaningful line of the file: captures the
    # address-and-names text while skipping surrounding whitespace,
    # trailing comments, blank lines and comment-only lines.
    LINES_RE = re.compile(r"""
        \s* # Leading space
        ([^\r\n#]+?) # The actual match, non-greedy so as not to include trailing space
        \s* # Trailing space
        (?:[#][^\r\n]+)? # Comments
        (?:$|[\r\n]+) # EOF or newline
    """, re.VERBOSE)

    def __init__(self, fname=None):
        # The lookup tables are (re)built lazily by load().
        self.v4 = {} # name -> ipv4
        self.v6 = {} # name -> ipv6
        self.aliases = {} # name -> canonical_name
        self.reverse = {} # ip addr -> some name
        if fname is None:
            if os.name == 'posix':
                fname = '/etc/hosts'
            elif os.name == 'nt': # pragma: no cover
                fname = os.path.expandvars(
                    r'%SystemRoot%\system32\drivers\etc\hosts')
        self.fname = fname
        assert self.fname
        # mtime of the hosts file at the last (re)load; 0 = never loaded.
        self._last_load = 0

    def _readlines(self):
        # Read the contents of the hosts file.
        #
        # Return list of lines, comment lines and empty lines are
        # excluded. Note that this performs disk I/O so can be
        # blocking.
        with open(self.fname, 'rb') as fp:
            fdata = fp.read()

        # XXX: Using default decoding. Is that correct?
        udata = fdata.decode(errors='ignore') if not isinstance(fdata, str) else fdata

        return self.LINES_RE.findall(udata)

    def load(self): # pylint:disable=too-many-locals
        # Load hosts file
        # This will (re)load the data from the hosts
        # file if it has changed.
        try:
            load_time = os.stat(self.fname).st_mtime
            needs_load = load_time > self._last_load
        except OSError:
            # Missing/unreadable hosts file: report it, but keep serving
            # whatever tables we already have.
            from gevent import get_hub
            get_hub().handle_error(self, *sys.exc_info())
            needs_load = False

        if not needs_load:
            return

        v4 = {}
        v6 = {}
        aliases = {}
        reverse = {}

        for line in self._readlines():
            parts = line.split()
            if len(parts) < 2:
                # Need at least an address and one name.
                continue
            ip = parts.pop(0)
            if is_ipv4_addr(ip):
                ipmap = v4
            elif is_ipv6_addr(ip):
                if ip.startswith('fe80'):
                    # Do not use link-local addresses, OSX stores these here
                    continue
                ipmap = v6
            else:
                continue
            # First name is canonical; the rest become aliases.
            cname = parts.pop(0).lower()
            ipmap[cname] = ip
            for alias in parts:
                alias = alias.lower()
                ipmap[alias] = ip
                aliases[alias] = cname

            # XXX: This is wrong for ipv6
            if ipmap is v4:
                ptr = '.'.join(reversed(ip.split('.'))) + '.in-addr.arpa'
            else:
                ptr = ip + '.ip6.arpa.'
            if ptr not in reverse:
                reverse[ptr] = cname

        # Publish the freshly-built tables, then remember the mtime.
        self._last_load = load_time
        self.v4 = v4
        self.v6 = v6
        self.aliases = aliases
        self.reverse = reverse

    def iter_all_host_addr_pairs(self):
        # Yield (name, address) for every IPv4 entry and then every
        # IPv6 entry, reloading the file first if it changed on disk.
        self.load()
        for name, addr in iteritems(self.v4):
            yield name, addr
        for name, addr in iteritems(self.v6):
            yield name, addr
# [dump metadata for previous file: 4,629 bytes | avg line 30.712329 | max line 88 | py]
# ==== gevent | gevent-master/src/gevent/resolver/_addresses.py ====
# -*- coding: utf-8 -*-
# Copyright (c) 2019 gevent contributors. See LICENSE for details.
#
# Portions of this code taken from dnspython
# https://github.com/rthalley/dnspython
#
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Private support for parsing textual addresses.
"""
from __future__ import absolute_import, division, print_function
import binascii
import re
import struct
from gevent.resolver import hostname_types
class AddressSyntaxError(ValueError):
    """Raised when a textual IPv4/IPv6 address cannot be parsed."""


def _ipv4_inet_aton(text):
    """
    Convert an IPv4 address in text form to binary struct.

    *text*, a ``text``, the IPv4 address in textual form.

    Returns a ``binary``.

    :raises AddressSyntaxError: if *text* is not a valid dotted-quad
        address: wrong number of parts, non-digit characters, a
        leading zero, or an octet greater than 255.
    """
    if not isinstance(text, bytes):
        text = text.encode()
    parts = text.split(b'.')
    if len(parts) != 4:
        raise AddressSyntaxError(text)
    for part in parts:
        if not part.isdigit():
            raise AddressSyntaxError(text)
        if len(part) > 1 and part.startswith(b'0'):
            # No leading zeros. (The previous check compared
            # ``part[0]`` — an int on Python 3 — to the *str* '0',
            # so it could never match there.)
            raise AddressSyntaxError(text)
    try:
        ints = [int(part) for part in parts]
        return struct.pack('BBBB', *ints)
    except struct.error:
        # An octet was > 255; int() itself cannot fail after isdigit().
        raise AddressSyntaxError(text)


def _ipv6_inet_aton(text,
                    _v4_ending=re.compile(br'(.*):(\d+\.\d+\.\d+\.\d+)$'),
                    _colon_colon_start=re.compile(br'::.*'),
                    _colon_colon_end=re.compile(br'.*::$')):
    """
    Convert an IPv6 address in text form to binary form.

    *text*, a ``text``, the IPv6 address in textual form.

    Returns a ``binary``.

    :raises AddressSyntaxError: if *text* is not a valid IPv6 address.
    """
    # pylint:disable=too-many-branches
    #
    # Our aim here is not something fast; we just want something that works.
    #
    if not isinstance(text, bytes):
        text = text.encode()

    if text == b'::':
        text = b'0::'
    #
    # Get rid of the icky dot-quad syntax if we have it.
    #
    m = _v4_ending.match(text)
    if not m is None:
        b = bytearray(_ipv4_inet_aton(m.group(2)))
        text = (u"{}:{:02x}{:02x}:{:02x}{:02x}".format(m.group(1).decode(),
                                                       b[0], b[1], b[2],
                                                       b[3])).encode()
    #
    # Try to turn '::<whatever>' into ':<whatever>'; if no match try to
    # turn '<whatever>::' into '<whatever>:'
    #
    m = _colon_colon_start.match(text)
    if not m is None:
        text = text[1:]
    else:
        m = _colon_colon_end.match(text)
        if not m is None:
            text = text[:-1]
    #
    # Now canonicalize into 8 chunks of 4 hex digits each
    #
    chunks = text.split(b':')

    l = len(chunks)
    if l > 8:
        # BUG FIX: this formerly raised a bare SyntaxError, which is not
        # a ValueError and therefore escaped the AddressSyntaxError
        # handler in _is_addr(), leaking out of is_ipv6_addr().
        raise AddressSyntaxError(text)

    seen_empty = False
    canonical = []
    for c in chunks:
        if c == b'':
            if seen_empty:
                raise AddressSyntaxError(text)
            seen_empty = True
            # The '::' gap expands to enough zero groups to make 8.
            for _ in range(0, 8 - l + 1):
                canonical.append(b'0000')
        else:
            lc = len(c)
            if lc > 4:
                raise AddressSyntaxError(text)
            if lc != 4:
                c = (b'0' * (4 - lc)) + c
            canonical.append(c)

    if l < 8 and not seen_empty:
        raise AddressSyntaxError(text)

    text = b''.join(canonical)
    #
    # Finally we can go to binary.
    #
    try:
        return binascii.unhexlify(text)
    except (binascii.Error, TypeError):
        raise AddressSyntaxError(text)
def _is_addr(host, parse=_ipv4_inet_aton):
    # A value is an address when it is a non-empty str/bytes hostname
    # that the given parser accepts without complaint.
    if host and isinstance(host, hostname_types):
        try:
            parse(host)
        except AddressSyntaxError:
            pass
        else:
            return True
    return False

# Return True if host is a valid IPv4 address
is_ipv4_addr = _is_addr


def is_ipv6_addr(host):
    # Return True if host is a valid IPv6 address
    if host and isinstance(host, hostname_types):
        # Discard any trailing "%<scope>" qualifier before parsing.
        delim = '%' if isinstance(host, str) else b'%'
        host = host.split(delim, 1)[0]
        return _is_addr(host, _ipv6_inet_aton)
# [dump metadata for previous file: 4,795 bytes | avg line 28.243902 | max line 76 | py]
# ==== gevent | gevent-master/src/gevent/resolver/thread.py ====
# Copyright (c) 2012-2015 Denis Bilenko. See LICENSE for details.
"""
Native thread-based hostname resolver.
"""
import _socket
from gevent.hub import get_hub
__all__ = ['Resolver']
class Resolver(object):
    """
    Hostname resolution implemented with the native resolution
    functions, run on native threads.

    Using the platform's own resolver gives the closest match to what
    a non-gevent program would see, including good support for
    platform-specific configuration mechanisms. Running the calls on
    (non-greenlet) native threads keeps them from blocking other
    greenlets.

    It is also far simpler than
    :class:`gevent.resolver_ares.Resolver`.

    .. tip::

        Most users find this resolver to be quite reliable in a
        properly monkey-patched environment. However, there have been
        some reports of long delays, slow performance or even hangs,
        particularly in long-lived programs that make many, many DNS
        requests. If you suspect that may be happening to you, try the
        dnspython or ares resolver (and submit a bug report).
    """

    def __init__(self, hub=None):
        if hub is None:
            hub = get_hub()
        self.pool = hub.threadpool
        if _socket.gaierror not in hub.NOT_ERROR:
            # Keep routine lookup failures out of the default error
            # handler's output; they would be very noisy otherwise.
            hub.NOT_ERROR += (_socket.gaierror, _socket.herror)

    def __repr__(self):
        return '<%s.%s at 0x%x pool=%r>' % (type(self).__module__,
                                            type(self).__name__,
                                            id(self), self.pool)

    def close(self):
        # Nothing to release: the thread pool belongs to the hub.
        pass

    # From briefly reading socketmodule.c, it seems that all of the
    # functions below are thread-safe in Python, even if they are not
    # thread-safe in C.

    def gethostbyname(self, *args):
        return self.pool.apply(_socket.gethostbyname, args)

    def gethostbyname_ex(self, *args):
        return self.pool.apply(_socket.gethostbyname_ex, args)

    def getaddrinfo(self, *args, **kwargs):
        return self.pool.apply(_socket.getaddrinfo, args, kwargs)

    def gethostbyaddr(self, *args, **kwargs):
        return self.pool.apply(_socket.gethostbyaddr, args, kwargs)

    def getnameinfo(self, *args, **kwargs):
        return self.pool.apply(_socket.getnameinfo, args, kwargs)
# [dump metadata for previous file: 2,487 bytes | avg line 34.542857 | max line 82 | py]
# ==== gevent | gevent-master/src/gevent/resolver/ares.py ====
# Copyright (c) 2011-2015 Denis Bilenko. See LICENSE for details.
"""
c-ares based hostname resolver.
"""
from __future__ import absolute_import, print_function, division
import os
import warnings
from _socket import gaierror
from _socket import herror
from _socket import error
from _socket import EAI_NONAME
from gevent._compat import text_type
from gevent._compat import integer_types
from gevent.hub import Waiter
from gevent.hub import get_hub
from gevent.socket import AF_UNSPEC
from gevent.socket import AF_INET
from gevent.socket import AF_INET6
from gevent.socket import SOCK_DGRAM
from gevent.socket import SOCK_STREAM
from gevent.socket import SOL_TCP
from gevent.socket import SOL_UDP
from gevent._config import config
from gevent._config import AresSettingMixin
from .cares import channel, InvalidIP # pylint:disable=import-error,no-name-in-module
from . import _lookup_port as lookup_port
from . import AbstractResolver
__all__ = ['Resolver']
class Resolver(AbstractResolver):
    """
    Implementation of the resolver API using the `c-ares`_ library.

    This implementation uses the c-ares library to handle name
    resolution. c-ares is natively asynchronous at the socket level
    and so integrates well into gevent's event loop.

    In comparison to :class:`gevent.resolver_thread.Resolver` (which
    delegates to the native system resolver), the implementation is
    much more complex. In addition, there have been reports of it not
    properly honoring certain system configurations (for example, the
    order in which IPv4 and IPv6 results are returned may not match
    the threaded resolver). However, because it does not use threads,
    it may scale better for applications that make many lookups.

    There are some known differences from the system resolver.

    - ``gethostbyname_ex`` and ``gethostbyaddr`` may return
      different for the ``aliaslist`` tuple member. (Sometimes the
      same, sometimes in a different order, sometimes a different
      alias altogether.)

    - ``gethostbyname_ex`` may return the ``ipaddrlist`` in a
      different order.

    - ``getaddrinfo`` does not return ``SOCK_RAW`` results.

    - ``getaddrinfo`` may return results in a different order.

    - Handling of ``.local`` (mDNS) names may be different, even
      if they are listed in the hosts file.

    - c-ares will not resolve ``broadcasthost``, even if listed in
      the hosts file prior to 2020-04-30.

    - This implementation may raise ``gaierror(4)`` where the
      system implementation would raise ``herror(1)`` or vice versa,
      with different error numbers. However, after 2020-04-30, this should be
      much reduced.

    - The results for ``localhost`` may be different. In
      particular, some system resolvers will return more results
      from ``getaddrinfo`` than c-ares does, such as SOCK_DGRAM
      results, and c-ares may report more ips on a multi-homed
      host.

    - The system implementation may return some names fully qualified, where
      this implementation returns only the host name. This appears to be
      the case only with entries found in ``/etc/hosts``.

    - c-ares supports a limited set of flags for ``getnameinfo`` and
      ``getaddrinfo``; unknown flags are ignored. System-specific flags
      such as ``AI_V4MAPPED_CFG`` are not supported.

    - ``getaddrinfo`` may return canonical names even without the ``AI_CANONNAME``
      being set.

    .. caution::

        This module is considered extremely experimental on PyPy, and
        due to its implementation in cython, it may be slower. It may also lead to
        interpreter crashes.

    .. versionchanged:: 1.5.0
       This version of gevent typically embeds c-ares 1.15.0 or newer. In
       that version of c-ares, domains ending in ``.onion`` `are never
       resolved <https://github.com/c-ares/c-ares/issues/196>`_ or even
       sent to the DNS server.

    .. versionchanged:: 20.5.0
       ``getaddrinfo`` is now implemented using the native c-ares function
       from c-ares 1.16 or newer.

    .. versionchanged:: 20.5.0
       Now ``herror`` and ``gaierror`` are raised more consistently with
       the standard library resolver, and have more consistent errno values.

       Handling of localhost and broadcast names is now more consistent.

    .. versionchanged:: 22.10.1
       Now has a ``__del__`` method that warns if the object is destroyed
       without being properly closed.

    .. _c-ares: http://c-ares.haxx.se
    """

    # The channel implementation to instantiate; overridable for testing.
    cares_class = channel

    def __init__(self, hub=None, use_environ=True, **kwargs):
        AbstractResolver.__init__(self)
        if hub is None:
            hub = get_hub()
        self.hub = hub
        if use_environ:
            # Fill in any kwargs the caller did not supply from the
            # configured (environment-driven) ares settings.
            for setting in config.settings.values():
                if isinstance(setting, AresSettingMixin):
                    value = setting.get()
                    if value is not None:
                        kwargs.setdefault(setting.kwarg_name, value)
        self.cares = self.cares_class(hub.loop, **kwargs)
        self.pid = os.getpid()
        # Remember the channel kwargs so _on_fork can rebuild it.
        self.params = kwargs
        self.fork_watcher = hub.loop.fork(ref=False) # We shouldn't keep the loop alive
        self.fork_watcher.start(self._on_fork)

    def __repr__(self):
        return '<gevent.resolver_ares.Resolver at 0x%x ares=%r>' % (id(self), self.cares)

    def _on_fork(self):
        # NOTE: See comment in gevent.hub.reinit.
        # A forked child must not share the parent's c-ares channel:
        # destroy it (on the loop) and build a fresh one.
        pid = os.getpid()
        if pid != self.pid:
            self.hub.loop.run_callback(self.cares.destroy)
            self.cares = self.cares_class(self.hub.loop, **self.params)
            self.pid = pid

    def close(self):
        AbstractResolver.close(self)
        if self.cares is not None:
            self.hub.loop.run_callback(self.cares.destroy)
            self.cares = None
        self.fork_watcher.stop()

    def __del__(self):
        # Warn (ResourceWarning) if the channel was never closed.
        if self.cares is not None:
            warnings.warn("cares Resolver destroyed while not closed",
                          ResourceWarning)
            self.close()

    def _gethostbyname_ex(self, hostname_bytes, family):
        # The ``while True`` / ``ares is self.cares`` pattern here and in
        # the sibling methods retries the query when the channel was
        # replaced by _on_fork() while the request was in flight.
        while True:
            ares = self.cares
            try:
                waiter = Waiter(self.hub)
                ares.gethostbyname(waiter, hostname_bytes, family)
                result = waiter.get()
                if not result[-1]:
                    raise herror(EAI_NONAME, self.EAI_NONAME_MSG)
                return result
            except herror as ex:
                if ares is self.cares:
                    if ex.args[0] == 1:
                        # Somewhere along the line, the internal
                        # implementation of gethostbyname_ex changed to invoke
                        # getaddrinfo() as a first pass, much like we do for ``getnameinfo()``;
                        # this means it raises a different error for not-found hosts.
                        raise gaierror(EAI_NONAME, self.EAI_NONAME_MSG)
                    raise
                # "self.cares is not ares" means channel was destroyed (because we were forked)

    def _lookup_port(self, port, socktype):
        return lookup_port(port, socktype)

    def __getaddrinfo(
            self, host, port,
            family=0, socktype=0, proto=0, flags=0,
            fill_in_type_proto=True
    ):
        """
        Returns a list ``(family, socktype, proto, canonname, sockaddr)``

        :raises gaierror: If no results are found.
        """
        # pylint:disable=too-many-locals,too-many-branches
        if isinstance(host, text_type):
            host = host.encode('idna')
        # c-ares wants the port as a (service-name-like) byte string;
        # 0/None means "no port".
        if isinstance(port, text_type):
            port = port.encode('ascii')
        elif isinstance(port, integer_types):
            if port == 0:
                port = None
            else:
                port = str(port).encode('ascii')

        waiter = Waiter(self.hub)
        self.cares.getaddrinfo(
            waiter,
            host,
            port,
            family,
            socktype,
            proto,
            flags,
        )
        # Result is a list of:
        # (family, socktype, proto, canonname, sockaddr)
        # Where sockaddr depends on family; for INET it is
        # (address, port)
        # and INET6 is
        # (address, port, flow info, scope id)
        result = waiter.get()

        if not result:
            raise gaierror(EAI_NONAME, self.EAI_NONAME_MSG)

        if fill_in_type_proto:
            # c-ares 1.16 DOES NOT fill in socktype or proto in the results,
            # ever. It's at least supposed to do that if they were given as
            # hints, but it doesn't (https://github.com/c-ares/c-ares/issues/317)
            # Sigh.
            # The SOL_* constants are another (older?) name for IPPROTO_*
            if socktype:
                hard_type_proto = [
                    (socktype, SOL_TCP if socktype == SOCK_STREAM else SOL_UDP),
                ]
            elif proto:
                hard_type_proto = [
                    (SOCK_STREAM if proto == SOL_TCP else SOCK_DGRAM, proto),
                ]
            else:
                hard_type_proto = [
                    (SOCK_STREAM, SOL_TCP),
                    (SOCK_DGRAM, SOL_UDP),
                ]
            # pylint:disable=not-an-iterable,unsubscriptable-object
            result = [
                (rfamily,
                 hard_type if not rtype else rtype,
                 hard_proto if not rproto else rproto,
                 rcanon,
                 raddr)
                for rfamily, rtype, rproto, rcanon, raddr
                in result
                for hard_type, hard_proto
                in hard_type_proto
            ]
        return result

    def _getaddrinfo(self, host_bytes, port, family, socktype, proto, flags):
        # Retry if the c-ares channel was swapped out by a fork mid-query.
        while True:
            ares = self.cares
            try:
                return self.__getaddrinfo(host_bytes, port, family, socktype, proto, flags)
            except gaierror:
                if ares is self.cares:
                    raise

    def __gethostbyaddr(self, ip_address):
        waiter = Waiter(self.hub)
        try:
            self.cares.gethostbyaddr(waiter, ip_address)
            return waiter.get()
        except InvalidIP:
            # *ip_address* wasn't a textual IP: resolve it to an address
            # first, then retry the reverse lookup with that address.
            result = self._getaddrinfo(ip_address, None,
                                       family=AF_UNSPEC, socktype=SOCK_DGRAM,
                                       proto=0, flags=0)
            if not result:
                raise
            # pylint:disable=unsubscriptable-object
            _ip_address = result[0][-1][0]
            if isinstance(_ip_address, text_type):
                _ip_address = _ip_address.encode('ascii')
            if _ip_address == ip_address:
                # Resolution was a no-op; re-raise the original error.
                raise
            waiter.clear()
            self.cares.gethostbyaddr(waiter, _ip_address)
            return waiter.get()

    def _gethostbyaddr(self, ip_address_bytes):
        # Retry if the c-ares channel was swapped out by a fork mid-query.
        while True:
            ares = self.cares
            try:
                return self.__gethostbyaddr(ip_address_bytes)
            except herror:
                if ares is self.cares:
                    raise

    def __getnameinfo(self, hostname, port, sockaddr, flags):
        # First canonicalize the sockaddr via getaddrinfo; we expect
        # exactly one result for a valid sockaddr.
        result = self.__getaddrinfo(
            hostname, port,
            family=AF_UNSPEC, socktype=SOCK_DGRAM,
            proto=0, flags=0,
            fill_in_type_proto=False)
        if len(result) != 1:
            raise error('sockaddr resolved to multiple addresses')

        family, _socktype, _proto, _name, address = result[0]

        if family == AF_INET:
            if len(sockaddr) != 2:
                raise error("IPv4 sockaddr must be 2 tuple")
        elif family == AF_INET6:
            # Keep the caller-supplied flowinfo/scope-id parts.
            address = address[:2] + sockaddr[2:]

        waiter = Waiter(self.hub)
        self.cares.getnameinfo(waiter, address, flags)
        node, service = waiter.get()

        if service is None:
            # ares docs: "If the query did not complete
            # successfully, or one of the values was not
            # requested, node or service will be NULL ". Python 2
            # allows that for the service, but Python 3 raises
            # an error. This is tested by test_socket in py 3.4
            err = gaierror(EAI_NONAME, self.EAI_NONAME_MSG)
            err.errno = EAI_NONAME
            raise err

        return node, service or '0'

    def _getnameinfo(self, address_bytes, port, sockaddr, flags):
        # Retry if the c-ares channel was swapped out by a fork mid-query.
        while True:
            ares = self.cares
            try:
                return self.__getnameinfo(address_bytes, port, sockaddr, flags)
            except gaierror:
                if ares is self.cares:
                    raise

    # # Things that need proper error handling
    # gethostbyaddr = AbstractResolver.convert_gaierror_to_herror(AbstractResolver.gethostbyaddr)
# [dump metadata for previous file: 12,890 bytes | avg line 35.415254 | max line 97 | py]
# ==== gevent | gevent-master/src/gevent/resolver/__init__.py ====
# Copyright (c) 2018 gevent contributors. See LICENSE for details.
import _socket
from _socket import AF_INET
from _socket import AF_UNSPEC
from _socket import AI_CANONNAME
from _socket import AI_PASSIVE
from _socket import AI_NUMERICHOST
from _socket import EAI_NONAME
from _socket import EAI_SERVICE
from _socket import SOCK_DGRAM
from _socket import SOCK_STREAM
from _socket import SOL_TCP
from _socket import error
from _socket import gaierror
from _socket import getaddrinfo as native_getaddrinfo
from _socket import getnameinfo as native_getnameinfo
from _socket import gethostbyaddr as native_gethostbyaddr
from _socket import gethostbyname as native_gethostbyname
from _socket import gethostbyname_ex as native_gethostbyname_ex
from _socket import getservbyname as native_getservbyname
from gevent._compat import string_types
from gevent._compat import text_type
from gevent._compat import hostname_types
from gevent._compat import integer_types
from gevent._compat import PYPY
from gevent._compat import MAC
from gevent.resolver._addresses import is_ipv6_addr
# Nothing public here.
__all__ = ()
# trigger import of encodings.idna to avoid https://github.com/gevent/gevent/issues/349
u'foo'.encode('idna')
def _lookup_port(port, socktype):
    """
    Resolve *port* — an int, a numeric string, a service name, or None —
    to ``(port_number, socktypes)``.

    *socktypes* lists the socket types implied by a service-name lookup
    (or echoes a non-zero *socktype* hint); it is empty otherwise.

    :raises gaierror: if a service name cannot be resolved for the
        requested socket type.
    :raises error: if *port* is of an unsupported type.
    """
    # pylint:disable=too-many-branches
    socktypes = []
    if isinstance(port, string_types):
        try:
            port = int(port)
        except ValueError:
            # Not numeric: treat it as a service name.
            try:
                if socktype == 0:
                    # No hint: try TCP first, fall back to UDP, and note
                    # every socket type the name resolves for.
                    origport = port
                    try:
                        port = native_getservbyname(port, 'tcp')
                        socktypes.append(SOCK_STREAM)
                    except error:
                        port = native_getservbyname(port, 'udp')
                        socktypes.append(SOCK_DGRAM)
                    else:
                        try:
                            if port == native_getservbyname(origport, 'udp'):
                                socktypes.append(SOCK_DGRAM)
                        except error:
                            pass
                elif socktype == SOCK_STREAM:
                    port = native_getservbyname(port, 'tcp')
                elif socktype == SOCK_DGRAM:
                    port = native_getservbyname(port, 'udp')
                else:
                    raise gaierror(EAI_SERVICE, 'Servname not supported for ai_socktype')
            except error as ex:
                if 'not found' in str(ex):
                    raise gaierror(EAI_SERVICE, 'Servname not supported for ai_socktype')
                # NOTE(review): single-argument gaierror carries no errno;
                # this matches the historical behavior of this module.
                raise gaierror(str(ex))
            except UnicodeEncodeError:
                # Non-ASCII service names cannot be looked up.
                raise error('Int or String expected', port)
    elif port is None:
        port = 0
    elif isinstance(port, integer_types):
        pass
    else:
        raise error('Int or String expected', port, type(port))
    # Coerce into the valid 16-bit port range.
    port = int(port % 65536)
    if not socktypes and socktype:
        socktypes.append(socktype)
    return port, socktypes
def _resolve_special(hostname, family):
    # Normalize "special" hostnames: the empty name stands for the
    # wildcard address, which we ask the system resolver about.
    if not isinstance(hostname, hostname_types):
        raise TypeError("argument 1 must be str, bytes or bytearray, not %s" % (type(hostname),))

    if hostname not in (u'', b''):
        return hostname

    results = native_getaddrinfo(None, 0, family, SOCK_DGRAM, 0, AI_PASSIVE)
    if len(results) != 1:
        raise error('wildcard resolved to multiple address')
    return results[0][4][0]
class AbstractResolver(object):
    """
    Base class for resolver implementations: provides the public
    entry points (``gethostbyname`` and friends) plus the
    special-case handling they all share, delegating real lookups to
    the ``_``-prefixed hooks subclasses override.
    """

    # Codec used to turn text hostnames into bytes.
    HOSTNAME_ENCODING = 'idna'

    # Names always answered by the native resolver, no network needed.
    _LOCAL_HOSTNAMES = (
        b'localhost',
        b'ip6-localhost',
        b'::1',
        b'127.0.0.1',
    )

    _LOCAL_AND_BROADCAST_HOSTNAMES = _LOCAL_HOSTNAMES + (
        b'255.255.255.255',
        b'<broadcast>',
    )

    # Platform-appropriate message matching the system resolver's wording.
    EAI_NONAME_MSG = (
        'nodename nor servname provided, or not known'
        if MAC else
        'Name or service not known'
    )

    EAI_FAMILY_MSG = (
        'ai_family not supported'
    )

    # Every AF_* constant the socket module defines on this platform.
    _KNOWN_ADDR_FAMILIES = {
        v
        for k, v in vars(_socket).items()
        if k.startswith('AF_')
    }

    # Every SOCK_* constant, excluding the flag/limit pseudo-constants.
    _KNOWN_SOCKTYPES = {
        v
        for k, v in vars(_socket).items()
        if k.startswith('SOCK_')
        and k not in ('SOCK_CLOEXEC', 'SOCK_MAX_SIZE')
    }

    def close(self):
        """
        Release resources held by this object.

        Subclasses that define resources should override.

        .. versionadded:: 22.10.1
        """

    @staticmethod
    def fixup_gaierror(func):
        """
        Wrap *func* so that a bare ``gaierror(EAI_NONAME)`` (as raised
        by dnspython, which sets no message) gains the platform message
        and an ``errno``.
        """
        import functools

        @functools.wraps(func)
        def resolve(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except gaierror as ex:
                if ex.args[0] == EAI_NONAME and len(ex.args) == 1:
                    # dnspython doesn't set an error message
                    ex.args = (EAI_NONAME, self.EAI_NONAME_MSG)
                    ex.errno = EAI_NONAME
                raise
        return resolve

    def _hostname_to_bytes(self, hostname):
        # Normalize str/bytes/bytearray to bytes (IDNA-encoding text);
        # reject everything else.
        if isinstance(hostname, text_type):
            hostname = hostname.encode(self.HOSTNAME_ENCODING)
        elif not isinstance(hostname, (bytes, bytearray)):
            raise TypeError('Expected str, bytes or bytearray, not %s' % type(hostname).__name__)
        return bytes(hostname)

    def gethostbyname(self, hostname, family=AF_INET):
        # The native ``gethostbyname`` and ``gethostbyname_ex`` have some different
        # behaviour with special names. Notably, ``gethostbyname`` will handle
        # both "<broadcast>" and "255.255.255.255", while ``gethostbyname_ex`` refuses to
        # handle those; they result in different errors, too. So we can't
        # pass those through.
        hostname = self._hostname_to_bytes(hostname)
        if hostname in self._LOCAL_AND_BROADCAST_HOSTNAMES:
            return native_gethostbyname(hostname)
        hostname = _resolve_special(hostname, family)
        return self.gethostbyname_ex(hostname, family)[-1][0]

    def _gethostbyname_ex(self, hostname_bytes, family):
        """Raise an ``herror`` or a ``gaierror``."""
        aliases = self._getaliases(hostname_bytes, family)
        addresses = []
        tuples = self.getaddrinfo(hostname_bytes, 0, family,
                                  SOCK_STREAM,
                                  SOL_TCP, AI_CANONNAME)
        canonical = tuples[0][3]
        for item in tuples:
            addresses.append(item[4][0])
        # XXX we just ignore aliases
        return (canonical, aliases, addresses)

    def gethostbyname_ex(self, hostname, family=AF_INET):
        hostname = self._hostname_to_bytes(hostname)
        if hostname in self._LOCAL_AND_BROADCAST_HOSTNAMES:
            # The broadcast specials aren't handled here, but they may produce
            # special errors that are hard to replicate across all systems.
            return native_gethostbyname_ex(hostname)
        return self._gethostbyname_ex(hostname, family)

    def _getaddrinfo(self, host_bytes, port, family, socktype, proto, flags):
        # Subclass hook for the real lookup.
        raise NotImplementedError

    def getaddrinfo(self, host, port, family=0, socktype=0, proto=0, flags=0):
        host = self._hostname_to_bytes(host) if host is not None else None
        if (
                not isinstance(host, bytes) # 1, 2
                or (flags & AI_NUMERICHOST) # 3
                or host in self._LOCAL_HOSTNAMES # 4
                or (is_ipv6_addr(host) and host.startswith(b'fe80')) # 5
        ):
            # This handles cases which do not require network access
            # 1) host is None
            # 2) host is of an invalid type
            # 3) AI_NUMERICHOST flag is set
            # 4) It's a well-known alias. TODO: This is special casing for c-ares that we don't
            #    really want to do. It's here because it resolves a discrepancy with the system
            #    resolvers caught by test cases. In gevent 20.4.0, this only worked correctly on
            #    Python 3 and not Python 2, by accident.
            # 5) host is a link-local ipv6; dnspython returns the wrong
            #    scope-id for those.
            return native_getaddrinfo(host, port, family, socktype, proto, flags)
        return self._getaddrinfo(host, port, family, socktype, proto, flags)

    def _getaliases(self, hostname, family):
        # Default: no alias information available.
        # pylint:disable=unused-argument
        return []

    def _gethostbyaddr(self, ip_address_bytes):
        """Raises herror."""
        raise NotImplementedError

    def gethostbyaddr(self, ip_address):
        ip_address = _resolve_special(ip_address, AF_UNSPEC)
        ip_address = self._hostname_to_bytes(ip_address)
        if ip_address in self._LOCAL_AND_BROADCAST_HOSTNAMES:
            return native_gethostbyaddr(ip_address)
        return self._gethostbyaddr(ip_address)

    def _getnameinfo(self, address_bytes, port, sockaddr, flags):
        # Subclass hook for the real lookup.
        raise NotImplementedError

    def getnameinfo(self, sockaddr, flags):
        if not isinstance(flags, integer_types):
            raise TypeError('an integer is required')
        if not isinstance(sockaddr, tuple):
            raise TypeError('getnameinfo() argument 1 must be a tuple')

        # (A redundant, immediately-overwritten ``address = sockaddr[0]``
        # that preceded this normalization was removed as dead code.)
        address = self._hostname_to_bytes(sockaddr[0])
        if address in self._LOCAL_AND_BROADCAST_HOSTNAMES:
            return native_getnameinfo(sockaddr, flags)

        port = sockaddr[1]
        if not isinstance(port, integer_types):
            raise TypeError('port must be an integer, not %s' % type(port))

        if not PYPY and port >= 65536:
            # System resolvers do different things with an
            # out-of-bound port; macOS CPython 3.8 raises ``gaierror: [Errno 8]
            # nodename nor servname provided, or not known``, while
            # manylinux CPython 2.7 appears to ignore it and raises ``error:
            # sockaddr resolved to multiple addresses``. TravisCI, at least ot
            # one point, successfully resolved www.gevent.org to ``(readthedocs.org, '0')``.
            # But c-ares 1.16 would raise ``gaierror(25, 'ARES_ESERVICE: unknown')``.
            # Doing this appears to get the expected results on CPython
            port = 0
        if PYPY and (port < 0 or port >= 65536):
            # PyPy seems to always be strict about that and produce the same results
            # on all platforms.
            raise OverflowError("port must be 0-65535.")

        if len(sockaddr) > 2:
            # Must be IPv6: (host, port, [flowinfo, [scopeid]])
            flowinfo = sockaddr[2]
            if flowinfo > 0xfffff:
                raise OverflowError("getnameinfo(): flowinfo must be 0-1048575.")

        return self._getnameinfo(address, port, sockaddr, flags)
# [dump metadata for previous file: 10,760 bytes | avg line 36.625874 | max line 97 | py]
# ==== gevent | gevent-master/src/gevent/resolver/dnspython.py ====
# Copyright (c) 2018 gevent contributors. See LICENSE for details.
# Portions of this code taken from the gogreen project:
# http://github.com/slideinc/gogreen
#
# Copyright (c) 2005-2010 Slide, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Portions of this code taken from the eventlet project:
# https://github.com/eventlet/eventlet/blob/master/eventlet/support/greendns.py
# Unless otherwise noted, the files in Eventlet are under the following MIT license:
# Copyright (c) 2005-2006, Bob Ippolito
# Copyright (c) 2007-2010, Linden Research, Inc.
# Copyright (c) 2008-2010, Eventlet Contributors (see AUTHORS)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import sys
import time
from _socket import error
from _socket import gaierror
from _socket import herror
from _socket import NI_NUMERICSERV
from _socket import AF_INET
from _socket import AF_INET6
from _socket import AF_UNSPEC
from _socket import EAI_NONAME
from _socket import EAI_FAMILY
import socket
from gevent.resolver import AbstractResolver
from gevent.resolver._hostsfile import HostsFile
from gevent.builtins import __import__ as g_import
from gevent._compat import string_types
from gevent._compat import iteritems
from gevent._config import config
__all__ = [
'Resolver',
]
# Import the DNS packages to use the gevent modules,
# even if the system is not monkey-patched. If it *is* already
# patched, this imports a second copy under a different name,
# which is probably not strictly necessary, but matches
# what we've historically done, and allows configuring the resolvers
# differently.
def _patch_dns():
    """
    Import a private, gevent-cooperative copy of the ``dns`` package
    (dnspython) via :func:`gevent._patcher.import_patched` and return
    its top-level module.

    Because a separate copy is imported, this resolver works even in a
    process that is not monkey-patched. After the import, dynamic
    imports inside ``dns.rdata`` are disabled (they are primed first by
    ``after_import_hook``).
    """
    from gevent._patcher import import_patched as importer
    # The dns package itself is empty but defines __all__
    # we make sure to import all of those things now under the
    # patch. Note this triggers two DeprecationWarnings,
    # one of which we could avoid.
    extras = {
        'dns': ('rdata', 'resolver', 'rdtypes'),
        'dns.rdtypes': ('IN', 'ANY', ),
        'dns.rdtypes.IN': ('A', 'AAAA',),
        'dns.rdtypes.ANY': ('SOA', 'PTR'),
    }
    def extra_all(mod_name):
        # Extra submodule names to import for *mod_name*, beyond its __all__.
        return extras.get(mod_name, ())
    def after_import_hook(dns): # pylint:disable=redefined-outer-name
        # Runs while still in the original patching scope.
        # The dns.rdata:get_rdata_class() function tries to
        # dynamically import modules using __import__ and then walk
        # through the attribute tree to find classes in `dns.rdtypes`.
        # It is critical that this all matches up, otherwise we can
        # get different exception classes that don't get caught.
        # We could patch __import__ to do things at runtime, but it's
        # easier to enumerate the world and populate the cache now
        # before we then switch the names back.
        rdata = dns.rdata
        get_rdata_class = rdata.get_rdata_class
        try:
            rdclass_values = list(dns.rdataclass.RdataClass)
        except AttributeError:
            # dnspython < 2.0
            rdclass_values = dns.rdataclass._by_value
        try:
            rdtype_values = list(dns.rdatatype.RdataType)
        except AttributeError:
            # dnspython < 2.0
            rdtype_values = dns.rdatatype._by_value
        # Touch every (class, type) pair so the rdata-class cache is fully
        # populated before dynamic imports are turned off below.
        for rdclass in rdclass_values:
            for rdtype in rdtype_values:
                get_rdata_class(rdclass, rdtype)
    patcher = importer('dns', extra_all, after_import_hook)
    top = patcher.module
    # Now disable the dynamic imports
    def _no_dynamic_imports(name):
        raise ValueError(name)
    top.rdata.__import__ = _no_dynamic_imports
    return top
# Module-level handles to our private, patched copy of dnspython.
dns = _patch_dns()
resolver = dns.resolver
dTimeout = dns.resolver.Timeout
# This is a wrapper for dns.resolver._getaddrinfo with two crucial changes.
# First, it backports https://github.com/rthalley/dnspython/issues/316
# from version 2.0. This can be dropped when we support only dnspython 2
# (which means only Python 3.)
# Second, it adds calls to sys.exc_clear() to avoid failing tests in
# test__refcount.py (timeouts) on Python 2. (Actually, this isn't
# strictly necessary, it was necessary to increase the timeouts in
# that function because dnspython is doing some parsing/regex/host
# lookups that are not super fast. But it does have a habit of leaving
# exceptions around which can complicate our memleak checks.)
def _getaddrinfo(host=None, service=None, family=AF_UNSPEC, socktype=0,
                 proto=0, flags=0,
                 # Default arguments bound at definition time on purpose:
                 # they capture the *original* dnspython implementation
                 # before it is replaced just below.
                 _orig_gai=resolver._getaddrinfo,
                 _exc_clear=getattr(sys, 'exc_clear', lambda: None)):
    if flags & (socket.AI_ADDRCONFIG | socket.AI_V4MAPPED) != 0:
        # Not implemented.  We raise a gaierror as opposed to a
        # NotImplementedError as it helps callers handle errors more
        # appropriately. [Issue #316]
        raise socket.gaierror(socket.EAI_SYSTEM)
    res = _orig_gai(host, service, family, socktype, proto, flags)
    _exc_clear()
    return res
resolver._getaddrinfo = _getaddrinfo
# How long (seconds) entries parsed from the hosts file are considered
# fresh before the file is re-read.
HOSTS_TTL = 300.0
class _HostsAnswer(dns.resolver.Answer):
    # An Answer synthesized from the hosts file rather than parsed from
    # a wire-format DNS response.
    def __init__(self, qname, rdtype, rdclass, rrset, raise_on_no_answer=True):
        # No DNS message backs this answer.
        self.response = None
        self.qname = qname
        self.rdtype = rdtype
        self.rdclass = rdclass
        self.canonical_name = qname
        if raise_on_no_answer and not rrset:
            raise dns.resolver.NoAnswer()
        self.rrset = rrset
        # Preserves the original operator precedence: when the rrset has
        # no ``ttl`` attribute, expiration is 0 (already expired), not
        # ``time.time()``.
        if hasattr(rrset, 'ttl'):
            self.expiration = time.time() + rrset.ttl
        else:
            self.expiration = 0
class _HostsResolver(object):
    """
    Class to parse the hosts file
    """
    def __init__(self, fname=None, interval=HOSTS_TTL):
        # *fname* overrides the platform-default hosts file location;
        # *interval* is how often (seconds) the file is re-read.
        self.hosts_file = HostsFile(fname)
        self.interval = interval
        # Timestamp of the last (re)load; 0 forces a load on first query.
        self._last_load = 0
    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None, raise_on_no_answer=True): # pylint:disable=unused-argument
        # Query the hosts file
        #
        # The known rdtypes are dns.rdatatype.A, dns.rdatatype.AAAA and
        # dns.rdatatype.CNAME.
        # The ``rdclass`` parameter must be dns.rdataclass.IN while the
        # ``tcp`` and ``source`` parameters are ignored.
        # Return a HostAnswer instance or raise a dns.resolver.NoAnswer
        # exception.
        now = time.time()
        hosts_file = self.hosts_file
        # Reload the hosts file if the cached copy is stale.
        if self._last_load + self.interval < now:
            self._last_load = now
            hosts_file.load()
        rdclass = dns.rdataclass.IN # Always
        if isinstance(qname, string_types):
            name = qname
            qname = dns.name.from_text(qname)
        else:
            name = str(qname)
        name = name.lower()
        rrset = dns.rrset.RRset(qname, rdclass, rdtype)
        rrset.ttl = self._last_load + self.interval - now
        # Pick the hosts-file mapping and rdata constructor for the type.
        # NOTE(review): an rdtype other than A/AAAA/CNAME/PTR would leave
        # ``mapping``/``kind`` unbound and raise NameError below; callers
        # only pass these four types — confirm.
        if rdtype == dns.rdatatype.A:
            mapping = hosts_file.v4
            kind = dns.rdtypes.IN.A.A
        elif rdtype == dns.rdatatype.AAAA:
            mapping = hosts_file.v6
            kind = dns.rdtypes.IN.AAAA.AAAA
        elif rdtype == dns.rdatatype.CNAME:
            mapping = hosts_file.aliases
            kind = lambda c, t, addr: dns.rdtypes.ANY.CNAME.CNAME(c, t, dns.name.from_text(addr))
        elif rdtype == dns.rdatatype.PTR:
            mapping = hosts_file.reverse
            kind = lambda c, t, addr: dns.rdtypes.ANY.PTR.PTR(c, t, dns.name.from_text(addr))
        addr = mapping.get(name)
        # An absolute qname ends with a trailing dot; retry without it.
        if not addr and qname.is_absolute():
            addr = mapping.get(name[:-1])
        if addr:
            rrset.add(kind(rdclass, rdtype, addr))
        return _HostsAnswer(qname, rdtype, rdclass, rrset, raise_on_no_answer)
    def getaliases(self, hostname):
        # Return a list of all the aliases of a given cname
        # Due to the way store aliases this is a bit inefficient, this
        # clearly was an afterthought. But this is only used by
        # gethostbyname_ex so it's probably fine.
        aliases = self.hosts_file.aliases
        result = []
        if hostname in aliases: # pylint:disable=consider-using-get
            cannon = aliases[hostname]
        else:
            cannon = hostname
        result.append(cannon)
        for alias, cname in iteritems(aliases):
            if cannon == cname:
                result.append(alias)
        # *hostname* itself always wound up in result (either as the
        # canonical name or as one of its aliases); drop it.
        result.remove(hostname)
        return result
class _DualResolver(object):
    """
    Consults the hosts file first (for IN-class A/AAAA/PTR queries) and
    falls back to a caching dnspython network resolver.
    """
    def __init__(self):
        self.hosts_resolver = _HostsResolver()
        self.network_resolver = resolver.get_default_resolver()
        self.network_resolver.cache = resolver.LRUCache()
    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None, raise_on_no_answer=True,
              _hosts_rdtypes=(dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.PTR)):
        # Query the resolver, using /etc/hosts
        # Behavior:
        # 1. if hosts is enabled and contains answer, return it now
        # 2. query nameservers for qname
        if qname is None:
            qname = '0.0.0.0'
        # Normalize qname: bytes -> text -> dns.name.Name.
        if not isinstance(qname, string_types):
            if isinstance(qname, bytes):
                qname = qname.decode("idna")
        if isinstance(qname, string_types):
            qname = dns.name.from_text(qname, None)
        if isinstance(rdtype, string_types):
            rdtype = dns.rdatatype.from_text(rdtype)
        if rdclass == dns.rdataclass.IN and rdtype in _hosts_rdtypes:
            try:
                answer = self.hosts_resolver.query(qname, rdtype, raise_on_no_answer=False)
            except Exception: # pylint: disable=broad-except
                # A broken hosts file must not break name resolution;
                # report the error and fall through to the network.
                from gevent import get_hub
                get_hub().handle_error(self, *sys.exc_info())
            else:
                if answer.rrset:
                    return answer
        return self.network_resolver.query(qname, rdtype, rdclass,
                                           tcp, source, raise_on_no_answer=raise_on_no_answer)
def _family_to_rdtype(family):
if family == socket.AF_INET:
rdtype = dns.rdatatype.A
elif family == socket.AF_INET6:
rdtype = dns.rdatatype.AAAA
else:
raise socket.gaierror(socket.EAI_FAMILY,
'Address family not supported')
return rdtype
class Resolver(AbstractResolver):
    """
    An *experimental* resolver that uses `dnspython`_.
    This is typically slower than the default threaded resolver
    (unless there's a cache hit, in which case it can be much faster).
    It is usually much faster than the c-ares resolver. It tends to
    scale well as more concurrent resolutions are attempted.
    Under Python 2, if the ``idna`` package is installed, this
    resolver can resolve Unicode host names that the system resolver
    cannot.
    .. note::
        This **does not** use dnspython's default resolver object, or share any
        classes with ``import dns``. A separate copy of the objects is imported to
        be able to function in a non monkey-patched process. The documentation for the resolver
        object still applies.
        The resolver that we use is available as the :attr:`resolver` attribute
        of this object (typically ``gevent.get_hub().resolver.resolver``).
    .. caution::
        Many of the same caveats about DNS results apply here as are documented
        for :class:`gevent.resolver.ares.Resolver`. In addition, the handling of
        symbolic scope IDs in IPv6 addresses passed to ``getaddrinfo`` exhibits
        some differences.
        On PyPy, ``getnameinfo`` can produce results when CPython raises
        ``socket.error``, and gevent's DNSPython resolver also
        raises ``socket.error``.
    .. caution::
        This resolver is experimental. It may be removed or modified in
        the future. As always, feedback is welcome.
    .. versionadded:: 1.3a2
    .. versionchanged:: 20.5.0
       The errors raised are now much more consistent with those
       raised by the standard library resolvers.
       Handling of localhost and broadcast names is now more consistent.
    .. _dnspython: http://www.dnspython.org
    """
    def __init__(self, hub=None): # pylint: disable=unused-argument
        if resolver._resolver is None:
            # Lazily create the process-wide shared _DualResolver the
            # first time any hub needs one, applying gevent's config.
            _resolver = resolver._resolver = _DualResolver()
            if config.resolver_nameservers:
                _resolver.network_resolver.nameservers[:] = config.resolver_nameservers
            if config.resolver_timeout:
                _resolver.network_resolver.lifetime = config.resolver_timeout
        # Different hubs in different threads could be sharing the same
        # resolver.
        assert isinstance(resolver._resolver, _DualResolver)
        self._resolver = resolver._resolver
    @property
    def resolver(self):
        """
        The dnspython resolver object we use.
        This object has several useful attributes that can be used to
        adjust the behaviour of the DNS system:
        * ``cache`` is a :class:`dns.resolver.LRUCache`. Its maximum size
          can be configured by calling :meth:`resolver.cache.set_max_size`
        * ``nameservers`` controls which nameservers to talk to
        * ``lifetime`` configures a timeout for each individual query.
        """
        return self._resolver.network_resolver
    def close(self):
        # Nothing to release; the underlying resolver is shared
        # process-wide.
        pass
    def _getaliases(self, hostname, family):
        # Collect aliases for *hostname*: hosts-file aliases first, then
        # CNAMEs followed through the network resolver.
        if not isinstance(hostname, str):
            if isinstance(hostname, bytes):
                hostname = hostname.decode("idna")
        aliases = self._resolver.hosts_resolver.getaliases(hostname)
        net_resolver = self._resolver.network_resolver
        rdtype = _family_to_rdtype(family)
        while 1:
            try:
                ans = net_resolver.query(hostname, dns.rdatatype.CNAME, rdtype)
            except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
                break
            except dTimeout:
                break
            except AttributeError as ex:
                # A None or integer hostname surfaces as AttributeError
                # deep inside dnspython; convert it to TypeError.
                if hostname is None or isinstance(hostname, int):
                    raise TypeError(ex)
                raise
            else:
                aliases.extend(str(rr.target) for rr in ans.rrset)
                # Follow the CNAME chain.
                hostname = ans[0].target
        return aliases
    def _getaddrinfo(self, host_bytes, port, family, socktype, proto, flags):
        # dnspython really wants the host to be in native format.
        if not isinstance(host_bytes, str):
            host_bytes = host_bytes.decode(self.HOSTNAME_ENCODING)
        if host_bytes == 'ff02::1de:c0:face:8D':
            # This is essentially a hack to make stdlib
            # test_socket:GeneralModuleTests.test_getaddrinfo_ipv6_basic
            # pass. They expect to get back a lowercase ``D``, but
            # dnspython does not do that.
            # ``test_getaddrinfo_ipv6_scopeid_symbolic`` also expect
            # the scopeid to be dropped, but again, dnspython does not
            # do that; we can't fix that here so we skip that test.
            host_bytes = 'ff02::1de:c0:face:8d'
        if family == AF_UNSPEC:
            # This tends to raise in the case that a v6 address did not exist
            # but a v4 does. So we break it into two parts.
            # Note that if there is no ipv6 in the hosts file, but there *is*
            # an ipv4, and there *is* an ipv6 in the nameservers, we will return
            # both (from the first call). The system resolver on OS X only returns
            # the results from the hosts file. doubleclick.com is one example.
            # See also https://github.com/gevent/gevent/issues/1012
            try:
                return _getaddrinfo(host_bytes, port, family, socktype, proto, flags)
            except gaierror:
                try:
                    return _getaddrinfo(host_bytes, port, AF_INET6, socktype, proto, flags)
                except gaierror:
                    return _getaddrinfo(host_bytes, port, AF_INET, socktype, proto, flags)
        else:
            try:
                return _getaddrinfo(host_bytes, port, family, socktype, proto, flags)
            except gaierror as ex:
                if ex.args[0] == EAI_NONAME and family not in self._KNOWN_ADDR_FAMILIES:
                    # It's possible that we got sent an unsupported family. Check
                    # that.
                    ex.args = (EAI_FAMILY, self.EAI_FAMILY_MSG)
                    ex.errno = EAI_FAMILY
                raise
    def _getnameinfo(self, address_bytes, port, sockaddr, flags):
        try:
            return resolver._getnameinfo(sockaddr, flags)
        except error:
            if not flags:
                # dnspython doesn't like getting ports it can't resolve.
                # We have one test, test__socket_dns.py:Test_getnameinfo_geventorg.test_port_zero
                # that does this. We conservatively fix it here; this could be expanded later.
                return resolver._getnameinfo(sockaddr, NI_NUMERICSERV)
            # NOTE(review): when *flags* is non-zero the error is
            # swallowed and this method implicitly returns None —
            # possibly a missing re-raise; confirm intended behavior.
    def _gethostbyaddr(self, ip_address_bytes):
        try:
            return resolver._gethostbyaddr(ip_address_bytes)
        except gaierror as ex:
            if ex.args[0] == EAI_NONAME:
                # Note: The system doesn't *always* raise herror;
                # sometimes the original gaierror propagates through.
                # It's impossible to say ahead of time or just based
                # on the name which it should be. The herror seems to
                # be by far the most common, though.
                raise herror(1, "Unknown host")
            raise
    # Things that need proper error handling: wrap the inherited public
    # methods so gaierrors get normalized by AbstractResolver.
    getnameinfo = AbstractResolver.fixup_gaierror(AbstractResolver.getnameinfo)
    gethostbyaddr = AbstractResolver.fixup_gaierror(AbstractResolver.gethostbyaddr)
    gethostbyname_ex = AbstractResolver.fixup_gaierror(AbstractResolver.gethostbyname_ex)
    getaddrinfo = AbstractResolver.fixup_gaierror(AbstractResolver.getaddrinfo)
| 20,663 | 39.517647 | 97 | py |
gevent | gevent-master/src/gevent/resolver/blocking.py | # Copyright (c) 2018 gevent contributors. See LICENSE for details.
import _socket
__all__ = [
'Resolver',
]
class Resolver(object):
    """
    A resolver that directly uses the system's resolver functions.
    .. caution::
        This resolver is *not* cooperative.
    This resolver has the lowest overhead of any resolver and
    typically approaches the speed of the unmodified :mod:`socket`
    functions. However, it is not cooperative, so if name resolution
    blocks, the entire thread and all its greenlets will be blocked.
    This can be useful during debugging, or it may be a good choice if
    your operating system provides a good caching resolver (such as
    macOS's Directory Services) that is usually very fast and
    functionally non-blocking.
    .. versionchanged:: 1.3a2
       This was previously undocumented and existed in :mod:`gevent.socket`.
    """
    def __init__(self, hub=None):
        # *hub* is accepted only for interface compatibility with the
        # other resolver implementations; no per-hub state is kept.
        pass
    def close(self):
        # Nothing to release; present for resolver interface compatibility.
        pass
    # Bind each blocking _socket function directly as a static method.
    # This loop runs in the class body, so ``locals()`` here is the class
    # namespace being built (a CPython-specific but long-standing idiom).
    for method in (
            'gethostbyname',
            'gethostbyname_ex',
            'getaddrinfo',
            'gethostbyaddr',
            'getnameinfo'
    ):
        locals()[method] = staticmethod(getattr(_socket, method))
| 1,216 | 25.456522 | 76 | py |
gevent | gevent-master/src/gevent/_ffi/watcher.py | """
Useful base classes for watchers. The available
watchers will depend on the specific event loop.
"""
# pylint:disable=not-callable
from __future__ import absolute_import, print_function
import signal as signalmodule
import functools
import warnings
from gevent._config import config
from gevent._util import LazyOnClass
try:
    from tracemalloc import get_object_traceback
    def tracemalloc(init):
        # Python 3: the stdlib tracemalloc handles capture itself, so
        # this decorator is a no-op.
        # PYTHONTRACEMALLOC env var controls this on Python 3.
        return init
except ImportError: # Python < 3.4
    if config.trace_malloc:
        # Use the same env var to turn this on for Python 2
        import traceback
        class _TB(object):
            # Minimal stand-in for a tracemalloc traceback object:
            # stores formatted stack lines and exposes ``format()``.
            __slots__ = ('lines',)
            def __init__(self, lines):
                # These end in newlines, which we don't want for consistency
                self.lines = [x.rstrip() for x in lines]
            def format(self):
                return self.lines
        def tracemalloc(init):
            # Decorator for __init__: records the allocation stack on the
            # instance as ``_captured_malloc``.
            @functools.wraps(init)
            def traces(self, *args, **kwargs):
                init(self, *args, **kwargs)
                self._captured_malloc = _TB(traceback.format_stack())
            return traces
        def get_object_traceback(obj):
            return obj._captured_malloc
    else:
        # Tracing disabled: both helpers become no-ops.
        def get_object_traceback(_obj):
            return None
        def tracemalloc(init):
            return init
from gevent._compat import fsencode
from gevent._ffi import _dbg # pylint:disable=unused-import
from gevent._ffi import GEVENT_DEBUG_LEVEL
from gevent._ffi import DEBUG
from gevent._ffi.loop import GEVENT_CORE_EVENTS
from gevent._ffi.loop import _NOARGS
ALLOW_WATCHER_DEL = GEVENT_DEBUG_LEVEL >= DEBUG
__all__ = [
]
try:
    # Python 3 has ResourceWarning as a builtin.
    ResourceWarning # pylint:disable=used-before-assignment
except NameError:
    class ResourceWarning(Warning):
        "Python 2 fallback"
class _NoWatcherResult(int):
    # Sentinel returned by watcher accessors when the native watcher is
    # gone; behaves like 0 but has a distinctive repr for debugging.
    def __repr__(self):
        return "<NoWatcher>"
# Replace the class with its single instance; only the singleton is used.
_NoWatcherResult = _NoWatcherResult(0)
def events_to_str(event_field, all_events):
    """
    Render the bitmask *event_field* as a ``|``-joined string using the
    ``(flag, name)`` pairs in *all_events*; any bits left over after all
    known flags are consumed are appended as a single hex literal.
    """
    names = []
    remaining = event_field
    for flag, name in all_events:
        if remaining & flag:
            names.append(name)
            remaining &= ~flag
            if not remaining:
                break
    if remaining:
        names.append(hex(remaining))
    return '|'.join(names)
def not_while_active(func):
    """
    Method decorator raising :exc:`ValueError` when invoked while
    ``self.active`` is true.  The wrapped method's return value is
    discarded (the wrapper returns ``None``).
    """
    @functools.wraps(func)
    def guarded(self, *args, **kwargs):
        if self.active:
            raise ValueError("not while active")
        func(self, *args, **kwargs)
    return guarded
def only_if_watcher(func):
    """
    Zero-argument-method decorator: call *func* only while
    ``self._watcher`` is set; otherwise return the ``_NoWatcherResult``
    placeholder (an int equal to 0).
    """
    @functools.wraps(func)
    def guard(self):
        return func(self) if self._watcher else _NoWatcherResult
    return guard
class AbstractWatcherType(type):
    """
    Base metaclass for watchers.
    To use, you will:
    - subclass the watcher class defined from this type.
    - optionally subclass this type
    """
    # pylint:disable=bad-mcs-classmethod-argument
    _FFI = None
    _LIB = None
    def __new__(cls, name, bases, cls_dict):
        # Fill in FFI plumbing for every concrete watcher subclass; the
        # base 'watcher' class and classes opting out are skipped.
        if name != 'watcher' and not cls_dict.get('_watcher_skip_ffi'):
            cls._fill_watcher(name, bases, cls_dict)
        if '__del__' in cls_dict and not ALLOW_WATCHER_DEL: # pragma: no cover
            raise TypeError("CFFI watchers are not allowed to have __del__")
        return type.__new__(cls, name, bases, cls_dict)
    @classmethod
    def _fill_watcher(cls, name, bases, cls_dict):
        # Derive the native watcher type name, struct pointer type and
        # lib callbacks from the class name and install them lazily.
        # TODO: refactor smaller
        # pylint:disable=too-many-locals
        if name.endswith('_'):
            # Strip trailing _ added to avoid keyword duplications
            # e.g., async_
            name = name[:-1]
        def _mro_get(attr, bases, error=True):
            # First value of *attr* found along the given bases.
            for b in bases:
                try:
                    return getattr(b, attr)
                except AttributeError:
                    continue
            if error: # pragma: no cover
                raise AttributeError(attr)
        _watcher_prefix = cls_dict.get('_watcher_prefix') or _mro_get('_watcher_prefix', bases)
        if '_watcher_type' not in cls_dict:
            watcher_type = _watcher_prefix + '_' + name
            cls_dict['_watcher_type'] = watcher_type
        elif not cls_dict['_watcher_type'].startswith(_watcher_prefix):
            watcher_type = _watcher_prefix + '_' + cls_dict['_watcher_type']
            cls_dict['_watcher_type'] = watcher_type
        active_name = _watcher_prefix + '_is_active'
        def _watcher_is_active(self):
            return getattr(self._LIB, active_name)
        LazyOnClass.lazy(cls_dict, _watcher_is_active)
        watcher_struct_name = cls_dict.get('_watcher_struct_name')
        if not watcher_struct_name:
            watcher_struct_pattern = (cls_dict.get('_watcher_struct_pattern')
                                      or _mro_get('_watcher_struct_pattern', bases, False)
                                      or 'struct %s')
            watcher_struct_name = watcher_struct_pattern % (watcher_type,)
        def _watcher_struct_pointer_type(self):
            return self._FFI.typeof(watcher_struct_name + ' *')
        LazyOnClass.lazy(cls_dict, _watcher_struct_pointer_type)
        callback_name = (cls_dict.get('_watcher_callback_name')
                         or _mro_get('_watcher_callback_name', bases, False)
                         or '_gevent_generic_callback')
        def _watcher_callback(self):
            return self._FFI.addressof(self._LIB, callback_name)
        LazyOnClass.lazy(cls_dict, _watcher_callback)
        def _make_meth(name, watcher_name):
            # Build a lazy accessor for the lib's <type>_<name> function.
            def meth(self):
                lib_name = self._watcher_type + '_' + name
                return getattr(self._LIB, lib_name)
            meth.__name__ = watcher_name
            return meth
        for meth_name in 'start', 'stop', 'init':
            watcher_name = '_watcher' + '_' + meth_name
            if watcher_name not in cls_dict:
                LazyOnClass.lazy(cls_dict, _make_meth(meth_name, watcher_name))
    def new_handle(cls, obj):
        # CFFI handle keeping *obj* reachable from C callback data.
        return cls._FFI.new_handle(obj)
    def new(cls, kind):
        # Allocate a new CFFI cdata of the given ctype.
        return cls._FFI.new(kind)
class watcher(object):
    """
    Base class for FFI-backed event watchers.

    Each instance owns a native watcher struct (``self._watcher``)
    allocated via CFFI; while started, a CFFI handle to ``self`` is
    stored in the struct's ``data`` field so the C callback can find
    the Python object again.
    """
    _callback = None
    _args = None
    _watcher = None
    # self._handle has a reference to self, keeping it alive.
    # We must keep self._handle alive for ffi.from_handle() to be
    # able to work. We only fill this in when we are started,
    # and when we are stopped we destroy it.
    # NOTE: This is a GC cycle, so we keep it around for as short
    # as possible.
    _handle = None
    @tracemalloc
    def __init__(self, _loop, ref=True, priority=None, args=_NOARGS):
        self.loop = _loop
        self.__init_priority = priority
        self.__init_args = args
        self.__init_ref = ref
        self._watcher_full_init()
    def _watcher_full_init(self):
        # Allocate and initialize the native watcher from the values
        # captured in __init__.
        priority = self.__init_priority
        ref = self.__init_ref
        args = self.__init_args
        self._watcher_create(ref)
        if priority is not None:
            self._watcher_ffi_set_priority(priority)
        try:
            self._watcher_ffi_init(args)
        except:
            # Let these be GC'd immediately.
            # If we keep them around to when *we* are gc'd,
            # they're probably invalid, meaning any native calls
            # we do then to close() them are likely to fail
            self._watcher = None
            raise
        self._watcher_ffi_set_init_ref(ref)
    @classmethod
    def _watcher_ffi_close(cls, ffi_watcher):
        # Hook for subclasses that must release native resources.
        pass
    def _watcher_create(self, ref): # pylint:disable=unused-argument
        self._watcher = self._watcher_new()
    def _watcher_new(self):
        return type(self).new(self._watcher_struct_pointer_type) # pylint:disable=no-member
    def _watcher_ffi_set_init_ref(self, ref):
        pass
    def _watcher_ffi_set_priority(self, priority):
        pass
    def _watcher_ffi_init(self, args):
        raise NotImplementedError()
    def _watcher_ffi_start(self):
        raise NotImplementedError()
    def _watcher_ffi_stop(self):
        self._watcher_stop(self.loop.ptr, self._watcher)
    def _watcher_ffi_ref(self):
        raise NotImplementedError()
    def _watcher_ffi_unref(self):
        raise NotImplementedError()
    def _watcher_ffi_start_unref(self):
        # While a watcher is active, we don't keep it
        # referenced. This allows a timer, for example, to be started,
        # and still allow the loop to end if there is nothing
        # else to do. see test__order.TestSleep0 for one example.
        self._watcher_ffi_unref()
    def _watcher_ffi_stop_ref(self):
        self._watcher_ffi_ref()
    # A string identifying the type of libev object we watch, e.g., 'ev_io'
    # This should be a class attribute.
    _watcher_type = None
    # A class attribute that is the callback on the libev object that init's the C struct,
    # e.g., libev.ev_io_init. If None, will be set by _init_subclasses.
    _watcher_init = None
    # A class attribute that is the callback on the libev object that starts the C watcher,
    # e.g., libev.ev_io_start. If None, will be set by _init_subclasses.
    _watcher_start = None
    # A class attribute that is the callback on the libev object that stops the C watcher,
    # e.g., libev.ev_io_stop. If None, will be set by _init_subclasses.
    _watcher_stop = None
    # A cffi ctype object identifying the struct pointer we create.
    # This is a class attribute set based on the _watcher_type
    _watcher_struct_pointer_type = None
    # The attribute of the libev object identifying the custom
    # callback function for this type of watcher. This is a class
    # attribute set based on the _watcher_type in _init_subclasses.
    _watcher_callback = None
    _watcher_is_active = None
    def close(self):
        # Idempotently stop the watcher and release the native struct.
        if self._watcher is None:
            return
        self.stop()
        _watcher = self._watcher
        self._watcher = None
        self._watcher_set_data(_watcher, self._FFI.NULL) # pylint: disable=no-member
        self._watcher_ffi_close(_watcher)
        self.loop = None
    def _watcher_set_data(self, the_watcher, data):
        # This abstraction exists for the sole benefit of
        # libuv.watcher.stat, which "subclasses" uv_handle_t.
        # Can we do something to avoid this extra function call?
        the_watcher.data = data
        return data
    def __enter__(self):
        return self
    def __exit__(self, t, v, tb):
        self.close()
    if ALLOW_WATCHER_DEL:
        def __del__(self):
            # Debug-mode finalizer: warn (with allocation traceback when
            # available) if a watcher was never close()'d.
            if self._watcher:
                tb = get_object_traceback(self)
                tb_msg = ''
                if tb is not None:
                    tb_msg = '\n'.join(tb.format())
                    tb_msg = '\nTraceback:\n' + tb_msg
                warnings.warn("Failed to close watcher %r%s" % (self, tb_msg),
                              ResourceWarning)
                # may fail if __init__ did; will be harmlessly printed
                self.close()
    __in_repr = False
    def __repr__(self):
        basic = "<%s at 0x%x" % (self.__class__.__name__, id(self))
        if self.__in_repr:
            return basic + '>'
        # Running child watchers have been seen to have a
        # recursive repr in ``self.args``, thanks to ``gevent.os.fork_and_watch``
        # passing the watcher as an argument to its callback.
        self.__in_repr = True
        try:
            result = '%s%s' % (basic, self._format())
            if self.pending:
                result += " pending"
            if self.callback is not None:
                fself = getattr(self.callback, '__self__', None)
                if fself is self:
                    result += " callback=<bound method %s of self>" % (self.callback.__name__)
                else:
                    result += " callback=%r" % (self.callback, )
            if self.args is not None:
                result += " args=%r" % (self.args, )
            if self.callback is None and self.args is None:
                result += " stopped"
            result += " watcher=%s" % (self._watcher)
            result += " handle=%s" % (self._watcher_handle)
            result += " ref=%s" % (self.ref)
            return result + ">"
        finally:
            self.__in_repr = False
    @property
    def _watcher_handle(self):
        # The CFFI data pointer currently stored on the native struct
        # (None when the struct is gone).
        if self._watcher:
            return self._watcher.data
    def _format(self):
        return ''
    @property
    def ref(self):
        raise NotImplementedError()
    def _get_callback(self):
        # Only an instance-level _callback counts; the class default is
        # reported as None.
        return self._callback if '_callback' in self.__dict__ else None
    def _set_callback(self, cb):
        if not callable(cb) and cb is not None:
            raise TypeError("Expected callable, not %r" % (cb, ))
        if cb is None:
            if '_callback' in self.__dict__:
                del self._callback
        else:
            self._callback = cb
    callback = property(_get_callback, _set_callback)
    def _get_args(self):
        return self._args
    def _set_args(self, args):
        if not isinstance(args, tuple) and args is not None:
            raise TypeError("args must be a tuple or None")
        if args is None:
            if '_args' in self.__dict__:
                del self._args
        else:
            self._args = args
    args = property(_get_args, _set_args)
    def start(self, callback, *args):
        if callback is None:
            raise TypeError('callback must be callable, not None')
        self.callback = callback
        self.args = args or _NOARGS
        # The loop's keepalive set holds us while started.
        self.loop._keepaliveset.add(self)
        self._handle = self._watcher_set_data(self._watcher, type(self).new_handle(self)) # pylint:disable=no-member
        self._watcher_ffi_start()
        self._watcher_ffi_start_unref()
    def stop(self):
        if self.callback is None:
            assert self.loop is None or self not in self.loop._keepaliveset
            return
        self.callback = None
        # Only after setting the signal to make this idempotent do
        # we move ahead.
        self._watcher_ffi_stop_ref()
        self._watcher_ffi_stop()
        self.loop._keepaliveset.discard(self)
        self._handle = None
        self._watcher_set_data(self._watcher, self._FFI.NULL) # pylint:disable=no-member
        self.args = None
    def _get_priority(self):
        return None
    @not_while_active
    def _set_priority(self, priority):
        pass
    priority = property(_get_priority, _set_priority)
    @property
    def active(self):
        if self._watcher is not None and self._watcher_is_active(self._watcher):
            return True
        return False
    @property
    def pending(self):
        return False
# Re-create the class through the metaclass so that AbstractWatcherType's
# __new__ machinery applies to this base class and its subclasses.
watcher = AbstractWatcherType('watcher', (object,), dict(watcher.__dict__))
class IoMixin(object):
    # Bitmask of events a subclass accepts; 0 here rejects everything.
    EVENT_MASK = 0
    def __init__(self, loop, fd, events, ref=True, priority=None, _args=None):
        # Win32 only works with sockets, and only when we use libuv, because
        # we don't use _open_osfhandle. See libuv/watchers.py:io for a description.
        if fd < 0:
            raise ValueError('fd must be non-negative: %r' % fd)
        if events & ~self.EVENT_MASK:
            raise ValueError('illegal event mask: %r' % events)
        self._fd = fd
        super(IoMixin, self).__init__(loop, ref=ref, priority=priority,
                                      args=_args or (fd, events))
    def start(self, callback, *args, **kwargs):
        args = args or _NOARGS
        # pass_events=True prepends the EVENTS marker so the callback
        # receives the fired event bitmask as its first argument.
        if kwargs.get('pass_events'):
            args = (GEVENT_CORE_EVENTS, ) + args
        super(IoMixin, self).start(callback, *args)
    def _format(self):
        return ' fd=%d' % self._fd
class TimerMixin(object):
    _watcher_type = 'timer'
    def __init__(self, loop, after=0.0, repeat=0.0, ref=True, priority=None):
        # *after*: delay until first fire; *repeat*: interval thereafter
        # (0 means one-shot).
        if repeat < 0.0:
            raise ValueError("repeat must be positive or zero: %r" % repeat)
        self._after = after
        self._repeat = repeat
        super(TimerMixin, self).__init__(loop, ref=ref, priority=priority, args=(after, repeat))
    def start(self, callback, *args, **kw):
        update = kw.get("update", self.loop.starting_timer_may_update_loop_time)
        if update:
            # Quoth the libev doc: "This is a costly operation and is
            # usually done automatically within ev_run(). This
            # function is rarely useful, but when some event callback
            # runs for a very long time without entering the event
            # loop, updating libev's idea of the current time is a
            # good idea."
            # 1.3 changed the default for this to False *unless* the loop is
            # running a callback; see libuv for details. Note that
            # starting Timeout objects still sets this to true.
            self.loop.update_now()
        super(TimerMixin, self).start(callback, *args)
    def again(self, callback, *args, **kw):
        # Restart a repeating timer; concrete loops must implement this.
        raise NotImplementedError()
class SignalMixin(object):
    _watcher_type = 'signal'
    def __init__(self, loop, signalnum, ref=True, priority=None):
        # Validate against the interpreter's signal range up front.
        if signalnum < 1 or signalnum >= signalmodule.NSIG:
            raise ValueError('illegal signal number: %r' % signalnum)
        # still possible to crash on one of libev's asserts:
        # 1) "libev: ev_signal_start called with illegal signal number"
        #    EV_NSIG might be different from signal.NSIG on some platforms
        # 2) "libev: a signal must not be attached to two different loops"
        #    we probably could check that in LIBEV_EMBED mode, but not in general
        self._signalnum = signalnum
        super(SignalMixin, self).__init__(loop, ref=ref, priority=priority, args=(signalnum, ))
class IdleMixin(object):
    # Maps to the event loop's ``idle`` watcher type.
    _watcher_type = 'idle'
class PrepareMixin(object):
    # Maps to the event loop's ``prepare`` watcher type.
    _watcher_type = 'prepare'
class CheckMixin(object):
    # Maps to the event loop's ``check`` watcher type.
    _watcher_type = 'check'
class ForkMixin(object):
    # Maps to the event loop's ``fork`` watcher type.
    _watcher_type = 'fork'
class AsyncMixin(object):
    # Maps to the event loop's ``async`` watcher type (cross-thread wakeup).
    _watcher_type = 'async'
    def send(self):
        raise NotImplementedError()
    def send_ignoring_arg(self, _ignored):
        """
        Calling compatibility with ``greenlet.switch(arg)``
        as used by waiters that have ``rawlink``.
        This is an advanced method, not usually needed.
        """
        return self.send()
    @property
    def pending(self):
        raise NotImplementedError()
class ChildMixin(object):
    # hack for libuv which doesn't extend watcher
    _CALL_SUPER_INIT = True
    def __init__(self, loop, pid, trace=0, ref=True):
        # Child watchers require the default loop because SIGCHLD
        # handling is installed there.
        if not loop.default:
            raise TypeError('child watchers are only available on the default loop')
        loop.install_sigchld()
        self._pid = pid
        if self._CALL_SUPER_INIT:
            super(ChildMixin, self).__init__(loop, ref=ref, args=(pid, trace))
    def _format(self):
        return ' pid=%r rstatus=%r' % (self.pid, self.rstatus)
    @property
    def pid(self):
        # The pid this watcher was created to watch.
        return self._pid
    @property
    def rpid(self):
        # The received pid, the result of the waitpid() call.
        return self._rpid
    # Defaults until a child event is received.
    _rpid = None
    _rstatus = 0
    @property
    def rstatus(self):
        # The exit status received for the child.
        return self._rstatus
class StatMixin(object):
    @staticmethod
    def _encode_path(path):
        # Filesystem-encode *path* to bytes for the C API.
        return fsencode(path)
    def __init__(self, _loop, path, interval=0.0, ref=True, priority=None):
        # Store the encoded path in the same attribute that corecext does
        self._paths = self._encode_path(path)
        # Keep the original path to avoid re-encoding, especially on Python 3
        self._path = path
        # Although CFFI would automatically convert a bytes object into a char* when
        # calling ev_stat_init(..., char*, ...), on PyPy the char* pointer is not
        # guaranteed to live past the function call. On CPython, only with a constant/interned
        # bytes object is the pointer guaranteed to last path the function call. (And since
        # Python 3 is pretty much guaranteed to produce a newly-encoded bytes object above, thats
        # rarely the case). Therefore, we must keep a reference to the produced cdata object
        # so that the struct ev_stat_watcher's `path` pointer doesn't become invalid/deallocated
        self._cpath = self._FFI.new('char[]', self._paths)
        self._interval = interval
        super(StatMixin, self).__init__(_loop, ref=ref, priority=priority,
                                        args=(self._cpath,
                                              interval))
    @property
    def path(self):
        # The original (un-encoded) path being watched.
        return self._path
    @property
    def attr(self):
        raise NotImplementedError
    @property
    def prev(self):
        raise NotImplementedError
    @property
    def interval(self):
        # Polling interval in seconds (0.0 lets the loop choose).
        return self._interval
| 20,954 | 31.488372 | 116 | py |
gevent | gevent-master/src/gevent/_ffi/loop.py | """
Basic loop implementation for ffi-based cores.
"""
# pylint: disable=too-many-lines, protected-access, redefined-outer-name, not-callable
from __future__ import absolute_import, print_function
from collections import deque
import sys
import os
import traceback
from gevent._ffi import _dbg
from gevent._ffi import GEVENT_DEBUG_LEVEL
from gevent._ffi import TRACE
from gevent._ffi.callback import callback
from gevent._compat import PYPY
from gevent.exceptions import HubDestroyed
from gevent import getswitchinterval
__all__ = [
'AbstractLoop',
'assign_standard_callbacks',
]
class _EVENTSType(object):
def __repr__(self):
return 'gevent.core.EVENTS'
EVENTS = GEVENT_CORE_EVENTS = _EVENTSType()
class _DiscardedSet(frozenset):
__slots__ = ()
def discard(self, o):
"Does nothing."
#####
## Note on CFFI objects, callbacks and the lifecycle of watcher objects
#
# Each subclass of `watcher` allocates a C structure of the
# appropriate type e.g., struct gevent_ev_io and holds this pointer in
# its `_gwatcher` attribute. When that watcher instance is garbage
# collected, then the C structure is also freed. The C structure is
# passed to libev from the watcher's start() method and then to the
# appropriate C callback function, e.g., _gevent_ev_io_callback, which
# passes it back to python's _python_callback where we need the
# watcher instance. Therefore, as long as that callback is active (the
# watcher is started), the watcher instance must not be allowed to get
# GC'd---any access at the C level or even the FFI level to the freed
# memory could crash the process.
#
# However, the typical idiom calls for writing something like this:
# loop.io(fd, python_cb).start()
# thus forgetting the newly created watcher subclass and allowing it to be immediately
# GC'd. To combat this, when the watcher is started, it places itself into the loop's
# `_keepaliveset`, and it only removes itself when the watcher's `stop()` method is called.
# Often, this is the *only* reference keeping the watcher object, and hence its C structure,
# alive.
#
# This is slightly complicated by the fact that the python-level
# callback, called from the C callback, could choose to manually stop
# the watcher. When we return to the C level callback, we now have an
# invalid pointer, and attempting to pass it back to Python (e.g., to
# handle an error) could crash. Hence, _python_callback,
# _gevent_io_callback, and _python_handle_error cooperate to make sure
# that the watcher instance stays in the loop's `_keepaliveset` while
# the C code could be running---and if it gets removed, to not call back
# to Python again.
# See also https://github.com/gevent/gevent/issues/676
####
class AbstractCallbacks(object):
    """
    The Python side of the C-level (``def_extern``) callbacks shared by
    the CFFI loop implementations.

    Instances are created by :func:`assign_standard_callbacks` and must
    be kept alive (typically at module scope) for as long as the FFI
    library may call back into them.
    """

    def __init__(self, ffi):
        self.ffi = ffi
        self.callbacks = []
        if GEVENT_DEBUG_LEVEL < TRACE:
            # Not tracing: skip the instrumentable indirection below and
            # bind the FFI function directly.
            self.from_handle = ffi.from_handle

    def from_handle(self, handle): # pylint:disable=method-hidden
        # Tracing variant; hidden by the instance attribute set in
        # __init__ when not debugging.
        x = self.ffi.from_handle(handle)
        return x

    def python_callback(self, handle, revents):
        """
        Returns an integer having one of three values:

        - -1
          An exception occurred during the callback and you must call
          :func:`_python_handle_error` to deal with it. The Python watcher
          object will have the exception tuple saved in ``_exc_info``.
        - 1
          Everything went according to plan. You should check to see if the native
          watcher is still active, and call :func:`python_stop` if it is not. This will
          clean up the memory. Finding the watcher still active at the event loop level,
          but not having stopped itself at the gevent level is a buggy scenario and
          shouldn't happen.
        - 2
          Everything went according to plan, but the watcher has already
          been stopped. Its memory may no longer be valid.

        This function should never return 0, as that's the default value that
        Python exceptions will produce.
        """
        #_dbg("Running callback", handle)
        orig_ffi_watcher = None
        orig_loop = None
        try:
            # Even dereferencing the handle needs to be inside the try/except;
            # if we don't return normally (e.g., a signal) then we wind up going
            # to the 'onerror' handler (unhandled_onerror), which
            # is not what we want; that can permanently wedge the loop depending
            # on which callback was executing.
            # XXX: See comments in that function. We may be able to restart and do better?
            if not handle:
                # Hmm, a NULL handle. That's not supposed to happen.
                # We can easily get into a loop if we deref it and allow that
                # to raise.
                _dbg("python_callback got null handle")
                return 1
            the_watcher = self.from_handle(handle)
            orig_ffi_watcher = the_watcher._watcher
            orig_loop = the_watcher.loop
            args = the_watcher.args
            if args is None:
                # Legacy behaviour from corecext: convert None into ()
                # See test__core_watcher.py
                args = _NOARGS
            if args and args[0] == GEVENT_CORE_EVENTS:
                args = (revents, ) + args[1:]
            the_watcher.callback(*args) # None here means we weren't started
        except: # pylint:disable=bare-except
            # It's possible for ``the_watcher`` to be undefined (UnboundLocalError)
            # if we threw an exception (signal) on the line that created that variable.
            # This is typically the case with a signal under libuv
            try:
                the_watcher
            except UnboundLocalError:
                the_watcher = self.from_handle(handle)

            # It may not be safe to do anything with `handle` or `orig_ffi_watcher`
            # anymore. If the watcher closed or stopped itself *before* throwing the exception,
            # then the `handle` and `orig_ffi_watcher` may no longer be valid. Attempting to
            # e.g., dereference the handle is likely to crash the process.
            the_watcher._exc_info = sys.exc_info()

            # If it hasn't been stopped, we need to make sure its
            # memory stays valid so we can stop it at the native level if needed.
            # If its loop is gone, it has already been stopped,
            # see https://github.com/gevent/gevent/issues/1295 for a case where
            # that happened, as well as issue #1482
            if (
                    # The last thing it does. Full successful close.
                    the_watcher.loop is None
                    # Only a partial close. We could leak memory and even crash later.
                    or the_watcher._handle is None
            ):
                # Prevent unhandled_onerror from using the invalid handle
                handle = None
                exc_info = the_watcher._exc_info
                del the_watcher._exc_info
                try:
                    if orig_loop is not None:
                        orig_loop.handle_error(the_watcher, *exc_info)
                    else:
                        self.unhandled_onerror(*exc_info)
                except:
                    print("WARNING: gevent: Error when handling error",
                          file=sys.stderr)
                    traceback.print_exc()
                # Signal that we're closed, no need to do more.
                return 2

            # Keep it around so we can close it later.
            the_watcher.loop._keepaliveset.add(the_watcher)
            return -1

        if (the_watcher.loop is not None
                and the_watcher in the_watcher.loop._keepaliveset
                and the_watcher._watcher is orig_ffi_watcher):
            # It didn't stop itself, *nor* did it stop itself, reset
            # its watcher, and start itself again (libuv's io watchers
            # multiplex and may do that).
            # The normal, expected scenario when we find the watcher still
            # in the keepaliveset is that it is still active at the event loop
            # level, so we don't expect that python_stop gets called.
            #_dbg("The watcher has not stopped itself, possibly still active", the_watcher)
            return 1
        return 2 # it stopped itself

    def python_handle_error(self, handle, _revents):
        _dbg("Handling error for handle", handle)
        if not handle:
            return
        try:
            watcher = self.from_handle(handle)
            exc_info = watcher._exc_info
            del watcher._exc_info
            # In the past, we passed the ``watcher`` itself as the context,
            # which typically meant that the Hub would just print
            # the exception. This is a problem because sometimes we can't
            # detect signals until late in ``python_callback``; specifically,
            # test_selectors.py:DefaultSelectorTest.test_select_interrupt_exc
            # installs a SIGALRM handler that raises an exception. That exception can happen
            # before we enter ``python_callback`` or at any point within it because of the way
            # libuv swallows signals. By passing None, we get the exception propagated into
            # the main greenlet (which is probably *also* not what we always want, but
            # I see no way to distinguish the cases).
            watcher.loop.handle_error(None, *exc_info)
        finally:
            # XXX Since we're here on an error condition, and we
            # made sure that the watcher object was put in loop._keepaliveset,
            # what about not stopping the watcher? Looks like a possible
            # memory leak?
            # XXX: This used to do "if revents & (libev.EV_READ | libev.EV_WRITE)"
            # before stopping. Why?
            try:
                watcher.stop()
            except: # pylint:disable=bare-except
                watcher.loop.handle_error(watcher, *sys.exc_info())
            return # pylint:disable=lost-exception

    def unhandled_onerror(self, t, v, tb):
        # This is supposed to be called for signals, etc.
        # This is the onerror= value for CFFI.
        # If we return None, C will get a value of 0/NULL;
        # if we raise, CFFI will print the exception and then
        # return 0/NULL; (unless error= was configured)
        # If things go as planned, we return the value that asks
        # C to call back and check on if the watcher needs to be closed or
        # not.

        # XXX: TODO: Could this cause events to be lost? Maybe we need to return
        # a value that causes the C loop to try the callback again?
        # at least for signals under libuv, which are delivered at very odd times.
        # Hopefully the event still shows up when we poll the next time.
        watcher = None
        handle = tb.tb_frame.f_locals.get('handle') if tb is not None else None
        if handle: # handle could be NULL
            watcher = self.from_handle(handle)
        if watcher is not None:
            watcher.loop.handle_error(None, t, v, tb)
            return 1

        # Raising it causes a lot of noise from CFFI
        print("WARNING: gevent: Unhandled error with no watcher",
              file=sys.stderr)
        traceback.print_exception(t, v, tb)

    def python_stop(self, handle):
        if not handle: # pragma: no cover
            print(
                "WARNING: gevent: Unable to dereference handle; not stopping watcher. "
                "Native resources may leak. This is most likely a bug in gevent.",
                file=sys.stderr)
            # The alternative is to crash with no helpful information
            # NOTE: Raising exceptions here does nothing, they're swallowed by CFFI.
            # Since the C level passed in a null pointer, even dereferencing the handle
            # will just produce some exceptions.
            return
        watcher = self.from_handle(handle)
        watcher.stop()

    if not PYPY:
        def python_check_callback(self, watcher_ptr): # pylint:disable=unused-argument
            # If we have the onerror callback, this is a no-op; all the real
            # work to rethrow the exception is done by the onerror callback

            # NOTE: Unlike the rest of the functions, this is called with a pointer
            # to the C level structure, *not* a pointer to the void* that represents a
            # <cdata> for the Python Watcher object.
            pass
    else: # PyPy
        # On PyPy, we need the function to have some sort of body, otherwise
        # the signal exceptions don't always get caught, *especially* with
        # libuv (however, there's no reason to expect this to only be a libuv
        # issue; it's just that we don't depend on the periodic signal timer
        # under libev, so the issue is much more pronounced under libuv)
        # test_socket's test_sendall_interrupted can hang.
        # See https://github.com/gevent/gevent/issues/1112
        def python_check_callback(self, watcher_ptr): # pylint:disable=unused-argument
            # Things we've tried that *don't* work:
            # greenlet.getcurrent()
            # 1 + 1
            try:
                raise MemoryError()
            except MemoryError:
                pass

    def python_prepare_callback(self, watcher_ptr):
        loop = self._find_loop_from_c_watcher(watcher_ptr)
        if loop is None: # pragma: no cover
            print("WARNING: gevent: running prepare callbacks from a destroyed handle: ",
                  watcher_ptr)
            return
        loop._run_callbacks()

    def check_callback_onerror(self, t, v, tb):
        watcher_ptr = self._find_watcher_ptr_in_traceback(tb)
        if watcher_ptr:
            loop = self._find_loop_from_c_watcher(watcher_ptr)
            if loop is not None:
                # None as the context argument causes the exception to be raised
                # in the main greenlet.
                loop.handle_error(None, t, v, tb)
                return None
        raise v # Let CFFI print

    def _find_loop_from_c_watcher(self, watcher_ptr):
        # Subclass responsibility: map a C watcher pointer back to the
        # Python loop object.
        raise NotImplementedError()

    def _find_watcher_ptr_in_traceback(self, tb):
        # Best-effort recovery of the ``watcher_ptr`` local from the
        # failing frame; None if there is no traceback.
        return tb.tb_frame.f_locals['watcher_ptr'] if tb is not None else None
def assign_standard_callbacks(ffi, lib, callbacks_class, extras=()): # pylint:disable=unused-argument
    """
    Given the typical *ffi* and *lib* arguments, and a subclass of :class:`AbstractCallbacks`
    in *callbacks_class*, set up the ``def_extern`` Python callbacks from C
    into an instance of *callbacks_class*.

    :param tuple extras: If given, this is a sequence of ``(name, error_function)``
       additional callbacks to register. Each *name* is an attribute of
       the *callbacks_class* instance. (Each element can also be just a *name*,
       in which case the default ``unhandled_onerror`` error handler is used.)
    :return: The *callbacks_class* instance. This object must be kept alive,
       typically at module scope.
    """
    # callbacks keeps these cdata objects alive at the python level
    callbacks = callbacks_class(ffi)
    # Normalize each extra to a (name, error_function) pair. A bare name
    # is a string; testing for that (instead of ``len(extra) == 2``, as
    # was done previously) keeps a two-character name like ``'io'`` from
    # being mistaken for a (name, error) pair and unpacked character by
    # character.
    extras = [(extra, None) if isinstance(extra, str) else extra for extra in extras]
    extras = tuple((getattr(callbacks, name), error) for name, error in extras)
    for (func, error_func) in (
            (callbacks.python_callback, None),
            (callbacks.python_handle_error, None),
            (callbacks.python_stop, None),
            (callbacks.python_check_callback, callbacks.check_callback_onerror),
            (callbacks.python_prepare_callback, callbacks.check_callback_onerror)
    ) + extras:
        # The name of the callback function matches the 'extern Python' declaration.
        error_func = error_func or callbacks.unhandled_onerror
        # NB: named ``extern_func`` so as not to shadow the module-level
        # ``callback`` class imported above.
        extern_func = ffi.def_extern(onerror=error_func)(func)
        # keep alive the cdata
        # (def_extern returns the original function, and it requests that
        # the function be "global", so maybe it keeps a hard reference to it somewhere now
        # unlike ffi.callback(), and we don't need to do this?)
        callbacks.callbacks.append(extern_func)
    # At this point, the library C variable (static function, actually)
    # is filled in.
    return callbacks
if sys.version_info[0] >= 3:
basestring = (bytes, str)
integer_types = (int,)
else:
import __builtin__ # pylint:disable=import-error
basestring = (__builtin__.basestring,)
integer_types = (int, __builtin__.long)
_NOARGS = ()
class AbstractLoop(object):
    """
    Base class for the CFFI event loop implementations (libev and
    libuv). Owns the native loop pointer, the deferred-callback queue
    fed by :meth:`run_callback`, and the auxiliary check/prepare/timer
    watchers used to run those callbacks.
    """
    # pylint:disable=too-many-public-methods,too-many-instance-attributes

    # How many callbacks we should run between checking against the
    # switch interval.
    CALLBACK_CHECK_COUNT = 50

    error_handler = None

    # CFFI type strings for the auxiliary watcher structs; subclasses
    # must fill these in.
    _CHECK_POINTER = None

    _TIMER_POINTER = None
    _TIMER_CALLBACK_SIG = None

    _PREPARE_POINTER = None

    starting_timer_may_update_loop_time = False

    # Subclasses should set this in __init__ to reflect
    # whether they were the default loop.
    _default = None

    # Class-level placeholder so a partially-destroyed loop can still
    # be "discarded from" safely; replaced with a real set in __init__.
    _keepaliveset = _DiscardedSet()

    _threadsafe_async = None

    def __init__(self, ffi, lib, watchers, flags=None, default=None):
        self._ffi = ffi
        self._lib = lib
        self._ptr = None
        self._handle_to_self = self._ffi.new_handle(self) # XXX: Reference cycle?
        self._watchers = watchers
        self._in_callback = False
        self._callbacks = deque()
        # Stores python watcher objects while they are started
        self._keepaliveset = set()
        self._init_loop_and_aux_watchers(flags, default)

    def _init_loop_and_aux_watchers(self, flags=None, default=None):
        self._ptr = self._init_loop(flags, default)

        # self._check is a watcher that runs in each iteration of the
        # mainloop, just after the blocking call. It's point is to handle
        # signals. It doesn't run watchers or callbacks, it just exists to give
        # CFFI a chance to raise signal exceptions so we can handle them.
        self._check = self._ffi.new(self._CHECK_POINTER)
        self._check.data = self._handle_to_self
        self._init_and_start_check()

        # self._prepare is a watcher that runs in each iteration of the mainloop,
        # just before the blocking call. It's where we run deferred callbacks
        # from self.run_callback. This cooperates with _setup_for_run_callback()
        # to schedule self._timer0 if needed.
        self._prepare = self._ffi.new(self._PREPARE_POINTER)
        self._prepare.data = self._handle_to_self
        self._init_and_start_prepare()

        # A timer we start and stop on demand. If we have callbacks,
        # too many to run in one iteration of _run_callbacks, we turn this
        # on so as to have the next iteration of the run loop return to us
        # as quickly as possible.
        # TODO: There may be a more efficient way to do this using ev_timer_again;
        # see the "ev_timer" section of the ev manpage (http://linux.die.net/man/3/ev)
        # Alternatively, setting the ev maximum block time may also work.
        self._timer0 = self._ffi.new(self._TIMER_POINTER)
        self._timer0.data = self._handle_to_self
        self._init_callback_timer()

        self._threadsafe_async = self.async_(ref=False)
        # No need to do anything with this on ``fork()``, both libev and libuv
        # take care of creating a new pipe in their respective ``loop_fork()`` methods.
        self._threadsafe_async.start(lambda: None)

        # TODO: We may be able to do something nicer and use the existing python_callback
        # combined with onerror and the class check/timer/prepare to simplify things
        # and unify our handling

    def _init_loop(self, flags, default):
        """
        Called by __init__ to create or find the loop. The return value
        is assigned to self._ptr.
        """
        raise NotImplementedError()

    def _init_and_start_check(self):
        raise NotImplementedError()

    def _init_and_start_prepare(self):
        raise NotImplementedError()

    def _init_callback_timer(self):
        raise NotImplementedError()

    def _stop_callback_timer(self):
        raise NotImplementedError()

    def _start_callback_timer(self):
        raise NotImplementedError()

    def _check_callback_handle_error(self, t, v, tb):
        self.handle_error(None, t, v, tb)

    def _run_callbacks(self): # pylint:disable=too-many-branches
        # When we're running callbacks, its safe for timers to
        # update the notion of the current time (because if we're here,
        # we're not running in a timer callback that may let other timers
        # run; this is mostly an issue for libuv).

        # That's actually a bit of a lie: on libev, self._timer0 really is
        # a timer, and so sometimes this is running in a timer callback, not
        # a prepare callback. But that's OK, libev doesn't suffer from cascading
        # timer expiration and it's safe to update the loop time at any
        # moment there.
        self.starting_timer_may_update_loop_time = True
        try:
            count = self.CALLBACK_CHECK_COUNT
            now = self.now()
            expiration = now + getswitchinterval()
            self._stop_callback_timer()
            while self._callbacks:
                cb = self._callbacks.popleft() # pylint:disable=assignment-from-no-return
                count -= 1
                self.unref() # XXX: libuv doesn't have a global ref count!
                callback = cb.callback
                cb.callback = None
                args = cb.args
                if callback is None or args is None:
                    # it's been stopped
                    continue

                try:
                    callback(*args)
                except: # pylint:disable=bare-except
                    # If we allow an exception to escape this method (while we are running the ev callback),
                    # then CFFI will print the error and libev will continue executing.
                    # There are two problems with this. The first is that the code after
                    # the loop won't run. The second is that any remaining callbacks scheduled
                    # for this loop iteration will be silently dropped; they won't run, but they'll
                    # also not be *stopped* (which is not a huge deal unless you're looking for
                    # consistency or checking the boolean/pending status; the loop doesn't keep
                    # a reference to them like it does to watchers...*UNLESS* the callback itself had
                    # a reference to a watcher; then I don't know what would happen, it depends on
                    # the state of the watcher---a leak or crash is not totally inconceivable).
                    # The Cython implementation in core.pyx uses gevent_call from callbacks.c
                    # to run the callback, which uses gevent_handle_error to handle any errors the
                    # Python callback raises...it unconditionally simply prints any error raised
                    # by loop.handle_error and clears it, so callback handling continues.
                    # We take a similar approach (but are extra careful about printing)
                    try:
                        self.handle_error(cb, *sys.exc_info())
                    except: # pylint:disable=bare-except
                        try:
                            print("Exception while handling another error", file=sys.stderr)
                            traceback.print_exc()
                        except: # pylint:disable=bare-except
                            pass # Nothing we can do here
                finally:
                    # NOTE: this must be reset here, because cb.args is used as a flag in
                    # the callback class so that bool(cb) of a callback that has been run
                    # becomes False
                    cb.args = None

                # We've finished running one group of callbacks
                # but we may have more, so before looping check our
                # switch interval.
                if count == 0 and self._callbacks:
                    count = self.CALLBACK_CHECK_COUNT
                    self.update_now()
                    if self.now() >= expiration:
                        now = 0
                        break

            # Update the time before we start going again, if we didn't
            # just do so.
            if now != 0:
                self.update_now()

            if self._callbacks:
                self._start_callback_timer()
        finally:
            self.starting_timer_may_update_loop_time = False

    def _stop_aux_watchers(self):
        if self._threadsafe_async is not None:
            self._threadsafe_async.close()
            self._threadsafe_async = None

    def destroy(self):
        """
        Tear down the native loop (if permitted) and invalidate this
        object. Returns False if the loop could not be destroyed.
        """
        ptr = self.ptr
        if ptr:
            try:
                if not self._can_destroy_loop(ptr):
                    return False
                self._stop_aux_watchers()
                self._destroy_loop(ptr)
            finally:
                # not ffi.NULL, we don't want something that can be
                # passed to C and crash later. This will create nice friendly
                # TypeError from CFFI.
                self._ptr = None
                del self._handle_to_self
                del self._callbacks
                del self._keepaliveset

            return True

    def _can_destroy_loop(self, ptr):
        raise NotImplementedError()

    def _destroy_loop(self, ptr):
        raise NotImplementedError()

    @property
    def ptr(self):
        # Use this when you need to be sure the pointer is valid.
        return self._ptr

    @property
    def WatcherType(self):
        return self._watchers.watcher

    @property
    def MAXPRI(self):
        return 1

    @property
    def MINPRI(self):
        return 1

    def _handle_syserr(self, message, errno):
        try:
            errno = os.strerror(errno)
        except: # pylint:disable=bare-except
            traceback.print_exc()
        try:
            message = '%s: %s' % (message, errno)
        except: # pylint:disable=bare-except
            traceback.print_exc()
        self.handle_error(None, SystemError, SystemError(message), None)

    def handle_error(self, context, type, value, tb):
        if type is HubDestroyed:
            # The hub is being torn down: drop all pending callbacks
            # and stop the loop instead of reporting an error.
            self._callbacks.clear()
            self.break_()
            return

        handle_error = None
        error_handler = self.error_handler
        if error_handler is not None:
            # we do want to do getattr every time so that setting Hub.handle_error property just works
            handle_error = getattr(error_handler, 'handle_error', error_handler)
            handle_error(context, type, value, tb)
        else:
            self._default_handle_error(context, type, value, tb)

    def _default_handle_error(self, context, type, value, tb): # pylint:disable=unused-argument
        # note: Hub sets its own error handler so this is not used by gevent
        # this is here to make core.loop usable without the rest of gevent
        # Should cause the loop to stop running.
        traceback.print_exception(type, value, tb)

    def run(self, nowait=False, once=False):
        raise NotImplementedError()

    def reinit(self):
        raise NotImplementedError()

    def ref(self):
        # XXX: libuv doesn't do it this way
        raise NotImplementedError()

    def unref(self):
        raise NotImplementedError()

    def break_(self, how=None):
        raise NotImplementedError()

    def verify(self):
        pass

    def now(self):
        raise NotImplementedError()

    def update_now(self):
        raise NotImplementedError()

    def update(self):
        # Deprecated alias for update_now().
        import warnings
        warnings.warn("'update' is deprecated; use 'update_now'",
                      DeprecationWarning,
                      stacklevel=2)
        self.update_now()

    def __repr__(self):
        return '<%s.%s at 0x%x %s>' % (
            self.__class__.__module__,
            self.__class__.__name__,
            id(self),
            self._format()
        )

    @property
    def default(self):
        return self._default if self.ptr else False

    @property
    def iteration(self):
        return -1

    @property
    def depth(self):
        return -1

    @property
    def backend_int(self):
        return 0

    @property
    def backend(self):
        return "default"

    @property
    def pendingcnt(self):
        return 0

    def io(self, fd, events, ref=True, priority=None):
        return self._watchers.io(self, fd, events, ref, priority)

    def closing_fd(self, fd): # pylint:disable=unused-argument
        return False

    def timer(self, after, repeat=0.0, ref=True, priority=None):
        return self._watchers.timer(self, after, repeat, ref, priority)

    def signal(self, signum, ref=True, priority=None):
        return self._watchers.signal(self, signum, ref, priority)

    def idle(self, ref=True, priority=None):
        return self._watchers.idle(self, ref, priority)

    def prepare(self, ref=True, priority=None):
        return self._watchers.prepare(self, ref, priority)

    def check(self, ref=True, priority=None):
        return self._watchers.check(self, ref, priority)

    def fork(self, ref=True, priority=None):
        return self._watchers.fork(self, ref, priority)

    def async_(self, ref=True, priority=None):
        return self._watchers.async_(self, ref, priority)

    # Provide BWC for those that can use 'async' as is
    locals()['async'] = async_

    if sys.platform != "win32":

        def child(self, pid, trace=0, ref=True):
            return self._watchers.child(self, pid, trace, ref)

        def install_sigchld(self):
            pass

    def stat(self, path, interval=0.0, ref=True, priority=None):
        return self._watchers.stat(self, path, interval, ref, priority)

    def callback(self, priority=None):
        return callback(self, priority)

    def _setup_for_run_callback(self):
        raise NotImplementedError()

    def run_callback(self, func, *args):
        # If we happen to already be running callbacks (inside
        # _run_callbacks), this could happen almost immediately,
        # without the loop cycling.
        cb = callback(func, args)
        self._callbacks.append(cb) # Relying on the GIL for this to be threadsafe
        self._setup_for_run_callback() # XXX: This may not be threadsafe.
        return cb

    def run_callback_threadsafe(self, func, *args):
        cb = self.run_callback(func, *args)
        self._threadsafe_async.send()
        return cb

    def _format(self):
        ptr = self.ptr
        if not ptr:
            return 'destroyed'
        msg = "backend=" + self.backend
        msg += ' ptr=' + str(ptr)
        if self.default:
            msg += ' default'
        msg += ' pending=%s' % self.pendingcnt
        msg += self._format_details()
        return msg

    def _format_details(self):
        msg = ''
        fileno = self.fileno() # pylint:disable=assignment-from-none
        try:
            activecnt = self.activecnt
        except AttributeError:
            activecnt = None
        if activecnt is not None:
            msg += ' ref=' + repr(activecnt)
        if fileno is not None:
            msg += ' fileno=' + repr(fileno)
        #if sigfd is not None and sigfd != -1:
        #    msg += ' sigfd=' + repr(sigfd)
        msg += ' callbacks=' + str(len(self._callbacks))
        return msg

    def fileno(self):
        return None

    @property
    def activecnt(self):
        if not self.ptr:
            raise ValueError('operation on destroyed loop')
        return 0
| 31,948 | 39.187421 | 108 | py |
gevent | gevent-master/src/gevent/_ffi/__init__.py | """
Internal helpers for FFI implementations.
"""
from __future__ import print_function, absolute_import
import os
import sys
def _dbg(*args, **kwargs):
# pylint:disable=unused-argument
pass
#_dbg = print
def _pid_dbg(*args, **kwargs):
kwargs['file'] = sys.stderr
print(os.getpid(), *args, **kwargs)
# Debug verbosity levels, lowest to highest.
CRITICAL = 1
ERROR = 3
DEBUG = 5
TRACE = 9

# Resolve the level named by $GEVENT_DEBUG (default "CRITICAL") against
# the module-level constants defined just above.
GEVENT_DEBUG_LEVEL = globals()[os.getenv("GEVENT_DEBUG", 'CRITICAL').upper()]

if GEVENT_DEBUG_LEVEL >= TRACE:
    # Tracing requested: route _dbg output through the pid-printing version.
    _dbg = _pid_dbg
| 493 | 16.642857 | 74 | py |
gevent | gevent-master/src/gevent/_ffi/callback.py | from __future__ import absolute_import
from __future__ import print_function
from zope.interface import implementer
from gevent._interfaces import ICallback
__all__ = [
'callback',
]
@implementer(ICallback)
class callback(object):
    """
    A lightweight deferred-call record used by the loop's callback queue.

    Holds the function and its argument tuple until the loop runs it;
    :meth:`stop` (alias :meth:`close`) cancels it in place.
    """

    __slots__ = ('callback', 'args')

    def __init__(self, cb, args):
        self.callback = cb
        self.args = args

    def stop(self):
        # Cancelling simply drops both references; the loop skips
        # entries whose callback/args are gone.
        self.callback = None
        self.args = None

    close = stop

    # Note that __nonzero__ and pending are different
    # bool() is used in contexts where we need to know whether to schedule another callback,
    # so it's true if it's pending or currently running
    # 'pending' has the same meaning as libev watchers: it is cleared before actually
    # running the callback
    def __bool__(self):
        # Nonzero while pending *or* currently executing: the loop only
        # clears ``args`` after the call has finished.
        return self.args is not None

    @property
    def pending(self):
        return self.callback is not None

    def _format(self):
        return ''

    def __repr__(self):
        pieces = ["<%s at 0x%x" % (type(self).__name__, id(self))]
        if self.pending:
            pieces.append(" pending")
        if self.callback is not None:
            pieces.append(" callback=%r" % (self.callback,))
        if self.args is not None:
            pieces.append(" args=%r" % (self.args,))
        if self.callback is None and self.args is None:
            pieces.append(" stopped")
        pieces.append(">")
        return "".join(pieces)
| 1,564 | 25.982759 | 92 | py |
gevent | gevent-master/src/gevent/libuv/watcher.py | # pylint: disable=too-many-lines, protected-access, redefined-outer-name, not-callable
# pylint: disable=no-member
from __future__ import absolute_import, print_function
import functools
import sys
from gevent.libuv import _corecffi # pylint:disable=no-name-in-module,import-error
# Nothing public here
__all__ = []
ffi = _corecffi.ffi
libuv = _corecffi.lib
from gevent._ffi import watcher as _base
from gevent._ffi import _dbg
# A set of uv_handle_t* CFFI objects. Kept around
# to keep the memory alive until libuv is done with them.
class _ClosingWatchers(dict):
__slots__ = ()
def remove(self, obj):
try:
del self[obj]
except KeyError: # pragma: no cover
# This has been seen to happen if the module is executed twice
# and so the callback doesn't match the storage seen by watcher objects.
print(
'gevent error: Unable to remove closing watcher from keepaliveset. '
'Has the module state been corrupted or executed more than once?',
file=sys.stderr
)
_closing_watchers = _ClosingWatchers()

# In debug mode, it would be nice to be able to clear the memory of
# the watcher (its size determined by
# libuv.uv_handle_size(ffi_watcher.type)) using memset so that if we
# are using it after it's supposedly been closed and deleted, we'd
# catch it sooner. BUT doing so breaks test__threadpool. We get errors
# about `pthread_mutex_lock[3]: Invalid argument` (and sometimes we
# crash) suggesting either that we're writing on memory that doesn't
# belong to us, somehow, or that we haven't actually lost all
# references...

# The C-level close callback: simply drops the handle from the
# keep-alive dict once libuv is completely done with it.
_uv_close_callback = ffi.def_extern(name='_uv_close_callback')(
    _closing_watchers.remove
)

# (libuv event bit, human-readable name) pairs used by _events_to_str.
_events = [(libuv.UV_READABLE, "READ"),
           (libuv.UV_WRITABLE, "WRITE")]
def _events_to_str(events): # export
    """Return a human-readable description of the libuv event mask *events*."""
    return _base.events_to_str(events, _events)
class UVFuncallError(ValueError):
    """Raised when a libuv function wrapped by ``libuv_error_wrapper`` returns an error code."""
class libuv_error_wrapper(object):
    """
    Attribute-access proxy over the raw libuv library (used by the
    watcher metaclass) that raises :class:`UVFuncallError` whenever a
    wrapped function returns a negative result.

    Wrapped functions are expected to return either ``None`` or an int,
    following libuv's convention that negative values signal errors.
    Each wrapper is cached on the instance after first use. If the
    first positional argument is a ``watcher`` instance, it is dropped
    before calling into libuv.
    """

    def __init__(self, uv):
        self._libuv = uv

    def __getattr__(self, name):
        libuv_func = getattr(self._libuv, name)

        @functools.wraps(libuv_func)
        def wrap(*args, **kwargs):
            # The Python-level watcher object is never passed to C.
            if args and isinstance(args[0], watcher):
                args = args[1:]
            res = libuv_func(*args, **kwargs)
            if res is not None and res < 0:
                err_name = ffi.string(libuv.uv_err_name(res)).decode('ascii')
                err_desc = ffi.string(libuv.uv_strerror(res)).decode('ascii')
                raise UVFuncallError(
                    '%s %s Args: %r KWARGS: %r' % (err_name, err_desc, args, kwargs)
                )
            return res

        # Cache the wrapper so later lookups bypass __getattr__ entirely.
        setattr(self, name, wrap)
        return wrap
class ffi_unwrapper(object):
    """
    Bypasses :class:`libuv_error_wrapper` for the few FFI methods the
    watcher metaclass cares about, delegating everything else straight
    to the wrapped FFI object.
    """

    def __init__(self, ff):
        self._ffi = ff

    def __getattr__(self, name):
        # Anything not defined here falls through to the real FFI object.
        return getattr(self._ffi, name)

    def addressof(self, lib, name):
        # *lib* must be the error-checking wrapper; the address is
        # always taken from the real libuv library object.
        assert isinstance(lib, libuv_error_wrapper)
        return self._ffi.addressof(libuv, name)
class watcher(_base.watcher):
_FFI = ffi_unwrapper(ffi)
_LIB = libuv_error_wrapper(libuv)
_watcher_prefix = 'uv'
_watcher_struct_pattern = '%s_t'
    @classmethod
    def _watcher_ffi_close(cls, ffi_watcher):
        """
        Queue the native *ffi_watcher* to be closed by libuv, keeping
        its memory alive (via ``_closing_watchers``) until libuv's
        close callback has run.
        """
        # Managing the lifetime of _watcher is tricky.
        # They have to be uv_close()'d, but that only
        # queues them to be closed in the *next* loop iteration.
        # The memory must stay valid for at least that long,
        # or assert errors are triggered. We can't use a ffi.gc()
        # pointer to queue the uv_close, because by the time the
        # destructor is called, there's no way to keep the memory alive
        # and it could be re-used.
        # So here we resort to resurrecting the pointer object out
        # of our scope, keeping it alive past this object's lifetime.
        # We then use the uv_close callback to handle removing that
        # reference. There's no context passed to the close callback,
        # so we have to do this globally.

        # Sadly, doing this causes crashes if there were multiple
        # watchers for a given FD, so we have to take special care
        # about that. See https://github.com/gevent/gevent/issues/790#issuecomment-208076604

        # Note that this cannot be a __del__ method, because we store
        # the CFFI handle to self on self, which is a cycle, and
        # objects with a __del__ method cannot be collected on CPython < 3.4

        # Instead, this is arranged as a callback to GC when the
        # watcher class dies. Obviously it's important to keep the ffi
        # watcher alive.
        # We can pass in "subclasses" of uv_handle_t that line up at the C level,
        # but that don't in CFFI without a cast. But be careful what we use the cast
        # for, don't pass it back to C.
        ffi_handle_watcher = cls._FFI.cast('uv_handle_t*', ffi_watcher)
        ffi_handle_watcher.data = ffi.NULL
        if ffi_handle_watcher.type and not libuv.uv_is_closing(ffi_watcher):
            # If the type isn't set, we were never properly initialized,
            # and trying to close it results in libuv terminating the process.
            # Sigh. Same thing if it's already in the process of being
            # closed.
            _closing_watchers[ffi_handle_watcher] = ffi_watcher
            libuv.uv_close(ffi_watcher, libuv._uv_close_callback)
def _watcher_ffi_set_init_ref(self, ref):
self.ref = ref
def _watcher_ffi_init(self, args):
# TODO: we could do a better job chokepointing this
return self._watcher_init(self.loop.ptr,
self._watcher,
*args)
def _watcher_ffi_start(self):
self._watcher_start(self._watcher, self._watcher_callback)
def _watcher_ffi_stop(self):
if self._watcher:
# The multiplexed io watcher deletes self._watcher
# when it closes down. If that's in the process of
# an error handler, AbstractCallbacks.unhandled_onerror
# will try to close us again.
self._watcher_stop(self._watcher)
@_base.only_if_watcher
def _watcher_ffi_ref(self):
libuv.uv_ref(self._watcher)
@_base.only_if_watcher
def _watcher_ffi_unref(self):
libuv.uv_unref(self._watcher)
def _watcher_ffi_start_unref(self):
pass
def _watcher_ffi_stop_ref(self):
pass
def _get_ref(self):
# Convert 1/0 to True/False
if self._watcher is None:
return None
return bool(libuv.uv_has_ref(self._watcher))
def _set_ref(self, value):
if value:
self._watcher_ffi_ref()
else:
self._watcher_ffi_unref()
ref = property(_get_ref, _set_ref)
def feed(self, _revents, _callback, *_args):
# pylint:disable-next=broad-exception-raised
raise Exception("Not implemented")
class io(_base.IoMixin, watcher):
    """
    A multiplexing IO watcher built on ``uv_poll_t``.

    libuv allows only one poll handle per file descriptor, so this
    object owns the single native handle for an fd and fans events out
    to any number of lightweight :class:`_multiplexwatcher` objects
    created via :meth:`multiplex`.
    """

    _watcher_type = 'poll'
    _watcher_callback_name = '_gevent_poll_callback2'

    # On Windows is critical to be able to garbage collect these
    # objects in a timely fashion so that they don't get reused
    # for multiplexing completely different sockets. This is because
    # uv_poll_init_socket does a lot of setup for the socket to make
    # polling work. If get reused for another socket that has the same
    # fileno, things break badly. (In theory this could be a problem
    # on posix too, but in practice it isn't).

    # TODO: We should probably generalize this to all
    # ffi watchers. Avoiding GC cycles as much as possible
    # is a good thing, and potentially allocating new handles
    # as needed gets us better memory locality.

    # Especially on Windows, we must also account for the case that a
    # reference to this object has leaked (e.g., the socket object is
    # still around), but the fileno has been closed and a new one
    # opened. We must still get a new native watcher at that point. We
    # handle this case by simply making sure that we don't even have
    # a native watcher until the object is started, and we shut it down
    # when the object is stopped.

    # XXX: I was able to solve at least Windows test_ftplib.py issues
    # with more of a careful use of io objects in socket.py, so
    # delaying this entirely is at least temporarily on hold. Instead
    # sticking with the _watcher_create function override for the
    # moment.

    # XXX: Note 2: Moving to a deterministic close model, which was necessary
    # for PyPy, also seems to solve the Windows issues. So we're completely taking
    # this object out of the loop's registration; we don't want GC callbacks and
    # uv_close anywhere *near* this object.
    _watcher_registers_with_loop_on_create = False

    EVENT_MASK = libuv.UV_READABLE | libuv.UV_WRITABLE | libuv.UV_DISCONNECT

    # Class-level default; instances get a list in __init__.
    _multiplex_watchers = ()

    def __init__(self, loop, fd, events, ref=True, priority=None):
        super(io, self).__init__(loop, fd, events, ref=ref, priority=priority, _args=(fd,))
        self._fd = fd
        self._events = events
        self._multiplex_watchers = []

    def _get_fd(self):
        return self._fd

    @_base.not_while_active
    def _set_fd(self, fd):
        self._fd = fd
        self._watcher_ffi_init((fd,))

    def _get_events(self):
        return self._events

    def _set_events(self, events):
        if events == self._events:
            return
        self._events = events
        if self.active:
            # We're running but libuv specifically says we can
            # call start again to change our event mask.
            assert self._handle is not None
            self._watcher_start(self._watcher, self._events, self._watcher_callback)

    # The combined event mask currently being polled for.
    events = property(_get_events, _set_events)

    def _watcher_ffi_start(self):
        self._watcher_start(self._watcher, self._events, self._watcher_callback)

    if sys.platform.startswith('win32'):
        # uv_poll can only handle sockets on Windows, but the plain
        # uv_poll_init we call on POSIX assumes that the fileno
        # argument is already a C fileno, as created by
        # _get_osfhandle. C filenos are limited resources, must be
        # closed with _close. So there are lifetime issues with that:
        # calling the C function _close to dispose of the fileno
        # *also* closes the underlying win32 handle, possibly
        # prematurely. (XXX: Maybe could do something with weak
        # references? But to what?)

        # All libuv wants to do with the fileno in uv_poll_init is
        # turn it back into a Win32 SOCKET handle.

        # Now, libuv provides uv_poll_init_socket, which instead of
        # taking a C fileno takes the SOCKET, avoiding the need to dance with
        # the C runtime.

        # It turns out that SOCKET (win32 handles in general) can be
        # represented with `intptr_t`. It further turns out that
        # CPython *directly* exposes the SOCKET handle as the value of
        # fileno (32-bit PyPy does some munging on it, which should
        # rarely matter). So we can pass socket.fileno() through
        # to uv_poll_init_socket.

        # See _corecffi_build.
        _watcher_init = watcher._LIB.uv_poll_init_socket

    class _multiplexwatcher(object):
        """
        A lightweight per-user view onto the shared :class:`io` watcher
        for a single fd. Does not own any native resources itself.
        """

        callback = None
        args = ()
        pass_events = False
        ref = True

        def __init__(self, events, watcher):
            self._events = events

            # References:
            # These objects must keep the original IO object alive;
            # the IO object SHOULD NOT keep these alive to avoid cycles
            # We MUST NOT rely on GC to clean up the IO objects, but the explicit
            # calls to close(); see _multiplex_closed.
            self._watcher_ref = watcher

        events = property(
            lambda self: self._events,
            _base.not_while_active(lambda self, nv: setattr(self, '_events', nv)))

        def start(self, callback, *args, **kwargs):
            self.pass_events = kwargs.get("pass_events")
            self.callback = callback
            self.args = args

            watcher = self._watcher_ref
            if watcher is not None:
                if not watcher.active:
                    watcher._io_start()
                else:
                    # Make sure we're in the event mask
                    watcher._calc_and_update_events()

        def stop(self):
            self.callback = None
            self.pass_events = None
            self.args = None
            watcher = self._watcher_ref
            if watcher is not None:
                watcher._io_maybe_stop()

        def close(self):
            if self._watcher_ref is not None:
                self._watcher_ref._multiplex_closed(self)
            self._watcher_ref = None

        @property
        def active(self):
            return self.callback is not None

        @property
        def _watcher(self):
            # For testing.
            return self._watcher_ref._watcher

        # ares.pyx depends on this property,
        # and test__core uses it too
        fd = property(lambda self: getattr(self._watcher_ref, '_fd', -1),
                      lambda self, nv: self._watcher_ref._set_fd(nv))

    def _io_maybe_stop(self):
        self._calc_and_update_events()
        for w in self._multiplex_watchers:
            if w.callback is not None:
                # There's still a reference to it, and it's started,
                # so we can't stop.
                return
        # If we get here, nothing was started
        # so we can take ourself out of the polling set
        self.stop()

    def _io_start(self):
        self._calc_and_update_events()
        self.start(self._io_callback, pass_events=True)

    def _calc_and_update_events(self):
        # Recompute the union of the event masks of all started
        # multiplex watchers and push it down to the native handle.
        events = 0
        for watcher in self._multiplex_watchers:
            if watcher.callback is not None:
                # Only ask for events that are active.
                events |= watcher.events
        self._set_events(events)

    def multiplex(self, events):
        """Create, register and return a new multiplex watcher for *events*."""
        watcher = self._multiplexwatcher(events, self)
        self._multiplex_watchers.append(watcher)
        self._calc_and_update_events()
        return watcher

    def close(self):
        super(io, self).close()
        del self._multiplex_watchers

    def _multiplex_closed(self, watcher):
        self._multiplex_watchers.remove(watcher)
        if not self._multiplex_watchers:
            self.stop() # should already be stopped
            self._no_more_watchers()
            # It is absolutely critical that we control when the call
            # to uv_close() gets made. uv_close() of a uv_poll_t
            # handle winds up calling uv__platform_invalidate_fd,
            # which, as the name implies, destroys any outstanding
            # events for the *fd* that haven't been delivered yet, and also removes
            # the *fd* from the poll set. So if this happens later, at some
            # non-deterministic time when (cyclic or otherwise) GC runs,
            # *and* we've opened a new watcher for the fd, that watcher will
            # suddenly and mysteriously stop seeing events. So we do this now;
            # this method is smart enough not to close the handle twice.
            self.close()
        else:
            self._calc_and_update_events()

    def _no_more_watchers(self):
        # The loop sets this on an individual watcher to delete it from
        # the active list where it keeps hard references.
        pass

    def _io_callback(self, events):
        if events < 0:
            # actually a status error code
            _dbg("Callback error on", self._fd,
                 ffi.string(libuv.uv_err_name(events)),
                 ffi.string(libuv.uv_strerror(events)))
            # XXX: We've seen one half of a FileObjectPosix pair
            # (the read side of a pipe) report errno 11 'bad file descriptor'
            # after the write side was closed and its watcher removed. But
            # we still need to attempt to read from it to clear out what's in
            # its buffers--if we return with the watcher inactive before proceeding to wake up
            # the reader, we get a LoopExit. So we can't return here and arguably shouldn't print it
            # either. The negative events mask will match the watcher's mask.
            # See test__fileobject.py:Test.test_newlines for an example.

            # On Windows (at least with PyPy), we can get ENOTSOCK (socket operation on non-socket)
            # if a socket gets closed. If we don't pass the events on, we hang.
            # See test__makefile_ref.TestSSL for examples.
            # return

        for watcher in self._multiplex_watchers:
            if not watcher.callback:
                # Stopped
                continue
            assert watcher._watcher_ref is self, (self, watcher._watcher_ref)

            send_event = (events & watcher.events) or events < 0
            if send_event:
                if not watcher.pass_events:
                    watcher.callback(*watcher.args)
                else:
                    watcher.callback(events, *watcher.args)
class _SimulatedWithAsyncMixin(object):
    """
    Mixin for watcher types libuv does not provide natively (fork,
    child): the watcher is simulated on top of a libuv async handle,
    which other code "sends" to fire our callback.

    Subclasses implement :meth:`_register_loop_callback` /
    :meth:`_unregister_loop_callback` to hook into the loop's own
    bookkeeping.
    """

    _watcher_skip_ffi = True

    def __init__(self, loop, *args, **kwargs):
        self._async = loop.async_()
        try:
            super(_SimulatedWithAsyncMixin, self).__init__(loop, *args, **kwargs)
        except BaseException:
            # Don't leak the async handle if base init fails.
            self._async.close()
            raise

    def _watcher_create(self, _args):
        # No native watcher of our own; the async handle does the work.
        return

    @property
    def _watcher_handle(self):
        return None

    def _watcher_ffi_init(self, _args):
        return

    def _watcher_ffi_set_init_ref(self, ref):
        self._async.ref = ref

    @property
    def active(self):
        return self._async.active

    def start(self, cb, *args):
        assert self._async is not None
        self._register_loop_callback()
        self.callback = cb
        self.args = args
        self._async.start(cb, *args)

    def stop(self):
        self._unregister_loop_callback()
        self.callback = None
        self.args = None
        if self._async is not None:
            # stop() after close() should be allowed; only forward
            # to the async handle while we still have one.
            self._async.stop()

    def close(self):
        if self._async is not None:
            handle = self._async
            self._async = None
            handle.close()

    def _register_loop_callback(self):
        # called from start()
        raise NotImplementedError()

    def _unregister_loop_callback(self):
        # called from stop()
        raise NotImplementedError()
class fork(_SimulatedWithAsyncMixin,
           _base.ForkMixin,
           watcher):
    """
    Fork watcher simulated on top of an async handle: the loop detects
    a pid change and calls :meth:`_on_fork` on each registered watcher.
    """
    # We'll have to implement this one completely manually.
    _watcher_skip_ffi = False

    def _register_loop_callback(self):
        self.loop._fork_watchers.add(self)

    def _unregister_loop_callback(self):
        # stop() should be idempotent: discard() is a no-op when we
        # were never registered, unlike remove() (which the previous
        # implementation wrapped in try/except KeyError).
        self.loop._fork_watchers.discard(self)

    def _on_fork(self):
        # Called by the loop when it notices os.getpid() changed.
        self._async.send()
class child(_SimulatedWithAsyncMixin,
            _base.ChildMixin,
            watcher):
    """
    Child watcher simulated with a SIGCHLD handler plus the original
    ``os.waitpid`` call; firing is delivered through our async handle.
    """

    _watcher_skip_ffi = True

    # We'll have to implement this one completely manually.
    # Our approach is to use a SIGCHLD handler and the original
    # os.waitpid call.

    # On Unix, libuv's uv_process_t and uv_spawn use SIGCHLD,
    # just like libev does for its child watchers. So
    # we're not adding any new SIGCHLD related issues not already
    # present in libev.

    def _register_loop_callback(self):
        self.loop._register_child_watcher(self)

    def _unregister_loop_callback(self):
        self.loop._unregister_child_watcher(self)

    def _set_waitpid_status(self, pid, status):
        # Record what waitpid reported, then wake the loop.
        self._rstatus = status
        self._rpid = pid
        self._async.send()
class async_(_base.AsyncMixin, watcher):
    """
    Wrapper for ``uv_async_t``: a handle another thread (or the loop
    itself) can :meth:`send` to wake the loop and run our callback.
    """

    _watcher_callback_name = '_gevent_async_callback0'

    # libuv async watchers are different than all other watchers:
    # They don't have a separate start/stop method (presumably
    # because of race conditions). Simply initing them places them
    # into the active queue.
    #
    # In the past, we sent a NULL C callback to the watcher, trusting
    # that no one would call send() without actually starting us (or after
    # closing us); doing so would crash. But we don't want to delay
    # initing the struct because it will crash in uv_close() when we get GC'd,
    # and send() will also crash. Plus that complicates our lifecycle (managing
    # the memory).
    #
    # Now, we always init the correct C callback, and use a dummy
    # Python callback that gets replaced when we are started and
    # stopped. This prevents mistakes from being crashes.
    _callback = lambda: None

    def _watcher_ffi_init(self, args):
        # NOTE: uv_async_init is NOT idempotent. Calling it more than
        # once adds the uv_async_t to the internal queue multiple times,
        # and uv_close only cleans up one of them, meaning that we tend to
        # crash. Thus we have to be very careful not to allow that.
        return self._watcher_init(self.loop.ptr, self._watcher,
                                  self._watcher_callback)

    def _watcher_ffi_start(self):
        # Initing the handle already "started" it; nothing more to do.
        pass

    def _watcher_ffi_stop(self):
        pass

    def send(self):
        # A still-default _callback means we were never started (or
        # were stopped/closed); sending then would be a programming error.
        assert self._callback is not async_._callback, "Sending to a closed watcher"
        if libuv.uv_is_closing(self._watcher):
            # pylint:disable-next=broad-exception-raised
            raise Exception("Closing handle")
        libuv.uv_async_send(self._watcher)

    @property
    def pending(self):
        # Not tracked on libuv.
        return None
# Also export the watcher under its historical name ``async`` (a keyword
# in modern Python, hence the assignment through locals()).
locals()['async'] = async_
class timer(_base.TimerMixin, watcher):
    """
    Wrapper for ``uv_timer_t``. Times are given in seconds but libuv
    only has millisecond resolution; sub-millisecond values are clamped
    to 1ms (with a warning).
    """

    _watcher_callback_name = '_gevent_timer_callback0'

    # In libuv, timer callbacks continue running while any timer is
    # expired, including newly added timers. Newly added non-zero
    # timers (especially of small duration) can be seen to be expired
    # if the loop time is updated while we are in a timer callback.
    # This can lead to us being stuck running timers for a terribly
    # long time, which is not good. So default to not updating the
    # time.

    # Also, newly-added timers of 0 duration can *also* stall the
    # loop, because they'll be seen to be expired immediately.
    # Updating the time can prevent that, *if* there was already a
    # timer for a longer duration scheduled.

    # To mitigate the above problems, our loop implementation turns
    # zero duration timers into check watchers instead using OneShotCheck.
    # This ensures the loop cycles. Of course, the 'again' method does
    # nothing on them and doesn't exist. In practice that's not an issue.

    # Set by again() while restarting; makes _watcher_ffi_start use
    # uv_timer_again instead of a fresh uv_timer_start.
    _again = False

    def _watcher_ffi_init(self, args):
        self._watcher_init(self.loop.ptr, self._watcher)
        self._after, self._repeat = args
        if self._after and self._after < 0.001:
            import warnings
            # XXX: The stack level is hard to determine, could be getting here
            # through a number of different ways.
            warnings.warn("libuv only supports millisecond timer resolution; "
                          "all times less will be set to 1 ms",
                          stacklevel=6)
            # The alternative is to effectively pass in int(0.1) == 0, which
            # means no sleep at all, which leads to excessive wakeups
            self._after = 0.001
        if self._repeat and self._repeat < 0.001:
            import warnings
            warnings.warn("libuv only supports millisecond timer resolution; "
                          "all times less will be set to 1 ms",
                          stacklevel=6)
            self._repeat = 0.001

    def _watcher_ffi_start(self):
        if self._again:
            libuv.uv_timer_again(self._watcher)
        else:
            try:
                self._watcher_start(self._watcher, self._watcher_callback,
                                    int(self._after * 1000),
                                    int(self._repeat * 1000))
            except ValueError:
                # in case of non-ints in _after/_repeat
                raise TypeError()

    def again(self, callback, *args, **kw):
        """Restart the (repeating) timer, as with libev's ``again``."""
        if not self.active:
            # If we've never been started, this is the same as starting us.
            # libuv makes the distinction, libev doesn't.
            self.start(callback, *args, **kw)
            return

        self._again = True
        try:
            self.start(callback, *args, **kw)
        finally:
            # Fall back to the class-level default (False).
            del self._again
class stat(_base.StatMixin, watcher):
    """
    File-stat watcher built on ``uv_fs_poll_t`` (wrapped in our own
    ``gevent_fs_poll_t`` struct that embeds the handle).
    """

    _watcher_type = 'fs_poll'
    _watcher_struct_name = 'gevent_fs_poll_t'
    _watcher_callback_name = '_gevent_fs_poll_callback3'

    def _watcher_set_data(self, the_watcher, data):
        # Our struct embeds the uv handle; the CFFI handle is stored on
        # the embedded handle's data slot.
        the_watcher.handle.data = data
        return data

    def _watcher_ffi_init(self, args):
        return self._watcher_init(self.loop.ptr, self._watcher)

    MIN_STAT_INTERVAL = 0.1074891 # match libev; 0.0 is default

    def _watcher_ffi_start(self):
        # libev changes this when the watcher is started
        if self._interval < self.MIN_STAT_INTERVAL:
            self._interval = self.MIN_STAT_INTERVAL

        self._watcher_start(self._watcher, self._watcher_callback,
                            self._cpath,
                            int(self._interval * 1000))

    @property
    def _watcher_handle(self):
        return self._watcher.handle.data

    @property
    def attr(self):
        # A falsy st_nlink is treated as "no data yet": return None.
        if not self._watcher.curr.st_nlink:
            return
        return self._watcher.curr

    @property
    def prev(self):
        # Same convention as ``attr``, for the previous stat result.
        if not self._watcher.prev.st_nlink:
            return
        return self._watcher.prev
class signal(_base.SignalMixin, watcher):
    """Wrapper for ``uv_signal_t``."""

    _watcher_callback_name = '_gevent_signal_callback1'

    def _watcher_ffi_init(self, args):
        self._watcher_init(self.loop.ptr, self._watcher)
        # Match libev's behaviour: signal watchers don't keep the
        # loop alive by default.
        self.ref = False

    def _watcher_ffi_start(self):
        self._watcher_start(
            self._watcher, self._watcher_callback, self._signalnum)
class idle(_base.IdleMixin, watcher):
    """Wrapper for ``uv_idle_t``."""

    # Because libuv doesn't support priorities, idle watchers are
    # potentially quite a bit different than under libev
    _watcher_callback_name = '_gevent_idle_callback0'
class check(_base.CheckMixin, watcher):
    """Wrapper for ``uv_check_t`` (runs after IO polling)."""

    _watcher_callback_name = '_gevent_check_callback0'
class OneShotCheck(check):
    """
    A check watcher that stops itself the first time it fires, before
    invoking the user's callback.
    """

    _watcher_skip_ffi = True

    def __make_cb(self, func):
        stop_watcher = self.stop

        @functools.wraps(func)
        def fire_once(*args):
            # Stop *before* calling through, so a callback that raises
            # still leaves us stopped.
            stop_watcher()
            return func(*args)

        return fire_once

    def start(self, callback, *args):
        wrapped = self.__make_cb(callback)
        return check.start(self, wrapped, *args)
class prepare(_base.PrepareMixin, watcher):
    """Wrapper for ``uv_prepare_t`` (runs before IO polling)."""

    _watcher_callback_name = '_gevent_prepare_callback0'
# ---- end of gevent/libuv/watcher.py; gevent/libuv/loop.py follows ----
"""
libuv loop implementation
"""
# pylint: disable=no-member
from __future__ import absolute_import, print_function
import os
from collections import defaultdict
from collections import namedtuple
from operator import delitem
import signal
from zope.interface import implementer
from gevent import getcurrent
from gevent.exceptions import LoopExit
from gevent._ffi import _dbg # pylint: disable=unused-import
from gevent._ffi.loop import AbstractLoop
from gevent._ffi.loop import assign_standard_callbacks
from gevent._ffi.loop import AbstractCallbacks
from gevent._interfaces import ILoop
from gevent.libuv import _corecffi # pylint:disable=no-name-in-module,import-error
ffi = _corecffi.ffi
libuv = _corecffi.lib
__all__ = [
]
class _Callbacks(AbstractCallbacks):
    """
    libuv-specific implementations of the Python-level callbacks that
    the C trampolines invoke.
    """

    def _find_loop_from_c_watcher(self, watcher_ptr):
        # The loop stashes its CFFI handle in the handle's data slot.
        loop_handle = ffi.cast('uv_handle_t*', watcher_ptr).data
        if not loop_handle:
            return None
        return self.from_handle(loop_handle)

    def python_sigchld_callback(self, watcher_ptr, _signum):
        handle = ffi.cast('uv_handle_t*', watcher_ptr).data
        the_loop = self.from_handle(handle)
        the_loop._sigchld_callback()

    def python_timer0_callback(self, watcher_ptr):
        # The callback "timer" is really a check watcher; treat it
        # exactly like a prepare callback.
        return self.python_prepare_callback(watcher_ptr)

    def python_queue_callback(self, watcher_ptr, revents):
        target = self.from_handle(watcher_ptr.data)
        target.loop._queue_callback(watcher_ptr, revents)
# Wire our Python callback implementations into the C trampolines
# declared by the CFFI build, including the libuv-only extras listed here.
_callbacks = assign_standard_callbacks(
    ffi, libuv, _Callbacks,
    [
        'python_sigchld_callback',
        'python_timer0_callback',
        'python_queue_callback',
    ]
)
from gevent._ffi.loop import EVENTS
GEVENT_CORE_EVENTS = EVENTS # export

from gevent.libuv import watcher as _watchers # pylint:disable=no-name-in-module

_events_to_str = _watchers._events_to_str # export

# Event masks expressed in libuv's poll flags.
READ = libuv.UV_READABLE
WRITE = libuv.UV_WRITABLE
def get_version():
    """
    Return the version of libuv we are running against, e.g.
    ``'libuv-1.44.2'``.
    """
    raw = ffi.string(libuv.uv_version_string())
    if isinstance(raw, str):
        version = raw
    else:
        # Py3: ffi.string returned bytes; the version is ASCII.
        version = raw.decode("ascii")
    return 'libuv-' + version
def get_header_version():
    """
    Return the libuv version we were compiled against, e.g.
    ``'libuv-1.44.2'``.
    """
    return 'libuv-%d.%d.%d' % (
        libuv.UV_VERSION_MAJOR,
        libuv.UV_VERSION_MINOR,
        libuv.UV_VERSION_PATCH,
    )
def supported_backends():
    """
    Return the event-loop backend names this implementation supports.

    Unlike libev, libuv has no selectable backends, so this is always
    just the single 'default' entry.
    """
    return ['default']
# Module import side effect: presumably installs gevent's allocation
# hooks into libuv (defined in the CFFI C layer) before any loop exists.
# TODO(review): confirm against _corecffi_build.
libuv.gevent_set_uv_alloc()
@implementer(ILoop)
class loop(AbstractLoop):
# libuv parameters simply won't accept anything lower than 1ms. In
# practice, looping on gevent.sleep(0.001) takes about 0.00138 s
# (+- 0.000036s)
approx_timer_resolution = 0.001 # 1ms
# It's relatively more expensive to break from the callback loop
# because we don't do it "inline" from C, we're looping in Python
CALLBACK_CHECK_COUNT = max(AbstractLoop.CALLBACK_CHECK_COUNT, 100)
# Defines the maximum amount of time the loop will sleep waiting for IO,
# which is also the interval at which signals are checked and handled.
SIGNAL_CHECK_INTERVAL_MS = 300
error_handler = None
_CHECK_POINTER = 'uv_check_t *'
_PREPARE_POINTER = 'uv_prepare_t *'
_PREPARE_CALLBACK_SIG = "void(*)(void*)"
_TIMER_POINTER = _CHECK_POINTER # This is poorly named. It's for the callback "timer"
def __init__(self, flags=None, default=None):
    """
    :param flags: Passed through to ``_init_loop``.
    :param default: If true (or None), use the process-wide default
        libuv loop rather than creating a new one.
    """
    AbstractLoop.__init__(self, ffi, libuv, _watchers, flags, default)
    self._child_watchers = defaultdict(list)
    self._io_watchers = {}
    self._fork_watchers = set()
    # Remember the creating pid so _run_callbacks can detect a fork.
    self._pid = os.getpid()
    # pylint:disable-next=superfluous-parens
    self._default = (self._ptr == libuv.uv_default_loop())
    # (watcher_ptr, revents) pairs deferred until _run_callbacks.
    self._queued_callbacks = []
def _queue_callback(self, watcher_ptr, revents):
    # Called (via _Callbacks.python_queue_callback) to defer delivery
    # of an event until __run_queued_callbacks in _run_callbacks.
    self._queued_callbacks.append((watcher_ptr, revents))
def _init_loop(self, flags, default):
    """
    Obtain the underlying ``uv_loop_t*`` (default or new) and tag it
    with our CFFI handle. Raises SystemError if no loop is returned.
    """
    if default is None:
        default = True
        # Unlike libev, libuv creates a new default
        # loop automatically if the old default loop was
        # closed.

    if default:
        # XXX: If the default loop had been destroyed, this
        # will create a new one, but we won't destroy it
        ptr = libuv.uv_default_loop()
    else:
        ptr = libuv.uv_loop_new()

    if not ptr:
        raise SystemError("Failed to get loop")

    # Track whether or not any object has destroyed
    # this loop. See _can_destroy_default_loop
    ptr.data = self._handle_to_self
    return ptr
# The uv_timer_t used to poll for signals; created in
# _init_and_start_check.
_signal_idle = None

@property
def ptr(self):
    """
    The underlying ``uv_loop_t*``, or None once the C loop has been
    destroyed (possibly by a different Python loop object sharing
    the default loop).
    """
    if not self._ptr:
        return None
    if self._ptr and not self._ptr.data:
        # Another instance of the Python loop destroyed
        # the C loop. It was probably the default.
        self._ptr = None
    return self._ptr
def _init_and_start_check(self):
    """
    Start the loop's unref'd check watcher plus the repeating timer
    used to notice signals (and, indirectly, forks).
    """
    libuv.uv_check_init(self.ptr, self._check)
    libuv.uv_check_start(self._check, libuv.python_check_callback)
    libuv.uv_unref(self._check)

    # We also have to have an idle watcher to be able to handle
    # signals in a timely manner. Without them, libuv won't loop again
    # and call into its check and prepare handlers.
    # Note that this basically forces us into a busy-loop
    # XXX: As predicted, using an idle watcher causes our process
    # to eat 100% CPU time. We instead use a timer with a max of a .3 second
    # delay to notice signals. Note that this timeout also implements fork
    # watchers, effectively.

    # XXX: Perhaps we could optimize this to notice when there are other
    # timers in the loop and start/stop it then. When we have a callback
    # scheduled, this should also be the same and unnecessary?
    # libev does takes this basic approach on Windows.
    self._signal_idle = ffi.new("uv_timer_t*")
    libuv.uv_timer_init(self.ptr, self._signal_idle)
    self._signal_idle.data = self._handle_to_self
    sig_cb = ffi.cast('void(*)(uv_timer_t*)', libuv.python_check_callback)
    libuv.uv_timer_start(self._signal_idle,
                         sig_cb,
                         self.SIGNAL_CHECK_INTERVAL_MS,
                         self.SIGNAL_CHECK_INTERVAL_MS)
    libuv.uv_unref(self._signal_idle)
def __check_and_die(self):
    """
    If the C loop has been destroyed while we're running, escape to
    the parent greenlet with LoopExit instead of returning into C.
    """
    if not self.ptr:
        # We've been destroyed during the middle of self.run().
        # This method is being called into from C, and it's not
        # safe to go back to C (Windows in particular can abort
        # the process with "GetQueuedCompletionStatusEx: (6) The
        # handle is invalid.") So switch to the parent greenlet.
        getcurrent().parent.throw(LoopExit('Destroyed during run'))
def _run_callbacks(self):
    """
    Run queued and manual callbacks, after handling fork detection.
    """
    self.__check_and_die()

    # Manually handle fork watchers: libuv gives us no native fork
    # watcher, so detect a pid change here.
    curpid = os.getpid()
    if curpid != self._pid:
        self._pid = curpid
        for watcher in self._fork_watchers:
            watcher._on_fork()

    # The contents of queued_callbacks at this point should be timers
    # that expired when the loop began along with any idle watchers.
    # We need to run them so that any manual callbacks they want to schedule
    # get added to the list and ran next before we go on to poll for IO.
    # This is critical for libuv on linux: closing a socket schedules some manual
    # callbacks to actually stop the watcher; if those don't run before
    # we poll for IO, then libuv can abort the process for the closed file descriptor.

    # XXX: There's still a race condition here because we may not run *all* the manual
    # callbacks. We need a way to prioritize those.

    # Running these before the manual callbacks lead to some
    # random test failures. In test__event.TestEvent_SetThenClear
    # we would get a LoopExit sometimes. The problem occurred when
    # a timer expired on entering the first loop; we would process
    # it there, and then process the callback that it created
    # below, leaving nothing for the loop to do. Having the
    # self.run() manually process manual callbacks before
    # continuing solves the problem. (But we must still run callbacks
    # here again.)
    self._prepare_ran_callbacks = self.__run_queued_callbacks()

    super(loop, self)._run_callbacks()
def _init_and_start_prepare(self):
    # Start the loop's prepare watcher, unref'd so it doesn't keep
    # the loop alive on its own.
    libuv.uv_prepare_init(self.ptr, self._prepare)
    libuv.uv_prepare_start(self._prepare, libuv.python_prepare_callback)
    libuv.uv_unref(self._prepare)
def _init_callback_timer(self):
    # Despite the name, the callback "timer" is a uv_check_t; see
    # _start_callback_timer for why.
    libuv.uv_check_init(self.ptr, self._timer0)
def _stop_callback_timer(self):
    libuv.uv_check_stop(self._timer0)
def _start_callback_timer(self):
    # The purpose of the callback "timer" is to ensure queued callbacks
    # run as soon as possible on the next loop iteration.
    #
    # In libev a 0-duration timer with a no-op callback suffices:
    # timers execute immediately *after* IO polling (and determine how
    # long the poll blocks), so the timer just spins the loop and the
    # normal prepare watcher runs the callbacks. libuv's uv_run does
    # things in the opposite order — timers fire *first*, before idle,
    # prepare, IO polling and check handlers — so a no-op 0-duration
    # timer would accomplish nothing there, and we must run the
    # callbacks explicitly. (Without that, test__systemerror's
    # TestCallback was flaky, failing roughly one time in twenty.)
    #
    # We can't literally use a 0-duration uv_timer_t either: newly
    # added timers expiring while uv__run_timers is active can keep it
    # running, trapping us in a timer loop (Python 3.6 failed in
    # test_ftplib.py this way). So we use a check watcher, which fires
    # after IO polling, instead.
    #
    # Two remaining subtleties: (1) before we've entered the loop at
    # all, a very short user timer may fire ahead of our callback;
    # (2) a new timer started from inside a timer callback (i.e.,
    # inside uv__run_timers — trivially, code after gevent.sleep())
    # can expire and run within the same iteration if the loop time is
    # updated. To mitigate (2), our timer watcher class does not
    # update the loop time by default, unlike libev's.
    libuv.uv_check_start(self._timer0, libuv.python_timer0_callback)
def _stop_aux_watchers(self):
    """
    Stop the loop's internal watchers (prepare, check, signal timer,
    callback check) in preparation for teardown.
    """
    super(loop, self)._stop_aux_watchers()
    assert self._prepare
    assert self._check
    assert self._signal_idle
    libuv.uv_prepare_stop(self._prepare)
    libuv.uv_ref(self._prepare) # Why are we doing this?
    libuv.uv_check_stop(self._check)
    libuv.uv_ref(self._check)

    libuv.uv_timer_stop(self._signal_idle)
    libuv.uv_ref(self._signal_idle)

    libuv.uv_check_stop(self._timer0)
def _setup_for_run_callback(self):
    # A pending callback must keep the loop alive until it runs,
    # hence the explicit ref on the callback check watcher.
    self._start_callback_timer()
    libuv.uv_ref(self._timer0)
def _can_destroy_loop(self, ptr):
    # Any non-NULL pointer can be destroyed.
    return ptr
def __close_loop(self, ptr):
    # uv_loop_close fails with UV_EBUSY while handles are still
    # attached. We have already closed them all, so spin the loop to
    # let the close callbacks run and detach them, then retry.
    while True:
        rc = libuv.uv_loop_close(ptr)
        if not rc:
            break
        if rc != libuv.UV_EBUSY:
            raise SystemError("Unknown close failure reason", rc)
        # Run the loop once to let the closed handles be cut off
        # from it; if callbacks remain, give them one more non-blocking
        # pass.
        if libuv.uv_run(ptr, libuv.UV_RUN_ONCE):
            libuv.uv_run(ptr, libuv.UV_RUN_NOWAIT)
    def _destroy_loop(self, ptr):
        """
        Tear down the native loop *ptr*: stop it, close all handles,
        close the loop, and release the native auxiliary watcher memory.

        Idempotent with respect to double-destruction: the ``data`` member
        of the loop tracks whether this loop object still owns the native
        loop (it may already have been destroyed if loop objects were
        constructed more than once for the default loop).
        """
        # We're being asked to destroy a loop that's, potentially, at
        # the time it was constructed, was the default loop. If loop
        # objects were constructed more than once, it may have already
        # been destroyed, though. We track this in the data member.
        data = ptr.data
        ptr.data = ffi.NULL
        try:
            if data:
                libuv.uv_stop(ptr)
                libuv.gevent_close_all_handles(ptr)
        finally:
            # Ensure the back-pointer is cleared even if closing handles
            # raised; nothing may dereference it after this point.
            ptr.data = ffi.NULL
        try:
            if data:
                self.__close_loop(ptr)
        finally:
            # Destroy the native resources *after* we have closed
            # the loop. If we do it before, walking the handles
            # attached to the loop is likely to segfault.

            # Note that these may have been closed already if the default loop was shared.
            if data:
                libuv.gevent_zero_check(self._check)
                libuv.gevent_zero_check(self._timer0)
                libuv.gevent_zero_prepare(self._prepare)
                libuv.gevent_zero_timer(self._signal_idle)
                libuv.gevent_zero_loop(ptr)

            # Drop the Python-side references to the (now freed) cdata.
            del self._check
            del self._prepare
            del self._signal_idle
            del self._timer0

            # Destroy any watchers we're still holding on to.
            del self._io_watchers
            del self._fork_watchers
            del self._child_watchers
_HandleState = namedtuple("HandleState",
['handle',
'type',
'watcher',
'ref',
'active',
'closing'])
    def debug(self):
        """
        Return all the handles that are open and their ref status.

        Each element is a :attr:`_HandleState` tuple.  Intended for
        diagnostics only.
        """
        if not self.ptr:
            return ["Loop has been destroyed"]
        handle_state = self._HandleState
        handles = []

        # XXX: Convert this to a modern callback.
        def walk(handle, _arg):
            # Called synchronously by uv_walk for every open handle.
            data = handle.data
            if data:
                watcher = ffi.from_handle(data)
            else:
                watcher = None
            handles.append(handle_state(handle,
                                        ffi.string(libuv.uv_handle_type_name(handle.type)),
                                        watcher,
                                        libuv.uv_has_ref(handle),
                                        libuv.uv_is_active(handle),
                                        libuv.uv_is_closing(handle)))

        # The ffi.callback object only needs to live for the duration of
        # this (synchronous) uv_walk call, so creating it inline is safe.
        libuv.uv_walk(self.ptr,
                      ffi.callback("void(*)(uv_handle_t*,void*)",
                                   walk),
                      ffi.NULL)
        return handles
    def ref(self):
        # Intentionally a no-op: libuv tracks references per-handle
        # (uv_ref/uv_unref on handles), not per-loop as libev does.
        pass
    def unref(self):
        # XXX: Called by _run_callbacks.
        # Intentionally a no-op; see :meth:`ref`.
        pass
    def break_(self, how=None):
        # Ask the loop to stop iterating.  *how* exists for libev API
        # compatibility and is ignored here: libuv has only one way to
        # stop a loop.
        if self.ptr:
            libuv.uv_stop(self.ptr)
    def reinit(self):
        """
        Prepare this loop for use in a newly fork()ed child process.

        Delegates to ``uv_loop_fork`` (available in libuv >= 1.12), which
        re-creates the kernel polling state in the child.
        """
        # TODO: How to implement? We probably have to simply
        # re-__init__ this whole class? Does it matter?
        # OR maybe we need to uv_walk() and close all the handles?

        # XXX: libuv < 1.12 simply CANNOT handle a fork unless you immediately
        # exec() in the child. There are multiple calls to abort() that
        # will kill the child process:
        # - The OS X poll implementation (kqueue) aborts on an error return
        # value; since kqueue FDs can't be inherited, then the next call
        # to kqueue in the child will fail and get aborted; fork() is likely
        # to be called during the gevent loop, meaning we're deep inside the
        # runloop already, so we can't even close the loop that we're in:
        # it's too late, the next call to kqueue is already scheduled.
        # - The threadpool, should it be in use, also aborts
        #   (https://github.com/joyent/libuv/pull/1136)
        # - There global shared state that breaks signal handling
        # and leads to an abort() in the child, EVEN IF the loop in the parent
        # had already been closed
        # (https://github.com/joyent/libuv/issues/1405)

        # In 1.12, the uv_loop_fork function was added (by gevent!)
        libuv.uv_loop_fork(self.ptr)
_prepare_ran_callbacks = False
    def __run_queued_callbacks(self):
        """
        Run (and drain) the callbacks that native watchers queued.

        Returns True if there were any queued callbacks to process
        (whether or not each individual one actually ran), False if the
        queue was empty.
        """
        if not self._queued_callbacks:
            return False

        # Copy-and-clear so callbacks that queue more callbacks don't
        # get processed in this batch (and don't mutate the list we're
        # iterating).
        cbs = self._queued_callbacks[:]
        del self._queued_callbacks[:]

        for watcher_ptr, arg in cbs:
            handle = watcher_ptr.data
            if not handle:
                # It's been stopped and possibly closed
                assert not libuv.uv_is_active(watcher_ptr)
                continue
            val = _callbacks.python_callback(handle, arg)
            if val == -1: # Failure.
                _callbacks.python_handle_error(handle, arg)
            elif val == 1: # Success, and we may need to close the Python watcher.
                if not libuv.uv_is_active(watcher_ptr):
                    # The callback closed the native watcher resources. Good.
                    # It's *supposed* to also reset the .data handle to NULL at
                    # that same time. If it resets it to something else, we're
                    # re-using the same watcher object, and that's not correct either.
                    # On Windows in particular, if the .data handle is changed because
                    # the IO multiplexer is being restarted, trying to dereference the
                    # *old* handle can crash with an FFI error.
                    handle_after_callback = watcher_ptr.data
                    try:
                        if handle_after_callback and handle_after_callback == handle:
                            _callbacks.python_stop(handle_after_callback)
                    finally:
                        # Always sever the native->Python link for this batch.
                        watcher_ptr.data = ffi.NULL
        return True
    def run(self, nowait=False, once=False):
        """
        Run the event loop.

        :keyword bool nowait: Poll for IO once without blocking
            (``UV_RUN_NOWAIT``); takes precedence over *once*.
        :keyword bool once: Run a single blocking iteration
            (``UV_RUN_ONCE``).
        :return: The last status from ``uv_run`` (0 when the loop has no
            more referenced active handles).
        """
        # we can only respect one flag or the other.
        # nowait takes precedence because it can't block
        mode = libuv.UV_RUN_DEFAULT
        if once:
            mode = libuv.UV_RUN_ONCE
        if nowait:
            mode = libuv.UV_RUN_NOWAIT

        if mode == libuv.UV_RUN_DEFAULT:
            while self._ptr and self._ptr.data:
                # This is here to better preserve order guarantees.
                # See _run_callbacks for details.
                # It may get run again from the prepare watcher, so
                # potentially we could take twice as long as the
                # switch interval.
                self._run_callbacks()
                self._prepare_ran_callbacks = False

                # If we have *lots* of callbacks to run, we may not actually
                # get through them all before we're requested to poll for IO;
                # so in that case, just spin the loop once (UV_RUN_NOWAIT) and
                # go again.

                # UV_RUN_ONCE will poll for IO, blocking for up to the time needed
                # for the next timer to expire. Worst case, that's our _signal_idle
                # timer, about 1/3 second. UV_RUN_ONCE guarantees that some forward progress
                # is made, either by an IO watcher or a timer.
                #
                # In contrast, UV_RUN_NOWAIT makes no such guarantee, it only polls for IO once and
                # immediately returns; it does not update the loop time or timers after
                # polling for IO.
                run_mode = (
                    libuv.UV_RUN_ONCE
                    if not self._callbacks and not self._queued_callbacks
                    else libuv.UV_RUN_NOWAIT
                )

                ran_status = libuv.uv_run(self._ptr, run_mode)
                # Note that we run queued callbacks when the prepare watcher runs,
                # thus accounting for timers that expired before polling for IO,
                # and idle watchers. This next call should get IO callbacks and
                # callbacks from timers that expired *after* polling for IO.
                ran_callbacks = self.__run_queued_callbacks()

                if not ran_status and not ran_callbacks and not self._prepare_ran_callbacks:
                    # A return of 0 means there are no referenced and
                    # active handles. The loop is over.
                    # If we didn't run any callbacks, then we couldn't schedule
                    # anything to switch in the future, so there's no point
                    # running again.
                    return ran_status
            return 0 # Somebody closed the loop

        # once / nowait mode: a single call to uv_run, then drain
        # whatever callbacks the native watchers queued.
        result = libuv.uv_run(self._ptr, mode)
        self.__run_queued_callbacks()
        return result
def now(self):
self.__check_and_die()
# libuv's now is expressed as an integer number of
# milliseconds, so to get it compatible with time.time units
# that this method is supposed to return, we have to divide by 1000.0
now = libuv.uv_now(self.ptr)
return now / 1000.0
    def update_now(self):
        # Refresh the loop's cached time from the system clock.
        self.__check_and_die()
        libuv.uv_update_time(self.ptr)
def fileno(self):
if self.ptr:
fd = libuv.uv_backend_fd(self._ptr)
if fd >= 0:
return fd
_sigchld_watcher = None
_sigchld_callback_ffi = None
    def install_sigchld(self):
        """
        Start watching SIGCHLD so child watchers can be notified.

        Only meaningful on the default loop; idempotent.
        """
        if not self.default:
            return

        if self._sigchld_watcher:
            # Already installed.
            return

        self._sigchld_watcher = ffi.new('uv_signal_t*')
        libuv.uv_signal_init(self.ptr, self._sigchld_watcher)
        self._sigchld_watcher.data = self._handle_to_self
        # Don't let this keep the loop alive
        libuv.uv_unref(self._sigchld_watcher)

        libuv.uv_signal_start(self._sigchld_watcher,
                              libuv.python_sigchld_callback,
                              signal.SIGCHLD)
    def reset_sigchld(self):
        """
        Stop watching SIGCHLD and release the native watcher.

        A no-op on non-default loops or when install_sigchld was never
        called.
        """
        if not self.default or not self._sigchld_watcher:
            return

        libuv.uv_signal_stop(self._sigchld_watcher)
        # Must go through this to manage the memory lifetime
        # correctly. Alternately, we could just stop it and restart
        # it in install_sigchld?
        _watchers.watcher._watcher_ffi_close(self._sigchld_watcher)
        del self._sigchld_watcher
    def _sigchld_callback(self):
        """
        Reap all exited children and schedule their watchers' callbacks.
        """
        # Signals can arrive at (relatively) any time. To eliminate
        # race conditions, and behave more like libev, we "queue"
        # sigchld to run when we run callbacks.
        while True:
            try:
                pid, status, _usage = os.wait3(os.WNOHANG)
            except OSError:
                # Python 3 raises ChildProcessError
                # (a subclass of OSError) when there are no children left.
                break
            if pid == 0:
                # Children exist, but none have exited.
                break
            # Watchers registered for pid 0 observe every child.
            children_watchers = self._child_watchers.get(pid, []) + self._child_watchers.get(0, [])
            for watcher in children_watchers:
                self.run_callback(watcher._set_waitpid_status, pid, status)

        # Don't invoke child watchers for 0 more than once
        self._child_watchers[0] = []
    def _register_child_watcher(self, watcher):
        # Index child watchers by the pid they wait for (0 = any child).
        self._child_watchers[watcher._pid].append(watcher)
def _unregister_child_watcher(self, watcher):
try:
# stop() should be idempotent
self._child_watchers[watcher._pid].remove(watcher)
except ValueError:
pass
# Now's a good time to clean up any dead watchers we don't need
# anymore
for pid in list(self._child_watchers):
if not self._child_watchers[pid]:
del self._child_watchers[pid]
    def io(self, fd, events, ref=True, priority=None):
        """
        Return a multiplexed IO watcher for *fd* interested in *events*.

        One underlying native watcher is shared per fd; each caller gets
        a multiplexer view of it.
        """
        # We rely on hard references here and explicit calls to
        # close() on the returned object to correctly manage
        # the watcher lifetimes.

        io_watchers = self._io_watchers
        try:
            io_watcher = io_watchers[fd]
            assert io_watcher._multiplex_watchers, ("IO Watcher %s unclosed but should be dead" % io_watcher)
        except KeyError:
            # Start the watcher with just the events that we're interested in.
            # as multiplexers are added, the real event mask will be updated to keep in sync.
            # If we watch for too much, we get spurious wakeups and busy loops.
            io_watcher = self._watchers.io(self, fd, 0)
            io_watchers[fd] = io_watcher
            # When the last multiplexer closes, drop the shared watcher
            # from the registry.
            io_watcher._no_more_watchers = lambda: delitem(io_watchers, fd)

        return io_watcher.multiplex(events)
def prepare(self, ref=True, priority=None):
# We run arbitrary code in python_prepare_callback. That could switch
# greenlets. If it does that while also manipulating the active prepare
# watchers, we could corrupt the process state, since the prepare watcher
# queue is iterated on the stack (on unix). We could workaround this by implementing
# prepare watchers in pure Python.
# See https://github.com/gevent/gevent/issues/1126
raise TypeError("prepare watchers are not currently supported in libuv. "
"If you need them, please contact the maintainers.")
| 27,626 | 38.981187 | 109 | py |
gevent | gevent-master/src/gevent/libuv/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Nothing public here: this package exists only to hold the
# libuv-specific loop/watcher implementation modules.
__all__ = []
| 169 | 20.25 | 38 | py |
gevent | gevent-master/src/gevent/libuv/_corecffi_build.py | # pylint: disable=no-member
# This module is only used to create and compile the gevent.libuv._corecffi module;
# nothing should be directly imported from it except `ffi`, which should only be
# used for `ffi.compile()`; programs should import gevent._corecfffi.
# However, because we are using "out-of-line" mode, it is necessary to examine
# this file to know what functions are created and available on the generated
# module.
from __future__ import absolute_import, print_function
import os
import os.path # pylint:disable=no-name-in-module
import platform
import sys
from cffi import FFI
# setup.py runs us with the project root as the working directory;
# make sure its helper module is importable.
sys.path.append(".")
try:
    import _setuputils
except ImportError:
    print("This file must be imported with setup.py in the current working dir.")
    raise

__all__ = []

WIN = sys.platform.startswith('win32')
# Whether to compile the bundled libuv sources into the extension
# (vs. linking against a system-provided libuv).
LIBUV_EMBED = _setuputils.should_embed('libuv')
PY2 = sys.version_info[0] == 2

ffi = FFI()

# Key directories: this package, its parent, the project root, and the
# bundled libuv checkout under deps/.
thisdir = os.path.dirname(os.path.abspath(__file__))
parentdir = os.path.abspath(os.path.join(thisdir, '..'))
setup_py_dir = os.path.abspath(os.path.join(thisdir, '..', '..', '..'))
libuv_dir = os.path.abspath(os.path.join(setup_py_dir, 'deps', 'libuv'))
def read_source(name):
    """Return the contents of file *name*, relative to this directory."""
    # pylint:disable=unspecified-encoding
    # (No explicit encoding: this must still run under Python 2's open().)
    source_path = os.path.join(thisdir, name)
    with open(source_path, 'r') as source_file:
        return source_file.read()
_cdef = read_source('_corecffi_cdef.c')
_source = read_source('_corecffi_source.c')

# These defines and uses help keep the C file readable and lintable by
# C tools; strip/translate them before handing the text to cffi.
_cdef = _cdef.replace('#define GEVENT_STRUCT_DONE int', '')
_cdef = _cdef.replace("GEVENT_STRUCT_DONE _;", '...;')

# nlink_t is not used in libuv.
_cdef = _cdef.replace('#define GEVENT_ST_NLINK_T int',
                      '')
_cdef = _cdef.replace('GEVENT_ST_NLINK_T', 'nlink_t')

_cdef = _cdef.replace('#define GEVENT_UV_OS_SOCK_T int', '')

# uv_os_sock_t is int on POSIX and SOCKET on Win32, but socket is
# just another name for handle, which is just another name for 'void*'
# which we will treat as an 'unsigned long' or 'unsigned long long'
# since it comes through 'fileno()' where it has been cast as an int.
# See class watcher.io
_void_pointer_as_integer = 'intptr_t'
_cdef = _cdef.replace("GEVENT_UV_OS_SOCK_T", 'int' if not WIN else _void_pointer_as_integer)

# Headers come from the bundled libuv checkout.
LIBUV_INCLUDE_DIRS = [
    os.path.join(libuv_dir, 'include'),
    os.path.join(libuv_dir, 'src'),
]
# Initially based on https://github.com/saghul/pyuv/blob/v1.x/setup_libuv.py
def _libuv_source(rel_path):
    """
    Return the setup.py-relative path of a bundled libuv C source file.

    Certain versions of setuptools, notably on windows, are *very*
    picky about what we feed to sources= "setup() arguments must
    *always* be /-separated paths relative to the setup.py
    directory, *never* absolute paths." POSIX doesn't have that issue.
    """
    return os.path.join('deps', 'libuv', 'src', rel_path)
# Platform-independent libuv sources; platform-specific files are
# appended below.
LIBUV_SOURCES = [
    _libuv_source('fs-poll.c'),
    _libuv_source('inet.c'),
    _libuv_source('threadpool.c'),
    _libuv_source('uv-common.c'),
    _libuv_source('version.c'),
    _libuv_source('uv-data-getter-setters.c'),
    _libuv_source('timer.c'),
    _libuv_source('idna.c'),
    _libuv_source('strscpy.c'),
    # Added between 1.42.0 and 1.44.2; only used
    # on unix in that release, but generic
    _libuv_source('strtok.c'),
]
# Platform-specific libuv sources. Mirrors the layout of libuv's own
# Makefile.am; untested platforms are marked below.
if WIN:
    LIBUV_SOURCES += [
        _libuv_source('win/async.c'),
        _libuv_source('win/core.c'),
        _libuv_source('win/detect-wakeup.c'),
        _libuv_source('win/dl.c'),
        _libuv_source('win/error.c'),
        _libuv_source('win/fs-event.c'),
        _libuv_source('win/fs.c'),
        # getaddrinfo.c refers to ConvertInterfaceIndexToLuid
        # and ConvertInterfaceLuidToNameA, which are supposedly in iphlpapi.h
        # and iphlpapi.lib/dll. But on Windows 10 with Python 3.5 and VC 14 (Visual Studio 2015),
        # I get an undefined warning from the compiler for those functions and
        # a link error from the linker, so this file can't be included.
        # This is possibly because the functions are defined for Windows Vista, and
        # Python 3.5 builds with at earlier SDK?
        # Fortunately we don't use those functions.
        #_libuv_source('win/getaddrinfo.c'),
        # getnameinfo.c refers to uv__getaddrinfo_translate_error from
        # getaddrinfo.c, which we don't have.
        #_libuv_source('win/getnameinfo.c'),
        _libuv_source('win/handle.c'),
        _libuv_source('win/loop-watcher.c'),
        _libuv_source('win/pipe.c'),
        _libuv_source('win/poll.c'),
        _libuv_source('win/process-stdio.c'),
        _libuv_source('win/process.c'),
        _libuv_source('win/signal.c'),
        _libuv_source('win/snprintf.c'),
        _libuv_source('win/stream.c'),
        _libuv_source('win/tcp.c'),
        _libuv_source('win/thread.c'),
        _libuv_source('win/tty.c'),
        _libuv_source('win/udp.c'),
        _libuv_source('win/util.c'),
        _libuv_source('win/winapi.c'),
        _libuv_source('win/winsock.c'),
    ]
else:
    # Sources common to all unix-like platforms.
    LIBUV_SOURCES += [
        _libuv_source('unix/async.c'),
        _libuv_source('unix/core.c'),
        _libuv_source('unix/dl.c'),
        _libuv_source('unix/fs.c'),
        _libuv_source('unix/getaddrinfo.c'),
        _libuv_source('unix/getnameinfo.c'),
        _libuv_source('unix/loop-watcher.c'),
        _libuv_source('unix/loop.c'),
        _libuv_source('unix/pipe.c'),
        _libuv_source('unix/poll.c'),
        _libuv_source('unix/process.c'),
        _libuv_source('unix/signal.c'),
        _libuv_source('unix/stream.c'),
        _libuv_source('unix/tcp.c'),
        _libuv_source('unix/thread.c'),
        _libuv_source('unix/tty.c'),
        _libuv_source('unix/udp.c'),
    ]


if sys.platform.startswith('linux'):
    LIBUV_SOURCES += [
        _libuv_source('unix/linux-core.c'),
        _libuv_source('unix/linux-inotify.c'),
        _libuv_source('unix/linux-syscalls.c'),
        _libuv_source('unix/procfs-exepath.c'),
        _libuv_source('unix/proctitle.c'),
        _libuv_source('unix/random-sysctl-linux.c'),
        _libuv_source('unix/epoll.c'),
    ]
elif sys.platform == 'darwin':
    LIBUV_SOURCES += [
        _libuv_source('unix/bsd-ifaddrs.c'),
        _libuv_source('unix/darwin.c'),
        _libuv_source('unix/darwin-proctitle.c'),
        _libuv_source('unix/fsevents.c'),
        _libuv_source('unix/kqueue.c'),
        _libuv_source('unix/proctitle.c'),
    ]
elif sys.platform.startswith(('freebsd', 'dragonfly')): # pragma: no cover
    # Not tested
    LIBUV_SOURCES += [
        _libuv_source('unix/bsd-ifaddrs.c'),
        _libuv_source('unix/freebsd.c'),
        _libuv_source('unix/kqueue.c'),
        _libuv_source('unix/posix-hrtime.c'),
        _libuv_source('unix/bsd-proctitle.c'),
    ]
elif sys.platform.startswith('openbsd'): # pragma: no cover
    # Not tested
    LIBUV_SOURCES += [
        _libuv_source('unix/bsd-ifaddrs.c'),
        _libuv_source('unix/kqueue.c'),
        _libuv_source('unix/openbsd.c'),
        _libuv_source('unix/posix-hrtime.c'),
        _libuv_source('unix/bsd-proctitle.c'),
    ]
elif sys.platform.startswith('netbsd'): # pragma: no cover
    # Not tested
    LIBUV_SOURCES += [
        _libuv_source('unix/bsd-ifaddrs.c'),
        _libuv_source('unix/kqueue.c'),
        _libuv_source('unix/netbsd.c'),
        _libuv_source('unix/posix-hrtime.c'),
        _libuv_source('unix/bsd-proctitle.c'),
    ]
elif sys.platform.startswith('sunos'): # pragma: no cover
    # Not tested.
    LIBUV_SOURCES += [
        _libuv_source('unix/no-proctitle.c'),
        _libuv_source('unix/sunos.c'),
    ]
elif sys.platform.startswith('aix'): # pragma: no cover
    # Not tested.
    LIBUV_SOURCES += [
        _libuv_source('unix/aix.c'),
        _libuv_source('unix/aix-common.c'),
    ]
elif sys.platform.startswith('haiku'): # pragma: no cover
    # Not tested
    LIBUV_SOURCES += [
        _libuv_source('unix/haiku.c')
    ]
elif sys.platform.startswith('cygwin'): # pragma: no cover
    # Not tested.
    # Based on Cygwin package sources /usr/src/libuv-1.32.0-1.src/libuv-1.32.0/Makefile.am
    # Apparently the same upstream at https://github.com/libuv/libuv/blob/v1.x/Makefile.am
    LIBUV_SOURCES += [
        _libuv_source('unix/cygwin.c'),
        _libuv_source('unix/bsd-ifaddrs.c'),
        _libuv_source('unix/no-fsevents.c'),
        _libuv_source('unix/no-proctitle.c'),
        _libuv_source('unix/posix-hrtime.c'),
        _libuv_source('unix/posix-poll.c'),
        _libuv_source('unix/procfs-exepath.c'),
        _libuv_source('unix/sysinfo-loadavg.c'),
        _libuv_source('unix/sysinfo-memory.c'),
    ]
# Preprocessor macros ((name, value) pairs) passed to the extension build.
LIBUV_MACROS = [
    ('LIBUV_EMBED', int(LIBUV_EMBED)),
]

def _define_macro(name, value):
    # Register a preprocessor macro for the extension build.
    LIBUV_MACROS.append((name, value))

# Names of native libraries to link against.
LIBUV_LIBRARIES = []

def _add_library(name):
    # Register a native library to link the extension against.
    LIBUV_LIBRARIES.append(name)
# Per-platform compiler macros and link libraries, mirroring libuv's
# own build configuration.
if sys.platform != 'win32':
    _define_macro('_LARGEFILE_SOURCE', 1)
    _define_macro('_FILE_OFFSET_BITS', 64)

if sys.platform.startswith('linux'):
    _add_library('dl')
    _add_library('rt')
    _define_macro('_GNU_SOURCE', 1)
    _define_macro('_POSIX_C_SOURCE', '200112')
elif sys.platform == 'darwin':
    _define_macro('_DARWIN_USE_64_BIT_INODE', 1)
    _define_macro('_DARWIN_UNLIMITED_SELECT', 1)
elif sys.platform.startswith('netbsd'): # pragma: no cover
    _add_library('kvm')
elif sys.platform.startswith('sunos'): # pragma: no cover
    _define_macro('__EXTENSIONS__', 1)
    _define_macro('_XOPEN_SOURCE', 500)
    _define_macro('_REENTRANT', 1)
    _add_library('kstat')
    _add_library('nsl')
    _add_library('sendfile')
    _add_library('socket')
    if platform.release() == '5.10':
        # https://github.com/libuv/libuv/issues/1458
        # https://github.com/giampaolo/psutil/blob/4d6a086411c77b7909cce8f4f141bbdecfc0d354/setup.py#L298-L300
        _define_macro('SUNOS_NO_IFADDRS', '')
elif sys.platform.startswith('aix'): # pragma: no cover
    _define_macro('_LINUX_SOURCE_COMPAT', 1)
    if os.uname().sysname != 'OS400':
        _add_library('perfstat')
elif WIN:
    # All other gevent .pyd files link to the specific minor-version Python
    # DLL, so we should do the same here. In virtual environments that don't
    # contain the major-version python?.dll stub, _corecffi.pyd would otherwise
    # cause the Windows DLL loader to search the entire PATH for a DLL with
    # that name. This might end up bringing a second, ABI-incompatible Python
    # version into the process, which can easily lead to crashes.
    # See https://github.com/gevent/gevent/pull/1814/files
    _define_macro('_CFFI_NO_LIMITED_API', 1)
    _define_macro('_GNU_SOURCE', 1)
    _define_macro('WIN32', 1)
    _define_macro('_CRT_SECURE_NO_DEPRECATE', 1)
    _define_macro('_CRT_NONSTDC_NO_DEPRECATE', 1)
    _define_macro('_CRT_SECURE_NO_WARNINGS', 1)
    _define_macro('_WIN32_WINNT', '0x0602')
    _define_macro('WIN32_LEAN_AND_MEAN', 1)
    # This value isn't available on the platform that we build and
    # test Python 2.7 on. It's used for getting power management
    # suspend/resume notifications, maybe for keeping timers accurate?
    #
    # TODO: This should be a more targeted check based on the platform
    # version, but that's complicated because it depends on having a
    # particular patch installed to the OS, and I don't know how to
    # check for that...but we're dropping Python 2 support soon, so
    # I suspect it really doesn't matter.
    if PY2:
        _define_macro('LOAD_LIBRARY_SEARCH_SYSTEM32', 0)
    _add_library('advapi32')
    _add_library('iphlpapi')
    _add_library('psapi')
    _add_library('shell32')
    _add_library('user32')
    _add_library('userenv')
    _add_library('ws2_32')

if not LIBUV_EMBED:
    # Linking against a system libuv: drop the bundled sources/headers
    # and just link the shared library.
    del LIBUV_SOURCES[:]
    del LIBUV_INCLUDE_DIRS[:]
    _add_library('uv')

LIBUV_INCLUDE_DIRS.append(parentdir)
# Register the (munged) declarations and configure the out-of-line
# compilation of the gevent.libuv._corecffi extension module.
ffi.cdef(_cdef)
ffi.set_source(
    'gevent.libuv._corecffi',
    _source,
    sources=LIBUV_SOURCES,
    depends=LIBUV_SOURCES,
    include_dirs=LIBUV_INCLUDE_DIRS,
    libraries=list(LIBUV_LIBRARIES),
    define_macros=list(LIBUV_MACROS),
    extra_compile_args=list(_setuputils.IGNORE_THIRD_PARTY_WARNINGS),
)
if __name__ == '__main__':
    # See notes in libev/_corecffi_build.py for how to test this.
    #
    # Other than the obvious directory changes, the changes are:
    #
    # CPPFLAGS=-Ideps/libuv/include/ -Isrc/gevent/
    ffi.compile(verbose=True)
| 12,349 | 34.693642 | 110 | py |
gevent | gevent-master/scripts/gprospector.py | from __future__ import print_function
import re
import sys
from prospector.run import main
def _excepthook(exc_type, exc_value, trace):
    """
    Replacement sys.excepthook: dump each traceback frame's code object
    plus a few interesting locals, to help debug prospector crashes.
    """
    current = trace
    while current is not None:
        frame = current.tb_frame
        code = frame.f_code
        print(code, code.co_name)
        # These locals identify the AST node being visited when
        # prospector/astroid blows up.
        for local_name in ('self', 'node', 'elt'):
            if local_name in frame.f_locals:
                print(local_name, frame.f_locals[local_name])
        print('---')
        current = current.tb_next

sys.excepthook = _excepthook
if __name__ == '__main__':
    # Strip the script-wrapper suffix so prospector sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| 540 | 22.521739 | 68 | py |
gevent | gevent-master/scripts/releases/appveyor-download.py | #!/usr/bin/env python
"""
Use the AppVeyor API to download Windows artifacts.
Taken from: https://bitbucket.org/ned/coveragepy/src/tip/ci/download_appveyor.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""
import argparse
import os
import zipfile
import requests
# To delete:
# DELETE https://ci.appveyor.com/api/projects/{accountName}/{projectSlug}/buildcache
# requests.delete(make_url('/projects/denik/gevent/buildcache'), headers=make_auth_headers)
def make_auth_headers(fname=".appveyor.token"):
    """Make the authentication headers needed to use the Appveyor API."""
    token_path = fname
    if not os.path.exists(token_path):
        # Fall back to a user-level token location.
        token_path = os.path.expanduser("~/bin/appveyor-token")
    if not os.path.exists(token_path):
        raise RuntimeError(
            "Please create a file named `.appveyor.token` in the current directory. "
            "You can get the token from https://ci.appveyor.com/api-token"
        )
    with open(token_path) as f:
        token = f.read().strip()

    return {'Authorization': 'Bearer {}'.format(token)}
def make_url(url, **kwargs):
    """Build an Appveyor API url."""
    path = url.format(**kwargs)
    return "https://ci.appveyor.com/api" + path
def get_project_build(account_project, build_num):
    """
    Return the JSON details of an Appveyor build.

    Fetches *build_num* if given, otherwise the latest build of
    *account_project*.
    """
    url = '/projects/{account_project}'
    url_args = {'account_project': account_project}
    if build_num:
        url += '/build/{buildVersion}'
        url_args['buildVersion'] = build_num
    response = requests.get(make_url(url, **url_args), headers=make_auth_headers())
    return response.json()
def download_latest_artifacts(account_project, build_num):
    """
    Download all the artifacts from the latest (or given) build.

    Zip artifacts are unpacked in place and the archive removed.
    """
    build = get_project_build(account_project, build_num)
    jobs = build['build']['jobs']
    print("Build {0[build][version]}, {1} jobs: {0[build][message]}".format(build, len(jobs)))
    for job in jobs:
        name = job['name'].partition(':')[2].split(',')[0].strip()
        print("  {0}: {1[status]}, {1[artifactsCount]} artifacts".format(name, job))
        url = make_url("/buildjobs/{jobid}/artifacts", jobid=job['jobId'])
        response = requests.get(url, headers=make_auth_headers())
        artifacts = response.json()
        for artifact in artifacts:
            is_zip = artifact['type'] == "Zip"
            filename = artifact['fileName']
            print("    {0}, {1} bytes".format(filename, artifact['size']))
            # BUG FIX: the URL template must interpolate the artifact's
            # file name; previously it contained a literal "(unknown)"
            # and the filename= argument was silently ignored, so every
            # artifact request hit a nonexistent URL.
            url = make_url(
                "/buildjobs/{jobid}/artifacts/{filename}",
                jobid=job['jobId'],
                filename=filename
            )
            download_url(url, filename, make_auth_headers())
            if is_zip:
                unpack_zipfile(filename)
                os.remove(filename)
def ensure_dirs(filename):
    """Make sure the directories exist for `filename`."""
    dirname = os.path.dirname(filename)
    if not dirname:
        # Bare filename in the current directory; nothing to create.
        return
    try:
        os.makedirs(dirname)
    except OSError:
        # FIX: the previous exists()-then-makedirs() sequence raced with
        # other processes/jobs creating the same directory.  Attempt the
        # creation unconditionally and only re-raise if the directory
        # still doesn't exist (i.e., a genuine failure).
        if not os.path.isdir(dirname):
            raise
def download_url(url, filename, headers):
    """Download a file from `url` to `filename`."""
    ensure_dirs(filename)
    response = requests.get(url, headers=headers, stream=True)
    if response.status_code != 200:
        # Silently skip failed downloads, matching the caller's
        # best-effort behavior.
        return
    with open(filename, 'wb') as out:
        for chunk in response.iter_content(16 * 1024):
            out.write(chunk)
def unpack_zipfile(filename):
    """Unpack a zipfile, using the names in the zip."""
    # FIX: the previous code wrapped only the raw file object in a
    # ``with`` block, so the ZipFile itself was never closed.  Using
    # ZipFile as a context manager closes both.
    # NOTE(review): member names are used as-is; a malicious archive
    # could escape the working directory (zip-slip).  Acceptable for
    # our own CI artifacts, but don't reuse on untrusted zips.
    with zipfile.ZipFile(filename) as z:
        for name in z.namelist():
            print("    extracting {}".format(name))
            ensure_dirs(name)
            z.extract(name)
def main(argv=None):
    """
    Command-line entry point: parse arguments and download artifacts.

    :param argv: Argument list (defaults to ``sys.argv[1:]``).
    """
    import sys
    argv = argv or sys.argv[1:]
    parser = argparse.ArgumentParser(description='Download artifacts from AppVeyor.')
    parser.add_argument(
        'name',
        metavar='ID',
        help='Project ID in AppVeyor. Example: ionelmc/python-nameless'
    )
    parser.add_argument(
        'build',
        default=None,
        nargs='?',
        help=(
            'The project build version. If not given, discovers the latest. '
            'Note that this is not the build number. '
            'Example: 1.0.2420'
        )
    )
    args = parser.parse_args(argv)

    download_latest_artifacts(args.name, args.build)

if __name__ == "__main__":
    main()
| 4,494 | 31.810219 | 94 | py |
gevent | gevent-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# gevent documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 1 09:30:02 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
# Use the python versions instead of the cython compiled versions
# for better documentation extraction and ease of tweaking docs.
os.environ['PURE_PYTHON'] = '1'

sys.path.append(os.path.dirname(__file__)) # for mysphinxext

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# 1.8 was the last version that runs on Python 2; 2.0+ requires Python 3.
# `autodoc_default_options` was new in 1.8
needs_sphinx = "1.8"

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.coverage',
    'sphinx.ext.intersphinx',
    'sphinx.ext.extlinks',
    'sphinx.ext.viewcode',
    # Third-party
    'repoze.sphinx.autointerface',
    'sphinxcontrib.programoutput',
    # Ours
]

# Cross-reference targets for external projects (module -> inventory URL).
intersphinx_mapping = {
    'python': ('https://docs.python.org/', None),
    'greenlet': ('https://greenlet.readthedocs.io/en/latest/', None),
    'zopeevent': ('https://zopeevent.readthedocs.io/en/latest/', None),
    'zopecomponent': ('https://zopecomponent.readthedocs.io/en/latest/', None),
}

# Shorthand roles: :issue:`NNN` and :pr:`NNN` link to GitHub.
extlinks = {'issue': ('https://github.com/gevent/gevent/issues/%s',
                      'issue #%s'),
            'pr': ('https://github.com/gevent/gevent/pull/%s',
                   'pull request #%s')}

# Sphinx 1.8+ prefers this to `autodoc_default_flags`. It's documented that
# either True or None mean the same thing as just setting the flag, but
# only None works in 1.8 (True works in 2.0)
autodoc_default_options = {
    'members': None,
    'show-inheritance': None,
}
autodoc_member_order = 'groupwise'

autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'contents'

# General information about the project.
project = u'gevent'
copyright = u'2009-2023, gevent contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# (Pulled straight from the package so docs always match the code.)
from gevent import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'perldoc'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['gevent.']


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# XXX: Our custom theme stopped working with Sphinx 7.
#html_theme = 'mytheme'
html_theme = "furo"
html_css_files = [
    'custom.css',
]
html_theme_options = {
    "sidebar_hide_name": True, # Because we show a logo
    'light_css_variables': {
        "color-brand-primary": "#7c9a5e",
        "color-brand-content": "#7c9a5e",
        "color-foreground-border": "#b7d897",
        'font-stack': '"SF Pro",-apple-system,BlinkMacSystemFont,"Segoe UI",Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji"',
        'font-stack--monospace': '"JetBrainsMono", "JetBrains Mono", "JetBrains Mono Regular", "JetBrainsMono-Regular", ui-monospace, profont, monospace',
    },
}

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Documentation'

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/5564530.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# This is true by default in sphinx 1.6
html_use_smartypants = True
smartquotes = True # 1.7

# Custom sidebar templates, maps document names to template names.
html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {'contentstable': 'contentstable.html'}

# If false, no module index is generated.
html_use_modindex = True

# If false, no index is generated.
html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'geventdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'gevent.tex', u'gevent Documentation',
u'gevent contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
###############################################################################
# prevent some stuff from showing up in docs
import socket
import gevent.socket
for item in gevent.socket.__all__[:]:
if getattr(gevent.socket, item) is getattr(socket, item, None):
gevent.socket.__all__.remove(item)
if not hasattr(gevent.socket, '_fileobject'):
# Python 3 building Python 2 docs.
gevent.socket._fileobject = object()
| 8,686 | 31.535581 | 154 | py |
Voxurf | Voxurf-main/run.py | import os, sys, copy, glob, json, time, random, argparse, cv2
from shutil import copyfile
from tqdm import tqdm, trange
import math
import mmcv
import imageio
import numpy as np
import trimesh
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from datetime import datetime
from lib import utils, dtu_eval
# from torch.utils.tensorboard import SummaryWriter
from lib.load_data import load_data
from lib.utils import rgb_to_luminance, get_sobel, calc_grad, \
GradLoss, write_ply, load_point_cloud, get_root_logger
from torch_efficient_distloss import flatten_eff_distloss
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
def config_parser():
    '''Define command line arguments.

    Returns:
        argparse.ArgumentParser: parser covering config selection, checkpoint
        reload behavior, testing/rendering options, and logging/saving options.
    '''
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config', required=True,
                        help='config file path')
    parser.add_argument("--seed", type=int, default=777,
                        help='Random seed')
    parser.add_argument("--no_reload", action='store_true',
                        help='do not reload weights from saved ckpt')
    parser.add_argument("--no_reload_optimizer", action='store_true',
                        help='do not reload optimizer state from saved ckpt')
    parser.add_argument("--ft_path", type=str, default='',
                        help='specific weights npy file to reload for coarse network')
    parser.add_argument("--export_bbox_and_cams_only", type=str, default='',
                        help='export scene bbox and camera poses for debugging and 3d visualization')
    parser.add_argument("--export_coarse_only", type=str, default='')
    parser.add_argument("--export_fine_only", type=str, default='')
    parser.add_argument("--mesh_from_sdf", action='store_true')

    # testing options
    parser.add_argument("--render_only", action='store_true',
                        help='do not optimize, reload weights and render out render_poses path')
    parser.add_argument("--render_test", action='store_true')
    parser.add_argument("--render_train", action='store_true')
    parser.add_argument("--render_video", action='store_true')
    parser.add_argument("--render_video_factor", type=int, default=0,
                        help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
    parser.add_argument("--eval_ssim", default=True)
    parser.add_argument("--eval_lpips_alex", default=True)
    parser.add_argument("--eval_lpips_vgg", default=True)

    # logging/saving options
    parser.add_argument("--i_print", type=int, default=100,
                        # NOTE: fixed typo "loggin" -> "logging" in the help text
                        help='frequency of console printout and metric logging')
    parser.add_argument("--i_validate", type=int, default=10000)
    parser.add_argument("--i_weights", type=int, default=10000,
                        help='frequency of weight ckpt saving')
    parser.add_argument("-s", "--suffix", type=str, default="",
                        help='suffix for exp name')
    parser.add_argument("-p", "--prefix", type=str, default="",
                        help='prefix for exp name')
    parser.add_argument("--load_density_only", type=int, default=1)
    parser.add_argument("--load_expname", type=str, default="") # dvgo_Statues_original
    parser.add_argument("--sdf_mode", type=str, default="density")
    parser.add_argument("--scene", type=str, default=0)
    parser.add_argument("--no_dvgo_init", action='store_true')
    parser.add_argument("--run_dvgo_init", action='store_true')
    parser.add_argument("--interpolate", default='')
    parser.add_argument("--extract_color", action='store_true')
    return parser
@torch.no_grad()  # fix: decorator was duplicated; a single no_grad wrapper suffices
def render_viewpoints(model, render_poses, HW, Ks, ndc, render_kwargs,
                      gt_imgs=None, masks=None, savedir=None, render_factor=0, idx=None,
                      eval_ssim=True, eval_lpips_alex=True, eval_lpips_vgg=True,
                      use_bar=True, step=0, rgb_only=False):
    '''Render images for the given viewpoints; run evaluation if gt given.

    Args:
        model: trained volume-rendering model (queried ray-batch by ray-batch).
        render_poses: per-view camera-to-world matrices.
        HW, Ks: per-view image sizes and intrinsics (downscaled if render_factor != 0).
        ndc, render_kwargs: rendering setup forwarded to the model.
        gt_imgs, masks: optional ground truth and foreground masks for metrics.
        savedir: if given, write rendered/gt/error/normal images there.
        render_factor: >0 renders at reduced resolution (metrics are skipped).
        idx, step: naming helpers for the saved files.
        rgb_only: if True, only write the raw RGB renders and skip everything else.

    Returns:
        (rgbs, disps): stacked rendered images and disparity maps.
    '''
    assert len(render_poses) == len(HW) and len(HW) == len(Ks)

    if render_factor!=0:
        HW = np.copy(HW)
        Ks = np.copy(Ks)
        HW //= render_factor
        Ks[:, :2, :3] //= render_factor

    rgbs = []
    normals = []
    ins = []
    outs = []
    disps = []
    psnrs = []
    fore_psnrs = []
    bg_psnrs = []
    ssims = []
    lpips_alex = []
    lpips_vgg = []
    render_normal = True
    # models with a separate background density grid also report in/out splits
    split_bg = getattr(model, "bg_density", False)
    for i, c2w in enumerate(tqdm(render_poses)):
        H, W = HW[i]
        K = Ks[i]
        rays_o, rays_d, viewdirs = Model.get_rays_of_a_view(
            H, W, K, c2w, ndc, inverse_y=render_kwargs['inverse_y'],
            flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        keys = ['rgb_marched', 'disp', 'alphainv_cum']
        if render_normal:
            keys.append('normal_marched')
        if split_bg:
            keys.extend(['in_marched', 'out_marched'])
        rays_o = rays_o.flatten(0, -2)
        rays_d = rays_d.flatten(0, -2)
        viewdirs = viewdirs.flatten(0, -2)
        # render in chunks of 8192 rays to bound peak GPU memory
        render_result_chunks = [
            {k: v for k, v in model(ro, rd, vd, **render_kwargs).items() if k in keys}
            for ro, rd, vd in zip(rays_o.split(8192, 0), rays_d.split(8192, 0), viewdirs.split(8192, 0))
        ]
        render_result = {
            k: torch.cat([ret[k] for ret in render_result_chunks]).reshape(H,W,-1)
            for k in render_result_chunks[0].keys()
        }
        rgb = render_result['rgb_marched'].cpu().numpy()
        rgbs.append(rgb)
        if rgb_only and savedir is not None:
            imageio.imwrite(os.path.join(savedir, '{:03d}.png'.format(i)), utils.to8b(rgb))
            continue

        disp = render_result['disp'].cpu().numpy()
        disps.append(disp)

        if render_normal:
            normal = render_result['normal_marched'].cpu().numpy()
            normals.append(normal)

        if split_bg:
            inside = render_result['in_marched'].cpu().numpy()
            ins.append(inside)
            outside = render_result['out_marched'].cpu().numpy()
            outs.append(outside)

        if masks is not None:
            if isinstance(masks[i], torch.Tensor):
                mask = masks[i].cpu().numpy() #.reshape(H, W, 1)
            else:
                mask = masks[i] #.reshape(H, W, 1)
            if mask.ndim == 2:
                mask = mask.reshape(H, W, 1)
            bg_rgb = rgb * (1 - mask)
            bg_gt = gt_imgs[i] * (1 - mask)
        else:
            mask, bg_rgb, bg_gt = np.ones(rgb.shape[:2]), np.ones(rgb.shape), np.ones(rgb.shape)

        if i==0:
            logger.info('Testing {} {}'.format(rgb.shape, disp.shape))

        if gt_imgs is not None and render_factor==0:
            p = -10. * np.log10(np.mean(np.square(rgb - gt_imgs[i])))
            back_p, fore_p = 0., 0.
            if masks is not None:
                back_p = -10. * np.log10(np.sum(np.square(bg_rgb - bg_gt))/np.sum(1-mask))
                fore_p = -10. * np.log10(np.sum(np.square(rgb - gt_imgs[i]))/np.sum(mask))
            # visualize per-pixel error as an exponential heat map
            error = 1 - np.exp(-20 * np.square(rgb - gt_imgs[i]).sum(-1))[...,None].repeat(3,-1)
            print("{} | full-image psnr {:.2f} | foreground psnr {:.2f} | background psnr: {:.2f} ".format(i, p, fore_p, back_p))
            psnrs.append(p)
            fore_psnrs.append(fore_p)
            bg_psnrs.append(back_p)
            if eval_ssim:
                ssims.append(utils.rgb_ssim(rgb, gt_imgs[i], max_val=1))
            if eval_lpips_alex:
                lpips_alex.append(utils.rgb_lpips(rgb, gt_imgs[i], net_name='alex', device='cpu'))
            if eval_lpips_vgg:
                lpips_vgg.append(utils.rgb_lpips(rgb, gt_imgs[i], net_name='vgg', device='cpu'))

        if savedir is not None:
            rgb8 = utils.to8b(rgbs[-1])
            id = idx if idx is not None else i
            step_pre = str(step) + '_' if step > 0 else ''
            filename = os.path.join(savedir, step_pre+'{:03d}.png'.format(id))
            rendername = os.path.join(savedir, step_pre + 'render_{:03d}.png'.format(id))
            gtname = os.path.join(savedir, step_pre + 'gt_{:03d}.png'.format(id))

            img8 = rgb8
            # fix: `error` (and gt comparison images) only exist when metrics
            # were computed above, i.e. render_factor == 0; guarding here avoids
            # a NameError when rendering downsampled previews with gt supplied.
            if gt_imgs is not None and render_factor == 0:
                error8 = utils.to8b(error)
                gt8 = utils.to8b(gt_imgs[i])
                imageio.imwrite(gtname, gt8)
                img8 = np.concatenate([error8, rgb8, gt8], axis=0)

            if split_bg and gt_imgs is not None and render_factor == 0:
                in8 = utils.to8b(ins[-1])
                out8 = utils.to8b(outs[-1])
                img8_2 = np.concatenate([in8, out8], axis=1)
                img8 = np.concatenate([rgb8, gt8], axis=1)
                img8 = np.concatenate([img8, img8_2], axis=0)
            imageio.imwrite(rendername, rgb8)
            imageio.imwrite(filename, img8)

            if render_normal:
                # rotate world-space normals into the camera frame for display
                rot = c2w[:3, :3].permute(1, 0).cpu().numpy()
                normal = (rot @ normals[-1][..., None])[...,0]
                normal = 0.5 - 0.5 * normal
                if masks is not None:
                    normal = normal * mask.mean(-1)[...,None] + (1 - mask)
                normal8 = utils.to8b(normal)
                step_pre = str(step) + '_' if step > 0 else ''
                filename = os.path.join(savedir, step_pre+'{:03d}_normal.png'.format(id))
                imageio.imwrite(filename, normal8)

    rgbs = np.array(rgbs)
    disps = np.array(disps)
    if len(psnrs):
        logger.info('Testing psnr {:.2f} (avg) | foreground {:.2f} | background {:.2f}'.format(
            np.mean(psnrs), np.mean(fore_psnrs), np.mean(bg_psnrs)))
        if eval_ssim: logger.info('Testing ssim {} (avg)'.format(np.mean(ssims)))
        if eval_lpips_vgg: logger.info('Testing lpips (vgg) {} (avg)'.format(np.mean(lpips_vgg)))
        if eval_lpips_alex: logger.info('Testing lpips (alex) {} (avg)'.format(np.mean(lpips_alex)))
    return rgbs, disps
def gen_poses_between(pose_0, pose_1, ratio):
    """Interpolate a camera-to-world pose between two key poses.

    The rotation is slerped and the translation linearly blended in
    world-to-camera space, then the result is inverted back to a
    camera-to-world matrix (float32).
    """
    w2c_0 = np.linalg.inv(pose_0)
    w2c_1 = np.linalg.inv(pose_1)
    key_rots = Rot.from_matrix(np.stack([w2c_0[:3, :3], w2c_1[:3, :3]]))
    slerp = Slerp([0, 1], key_rots)
    blended = np.eye(4, dtype=np.float32)
    blended[:3, :3] = slerp(ratio).as_matrix()
    blended[:3, 3] = (1.0 - ratio) * w2c_0[:3, 3] + ratio * w2c_1[:3, 3]
    return np.linalg.inv(blended)
def interpolate_view(savedir, img_idx_0, img_idx_1, render_poses, HW, Ks, ndc, repeat=1, **render_kwargs):
    """Render a smooth fly-through between two views and save it as an mp4.

    Frames are written individually under ``savedir/images_full`` and the
    forward+reversed sequence is assembled into a looping 30-fps video.
    """
    cams = render_poses.cpu().numpy()
    pose_0, pose_1 = cams[img_idx_0], cams[img_idx_1]
    n_frames = 60
    image_dir = os.path.join(savedir, 'images_full')
    os.makedirs(image_dir, exist_ok=True)
    # Ease-in/ease-out interpolation ratios from a sine ramp over [0, 1].
    poses = [
        gen_poses_between(pose_0, pose_1, np.sin(((i / n_frames) - 0.5) * np.pi) * 0.5 + 0.5)
        for i in range(n_frames)
    ]
    render_kwargs.update(dict(
        savedir=image_dir,
        eval_ssim=False, eval_lpips_alex=False, eval_lpips_vgg=False,
        rgb_only=True,
    ))
    # All frames share the first view's resolution and intrinsics.
    HW = HW[:1].repeat(len(poses), 0)
    Ks = Ks[:1].repeat(len(poses), 0)
    rgbs, _ = render_viewpoints(render_poses=torch.from_numpy(np.asarray(poses)).cuda(),
                                HW=HW, Ks=Ks, ndc=ndc, **render_kwargs)
    # Forward pass followed by the reversed frames for a seamless loop.
    images = [rgbs[i] for i in range(n_frames)]
    images += [rgbs[n_frames - i - 1] for i in range(n_frames)]
    h, w, _ = images[0].shape
    imageio.mimwrite(os.path.join(savedir, 'render_{}_{}.mp4'.format(img_idx_0, img_idx_1)),
                     utils.to8b(images), fps=30, quality=8)
def seed_everything():
    """Seed python, numpy and torch RNGs for better reproducibility.

    (some pytorch operations remain non-deterministic even with fixed
    seeds, e.g. the backprop of grid_sample)
    """
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
def load_everything(args, cfg):
    """Load images / poses / camera settings / data split.

    Returns a dict containing only the fields the training loop consumes,
    with images/masks moved to the GPU as float tensors.
    """
    mode = getattr(cfg.data, 'mode', dict())
    if 'train_all' in cfg:
        mode.update(train_all=cfg.train_all)
        print(" * * * Train with all the images: {} * * * ".format(cfg.train_all))
    if 'reso_level' in cfg:
        mode.update(reso_level=cfg.reso_level)
    data_dict = load_data(cfg.data, **mode, white_bg=cfg.data.white_bkgd)

    # Keep only the fields that are consumed downstream.
    kept_keys = {
        'hwf', 'HW', 'Ks', 'near', 'far',
        'i_train', 'i_val', 'i_test', 'irregular_shape',
        'poses', 'render_poses', 'images', 'scale_mats_np', 'masks'}
    data_dict = {k: v for k, v in data_dict.items() if k in kept_keys}

    # Construct data tensors and push them to the GPU.
    to_gpu = lambda im: torch.FloatTensor(im, device='cpu').cuda()
    if data_dict['irregular_shape']:
        # Per-view resolutions differ, so keep a list of tensors.
        data_dict['images'] = [to_gpu(im) for im in data_dict['images']]
        data_dict['masks'] = [to_gpu(im) for im in data_dict['masks']]
    else:
        data_dict['images'] = to_gpu(data_dict['images'])
        data_dict['masks'] = to_gpu(data_dict['masks'])
    data_dict['poses'] = torch.Tensor(data_dict['poses'])
    return data_dict
def compute_bbox_by_cam_frustrm(args, cfg, HW, Ks, poses, i_train, near, far, **kwargs):
    """Bound the scene by the union of all training-camera frustums.

    For every training view, the ray endpoints at the near and far planes
    are accumulated into an axis-aligned bounding box.
    """
    logger.info('compute_bbox_by_cam_frustrm: start')
    xyz_min = torch.Tensor([np.inf, np.inf, np.inf])
    xyz_max = -xyz_min
    for (H, W), K, c2w in zip(HW[i_train], Ks[i_train], poses[i_train]):
        rays_o, _, viewdirs = Model.get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w,
            ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y,
            flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        near_pts = rays_o + viewdirs * near
        far_pts = rays_o + viewdirs * far
        frustum = torch.stack([near_pts, far_pts])
        xyz_min = torch.minimum(xyz_min, frustum.amin((0, 1, 2)))
        xyz_max = torch.maximum(xyz_max, frustum.amax((0, 1, 2)))
    logger.info('compute_bbox_by_cam_frustrm: xyz_min {}'.format(xyz_min))
    logger.info('compute_bbox_by_cam_frustrm: xyz_max {}'.format(xyz_max))
    logger.info('compute_bbox_by_cam_frustrm: finish')
    return xyz_min, xyz_max
@torch.no_grad()
def compute_bbox_by_coarse_geo(model_class, model_path, thres):
    """Tighten the scene bbox to voxels whose coarse-stage alpha exceeds `thres`."""
    logger.info('compute_bbox_by_coarse_geo: start')
    eps_time = time.time()
    model = utils.load_model(model_class, model_path, strict=False)
    # Dense grid of normalized coordinates matching the density-grid resolution.
    axes = [torch.linspace(0, 1, model.density.shape[d]) for d in (2, 3, 4)]
    interp = torch.stack(torch.meshgrid(axes[0], axes[1], axes[2]), -1)
    dense_xyz = model.xyz_min * (1 - interp) + model.xyz_max * interp
    # Evaluate coarse density and keep only sufficiently opaque voxels.
    density = model.grid_sampler(dense_xyz, model.density)
    alpha = model.activate_density(density)
    active_xyz = dense_xyz[alpha > thres]
    xyz_min = active_xyz.amin(0)
    xyz_max = active_xyz.amax(0)
    logger.info('compute_bbox_by_coarse_geo: xyz_min {}'.format(xyz_min))
    logger.info('compute_bbox_by_coarse_geo: xyz_max {}'.format(xyz_max))
    eps_time = time.time() - eps_time
    logger.info('compute_bbox_by_coarse_geo: finish (eps time: {} secs)'.format(eps_time))
    return xyz_min, xyz_max
def scene_rep_reconstruction(args, cfg, cfg_model, cfg_train, xyz_min, xyz_max, data_dict, stage, coarse_ckpt_path=None, use_dvgo=False):
    """Train one reconstruction stage ('coarse' DVGO init or 'surf' SDF stage).

    Builds the voxel model inside [xyz_min, xyz_max], optionally restores or
    initializes from earlier checkpoints/density/SDF grids, then runs the
    main optimization loop (ray sampling, rendering loss, TV regularizers,
    progressive grid upscaling, lr scheduling) and saves the final checkpoint.
    Validation images/meshes are produced periodically for the 'surf' stage.
    """
    logger.info("= "*10 + "Begin training state [ {} ]".format(stage) + " ="*10)
    # init
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if abs(cfg_model.world_bound_scale - 1) > 1e-9:
        # symmetrically enlarge the bbox by the configured scale factor
        xyz_shift = (xyz_max - xyz_min) * (cfg_model.world_bound_scale - 1) / 2
        xyz_min -= xyz_shift
        xyz_max += xyz_shift
    HW, Ks, near, far, i_train, i_val, i_test, poses, render_poses, images, masks = [
        data_dict[k] for k in [
            'HW', 'Ks', 'near', 'far', 'i_train', 'i_val', 'i_test', 'poses', 'render_poses', 'images', 'masks'
        ]
    ]
    print("Train idx", i_train, "\nTest idx", i_test)
    # find whether there is existing checkpoint path
    last_ckpt_path = os.path.join(cfg.basedir, cfg.expname, f'{stage}_last.tar')
    if args.no_reload:
        reload_ckpt_path = None
    elif args.ft_path:
        reload_ckpt_path = args.ft_path
    elif getattr(cfg_train, 'ft_path', ''):
        reload_ckpt_path = cfg_train.ft_path
    elif os.path.isfile(last_ckpt_path):
        reload_ckpt_path = last_ckpt_path
    else:
        reload_ckpt_path = None

    # init model
    model_kwargs = copy.deepcopy(cfg_model)
    scale_ratio = getattr(cfg_train, 'scale_ratio', 2)
    num_voxels = model_kwargs.pop('num_voxels')
    num_voxels_bg = model_kwargs.pop('num_voxels_bg', num_voxels)
    if len(cfg_train.pg_scale) and not args.render_only:
        # start at a reduced resolution; each pg_scale step multiplies it back up
        deduce = (scale_ratio**len(cfg_train.pg_scale))
        num_voxels = int(num_voxels / deduce)
        num_voxels_bg = int(num_voxels_bg / deduce)
        logger.info("\n" + "+ "*10 + "start with {} resolution deduction".format(deduce) + " +"*10 + "\n")
    else:
        deduce = 1

    if use_dvgo:
        # use dvgo init for the w/ mask setting
        model = dvgo_ori.DirectVoxGO(
            xyz_min=xyz_min, xyz_max=xyz_max,
            num_voxels=num_voxels,
            num_voxels_bg=num_voxels_bg,
            mask_cache_path=coarse_ckpt_path,
            exppath=os.path.join(cfg.basedir, cfg.expname),
            **model_kwargs)
    else:
        model = Model.Voxurf(
            xyz_min=xyz_min, xyz_max=xyz_max,
            num_voxels=num_voxels,
            num_voxels_bg=num_voxels_bg,
            mask_cache_path=coarse_ckpt_path,
            exppath=os.path.join(cfg.basedir, cfg.expname),
            **model_kwargs)
    if cfg_model.maskout_near_cam_vox:
        model.maskout_near_cam_vox(poses[i_train,:3,3], near)
    model = model.to(device)

    # init optimizer
    optimizer = utils.create_optimizer_or_freeze_model(model, cfg_train, global_step=0)

    load_density_from = getattr(cfg_train, 'load_density_from', '')
    load_sdf_from = getattr(cfg_train, 'load_sdf_from', '')
    if load_density_from and stage == 'surf':
        density_ckpt_path = os.path.join(cfg.basedir, load_density_from)
        if args.load_density_only:
            model = utils.load_grid_data(model, density_ckpt_path, deduce)
        else:
            reload_ckpt_path = density_ckpt_path
    if reload_ckpt_path is None:
        logger.info(f'scene_rep_reconstruction ({stage}): train from scratch')
        start = 0
    else:
        logger.info(f'scene_rep_reconstruction ({stage}): reload from {reload_ckpt_path}')
        model, optimizer, start = utils.load_checkpoint(
            model, optimizer, reload_ckpt_path, args.no_reload_optimizer, strict=False)
        logger.info("Restart from iteration {}, model sdf size: {}".format(start, model.sdf.grid.shape))
        # checkpoints from a different stage restart the iteration counter
        if reload_ckpt_path.split('/')[-1].split('_')[0] != stage:
            start = 0
        if cfg_train.get('load_param', False):
            model, _, _ = utils.load_checkpoint(
                model, None, cfg_train.load_sdf_from, True, strict=False)

    # init sdf
    if load_sdf_from:
        if hasattr(model, 'init_sdf_from_sdf'):
            sdf_reduce = cfg_train.get('sdf_reduce', 1.0)
            if cfg_train.load_sdf_from == 'auto':
                cfg_train.load_sdf_from = os.path.join(cfg.basedir, cfg.expname0, 'coarse', 'surf_last.tar')
            if cfg_train.get('load_sdf_path', None):
                cfg_train.load_sdf_from = cfg_train.load_sdf_path + 'scan_{}/surf_last.tar'.format(args.scene)
            logger.info("\n" + "+ "*10 + "load sdf from: " + cfg_train.load_sdf_from + "+"*10 + "\n")
            sdf0 = utils.load_grid_data(model, cfg_train.load_sdf_from, name='sdf', return_raw=True)
            model.init_sdf_from_sdf(sdf0, smooth=False, reduce=sdf_reduce)
            if cfg_train.get('load_bg_all', False):
                # restore the full background branch (density grid + weights)
                bg_density0 = utils.load_grid_data(model, cfg_train.load_sdf_from, name='bg_density', return_raw=True)
                model.init_bg_density_from_bg_density(bg_density0)
                utils.load_weight_by_name(model, cfg_train.load_sdf_from, name='bg')
            elif cfg_train.get('load_bg_density', False):
                bg_density0 = utils.load_grid_data(model, cfg_train.load_sdf_from, name='bg_density', return_raw=True)
                model.init_bg_density_from_bg_density(bg_density0)
        else:
            model = utils.load_grid_data(model, cfg_train.load_sdf_from, name='sdf')
            smooth = getattr(model, 'init_sdf_smooth', False)
            if smooth:
                model.sdf = model.smooth_conv(model.sdf)
        # parameters changed, so the optimizer must be rebuilt
        optimizer = utils.create_optimizer_or_freeze_model(model, cfg_train, global_step=0)
    elif args.sdf_mode != "density" and load_density_from:
        smooth = getattr(model, 'init_density_smooth', True)
        model.init_sdf_from_density(smooth=smooth, reduce=1)
        # have to recreate the optimizer
        optimizer = utils.create_optimizer_or_freeze_model(model, cfg_train, global_step=0)

    # initial mesh evaluation
    # if stage == 'surf':
    #     gt_eval = 'dtu' in cfg.basedir
    #     validate_mesh(model, resolution=256, world_space=True, prefix="init", scale_mats_np=data_dict['scale_mats_np'], scene=args.scene, gt_eval=gt_eval)

    # init rendering setup
    render_kwargs = {
        'near': data_dict['near'],
        'far': data_dict['far'],
        'bg': 1 if cfg.data.white_bkgd else 0,
        'stepsize': cfg_model.stepsize,
        'inverse_y': cfg.data.inverse_y,
        'flip_x': cfg.data.flip_x,
        'flip_y': cfg.data.flip_y,
    }

    # init batch rays sampler
    def gather_training_rays():
        # Collect all training rays according to the configured sampling scheme
        # and return them together with a batch-index generator.
        if data_dict['irregular_shape']:
            rgb_tr_ori = [images[i].to('cpu' if cfg.data.load2gpu_on_the_fly else device) for i in i_train]
            mask_tr_ori = [masks[i].to('cpu' if cfg.data.load2gpu_on_the_fly else device) for i in i_train]
        else:
            rgb_tr_ori = images[i_train].to('cpu' if cfg.data.load2gpu_on_the_fly else device)
            mask_tr_ori = masks[i_train].to('cpu' if cfg.data.load2gpu_on_the_fly else device)

        if cfg_train.ray_sampler == 'in_maskcache':
            rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz = Model.get_training_rays_in_maskcache_sampling(
                rgb_tr_ori=rgb_tr_ori,
                train_poses=poses[i_train],
                HW=HW[i_train], Ks=Ks[i_train],
                ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y,
                flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y,
                model=model, render_kwargs=render_kwargs,
            )
        elif cfg_train.ray_sampler == 'flatten':
            rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz = Model.get_training_rays_flatten(
                rgb_tr_ori=rgb_tr_ori,
                train_poses=poses[i_train],
                HW=HW[i_train], Ks=Ks[i_train], ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y,
                flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        else:
            rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz = Model.get_training_rays(
                rgb_tr=rgb_tr_ori,
                train_poses=poses[i_train],
                HW=HW[i_train], Ks=Ks[i_train], ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y,
                flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        index_generator = Model.batch_indices_generator(len(rgb_tr), cfg_train.N_rand)
        if cfg_train.ray_sampler == 'patch':
            # patch sampler contains lots of empty spaces, remove them.
            index_generator = Model.batch_indices_generator(len(rgb_tr), 1)
        batch_index_sampler = lambda: next(index_generator)
        return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz, batch_index_sampler

    rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz, batch_index_sampler = gather_training_rays()

    if cfg_train.pervoxel_lr:
        def per_voxel_init():
            # scale each voxel's lr by how often it is seen; rarely-seen
            # voxels (cnt <= 2) are suppressed with a large negative density
            cnt = model.voxel_count_views(
                rays_o_tr=rays_o_tr, rays_d_tr=rays_d_tr, imsz=imsz, near=near, far=far,
                stepsize=cfg_model.stepsize, downrate=cfg_train.pervoxel_lr_downrate,
                irregular_shape=data_dict['irregular_shape'])
            optimizer.set_pervoxel_lr(cnt)
            with torch.no_grad():
                model.density[cnt <= 2] = -100
        per_voxel_init()

    # GOGO
    psnr_lst = []
    weight_lst = []
    mask_lst = []
    bg_mask_lst = []
    weight_sum_lst = []
    weight_nonzero_lst = []
    s_val_lst = []
    time0 = time.time()
    logger.info("start: {} end: {}".format(1 + start, 1 + cfg_train.N_iters))
    for global_step in trange(1+start, 1+cfg_train.N_iters):
        # progress scaling checkpoint
        if global_step in cfg_train.pg_scale:
            if hasattr(model, 'num_voxels_bg'):
                model.scale_volume_grid(model.num_voxels * scale_ratio, model.num_voxels_bg * scale_ratio)
            else:
                model.scale_volume_grid(model.num_voxels * scale_ratio)
            # grid tensors were replaced, so rebuild the optimizer
            optimizer = utils.create_optimizer_or_freeze_model(model, cfg_train, global_step=0)

        # random sample rays
        if cfg_train.ray_sampler in ['flatten', 'in_maskcache']:
            sel_i = batch_index_sampler()
            target = rgb_tr[sel_i]
            rays_o = rays_o_tr[sel_i]
            rays_d = rays_d_tr[sel_i]
            viewdirs = viewdirs_tr[sel_i]
        elif cfg_train.ray_sampler == 'patch':
            sel_b = batch_index_sampler()
            patch_size = cfg_train.N_patch
            sel_r_start = torch.randint(rgb_tr.shape[1] - patch_size, [1])
            sel_c_start = torch.randint(rgb_tr.shape[2] - patch_size, [1])
            sel_r, sel_c = torch.meshgrid(torch.arange(sel_r_start[0], sel_r_start[0] + patch_size),
                                          torch.arange(sel_c_start[0], sel_c_start[0] + patch_size))
            sel_r, sel_c = sel_r.reshape(-1), sel_c.reshape(-1)
            target = rgb_tr[sel_b, sel_r, sel_c]
            rays_o = rays_o_tr[sel_b, sel_r, sel_c]
            rays_d = rays_d_tr[sel_b, sel_r, sel_c]
            viewdirs = viewdirs_tr[sel_b, sel_r, sel_c]
        elif cfg_train.ray_sampler == 'random':
            sel_b = torch.randint(rgb_tr.shape[0], [cfg_train.N_rand])
            sel_r = torch.randint(rgb_tr.shape[1], [cfg_train.N_rand])
            sel_c = torch.randint(rgb_tr.shape[2], [cfg_train.N_rand])
            target = rgb_tr[sel_b, sel_r, sel_c]
            rays_o = rays_o_tr[sel_b, sel_r, sel_c]
            rays_d = rays_d_tr[sel_b, sel_r, sel_c]
            viewdirs = viewdirs_tr[sel_b, sel_r, sel_c]
        else:
            raise NotImplementedError

        if cfg.data.load2gpu_on_the_fly:
            target = target.to(device)
            rays_o = rays_o.to(device)
            rays_d = rays_d.to(device)
            viewdirs = viewdirs.to(device)

        # volume rendering
        render_result = model(rays_o, rays_d, viewdirs, global_step=global_step, **render_kwargs)

        # gradient descent step
        optimizer.zero_grad(set_to_none=True)
        loss = cfg_train.weight_main * F.mse_loss(render_result['rgb_marched'], target)
        psnr = utils.mse2psnr(loss.detach()).item()
        if cfg_train.weight_entropy_last > 0:
            # entropy on the last transmittance: push rays to be fully
            # opaque or fully transparent
            pout = render_result['alphainv_cum'][...,-1].clamp(1e-6, 1-1e-6)
            entropy_last_loss = -(pout*torch.log(pout) + (1-pout)*torch.log(1-pout)).mean()
            loss += cfg_train.weight_entropy_last * entropy_last_loss
        if cfg_train.weight_rgbper > 0:
            # per-point color consistency with the target, weighted by
            # detached rendering weights
            rgbper = (render_result['raw_rgb'] - target.unsqueeze(-2)).pow(2).sum(-1)
            rgbper_loss = (rgbper * render_result['weights'].detach()).sum(-1).mean()
            loss += cfg_train.weight_rgbper * rgbper_loss

        if global_step>cfg_train.tv_from and global_step<cfg_train.tv_end and global_step%cfg_train.tv_every==0:
            if cfg_train.weight_tv_density>0:
                tv_terms = getattr(cfg_train, 'tv_terms', dict())
                sdf_tv, smooth_grad_tv = tv_terms['sdf_tv'], tv_terms['smooth_grad_tv']
                if smooth_grad_tv > 0:
                    loss += cfg_train.weight_tv_density * model.density_total_variation(sdf_tv=0, smooth_grad_tv=smooth_grad_tv)
                if getattr(cfg_train, 'ori_tv', False):
                    # original (in-graph) TV formulation; otherwise TV grads
                    # are added manually after backward (see below)
                    loss += cfg_train.weight_tv_density * model.density_total_variation(sdf_tv=sdf_tv, smooth_grad_tv=0)
                    weight_tv_k0 = getattr(cfg_train, 'weight_tv_k0')
                    if weight_tv_k0 > 0:
                        k0_tv_terms = getattr(cfg_train, 'k0_tv_terms', dict())
                        loss += cfg_train.weight_tv_k0 * model.k0_total_variation(**k0_tv_terms)
                    if getattr(tv_terms, 'bg_density_tv', 0):
                        loss += cfg_train.weight_tv_density * model.density_total_variation(sdf_tv=0, smooth_grad_tv=0, bg_density_tv=tv_terms['bg_density_tv'])
        if getattr(cfg_train, 'ori_tv', False) and cfg_train.get('weight_bg_tv_k0', 0) >0 and global_step>cfg_train.tv_from and global_step%cfg_train.tv_every==0 and global_step<cfg_train.tv_end:
            bg_k0_tv_terms = getattr(cfg_train, 'bg_k0_tv_terms', dict())
            loss += cfg_train.get('weight_bg_tv_k0', 0) * model.bg_k0_total_variation(**bg_k0_tv_terms)
        if getattr(cfg_train, 'weight_rgb0', 0.) > 0:
            loss += F.mse_loss(render_result['rgb_marched0'], target) * cfg_train.weight_rgb0
        loss.backward()

        # make sure that density has no grad
        if global_step>cfg_train.tv_from and global_step<cfg_train.tv_end and global_step%cfg_train.tv_every==0:
            if not getattr(cfg_train, 'ori_tv', False):
                # add TV gradients directly to the grids (after backward, so
                # they do not interact with autograd)
                if cfg_train.weight_tv_density>0:
                    tv_terms = getattr(cfg_train, 'tv_terms', dict())
                    sdf_tv = tv_terms['sdf_tv']
                    if sdf_tv > 0:
                        model.sdf_total_variation_add_grad(
                            cfg_train.weight_tv_density * sdf_tv / len(rays_o), global_step < cfg_train.tv_dense_before)
                    bg_density_tv = getattr(tv_terms, 'bg_density_tv', 0)
                    if bg_density_tv > 0:
                        model.bg_density_total_variation_add_grad(
                            cfg_train.weight_tv_density * bg_density_tv / len(rays_o), global_step < cfg_train.tv_dense_before)
                if cfg_train.weight_tv_k0 > 0:
                    model.k0_total_variation_add_grad(
                        cfg_train.weight_tv_k0 / len(rays_o), global_step < cfg_train.tv_dense_before)
                if getattr(cfg_train, 'weight_bg_tv_k0', 0) > 0:
                    model.bg_k0_total_variation_add_grad(
                        cfg_train.weight_bg_tv_k0 / len(rays_o), global_step < cfg_train.tv_dense_before)

        optimizer.step()
        # per-batch statistics: max weight / summed weight along each ray
        wm = render_result['weights'].max(-1)[0]
        ws = render_result['weights'].sum(-1)
        if (wm>0).float().mean() > 0:
            psnr_lst.append(psnr)
            weight_lst.append(wm[wm>0].mean().detach().cpu().numpy())
            weight_sum_lst.append(ws[ws>0].mean().detach().cpu().numpy())
            weight_nonzero_lst.append((ws>0).float().mean().detach().cpu().numpy())
            mask_lst.append(render_result['mask'].float().mean().detach().cpu().numpy())
            if 'bg_mask' in render_result:
                bg_mask_lst.append(render_result['bg_mask'].float().mean().detach().cpu().numpy())
        s_val = render_result["s_val"] if "s_val" in render_result else 0
        s_val_lst.append(s_val)

        # writer.add_scalar('train/psnr', psnr, global_step)
        # writer.add_scalar('train/s_val', s_val, global_step)
        # writer.add_scalar('train/mask', mask_lst[-1], global_step)

        global_step_ = global_step - 1
        # update lr
        N_iters = cfg_train.N_iters
        if not getattr(cfg_train, 'cosine_lr', ''):
            # exponential decay: 0.1x over lrate_decay*1000 steps
            decay_steps = cfg_train.lrate_decay * 1000
            decay_factor = 0.1 ** (1/decay_steps)
            for i_opt_g, param_group in enumerate(optimizer.param_groups):
                param_group['lr'] = param_group['lr'] * decay_factor
        else:
            def cosine_lr_func(iter, warm_up_iters, warm_up_min_ratio, max_steps, const_warm_up=False, min_ratio=0):
                # linear (or constant) warm-up followed by cosine annealing
                if iter < warm_up_iters:
                    if not const_warm_up:
                        lr = warm_up_min_ratio + (1 - warm_up_min_ratio) * (iter / warm_up_iters)
                    else:
                        lr = warm_up_min_ratio
                else:
                    lr = (1 + math.cos((iter - warm_up_iters) / (max_steps - warm_up_iters) * math.pi)) * 0.5 * (1 - min_ratio) + min_ratio
                return lr

            def extra_warm_up_func(iter, start_iter, warm_up_iters, warm_up_min_ratio):
                if iter >= start_iter:
                    extra_lr = warm_up_min_ratio + (1 - warm_up_min_ratio) * (iter - start_iter) / warm_up_iters
                    return min(extra_lr, 1.0)
                else:
                    return 1.0

            warm_up_iters = cfg_train.cosine_lr_cfg.get('warm_up_iters', 0)
            warm_up_min_ratio = cfg_train.cosine_lr_cfg.get('warm_up_min_ratio', 1.0)
            const_warm_up = cfg_train.cosine_lr_cfg.get('const_warm_up', False)
            cos_min_ratio = cfg_train.cosine_lr_cfg.get('cos_min_ratio', False)

            # apply the ratio between consecutive schedule values so the
            # schedule composes with any per-group lr already set
            if global_step == 0:
                pre_decay_factor = 1.0
            else:
                pre_decay_factor = cosine_lr_func(global_step_ - 1, warm_up_iters, warm_up_min_ratio, N_iters, const_warm_up, cos_min_ratio)
            pos_decay_factor = cosine_lr_func(global_step_, warm_up_iters, warm_up_min_ratio, N_iters, const_warm_up, cos_min_ratio)
            decay_factor = pos_decay_factor / pre_decay_factor
            for i_opt_g, param_group in enumerate(optimizer.param_groups):
                param_group['lr'] = param_group['lr'] * decay_factor

        decay_step_module = getattr(cfg_train, 'decay_step_module', dict())
        if global_step_ in decay_step_module:
            for i_opt_g, param_group in enumerate(optimizer.param_groups):
                if param_group['name'] in decay_step_module[global_step_]:
                    decay_factor = decay_step_module[global_step_][param_group['name']]
                    param_group['lr'] = param_group['lr'] * decay_factor
                    logger.info('- '*10 + '[Decay lrate] for {} by {}'.format(param_group['name'], decay_factor) + ' -'*10)

        # update tv terms
        tv_updates = getattr(cfg_train, 'tv_updates', dict())
        if global_step_ in tv_updates:
            for tv_term, value in tv_updates[global_step_].items():
                setattr(cfg_train.tv_terms, tv_term, value)
            logger.info('- '*10 + '[Update tv]: ' + str(tv_updates[global_step_]) + ' -'*10)

        # update s_val func
        s_updates = getattr(cfg_model, 's_updates', dict())
        if global_step_ in s_updates:
            for s_term, value in s_updates[global_step_].items():
                setattr(model, s_term, value)
            logger.info('- '*10 + '[Update s]: ' + str(s_updates[global_step_]) + ' -'*10)

        # update smooth kernel
        smooth_updates = getattr(cfg_model, 'smooth_updates', dict())
        if global_step_ in smooth_updates:
            model.init_smooth_conv(**smooth_updates[global_step_])
            logger.info('- '*10 + '[Update smooth conv]: ' + str(smooth_updates[global_step_]) + ' -'*10)

        # check log & save
        if global_step%args.i_print==0:
            eps_time = time.time() - time0
            eps_time_str = f'{eps_time//3600:02.0f}:{eps_time//60%60:02.0f}:{eps_time%60:02.0f}'
            bg_mask_mean = 0. if len(bg_mask_lst) == 0 else np.mean(bg_mask_lst)
            logger.info(f'scene_rep_reconstruction ({stage}): iter {global_step:6d} / '
                        f'Loss: {loss.item():.9f} / PSNR: {np.mean(psnr_lst):5.2f} / '
                        f'Wmax: {np.mean(weight_lst):5.2f} / Wsum: {np.mean(weight_sum_lst):5.2f} / W>0: {np.mean(weight_nonzero_lst):5.2f}'
                        f' / s_val: {np.mean(s_val_lst):5.2g} / mask\%: {100*np.mean(mask_lst):1.2f} / bg_mask\%: {100*bg_mask_mean:1.2f} '
                        f'Eps: {eps_time_str}')
            psnr_lst, weight_lst, weight_sum_lst, weight_nonzero_lst, mask_lst, bg_mask_lst, s_val_lst = [], [], [], [], [], [], []

        # validate image
        if global_step%args.i_validate==0 and global_step != cfg_train.N_iters and stage == 'surf' and 'fine' in args.sdf_mode:
            render_viewpoints_kwargs = {
                'model': model,
                'ndc': cfg.data.ndc,
                'render_kwargs': {
                    'near': data_dict['near'],
                    'far': data_dict['far'],
                    'bg': 1 if cfg.data.white_bkgd else 0,
                    'stepsize': cfg_model.stepsize,
                    'inverse_y': cfg.data.inverse_y,
                    'flip_x': cfg.data.flip_x,
                    'flip_y': cfg.data.flip_y,
                    'render_grad': True,
                    'render_depth': True,
                    'render_in_out': True,
                },
            }
            validate_image(cfg, stage, global_step, data_dict, render_viewpoints_kwargs, eval_all=cfg_train.N_iters==global_step)

        # validate mesh
        prefix = args.prefix + '_' if args.prefix else ''
        prefix += args.suffix + '_' if args.suffix else ''
        if 'eval_iters' in cfg_train and stage == 'surf':
            if global_step - start in cfg_train.eval_iters and stage == 'surf':
                gt_eval = 'dtu' in cfg.basedir
                cd = validate_mesh(model, resolution=256,
                                   prefix="{}{}_fine".format(prefix, global_step),
                                   gt_eval=gt_eval,
                                   world_space=True,
                                   scale_mats_np=data_dict['scale_mats_np'],
                                   scene=args.scene)

        # save checkpoints
        if global_step == cfg_train.N_iters:
            torch.save({
                'global_step': global_step,
                'model_kwargs': model.get_kwargs(),
                'MaskCache_kwargs': model.get_MaskCache_kwargs(),
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }, last_ckpt_path)
            logger.info(f'scene_rep_reconstruction ({stage}): saved checkpoints at '+ last_ckpt_path)

    # final mesh validation
    if global_step == cfg_train.N_iters and stage == 'surf' and 'fine' in args.sdf_mode:
        validate_mesh(model, 512, threshold=0.0, prefix="{}final".format(prefix), world_space=True,
                      scale_mats_np=data_dict['scale_mats_np'], gt_eval='dtu' in cfg.basedir, runtime=False, scene=args.scene)
def train(args, cfg, data_dict):
    """Full training driver: optional DVGO coarse stage, then the Voxurf surface stage.

    Snapshots the CLI args and config into the experiment folder, computes the
    scene bounding box (from the coarse model or a fixed box), and logs
    wall-clock timings. Relies on the module-level `logger`,
    `compute_bbox_by_cam_frustrm`, `compute_bbox_by_coarse_geo` and
    `scene_rep_reconstruction`.
    """
    # init
    logger.info('train: start')
    eps_time = time.time()
    # snapshot the CLI arguments and the resolved config next to the checkpoints
    with open(os.path.join(cfg.basedir, cfg.expname, 'args.txt'), 'w') as file:
        for arg in sorted(vars(args)):
            attr = getattr(args, arg)
            file.write('{} = {}\n'.format(arg, attr))
    cfg.dump(os.path.join(cfg.basedir, cfg.expname, 'config.py'))
    if args.run_dvgo_init:
        # coarse geometry searching
        eps_coarse = time.time()
        xyz_min_coarse, xyz_max_coarse = compute_bbox_by_cam_frustrm(args=args, cfg=cfg, **data_dict)
        scene_rep_reconstruction(
                args=args, cfg=cfg,
                cfg_model=cfg.coarse_model_and_render, cfg_train=cfg.coarse_train,
                xyz_min=xyz_min_coarse, xyz_max=xyz_max_coarse,
                data_dict=data_dict, stage='coarse', use_dvgo=True)
        eps_coarse = time.time() - eps_coarse
        eps_time_str = f'{eps_coarse//3600:02.0f}:{eps_coarse//60%60:02.0f}:{eps_coarse%60:02.0f}'
        logger.info("+ "*10 + 'train: coarse geometry searching in' + eps_time_str + " +"*10 )
    # the coarse checkpoint is read even when the coarse stage was run in a previous invocation
    coarse_expname = cfg.expname0 + '/coarse'
    coarse_ckpt_path = os.path.join(cfg.basedir, coarse_expname, f'coarse_last.tar')
    logger.info("+ "*10 + 'coarse_expname' + coarse_expname + " +"*10)
    if args.no_dvgo_init:
        # for the w\o mask setting
        box_size_ = cfg.surf_train.get('box_size', 1.5)
        print(">>> box_size: ", box_size_)
        xyz_min_fine, xyz_max_fine = torch.tensor([-box_size_,-box_size_,-box_size_]).cuda(), torch.tensor([box_size_, box_size_, box_size_]).cuda()
    else:
        # tighten the box around the coarse density field
        xyz_min_fine, xyz_max_fine = compute_bbox_by_coarse_geo(
                model_class=dvgo_ori.DirectVoxGO, model_path=coarse_ckpt_path,
                thres=cfg.fine_model_and_render.bbox_thres)
    if hasattr(cfg, 'surf_train'):
        eps_surf = time.time()
        scene_rep_reconstruction(
                args=args, cfg=cfg,
                cfg_model=cfg.surf_model_and_render, cfg_train=cfg.surf_train,
                xyz_min=xyz_min_fine, xyz_max=xyz_max_fine,
                data_dict=data_dict, stage='surf',
                coarse_ckpt_path=coarse_ckpt_path)
        eps_surf = time.time() - eps_surf
        eps_time_str = f'{eps_surf//3600:02.0f}:{eps_surf//60%60:02.0f}:{eps_surf%60:02.0f}'
        logger.info("+ "*10 + 'train: fine detail reconstruction in' + eps_time_str + " +"*10 )
    eps_time = time.time() - eps_time
    eps_time_str = f'{eps_time//3600:02.0f}:{eps_time//60%60:02.0f}:{eps_time%60:02.0f}'
    logger.info('train: finish (eps time' + eps_time_str + ')')
def validate_image(cfg, stage, step, data_dict, render_viewpoints_kwargs, eval_all=True):
    """Render one randomly picked test view and log its image metrics.

    Relies on the module-level `args`, `logger` and `render_viewpoints`.
    LPIPS metrics are only computed when `eval_all` is True (they are slow),
    and only if the corresponding CLI flag is set.
    """
    testsavedir = os.path.join(cfg.basedir, cfg.expname, f'render_test_{stage}')
    os.makedirs(testsavedir, exist_ok=True)
    rand_idx = random.randint(0, len(data_dict['poses'][data_dict['i_test']])-1)
    logger.info("validating test set idx: {}".format(rand_idx))
    eval_lpips_alex = args.eval_lpips_alex and eval_all
    # BUGFIX: this previously read args.eval_lpips_alex, so --eval_lpips_vgg was ignored.
    eval_lpips_vgg = args.eval_lpips_vgg and eval_all
    rgbs, disps = render_viewpoints(
        render_poses=data_dict['poses'][data_dict['i_test']][rand_idx][None],
        HW=data_dict['HW'][data_dict['i_test']][rand_idx][None],
        Ks=data_dict['Ks'][data_dict['i_test']][rand_idx][None],
        gt_imgs=[data_dict['images'][i].cpu().numpy() for i in data_dict['i_test']][rand_idx][None],
        masks=[data_dict['masks'][i].cpu().numpy() for i in data_dict['i_test']][rand_idx][None],
        savedir=testsavedir,
        eval_ssim=args.eval_ssim, eval_lpips_alex=eval_lpips_alex, eval_lpips_vgg=eval_lpips_vgg, idx=rand_idx, step=step,
        **render_viewpoints_kwargs)
def validate_mesh(model, resolution=128, threshold=0.0, prefix="", world_space=False,
                  scale_mats_np=None, gt_eval=False, runtime=True, scene=122, smooth=True,
                  extract_color=False):
    """Run marching cubes on the model's SDF, save a PLY mesh, optionally evaluate vs. DTU GT.

    Returns the overall chamfer score when `gt_eval` is True, else 0.
    Relies on the module-level `cfg`, `logger`, `dtu_eval` and `trimesh`.
    """
    os.makedirs(os.path.join(cfg.basedir, cfg.expname, 'meshes'), exist_ok=True)
    bound_min = model.xyz_min.clone().detach().float()
    bound_max = model.xyz_max.clone().detach().float()
    gt_path = os.path.join(cfg.data.datadir, "stl_total.ply") if gt_eval else ''
    vertices0, triangles = model.extract_geometry(bound_min, bound_max, resolution=resolution,
                                                  threshold=threshold, scale_mats_np=scale_mats_np,
                                                  gt_path=gt_path, smooth=smooth,
                                                  )
    if world_space and scale_mats_np is not None:
        # NOTE(review): this indexes scale_mats_np as a single 4x4 (uniform scale at
        # [0, 0], translation in [:3, 3]) — confirm the loader passes one matrix,
        # not a list of per-view matrices.
        vertices = vertices0 * scale_mats_np[0, 0] + scale_mats_np[:3, 3][None]
    else:
        vertices = vertices0
    if extract_color:
        # use normal direction as the viewdir
        ray_pts = torch.from_numpy(vertices0).cuda().float().split(8192 * 32, 0)
        vertex_colors = [model.mesh_color_forward(pts) for pts in ray_pts]
        vertex_colors = (torch.concat(vertex_colors).cpu().detach().numpy() * 255.).astype( np.uint8)
        mesh = trimesh.Trimesh(vertices, triangles, vertex_colors=vertex_colors)
    else:
        mesh = trimesh.Trimesh(vertices, triangles)
    mesh_path = os.path.join(cfg.basedir, cfg.expname, 'meshes', "{}_".format(scene)+prefix+'.ply')
    mesh.export(mesh_path)
    logger.info("mesh saved at " + mesh_path)
    if gt_eval:
        mean_d2s, mean_s2d, over_all = dtu_eval.eval(mesh_path, scene=scene, eval_dir=os.path.join(cfg.basedir, cfg.expname, 'meshes'),
                                                     dataset_dir='data/DTU', suffix=prefix+'eval', use_o3d=False, runtime=runtime)
        res = "standard point cloud sampling" if not runtime else "down sampled point cloud for fast eval (NOT standard!):"
        logger.info("mesh evaluation with {}".format(res))
        logger.info(" [ d2s: {:.3f} | s2d: {:.3f} | mean: {:.3f} ]".format(mean_d2s, mean_s2d, over_all))
        return over_all
    return 0.
if __name__=='__main__':
    # load setup
    parser = config_parser()
    args = parser.parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # reset the root by the scene id
    if args.scene:
        cfg.expname += "{}".format(args.scene)
        cfg.data.datadir += "{}".format(args.scene)
    # expname0 keeps the scene-level name; expname gains the stage subfolder
    cfg.expname0 = cfg.expname
    cfg.expname = cfg.expname + '/' + cfg.exp_stage
    if args.suffix:
        cfg.expname += "_" + args.suffix
    cfg.load_expname = args.load_expname if args.load_expname else cfg.expname
    # set up tensorboard
    writer_dir = os.path.join(cfg.basedir, cfg.expname0, 'logs_all', cfg.expname)
    # writer = SummaryWriter(log_dir=writer_dir)
    # set up the logger and tensorboard
    cfg.basedir0 = cfg.basedir
    if args.prefix:
        cfg.basedir = os.path.join(cfg.basedir, args.prefix)
    log_dir = os.path.join(cfg.basedir, cfg.expname, 'log')
    os.makedirs(log_dir, exist_ok=True)
    now = datetime.now()
    time_str = now.strftime('%Y-%m-%d_%H-%M-%S')
    # one timestamped log file per run
    logger = get_root_logger(logging.INFO, handlers=[
        logging.FileHandler(os.path.join(log_dir, '{}_train.log').format(time_str))])
    logger.info("+ "*10 + cfg.expname + " +"*10)
    logger.info("+ "*10 + log_dir + " +"*10)
    # set white or black color
    # NOTE(review): the membership tests cast args.scene to int while the asserts
    # compare the raw (string?) value against the other list — confirm scene ids in
    # white_list/black_list are ints and args.scene is their string form.
    if cfg.get('use_sp_color', False):
        assert 'white_list' in cfg and 'black_list' in cfg
        if int(args.scene) in cfg['white_list']:
            assert args.scene not in cfg['black_list']
            cfg.data.white_bkgd = True
            logger.info("+ "*10 + str(args.scene) + ' white bg ' + " +"*10)
        if int(args.scene) in cfg['black_list']:
            assert args.scene not in cfg['white_list']
            cfg.data.white_bkgd = False
            logger.info("+ "*10 + str(args.scene) + ' black bg ' + " +"*10)
    # init environment
    if torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    seed_everything()
    if getattr(cfg, 'load_expname', None) is None:
        cfg.load_expname = args.load_expname if args.load_expname else cfg.expname
    logger.info(cfg.load_expname)
    # archive the entry script, config and selected model file for reproducibility
    os.makedirs(os.path.join(cfg.basedir, cfg.expname, 'recording'), exist_ok=True)
    if not args.render_only or args.mesh_from_sdf:
        copyfile('run.py', os.path.join(cfg.basedir, cfg.expname, 'recording', 'run.py'))
        copyfile(args.config, os.path.join(cfg.basedir, cfg.expname, 'recording', args.config.split('/')[-1]))
    import lib.dvgo_ori as dvgo_ori
    # select the Voxurf variant by CLI flag; the chosen module is used as `Model` below
    if args.sdf_mode == "voxurf_coarse":
        import lib.voxurf_coarse as Model
        copyfile('lib/voxurf_coarse.py', os.path.join(cfg.basedir, cfg.expname, 'recording','voxurf_coarse.py'))
    elif args.sdf_mode == "voxurf_fine":
        import lib.voxurf_fine as Model
        copyfile('lib/voxurf_fine.py', os.path.join(cfg.basedir, cfg.expname, 'recording','voxurf_fine.py'))
    elif args.sdf_mode == "voxurf_womask_coarse":
        import lib.voxurf_womask_coarse as Model
        copyfile('lib/voxurf_womask_coarse.py', os.path.join(cfg.basedir, cfg.expname, 'recording','voxurf_womask_coarse.py'))
    elif args.sdf_mode == "voxurf_womask_fine":
        import lib.voxurf_womask_fine as Model
        copyfile('lib/voxurf_womask_fine.py', os.path.join(cfg.basedir, cfg.expname, 'recording','voxurf_womask_fine.py'))
    else:
        raise NameError
    # load images / poses / camera settings / data split
    data_dict = load_everything(args=args, cfg=cfg)
    # export scene bbox and camera poses in 3d for debugging and visualization
    if args.export_bbox_and_cams_only:
        logger.info('Export bbox and cameras...')
        xyz_min, xyz_max = compute_bbox_by_cam_frustrm(args=args, cfg=cfg, **data_dict)
        poses, HW, Ks, i_train = data_dict['poses'], data_dict['HW'], data_dict['Ks'], data_dict['i_train']
        near, far = data_dict['near'], data_dict['far']
        cam_lst = []
        for c2w, (H, W), K in zip(poses[i_train], HW[i_train], Ks[i_train]):
            rays_o, rays_d, viewdirs = Model.get_rays_of_a_view(
                    H, W, K, c2w, cfg.data.ndc, inverse_y=cfg.data.inverse_y,
                    flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y,)
            # camera center plus the four image-corner ray endpoints
            cam_o = rays_o[0,0].cpu().numpy()
            cam_d = rays_d[[0,0,-1,-1],[0,-1,0,-1]].cpu().numpy()
            cam_lst.append(np.array([cam_o, *(cam_o+cam_d*max(near, far*0.05))]))
        np.savez_compressed(args.export_bbox_and_cams_only,
            xyz_min=xyz_min.cpu().numpy(), xyz_max=xyz_max.cpu().numpy(),
            cam_lst=np.array(cam_lst))
        logger.info('done')
        sys.exit()
    # extract a high-resolution mesh from an existing checkpoint, then exit
    if args.mesh_from_sdf:
        logger.info('Extracting mesh from sdf...')
        with torch.no_grad():
            # prefer the surface-stage checkpoint; fall back to the fine stage
            ckpt_path = os.path.join(cfg.basedir, cfg.expname, 'surf_last.tar')
            if os.path.exists(ckpt_path):
                new_kwargs = cfg.surf_model_and_render
            else:
                ckpt_path = os.path.join(cfg.basedir, cfg.expname, 'fine_last.tar')
                new_kwargs = cfg.fine_model_and_render
            model = utils.load_model(Model.Voxurf, ckpt_path, new_kwargs).to(device)
        prefix = args.prefix + '_' if args.prefix else ''
        prefix += args.suffix + '_' if args.suffix else ''
        gt_eval = 'dtu' in cfg.basedir
        validate_mesh(model, 512, threshold=0.0, prefix="{}final_mesh".format(prefix), world_space=True,
                      scale_mats_np=data_dict['scale_mats_np'], gt_eval=gt_eval, runtime=False, scene=args.scene, extract_color=args.extract_color)
        logger.info('done')
        sys.exit()
    # train
    if not args.render_only:
        train(args, cfg, data_dict)
    # load model for rendering
    if args.render_test or args.render_train or args.render_video or args.interpolate:
        # checkpoint priority: explicit --ft_path, then surf stage, then fine stage
        if args.ft_path:
            ckpt_path = args.ft_path
            new_kwargs = cfg.fine_model_and_render
        elif hasattr(cfg, 'surf_train'):
            ckpt_path = os.path.join(cfg.basedir, cfg.expname, 'surf_last.tar')
            new_kwargs = cfg.surf_model_and_render
        else:
            ckpt_path = os.path.join(cfg.basedir, cfg.expname, 'fine_last.tar')
            new_kwargs = cfg.fine_model_and_render
        ckpt_name = ckpt_path.split('/')[-1][:-4]
        print(">>> Loading from {}".format(ckpt_path))
        model = utils.load_model(Model.Voxurf, ckpt_path, new_kwargs).to(device)
        stepsize = cfg.fine_model_and_render.stepsize
        # shared kwargs for all render_viewpoints calls below
        render_viewpoints_kwargs = {
            'model': model,
            'ndc': cfg.data.ndc,
            'render_kwargs': {
                'near': data_dict['near'],
                'far': data_dict['far'],
                'bg': 1 if cfg.data.white_bkgd else 0,
                'stepsize': stepsize,
                'inverse_y': cfg.data.inverse_y,
                'flip_x': cfg.data.flip_x,
                'flip_y': cfg.data.flip_y,
                'render_grad': True,
                'render_depth': True,
                'render_in_out': True,
            },
        }
    # render an interpolated camera path between two test indices, e.g. --interpolate 3_7
    if args.interpolate:
        img_idx_0, img_idx_1 = args.interpolate.split('_')
        img_idx_0 = int(img_idx_0)
        img_idx_1 = int(img_idx_1)
        savedir = os.path.join(cfg.basedir, cfg.expname, f'interpolate_{img_idx_0}_{img_idx_1}')
        interpolate_view(savedir, img_idx_0, img_idx_1,
                     render_poses=data_dict['poses'],
                     HW=data_dict['HW'][data_dict['i_test']][[0]].repeat(len(data_dict['render_poses']), 0),
                     Ks=data_dict['Ks'][data_dict['i_test']][[0]].repeat(len(data_dict['render_poses']), 0),
                     render_factor=args.render_video_factor,
                     **render_viewpoints_kwargs
                     )
    # render trainset and eval
    if args.render_train:
        testsavedir = os.path.join(cfg.basedir, cfg.expname, f'render_train_{ckpt_name}')
        os.makedirs(testsavedir, exist_ok=True)
        rgbs, disps = render_viewpoints(
                render_poses=data_dict['poses'][data_dict['i_train']],
                HW=data_dict['HW'][data_dict['i_train']],
                Ks=data_dict['Ks'][data_dict['i_train']],
                gt_imgs=[data_dict['images'][i].cpu().numpy() for i in data_dict['i_train']],
                masks=data_dict['masks'],
                savedir=testsavedir,
                eval_ssim=args.eval_ssim, eval_lpips_alex=args.eval_lpips_alex, eval_lpips_vgg=args.eval_lpips_vgg,
                **render_viewpoints_kwargs)
        # write rgb and (normalized) disparity videos of the rendered set
        imageio.mimwrite(os.path.join(testsavedir, 'video.rgb.mp4'), utils.to8b(rgbs), fps=30, quality=8)
        imageio.mimwrite(os.path.join(testsavedir, 'video.disp.mp4'), utils.to8b(disps / np.max(disps)), fps=30, quality=8)
    # render testset and eval
    if args.render_test:
        testsavedir = os.path.join(cfg.basedir, cfg.expname, f'render_test_{ckpt_name}')
        os.makedirs(testsavedir, exist_ok=True)
        rgbs, disps = render_viewpoints(
                render_poses=data_dict['poses'][data_dict['i_test']],
                HW=data_dict['HW'][data_dict['i_test']],
                Ks=data_dict['Ks'][data_dict['i_test']],
                gt_imgs=[data_dict['images'][i].cpu().numpy() for i in data_dict['i_test']],
                masks=[data_dict['masks'][i].cpu().numpy() for i in data_dict['i_test']],
                savedir=testsavedir,
                eval_ssim=args.eval_ssim, eval_lpips_alex=args.eval_lpips_alex, eval_lpips_vgg=args.eval_lpips_vgg,
                **render_viewpoints_kwargs)
        imageio.mimwrite(os.path.join(testsavedir, 'video.rgb.mp4'), utils.to8b(rgbs), fps=30, quality=8)
        imageio.mimwrite(os.path.join(testsavedir, 'video.disp.mp4'), utils.to8b(disps / np.max(disps)), fps=30, quality=8)
    # render video
    if args.render_video:
        assert 'dtu' not in cfg.basedir, 'please try --interpolate for the DTU dataset.'
        testsavedir = os.path.join(cfg.basedir, cfg.expname, f'render_video_{ckpt_name}')
        os.makedirs(testsavedir, exist_ok=True)
        # all frames reuse the first test view's HW/K
        rgbs, disps = render_viewpoints(
                render_poses=torch.from_numpy(data_dict['render_poses']).cuda(),
                HW=data_dict['HW'][data_dict['i_test']][[0]].repeat(len(data_dict['render_poses']), 0),
                Ks=data_dict['Ks'][data_dict['i_test']][[0]].repeat(len(data_dict['render_poses']), 0),
                render_factor=args.render_video_factor,
                savedir=testsavedir,
                **render_viewpoints_kwargs)
        imageio.mimwrite(os.path.join(testsavedir, 'video.rgb.mp4'), utils.to8b(rgbs), fps=30, quality=8)
        imageio.mimwrite(os.path.join(testsavedir, 'video.disp.mp4'), utils.to8b(disps / np.max(disps)), fps=30, quality=8)
    logger.info('Done')
| 56,095 | 49.130474 | 210 | py |
Voxurf | Voxurf-main/tools/vis_train.py | import argparse
import numpy as np
import open3d as o3d
# Visualize the scene bbox and camera frustums exported by run.py
# (--export_bbox_and_cams_only) with Open3D.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('path')
args = parser.parse_args()
data = np.load(args.path)
xyz_min = data['xyz_min']
xyz_max = data['xyz_max']
cam_lst = data['cam_lst']
# Outer aabb
aabb_01 = np.array([[0, 0, 0],
                    [0, 0, 1],
                    [0, 1, 1],
                    [0, 1, 0],
                    [1, 0, 0],
                    [1, 0, 1],
                    [1, 1, 1],
                    [1, 1, 0]])
out_bbox = o3d.geometry.LineSet()
out_bbox.points = o3d.utility.Vector3dVector(xyz_min + aabb_01 * (xyz_max - xyz_min))
out_bbox.colors = o3d.utility.Vector3dVector([[1,0,0] for i in range(12)])
out_bbox.lines = o3d.utility.Vector2iVector([[0,1],[1,2],[2,3],[3,0],[4,5],[5,6],[6,7],[7,4],[0,4],[1,5],[2,6],[3,7]])
# Cameras
cam_frustrm_lst = []
for cam in cam_lst:
    cam_frustrm = o3d.geometry.LineSet()
    cam_frustrm.points = o3d.utility.Vector3dVector(cam)
    # 5 points = center + 4 corner endpoints; 8 points = full frustum box
    if len(cam) == 5:
        cam_frustrm.colors = o3d.utility.Vector3dVector([[0,0,0] for i in range(8)])
        cam_frustrm.lines = o3d.utility.Vector2iVector([[0,1],[0,2],[0,3],[0,4],[1,2],[2,4],[4,3],[3,1]])
    elif len(cam) == 8:
        cam_frustrm.colors = o3d.utility.Vector3dVector([[0,0,0] for i in range(12)])
        cam_frustrm.lines = o3d.utility.Vector2iVector([
            [0,1],[1,3],[3,2],[2,0],
            [4,5],[5,7],[7,6],[6,4],
            [0,4],[1,5],[3,7],[2,6],
        ])
    else:
        raise NotImplementedError
    cam_frustrm_lst.append(cam_frustrm)
# Show
o3d.visualization.draw_geometries([
    o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=xyz_min),
    out_bbox, *cam_frustrm_lst])
| 1,778 | 33.211538 | 118 | py |
Voxurf | Voxurf-main/tools/tools.py | import os
from shutil import copyfile
def find_and_rename(root):
    """Copy each scene's first blended image into <root>/explore, prefixed with the scene name.

    Expects every scene directory under *root* to contain
    blended_images/00000000.jpg; the copy is named <scene>_00000000.jpg.
    """
    entries = os.listdir(root)  # snapshot before 'explore' is (possibly) created
    explore_dir = os.path.join(root, 'explore')
    if not os.path.exists(explore_dir):
        os.mkdir(explore_dir)
    for entry in entries:
        if not os.path.isdir(os.path.join(root, entry)):
            continue
        src = os.path.join(root, entry, 'blended_images/00000000.jpg')
        copyfile(src, os.path.join(explore_dir, entry + '_00000000.jpg'))
# NOTE(review): broken leftover — `l` is never defined in this module, so this
# loop raises NameError if the file is executed. It appears to check which
# entries (minus a 2-char suffix) are missing from 'dataset_low_res';
# confirm intent and either define `l` or delete the loop.
for n in l:
    if not os.path.exists(os.path.join('dataset_low_res', n[:-2])):
        print(n)
| 542 | 30.941176 | 76 | py |
Voxurf | Voxurf-main/tools/vis_volume.py | import argparse
import numpy as np
import mcubes
import open3d as o3d
# Visualize a saved density/rgb voxel grid (npz with 'alpha' and 'rgb') as an
# Open3D voxel grid, optionally together with exported camera frustums.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('path')
parser.add_argument('thres', type=float)
parser.add_argument('--cam')
args = parser.parse_args()
data = np.load(args.path)
alpha = data['alpha']
rgb = data['rgb']
# channel-first grids are transposed to channel-last
if rgb.shape[0] < rgb.shape[-1]:
    alpha = np.transpose(alpha, (1,2,0))
    rgb = np.transpose(rgb, (1,2,3,0))
print('Shape', alpha.shape, rgb.shape)
print('Active rate', (alpha > args.thres).mean())
print('Active nums', (alpha > args.thres).sum())
xyz_min = np.array([0,0,0])
xyz_max = np.array(alpha.shape)
if args.cam:
    # optional camera export from run.py --export_bbox_and_cams_only
    data = np.load(args.cam)
    xyz_min = data['xyz_min']
    xyz_max = data['xyz_max']
    cam_lst = data['cam_lst']
    cam_frustrm_lst = []
    for cam in cam_lst:
        cam_frustrm = o3d.geometry.LineSet()
        cam_frustrm.points = o3d.utility.Vector3dVector(cam)
        if len(cam) == 5:
            cam_frustrm.colors = o3d.utility.Vector3dVector([[0.5,0.5,0.5] for i in range(8)])
            cam_frustrm.lines = o3d.utility.Vector2iVector([[0,1],[0,2],[0,3],[0,4],[1,2],[2,4],[4,3],[3,1]])
        elif len(cam) == 8:
            cam_frustrm.colors = o3d.utility.Vector3dVector([[0.5,0.5,0.5] for i in range(12)])
            cam_frustrm.lines = o3d.utility.Vector2iVector([
                [0,1],[1,3],[3,2],[2,0],
                [4,5],[5,7],[7,6],[6,4],
                [0,4],[1,5],[3,7],[2,6],
            ])
        cam_frustrm_lst.append(cam_frustrm)
else:
    cam_frustrm_lst = []
aabb_01 = np.array([[0, 0, 0],
                    [0, 0, 1],
                    [0, 1, 1],
                    [0, 1, 0],
                    [1, 0, 0],
                    [1, 0, 1],
                    [1, 1, 1],
                    [1, 1, 0]])
out_bbox = o3d.geometry.LineSet()
out_bbox.points = o3d.utility.Vector3dVector(xyz_min + aabb_01 * (xyz_max - xyz_min))
out_bbox.colors = o3d.utility.Vector3dVector([[1,0,0] for i in range(12)])
out_bbox.lines = o3d.utility.Vector2iVector([[0,1],[1,2],[2,3],[3,0],[4,5],[5,6],[6,7],[7,4],[0,4],[1,5],[2,6],[3,7]])
# colorize the active voxels and wrap them in an Open3D VoxelGrid
xyz = np.stack((alpha > args.thres).nonzero(), -1)
color = rgb[xyz[:,0], xyz[:,1], xyz[:,2]]
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz / alpha.shape * (xyz_max - xyz_min) + xyz_min)
pcd.colors = o3d.utility.Vector3dVector(color[:, :3])
voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=max((xyz_max - xyz_min) / alpha.shape))
def change_background_to_black(vis):
    """Open3D key callback: paint the viewer background black."""
    vis.get_render_option().background_color = np.asarray([0, 0, 0])
    return False
# NOTE(review): removed a leftover debugger breakpoint (`import ipdb; ipdb.set_trace()`)
# that unconditionally halted the script before mesh extraction / visualization.
def extract_mesh(grid_data, thrd=0., binary=False, output_name="vox2mesh.obj"):
    """Run marching cubes on a scalar grid and save the result as an OBJ mesh.

    When *binary* is True the grid is Gaussian-smoothed first (mcubes.smooth),
    which is the usual treatment for 0/1 occupancy grids.
    """
    field = mcubes.smooth(grid_data) if binary else grid_data
    verts, faces = mcubes.marching_cubes(field, thrd)
    mcubes.export_obj(verts, faces, output_name)
    print("extracted mesh saved at: ", output_name)
# extract_mesh(alpha, args.thres, False, "vox2mesh_{}.obj".format(args.thres))
# binarized occupancy grid (currently only used by the commented-out export above)
bina_grid = (alpha > args.thres)
# extract_mesh(bina_grid, 0, True, "vox2mesh_bina.obj")
# show the grid; pressing 'K' switches the background to black
o3d.visualization.draw_geometries_with_key_callbacks([
    o3d.geometry.TriangleMesh.create_coordinate_frame(size=(xyz_max-xyz_min).min()*0.1, origin=xyz_min),
    out_bbox, voxel_grid, *cam_frustrm_lst,
], {ord("K"): change_background_to_black})
| 3,444 | 35.648936 | 118 | py |
Voxurf | Voxurf-main/tools/preprocess/process_video.py | from rembg.bg import remove
import numpy as np
import io
from PIL import Image
import configargparse
import os
import cv2
import mmcv
from PIL import ImageFile
import argparse
ImageFile.LOAD_TRUNCATED_IMAGES = True
def add_white_bg(input_path, masks_out_path, white_bg=False):
    """Derive binary object masks (0/255 PNG/JPG) from background-removed images.

    With *white_bg*, a pixel is foreground unless its mean RGB is exactly 255;
    otherwise the last (alpha) channel > 128 decides. Masks keep the source
    file names and are written under *masks_out_path*.
    """
    if not os.path.exists(masks_out_path):
        os.makedirs(masks_out_path)
    for fname in os.listdir(input_path):
        arr = np.array(Image.open(os.path.join(input_path, fname)))
        if white_bg:
            fg = arr.mean(-1) != 255
        else:
            fg = arr[:, :, -1] > 128
        cv2.imwrite(os.path.join(masks_out_path, fname), (fg * 255).astype(np.uint8))
    print("Done with masks saved at {}.".format(masks_out_path))
def get_frames(filename='test.mp4', output_path='./', interval=10):
    """Sample every *interval*-th frame of a video and dump them as numbered JPEGs."""
    print("Spliting video to frames with an interval of {} ...".format(interval))
    video = mmcv.VideoReader(filename)
    # obtain basic information
    print(len(video))
    print(video.width, video.height, video.resolution, video.fps)
    frames = video[0:-1:interval]
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    for idx, frame in enumerate(frames):
        cv2.imwrite(os.path.join(output_path, '%05d' % idx + '.jpg'), frame)
    print("Done with {} frames.".format(len(frames)))
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='get_frames')
    parser.add_argument('--source_dir', type=str, help='data source folder for preprocess')
    parser.add_argument('--video_path', type=str, help='video to process')
    parser.add_argument('--img_folder', type=str, default='image')
    parser.add_argument('--rmbg_img_folder', type=str, default='image_rmbg')
    parser.add_argument('--interval', type=int, default=10)
    parser.add_argument('--white_bg', action='store_true')
    opt = parser.parse_args()
    root = opt.source_dir
    images_ori_path = os.path.join(root, opt.img_folder)
    images_out_path = os.path.join(root, opt.rmbg_img_folder)
    masks_out_path = os.path.join(root, 'mask')
    # two-step pipeline: first split the video into frames, then (after external
    # background removal into rmbg_img_folder) derive binary masks
    if opt.mode == 'get_frames':
        get_frames(opt.video_path, images_ori_path, interval=opt.interval)
    elif opt.mode == 'get_masks':
        # remove_bg(images_ori_path, images_out_path, masks_out_path)
        add_white_bg(images_out_path, masks_out_path, opt.white_bg)
    else:
        raise NameError
| 2,455 | 34.085714 | 91 | py |
Voxurf | Voxurf-main/tools/preprocess/convert_cameras.py | import numpy as np
# import matplotlib.image as mpimg
# import matplotlib.pyplot as plt
# import cv2
# import argparse
# from glob import glob
import torch
import os
import argparse
import glob
import imageio
def _load_colmap(basedir, convert=True, suffix=''):
    """Convert an LLFF/COLMAP poses_bounds<suffix>.npy into a NeuS-style cameras.npz.

    Builds per-view world matrices P = [K @ [R|t]; 0 0 0 1] (world-to-image,
    OpenCV convention) and writes them as 'world_mat_i' entries.
    """
    poses_arr = np.load(os.path.join(basedir, 'poses_bounds{}.npy'.format(suffix)))
    poses_arr = poses_arr[:, :15].reshape([-1, 3, 5]) # N x 3 x 5
    if convert:
        poses = poses_arr.transpose(1,2,0)
        # from llff to opencv
        poses = np.concatenate([poses[:, 1:2, :],
                                poses[:, 0:1, :],
                                -poses[:, 2:3, :],
                                poses[:, 3:4, :],
                                poses[:, 4:5, :]], 1)
        poses_arr = poses.transpose(2,0,1)
    poses = poses_arr[:,:,:4]
    # last column of the first pose carries height, width, focal
    hwf = poses_arr[0,:3,-1]
    H, W, focal = hwf
    K = np.array([
        [focal, 0, 0.5*W],
        [0, focal, 0.5*H],
        [0, 0, 1]
    ])
    # camera-to-world -> world-to-camera: R_w2c = R_c2w^T, t_w2c = -R_w2c @ t_c2w
    R = poses[:, :3, :3].transpose(0, 2, 1) # (B, 3, 3)
    t = -torch.bmm(torch.from_numpy(R), torch.from_numpy(poses[:, :3, 3:])).numpy() # (B, 3, 1)
    bottom = np.repeat(np.array([0,0,0,1.]).reshape([1, 1, 4]), R.shape[0], axis=0)
    w2c0 = np.concatenate([R, t], -1)
    # w2c = np.concatenate([w2c0, bottom], 1)
    # P = np.matmul(K, w2c)
    Ks = np.repeat(K[None,...], R.shape[0], axis=0)
    P0 = torch.bmm(torch.from_numpy(Ks), torch.from_numpy(w2c0)).numpy()
    P = np.concatenate([P0, bottom], 1)
    # from opencv to opengl
    # aa = np.linalg.inv(w2c) # the same as poses
    camera_dict = {'world_mat_%d' % idx: P[idx] for idx in range(len(P))}
    np.savez(os.path.join(basedir, 'cameras.npz'), **camera_dict)
def blendedmvs_to_NeuS(basedir):
    """Convert BlendedMVS-style pose/intrinsics files into a NeuS-style cameras.npz.

    Reads per-view camera-to-world poses from <basedir>/pose/*.txt, images from
    <basedir>/rgb/*.png and a shared K from <basedir>/intrinsics.txt, builds
    the 4x4 world matrices P = [K @ [R|t]; 0 0 0 1] and writes them to
    <basedir>/cameras.npz as 'world_mat_i' entries.
    """
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
    print(pose_paths)
    # BUGFIX: removed leftover `import ipdb; ipdb.set_trace()` that unconditionally
    # dropped into the debugger here.
    all_poses = []
    all_imgs = []
    i_split = [[], []]
    for i, (pose_path, rgb_path) in enumerate(zip(pose_paths, rgb_paths)):
        # leading digit of the file name encodes the train/test split
        i_set = int(os.path.split(rgb_path)[-1][0])
        all_imgs.append((imageio.imread(rgb_path) / 255.).astype(np.float32))
        all_poses.append(np.loadtxt(pose_path).astype(np.float32))
        i_split[i_set].append(i)
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    i_split.append(i_split[-1])
    path_intrinsics = os.path.join(basedir, 'intrinsics.txt')
    H, W = imgs[0].shape[:2]
    K = np.loadtxt(path_intrinsics)
    focal = float(K[0,0])
    # camera-to-world -> world-to-camera: R_w2c = R_c2w^T, t_w2c = -R_w2c @ t_c2w
    R = poses[:, :3, :3].transpose(0, 2, 1) # (B, 3, 3)
    t = -torch.bmm(torch.from_numpy(R), torch.from_numpy(poses[:, :3, 3:])).numpy() # (B, 3, 1)
    bottom = np.repeat(np.array([0,0,0,1.]).reshape([1, 1, 4]), R.shape[0], axis=0)
    w2c0 = np.concatenate([R, t], -1)
    Ks = np.repeat(K[None,:3,:3], R.shape[0], axis=0)
    P0 = torch.bmm(torch.from_numpy(Ks).float(), torch.from_numpy(w2c0).float()).numpy()
    P = np.concatenate([P0, bottom], 1)
    camera_dict = {'world_mat_%d' % idx: P[idx] for idx in range(len(P))}
    np.savez(os.path.join(basedir, 'cameras.npz'), **camera_dict)
    print("done")
def MVS_to_NeuS(basedir, cam_dir='cams'):
    """Convert MVSNet-style per-view cam files into a NeuS-style cameras.npz.

    Reads <basedir>/<cam_dir>/{i:08d}_cam.txt (world-to-camera extrinsics plus a
    shared K), builds P = [K @ [R|t]; 0 0 0 1] and writes 'world_mat_i' entries.
    """
    camera_files = os.listdir(os.path.join(basedir,cam_dir))
    poses = []
    K = None
    for i in range(len(camera_files)):
        file = "{:>08d}_cam.txt".format(i)
        camera_file = os.path.join(basedir, cam_dir, file)
        intrinsics, extrinsics, depth_params = read_cam_file(camera_file)
        poses.append(extrinsics[None,...])
        K = intrinsics  # the last view's K is used for all views
    poses = np.vstack(poses)
    # MVS extrinsic is world2cam already
    # R = poses[:,:3,:3].transpose(0, 2, 1)
    # t = -torch.bmm(torch.from_numpy(R), torch.from_numpy(poses[:, :3, 3:])).numpy()
    R = poses[:,:3,:3]
    t = poses[:,:3,3:]
    bottom = np.repeat(np.array([0,0,0,1.]).reshape([1, 1, 4]), R.shape[0], axis=0)
    w2c0 = np.concatenate([R, t], -1)
    Ks = np.repeat(K[None,...], R.shape[0], axis=0)
    P0 = torch.bmm(torch.from_numpy(Ks), torch.from_numpy(w2c0)).numpy()
    P = np.concatenate([P0, bottom], 1)
    camera_dict = {'world_mat_%d' % idx: P[idx] for idx in range(len(P))}
    np.savez(os.path.join(basedir, 'cameras.npz'), **camera_dict)
    print('done, saved at', os.path.join(basedir, 'cameras.npz'))
def TAT0_to_NeuS(basedir, cam_dir='pose'):
    """Convert Tanks-and-Temples-style poses into a NeuS-style cameras.npz.

    Reads camera-to-world poses from <basedir>/pose/*.txt (inverted here to
    world-to-camera) and a shared K from <basedir>/intrinsics.txt, builds
    P = [K @ [R|t]; 0 0 0 1] and writes 'world_mat_i' entries.
    """
    poses = []
    K = None
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    for i, file in enumerate(pose_paths):
        pose = np.loadtxt(file).astype(np.float32)
        # intrinsics, extrinsics, depth_params = read_cam_file(camera_file)
        # stored poses are camera-to-world; invert to get world-to-camera
        poses.append(np.linalg.inv(pose)[None,...])
    poses = np.vstack(poses).astype(np.float32)
    path_intrinsics = os.path.join(basedir, 'intrinsics.txt')
    K = np.loadtxt(path_intrinsics)[:3,:3].astype(np.float32)
    # MVS extrinsic is world2cam already
    # R = poses[:,:3,:3].transpose(0, 2, 1)
    # t = -torch.bmm(torch.from_numpy(R), torch.from_numpy(poses[:, :3, 3:])).numpy()
    R = poses[:,:3,:3]
    t = poses[:,:3,3:]
    bottom = np.repeat(np.array([0,0,0,1.]).reshape([1, 1, 4]), R.shape[0], axis=0)
    w2c0 = np.concatenate([R, t], -1)
    Ks = np.repeat(K[None,...], R.shape[0], axis=0)
    P0 = torch.bmm(torch.from_numpy(Ks), torch.from_numpy(w2c0)).numpy()
    P = np.concatenate([P0, bottom], 1)
    camera_dict = {'world_mat_%d' % idx: P[idx] for idx in range(len(P))}
    np.savez(os.path.join(basedir, 'cameras.npz'), **camera_dict)
    print('done, saved at', os.path.join(basedir, 'cameras.npz'))
def NeuS_to_MVS(basedir):
    """Convert an LLFF poses_bounds.npy into MVSNet-style per-view cam text files.

    Writes <basedir>/cams_1/{i:08d}_cam.txt containing the 4x4 world-to-camera
    extrinsic, the 3x3 intrinsic and a shared near/far depth range derived
    from the positive pose bounds (near*0.8, far*1.2).
    """
    poses_arr = np.load(os.path.join(basedir, 'poses_bounds.npy'))
    bds = poses_arr[:, -2:].transpose([1,0])
    near, far = bds[bds[:,0] > 0, 0].min() * 0.8, bds[bds[:,1] > 0, 1].max() * 1.2
    poses_arr = poses_arr[:, :15].reshape([-1, 3, 5]) # N x 3 x 5
    poses = poses_arr.transpose(1,2,0)
    # from llff to opencv
    poses = np.concatenate([poses[:, 1:2, :],
                            poses[:, 0:1, :],
                            -poses[:, 2:3, :],
                            poses[:, 3:4, :],
                            poses[:, 4:5, :]], 1)
    poses_arr = poses.transpose(2,0,1)
    # camera to world
    poses = poses_arr[:,:,:4]
    hwf = poses_arr[0,:3,-1]
    H, W, focal = hwf
    K = np.array([
        [focal, 0, 0.5*W],
        [0, focal, 0.5*H],
        [0, 0, 1]
    ])
    # camera-to-world -> world-to-camera
    R = poses[:, :3, :3].transpose(0, 2, 1) # (B, 3, 3)
    t = -torch.bmm(torch.from_numpy(R), torch.from_numpy(poses[:, :3, 3:])).numpy() # (B, 3, 1)
    bottom = np.repeat(np.array([0,0,0,1.]).reshape([1, 1, 4]), R.shape[0], axis=0)
    w2c0 = np.concatenate([R, t], -1)
    P = np.concatenate([w2c0, bottom], 1)
    intrinsics, extrinsics = K, P
    if not os.path.exists(os.path.join(basedir, 'cams_1')):
        os.mkdir(os.path.join(basedir, 'cams_1'))
    for i in range(poses.shape[0]):
        file = "{:>08d}_cam.txt".format(i)
        camera_file = os.path.join(basedir, 'cams_1', file)
        with open(camera_file, "w") as f:
            f.write("extrinsic\n")
            for l in extrinsics[i]:
                seq = ["{:.6f} ".format(e) for e in l] + ['\n']
                f.writelines( seq )
            f.write("\nintrinsic\n")
            for l in intrinsics:
                seq = ["{:.6f} ".format(e) for e in l] + ['\n']
                f.writelines(seq)
            f.write("\n{:.2f} {:.2f}\n".format(near, far))
def read_cam_file(filename):
    """Read camera intrinsics, extrinsics, and depth values (min, max) from text file

    Args:
        filename: cam text file path string
    Returns:
        Tuple with intrinsics matrix (3x3), extrinsics matrix (4x4), and depth params vector (min and max) if exists
    """
    with open(filename) as fh:
        content = [ln.rstrip() for ln in fh.readlines()]
    # lines [1, 5): the 4x4 world-to-camera matrix
    extrinsics = np.array(' '.join(content[1:5]).split(), dtype=np.float32).reshape(4, 4)
    # lines [7, 10): the 3x3 intrinsics matrix
    intrinsics = np.array(' '.join(content[7:10]).split(), dtype=np.float32).reshape(3, 3)
    # optional line 11: "depth_min depth_max"
    if len(content) >= 12:
        depth_params = np.array(content[11].split(), dtype=np.float32)
    else:
        depth_params = np.empty(0)
    return intrinsics, extrinsics, depth_params
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--source_dir', type=str, default='', help='data source folder for preprocess')
    parser.add_argument('--mode', type=str, default='colmap', help='what kind of source format to convert')
    opt = parser.parse_args()
    # dispatch to the converter matching the source camera format
    if opt.mode == 'colmap':
        _load_colmap(opt.source_dir, True)
    elif opt.mode == 'mvs2neus':
        MVS_to_NeuS(opt.source_dir)
    elif opt.mode == 'tat02neus':
        TAT0_to_NeuS(opt.source_dir)
    elif opt.mode == 'neus2mvs':
        NeuS_to_MVS(opt.source_dir)
    elif opt.mode == 'blendedmvs2neus':
        blendedmvs_to_NeuS(opt.source_dir)
    else:
        raise NotImplementedError
| 9,091 | 38.021459 | 116 | py |
Voxurf | Voxurf-main/tools/preprocess/preprocess_cameras.py | import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import argparse
from glob import glob
import os
def get_Ps(cameras, number_of_cameras):
    """Collect the 3x4 projection matrices 'world_mat_i' from the camera dict as float64."""
    return np.array([cameras['world_mat_%d' % idx][:3, :].astype(np.float64)
                     for idx in range(number_of_cameras)])
def glob_imgs(path):
    """Return all image file paths (png/jpg/JPEG/JPG) directly under *path*."""
    return [p
            for ext in ('*.png', '*.jpg', '*.JPEG', '*.JPG')
            for p in glob(os.path.join(path, ext))]
#Gets the fundamental matrix that transforms points from the image of camera 2, to a line in the image of
#camera 1
def get_fundamental_matrix(P_1,P_2):
P_2_center=np.linalg.svd(P_2)[-1][-1, :]
epipole=P_1@P_2_center
epipole_cross=np.zeros((3,3))
epipole_cross[0,1]=-epipole[2]
epipole_cross[1, 0] = epipole[2]
epipole_cross[0,2]=epipole[1]
epipole_cross[2, 0] = -epipole[1]
epipole_cross[1, 2] = -epipole[0]
epipole_cross[2, 1] = epipole[0]
F = epipole_cross@P_1 @ np.linalg.pinv(P_2)
return F
# Given a point (curx,cury) in image 0, get the maximum and minimum
# possible depth of the point, considering the second image silhouette (index j)
def get_min_max_d(curx, cury, P_j, silhouette_j, P_0, Fj0, j):
    """Bound the depth of pixel (curx, cury) in view 0 using view j's silhouette.

    Triangulates the pixel against the silhouette points of view j that lie
    close to its epipolar line; returns (0.0, 0.0) when no silhouette point
    matches or no triangulated point has non-negative depth.
    """
    # transfer point to line using the fundamental matrix:
    cur_l_1=Fj0 @ np.array([curx,cury,1.0]).astype(np.float32)
    # normalize so that point-line distances are in pixels
    cur_l_1 = cur_l_1 / np.linalg.norm(cur_l_1[:2])
    # Distances of the silhouette points from the epipolar line:
    dists = np.abs(silhouette_j.T @ cur_l_1)
    # 0.7 px tolerance around the epipolar line
    relevant_matching_points_1 = silhouette_j[:, dists < 0.7]
    if relevant_matching_points_1.shape[1]==0:
        return (0.0,0.0)
    # triangulate (curx, cury) against every candidate silhouette point
    X = cv2.triangulatePoints(P_0, P_j, np.tile(np.array([curx, cury]).astype(np.float32),
                                                (relevant_matching_points_1.shape[1], 1)).T,
                              relevant_matching_points_1[:2, :])
    # depth in view 0 = third row of P_0 applied to the dehomogenized points
    depths = P_0[2] @ (X / X[3])
    reldepth=depths >= 0
    depths=depths[reldepth]
    if depths.shape[0] == 0:
        return (0.0, 0.0)
    min_depth=depths.min()
    max_depth = depths.max()
    return min_depth,max_depth
# get all fundamental matrices that transform points from camera 0 to lines in Ps
def get_fundamental_matrices(P_0, Ps):
    """Stack the fundamental matrices that map image-0 points to epipolar lines in every view of Ps."""
    return np.array([get_fundamental_matrix(P_j, P_0) for P_j in Ps])
def get_all_mask_points(masks_dir):
    """Load every mask image under *masks_dir*.

    Returns (mask_points_all, mask_ims): per image, the homogeneous pixel
    coordinates (3, K) float32 of mask pixels (> 0.5), and the stacked
    boolean mask images.
    """
    mask_paths = sorted(glob_imgs(masks_dir))
    mask_points_all=[]
    mask_ims = []
    h0, w0 = 0, 0
    for i, path in enumerate(mask_paths):
        img = mpimg.imread(path)
        if img.ndim > 2:
            # Multi-channel mask: a pixel is "on" if any channel exceeds 0.5.
            cur_mask = img.max(axis=2) > 0.5
            mask_points = np.where(img.max(axis=2) > 0.5)
        else:
            # Grayscale masks are resized to the first image's resolution.
            # NOTE(review): RGB masks (branch above) are NOT resized — this
            # assumes multi-channel masks already share one resolution; confirm.
            if i == 0:
                h0, w0 = img.shape
            else:
                if img.shape != (h0, w0):
                    img = cv2.resize(img, (w0, h0))
            cur_mask = img > 0.5
            mask_points = np.where(img > 0.5)
        # np.where returns (rows, cols); store as homogeneous (x, y, 1) columns.
        xs = mask_points[1]
        ys = mask_points[0]
        mask_points_all.append(np.stack((xs,ys,np.ones_like(xs))).astype(np.float32))
        mask_ims.append(cur_mask)
    return mask_points_all,np.array(mask_ims)
def refine_visual_hull(masks, Ps, scale, center):
num_cam=masks.shape[0]
GRID_SIZE=100
MINIMAL_VIEWS=45 # Fitted for DTU, might need to change for different data.
im_height=masks.shape[1]
im_width = masks.shape[2]
xx, yy, zz = np.meshgrid(np.linspace(-scale, scale, GRID_SIZE), np.linspace(-scale, scale, GRID_SIZE),
np.linspace(-scale, scale, GRID_SIZE))
points = np.stack((xx.flatten(), yy.flatten(), zz.flatten()))
points = points + center[:, np.newaxis]
appears = np.zeros((GRID_SIZE*GRID_SIZE*GRID_SIZE, 1))
for i in range(num_cam):
proji = Ps[i] @ np.concatenate((points, np.ones((1, GRID_SIZE*GRID_SIZE*GRID_SIZE))), axis=0)
depths = proji[2]
proj_pixels = np.round(proji[:2] / depths).astype(np.int64)
relevant_inds = np.logical_and(proj_pixels[0] >= 0, proj_pixels[1] < im_height)
relevant_inds = np.logical_and(relevant_inds, proj_pixels[0] < im_width)
relevant_inds = np.logical_and(relevant_inds, proj_pixels[1] >= 0)
relevant_inds = np.logical_and(relevant_inds, depths > 0)
relevant_inds = np.where(relevant_inds)[0]
cur_mask = masks[i] > 0.5
relmask = cur_mask[proj_pixels[1, relevant_inds], proj_pixels[0, relevant_inds]]
relevant_inds = relevant_inds[relmask]
appears[relevant_inds] = appears[relevant_inds] + 1
final_points = points[:, (appears >= MINIMAL_VIEWS).flatten()]
centroid=final_points.mean(axis=1)
normalize = final_points - centroid[:, np.newaxis]
import ipdb; ipdb.set_trace
return centroid,np.sqrt((normalize ** 2).sum(axis=0)).mean() * 3,final_points.T
# the normaliztion script needs a set of 2D object masks and camera projection matrices (P_i=K_i[R_i |t_i] where [R_i |t_i] is world to camera transformation)
def get_normalization_function(Ps,mask_points_all,number_of_normalization_points,number_of_cameras,masks_all):
    """Estimate a 4x4 similarity (scale + translation) normalizing the scene.

    Samples mask pixels of image 0, intersects their epipolar depth ranges
    across the other cameras, back-projects the surviving depth bounds to 3D,
    and derives centroid/scale from those points (refined by a visual hull).
    Returns (normalization, all_Xs).
    """
    P_0 = Ps[0]
    Fs = get_fundamental_matrices(P_0, Ps)
    # Camera-0 center from the right null space of P_0, dehomogenized.
    P_0_center = np.linalg.svd(P_0)[-1][-1, :]
    P_0_center = P_0_center / P_0_center[3]
    # Use image 0 as a references
    xs = mask_points_all[0][0, :]
    ys = mask_points_all[0][1, :]
    counter = 0
    all_Xs = []
    # sample a subset of 2D points from camera 0
    # number_of_normalization_points = 5000
    indss = np.random.permutation(xs.shape[0])[:number_of_normalization_points]
    for i in indss:
        curx = xs[i]
        cury = ys[i]
        # for each point, check its min/max depth in all other cameras.
        # If there is an intersection of relevant depth keep the point
        observerved_in_all = True
        max_d_all = 1e10
        min_d_all = 1e-10
        # Every 5th camera is enough for a depth-range consensus.
        for j in range(1, number_of_cameras, 5):
            min_d, max_d = get_min_max_d(curx, cury, Ps[j], mask_points_all[j], P_0, Fs[j], j)
            # (0, 0) from get_min_max_d means "not observed in view j".
            if abs(min_d) < 0.00001:
                observerved_in_all = False
                break
            max_d_all = np.min(np.array([max_d_all, max_d]))
            min_d_all = np.max(np.array([min_d_all, min_d]))
            # if max_d_all < min_d_all + 1e-2:
            #     observerved_in_all = False
            #     break
        if observerved_in_all:
            # Back-project both depth bounds along the pixel's viewing ray.
            direction = np.linalg.inv(P_0[:3, :3]) @ np.array([curx, cury, 1.0])
            all_Xs.append(P_0_center[:3] + direction * min_d_all)
            all_Xs.append(P_0_center[:3] + direction * max_d_all)
            counter = counter + 1
    print("= = = = = Number of points:%d = = = = =" % counter)
    centroid = np.array(all_Xs).mean(axis=0)
    # mean_norm=np.linalg.norm(np.array(allXs)-centroid,axis=1).mean()
    scale = np.array(all_Xs).std()
    # OPTIONAL: refine the visual hull
    centroid,scale,all_Xs = refine_visual_hull(masks_all, Ps, scale, centroid)
    # Similarity transform: uniform scale on the diagonal, centroid as translation.
    normalization = np.eye(4).astype(np.float32)
    normalization[0, 3] = centroid[0]
    normalization[1, 3] = centroid[1]
    normalization[2, 3] = centroid[2]
    normalization[0, 0] = scale
    normalization[1, 1] = scale
    normalization[2, 2] = scale
    return normalization,all_Xs
def get_normalization(source_dir, use_linear_init=False):
    """Compute and save the normalization (scale) matrices for one scene.

    Reads `<source_dir>/mask` and `<source_dir>/<cameras>.npz`, estimates a
    bounding sphere of the object via epipolar depth bounds plus a visual
    hull refinement, and writes `<cameras>_sphere.npz` containing per-camera
    `scale_mat_i` / `world_mat_i` entries.
    """
    print('Preprocessing', source_dir)
    if use_linear_init:
        # Since there is noise in the cameras, some of them will not apear in
        # all the cameras, so we need more points.
        number_of_normalization_points = 1000
        cameras_filename = "cameras_linear_init"
    else:
        number_of_normalization_points = 100
        cameras_filename = "cameras"
    masks_dir = '{0}/mask'.format(source_dir)
    cameras = np.load('{0}/{1}.npz'.format(source_dir, cameras_filename))
    mask_points_all, masks_all = get_all_mask_points(masks_dir)
    number_of_cameras = len(masks_all)
    Ps = get_Ps(cameras, number_of_cameras)
    normalization, all_Xs = get_normalization_function(
        Ps, mask_points_all, number_of_normalization_points, number_of_cameras, masks_all)
    cameras_new = {}
    for i in range(number_of_cameras):
        cameras_new['scale_mat_%d' % i] = normalization
        cameras_new['world_mat_%d' % i] = np.concatenate(
            (Ps[i], np.array([[0, 0, 0, 1.0]])), axis=0).astype(np.float32)
    np.savez('{0}/{1}_sphere.npz'.format(source_dir, cameras_filename), **cameras_new)
    print(normalization)
    print('--------------------------------------------------------')
    if False:  # for debugging: re-project the normalization points onto each mask
        for i in range(number_of_cameras):
            plt.figure()
            # BUGFIX: was `masks_path` (undefined name) — use masks_dir.
            plt.imshow(mpimg.imread('%s/%03d.png' % (masks_dir, i)))
            xy = (Ps[i, :2, :] @ (np.concatenate((np.array(all_Xs), np.ones((len(all_Xs), 1))), axis=1).T)) / (
                    Ps[i, 2, :] @ (np.concatenate((np.array(all_Xs), np.ones((len(all_Xs), 1))), axis=1).T))
            plt.plot(xy[0, :], xy[1, :], '*')
            plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--source_dir', type=str, default='', help='data source folder for preprocess')
    parser.add_argument('--dtu', default=False, action="store_true", help='If set, apply preprocess to all DTU scenes.')
    parser.add_argument('--root', default=None, help='If set, apply preprocess to every scene folder under this root.')
    parser.add_argument('--use_linear_init', default=False, action="store_true", help='If set, preprocess for linear init cameras.')
    opt = parser.parse_args()
    # BUGFIX: this block previously listed a hard-coded developer path instead
    # of opt.root, swallowed all errors with a bare `except:`, ignored
    # --use_linear_init, ended in a leftover ipdb breakpoint, and then fell
    # through into the other branches.
    if opt.root is not None:
        # Batch mode: preprocess every scene directory under --root,
        # collecting failures instead of aborting on the first one.
        all_failed = []
        for file in os.listdir(opt.root):
            path = os.path.join(opt.root, file)
            if not os.path.isdir(path):
                continue
            try:
                get_normalization(path, opt.use_linear_init)
            except Exception as e:
                print("failed for {}: {}".format(path, e))
                all_failed.append(path.split("/")[-1])
        print("{} scene(s) failed: {}".format(len(all_failed), all_failed))
    elif opt.dtu:
        source_dir = '../data/DTU'
        scene_dirs = sorted(glob(os.path.join(source_dir, "scan*")))
        for scene_dir in scene_dirs:
            get_normalization(scene_dir, opt.use_linear_init)
    else:
        get_normalization(opt.source_dir, opt.use_linear_init)
    print('Done!')
# ==== tools/preprocess/colmap_poses/pose_utils.py ====
import numpy as np
import os
import sys
import imageio
import skimage.transform
from colmap_wrapper import run_colmap
import colmap_read_model as read_model
import argparse
def save_poses(basedir, poses, pts3d, perm):
    """Write `poses_bounds.npy` (LLFF format) to *basedir*.

    For each image (ordered by *perm*), stores the flattened 3x5 pose matrix
    followed by robust near/far depth bounds computed from the COLMAP 3D
    points visible in that image.

    poses: (3, 5, N) camera-to-world poses with hwf column appended.
    pts3d: dict of COLMAP Point3D records (.xyz, .image_ids; ids are 1-based).
    """
    pts_arr = []
    vis_arr = []
    for k in pts3d:
        pts_arr.append(pts3d[k].xyz)
        cams = [0] * poses.shape[-1]
        for ind in pts3d[k].image_ids:
            # BUGFIX: the original check was `len(cams) < ind - 1`, which
            # still allowed the out-of-range index ind - 1 == len(cams).
            if len(cams) < ind:
                print('ERROR: the correct camera poses for current points cannot be accessed')
                return
            cams[ind - 1] = 1
        vis_arr.append(cams)
    pts_arr = np.array(pts_arr)
    vis_arr = np.array(vis_arr)
    print('Points', pts_arr.shape, 'Visibility', vis_arr.shape)
    # Depth of every 3D point along each camera's viewing axis (z column of the pose).
    zvals = np.sum(-(pts_arr[:, np.newaxis, :].transpose([2, 0, 1]) - poses[:3, 3:4, :]) * poses[:3, 2:3, :], 0)
    valid_z = zvals[vis_arr == 1]
    print('Depth stats', valid_z.min(), valid_z.max(), valid_z.mean())
    save_arr = []
    for i in perm:
        vis = vis_arr[:, i]
        zs = zvals[:, i]
        zs = zs[vis == 1]
        # Robust near/far bounds from the 0.5th / 99.5th depth percentiles.
        close_depth, inf_depth = np.percentile(zs, .5), np.percentile(zs, 99.5)
        save_arr.append(np.concatenate([poses[..., i].ravel(), np.array([close_depth, inf_depth])], 0))
    save_arr = np.array(save_arr)
    np.save(os.path.join(basedir, 'poses_bounds.npy'), save_arr)
def minify_v0(basedir, factors=[], resolutions=[]):
    """Legacy numpy-based downsampler: create `images_<f>/` (integer factor)
    or `images_<W>x<H>/` folders from `<basedir>/image`.  Skips entirely if
    every requested folder already exists.

    NOTE: factors=[]/resolutions=[] are mutable defaults, but they are only
    read here, so the shared-default pitfall does not bite.
    """
    needtoload = False
    for r in factors:
        imgdir = os.path.join(basedir, 'images_{}'.format(r))
        if not os.path.exists(imgdir):
            needtoload = True
    for r in resolutions:
        imgdir = os.path.join(basedir, 'images_{}x{}'.format(r[1], r[0]))
        if not os.path.exists(imgdir):
            needtoload = True
    if not needtoload:
        return
    def downsample(imgs, f):
        # Box-filter downsample by integer factor f: reshape (..., H, W, C)
        # into (..., H/f, f, W/f, f, C) and average the two f-sized axes.
        sh = list(imgs.shape)
        sh = sh[:-3] + [sh[-3]//f, f, sh[-2]//f, f, sh[-1]]
        imgs = np.reshape(imgs, sh)
        imgs = np.mean(imgs, (-2, -4))
        return imgs
    imgdir = os.path.join(basedir, 'image')
    imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))]
    imgs = [f for f in imgs if any([f.endswith(ex) for ex in ['JPG', 'jpg', 'png', 'jpeg', 'PNG']])]
    # Load every source image into one (N, H, W, C) float array in [0, 1].
    imgs = np.stack([imageio.imread(img)/255. for img in imgs], 0)
    for r in factors + resolutions:
        if isinstance(r, int):
            name = 'images_{}'.format(r)
        else:
            name = 'images_{}x{}'.format(r[1], r[0])
        imgdir = os.path.join(basedir, name)
        if os.path.exists(imgdir):
            continue
        print('Minifying', r, basedir)
        if isinstance(r, int):
            imgs_down = downsample(imgs, r)
        else:
            imgs_down = skimage.transform.resize(imgs, [imgs.shape[0], r[0], r[1], imgs.shape[-1]],
                                                order=1, mode='constant', cval=0, clip=True, preserve_range=False,
                                                anti_aliasing=True, anti_aliasing_sigma=None)
        os.makedirs(imgdir)
        for i in range(imgs_down.shape[0]):
            imageio.imwrite(os.path.join(imgdir, 'image{:03d}.png'.format(i)), (255*imgs_down[i]).astype(np.uint8))
def minify(basedir, factors=[], resolutions=[]):
    """Create downsampled copies of `<basedir>/image` using ImageMagick.

    For each integer factor f, writes `images_<f>/`; for each [H, W]
    resolution, writes `images_<W>x<H>/`.  Skips entirely if every requested
    folder already exists.  Requires `cp` and `mogrify` on PATH (shells out).
    """
    needtoload = False
    for r in factors:
        imgdir = os.path.join(basedir, 'images_{}'.format(r))
        if not os.path.exists(imgdir):
            needtoload = True
    for r in resolutions:
        imgdir = os.path.join(basedir, 'images_{}x{}'.format(r[1], r[0]))
        if not os.path.exists(imgdir):
            needtoload = True
    if not needtoload:
        return
    from shutil import copy
    from subprocess import check_output
    imgdir = os.path.join(basedir, 'image')
    imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))]
    imgs = [f for f in imgs if any([f.endswith(ex) for ex in ['JPG', 'jpg', 'png', 'jpeg', 'PNG']])]
    imgdir_orig = imgdir
    wd = os.getcwd()
    for r in factors + resolutions:
        if isinstance(r, int):
            name = 'images_{}'.format(r)
            # mogrify percentage argument, e.g. factor 4 -> "25%".
            resizearg = '{}%'.format(int(100./r))
        else:
            name = 'images_{}x{}'.format(r[1], r[0])
            resizearg = '{}x{}'.format(r[1], r[0])
        imgdir = os.path.join(basedir, name)
        if os.path.exists(imgdir):
            continue
        print('Minifying', r, basedir)
        os.makedirs(imgdir)
        # NOTE: shell=True with paths interpolated — fine for trusted local
        # dataset folders, unsafe for untrusted basedir values.
        check_output('cp {}/* {}'.format(imgdir_orig, imgdir), shell=True)
        ext = imgs[0].split('.')[-1]
        args = ' '.join(['mogrify', '-resize', resizearg, '-format', 'png', '*.{}'.format(ext)])
        print(args)
        os.chdir(imgdir)
        check_output(args, shell=True)
        os.chdir(wd)
        # mogrify -format png wrote new files; drop the copied originals.
        if ext != 'png':
            check_output('rm {}/*.{}'.format(imgdir, ext), shell=True)
            print('Removed duplicates')
        print('Done')
def load_data(basedir, factor=None, width=None, height=None, load_imgs=True):
    """Load LLFF-style poses/bounds (and optionally images) from *basedir*.

    Reads `poses_bounds.npy` into poses (3, 5, N) and bounds (2, N), creates
    a downsampled image folder via minify() when factor/width/height is
    given, and patches the hwf column of the poses to match the loaded
    resolution.  Returns (poses, bds) or (poses, bds, imgs) with imgs of
    shape (H, W, 3, N) in [0, 1].

    Fixes: removed the no-op `factor = factor` and the duplicated
    `imgs = imgs = [...]` assignment.
    """
    poses_arr = np.load(os.path.join(basedir, 'poses_bounds.npy'))
    poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1, 2, 0])
    bds = poses_arr[:, -2:].transpose([1, 0])
    img0 = [os.path.join(basedir, 'image', f) for f in sorted(os.listdir(os.path.join(basedir, 'image'))) \
            if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')][0]
    sh = imageio.imread(img0).shape
    sfx = ''
    if factor is not None:
        sfx = '_{}'.format(factor)
        minify(basedir, factors=[factor])
    elif height is not None:
        factor = sh[0] / float(height)
        width = int(sh[1] / factor)
        minify(basedir, resolutions=[[height, width]])
        sfx = '_{}x{}'.format(width, height)
    elif width is not None:
        factor = sh[1] / float(width)
        height = int(sh[0] / factor)
        minify(basedir, resolutions=[[height, width]])
        sfx = '_{}x{}'.format(width, height)
    else:
        factor = 1
    imgdir = os.path.join(basedir, 'image' + sfx)
    if not os.path.exists(imgdir):
        print(imgdir, 'does not exist, returning')
        return
    imgfiles = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir)) if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')]
    if poses.shape[-1] != len(imgfiles):
        print('Mismatch between imgs {} and poses {} !!!!'.format(len(imgfiles), poses.shape[-1]))
        return
    # Patch the hwf column: actual image size and focal rescaled by the factor.
    sh = imageio.imread(imgfiles[0]).shape
    poses[:2, 4, :] = np.array(sh[:2]).reshape([2, 1])
    poses[2, 4, :] = poses[2, 4, :] * 1. / factor
    if not load_imgs:
        return poses, bds
    def imread(f):
        # Disable gamma handling for PNGs so values match the raw file.
        if f.endswith('png'):
            return imageio.imread(f, ignoregamma=True)
        else:
            return imageio.imread(f)
    imgs = [imread(f)[..., :3] / 255. for f in imgfiles]
    imgs = np.stack(imgs, -1)
    print('Loaded image data', imgs.shape, poses[:, -1, 0])
    return poses, bds, imgs
def load_colmap_data(realdir):
    """Load a COLMAP sparse model from *realdir* into LLFF conventions.

    Returns (poses, pts3d, perm): poses is (3, 5, N) camera-to-world with an
    appended [h, w, focal] column, pts3d the raw COLMAP Point3D dict, and
    perm the image indices sorted by filename.
    """
    camerasfile = os.path.join(realdir, 'cameras.bin')
    camdata = read_model.read_cameras_binary(camerasfile)
    # cam = camdata[camdata.keys()[0]]
    # Assumes a single shared camera; intrinsics come from the first entry.
    list_of_keys = list(camdata.keys())
    cam = camdata[list_of_keys[0]]
    print( 'Cameras', len(cam))
    h, w, f = cam.height, cam.width, cam.params[0]
    # w, h, f = factor * w, factor * h, factor * f
    hwf = np.array([h,w,f]).reshape([3,1])
    imagesfile = os.path.join(realdir, 'images.bin')
    imdata = read_model.read_images_binary(imagesfile)
    w2c_mats = []
    bottom = np.array([0,0,0,1.]).reshape([1,4])
    names = [imdata[k].name for k in imdata]
    print( 'Images #', len(names))
    perm = np.argsort(names)
    for k in imdata:
        im = imdata[k]
        # World-to-camera as a 4x4 [R|t; 0 0 0 1] matrix.
        R = im.qvec2rotmat()
        t = im.tvec.reshape([3,1])
        m = np.concatenate([np.concatenate([R, t], 1), bottom], 0)
        w2c_mats.append(m)
    w2c_mats = np.stack(w2c_mats, 0)
    c2w_mats = np.linalg.inv(w2c_mats)
    poses = c2w_mats[:, :3, :4].transpose([1,2,0])
    # Append the [h, w, f] column to every pose.
    poses = np.concatenate([poses, np.tile(hwf[..., np.newaxis], [1,1,poses.shape[-1]])], 1)
    points3dfile = os.path.join(realdir, 'points3D.bin')
    pts3d = read_model.read_points3d_binary(points3dfile)
    # must switch to [-u, r, -t] from [r, -u, t], NOT [r, u, -t]
    poses = np.concatenate([poses[:, 1:2, :], poses[:, 0:1, :], -poses[:, 2:3, :], poses[:, 3:4, :], poses[:, 4:5, :]], 1)
    return poses, pts3d, perm
def gen_poses(basedir, match_type, factors=None):
    """Run COLMAP on *basedir* if needed, then convert the sparse model to
    `poses_bounds.npy` (and optionally minified image folders).

    match_type: COLMAP matcher name, e.g. 'exhaustive_matcher'.
    factors: optional list of downsampling factors passed to minify().
    Returns True on completion.
    """
    files_needed = ['{}.bin'.format(f) for f in ['cameras', 'images', 'points3D']]
    # COLMAP may write the model into sparse/0 or directly into sparse.
    if os.path.exists(os.path.join(basedir, 'sparse/0')):
        files_had = os.listdir(os.path.join(basedir, 'sparse/0'))
        realdir = os.path.join(basedir, 'sparse/0')
    elif os.path.exists(os.path.join(basedir, 'sparse')):
        files_had = os.listdir(os.path.join(basedir, 'sparse'))
        realdir = os.path.join(basedir, 'sparse')
    else:
        files_had = []
        realdir = os.path.join(basedir, 'sparse/0')
    if not all([f in files_had for f in files_needed]):
        print( 'Need to run COLMAP' )
        run_colmap(basedir, match_type)
    else:
        print('Don\'t need to run COLMAP')
    print( 'Post-colmap')
    poses, pts3d, perm = load_colmap_data(realdir)
    save_poses(basedir, poses, pts3d, perm)
    if factors is not None:
        print( 'Factors:', factors)
        minify(basedir, factors)
    print( 'Done with imgs2poses' )
    return True
# CLI entry point: `python pose_utils.py --source_dir <scene>` runs the full
# imgs2poses pipeline on one scene folder.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--source_dir', type=str, default='', help='data source folder for preprocess')
    opt = parser.parse_args()
    gen_poses(basedir=opt.source_dir, match_type='exhaustive_matcher')

# ==== tools/preprocess/colmap_poses/colmap_read_model.py ====
# Copyright (c) 2018, ETH Zurich and UNC Chapel Hill.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Johannes L. Schoenberger (jsch at inf.ethz.ch)
import os
import sys
import collections
import numpy as np
import struct
# Lightweight records mirroring the entities of a COLMAP reconstruction.
CameraModel = collections.namedtuple(
    "CameraModel", ["model_id", "model_name", "num_params"])
Camera = collections.namedtuple(
    "Camera", ["id", "model", "width", "height", "params"])
BaseImage = collections.namedtuple(
    "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
Point3D = collections.namedtuple(
    "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
class Image(BaseImage):
    # Extends the plain record with a quaternion-to-rotation-matrix helper.
    def qvec2rotmat(self):
        return qvec2rotmat(self.qvec)
# Camera models supported by COLMAP; ids and parameter counts follow
# COLMAP's camera model enumeration.
CAMERA_MODELS = {
    CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
    CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
    CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
    CameraModel(model_id=3, model_name="RADIAL", num_params=5),
    CameraModel(model_id=4, model_name="OPENCV", num_params=8),
    CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
    CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
    CameraModel(model_id=7, model_name="FOV", num_params=5),
    CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
    CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
    CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
}
# Lookup table: numeric model id -> CameraModel record.
CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model) \
                         for camera_model in CAMERA_MODELS])
def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    """Read and unpack the next bytes from a binary file.
    :param fid:
    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    :param endian_character: Any of {@, =, <, >, !}
    :return: Tuple of read and unpacked values.
    """
    raw = fid.read(num_bytes)
    fmt = endian_character + format_char_sequence
    return struct.unpack(fmt, raw)
def read_cameras_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasText(const std::string& path)
        void Reconstruction::ReadCamerasText(const std::string& path)
    """
    cameras = {}
    with open(path, "r") as fid:
        for raw in fid:
            stripped = raw.strip()
            # Skip blank lines and '#' comment lines.
            if not stripped or stripped.startswith("#"):
                continue
            fields = stripped.split()
            cam_id = int(fields[0])
            cameras[cam_id] = Camera(
                id=cam_id,
                model=fields[1],
                width=int(fields[2]),
                height=int(fields[3]),
                params=np.array(tuple(map(float, fields[4:]))),
            )
    return cameras
def read_cameras_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasBinary(const std::string& path)
        void Reconstruction::ReadCamerasBinary(const std::string& path)
    """
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        # File header: number of cameras as uint64.
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for camera_line_index in range(num_cameras):
            # Per-camera header: camera_id (i), model_id (i), width (Q), height (Q).
            camera_properties = read_next_bytes(
                fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            # Parameter count is fixed per model; each parameter is a double.
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8*num_params,
                                     format_char_sequence="d"*num_params)
            cameras[camera_id] = Camera(id=camera_id,
                                        model=model_name,
                                        width=width,
                                        height=height,
                                        params=np.array(params))
        assert len(cameras) == num_cameras
    return cameras
def read_images_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesText(const std::string& path)
        void Reconstruction::WriteImagesText(const std::string& path)
    """
    images = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":
                # First line of each record: image header
                # (id, qvec[4], tvec[3], camera_id, name).
                elems = line.split()
                image_id = int(elems[0])
                qvec = np.array(tuple(map(float, elems[1:5])))
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                # Second line: (x, y, point3D_id) triplets for every 2D point.
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])),
                                       tuple(map(float, elems[1::3]))])
                point3D_ids = np.array(tuple(map(int, elems[2::3])))
                images[image_id] = Image(
                    id=image_id, qvec=qvec, tvec=tvec,
                    camera_id=camera_id, name=image_name,
                    xys=xys, point3D_ids=point3D_ids)
    return images
def read_images_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for image_index in range(num_reg_images):
            # Per-image header: id (i), qvec (4d), tvec (3d), camera_id (i).
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            # The image name is a null-terminated UTF-8 string.
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":   # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            # 2D observations: (x, y, point3D_id) as double/double/int64 each.
            x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
                                       format_char_sequence="ddq"*num_points2D)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=xys, point3D_ids=point3D_ids)
    return images
def read_points3D_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DText(const std::string& path)
        void Reconstruction::WritePoints3DText(const std::string& path)
    """
    points3D = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":
                # Record layout: id, xyz[3], rgb[3], error, then the track as
                # alternating (image_id, point2D_idx) pairs.
                elems = line.split()
                point3D_id = int(elems[0])
                xyz = np.array(tuple(map(float, elems[1:4])))
                rgb = np.array(tuple(map(int, elems[4:7])))
                error = float(elems[7])
                image_ids = np.array(tuple(map(int, elems[8::2])))
                point2D_idxs = np.array(tuple(map(int, elems[9::2])))
                points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb,
                                               error=error, image_ids=image_ids,
                                               point2D_idxs=point2D_idxs)
    return points3D
def read_points3d_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DBinary(const std::string& path)
        void Reconstruction::WritePoints3DBinary(const std::string& path)
    """
    points3D = {}
    with open(path_to_model_file, "rb") as fid:
        num_points = read_next_bytes(fid, 8, "Q")[0]
        for point_line_index in range(num_points):
            # Per-point header: id (Q), xyz (3d), rgb (3B), error (d).
            binary_point_line_properties = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd")
            point3D_id = binary_point_line_properties[0]
            xyz = np.array(binary_point_line_properties[1:4])
            rgb = np.array(binary_point_line_properties[4:7])
            error = np.array(binary_point_line_properties[7])
            track_length = read_next_bytes(
                fid, num_bytes=8, format_char_sequence="Q")[0]
            # Track: (image_id, point2D_idx) int32 pairs.
            track_elems = read_next_bytes(
                fid, num_bytes=8*track_length,
                format_char_sequence="ii"*track_length)
            image_ids = np.array(tuple(map(int, track_elems[0::2])))
            point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
            points3D[point3D_id] = Point3D(
                id=point3D_id, xyz=xyz, rgb=rgb,
                error=error, image_ids=image_ids,
                point2D_idxs=point2D_idxs)
    return points3D
def read_model(path, ext):
    """Load a COLMAP model from *path* as (cameras, images, points3D).

    ext selects the on-disk form: ".txt" for text files, anything else
    (normally ".bin") for the binary files.
    """
    if ext == ".txt":
        readers = (read_cameras_text, read_images_text, read_points3D_text)
    else:
        readers = (read_cameras_binary, read_images_binary, read_points3d_binary)
    cameras = readers[0](os.path.join(path, "cameras" + ext))
    images = readers[1](os.path.join(path, "images" + ext))
    points3D = readers[2](os.path.join(path, "points3D") + ext)
    return cameras, images, points3D
def qvec2rotmat(qvec):
    """Convert a unit quaternion (w, x, y, z) to a 3x3 rotation matrix."""
    w, x, y, z = qvec
    return np.array([
        [1 - 2 * y * y - 2 * z * z, 2 * x * y - 2 * w * z, 2 * z * x + 2 * w * y],
        [2 * x * y + 2 * w * z, 1 - 2 * x * x - 2 * z * z, 2 * y * z - 2 * w * x],
        [2 * z * x - 2 * w * y, 2 * y * z + 2 * w * x, 1 - 2 * x * x - 2 * y * y]])
def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a quaternion (w, x, y, z) with w >= 0."""
    Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
    K = np.array([
        [Rxx - Ryy - Rzz, 0, 0, 0],
        [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
        [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
        [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
    vals, vecs = np.linalg.eigh(K)
    # eigh sorts eigenvalues ascending; the quaternion is the eigenvector of
    # the largest one, reordered from (x, y, z, w) to (w, x, y, z).
    best = vecs[[3, 0, 1, 2], np.argmax(vals)]
    return -best if best[0] < 0 else best
def main(output_path=None):
    # CLI: print entity counts for the model at sys.argv[1] with extension
    # sys.argv[2] (".txt" or ".bin").
    # NOTE(review): `output_path` is accepted but never used — presumably a
    # leftover from an earlier version; confirm before removing.
    if len(sys.argv) != 3:
        print("Usage: python read_model.py path/to/model/folder [.txt,.bin]")
        return
    cameras, images, points3D = read_model(path=sys.argv[1], ext=sys.argv[2])
    print("num_cameras:", len(cameras))
    print("num_images:", len(images))
    print("num_points3D:", len(points3D))
def check_colmap_gt(gt_dir, colmap_dir, ext, log_num=17):
    """Debug helper: load a ground-truth point cloud and a COLMAP sparse model.

    Subsamples up to 2**log_num vertices from `<gt_dir>/stl_total.ply` and
    gathers the xyz of every COLMAP 3D point in *colmap_dir*; returns both as
    arrays.  NOTE: the two clouds live in different coordinate frames — the
    alignment step is still a TODO (the original stopped in an ipdb
    breakpoint here, now removed).
    """
    from plyfile import PlyData
    filename = os.path.join(gt_dir, 'stl_total.ply')
    plydata = PlyData.read(filename)
    num_points = 2 ** log_num
    skip = len(plydata['vertex']) // num_points
    idx = np.arange(len(plydata['vertex']))[::skip]
    gt_points = np.vstack([[v[0], v[1], v[2]] for v in plydata['vertex'][idx]])
    cameras, images, points3D = read_model(path=colmap_dir, ext=ext)
    points = np.asarray([data.xyz for _, data in points3D.items()])
    # BUGFIX: removed leftover debugger breakpoint (`import ipdb; ipdb.set_trace()`).
    return gt_points, points
if __name__ == "__main__":
    # BUGFIX: this guard previously called check_colmap_gt() with hard-coded
    # developer paths and then exit()-ed, so main() was unreachable; restore
    # the normal command-line entry point.
    main()
# ==== tools/preprocess/colmap_poses/colmap_wrapper.py ====
import os
import subprocess
# $ DATASET_PATH=/path/to/dataset
# $ colmap feature_extractor \
# --database_path $DATASET_PATH/database.db \
# --image_path $DATASET_PATH/images
# $ colmap exhaustive_matcher \
# --database_path $DATASET_PATH/database.db
# $ mkdir $DATASET_PATH/sparse
# $ colmap mapper \
# --database_path $DATASET_PATH/database.db \
# --image_path $DATASET_PATH/images \
# --output_path $DATASET_PATH/sparse
# $ mkdir $DATASET_PATH/dense
def run_colmap(basedir, match_type):
    """Run the COLMAP SfM pipeline on `<basedir>/image`.

    Stages: feature extraction, feature matching (*match_type*, e.g.
    'exhaustive_matcher'), and sparse mapping into `<basedir>/sparse`.
    All COLMAP stdout is appended to `<basedir>/colmap_output.txt`.
    Requires the `colmap` binary on PATH; raises CalledProcessError if any
    stage fails.

    BUGFIX: the log file is now opened with a context manager so the handle
    is closed even when a COLMAP invocation raises.
    """
    logfile_name = os.path.join(basedir, 'colmap_output.txt')
    with open(logfile_name, 'w') as logfile:
        feature_extractor_args = [
            'colmap', 'feature_extractor',
            '--database_path', os.path.join(basedir, 'database.db'),
            '--image_path', os.path.join(basedir, 'image'),
            '--ImageReader.single_camera', '1',
            # '--SiftExtraction.use_gpu', '0',
        ]
        feat_output = subprocess.check_output(feature_extractor_args, universal_newlines=True)
        logfile.write(feat_output)
        print('Features extracted')

        exhaustive_matcher_args = [
            'colmap', match_type,
            '--database_path', os.path.join(basedir, 'database.db'),
        ]
        match_output = subprocess.check_output(exhaustive_matcher_args, universal_newlines=True)
        logfile.write(match_output)
        print('Features matched')

        os.makedirs(os.path.join(basedir, 'sparse'), exist_ok=True)

        mapper_args = [
            'colmap', 'mapper',
            '--database_path', os.path.join(basedir, 'database.db'),
            '--image_path', os.path.join(basedir, 'image'),
            '--output_path', os.path.join(basedir, 'sparse'),  # --export_path changed to --output_path in colmap 3.6
            '--Mapper.num_threads', '16',
            '--Mapper.init_min_tri_angle', '4',
            '--Mapper.multiple_models', '0',
            '--Mapper.extract_colors', '0',
        ]
        map_output = subprocess.check_output(mapper_args, universal_newlines=True)
        logfile.write(map_output)
    print('Sparse map created')
    print( 'Finished running COLMAP, see {} for logs'.format(logfile_name) )
# ==== tools/preprocess/colmap_poses/__init__.py (empty file) ====

# ==== configs/default_fine_s.py ====
import os
from copy import deepcopy
# Config template for the fine-stage (surface) training; consumed via the
# project's config loader.  Later dicts start from deepcopy() of earlier ones
# and override only the listed keys.
expname = None                    # experiment name
basedir = os.path.join('.', 'logs') # where to store ckpts and logs
''' Template of data options
'''
data = dict(
    datadir=None,                 # path to dataset root folder
    dataset_type=None,            # blender | nsvf | blendedmvs | tankstemple | deepvoxels | co3d
    inverse_y=False,              # intrinsict mode (to support blendedmvs, nsvf, tankstemple)
    flip_x=False,                 # to support co3d
    flip_y=False,                 # to support co3d
    annot_path='',                # to support co3d
    split_path='',                # to support co3d
    sequence_name='',             # to support co3d
    load2gpu_on_the_fly=False,    # do not load all images into gpu (to save gpu memory)
    testskip=1,                   # subsample testset to preview results
    white_bkgd=False,             # use white background (note that some dataset don't provide alpha and with blended bg color)
    half_res=False,               # [TODO]
    factor=4,                     # [TODO]
    # Below are forward-facing llff specific settings. Not support yet.
    ndc=False,                    # use ndc coordinate (only for forward-facing; not support yet)
    spherify=False,               # inward-facing
    llffhold=8,                   # testsplit
    load_depths=False,            # load depth
    movie_render_kwargs=dict(),
)
''' Template of training options
'''
coarse_train = dict(
    N_iters=10000,                # number of optimization steps
    N_rand=8192,                  # batch size (number of random rays per optimization step)
    lrate_density=1e-1,           # lr of density voxel grid
    lrate_k0=1e-1,                # lr of color/feature voxel grid
    lrate_rgbnet=1e-3,            # lr of the mlp to predict view-dependent color
    lrate_decay=20,               # lr decay by 0.1 after every lrate_decay*1000 steps
    pervoxel_lr=True,             # view-count-based lr
    pervoxel_lr_downrate=1,       # downsampled image for computing view-count-based lr
    ray_sampler='random',         # ray sampling strategies
    weight_main=1.0,              # weight of photometric loss
    weight_entropy_last=0.01,     # weight of background entropy loss
    weight_rgbper=0.1,            # weight of per-point rgb loss
    tv_every=1,                   # count total variation loss every tv_every step
    tv_from=0,                    # count total variation loss from tv_from step
    tv_end=20000,                   # count total variation loss from tv_from step
    weight_tv_density=0.0,        # weight of total variation loss of density voxel grid
    weight_tv_k0=0.0,             # weight of total variation loss of color/feature voxel grid
    pg_scale=[],                  # checkpoints for progressive scaling
    save_iter=10000
)
# Fine stage: more iters, mask-cache ray sampling, progressive grid scaling.
fine_train = deepcopy(coarse_train)
fine_train.update(dict(
    N_iters=20000,
    N_rand=8192,
    pervoxel_lr=False,
    ray_sampler='in_maskcache',
    weight_entropy_last=0.001,
    weight_rgbper=0.01,
    pg_scale=[1000, 2000, 3000],
    # pg_scale = []
))
# Surface (SDF) stage overrides on top of the fine-stage training options.
surf_train = deepcopy(fine_train)
surf_train.update(dict(
    weight_rgbper=0.0,              # weight of per-point rgb loss
    lrate_sdf=2e-3,           # lr of sdf voxel grid
    pg_scale = [],
    weight_tv_density=0.001,
    tv_terms=dict(
        sdf_tv=1,
        grad_norm=0,
        grad_tv=0
    ),
    lrate_sdfnet=1e-3,
    # weight_diffnorm=1,
))
''' Template of model and rendering options
'''
coarse_model_and_render = dict(
    num_voxels=1024000,           # expected number of voxel
    num_voxels_base=1024000,      # to rescale delta distance
    nearest=False,                # nearest interpolation
    pre_act_density=False,        # pre-activated trilinear interpolation
    in_act_density=False,         # in-activated trilinear interpolation
    bbox_thres=1e-3,              # threshold to determine known free-space in the fine stage
    mask_cache_thres=1e-3,        # threshold to determine a tighten BBox in the fine stage
    rgbnet_dim=0,                 # feature voxel grid dim
    rgbnet_full_implicit=False,   # let the colors MLP ignore feature voxel grid
    rgbnet_direct=True,           # set to False to treat the first 3 dim of feature voxel grid as diffuse rgb
    rgbnet_depth=3,               # depth of the colors MLP (there are rgbnet_depth-1 intermediate features)
    rgbnet_width=128,             # width of the colors MLP
    alpha_init=1e-6,              # set the alpha values everywhere at the start of training
    fast_color_thres=0,           # threshold of alpha value to skip the fine stage sampled point
    maskout_near_cam_vox=True,    # maskout grid points that between cameras and their near planes
    world_bound_scale=1,          # rescale the BBox enclosing the scene
    stepsize=0.5,                 # sampling stepsize in volume rendering
    sdfnet_dim=0,
)
fine_model_and_render = deepcopy(coarse_model_and_render)
fine_model_and_render.update(dict(
    num_voxels=160**3,
    num_voxels_base=160**3,
    rgbnet_dim=12,
    alpha_init=1e-2,
    fast_color_thres=1e-4,
    maskout_near_cam_vox=False,
    world_bound_scale=1.05,
))
surf_model_and_render = deepcopy(fine_model_and_render)
surf_model_and_render.update(dict(
    geo_rgb_dim=3,
    sdfnet_dim=12,                 # feature voxel grid dim
    sdfnet_depth=3,               # depth of the colors MLP (there are rgbnet_depth-1 intermediate features)
    sdfnet_width=128,             # width of the colors MLP
    sdf_refine=True,
    alpha_refine=True,
    displace_step=0.1,
    rgbnet_dim=12,
    rgbnet_direct=True,
    # surface_sampling=True,
    # n_importance=128,
    # up_sample_steps=4,
    #
    rgbnet_full_implicit=False,
    # s_ratio=1000,
    # s_start=0.5,
    # stepsize=2
))
# Keep the module namespace clean when the config is loaded.
del deepcopy
# ==== configs/default.py ====
import os
from copy import deepcopy
expname = None # experiment name
basedir = os.path.join('.', 'logs') # where to store ckpts and logs
''' Template of data options
'''
data = dict(
datadir=None, # path to dataset root folder
dataset_type=None, # blender | nsvf | blendedmvs | tankstemple | deepvoxels | co3d
inverse_y=False, # intrinsict mode (to support blendedmvs, nsvf, tankstemple)
flip_x=False, # to support co3d
flip_y=False, # to support co3d
annot_path='', # to support co3d
split_path='', # to support co3d
sequence_name='', # to support co3d
load2gpu_on_the_fly=False, # do not load all images into gpu (to save gpu memory)
testskip=1, # subsample testset to preview results
white_bkgd=False, # use white background (note that some dataset don't provide alpha and with blended bg color)
half_res=False, # [TODO]
factor=4, # [TODO]
# Below are forward-facing llff specific settings. Not support yet.
ndc=False, # use ndc coordinate (only for forward-facing; not support yet)
spherify=False, # inward-facing
llffhold=8, # testsplit
load_depths=False, # load depth
)
''' Template of training options
'''
coarse_train = dict(
N_iters=10000, # number of optimization steps
N_rand=8192, # batch size (number of random rays per optimization step)
lrate_density=1e-1, # lr of density voxel grid
lrate_k0=1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3, # lr of the mlp to predict view-dependent color
lrate_decay=20, # lr decay by 0.1 after every lrate_decay*1000 steps
pervoxel_lr=True, # view-count-based lr
pervoxel_lr_downrate=1, # downsampled image for computing view-count-based lr
ray_sampler='random', # ray sampling strategies
weight_main=1.0, # weight of photometric loss
weight_entropy_last=0.01, # weight of background entropy loss
weight_rgbper=0.1, # weight of per-point rgb loss
tv_every=1, # count total variation loss every tv_every step
tv_from=0, # count total variation loss from tv_from step
weight_tv_density=0.0, # weight of total variation loss of density voxel grid
weight_tv_k0=0.0, # weight of total variation loss of color/feature voxel grid
pg_scale=[], # checkpoints for progressive scaling
save_iter=10000
)
fine_train = deepcopy(coarse_train)
fine_train.update(dict(
N_iters=20000,
pervoxel_lr=False,
ray_sampler='in_maskcache',
weight_entropy_last=0.001,
weight_rgbper=0.01,
pg_scale=[1000, 2000, 3000],
))
''' Template of model and rendering options
'''
coarse_model_and_render = dict(
num_voxels=1024000, # expected number of voxel
num_voxels_base=1024000, # to rescale delta distance
nearest=False, # nearest interpolation
pre_act_density=False, # pre-activated trilinear interpolation
in_act_density=False, # in-activated trilinear interpolation
bbox_thres=1e-3, # threshold to determine known free-space in the fine stage
mask_cache_thres=1e-3, # threshold to determine a tighten BBox in the fine stage
rgbnet_dim=0, # feature voxel grid dim
rgbnet_full_implicit=False, # let the colors MLP ignore feature voxel grid
rgbnet_direct=True, # set to False to treat the first 3 dim of feature voxel grid as diffuse rgb
rgbnet_depth=3, # depth of the colors MLP (there are rgbnet_depth-1 intermediate features)
rgbnet_width=128, # width of the colors MLP
alpha_init=1e-6, # set the alpha values everywhere at the start of training
fast_color_thres=0, # threshold of alpha value to skip the fine stage sampled point
maskout_near_cam_vox=True, # maskout grid points that between cameras and their near planes
world_bound_scale=1, # rescale the BBox enclosing the scene
stepsize=0.5, # sampling stepsize in volume rendering
)
fine_model_and_render = deepcopy(coarse_model_and_render)
fine_model_and_render.update(dict(
num_voxels=160**3,
num_voxels_base=160**3,
rgbnet_dim=12,
alpha_init=1e-2,
fast_color_thres=1e-4,
maskout_near_cam_vox=False,
world_bound_scale=1.05,
))
del deepcopy
| 4,745 | 46.939394 | 127 | py |
Voxurf | Voxurf-main/configs/dtu_e2e_womask/coarse.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'dtu')
train_all = True
reso_level = 2
exp_stage = 'coarse'
use_sp_color = False
white_list = [24, 40, 110]
black_list = [37, 55, 63, 65, 69, 83, 97, 105, 106, 114, 118, 122]
data = dict(
datadir=os.path.join('.', 'data', 'DTU', 'dtu_scan'),
dataset_type='dtu',
inverse_y=True,
white_bkgd= False,
mode=dict(
train_all=False,
wmask=False,
),
)
surf_train=dict(
load_density_from='',
box_size=1.,
N_iters=10000,
lrate_decay=20,
weight_tv_k0=0.01,
weight_entropy_last=0.0,
weight_tv_density=0.01,
ori_tv=True,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
smooth_grad_tv=0.05,
),
tv_dense_before=20000,
cosine_lr=True,
cosine_lr_cfg=dict(
warm_up_iters=0,
const_warm_up=True,
warm_up_min_ratio=1.0),
lrate_sdf=0.1,
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_bg_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_bg_rgbnet=1e-3,
lrate_bg_density=0.1,
lrate_density=0,
ray_sampler='random',
weight_nearclip=0.,
weight_distortion=0.,
)
surf_model_and_render=dict(
num_voxels=96**3,
num_voxels_base=96**3,
num_voxels_bg=96**3,
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
rgbnet_depth=3,
geo_rgb_dim=3,
combine=False,
use_bound_mask=False,
bg_fast_color_thres=1e-4,
fast_color_thres=1e-4,
smooth_ksize=5,
smooth_sigma=0.8,
sdf_thr=0.5,
tv_in_sphere=False,
use_cosine_sdf=True,
cosine_sdf_mini_ratio=0.1,
smooth_scale=True,
s_ratio=200,
s_start=0.5,
bg_rgbnet_dim=0,
)
| 1,955 | 20.977528 | 126 | py |
Voxurf | Voxurf-main/configs/dtu_e2e_womask/fine.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'dtu')
train_all = False
reso_level = 1
exp_stage = 'fine'
use_sp_color = False
white_list = [24, 40, 110]
black_list = [37, 55, 63, 65, 69, 83, 97, 105, 106, 114, 118, 122]
data = dict(
datadir=os.path.join('.', 'data', 'DTU', 'dtu_scan'),
dataset_type='dtu',
inverse_y=True,
white_bkgd=False,
mode=dict(
train_all=False,
wmask=False,
),
)
surf_train=dict(
load_density_from=None,
load_sdf_from = 'auto',
load_bg_density=True,
ori_tv=False,
box_size=1.,
pg_scale=[15000],
scale_ratio=4.096,
weight_rgb0=0.5, # this is for the first rgbnet
weight_main=1, # this is for k_rgbnet, which is the final output
sdf_reduce=0.3,
N_iters=20000,
lrate_decay=20,
# eval_iters=[100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 17000, 18000, 19000, 20000, 25000, 30000, 35000],
tv_end=30000,
tv_dense_before=30000,
tv_every=3,
weight_tv_density=0.01,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
grad_norm=0,
bg_density_tv=0.01,
smooth_grad_tv=0.05,
),
cosine_lr=True,
cosine_lr_cfg=dict(
warm_up_iters=0,
const_warm_up=True,
warm_up_min_ratio=1.0),
lrate_sdf=5e-3,
decay_step_module={
15000:dict(sdf=0.1),
15000:dict(bg_density=0.1)
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3 * 3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_k_rgbnet=1e-3,
lrate_bg_rgbnet=1e-3,
lrate_bg_density=5e-3,
lrate_bg_k0=1e-1,
ray_sampler='random',
)
surf_model_and_render=dict(
num_voxels=256**3,
num_voxels_base=256**3,
num_voxels_bg=160**3,
posbase_pe=5,
viewbase_pe=1,
k_posbase_pe=5, # default = 5
k_viewbase_pe=1, # default = 4
k_res=True, # default is True
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
rgbnet_depth=4,
k_rgbnet_depth=4, # deeper is better
k_grad_feat=(1.0,), # default = 0 | or set as 3 to feed in the normal itself | or set as geo_rgb_dim to feed in the hierarchical normal
k_sdf_feat=(), # default = 0 | or we could set it as feat_rgb_dim so that it takes in the feature
rgbnet_dim=6, # larger is better
rgbnet_width=192,
center_sdf=True,
k_center_sdf=False,
grad_feat=(0.5, 1.0, 1.5, 2.0,),
sdf_feat=(0.5, 1.0, 1.5, 2.0,),
octave_use_corner=False,
use_grad_norm=True,
octave_feat=(),
use_mlp_residual=False,
surface_sampling=False,
use_trimap=False,
n_importance=64,
up_sample_steps=1,
stepsize=0.5, # whole ray
use_layer_norm=True,
s_ratio=50,
s_start=0.05,
)
| 2,892 | 26.292453 | 140 | py |
Voxurf | Voxurf-main/configs/dtu_e2e/coarse.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'dtu')
train_all = True
reso_level = 2
exp_stage = 'coarse'
use_sp_color = True
white_list = [24, 40, 110]
black_list = [37, 55, 63, 65, 69, 83, 97, 105, 106, 114, 118, 122]
data = dict(
datadir=os.path.join('.', 'data', 'DTU', 'dtu_scan'),
dataset_type='dtu',
inverse_y=True,
white_bkgd= False
)
surf_train=dict(
load_density_from='',
pg_filter=[1000,],
tv_add_grad_new=True,
ori_tv=True,
weight_main=1, # this is for rgb_add
N_iters=10000,
lrate_decay=20,
weight_tv_k0=0.01,
weight_tv_density=0.001,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
smooth_grad_tv=0.05,
),
tv_updates={
1000:dict(
sdf_tv=0.1,
# grad_tv=10,
smooth_grad_tv=0.2
),
},
tv_dense_before=20000,
lrate_sdf=0.1,
decay_step_module={
1000:dict(sdf=0.1),
5000:dict(sdf=0.5),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_rgb_addnet=1e-3, # 1e-3,
)
surf_model_and_render=dict(
num_voxels=96**3,
num_voxels_base=96**3,
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
posbase_pe=5,
viewbase_pe=1,
add_posbase_pe=5,
add_viewbase_pe=4,
rgb_add_res=True,
rgbnet_depth=3,
geo_rgb_dim=3,
smooth_ksize=5,
smooth_sigma=0.8,
s_ratio=50,
s_start=0.2,
)
| 1,671 | 20.714286 | 126 | py |
Voxurf | Voxurf-main/configs/dtu_e2e/fine.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'dtu')
train_all = False
reso_level = 1
exp_stage = 'fine'
use_sp_color = True
white_list = [24, 40, 110]
black_list = [37, 55, 63, 65, 69, 83, 97, 105, 106, 114, 118, 122]
data = dict(
datadir=os.path.join('.', 'data', 'DTU', 'dtu_scan'),
dataset_type='dtu',
inverse_y=True,
white_bkgd=False,
)
surf_train=dict(
load_density_from=None,
load_sdf_from='auto',
pg_scale=[15000],
scale_ratio=4.096,
weight_rgb0=0.5, # this is for the first rgbnet
weight_main=1, # this is for k_rgbnet, which is the final output
sdf_reduce=0.3,
N_iters=20000,
lrate_decay=20,
# eval_iters=[100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 17000, 18000, 19000, 20000, 25000, 30000, 35000],
tv_dense_before=20000,
tv_end=30000,
tv_every=3,
weight_tv_density=0.01,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
grad_norm=0,
smooth_grad_tv=0.05,
),
cosine_lr=True,
cosine_lr_cfg=dict(
warm_up_iters=0,
const_warm_up=True,
warm_up_min_ratio=1.0),
lrate_sdf=5e-3,
decay_step_module={
15000:dict(sdf=0.1),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3 * 3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_k_rgbnet=1e-3,
)
surf_model_and_render=dict(
num_voxels=256**3,
num_voxels_base=256**3,
posbase_pe=5,
viewbase_pe=1,
k_posbase_pe=5, # default = 5
k_viewbase_pe=1, # default = 4
k_res=True, # default = True
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
rgbnet_depth=4,
k_rgbnet_depth=4, # deeper is better
k_grad_feat=(1.0,), # default = 0 | or set as 3 to feed in the normal itself | or set as geo_rgb_dim to feed in the hierarchical normal
k_sdf_feat=(), # default = 0 | or we could set it as feat_rgb_dim so that it takes in the feature
rgbnet_dim=6, # larger is better
rgbnet_width=192,
center_sdf=True,
k_center_sdf=False,
grad_feat=(0.5, 1.0, 1.5, 2.0,),
sdf_feat=(0.5, 1.0, 1.5, 2.0,),
octave_use_corner=False,
use_grad_norm=True,
use_mlp_residual=False,
surface_sampling=False,
use_trimap=False,
n_importance=64,
up_sample_steps=1,
stepsize=0.5,
s_ratio=50,
s_start=0.05,
)
| 2,507 | 26.866667 | 140 | py |
Voxurf | Voxurf-main/configs/deepvoxels_e2e/coarse.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'deepvoxels')
train_all = True
reso_level = 2
exp_stage = 'coarse'
data = dict(
datadir=os.path.join('.', 'data', 'deepvoxels'),
dataset_type='deepvoxels',
# inverse_y=True,
white_bkgd=True,
)
surf_train=dict(
load_density_from='',
pg_filter=[1000,],
tv_add_grad_new=True,
ori_tv=True,
weight_main=1, # this is for rgb_add
N_iters=10000,
lrate_decay=20,
weight_tv_k0=0.01,
weight_tv_density=0.001,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
smooth_grad_tv=0.05,
),
tv_updates={
1000:dict(
sdf_tv=0.1,
# grad_tv=10,
smooth_grad_tv=0.2
),
},
tv_dense_before=20000,
lrate_sdf=0.1,
decay_step_module={
1000:dict(sdf=0.1),
5000:dict(sdf=0.5),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_rgb_addnet=1e-3, # 1e-3,
)
surf_model_and_render=dict(
num_voxels=96**3,
num_voxels_base=96**3,
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
posbase_pe=5,
viewbase_pe=1,
add_posbase_pe=5,
add_viewbase_pe=4,
rgb_add_res=True,
rgbnet_depth=3,
geo_rgb_dim=3,
smooth_ksize=5,
smooth_sigma=0.8,
s_ratio=50,
s_start=0.2,
)
| 1,566 | 20.465753 | 126 | py |
Voxurf | Voxurf-main/configs/deepvoxels_e2e/fine.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'deepvoxels')
train_all = False
reso_level = 1
exp_stage = 'fine'
data = dict(
datadir=os.path.join('.', 'data', 'deepvoxels'),
dataset_type='deepvoxels',
# inverse_y=True,
white_bkgd=True,
)
surf_train=dict(
load_density_from=None,
load_sdf_from='auto', # './logs/dtu/scan40_smooth_sdf/surf_001000.tar',
pg_scale=[15000],
scale_ratio=4.096,
weight_rgb0=0.5, # this is for the first rgbnet
weight_main=1, # this is for k_rgbnet, which is the final output
sdf_reduce=0.3,
N_iters=20000,
lrate_decay=20,
# eval_iters=[100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 17000, 18000, 19000, 20000, 25000, 30000, 35000],
tv_dense_before=20000,
tv_end=30000,
tv_every=3,
weight_tv_density=0.01,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
grad_norm=0,
smooth_grad_tv=0.05,
),
cosine_lr=True,
cosine_lr_cfg=dict(
warm_up_iters=0,
const_warm_up=True,
warm_up_min_ratio=1.0),
lrate_sdf=5e-3,
decay_step_module={
15000:dict(sdf=0.1),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3 * 3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_k_rgbnet=1e-3,
)
surf_model_and_render=dict(
num_voxels=256**3,
num_voxels_base=256**3,
posbase_pe=5,
viewbase_pe=1,
k_posbase_pe=5, # default = 5
k_viewbase_pe=1, # default = 4
k_res=True, # default is True
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
rgbnet_depth=4,
k_rgbnet_depth=4, # deeper is better
k_grad_feat=(1.0,), # default = 0 | or set as 3 to feed in the normal itself | or set as geo_rgb_dim to feed in the hierarchical normal
k_sdf_feat=(), # default = 0 | or we could set it as feat_rgb_dim so that it takes in the feature
rgbnet_dim=6, # larger is better
rgbnet_width=192,
center_sdf=True,
k_center_sdf=False,
grad_feat=(0.5, 1.0, 1.5, 2.0,),
sdf_feat=(0.5, 1.0, 1.5, 2.0,),
octave_use_corner=False,
use_grad_norm=True,
use_mlp_residual=False,
surface_sampling=False,
use_trimap=False,
n_importance=64,
up_sample_steps=1,
stepsize=0.5, # whole ray
s_ratio=50,
s_start=0.05,
)
| 2,473 | 27.436782 | 140 | py |
Voxurf | Voxurf-main/configs/blendedmvs_e2e/coarse.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'blended_mvs')
train_all = True
reso_level = 2
exp_stage = 'coarse'
data = dict(
datadir=os.path.join('.', 'data', 'BlendedMVS'),
dataset_type='blendedmvs',
inverse_y=True,
white_bkgd=True, # need to manually adjust the background color for BlendedMVS
)
surf_train=dict(
load_density_from='',
pg_filter=[1000,],
tv_add_grad_new=True,
ori_tv=True,
weight_main=1, # this is for rgb_add
N_iters=10000,
lrate_decay=20,
weight_tv_k0=0.01,
weight_tv_density=0.001,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
smooth_grad_tv=0.05,
),
tv_updates={
1000:dict(
sdf_tv=0.1,
# grad_tv=10,
smooth_grad_tv=0.2
),
},
tv_dense_before=20000,
lrate_sdf=0.1,
decay_step_module={
1000:dict(sdf=0.1),
5000:dict(sdf=0.5),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_rgb_addnet=1e-3, # 1e-3,
)
surf_model_and_render=dict(
num_voxels=96**3,
num_voxels_base=96**3,
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
posbase_pe=5,
viewbase_pe=1,
add_posbase_pe=5,
add_viewbase_pe=4,
rgb_add_res=True,
rgbnet_depth=3,
geo_rgb_dim=3,
smooth_ksize=5,
smooth_sigma=0.8,
s_ratio=50,
s_start=0.2,
)
| 1,627 | 21.30137 | 126 | py |
Voxurf | Voxurf-main/configs/blendedmvs_e2e/fine.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'blended_mvs')
train_all = False
reso_level = 1
exp_stage = 'fine'
data = dict(
datadir=os.path.join('.', 'data', 'BlendedMVS'),
dataset_type='blendedmvs',
inverse_y=True,
white_bkgd=True, # need to manually adjust the background color for BlendedMVS
)
surf_train=dict(
load_density_from=None,
load_sdf_from='auto', # './logs/dtu/scan40_smooth_sdf/surf_001000.tar',
pg_scale=[15000],
scale_ratio=4.096,
weight_rgb0=0.5, # this is for the first rgbnet
weight_main=1, # this is for k_rgbnet, which is the final output
sdf_reduce=0.3,
N_iters=20000,
lrate_decay=20,
# eval_iters=[100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 17000, 18000, 19000, 20000, 25000, 30000, 35000],
tv_dense_before=20000,
tv_end=30000,
tv_every=3,
weight_tv_density=0.01,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
grad_norm=0,
smooth_grad_tv=0.05,
),
cosine_lr=True,
cosine_lr_cfg=dict(
warm_up_iters=0,
const_warm_up=True,
warm_up_min_ratio=1.0),
lrate_sdf=5e-3,
decay_step_module={
15000:dict(sdf=0.1),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3 * 3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_k_rgbnet=1e-3,
)
# CUDA_VISIBLE_DEVICES=3 python run_surf_new.py --config configs/dtu_test/head_test.py --render_test -s head_test_geo19 --no_reload --sdf_mode neus_v4 --scene 122
surf_model_and_render=dict(
num_voxels=256**3,
num_voxels_base=256**3,
posbase_pe=5,
viewbase_pe=1,
k_posbase_pe=5, # default = 5
k_viewbase_pe=1, # default = 4
k_res=True, # default is True
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
rgbnet_depth=4,
k_rgbnet_depth=4, # deeper is better
k_grad_feat=(1.0,), # default = 0 | or set as 3 to feed in the normal itself | or set as geo_rgb_dim to feed in the hierarchical normal
k_sdf_feat=(), # default = 0 | or we could set it as feat_rgb_dim so that it takes in the feature
rgbnet_dim=6, # larger is better
rgbnet_width=192,
center_sdf=True,
k_center_sdf=False,
grad_feat=(0.5, 1.0, 1.5, 2.0,),
sdf_feat=(0.5, 1.0, 1.5, 2.0,),
octave_use_corner=False,
use_grad_norm=True,
use_mlp_residual=False,
surface_sampling=False,
use_trimap=False,
n_importance=64,
up_sample_steps=1,
stepsize=0.5, # whole ray
s_ratio=50,
s_start=0.05,
)
| 2,697 | 29.659091 | 162 | py |
Voxurf | Voxurf-main/configs/tanks_and_temple_e2e/coarse.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'tanks_and_temple')
train_all = True
reso_level = 2
exp_stage = 'coarse'
data = dict(
datadir=os.path.join('.', 'data', 'TanksAndTemple'),
dataset_type='tankstemple',
inverse_y=True,
load2gpu_on_the_fly=True,
white_bkgd=True,
)
surf_train=dict(
load_density_from='',
pg_filter=[1000,],
tv_add_grad_new=True,
ori_tv=True,
weight_main=1, # this is for rgb_add
N_iters=10000,
lrate_decay=20,
weight_tv_k0=0.01,
weight_tv_density=0.001,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
smooth_grad_tv=0.05,
),
tv_updates={
1000:dict(
sdf_tv=0.1,
# grad_tv=10,
smooth_grad_tv=0.2
),
},
tv_dense_before=20000,
lrate_sdf=0.1,
decay_step_module={
1000:dict(sdf=0.1),
5000:dict(sdf=0.5),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_rgb_addnet=1e-3, # 1e-3,
)
surf_model_and_render=dict(
num_voxels=96**3,
num_voxels_base=96**3,
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
posbase_pe=5,
viewbase_pe=1,
add_posbase_pe=5,
add_viewbase_pe=4,
rgb_add_res=True,
rgbnet_depth=3,
geo_rgb_dim=3,
smooth_ksize=5,
smooth_sigma=0.8,
s_ratio=50,
s_start=0.2,
)
| 1,605 | 20.702703 | 126 | py |
Voxurf | Voxurf-main/configs/tanks_and_temple_e2e/fine.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'tanks_and_temple')
train_all = False
reso_level = 1
exp_stage = 'fine'
data = dict(
datadir=os.path.join('.', 'data', 'TanksAndTemple'),
dataset_type='tankstemple',
inverse_y=True,
load2gpu_on_the_fly=True,
white_bkgd=True,
)
surf_train=dict(
load_density_from=None,
load_sdf_from='auto',
pg_scale=[15000],
scale_ratio=4.096,
weight_rgb0=0.5, # this is for the first rgbnet
weight_main=1, # this is for k_rgbnet, which is the final output
sdf_reduce=0.3,
N_iters=20000,
lrate_decay=20,
# eval_iters=[100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 17000, 18000, 19000, 20000, 25000, 30000, 35000],
tv_dense_before=20000,
tv_end=30000,
tv_every=3,
weight_tv_density=0.01,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
grad_norm=0,
smooth_grad_tv=0.05,
),
cosine_lr=True,
cosine_lr_cfg=dict(
warm_up_iters=0,
const_warm_up=True,
warm_up_min_ratio=1.0),
lrate_sdf=5e-3,
decay_step_module={
15000:dict(sdf=0.1),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3 * 3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_k_rgbnet=1e-3,
)
surf_model_and_render=dict(
num_voxels=256**3,
num_voxels_base=256**3,
posbase_pe=5,
viewbase_pe=1,
k_posbase_pe=5, # default = 5
k_viewbase_pe=1, # default = 4
k_res=True, # default = True
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
rgbnet_depth=4,
k_rgbnet_depth=4, # deeper is better
k_grad_feat=(1.0,), # default = 0 | or set as 3 to feed in the normal itself | or set as geo_rgb_dim to feed in the hierarchical normal
k_sdf_feat=(), # default = 0 | or we could set it as feat_rgb_dim so that it takes in the feature
rgbnet_dim=6, # larger is better
rgbnet_width=192,
center_sdf=True,
k_center_sdf=False,
grad_feat=(0.5, 1.0, 1.5, 2.0,),
sdf_feat=(0.5, 1.0, 1.5, 2.0,),
octave_use_corner=False,
use_grad_norm=True,
use_mlp_residual=False,
surface_sampling=False,
use_trimap=False,
n_importance=64,
up_sample_steps=1,
stepsize=0.5,
s_ratio=50,
s_start=0.05,
)
| 2,442 | 26.761364 | 140 | py |
Voxurf | Voxurf-main/configs/mobilebrick_e2e_womask/coarse.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = ''
basedir = os.path.join('.', 'logs', 'mobile_brick')
train_all = True
reso_level = 2
exp_stage = 'coarse'
use_sp_color = False
data = dict(
datadir=os.path.join('.', 'data', 'mobile_brick', 'test'),
dataset_type='mobile_brick',
inverse_y=True,
white_bkgd= False,
mode=dict(
train_all=False,
wmask=False,
),
)
surf_train=dict(
load_density_from='',
box_size=1.,
N_iters=10000,
lrate_decay=20,
weight_tv_k0=0.01,
weight_entropy_last=0.0,
weight_tv_density=0.01,
ori_tv=True,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
smooth_grad_tv=0.05,
),
tv_dense_before=20000,
cosine_lr=True,
cosine_lr_cfg=dict(
warm_up_iters=0,
const_warm_up=True,
warm_up_min_ratio=1.0),
lrate_sdf=0.1,
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_bg_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_bg_rgbnet=1e-3,
lrate_bg_density=0.1,
lrate_density=0,
ray_sampler='random',
weight_nearclip=0.,
weight_distortion=0.,
)
surf_model_and_render=dict(
num_voxels=96**3,
num_voxels_base=96**3,
num_voxels_bg=96**3,
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
rgbnet_depth=3,
geo_rgb_dim=3,
combine=False,
use_bound_mask=False,
bg_fast_color_thres=1e-4,
fast_color_thres=1e-4,
smooth_ksize=5,
smooth_sigma=0.8,
sdf_thr=0.5,
tv_in_sphere=False,
use_cosine_sdf=True,
cosine_sdf_mini_ratio=0.1,
smooth_scale=True,
s_ratio=200,
s_start=0.5,
bg_rgbnet_dim=0,
)
| 1,880 | 20.62069 | 126 | py |
Voxurf | Voxurf-main/configs/mobilebrick_e2e_womask/fine.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = ''
basedir = os.path.join('.', 'logs', 'mobile_brick')
train_all = False
reso_level = 1
exp_stage = 'fine'
use_sp_color = False
data = dict(
datadir=os.path.join('.', 'data', 'mobile_brick', 'test'),
dataset_type='mobile_brick',
inverse_y=True,
white_bkgd= False,
mode=dict(
train_all=False,
wmask=False,
),
)
surf_train=dict(
load_density_from=None,
load_sdf_from = 'auto',
load_bg_density=True,
ori_tv=False,
box_size=1.,
pg_scale=[15000],
scale_ratio=4.096,
weight_rgb0=0.5, # this is for the first rgbnet
weight_main=1, # this is for k_rgbnet, which is the final output
sdf_reduce=0.3,
N_iters=20000,
lrate_decay=20,
# eval_iters=[100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 17000, 18000, 19000, 20000, 25000, 30000, 35000],
tv_end=30000,
tv_dense_before=30000,
tv_every=3,
weight_tv_density=0.01,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
grad_norm=0,
bg_density_tv=0.01,
smooth_grad_tv=0.05,
),
cosine_lr=True,
cosine_lr_cfg=dict(
warm_up_iters=0,
const_warm_up=True,
warm_up_min_ratio=1.0),
lrate_sdf=5e-3,
decay_step_module={
15000:dict(sdf=0.1),
15000:dict(bg_density=0.1)
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3 * 3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_k_rgbnet=1e-3,
lrate_bg_rgbnet=1e-3,
lrate_bg_density=5e-3,
lrate_bg_k0=1e-1,
ray_sampler='random',
)
surf_model_and_render=dict(
num_voxels=256**3,
num_voxels_base=256**3,
num_voxels_bg=160**3,
posbase_pe=5,
viewbase_pe=1,
k_posbase_pe=5, # default = 5
k_viewbase_pe=1, # default = 4
k_res=True, # default is True
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
rgbnet_depth=4,
k_rgbnet_depth=4, # deeper is better
k_grad_feat=(1.0,), # default = 0 | or set as 3 to feed in the normal itself | or set as geo_rgb_dim to feed in the hierarchical normal
k_sdf_feat=(), # default = 0 | or we could set it as feat_rgb_dim so that it takes in the feature
rgbnet_dim=6, # larger is better
rgbnet_width=192,
center_sdf=True,
k_center_sdf=False,
grad_feat=(0.5, 1.0, 1.5, 2.0,),
sdf_feat=(0.5, 1.0, 1.5, 2.0,),
octave_use_corner=False,
use_grad_norm=True,
octave_feat=(),
use_mlp_residual=False,
surface_sampling=False,
use_trimap=False,
n_importance=64,
up_sample_steps=1,
stepsize=0.5, # whole ray
use_layer_norm=True,
s_ratio=50,
s_start=0.05,
)
| 2,818 | 26.105769 | 140 | py |
Voxurf | Voxurf-main/configs/custom_e2e/coarse.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'custom')
train_all = True
reso_level = 2
exp_stage = 'coarse'
data = dict(
datadir=os.path.join('.', 'data'),
dataset_type='dtu',
inverse_y=True,
white_bkgd= False
)
surf_train=dict(
load_density_from='',
pg_filter=[1000,],
tv_add_grad_new=True,
ori_tv=True,
weight_main=1, # this is for rgb_add
N_iters=10000,
lrate_decay=20,
weight_tv_k0=0.01,
weight_tv_density=0.001,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
smooth_grad_tv=0.05,
),
tv_updates={
1000:dict(
sdf_tv=0.1,
# grad_tv=10,
smooth_grad_tv=0.2
),
},
tv_dense_before=20000,
lrate_sdf=0.1,
decay_step_module={
1000:dict(sdf=0.1),
5000:dict(sdf=0.5),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_rgb_addnet=1e-3, # 1e-3,
)
surf_model_and_render=dict(
num_voxels=96**3,
num_voxels_base=96**3,
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
posbase_pe=5,
viewbase_pe=1,
add_posbase_pe=5,
add_viewbase_pe=4,
rgb_add_res=True,
rgbnet_depth=3,
geo_rgb_dim=3,
smooth_ksize=5,
smooth_sigma=0.8,
s_ratio=50,
s_start=0.2,
)
| 1,540 | 20.109589 | 126 | py |
Voxurf | Voxurf-main/configs/custom_e2e/fine.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'custom')
train_all = False
reso_level = 1
exp_stage = 'fine'
data = dict(
datadir=os.path.join('.', 'data'),
dataset_type='dtu',
inverse_y=True,
white_bkgd=False,
)
surf_train=dict(
load_density_from=None,
load_sdf_from='auto',
pg_scale=[15000],
scale_ratio=4.096,
weight_rgb0=0.5, # this is for the first rgbnet
weight_main=1, # this is for k_rgbnet, which is the final output
sdf_reduce=0.3,
N_iters=20000,
lrate_decay=20,
# eval_iters=[100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 17000, 18000, 19000, 20000, 25000, 30000, 35000],
tv_dense_before=20000,
tv_end=30000,
tv_every=3,
weight_tv_density=0.01,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
grad_norm=0,
smooth_grad_tv=0.05,
),
cosine_lr=True,
cosine_lr_cfg=dict(
warm_up_iters=0,
const_warm_up=True,
warm_up_min_ratio=1.0),
lrate_sdf=5e-3,
decay_step_module={
15000:dict(sdf=0.1),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3 * 3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_k_rgbnet=1e-3,
)
surf_model_and_render=dict(
num_voxels=256**3,
num_voxels_base=256**3,
posbase_pe=5,
viewbase_pe=1,
k_posbase_pe=5, # default = 5
k_viewbase_pe=1, # default = 4
k_res=True, # default = True
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
rgbnet_depth=4,
k_rgbnet_depth=4, # deeper is better
k_grad_feat=(1.0,), # default = 0 | or set as 3 to feed in the normal itself | or set as geo_rgb_dim to feed in the hierarchical normal
k_sdf_feat=(), # default = 0 | or we could set it as feat_rgb_dim so that it takes in the feature
rgbnet_dim=6, # larger is better
rgbnet_width=192,
center_sdf=True,
k_center_sdf=False,
grad_feat=(0.5, 1.0, 1.5, 2.0,),
sdf_feat=(0.5, 1.0, 1.5, 2.0,),
octave_use_corner=False,
use_grad_norm=True,
use_mlp_residual=False,
surface_sampling=False,
use_trimap=False,
n_importance=64,
up_sample_steps=1,
stepsize=0.5,
s_ratio=50,
s_start=0.05,
)
| 2,377 | 26.333333 | 140 | py |
Voxurf | Voxurf-main/configs/nvsf_e2e/coarse.py | import os
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'nsvf')
train_all = True
reso_level = 2
exp_stage = 'coarse'
data = dict(
datadir=os.path.join('.', 'data', 'Synthetic_NSVF'),
dataset_type='nsvf',
inverse_y=True,
white_bkgd=True,
)
surf_train=dict(
load_density_from='',
pg_filter=[1000,],
tv_add_grad_new=True,
ori_tv=True,
weight_main=1, # this is for rgb_add
N_iters=10000,
lrate_decay=20,
weight_tv_k0=0.01,
weight_tv_density=0.001,
tv_terms=dict(
sdf_tv=0.1,
grad_tv=0,
smooth_grad_tv=0.05,
),
tv_updates={
1000:dict(
sdf_tv=0.1,
# grad_tv=10,
smooth_grad_tv=0.2
),
},
tv_dense_before=20000,
lrate_sdf=0.1,
decay_step_module={
1000:dict(sdf=0.1),
5000:dict(sdf=0.5),
},
lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
lrate_rgbnet=1e-3, # 1e-3, # lr of the mlp to predict view-dependent color
lrate_rgb_addnet=1e-3, # 1e-3,
)
surf_model_and_render=dict(
num_voxels=96**3,
num_voxels_base=96**3,
rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
posbase_pe=5,
viewbase_pe=1,
add_posbase_pe=5,
add_viewbase_pe=4,
rgb_add_res=True,
rgbnet_depth=3,
geo_rgb_dim=3,
smooth_ksize=5,
smooth_sigma=0.8,
s_ratio=50,
s_start=0.2,
)
| 1,557 | 20.054054 | 126 | py |
Voxurf | Voxurf-main/configs/nvsf_e2e/fine.py | import os
# Fine-stage Voxurf config for the Synthetic-NSVF dataset.
# Inherits shared defaults from ../default_fine_s.py (mmcv-style _base_ mechanism).
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'nsvf')
train_all = False
reso_level = 1
exp_stage = 'fine'
# Dataset location and loading options.
data = dict(
    datadir=os.path.join('.', 'data', 'Synthetic_NSVF'),
    dataset_type='nsvf',
    inverse_y=True,
    white_bkgd=True,
)
# Optimization settings for the fine surface (SDF) stage; the SDF is
# initialized from the coarse stage ('auto' resolves the checkpoint path).
surf_train=dict(
    load_density_from=None,
    load_sdf_from='auto',
    pg_scale=[15000],
    scale_ratio=4.096,
    weight_rgb0=0.5, # this is for the first rgbnet
    weight_main=1, # this is for k_rgbnet, which is the final output
    sdf_reduce=0.3,
    N_iters=20000,
    lrate_decay=20,
    # eval_iters=[100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 17000, 18000, 19000, 20000, 25000, 30000, 35000],
    tv_dense_before=20000,
    tv_end=30000,
    tv_every=3,
    weight_tv_density=0.01,
    # Total-variation regularization terms on the SDF grid and its gradient.
    tv_terms=dict(
        sdf_tv=0.1,
        grad_tv=0,
        grad_norm=0,
        smooth_grad_tv=0.05,
    ),
    cosine_lr=True,
    cosine_lr_cfg=dict(
        warm_up_iters=0,
        const_warm_up=True,
        warm_up_min_ratio=1.0),
    lrate_sdf=5e-3,
    # Per-module learning-rate decay at the given iterations.
    decay_step_module={
        15000:dict(sdf=0.1),
    },
    lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
    lrate_rgbnet=1e-3 * 3, # 1e-3, # lr of the mlp to predict view-dependent color
    lrate_k_rgbnet=1e-3,
)
# Model / renderer hyper-parameters for the fine stage (256^3 voxel grid).
surf_model_and_render=dict(
    num_voxels=256**3,
    num_voxels_base=256**3,
    posbase_pe=5,
    viewbase_pe=1,
    k_posbase_pe=5, # default = 5
    k_viewbase_pe=1, # default = 4
    k_res=True, # default = True
    rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
    rgbnet_depth=4,
    k_rgbnet_depth=4, # deeper is better
    k_grad_feat=(1.0,), # default = 0 | or set as 3 to feed in the normal itself | or set as geo_rgb_dim to feed in the hierarchical normal
    k_sdf_feat=(), # default = 0 | or we could set it as feat_rgb_dim so that it takes in the feature
    rgbnet_dim=6, # larger is better
    rgbnet_width=192,
    center_sdf=True,
    k_center_sdf=False,
    grad_feat=(0.5, 1.0, 1.5, 2.0,),
    sdf_feat=(0.5, 1.0, 1.5, 2.0,),
    octave_use_corner=False,
    use_grad_norm=True,
    use_mlp_residual=False,
    surface_sampling=False,
    use_trimap=False,
    n_importance=64,
    up_sample_steps=1,
    stepsize=0.5,
    s_ratio=50,
    s_start=0.05,
)
| 2,394 | 26.215909 | 140 | py |
Voxurf | Voxurf-main/configs/nerf_synthetic_e2e/coarse.py | import os
# Coarse-stage Voxurf config for the NeRF-synthetic (blender) dataset.
# Inherits shared defaults from ../default_fine_s.py (mmcv-style _base_ mechanism).
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'nerf_synthetic')
train_all = True
reso_level = 2
exp_stage = 'coarse'
# Dataset location and loading options.
data = dict(
    datadir=os.path.join('.', 'data', 'nerf_synthetic'),
    dataset_type='blender',
    # inverse_y=True,
    white_bkgd=True,
)
# Optimization settings for the coarse surface (SDF) stage.
surf_train=dict(
    load_density_from='',
    pg_filter=[1000,],
    tv_add_grad_new=True,
    ori_tv=True,
    weight_main=1, # this is for rgb_add
    N_iters=10000,
    lrate_decay=20,
    weight_tv_k0=0.01,
    weight_tv_density=0.001,
    # Total-variation regularization terms on the SDF grid and its gradient.
    tv_terms=dict(
        sdf_tv=0.1,
        grad_tv=0,
        smooth_grad_tv=0.05,
    ),
    # Schedule: TV weights switched at the given iteration.
    tv_updates={
        1000:dict(
            sdf_tv=0.1,
            # grad_tv=10,
            smooth_grad_tv=0.2
        ),
    },
    tv_dense_before=20000,
    lrate_sdf=0.1,
    # Per-module learning-rate decay at the given iterations.
    decay_step_module={
        1000:dict(sdf=0.1),
        5000:dict(sdf=0.5),
    },
    lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
    lrate_rgbnet=1e-3, # 1e-3, # lr of the mlp to predict view-dependent color
    lrate_rgb_addnet=1e-3, # 1e-3,
)
# Model / renderer hyper-parameters for the coarse stage (96^3 voxel grid).
surf_model_and_render=dict(
    num_voxels=96**3,
    num_voxels_base=96**3,
    rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
    posbase_pe=5,
    viewbase_pe=1,
    add_posbase_pe=5,
    add_viewbase_pe=4,
    rgb_add_res=True,
    rgbnet_depth=3,
    geo_rgb_dim=3,
    smooth_ksize=5,
    smooth_sigma=0.8,
    s_ratio=50,
    s_start=0.2,
)
| 1,571 | 20.534247 | 126 | py |
Voxurf | Voxurf-main/configs/nerf_synthetic_e2e/fine.py | import os
# Fine-stage Voxurf config for the NeRF-synthetic (blender) dataset.
# Inherits shared defaults from ../default_fine_s.py (mmcv-style _base_ mechanism).
_base_ = os.path.join('..', 'default_fine_s.py')
expname = 'scan'
basedir = os.path.join('.', 'logs', 'nerf_synthetic')
train_all = False
reso_level = 1
exp_stage = 'fine'
# Dataset location and loading options.
data = dict(
    datadir=os.path.join('.', 'data', 'nerf_synthetic'),
    dataset_type='blender',
    # inverse_y=True,
    white_bkgd=True,
)
# Optimization settings for the fine surface (SDF) stage; the SDF is
# initialized from the coarse stage ('auto' resolves the checkpoint path).
surf_train=dict(
    load_density_from=None,
    load_sdf_from='auto', # './logs/dtu/scan40_smooth_sdf/surf_001000.tar',
    pg_scale=[15000],
    scale_ratio=4.096,
    weight_rgb0=0.5, # this is for the first rgbnet
    weight_main=1, # this is for k_rgbnet, which is the final output
    sdf_reduce=0.3,
    N_iters=20000,
    lrate_decay=20,
    # eval_iters=[100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 17000, 18000, 19000, 20000, 25000, 30000, 35000],
    tv_dense_before=20000,
    tv_end=30000,
    tv_every=3,
    weight_tv_density=0.01,
    # Total-variation regularization terms on the SDF grid and its gradient.
    tv_terms=dict(
        sdf_tv=0.1,
        grad_tv=0,
        grad_norm=0,
        smooth_grad_tv=0.05,
    ),
    cosine_lr=True,
    cosine_lr_cfg=dict(
        warm_up_iters=0,
        const_warm_up=True,
        warm_up_min_ratio=1.0),
    lrate_sdf=5e-3,
    # Per-module learning-rate decay at the given iterations.
    decay_step_module={
        15000:dict(sdf=0.1),
    },
    lrate_k0=1e-1, #1e-1, # lr of color/feature voxel grid
    lrate_rgbnet=1e-3 * 3, # 1e-3, # lr of the mlp to predict view-dependent color
    lrate_k_rgbnet=1e-3,
)
# CUDA_VISIBLE_DEVICES=3 python run_surf_new.py --config configs/dtu_test/head_test.py --render_test -s head_test_geo19 --no_reload --sdf_mode neus_v4 --scene 122
# Model / renderer hyper-parameters for the fine stage (256^3 voxel grid).
surf_model_and_render=dict(
    num_voxels=256**3,
    num_voxels_base=256**3,
    posbase_pe=5,
    viewbase_pe=1,
    k_posbase_pe=5, # default = 5
    k_viewbase_pe=1, # default = 4
    k_res=True, # default is True
    rgbnet_full_implicit=False, # by using a full mlp without local feature for rgb, the info for the geometry would be better
    rgbnet_depth=4,
    k_rgbnet_depth=4, # deeper is better
    k_grad_feat=(1.0,), # default = 0 | or set as 3 to feed in the normal itself | or set as geo_rgb_dim to feed in the hierarchical normal
    k_sdf_feat=(), # default = 0 | or we could set it as feat_rgb_dim so that it takes in the feature
    rgbnet_dim=6, # larger is better
    rgbnet_width=192,
    center_sdf=True,
    k_center_sdf=False,
    grad_feat=(0.5, 1.0, 1.5, 2.0,),
    sdf_feat=(0.5, 1.0, 1.5, 2.0,),
    octave_use_corner=False,
    use_grad_norm=True,
    use_mlp_residual=False,
    surface_sampling=False,
    use_trimap=False,
    n_importance=64,
    up_sample_steps=1,
    stepsize=0.5, # whole ray
    s_ratio=50,
    s_start=0.05,
)
| 2,641 | 29.022727 | 162 | py |
Voxurf | Voxurf-main/lib/load_dtu.py | import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import imageio
# This function is borrowed from IDR: https://github.com/lioryariv/idr
def load_K_Rt_from_P(filename, P=None):
    """Decompose a 3x4 projection matrix into intrinsics and camera-to-world pose.

    If `P` is None, the matrix is parsed from `filename` (4 whitespace-separated
    values per row; an optional header row of 4 lines total is skipped).
    Returns (intrinsics 4x4, pose 4x4) as numpy arrays.
    """
    if P is None:
        rows = open(filename).read().splitlines()
        if len(rows) == 4:
            rows = rows[1:]
        values = [[tok[0], tok[1], tok[2], tok[3]] for tok in (r.split(" ") for r in rows)]
        P = np.asarray(values).astype(np.float32).squeeze()

    # OpenCV splits P into K (intrinsics), R (rotation) and t (homogeneous camera center).
    decomposition = cv.decomposeProjectionMatrix(P)
    K, R, t = decomposition[0], decomposition[1], decomposition[2]
    K = K / K[2, 2]

    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K

    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    pose[:3, 3] = (t[:3] / t[3])[:, 0]

    return intrinsics, pose
def load_dtu_data(basedir, normalize=True, reso_level=2, mask=True, white_bg=True):
    """Load a DTU-style scene: images, camera poses, intrinsics and optional masks.

    basedir: scene directory containing image/ (or rgb/), mask/ and camera .npz files.
    normalize: use cameras_sphere.npz (scene normalized into a unit sphere);
        otherwise cameras_large.npz with no scale matrices.
    reso_level: integer downsampling factor applied to images, masks and intrinsics.
    mask: if True, composite images against a constant background using the masks.
    white_bg: background color used when masking (white if True, black otherwise).

    Returns (imgs, poses, render_poses, [H, W, focal], K, i_split, scale_mat, masks);
    `scale_mat` and `masks` are None when unavailable.
    """
    rgb_paths = sorted(glob(os.path.join(basedir, 'image', '*png')))
    if len(rgb_paths) == 0:
        rgb_paths = sorted(glob(os.path.join(basedir, 'image', '*jpg')))
    if len(rgb_paths) == 0:
        rgb_paths = sorted(glob(os.path.join(basedir, 'rgb', '*png')))
    mask_paths = sorted(glob(os.path.join(basedir, 'mask', '*png')))
    if len(mask_paths) == 0:
        mask_paths = sorted(glob(os.path.join(basedir, 'mask', '*jpg')))

    render_cameras_name = 'cameras_sphere.npz' if normalize else 'cameras_large.npz'
    camera_dict = np.load(os.path.join(basedir, render_cameras_name))
    # world_mat projects world coordinates to image coordinates.
    world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    if normalize:
        # scale_mat maps the scene into a unit sphere at the origin.
        scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    else:
        scale_mats_np = None

    all_intrinsics = []
    all_poses = []
    all_imgs = []
    all_masks = []
    for i, (world_mat, im_name) in enumerate(zip(world_mats_np, rgb_paths)):
        if normalize:
            P = world_mat @ scale_mats_np[i]
        else:
            P = world_mat
        P = P[:3, :4]
        intrinsics, pose = load_K_Rt_from_P(None, P)
        all_intrinsics.append(intrinsics)
        all_poses.append(pose)
        if len(mask_paths) > 0:
            mask_ = (imageio.imread(mask_paths[i]) / 255.).astype(np.float32)
            if mask_.ndim == 3:
                all_masks.append(mask_[...,:3])
            else:
                all_masks.append(mask_[...,None])
        all_imgs.append((imageio.imread(im_name) / 255.).astype(np.float32))
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    H, W = imgs[0].shape[:2]
    K = all_intrinsics[0]
    focal = all_intrinsics[0][0,0]
    print("Data original shape: ", H, W)

    # Bug fix: np.stack raises on an empty list; keep masks=None when the scene
    # ships no masks (the downstream code already checks `masks is not None`).
    masks = np.stack(all_masks, 0) if all_masks else None
    if mask:
        assert len(mask_paths) > 0
        bg = 1. if white_bg else 0.
        imgs = imgs * masks + bg * (1 - masks)

    if reso_level > 1:
        H, W = int(H / reso_level), int(W / reso_level)
        imgs = F.interpolate(torch.from_numpy(imgs).permute(0,3,1,2), size=(H, W)).permute(0,2,3,1).numpy()
        if masks is not None:
            masks = F.interpolate(torch.from_numpy(masks).permute(0,3,1,2), size=(H, W)).permute(0,2,3,1).numpy()
        K[:2] /= reso_level
        focal /= reso_level

    # Fixed held-out views; this is to randomly fetch images.
    i_test = [8, 13, 16, 21, 26, 31, 34]
    if len(imgs) * 0.1 >= 8:
        print("add 56 to test set")
        i_test.append(56)
    i_test = [i for i in i_test if i < len(imgs)]
    i_val = i_test
    i_train = list(set(np.arange(len(imgs))) - set(i_test))
    i_split = [np.array(i_train), np.array(i_val), np.array(i_test)]
    render_poses = poses[i_split[-1]]

    # Bug fix: scale_mats_np is None when normalize=False; indexing it crashed.
    scale_mat = scale_mats_np[0] if scale_mats_np is not None else None
    return imgs, poses, render_poses, [H, W, focal], K, i_split, scale_mat, masks
class Dataset:
    """NeuS-style DTU dataset: loads images, masks and normalized camera matrices."""

    def __init__(self, conf):
        super(Dataset, self).__init__()
        print('Load data: Begin')
        self.device = torch.device('cuda')
        self.conf = conf

        self.data_dir = conf.get_string('data_dir')
        self.render_cameras_name = conf.get_string('render_cameras_name')
        self.object_cameras_name = conf.get_string('object_cameras_name')
        self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)
        self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)

        camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))
        self.camera_dict = camera_dict

        self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))
        self.n_images = len(self.images_lis)
        self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0
        self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))
        self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0

        # world_mat projects world coordinates to image coordinates; scale_mat
        # normalizes the scene to lie inside a unit sphere at the origin.
        self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
        self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]

        self.intrinsics_all = []
        self.pose_all = []
        for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):
            P = (world_mat @ scale_mat)[:3, :4]
            intrinsics, pose = load_K_Rt_from_P(None, P)
            self.intrinsics_all.append(torch.from_numpy(intrinsics).float())
            self.pose_all.append(torch.from_numpy(pose).float())

        self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu()  # [n_images, H, W, 3]
        self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu()    # [n_images, H, W, 3]
        self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device)   # [n_images, 4, 4]
        self.intrinsics_all_inv = torch.inverse(self.intrinsics_all)             # [n_images, 4, 4]
        self.focal = self.intrinsics_all[0][0, 0]
        self.pose_all = torch.stack(self.pose_all).to(self.device)               # [n_images, 4, 4]
        self.H, self.W = self.images.shape[1], self.images.shape[2]
        self.image_pixels = self.H * self.W

        # Region of interest for mesh extraction, mapped back into the
        # normalized (unit-sphere) coordinate frame.
        bbox_min_h = np.array([-1.01, -1.01, -1.01, 1.0])
        bbox_max_h = np.array([ 1.01,  1.01,  1.01, 1.0])
        object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']
        bbox_min_h = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ bbox_min_h[:, None]
        bbox_max_h = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ bbox_max_h[:, None]
        self.object_bbox_min = bbox_min_h[:3, 0]
        self.object_bbox_max = bbox_max_h[:3, 0]

        print('Load data: End')

    def near_far_from_sphere(self, rays_o, rays_d):
        """Near/far bounds: unit offsets around the ray's closest approach to the origin."""
        a = torch.sum(rays_d ** 2, dim=-1, keepdim=True)
        b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)
        mid = 0.5 * (-b) / a
        return mid - 1.0, mid + 1.0

    def image_at(self, idx, resolution_level):
        """Load image `idx` downscaled by `resolution_level`, clipped to the byte range."""
        img = cv.imread(self.images_lis[idx])
        target = (self.W // resolution_level, self.H // resolution_level)
        return (cv.resize(img, target)).clip(0, 255)
Voxurf | Voxurf-main/lib/dvgo_ori.py | import os
import time
import functools
import numpy as np
import cv2
import mcubes
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
'''Model'''
class DirectVoxGO(torch.nn.Module):
    """Direct Voxel Grid Optimization (DVGO) scene representation.

    Geometry is a dense density voxel grid (post-activated into alpha with a
    shifted softplus); appearance is either a raw RGB voxel grid (coarse stage,
    rgbnet_dim<=0) or a feature voxel grid decoded by a shallow MLP with
    positional / view-direction encodings (fine stage).
    """
    def __init__(self, xyz_min, xyz_max,
                 num_voxels=0, num_voxels_base=0,
                 alpha_init=None,
                 nearest=False, pre_act_density=False, in_act_density=False,
                 mask_cache_path=None, mask_cache_thres=1e-3,
                 fast_color_thres=0,
                 rgbnet_dim=0, rgbnet_direct=False, rgbnet_full_implicit=False,
                 rgbnet_depth=3, rgbnet_width=128,
                 posbase_pe=5, viewbase_pe=4,
                 **kwargs):
        """Build voxel grids and (optionally) the shallow color MLP.

        xyz_min/xyz_max: scene bounding-box corners.
        num_voxels / num_voxels_base: current and base total voxel counts.
        alpha_init: desired alpha of a zero-density voxel; fixes the density bias shift.
        rgbnet_dim: <=0 -> plain RGB grid (coarse); >0 -> feature grid + MLP (fine).
        mask_cache_path: optional coarse checkpoint used to mask known free space.
        """
        super(DirectVoxGO, self).__init__()
        self.register_buffer('xyz_min', torch.Tensor(xyz_min))
        self.register_buffer('xyz_max', torch.Tensor(xyz_max))
        self.fast_color_thres = fast_color_thres
        self.nearest = nearest
        self.pre_act_density = pre_act_density
        self.in_act_density = in_act_density
        if self.pre_act_density:
            print('dvgo: using pre_act_density may results in worse quality !!')
        if self.in_act_density:
            print('dvgo: using in_act_density may results in worse quality !!')

        # determine based grid resolution
        self.num_voxels_base = num_voxels_base
        self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)

        # determine the density bias shift
        self.alpha_init = alpha_init
        # Solves 1 - exp(-softplus(shift)) = alpha_init so that a zero grid renders as alpha_init.
        self.act_shift = np.log(1/(1-alpha_init) - 1)
        print('dvgo: set density bias shift to', self.act_shift)

        # determine init grid resolution
        self._set_grid_resolution(num_voxels)

        # init density voxel grid
        self.density = torch.nn.Parameter(torch.zeros([1, 1, *self.world_size]))

        # init color representation
        self.rgbnet_kwargs = {
            'rgbnet_dim': rgbnet_dim, 'rgbnet_direct': rgbnet_direct,
            'rgbnet_full_implicit': rgbnet_full_implicit,
            'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
            'posbase_pe': posbase_pe, 'viewbase_pe': viewbase_pe,
        }
        self.rgbnet_full_implicit = rgbnet_full_implicit
        if rgbnet_dim <= 0:
            # color voxel grid (coarse stage)
            self.k0_dim = 3
            self.k0 = torch.nn.Parameter(torch.zeros([1, self.k0_dim, *self.world_size]))
            self.rgbnet = None
        else:
            # feature voxel grid + shallow MLP  (fine stage)
            if self.rgbnet_full_implicit:
                self.k0_dim = 0
            else:
                self.k0_dim = rgbnet_dim
            self.k0 = torch.nn.Parameter(torch.zeros([1, self.k0_dim, *self.world_size]))
            self.rgbnet_direct = rgbnet_direct
            # Frequencies for sinusoidal positional / view-direction encodings.
            self.register_buffer('posfreq', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))
            self.register_buffer('viewfreq', torch.FloatTensor([(2**i) for i in range(viewbase_pe)]))
            dim0 = (3+3*posbase_pe*2) + (3+3*viewbase_pe*2)
            if self.rgbnet_full_implicit:
                pass
            elif rgbnet_direct:
                dim0 += self.k0_dim
            else:
                # First 3 feature channels are treated as a diffuse color residual.
                dim0 += self.k0_dim-3
            self.rgbnet = nn.Sequential(
                nn.Linear(dim0, rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
                    for _ in range(rgbnet_depth-2)
                ],
                nn.Linear(rgbnet_width, 3),
            )
            nn.init.constant_(self.rgbnet[-1].bias, 0)
            print('dvgo: feature voxel grid', self.k0.shape)
            print('dvgo: mlp', self.rgbnet)

        # Using the coarse geometry if provided (used to determine known free space and unknown space)
        self.mask_cache_path = mask_cache_path
        self.mask_cache_thres = mask_cache_thres
        if mask_cache_path is not None and mask_cache_path:
            self.mask_cache = MaskCache(
                path=mask_cache_path,
                mask_cache_thres=mask_cache_thres).to(self.xyz_min.device)
            self._set_nonempty_mask()
        else:
            self.mask_cache = None
            self.nonempty_mask = None

    def inside_sphere(self):
        """Suppress density outside the unit sphere by forcing it to a large negative value."""
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.density.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.density.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.density.shape[4]),
        ), -1)
        sphere_mask = (torch.linalg.norm(self_grid_xyz, ord=2, dim=-1, keepdim=True) < 1.0).reshape(*self.density.shape)
        self.density[~sphere_mask] = -100

    def _set_grid_resolution(self, num_voxels):
        """Derive voxel size and integer world_size from the total voxel budget."""
        # Determine grid resolution
        self.num_voxels = num_voxels
        self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
        self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
        self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
        print('dvgo: voxel_size      ', self.voxel_size)
        print('dvgo: world_size      ', self.world_size)
        print('dvgo: voxel_size_base ', self.voxel_size_base)
        print('dvgo: voxel_size_ratio', self.voxel_size_ratio)

    def get_kwargs(self):
        """Constructor kwargs needed to re-instantiate this model (for checkpointing)."""
        return {
            'xyz_min': self.xyz_min.cpu().numpy(),
            'xyz_max': self.xyz_max.cpu().numpy(),
            'num_voxels': self.num_voxels,
            'num_voxels_base': self.num_voxels_base,
            'alpha_init': self.alpha_init,
            'nearest': self.nearest,
            'pre_act_density': self.pre_act_density,
            'in_act_density': self.in_act_density,
            'mask_cache_path': self.mask_cache_path,
            'mask_cache_thres': self.mask_cache_thres,
            'fast_color_thres': self.fast_color_thres,
            **self.rgbnet_kwargs,
        }

    def get_MaskCache_kwargs(self):
        """Metadata stored in checkpoints so a MaskCache can replay this model's alpha."""
        return {
            'xyz_min': self.xyz_min.cpu().numpy(),
            'xyz_max': self.xyz_max.cpu().numpy(),
            'act_shift': self.act_shift,
            'voxel_size_ratio': self.voxel_size_ratio,
            'nearest': self.nearest,
            'pre_act_density': self.pre_act_density,
            'in_act_density': self.in_act_density,
        }

    @torch.no_grad()
    def _set_nonempty_mask(self):
        """Mark grid points outside the cached coarse geometry as empty (density -100)."""
        # Find grid points that is inside nonempty (occupied) space
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.density.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.density.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.density.shape[4]),
        ), -1)
        nonempty_mask = self.mask_cache(self_grid_xyz)[None,None].contiguous()
        if hasattr(self, 'nonempty_mask'):
            self.nonempty_mask = nonempty_mask
        else:
            self.register_buffer('nonempty_mask', nonempty_mask)
        self.density[~self.nonempty_mask] = -100

    @torch.no_grad()
    def maskout_near_cam_vox(self, cam_o, near):
        """Zero out (density -100) voxels closer than `near` to any camera origin."""
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.density.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.density.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.density.shape[4]),
        ), -1)
        nearest_dist = torch.stack([
            (self_grid_xyz.unsqueeze(-2) - co).pow(2).sum(-1).sqrt().amin(-1)
            for co in cam_o.split(100)  # for memory saving
        ]).amin(0)
        self.density[nearest_dist[None,None] <= near] = -100

    @torch.no_grad()
    def scale_volume_grid(self, num_voxels):
        """Progressively upscale density/feature grids to a new voxel budget (trilinear)."""
        print('dvgo: scale_volume_grid start')
        ori_world_size = self.world_size
        self._set_grid_resolution(num_voxels)
        print('dvgo: scale_volume_grid scale world_size from', ori_world_size, 'to', self.world_size)

        self.density = torch.nn.Parameter(
            F.interpolate(self.density.data, size=tuple(self.world_size), mode='trilinear', align_corners=True))
        if self.k0_dim > 0:
            self.k0 = torch.nn.Parameter(
                F.interpolate(self.k0.data, size=tuple(self.world_size), mode='trilinear', align_corners=True))
        else:
            self.k0 = torch.nn.Parameter(torch.zeros([1, self.k0_dim, *self.world_size]))
        if self.mask_cache is not None:
            self._set_nonempty_mask()
        print('dvgo: scale_volume_grid finish')

    def voxel_count_views(self, rays_o_tr, rays_d_tr, imsz, near, far, stepsize, downrate=1, irregular_shape=False):
        """Count, per voxel, how many training views' rays pass through it.

        Implemented by back-propagating a sum of trilinear samples through a
        ones-grid: a voxel's gradient > 1 iff some ray sampled it.
        """
        print('dvgo: voxel_count_views start')
        eps_time = time.time()
        N_samples = int(np.linalg.norm(np.array(self.density.shape[2:])+1) / stepsize) + 1
        rng = torch.arange(N_samples)[None].float()
        count = torch.zeros_like(self.density.detach())
        device = rng.device
        for rays_o_, rays_d_ in zip(rays_o_tr.split(imsz), rays_d_tr.split(imsz)):
            ones = torch.ones_like(self.density).requires_grad_()
            if irregular_shape:
                rays_o_ = rays_o_.split(10000)
                rays_d_ = rays_d_.split(10000)
            else:
                rays_o_ = rays_o_[::downrate, ::downrate].to(device).flatten(0,-2).split(10000)
                rays_d_ = rays_d_[::downrate, ::downrate].to(device).flatten(0,-2).split(10000)

            for rays_o, rays_d in zip(rays_o_, rays_d_):
                vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
                # Entry/exit parameters of each ray against the bounding box.
                rate_a = (self.xyz_max - rays_o) / vec
                rate_b = (self.xyz_min - rays_o) / vec
                t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
                t_max = torch.maximum(rate_a, rate_b).amin(-1).clamp(min=near, max=far)
                step = stepsize * self.voxel_size * rng
                interpx = (t_min[...,None] + step/rays_d.norm(dim=-1,keepdim=True))
                rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
                self.grid_sampler(rays_pts, ones).sum().backward()
            with torch.no_grad():
                count += (ones.grad > 1)
        eps_time = time.time() - eps_time
        print('dvgo: voxel_count_views finish (eps time:', eps_time, 'sec)')
        return count

    def density_total_variation(self):
        """Total variation of the activated (alpha) density grid within nonempty space."""
        tv = total_variation(self.activate_density(self.density, 1), self.nonempty_mask)
        return tv

    def k0_total_variation(self):
        """Total variation of the color/feature grid (sigmoid applied in coarse mode)."""
        if self.rgbnet is not None:
            v = self.k0
        else:
            v = torch.sigmoid(self.k0)
        return total_variation(v, self.nonempty_mask)

    def activate_density(self, density, interval=None):
        """Convert raw density to alpha: 1 - exp(-softplus(density + shift) * interval)."""
        interval = interval if interval is not None else self.voxel_size_ratio
        return 1 - torch.exp(-F.softplus(density + self.act_shift) * interval)

    def grid_sampler(self, xyz, *grids, mode=None, align_corners=True):
        '''Wrapper for the interp operation'''
        if mode is None:
            # bilinear is actually trilinear if 5D input is given to grid_sample
            mode = 'nearest' if self.nearest else 'bilinear'
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        # Normalize world coords to [-1, 1] and flip to grid_sample's (z, y, x) order.
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        ret_lst = [
            # TODO: use `rearrange' to make it readable
            F.grid_sample(grid, ind_norm, mode=mode, align_corners=align_corners).reshape(grid.shape[1],-1).T.reshape(*shape,grid.shape[1]).squeeze()
            for grid in grids
        ]
        if len(ret_lst) == 1:
            return ret_lst[0]
        return ret_lst

    def sample_ray(self, rays_o, rays_d, near, far, stepsize, is_train=False, near_far=False, **render_kwargs):
        '''Sample query points on rays'''
        # 1. determine the maximum number of query points to cover all possible rays
        N_samples = int(np.linalg.norm(np.array(self.density.shape[2:])+1) / stepsize) + 1
        # 2. determine the two end-points of ray bbox intersection
        vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
        rate_a = (self.xyz_max - rays_o) / vec
        rate_b = (self.xyz_min - rays_o) / vec
        # import ipdb; ipdb.set_trace()
        t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
        t_max = torch.maximum(rate_a, rate_b).amin(-1).clamp(min=near, max=far)
        # 3. check wheter a raw intersect the bbox or not
        mask_outbbox = (t_max <= t_min)
        # 4. sample points on each ray
        rng = torch.arange(N_samples)[None].float()
        if is_train:
            # Jitter sample positions per ray during training.
            rng = rng.repeat(rays_d.shape[-2],1)
            rng += torch.rand_like(rng[:,[0]])
        step = stepsize * self.voxel_size * rng
        interpx = (t_min[...,None] + step/rays_d.norm(dim=-1,keepdim=True))
        rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
        # 5. update mask for query points outside bbox
        mask_outbbox = mask_outbbox[...,None] | ((self.xyz_min>rays_pts) | (rays_pts>self.xyz_max)).any(dim=-1)
        # import ipdb; ipdb.set_trace()
        return rays_pts, mask_outbbox

    def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):
        '''Volume rendering'''
        ret_dict = {}

        # sample points on rays
        rays_pts, mask_outbbox = self.sample_ray(
                rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, **render_kwargs)
        interval = render_kwargs['stepsize'] * self.voxel_size_ratio

        # update mask for query points in known free space
        if self.mask_cache is not None:
            mask_outbbox[~mask_outbbox] |= (~self.mask_cache(rays_pts[~mask_outbbox]))

        # query for alpha
        alpha = torch.zeros_like(rays_pts[...,0])
        vis_density = torch.zeros_like(rays_pts[...,0])

        if self.pre_act_density:
            # pre-activation
            density = None
            alpha[~mask_outbbox] = self.grid_sampler(
                    rays_pts[~mask_outbbox], self.activate_density(self.density, interval))
        elif self.in_act_density:
            # in-activation
            density = self.grid_sampler(rays_pts[~mask_outbbox], F.softplus(self.density + self.act_shift))
            alpha[~mask_outbbox] = 1 - torch.exp(-density * interval)
        else:
            # post-activation
            density = self.grid_sampler(rays_pts[~mask_outbbox], self.density)
            alpha[~mask_outbbox] = self.activate_density(density, interval)

        # compute accumulated transmittance
        weights, alphainv_cum = get_ray_marching_ray(alpha)
        # import ipdb; ipdb.set_trace()
        vis_density[~mask_outbbox] = density
        # if global_step is not None:
        #     if global_step % 100 == 0:
        #         self.visualize_weight(vis_density, alpha, weights, step=global_step)

        # query for color
        # Skip color computation where the rendering weight is negligible.
        mask = (weights > self.fast_color_thres)
        k0 = torch.zeros(*weights.shape, self.k0_dim).to(weights)
        if not self.rgbnet_full_implicit:
            k0[mask] = self.grid_sampler(rays_pts[mask], self.k0)

        if self.rgbnet is None:
            # no view-depend effect
            rgb = torch.sigmoid(k0)
        else:
            # view-dependent color emission
            if self.rgbnet_direct:
                k0_view = k0
            else:
                k0_view = k0[..., 3:]
                k0_diffuse = k0[..., :3]
            viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
            viewdirs_emb = torch.cat([viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
            rays_xyz = (rays_pts[mask] - self.xyz_min) / (self.xyz_max - self.xyz_min)
            xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
            xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
            rgb_feat = torch.cat([
                k0_view[mask],
                xyz_emb,
                # TODO: use `rearrange' to make it readable
                viewdirs_emb.flatten(0,-2).unsqueeze(-2).repeat(1,weights.shape[-1],1)[mask.flatten(0,-2)]
            ], -1)
            rgb_logit = torch.zeros(*weights.shape, 3).to(weights)
            rgb_logit[mask] = self.rgbnet(rgb_feat)
            if self.rgbnet_direct:
                rgb = torch.sigmoid(rgb_logit)
            else:
                rgb_logit[mask] = rgb_logit[mask] + k0_diffuse[mask]
                rgb = torch.sigmoid(rgb_logit)

        # Ray marching
        rgb_marched = (weights[...,None] * rgb).sum(-2) + alphainv_cum[...,[-1]] * render_kwargs['bg']
        rgb_marched = rgb_marched.clamp(0, 1)
        depth = (rays_o[...,None,:] - rays_pts).norm(dim=-1)
        depth = (weights * depth).sum(-1) + alphainv_cum[...,-1] * render_kwargs['far']
        disp = 1 / depth
        ret_dict.update({
            'alphainv_cum': alphainv_cum,
            'weights': weights,
            'rgb_marched': rgb_marched,
            'raw_alpha': alpha,
            'raw_rgb': rgb,
            'depth': depth,
            'disp': disp,
            'mask': mask,
            'mask_outbbox':mask_outbbox,
        })
        return ret_dict

    def extract_geometry(self, bound_min, bound_max, resolution=128, threshold=0.0, mode="density"):
        """Marching-cubes mesh extraction; delegates to the module-level helper.

        NOTE(review): mode="neus" reads self.sdf, which __init__ does not define
        here — presumably set by a subclass; verify before using that path.
        """
        if mode == "density":
            query_func = lambda pts: self.activate_density(self.grid_sampler(pts, self.density))
            threshold = 0.001
        elif mode == "neus":
            query_func = lambda pts: self.grid_sampler(pts, - self.sdf)
            threshold = 0.0
        else:
            raise NameError
        if resolution is None:
            resolution = self.world_size[0]
        return extract_geometry(bound_min,
                                bound_max,
                                resolution=resolution,
                                threshold=threshold,
                                query_func=query_func)

    def visualize_density_sdf(self, root='', iter=0, idxs=None):
        """Dump alpha slices of the density grid as debug images under root/debug_figs."""
        if idxs is None:
            if self.density.shape[2] < 100:
                idxs = [self.density.shape[2] // 2]
            else:
                idxs = [60]
        os.makedirs(os.path.join(root, "debug_figs"), exist_ok=True)
        for i in idxs:
            # density_img = self.density[0,0,i].cpu().detach().numpy()
            # density_img = density_img - density_img.min()
            # density_img = (density_img / density_img.max()) * 255
            # cv2.imwrite(os.path.join(root, "debug_figs/density_{}_{}.png".format(iter, i)), density_img)
            alpha = 1 - torch.exp(-F.softplus(self.density + self.act_shift)).cpu().detach().numpy()
            # alpha_img = (alpha > 0.001) * 255
            alpha_img = alpha[0,0,i] * 255
            cv2.imwrite(os.path.join(root, "debug_figs/density_alpha_{}_{}.png".format(iter, i)), alpha_img)
            # sdf_img = self.sdf[0,0,i].cpu().detach().numpy()
            # sdf_img = (sdf_img + 1 / 2).clip(0,1) * 255
            # cv2.imwrite(os.path.join(root, "debug_figs/sdf_{}_{}.png".format(iter, i)), sdf_img)
            # print("{:.7f}, {:.7f}, {:.7f}".format(self.sdf[0,0,i].mean(), self.sdf.mean(), self.grad_conv.weight.data.mean()))

    def visualize_weight(self, density, alpha, weight, root='', step=0, thrd=0.001):
        """Plot density/alpha/weight profiles for a few high-weight rays (debugging aid)."""
        # sort() returns (values, indices); [-1] picks the indices of the top rays.
        idxs = weight.sum(-1).sort()[-1][-100:]
        idxs = [idxs[i] for i in [0, 20, 40, 60, 80]]
        density[density<-5] = -5
        plt.figure(figsize=(20,4))
        for n, i in enumerate(idxs):
            # vis = (weight[i] > thrd).cpu().numpy()
            vis = np.arange(weight.shape[1])
            # import ipdb; ipdb.set_trace()
            ax1 = plt.subplot(1, 5, n+1)
            ax1.plot(density.detach().cpu().numpy()[i][vis], label='density')
            ax2 = ax1.twinx()
            ax2.plot(alpha.detach().cpu().numpy()[i][vis], color='green', label='alpha')
            ax2.plot(weight.detach().cpu().numpy()[i][vis], color='red', label='weight')
        plt.legend()
        plt.savefig(os.path.join(root, "debug_figs/weight_{}.png".format(step)))
''' Module for the searched coarse geometry
It supports query for the known free space and unknown space.
'''
class MaskCache(nn.Module):
    """Caches a coarse density grid to answer "is this point occupied?" queries.

    The stored density is max-pooled (slightly dilating the occupied region),
    and a query counts as occupied when its interpolated alpha reaches
    `mask_cache_thres`.
    """
    def __init__(self, path, mask_cache_thres, ks=3):
        """Load the coarse checkpoint at `path`; `ks` is the dilation kernel size."""
        super().__init__()
        st = torch.load(path)
        self.mask_cache_thres = mask_cache_thres
        self.register_buffer('xyz_min', torch.FloatTensor(st['MaskCache_kwargs']['xyz_min']))
        self.register_buffer('xyz_max', torch.FloatTensor(st['MaskCache_kwargs']['xyz_max']))
        # Max-pool dilates the occupied region so thin structures are not masked away.
        self.register_buffer('density', F.max_pool3d(
            st['model_state_dict']['density'], kernel_size=ks, padding=ks//2, stride=1))
        self.act_shift = st['MaskCache_kwargs']['act_shift']
        self.voxel_size_ratio = st['MaskCache_kwargs']['voxel_size_ratio']
        self.nearest = st['MaskCache_kwargs'].get('nearest', False)
        self.pre_act_density = st['MaskCache_kwargs'].get('pre_act_density', False)
        self.in_act_density = st['MaskCache_kwargs'].get('in_act_density', False)

    @torch.no_grad()
    def forward(self, xyz):
        """Return a boolean occupancy mask with the same leading shape as `xyz`."""
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        # Normalize to [-1, 1] and flip to grid_sample's (z, y, x) coordinate order.
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        if self.nearest:
            density = F.grid_sample(self.density, ind_norm, align_corners=True, mode='nearest')
            alpha = 1 - torch.exp(-F.softplus(density + self.act_shift) * self.voxel_size_ratio)
        elif self.pre_act_density:
            # Bug fix: sample the pre-activated alpha grid. Previously the
            # computed `alpha` grid was discarded and the raw density was
            # grid-sampled and compared against the alpha threshold.
            alpha = 1 - torch.exp(-F.softplus(self.density + self.act_shift) * self.voxel_size_ratio)
            alpha = F.grid_sample(alpha, ind_norm, align_corners=True)
        elif self.in_act_density:
            density = F.grid_sample(F.softplus(self.density + self.act_shift), ind_norm, align_corners=True)
            alpha = 1 - torch.exp(-density * self.voxel_size_ratio)
        else:
            # Post-activation: interpolate raw density, then convert to alpha.
            density = F.grid_sample(self.density, ind_norm, align_corners=True)
            alpha = 1 - torch.exp(-F.softplus(density + self.act_shift) * self.voxel_size_ratio)
        alpha = alpha.reshape(*shape)
        return (alpha >= self.mask_cache_thres)
''' Misc
'''
def cumprod_exclusive(p):
    """Exclusive cumulative product along the last dim, prefixed with ones.

    Not sure why: it will be slow at the end of training if clamping at 1e-10
    is not applied.
    """
    leading_ones = torch.ones_like(p[..., [0]])
    running = p.clamp_min(1e-10).cumprod(-1)
    return torch.cat([leading_ones, running], -1)
def get_ray_marching_ray(alpha):
    """Turn per-step alphas into rendering weights and cumulative transmittance."""
    transmittance = cumprod_exclusive(1 - alpha)
    weights = alpha * transmittance[..., :-1]
    return weights, transmittance
def total_variation(v, mask=None):
    """Mean absolute forward difference of a 5D grid over its three spatial dims.

    If `mask` is given, only differences between pairs of adjacent voxels that
    are both inside the mask contribute.
    """
    diffs = [v.diff(dim=d).abs() for d in (2, 3, 4)]
    if mask is not None:
        pair_masks = [
            mask[:, :, :-1] & mask[:, :, 1:],
            mask[:, :, :, :-1] & mask[:, :, :, 1:],
            mask[:, :, :, :, :-1] & mask[:, :, :, :, 1:],
        ]
        diffs = [d[m] for d, m in zip(diffs, pair_masks)]
    return sum(d.mean() for d in diffs) / 3
''' Ray and batch
'''
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
    """Generate per-pixel ray origins and directions in world space.

    mode selects where inside each pixel the ray is cast: its top-left corner
    ('lefttop'), its center ('center'), or a uniformly random point ('random').
    inverse_y flips the sign convention of the y/z camera axes.
    """
    i, j = torch.meshgrid(
        torch.linspace(0, W-1, W, device=c2w.device),
        torch.linspace(0, H-1, H, device=c2w.device))  # pytorch's meshgrid has indexing='ij'
    i, j = i.t().float(), j.t().float()
    if mode == 'center':
        i, j = i + 0.5, j + 0.5
    elif mode == 'random':
        i = i + torch.rand_like(i)
        j = j + torch.rand_like(j)
    elif mode != 'lefttop':
        raise NotImplementedError
    if flip_x:
        i = i.flip((1,))
    if flip_y:
        j = j.flip((0,))
    sign = 1 if inverse_y else -1
    dirs = torch.stack([
        (i - K[0][2]) / K[0][0],
        sign * (j - K[1][2]) / K[1][1],
        sign * torch.ones_like(i)], -1)
    # Rotate ray directions from camera frame to the world frame.
    rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)  # dot product, equals to: [c2w.dot(dir) for dir in dirs]
    # Translate camera frame's origin to the world frame. It is the origin of all rays.
    rays_o = c2w[:3, 3].expand(rays_d.shape)
    return rays_o, rays_d
def get_rays_np(H, W, K, c2w):
    """NumPy twin of `get_rays` (OpenGL convention: -y down, -z forward).

    Returns (rays_o, rays_d), both (H, W, 3); rays_o is a broadcast view of
    the camera centre c2w[:3, 3].
    """
    px, py = np.meshgrid(np.arange(W, dtype=np.float32),
                         np.arange(H, dtype=np.float32), indexing='xy')
    dirs = np.stack([(px - K[0][2]) / K[0][0],
                     -(py - K[1][2]) / K[1][1],
                     -np.ones_like(px)], -1)
    # Rotate camera-frame directions to world frame: rays_d = R @ dir per pixel.
    rays_d = (dirs[..., np.newaxis, :] * c2w[:3, :3]).sum(-1)
    rays_o = np.broadcast_to(c2w[:3, 3], np.shape(rays_d))
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Map rays to normalized device coordinates (forward-facing scenes).

    Each origin is first slid along its ray onto the z = -near plane, then
    the standard NeRF NDC projection is applied. Inputs/outputs are (..., 3).
    """
    # Shift ray origins to the near plane.
    t_near = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t_near[..., None] * rays_d
    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    # Screen-space scale factors: -(2*focal)/W and -(2*focal)/H.
    sx = -1. / (W / (2. * focal))
    sy = -1. / (H / (2. * focal))
    o0 = sx * ox / oz
    o1 = sy * oy / oz
    o2 = 1. + 2. * near / oz
    d0 = sx * (dx / dz - ox / oz)
    d1 = sy * (dy / dz - oy / oz)
    d2 = -2. * near / oz
    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
    """Build rays for one camera view and also return unit view directions.

    `viewdirs` are normalized from the *pre-NDC* directions; when `ndc` is
    set the rays themselves are then warped with K[0][0] as focal, near=1.
    """
    rays_o, rays_d = get_rays(
        H, W, K, c2w, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y, mode=mode)
    viewdirs = rays_d / rays_d.norm(dim=-1, keepdim=True)
    if ndc:
        rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d)
    return rays_o, rays_d, viewdirs
@torch.no_grad()
def get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute per-pixel rays for every training view.

    Fast path: all views must share one (H, W) and one K. Returns
    (rgb_tr, rays_o, rays_d, viewdirs, imsz) with ray tensors shaped
    [n_views, H, W, 3] on rgb_tr's device.
    """
    print('get_training_rays: start')
    # Only valid when every view has identical size and intrinsics.
    assert len(np.unique(HW, axis=0)) == 1
    assert len(np.unique(Ks.reshape(len(Ks), -1), axis=0)) == 1
    assert len(rgb_tr) == len(train_poses) and len(rgb_tr) == len(Ks) and len(rgb_tr) == len(HW)
    H, W = HW[0]
    K = Ks[0]
    t0 = time.time()
    n_views = len(rgb_tr)
    rays_o_tr = torch.zeros([n_views, H, W, 3], device=rgb_tr.device)
    rays_d_tr = torch.zeros([n_views, H, W, 3], device=rgb_tr.device)
    viewdirs_tr = torch.zeros([n_views, H, W, 3], device=rgb_tr.device)
    # Per-view pixel counts are not tracked in this path; placeholders of 1.
    imsz = [1] * n_views
    for view_idx, c2w in enumerate(train_poses):
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        rays_o_tr[view_idx].copy_(rays_o.to(rgb_tr.device))
        rays_d_tr[view_idx].copy_(rays_d.to(rgb_tr.device))
        viewdirs_tr[view_idx].copy_(viewdirs.to(rgb_tr.device))
        del rays_o, rays_d, viewdirs
    print('get_training_rays: finish (eps time:', time.time() - t0, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute rays for all views and flatten them into [N, 3] tensors.

    Unlike `get_training_rays`, views may differ in H/W/K; every pixel of
    every view becomes one row. Returns (rgb, rays_o, rays_d, viewdirs, imsz)
    where `imsz` lists the pixel count contributed by each view.
    """
    print('get_training_rays_flatten: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    eps_time = time.time()
    DEVICE = rgb_tr_ori[0].device
    # Total pixel count across all views.
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0  # write cursor into the flattened buffers
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
                H=H, W=W, K=K, c2w=c2w, ndc=ndc,
                inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        n = H * W
        # flatten(0, 1) collapses the (H, W) pixel grid into rows.
        rgb_tr[top:top+n].copy_(img.flatten(0,1))
        rays_o_tr[top:top+n].copy_(rays_o.flatten(0,1).to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d.flatten(0,1).to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs.flatten(0,1).to(DEVICE))
        imsz.append(n)
        top += n
    assert top == N
    eps_time = time.time() - eps_time
    print('get_training_rays_flatten: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_in_maskcache_sampling(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y, model, render_kwargs):
    """Precompute flattened training rays, keeping only pixels whose rays hit
    non-empty space according to the model's mask cache.

    Buffers are allocated for all N pixels and trimmed to the surviving `top`
    rows at the end. Returns (rgb, rays_o, rays_d, viewdirs, imsz).
    """
    print('get_training_rays_in_maskcache_sampling: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    CHUNK = 64  # image rows processed per sample_ray call (memory bound)
    DEVICE = rgb_tr_ori[0].device
    eps_time = time.time()
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
                H=H, W=W, K=K, c2w=c2w, ndc=ndc,
                inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        mask = torch.ones(img.shape[:2], device=DEVICE, dtype=torch.bool)
        for i in range(0, img.shape[0], CHUNK):
            rays_pts, mask_outbbox = model.sample_ray(
                    rays_o=rays_o[i:i+CHUNK], rays_d=rays_d[i:i+CHUNK], **render_kwargs)
            # Additionally mark in-bbox points as "out" when the mask cache
            # says the voxel is empty (masked in-place update).
            mask_outbbox[~mask_outbbox] |= (~model.mask_cache(rays_pts[~mask_outbbox]))
            # A pixel survives if at least one of its samples is non-empty.
            mask[i:i+CHUNK] &= (~mask_outbbox).any(-1).to(DEVICE)
        # import ipdb; ipdb.set_trace()
        n = mask.sum()  # NOTE: 0-dim tensor; works as a slice bound below
        rgb_tr[top:top+n].copy_(img[mask])
        rays_o_tr[top:top+n].copy_(rays_o[mask].to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d[mask].to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs[mask].to(DEVICE))
        imsz.append(n)
        top += n
    print('get_training_rays_in_maskcache_sampling: ratio', top / N)
    # Trim the preallocated buffers to the rows actually written.
    rgb_tr = rgb_tr[:top]
    rays_o_tr = rays_o_tr[:top]
    rays_d_tr = rays_d_tr[:top]
    viewdirs_tr = viewdirs_tr[:top]
    eps_time = time.time() - eps_time
    print('get_training_rays_in_maskcache_sampling: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def batch_indices_generator(N, BS):
# torch.randperm on cuda produce incorrect results in my machine
idx, top = torch.LongTensor(np.random.permutation(N)), 0
while True:
if top + BS > N:
idx, top = torch.LongTensor(np.random.permutation(N)), 0
yield idx[top:top+BS]
top += BS
def extract_fields(bound_min, bound_max, resolution, query_func, N = 64):
    """Evaluate `query_func` on a resolution^3 point grid, in N-sized chunks
    per axis (to bound peak memory).

    Returns a float32 numpy array of shape (resolution, resolution, resolution).
    """
    axes = [torch.linspace(bound_min[d], bound_max[d], resolution).split(N)
            for d in range(3)]
    field = np.zeros([resolution] * 3, dtype=np.float32)
    with torch.no_grad():
        for xi, xs in enumerate(axes[0]):
            for yi, ys in enumerate(axes[1]):
                for zi, zs in enumerate(axes[2]):
                    xx, yy, zz = torch.meshgrid(xs, ys, zs)
                    pts = torch.stack(
                        [xx.reshape(-1), yy.reshape(-1), zz.reshape(-1)], dim=-1)
                    vals = query_func(pts).reshape(
                        len(xs), len(ys), len(zs)).detach().cpu().numpy()
                    field[xi * N: xi * N + len(xs),
                          yi * N: yi * N + len(ys),
                          zi * N: zi * N + len(zs)] = vals
    return field
def extract_geometry(bound_min, bound_max, resolution, threshold, query_func, N = 64):
    """Run marching cubes over a sampled scalar field.

    Returns (vertices, triangles); vertices are mapped from grid-index space
    back into the [bound_min, bound_max] box.
    """
    print('threshold: {}'.format(threshold))
    field = extract_fields(bound_min, bound_max, resolution, query_func, N)
    vertices, triangles = mcubes.marching_cubes(field, threshold)
    hi = bound_max.detach().cpu().numpy()
    lo = bound_min.detach().cpu().numpy()
    # Grid indices run 0..resolution-1; rescale into world coordinates.
    vertices = vertices / (resolution - 1.0) * (hi - lo)[None, :] + lo[None, :]
    return vertices, triangles
| 32,546 | 45.231534 | 149 | py |
Voxurf | Voxurf-main/lib/load_nsvf.py | import os
import glob
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
def trans_t(t):
    """4x4 homogeneous translation by `t` along +z."""
    return torch.Tensor([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, t],
        [0, 0, 0, 1]]).float()

def rot_phi(phi):
    """4x4 homogeneous rotation by `phi` radians about the x axis."""
    c, s = np.cos(phi), np.sin(phi)
    return torch.Tensor([
        [1, 0, 0, 0],
        [0, c, -s, 0],
        [0, s, c, 0],
        [0, 0, 0, 1]]).float()

def rot_theta(th):
    """4x4 homogeneous rotation by `th` radians about the y axis."""
    c, s = np.cos(th), np.sin(th)
    return torch.Tensor([
        [c, 0, -s, 0],
        [0, 1, 0, 0],
        [s, 0, c, 0],
        [0, 0, 0, 1]]).float()
def pose_spherical(theta, phi, radius):
    """Camera-to-world pose on a sphere around the origin.

    `theta` and `phi` are in degrees; `radius` is the camera distance.
    Composes translate -> pitch -> yaw, then remaps axes to the dataset's
    world convention.
    """
    c2w = trans_t(radius)
    c2w = rot_phi(phi / 180. * np.pi) @ c2w
    c2w = rot_theta(theta / 180. * np.pi) @ c2w
    axis_fix = torch.Tensor(np.array(
        [[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
    return axis_fix @ c2w
def load_nsvf_data(basedir):
    """Load an NSVF-format dataset of paired pose txt / rgb png files.

    The first character of each rgb filename encodes its split index
    (used directly as an int to bucket into i_split, presumably
    0=train, 1=val, 2=test — TODO confirm against the dataset layout).

    Returns:
        imgs: (n, H, W, C) float32 images in [0, 1].
        poses: (n, 4, 4) float32 camera-to-world matrices.
        render_poses: 40 spiral poses at phi=-30 deg, radius 4.
        [H, W, focal]: image size and focal length (first value of
            intrinsics.txt's first line).
        i_split: per-split lists of indices into imgs/poses.
    """
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
    all_poses = []
    all_imgs = []
    i_split = [[], [], []]
    for i, (pose_path, rgb_path) in enumerate(zip(pose_paths, rgb_paths)):
        # Leading digit of the filename selects the split bucket.
        i_set = int(os.path.split(rgb_path)[-1][0])
        all_imgs.append((imageio.imread(rgb_path) / 255.).astype(np.float32))
        all_poses.append(np.loadtxt(pose_path).astype(np.float32))
        i_split[i_set].append(i)
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    H, W = imgs[0].shape[:2]
    with open(os.path.join(basedir, 'intrinsics.txt')) as f:
        focal = float(f.readline().split()[0])
    render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)
    return imgs, poses, render_poses, [H, W, focal], i_split
| 1,712 | 26.629032 | 115 | py |
Voxurf | Voxurf-main/lib/voxurf_womask_coarse.py | import os
import time
import functools
import numpy as np
import cv2
import math
import random
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.dvgo_ori import extract_geometry
import copy
# import MinkowskiEngine as Me
from . import grid
from torch_scatter import segment_coo
from torch.utils.cpp_extension import load
parent_dir = os.path.dirname(os.path.abspath(__file__))
# JIT-compile the custom CUDA extensions (torch.utils.cpp_extension.load)
# from sources that live next to this file; compiled on first import.
ub360_utils_cuda = load(
        name='ub360_utils_cuda',
        sources=[
            os.path.join(parent_dir, path)
            for path in ['cuda/ub360_utils.cpp', 'cuda/ub360_utils_kernel.cu']],
        verbose=True)
# Ray sampling / rendering kernels used by Voxurf.sample_ray_cuda below.
render_utils_cuda = load(
        name='render_utils_cuda',
        sources=[
            os.path.join(parent_dir, path)
            for path in ['cuda/render_utils.cpp', 'cuda/render_utils_kernel.cu']],
        verbose=True)
'''Model'''
class Voxurf(torch.nn.Module):
"""
This module is modified from DirectVoxGO https://github.com/sunset1995/DirectVoxGO/blob/main/lib/dvgo.py
"""
    def __init__(self, xyz_min, xyz_max,
                 num_voxels=0, num_voxels_bg=0, num_voxels_base=0,
                 alpha_init=None,
                 nearest=False,
                 mask_cache_path=None, mask_cache_thres=1e-3,
                 fast_color_thres=0, bg_fast_color_thres=0,
                 rgbnet_dim=0, bg_rgbnet_dim=0, rgbnet_direct=False, rgbnet_full_implicit=False,
                 rgbnet_depth=3, rgbnet_width=128,
                 posbase_pe=5, viewbase_pe=4, bg_posbase_pe=5, bg_viewbase_pe=4,
                 geo_rgb_dim=3,
                 grad_mode='interpolate',
                 s_ratio=2000, s_start=0.2, s_learn=False, step_start=0,
                 smooth_ksize=0, smooth_sigma=1, smooth_scale=False,
                 bg_rgbnet_width=128, bg_rgbnet_depth=4,
                 sdf_thr=1.0, tv_in_sphere=True,
                 init_ball_scale=0.5, use_layer_norm=False, bg_use_layer_norm=False, set_sphere_freq=10000,
                 **kwargs):
        """Voxel-based implicit surface model (SDF grid + feature grids + MLPs).

        Foreground geometry is an SDF voxel grid (NeuS-style rendering);
        the background is a separate density/feature grid pair. `s_ratio`,
        `s_start`, `s_learn`, `step_start` control the NeuS sharpness
        schedule; `smooth_*` configure an optional fixed Gaussian smoothing
        of the SDF grid.
        """
        super(Voxurf, self).__init__()
        self.register_buffer('xyz_min', torch.Tensor(xyz_min))
        self.register_buffer('xyz_max', torch.Tensor(xyz_max))
        self.fast_color_thres = fast_color_thres
        self.bg_fast_color_thres = bg_fast_color_thres
        self.nearest = nearest
        self.set_sphere_freq = set_sphere_freq
        self.sdf_thr = sdf_thr
        self.tv_in_sphere = tv_in_sphere
        self.init_ball_scale = init_ball_scale
        self.use_layer_norm = use_layer_norm
        self.bg_use_layer_norm = bg_use_layer_norm
        self.s_ratio = s_ratio
        self.s_start = s_start
        self.s_learn = s_learn
        self.step_start = step_start
        # NOTE(review): .cuda() on a Parameter returns a non-leaf copy; if
        # s_learn is True this may not be optimized as intended — verify.
        self.s_val = nn.Parameter(torch.ones(1), requires_grad=s_learn).cuda()
        self.s_val.data *= s_start
        self.sdf_init_mode = "ball_init"
        # determine based grid resolution
        self.num_voxels_base = num_voxels_base
        self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)
        # determine the density bias shift
        self.alpha_init = alpha_init
        self.act_shift = np.log(1/(1-alpha_init) - 1)
        print('dvgo: set density bias shift to', self.act_shift)
        # determine init grid resolution
        self._set_grid_resolution(num_voxels, num_voxels_bg)
        # init density voxel grid
        # self.density = torch.nn.Parameter(torch.zeros([1, 1, *self.world_size]))
        self.bg_density = grid.create_grid(
            'DenseGrid', channels=1, world_size=self.world_size_bg,
            xyz_min=self.xyz_min, xyz_max=self.xyz_max)
        if self.sdf_init_mode == "ball_init":
            # Initialize the SDF as a sphere of radius init_ball_scale
            # centered at the origin (signed distance, negative inside).
            self.sdf = grid.create_grid(
                'DenseGrid', channels=1, world_size=self.world_size,
                xyz_min=self.xyz_min, xyz_max=self.xyz_max)
            x_min, y_min, z_min = self.xyz_min.cpu().numpy()
            x_max, y_max, z_max = self.xyz_max.cpu().numpy()
            x, y, z = np.mgrid[x_min:x_max:self.world_size[0].item() * 1j, y_min:y_max:self.world_size[1].item() * 1j, z_min:z_max:self.world_size[2].item() * 1j]
            self.sdf.grid.data = torch.from_numpy((x ** 2 + y ** 2 + z ** 2) ** 0.5 - self.init_ball_scale).float()[None, None, ...]
        elif self.sdf_init_mode == "random":
            self.sdf = torch.nn.Parameter(torch.rand([1, 1, *self.world_size]) * 0.05) # random initialization
            torch.nn.init.normal_(self.sdf, 0.0, 0.5)
        else:
            raise NotImplementedError
        self.init_smooth_conv(smooth_ksize, smooth_sigma)
        self.smooth_scale = smooth_scale
        # init color representation
        self.rgbnet_kwargs = {
            'rgbnet_dim': rgbnet_dim, 'rgbnet_direct': rgbnet_direct,
            'rgbnet_full_implicit': rgbnet_full_implicit,
            'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
            'posbase_pe': posbase_pe, 'viewbase_pe': viewbase_pe,
        }
        self.rgbnet_full_implicit = rgbnet_full_implicit
        if self.rgbnet_full_implicit:
            self.k0_dim = 0
        else:
            self.k0_dim = rgbnet_dim
        self.bg_k0_dim = bg_rgbnet_dim
        self.k0 = grid.create_grid(
            'DenseGrid', channels=self.k0_dim, world_size=self.world_size,
            xyz_min=self.xyz_min, xyz_max=self.xyz_max)
        self.bg_k0 = grid.create_grid(
            'DenseGrid', channels=self.bg_k0_dim, world_size=self.world_size_bg,
            xyz_min=self.xyz_min, xyz_max=self.xyz_max)
        self.rgbnet_direct = rgbnet_direct
        # Positional-encoding frequency banks.
        # NOTE(review): 'posfreq' is sized by bg_posbase_pe and 'bg_posfreq'
        # by posbase_pe — this looks swapped (only harmless when the two PE
        # settings are equal); confirm against the usage in forward().
        self.register_buffer('posfreq', torch.FloatTensor([(2**i) for i in range(bg_posbase_pe)]))
        self.register_buffer('bg_posfreq', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))
        self.register_buffer('viewfreq', torch.FloatTensor([(2**i) for i in range(viewbase_pe)]))
        self.register_buffer('bg_viewfreq', torch.FloatTensor([(2**i) for i in range(bg_viewbase_pe)]))
        self.use_xyz = posbase_pe >= 0
        self.bg_use_xyz = bg_posbase_pe >= 0
        self.use_view = viewbase_pe >= 0
        self.bg_use_view = bg_viewbase_pe >= 0
        # Foreground MLP input width: xyz PE + view PE + features (+ normal).
        dim0 = 0
        if self.use_xyz:
            dim0 += (3 + 3 * posbase_pe * 2)
        if self.use_view >= 0:
            dim0 += (3 + 3 * viewbase_pe * 2)
        if self.rgbnet_full_implicit:
            pass
        elif rgbnet_direct:
            dim0 += self.k0_dim
        else:
            dim0 += self.k0_dim-3
        self.geo_rgb_dim = geo_rgb_dim
        if self.geo_rgb_dim:
            dim0 += self.geo_rgb_dim
        if not self.use_layer_norm:
            self.rgbnet = nn.Sequential(
                nn.Linear(dim0, rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
                    for _ in range(rgbnet_depth-2)
                ],
                nn.Linear(rgbnet_width, 3),
            )
        else:
            self.rgbnet = nn.Sequential(
                nn.Linear(dim0, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True))
                    for _ in range(rgbnet_depth-2)
                ],
                nn.Linear(rgbnet_width, 3),
            )
        nn.init.constant_(self.rgbnet[-1].bias, 0)
        print('feature voxel grid', self.k0.grid.shape)
        print('mlp', self.rgbnet)
        # Background MLP input width (mirrors the foreground computation).
        dim0 = 0
        if self.bg_use_xyz:
            dim0 += (3 + 3 * bg_posbase_pe * 2)
        if self.bg_use_view:
            dim0 += (3 + 3 * bg_viewbase_pe * 2)
        if self.rgbnet_full_implicit:
            pass
        elif rgbnet_direct:
            dim0 += self.bg_k0_dim
        else:
            dim0 += self.bg_k0_dim-3
        if not self.bg_use_layer_norm:
            self.bg_rgbnet = nn.Sequential(
                nn.Linear(dim0, bg_rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(bg_rgbnet_width, bg_rgbnet_width),
                                  nn.ReLU(inplace=True))
                    for _ in range(bg_rgbnet_depth - 2)
                ],
                nn.Linear(bg_rgbnet_width, 3),
            )
        else:
            self.bg_rgbnet = nn.Sequential(
                nn.Linear(dim0, bg_rgbnet_width), nn.LayerNorm(bg_rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(bg_rgbnet_width, bg_rgbnet_width), nn.LayerNorm(bg_rgbnet_width),
                                  nn.ReLU(inplace=True))
                    for _ in range(bg_rgbnet_depth - 2)
                ],
                nn.Linear(bg_rgbnet_width, 3),
            )
        nn.init.constant_(self.bg_rgbnet[-1].bias, 0)
        if self.bg_k0 is not None:
            print('background feature voxel grid', self.bg_k0.grid.shape)
            print('background mlp', self.bg_rgbnet)
        # do not use mask cache
        self.mask_cache_path = None
        self.mask_cache_thres = mask_cache_thres
        self.mask_cache = None
        # grad conv to calculate gradient
        self.init_gradient_conv()
        self.grad_mode = grad_mode
        self.nonempty_mask = None
        self._set_sphere_nonempty_mask()
    def init_gradient_conv(self, sigma = 0):
        """Build two frozen 3D convolutions:

        * `grad_conv`: a 3-channel finite-difference operator (one channel
          per axis) whose smoothing profile interpolates between a naive
          central difference (sigma=0) and a Sobel-like kernel as sigma grows;
        * `tv_smooth_conv`: the matching low-pass kernel, used to smooth the
          gradient field for the TV regularizer.

        Both have requires_grad disabled — they are fixed operators.
        """
        self.grad_conv = nn.Conv3d(1,3,(3,3,3),stride=(1,1,1), padding=(1, 1, 1), padding_mode='replicate')
        # fixme: a better operator?
        # Base 3D binomial smoothing weights (Sobel-style cross-section).
        kernel = np.asarray([
            [[1,2,1],[2,4,2],[1,2,1]],
            [[2,4,2],[4,8,4],[2,4,2]],
            [[1,2,1],[2,4,2],[1,2,1]],
        ])
        # sigma controls the difference between naive [-1,1] and sobel kernel
        distance = np.zeros((3,3,3))
        for i in range(3):
            for j in range(3):
                for k in range(3):
                    distance[i,j,k] = ((i-1)**2 + (j-1)**2 + (k-1)**2 - 1)
        kernel0 = kernel * np.exp(-distance * sigma)
        # Normalize so the finite difference is in world units (per voxel edge).
        kernel1 = kernel0 / ( kernel0[0].sum() * 2 * self.voxel_size.item())
        weight = torch.from_numpy(np.concatenate([kernel1[None] for _ in range(3)])).float()
        # Turn each channel into a +/- difference along its own axis:
        # zero out the centre slice, negate the leading slice.
        weight[0,1,:,:] *= 0
        weight[0,0,:,:] *= -1
        weight[1,:,1,:] *= 0
        weight[1,:,0,:] *= -1
        weight[2,:,:,1] *= 0
        weight[2,:,:,0] *= -1
        # print("- "*10 + "init gradient conv done" + " -"*10)
        self.grad_conv.weight.data = weight.unsqueeze(1).float()
        self.grad_conv.bias.data = torch.zeros(3)
        for param in self.grad_conv.parameters():
            param.requires_grad = False
        # smooth conv for TV
        self.tv_smooth_conv = nn.Conv3d(1, 1, (3, 3, 3), stride=1, padding=1, padding_mode='replicate')
        weight = torch.from_numpy(kernel0 / kernel0.sum()).float()
        self.tv_smooth_conv.weight.data = weight.unsqueeze(0).unsqueeze(0).float()
        self.tv_smooth_conv.bias.data = torch.zeros(1)
        for param in self.tv_smooth_conv.parameters():
            param.requires_grad = False
def _gaussian_3dconv(self, ksize=3, sigma=1):
x = np.arange(-(ksize//2),ksize//2 + 1,1)
y = np.arange(-(ksize//2),ksize//2 + 1,1)
z = np.arange(-(ksize//2),ksize//2 + 1,1)
xx, yy, zz = np.meshgrid(x,y,z)
kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
kernel = torch.from_numpy(kernel).to(self.sdf.grid)
m = nn.Conv3d(1,1,ksize,stride=1,padding=ksize//2, padding_mode='replicate')
m.weight.data = kernel[None, None, ...] / kernel.sum()
m.bias.data = torch.zeros(1)
for param in m.parameters():
param.requires_grad = False
# print(kernel)
return m
def init_smooth_conv(self, ksize=3, sigma=1):
self.smooth_sdf = ksize > 0
if self.smooth_sdf:
self.smooth_conv = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def _set_grid_resolution(self, num_voxels, num_voxels_bg=0):
# Determine grid resolution
if num_voxels_bg == 0:
num_voxels_bg = num_voxels
self.num_voxels = num_voxels
self.num_voxels_bg = num_voxels_bg
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.voxel_size_bg = ((self.xyz_max - self.xyz_min).prod() / num_voxels_bg).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.world_size_bg = ((self.xyz_max - self.xyz_min) / self.voxel_size_bg).long()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('voxel_size ', self.voxel_size)
print('voxel_size_bg ', self.voxel_size_bg)
print('world_size ', self.world_size)
print('world_size_bg ', self.world_size_bg)
print('voxel_size_base ', self.voxel_size_base)
print('voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'nearest': self.nearest,
'mask_cache_path': self.mask_cache_path,
'mask_cache_thres': self.mask_cache_thres,
'fast_color_thres': self.fast_color_thres,
'geo_rgb_dim':self.geo_rgb_dim,
**self.rgbnet_kwargs,
}
def get_MaskCache_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'act_shift': self.act_shift,
'voxel_size_ratio': self.voxel_size_ratio,
'nearest': self.nearest,
}
    @torch.no_grad()
    def _set_nonempty_mask(self):
        """Evaluate the mask cache at every SDF voxel centre and cache the
        resulting occupancy mask; empty voxels get their SDF forced to 1
        (i.e. pushed well outside the surface).
        """
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
        ), -1)
        nonempty_mask = self.mask_cache(self_grid_xyz)[None,None].contiguous()
        # Re-assign if the attribute already exists (it is set to None in
        # __init__), otherwise register it as a buffer.
        if hasattr(self, 'nonempty_mask'):
            self.nonempty_mask = nonempty_mask
        else:
            self.register_buffer('nonempty_mask', nonempty_mask)
        # self.bg_density.grid[~self.nonempty_mask] = -100
        self.sdf.grid[~self.nonempty_mask] = 1
        print('- '*10, 'setting mask cache!', ' -'*10)
    @torch.no_grad()
    def _set_sphere_nonempty_mask(self):
        """Mark voxel centres inside the unit sphere as non-empty and force
        the SDF outside the sphere to 1 (the foreground lives in |x| < 1).
        """
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
        ), -1)
        # Squared-radius test against the unit sphere, per voxel centre.
        nonempty_mask = (self_grid_xyz[...,0] ** 2 + self_grid_xyz[...,1] ** 2 + self_grid_xyz[...,2] ** 2) < 1.
        nonempty_mask = nonempty_mask[None, None]
        self.sphere_mask = nonempty_mask
        self.sdf.grid[~self.sphere_mask] = 1
    @torch.no_grad()
    def maskout_near_cam_vox(self, cam_o, near):
        """Clear voxels closer than `near` to any camera origin by setting
        their SDF to 1 (outside the surface), preventing floaters at cameras.
        """
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
        ), -1)
        # Distance from each voxel centre to its nearest camera; cameras are
        # processed in chunks of 100 to bound memory.
        nearest_dist = torch.stack([
            (self_grid_xyz.unsqueeze(-2) - co).pow(2).sum(-1).sqrt().amin(-1)
            for co in cam_o.split(100)  # for memory saving
        ]).amin(0)
        # self.sdf.grid[nearest_dist[None,None] <= near] = -100
        self.sdf.grid[nearest_dist[None,None] <= near] = 1
@torch.no_grad()
def scale_volume_grid(self, num_voxels, num_voxels_bg=0):
print('scale_volume_grid start')
ori_world_size = self.world_size
self._set_grid_resolution(num_voxels, num_voxels_bg)
print('scale_volume_grid scale world_size from', ori_world_size, 'to', self.world_size)
if num_voxels_bg > 0:
ori_world_size_bg = self.world_size_bg
print('scale_volume_grid scale [background] world_size from', ori_world_size_bg, 'to', self.world_size_bg)
self.sdf.scale_volume_grid(self.world_size)
self.bg_density.scale_volume_grid(self.world_size)
if self.k0_dim > 0:
self.k0.scale_volume_grid(self.world_size)
if self.bg_k0_dim > 0:
self.bg_k0.scale_volume_grid(self.world_size)
if self.mask_cache is not None:
self._set_nonempty_mask()
if self.smooth_scale:
m = self._gaussian_3dconv(ksize=5, sigma=1)
with torch.no_grad():
self.sdf.grid = torch.nn.Parameter(m(self.sdf.grid.data)).cuda()
print('scale_volume_grid finish')
def density_total_variation(self, sdf_tv=0, smooth_grad_tv=0, bg_density_tv=0.):
nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
if not self.tv_in_sphere:
nonempty_mask[...] = 1
tv = 0
if sdf_tv > 0:
tv += total_variation(self.sdf.grid, nonempty_mask) / 2 / self.voxel_size * sdf_tv
if smooth_grad_tv > 0:
smooth_tv_error = (self.tv_smooth_conv(self.gradient.permute(1,0,2,3,4)).detach() - self.gradient.permute(1,0,2,3,4))
smooth_tv_error = smooth_tv_error[nonempty_mask.repeat(3,1,1,1,1)] ** 2
tv += smooth_tv_error.mean() * smooth_grad_tv
if bg_density_tv > 0:
tv += total_variation(self.bg_density) / 2 / self.voxel_size * bg_density_tv
return tv
def k0_total_variation(self, k0_tv=1., k0_grad_tv=0.):
nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
if not self.tv_in_sphere:
nonempty_mask[...] = 1
if self.rgbnet is not None:
v = self.k0.grid
else:
v = torch.sigmoid(self.k0.grid)
tv = 0
if k0_tv > 0:
tv += total_variation(v, nonempty_mask.repeat(1,v.shape[1],1,1,1))
if k0_grad_tv > 0:
raise NotImplementedError
return tv
def bg_k0_total_variation(self, bg_k0_tv=1., bg_k0_grad_tv=0.):
nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
if not self.tv_in_sphere:
nonempty_mask[...] = 1
if self.rgbnet is not None:
v = self.bg_k0
else:
v = torch.sigmoid(self.bg_k0.grid)
tv = 0
if bg_k0_tv > 0:
tv += total_variation(v, nonempty_mask.repeat(1,v.shape[1],1,1,1))
if bg_k0_grad_tv > 0:
raise NotImplementedError
return tv
def activate_density(self, density, interval=None, s=1):
interval = interval if interval is not None else self.voxel_size_ratio
return 1 - torch.exp(- s * F.softplus(density + self.act_shift) * interval)
    def neus_sdf_gradient(self, mode=None, sdf=None):
        """Compute a (1, 3, D, H, W) gradient grid from the SDF grid.

        Modes:
            'interpolate' — central differences on interior voxels
                (boundary slices stay zero);
            'grad_conv'   — the fixed Sobel-like conv from init_gradient_conv;
            'raw'         — one-sided forward differences.
        Defaults to self.grad_mode and self.sdf.grid.
        """
        # the gradient grid from the sdf grid
        if sdf is None:
            sdf = self.sdf.grid
        if mode is None:
            mode = self.grad_mode
        if mode == 'interpolate':
            gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
            gradient[:,0,1:-1,:,:] = (sdf[:,0,2:,:,:] - sdf[:,0,:-2,:,:]) / 2 / self.voxel_size
            gradient[:,1,:,1:-1,:] = (sdf[:,0,:,2:,:] - sdf[:,0,:,:-2,:]) / 2 / self.voxel_size
            gradient[:,2,:,:,1:-1] = (sdf[:,0,:,:,2:] - sdf[:,0,:,:,:-2]) / 2 / self.voxel_size
        elif mode == 'grad_conv':
            """"""
            # use sobel operator for gradient seems basically the same as the naive solution
            for param in self.grad_conv.parameters():
                assert not param.requires_grad
                pass
            gradient = self.grad_conv(sdf)
        elif mode == 'raw':
            gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
            gradient[:,0,:-1,:,:] = (sdf[:,0,1:,:,:] - sdf[:,0,:-1,:,:]) / self.voxel_size
            gradient[:,1,:,:-1,:] = (sdf[:,0,:,1:,:] - sdf[:,0,:,:-1,:]) / self.voxel_size
            gradient[:,2,:,:,:-1] = (sdf[:,0,:,:,1:] - sdf[:,0,:,:,:-1]) / self.voxel_size
        else:
            raise NotImplementedError
        return gradient
    def neus_alpha_from_sdf_scatter(self, viewdirs, ray_id, dist, sdf, gradients, global_step,
                                    is_train, use_mid=True):
        """NeuS-style alpha from SDF samples (flattened/scattered layout).

        Args:
            viewdirs: per-ray unit directions; ray_id maps samples to rays.
            dist: step length (a constant in this implementation).
            sdf, gradients: per-sample SDF values and SDF gradients.
            global_step / is_train: drive the sharpness (s) schedule when
                s_learn is False; eval uses s_val as-is and reports s_val=0.
        Returns:
            (s_val, alpha) with alpha in [0, 1] per sample.
        """
        if is_train:
            if not self.s_learn:
                # Scheduled sharpness: grows with the training step.
                s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
                self.s_val.data = torch.ones_like(self.s_val) * s_val
            else:
                s_val = self.s_val.item()
        else:
            s_val = 0
        dirs = viewdirs[ray_id]
        inv_s = torch.ones(1).cuda() / self.s_val
        assert use_mid
        if use_mid:
            # Cosine between view direction and surface normal (SDF gradient).
            true_cos = (dirs * gradients).sum(-1, keepdim=True)
            cos_anneal_ratio = 1.0
            iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
                         F.relu(-true_cos) * cos_anneal_ratio) # always non-positive (M, 1)
            sdf = sdf.unsqueeze(-1) # (M, 1)
            # dist is a constant in this impelmentation
            # Estimate signed distances at section points
            estimated_next_sdf = sdf + iter_cos * dist.reshape(-1, 1) * 0.5 # (M, 1)
            estimated_prev_sdf = sdf - iter_cos * dist.reshape(-1, 1) * 0.5 # (M, 1)
        else:
            estimated_next_sdf = torch.cat([sdf[..., 1:], sdf[..., -1:]], -1).reshape(-1, 1)
            estimated_prev_sdf = torch.cat([sdf[..., :1], sdf[..., :-1]], -1).reshape(-1, 1)
        # Alpha = clipped CDF difference ratio (NeuS eq. for opaque density).
        prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s.reshape(-1, 1))
        next_cdf = torch.sigmoid(estimated_next_sdf * inv_s.reshape(-1, 1))
        p = prev_cdf - next_cdf
        c = prev_cdf
        alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0).squeeze()
        return s_val, alpha
    def grid_sampler(self, xyz, *grids, mode=None, align_corners=True, smooth=False, displace=0.):
        '''Wrapper for the interp operation.

        Samples grids[0] at world-space points `xyz` via F.grid_sample.
        `smooth=True` first low-passes the grid with self.smooth_conv;
        `displace` adds a uniform offset (in normalized coords, scaled by
        voxel_size) to all sample positions — NOTE(review): it is added to
        every axis identically; confirm this is intended.
        '''
        if mode is None:
            # bilinear is actually trilinear if 5D input is given to grid_sample
            mode = 'nearest' if self.nearest else 'bilinear'
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        # Normalize to [-1, 1]; flip because grid_sample indexes as (z, y, x).
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        if displace !=0:
            ind_norm[...,:] += displace * self.voxel_size
        # TODO: use `rearrange' to make it readable
        if smooth:
            grid = self.smooth_conv(grids[0])
        else:
            grid = grids[0]
        ret_lst = F.grid_sample(grid, ind_norm, mode=mode, align_corners=align_corners
                                ).reshape(grid.shape[1],-1).T.reshape(*shape,grid.shape[1]).squeeze()
        return ret_lst
    def sample_ray_cuda(self, rays_o, rays_d, near, far, stepsize, maskout=True, use_bg=False, **render_kwargs):
        '''Sample query points on rays.
        All the output points are sorted from near to far.
        Input:
            rays_o, rayd_d: both in [N, 3] indicating ray configurations.
            near, far: the near and far distance of the rays.
            stepsize: the number of voxels of each sample step.
        Output:
            ray_pts: [M, 3] storing all the sampled points.
            ray_id: [M] the index of the ray of each point.
            step_id: [M] the i'th step on a ray of each point.
        '''
        # The given far can be too small while rays stop when hitting the
        # scene bbox, so it is overridden with a huge value.
        far = 1e9
        rays_o = rays_o.contiguous()
        rays_d = rays_d.contiguous()
        if not use_bg:
            stepdist = stepsize * self.voxel_size
        else:
            stepdist = stepsize * self.voxel_size_bg
        ray_pts, mask_outbbox, ray_id, step_id, N_steps, t_min, t_max = render_utils_cuda.sample_pts_on_rays(
            rays_o, rays_d, self.xyz_min, self.xyz_max, near, far, stepdist)
        # correct the cuda output N_steps, which could have a bias of 1 randomly
        N_steps = ray_id.unique(return_counts=True)[1]
        if maskout:
            # NOTE(review): for use_bg the *out-of-bbox* points are kept
            # (mask_inbbox = mask_outbbox) — presumably intentional for the
            # inverted-sphere background; confirm against forward().
            if not use_bg:
                mask_inbbox = ~mask_outbbox
            else:
                mask_inbbox = mask_outbbox
            ray_pts = ray_pts[mask_inbbox]
            ray_id = ray_id[mask_inbbox]
            step_id = step_id[mask_inbbox]
        return ray_pts, ray_id, step_id, mask_outbbox, N_steps
    def sample_ray_ori(self, rays_o, rays_d, near, far, stepsize, is_train=False, use_bg=False, **render_kwargs):
        '''Sample query points on rays (pure-PyTorch fallback).

        Returns (rays_pts, mask_outbbox, step):
            rays_pts: [..., N_samples, 3] sample positions;
            mask_outbbox: True where a sample falls outside the scene bbox
                (or the ray misses it entirely);
            step: distances along each ray, in world units.
        During training a random per-ray offset jitters the sample positions.
        '''
        # 1. determine the maximum number of query points to cover all possible rays
        if use_bg:
            N_samples = int(np.linalg.norm(np.array(self.bg_density.grid.shape[2:])+1) / stepsize) + 1
        else:
            N_samples = int(np.linalg.norm(np.array(self.sdf.grid.shape[2:])+1) / stepsize) + 1
        # 2. determine the two end-points of ray bbox intersection
        vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
        rate_a = (self.xyz_max - rays_o) / vec
        rate_b = (self.xyz_min - rays_o) / vec
        t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
        t_max = torch.maximum(rate_a, rate_b).amin(-1).clamp(min=near, max=far)
        # 3. check wheter a raw intersect the bbox or not
        mask_outbbox = (t_max <= t_min)
        # 4. sample points on each ray
        rng = torch.arange(N_samples)[None].float()
        if is_train:
            # Per-ray random jitter of the sampling offsets.
            rng = rng.repeat(rays_d.shape[-2],1)
            rng += torch.rand_like(rng[:,[0]])
        if use_bg:
            step = stepsize * self.voxel_size_bg * rng
        else:
            step = stepsize * self.voxel_size * rng
        interpx = (t_min[...,None] + step/rays_d.norm(dim=-1,keepdim=True))
        rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
        # 5. update mask for query points outside bbox
        mask_outbbox = mask_outbbox[...,None] | ((self.xyz_min>rays_pts) | (rays_pts>self.xyz_max)).any(dim=-1)
        return rays_pts, mask_outbbox, step
def outside_sphere_trans(self, pts, pts_norm=None, filtered=False):
# r^2 = x^2 + y^2 + z^2; x = x / r^2
out_pts = pts.clone()
if filtered:
out_pts = out_pts / pts_norm ** 2
return out_pts
if pts_norm is None:
pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True)
inside_sphere = (pts_norm < 1.0)
out_pts[~inside_sphere[...,0]] = out_pts[~inside_sphere[...,0]] / pts_norm[~inside_sphere[...,0]] ** 2
out_pts[inside_sphere[...,0]] = -10
return out_pts, ~inside_sphere
def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):
    '''Volume rendering.

    Composites a foreground pass (NeuS-style alphas from the SDF grid,
    restricted to samples inside the unit sphere) with a background pass
    (density grid over sphere-inverted coordinates for the unbounded scene).

    Args:
        rays_o, rays_d: (N, 3) ray origins / directions.
        viewdirs: (N, 3) unit viewing directions.
        global_step: training iteration, or None at eval time.
        **render_kwargs: sampling options (near/far/stepsize/...).

    Returns:
        dict of rendered quantities (rgb_marched, weights, depth, ...).

    Fix: background alpha filtering now compares against
    ``self.bg_fast_color_thres`` (the guard's threshold) instead of
    ``self.fast_color_thres``.
    '''
    if global_step is not None:
        # Periodically re-clamp the SDF outside the unit sphere.
        if global_step in [1, 100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 16000, 17000, 18000, 19000, 20000] or global_step % self.set_sphere_freq == 0:
            self._set_sphere_nonempty_mask()
    ret_dict = {}
    N = len(rays_o)
    # --- foreground: sample points on rays, keep only samples inside the sphere
    inner_pts, inner_ray_id, inner_step_id, mask_outbbox, N_steps = self.sample_ray_cuda(
        rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, **render_kwargs)
    pts_norm = torch.linalg.norm(inner_pts, ord=2, dim=-1, keepdim=True)
    inside_sphere = (pts_norm < 1.0)[:,0]
    inner_pts, inner_ray_id, inner_step_id = \
        inner_pts[inside_sphere], inner_ray_id[inside_sphere], inner_step_id[inside_sphere]
    # --- background: sample with the original sampler, invert through the sphere
    bg_render_kwargs = copy.deepcopy(render_kwargs)
    # old sample ray
    outer_pts_org, bg_mask_outbbox, bg_step = self.sample_ray_ori(
        rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, use_bg=True, **bg_render_kwargs)
    outer_ray_id, outer_step_id = create_full_step_id(outer_pts_org.shape[:2])
    bg_pts_norm = torch.linalg.norm(outer_pts_org, ord=2, dim=-1, keepdim=True)
    bg_inside_sphere = (bg_pts_norm < 1.0)[...,0]
    outer_pts = self.outside_sphere_trans(outer_pts_org, bg_pts_norm, filtered=True)
    bg_mask = ~bg_inside_sphere & bg_mask_outbbox
    # Drop inverted background samples that bunch up closer than ~one step.
    dist_thres = self.voxel_size * render_kwargs['stepsize'] * 0.95
    dist = (outer_pts[:, 1:] - outer_pts[:, :-1]).norm(dim=-1)
    dist_mask = ub360_utils_cuda.cumdist_thres(dist, dist_thres)
    bg_mask[:,1:] &= dist_mask
    outer_pts, outer_ray_id, outer_step_id = \
        outer_pts[bg_mask], outer_ray_id[bg_mask.view(-1)], outer_step_id[bg_mask.view(-1)]
    outer_pts_org = outer_pts_org[bg_mask]
    # --- foreground alphas from the (optionally smoothed) SDF
    if self.smooth_sdf:
        sdf_grid = self.smooth_conv(self.sdf.grid)
    else:
        sdf_grid = self.sdf.grid
    sdf = self.grid_sampler(inner_pts, sdf_grid)
    self.gradient = self.neus_sdf_gradient(sdf=sdf_grid)
    gradient = self.grid_sampler(inner_pts, self.gradient)
    dist = render_kwargs['stepsize'] * self.voxel_size
    s_val, in_alpha = self.neus_alpha_from_sdf_scatter(viewdirs, inner_ray_id, dist, sdf, gradient, global_step=global_step,
                                                      is_train=global_step is not None, use_mid=True)
    in_weights, in_alphainv_last = Alphas2Weights.apply(in_alpha, inner_ray_id, N)
    if self.fast_color_thres > 0:
        mask = in_weights > self.fast_color_thres
        inner_pts = inner_pts[mask]
        inner_ray_id = inner_ray_id[mask]
        inner_step_id = inner_step_id[mask]
        in_alpha = in_alpha[mask]
        gradient = gradient[mask]
        in_weights, in_alphainv_last = Alphas2Weights.apply(in_alpha, inner_ray_id, N)
    # --- background alphas from the density grid
    bg_interval = bg_render_kwargs['stepsize'] * self.voxel_size_ratio
    bg_density = self.bg_density(outer_pts)
    bg_alpha = self.activate_density(bg_density, bg_interval)
    if self.bg_fast_color_thres > 0:
        # Fix: filter background samples with the *background* threshold
        # (previously compared against self.fast_color_thres).
        mask = bg_alpha > self.bg_fast_color_thres
        outer_pts = outer_pts[mask]
        outer_ray_id = outer_ray_id[mask]
        bg_alpha = bg_alpha[mask]
        outer_pts_org = outer_pts_org[mask]
    bg_weights, bg_alphainv_last = Alphas2Weights.apply(bg_alpha, outer_ray_id, N)
    # --- foreground color
    rgb_feat = []
    if not self.rgbnet_full_implicit:
        k0 = self.k0(inner_pts)
        rgb_feat.append(k0)
    if self.use_xyz:
        rays_xyz = (inner_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
        xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
        xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
        rgb_feat.append(xyz_emb)
    if self.use_view:
        viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
        viewdirs_emb = torch.cat(
            [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
        rgb_feat.append(viewdirs_emb.flatten(0, -2)[inner_ray_id])
    rgb_feat = torch.cat(rgb_feat, -1)
    if self.geo_rgb_dim == 3:
        normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
        rgb_feat = torch.cat([rgb_feat, normal], -1)
    rgb_logit = self.rgbnet(rgb_feat)
    rgb = torch.sigmoid(rgb_logit)
    # outside
    bg_rgb_feat = []
    if self.bg_k0_dim > 0:
        bg_k0 = self.bg_k0(outer_pts)
        bg_rgb_feat.append(bg_k0)
    if self.bg_use_xyz:
        # positional encoding uses the *pre-inversion* coordinates
        bg_rays_xyz = (outer_pts_org - self.xyz_min) / (self.xyz_max - self.xyz_min)
        bg_xyz_emb = (bg_rays_xyz.unsqueeze(-1) * self.bg_posfreq).flatten(-2)
        bg_xyz_emb = torch.cat(
            [bg_rays_xyz, bg_xyz_emb.sin(), bg_xyz_emb.cos()], -1)
        bg_rgb_feat.append(bg_xyz_emb)
    if self.bg_use_view:
        bg_viewdirs_emb = (viewdirs.unsqueeze(-1) * self.bg_viewfreq).flatten(-2)
        bg_viewdirs_emb = torch.cat(
            [viewdirs, bg_viewdirs_emb.sin(), bg_viewdirs_emb.cos()], -1)
        bg_rgb_feat.append(bg_viewdirs_emb.flatten(0, -2)[outer_ray_id])
    bg_rgb_feat = torch.cat(bg_rgb_feat, -1)
    bg_rgb_logit = self.bg_rgbnet(bg_rgb_feat)
    bg_rgb = torch.sigmoid(bg_rgb_logit)
    # --- composite: foreground first, then background through leftover transmittance
    in_marched = segment_coo(
        src=(in_weights.unsqueeze(-1) * rgb),
        index=inner_ray_id, out=torch.zeros([N, 3]), reduce='sum')
    bg_marched = segment_coo(
        src=(bg_weights.unsqueeze(-1) * bg_rgb),
        index=outer_ray_id, out=torch.zeros([N, 3]), reduce='sum')
    cum_in_weights = segment_coo(
        src=(in_weights.unsqueeze(-1)),
        index=inner_ray_id, out=torch.zeros([N, 1]), reduce='sum')
    rgb_marched = in_marched + (1 - cum_in_weights) * bg_marched
    rgb_marched = rgb_marched.clamp(0, 1)
    if gradient is not None and render_kwargs.get('render_grad', False):
        normal = gradient / (gradient.norm(2, -1, keepdim=True) + 1e-6)
        normal_marched = segment_coo(
            src=(in_weights.unsqueeze(-1) * normal),
            index=inner_ray_id, out=torch.zeros([N, 3]), reduce='sum')
    else:
        normal_marched = None
    if render_kwargs.get('render_depth', False):
        with torch.no_grad():
            depth = segment_coo(
                src=(in_weights * inner_step_id * dist),
                index=inner_ray_id, out=torch.zeros([N]), reduce='sum')
        disp = 1 / depth
    else:
        depth = None
        disp = None
    ret_dict.update({
        'alphainv_cum': in_alphainv_last,
        'weights': in_weights,
        'bg_weights': bg_weights,
        'pts_norm': pts_norm,
        'rgb_marched': rgb_marched,
        'in_marched': in_marched,
        'out_marched': bg_marched,
        'normal_marched': normal_marched,
        'raw_alpha': in_alpha,
        'raw_rgb': rgb,
        # NOTE(review): `mask` is whichever filter mask was assigned last above;
        # it is undefined (NameError) when both thresholds are 0 — preserved.
        'depth': depth,
        'disp': disp,
        'mask': mask,
        'mask_outbbox': mask_outbbox,
        'gradient': gradient,
        "s_val": s_val,
    })
    return ret_dict
def mesh_color_forward(self, ray_pts, **kwargs):
    """Query vertex colors for mesh export.

    View directions are synthesized as the negated surface normals (there is
    no camera when baking colors onto an extracted mesh).

    Args:
        ray_pts: (M, 3) query points (typically mesh vertices).
    Returns:
        (M, 3) sigmoid-activated RGB colors.
    """
    ### coarse-stage geometry and texture are low in resolution
    sdf_grid = self.smooth_conv(self.sdf.grid) if self.smooth_sdf else self.sdf.grid
    self.gradient = self.neus_sdf_gradient(sdf=sdf_grid)
    gradient = self.grid_sampler(ray_pts, self.gradient).reshape(-1, 3)
    normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
    # "view" each point from along its inward normal
    viewdirs = -normal
    rgb_feat = []
    if not self.rgbnet_full_implicit:
        k0 = self.k0(ray_pts)
        rgb_feat.append(k0)
    if self.use_xyz:
        # normalized coordinates + sinusoidal positional encoding
        rays_xyz = (ray_pts - self.xyz_min) / (
                self.xyz_max - self.xyz_min)
        xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
        xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
        rgb_feat.append(xyz_emb)
    if self.use_view:
        viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
        viewdirs_emb = torch.cat(
            [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
        rgb_feat.append(viewdirs_emb.flatten(0, -2))
    rgb_feat = torch.cat(rgb_feat, -1)
    if self.geo_rgb_dim == 3:
        normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
        rgb_feat = torch.cat([rgb_feat, normal], -1)
    rgb_logit = self.rgbnet(rgb_feat)
    rgb = torch.sigmoid(rgb_logit)
    return rgb
def extract_geometry(self, bound_min, bound_max, resolution=128, threshold=0.0, **kwargs):
    """Extract a mesh from the (optionally smoothed) SDF grid.

    Negates the SDF so the iso-surface query follows the module-level
    ``extract_geometry`` helper's density convention.

    Fix: removed the dead ``else: sdf_grid = sdf_grid`` branch.

    Args:
        bound_min, bound_max: axis-aligned bounds of the extraction volume.
        resolution: marching-cubes grid resolution; falls back to the model's
            world size when None.
        threshold: iso-surface level.
    """
    self._set_sphere_nonempty_mask()
    sdf_grid = self.sdf.grid.clone()
    if self.smooth_sdf:
        sdf_grid = self.smooth_conv(sdf_grid)
    query_func = lambda pts: self.grid_sampler(pts, - sdf_grid)
    if resolution is None:
        resolution = self.world_size[0]
    # Resolves to the module-level extract_geometry helper (imported), not
    # this method.
    return extract_geometry(bound_min,
                            bound_max,
                            resolution=resolution,
                            threshold=threshold,
                            query_func=query_func)
def visualize_density_sdf(self, root='', iter=0, idxs=None):
    """Dump axial slices of the SDF grid as grayscale debug images.

    Args:
        root: output root directory; images are written to <root>/debug_figs.
        iter: iteration tag used in the file names.
        idxs: slice indices along the first spatial axis; when None, the
            middle slice (or slice 60 for large grids) is used.

    Fix: the value mapping was ``sdf_img + 1 / 2`` which, by operator
    precedence, adds 0.5 instead of mapping [-1, 1] -> [0, 1].
    """
    if idxs is None:
        if self.bg_density.grid.shape[2] < 100:
            idxs = [self.bg_density.grid.shape[2] // 2]
        else:
            idxs = [60]
    os.makedirs(os.path.join(root, "debug_figs"), exist_ok=True)
    for i in idxs:
        sdf_img = self.sdf.grid[0, 0, i].cpu().detach().numpy()
        # Map SDF values from [-1, 1] to [0, 255].
        sdf_img = ((sdf_img + 1) / 2).clip(0, 1) * 255
        cv2.imwrite(os.path.join(root, "debug_figs/sdf_{}_{}.png".format(iter, i)), sdf_img)
def visualize_weight(self, weight1, weight2, thrd=0.001):
    """Plot per-ray weight profiles of the 100 rays with the largest total
    weight, comparing ``weight1`` against ``weight2`` where ``weight1``
    exceeds the visibility threshold.

    Fixes: uses ``argsort`` directly (the original indexed the
    ``(values, indices)`` tuple returned by ``sort()`` with ``[-1]``), and
    closes each figure to avoid leaking matplotlib figures on repeated calls.
    """
    idxs = weight1.sum(-1).argsort()[-100:]
    for i in idxs:
        fig = plt.figure()
        vis = weight1[i] > thrd
        plt.plot(weight1.detach().cpu().numpy()[i][vis])
        plt.plot(weight2.detach().cpu().numpy()[i][vis])
        plt.savefig("weight_{}.png".format(i))
        plt.close(fig)
''' Misc
'''
def total_variation(v, mask=None):
    """Mean total variation of a 5D grid ``v`` (N, C, D, H, W) over its three
    spatial dimensions, optionally restricted to voxels where ``mask`` is True.

    Fix: the original compared ``torch.__version__ == '1.10.0'`` exactly, so
    the fast ``Tensor.diff`` path was silently skipped on every other torch
    release; use a feature check instead (``diff`` exists since torch 1.8).

    Args:
        v: (N, C, D, H, W) tensor.
        mask: optional boolean tensor broadcast-compatible with ``v``.
    Returns:
        scalar tensor: average of the three per-axis mean absolute differences.
    """
    if hasattr(torch.Tensor, 'diff'):
        tv2 = v.diff(dim=2).abs()
        tv3 = v.diff(dim=3).abs()
        tv4 = v.diff(dim=4).abs()
    else:
        tv2 = (v[:,:,1:,:,:] - v[:,:,:-1,:,:]).abs()
        tv3 = (v[:,:,:,1:,:] - v[:,:,:,:-1,:]).abs()
        tv4 = (v[:,:,:,:,1:] - v[:,:,:,:,:-1]).abs()
    if mask is not None:
        # Keep only differences whose both endpoints are inside the mask.
        tv2 = tv2[mask[:,:,:-1] & mask[:,:,1:]]
        tv3 = tv3[mask[:,:,:,:-1] & mask[:,:,:,1:]]
        tv4 = tv4[mask[:,:,:,:,:-1] & mask[:,:,:,:,1:]]
    return (tv2.mean() + tv3.mean() + tv4.mean()) / 3
class Alphas2Weights(torch.autograd.Function):
    """Custom autograd op converting per-sample alphas into compositing
    weights via the ``render_utils_cuda.alpha2weight`` kernel.

    ``weights[i] = alpha[i] * prod_{j<i on same ray}(1 - alpha[j])``;
    ``alphainv_last`` is the leftover transmittance per ray.
    """
    @staticmethod
    def forward(ctx, alpha, ray_id, N):
        # T / i_start / i_end are kernel internals needed by the backward pass.
        weights, T, alphainv_last, i_start, i_end = render_utils_cuda.alpha2weight(alpha, ray_id, N)
        if alpha.requires_grad:
            ctx.save_for_backward(alpha, weights, T, alphainv_last, i_start, i_end)
            ctx.n_rays = N
        return weights, alphainv_last
    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_weights, grad_last):
        # Gradient flows only to `alpha`; ray_id and N are non-differentiable.
        alpha, weights, T, alphainv_last, i_start, i_end = ctx.saved_tensors
        grad = render_utils_cuda.alpha2weight_backward(
            alpha, weights, T, alphainv_last,
            i_start, i_end, ctx.n_rays, grad_weights, grad_last)
        return grad, None, None
''' Ray and batch
'''
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
    """Generate per-pixel ray origins and directions in world space.

    Args:
        H, W: image height and width.
        K: 3x3 intrinsics (indexable as K[r][c]).
        c2w: 4x4 (or 3x4) camera-to-world transform tensor.
        inverse_y: camera convention toggle for the y/z axes.
        flip_x, flip_y: mirror the pixel grid along x / y.
        mode: sub-pixel offset — 'lefttop', 'center', or 'random'.
    Returns:
        (rays_o, rays_d): both (H, W, 3) tensors in world coordinates.
    """
    u, v = torch.meshgrid(
        torch.linspace(0, W-1, W, device=c2w.device),
        torch.linspace(0, H-1, H, device=c2w.device))  # pytorch's meshgrid has indexing='ij'
    u = u.t().float()
    v = v.t().float()
    if mode == 'center':
        u, v = u + 0.5, v + 0.5
    elif mode == 'random':
        u = u + torch.rand_like(u)
        v = v + torch.rand_like(v)
    elif mode != 'lefttop':
        raise NotImplementedError
    if flip_x:
        u = u.flip((1,))
    if flip_y:
        v = v.flip((0,))
    ones = torch.ones_like(u)
    if inverse_y:
        dirs = torch.stack([(u-K[0][2])/K[0][0], (v-K[1][2])/K[1][1], ones], -1)
    else:
        dirs = torch.stack([(u-K[0][2])/K[0][0], -(v-K[1][2])/K[1][1], -ones], -1)
    # Rotate ray directions from camera frame to the world frame
    # (per-pixel dot product with the rotation rows).
    rays_d = (dirs.unsqueeze(-2) * c2w[:3, :3]).sum(-1)
    # Translate camera frame's origin to the world frame. It is the origin of all rays.
    rays_o = c2w[:3, 3].expand(rays_d.shape)
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Map rays to normalized device coordinates (NDC) for forward-facing
    scenes, after shifting ray origins onto the near plane.

    Args:
        H, W: image height/width; focal: focal length; near: near-plane depth.
        rays_o, rays_d: (..., 3) ray origins and directions.
    Returns:
        (rays_o, rays_d) in NDC space.
    """
    # Shift ray origins to near plane
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    origins = rays_o + t[..., None] * rays_d
    ox, oy, oz = origins[..., 0], origins[..., 1], origins[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    # Perspective projection scale factors.
    sx = -1. / (W / (2. * focal))
    sy = -1. / (H / (2. * focal))
    o0 = sx * ox / oz
    o1 = sy * oy / oz
    o2 = 1. + 2. * near / oz
    d0 = sx * (dx / dz - ox / oz)
    d1 = sy * (dy / dz - oy / oz)
    d2 = -2. * near / oz
    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
    """Build the rays for one camera view plus unit view directions.

    View directions are normalized *before* the optional NDC warp so they
    stay valid world-space directions.
    """
    origins, directions = get_rays(H, W, K, c2w, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y, mode=mode)
    viewdirs = directions / directions.norm(dim=-1, keepdim=True)
    if ndc:
        origins, directions = ndc_rays(H, W, K[0][0], 1., origins, directions)
    return origins, directions, viewdirs
@torch.no_grad()
def get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute per-image ray tensors for training.

    Requires every image to share the same resolution and intrinsics.

    Args:
        rgb_tr: (n_imgs, H, W, 3) training images.
        train_poses: per-image camera-to-world matrices.
        HW, Ks: per-image (H, W) pairs and 3x3 intrinsics.
    Returns:
        (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz) — ray tensors shaped
        like the images, plus a per-image size list (all 1s here).
    """
    print('get_training_rays: start')
    assert len(np.unique(HW, axis=0)) == 1
    assert len(np.unique(Ks.reshape(len(Ks),-1), axis=0)) == 1
    assert len(rgb_tr) == len(train_poses) and len(rgb_tr) == len(Ks) and len(rgb_tr) == len(HW)
    H, W = HW[0]
    K = Ks[0]
    eps_time = time.time()
    rays_o_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    rays_d_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    viewdirs_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    imsz = [1] * len(rgb_tr)
    for i, c2w in enumerate(train_poses):
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        # copy_ into the preallocated buffers to avoid holding per-view tensors
        rays_o_tr[i].copy_(rays_o.to(rgb_tr.device))
        rays_d_tr[i].copy_(rays_d.to(rgb_tr.device))
        viewdirs_tr[i].copy_(viewdirs.to(rgb_tr.device))
        del rays_o, rays_d, viewdirs
    eps_time = time.time() - eps_time
    print('get_training_rays: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute training rays flattened over all pixels of all images.

    Unlike ``get_training_rays``, images may differ in resolution; all pixels
    are concatenated into flat (N, 3) tensors.

    Returns:
        (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz) where imsz holds the
        pixel count of each image.
    """
    print('get_training_rays_flatten: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    eps_time = time.time()
    DEVICE = rgb_tr_ori[0].device
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        n = H * W
        # write this image's pixels/rays into the [top, top+n) slice
        rgb_tr[top:top+n].copy_(img.flatten(0,1))
        rays_o_tr[top:top+n].copy_(rays_o.flatten(0,1).to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d.flatten(0,1).to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs.flatten(0,1).to(DEVICE))
        imsz.append(n)
        top += n
    assert top == N
    eps_time = time.time() - eps_time
    print('get_training_rays_flatten: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def batch_indices_generator(N, BS):
    """Infinite generator of random index batches of size BS over range(N),
    reshuffling whenever fewer than BS indices remain."""
    # torch.randperm on cuda produce incorrect results in my machine
    perm = torch.LongTensor(np.random.permutation(N))
    cursor = 0
    while True:
        if cursor + BS > N:
            perm = torch.LongTensor(np.random.permutation(N))
            cursor = 0
        yield perm[cursor:cursor + BS]
        cursor += BS
@functools.lru_cache(maxsize=128)
def create_full_step_id(shape):
    """Return flattened (ray_id, step_id) index tensors for a dense
    (n_rays, n_steps) sample grid; cached since shapes repeat every batch."""
    n_rays, n_steps = shape
    ray_id = torch.repeat_interleave(torch.arange(n_rays), n_steps)
    step_id = torch.arange(n_steps).repeat(n_rays)
    return ray_id, step_id
| 44,856 | 42.720273 | 162 | py |
Voxurf | Voxurf-main/lib/load_blendedmvs.py | import os
import glob
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
def load_blendedmvs_data(basedir):
    """Load a BlendedMVS-format scene.

    Expects ``pose/*.txt`` and ``rgb/*.png`` with matching order; the first
    character of each image file name encodes its split (0=train, 1=test).

    Returns:
        (imgs, poses, render_poses, [H, W, focal], K, i_split) where i_split
        contains train/test index lists (the test split is reused for val).
    """
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
    all_poses = []
    all_imgs = []
    i_split = [[], []]
    for i, (pose_path, rgb_path) in enumerate(zip(pose_paths, rgb_paths)):
        # leading digit of the file name selects the split
        i_set = int(os.path.split(rgb_path)[-1][0])
        all_imgs.append((imageio.imread(rgb_path) / 255.).astype(np.float32))
        all_poses.append(np.loadtxt(pose_path).astype(np.float32))
        i_split[i_set].append(i)
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    # duplicate the test split as the val split
    i_split.append(i_split[-1])
    """ delete single side """
    # ref_pos = poses[0][:,-1]
    # dist = ((poses[:,:,-1] - ref_pos[None]) ** 2).sum(-1)
    # i_select = np.argsort(dist)[:20]
    # i_split[0] = i_select.tolist()
    path_intrinsics = os.path.join(basedir, 'intrinsics.txt')
    H, W = imgs[0].shape[:2]
    K = np.loadtxt(path_intrinsics)
    focal = float(K[0,0])
    render_poses = torch.Tensor(np.loadtxt(os.path.join(basedir, 'test_traj.txt')).reshape(-1,4,4).astype(np.float32))
    return imgs, poses, render_poses, [H, W, focal], K, i_split
| 1,312 | 30.261905 | 118 | py |
Voxurf | Voxurf-main/lib/voxurf_womask_fine.py | import os
import time
import functools
import numpy as np
from copy import deepcopy
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.dvgo_ori import extract_geometry
import math
import random
import copy
from . import grid
from torch_scatter import segment_coo
from torch.utils.cpp_extension import load
# JIT-compile and load the custom CUDA extensions at import time
# (torch.utils.cpp_extension.load). Sources resolve relative to this file.
parent_dir = os.path.dirname(os.path.abspath(__file__))
# Helpers for unbounded-360 ("ub360") background sample filtering.
ub360_utils_cuda = load(
    name='ub360_utils_cuda',
    sources=[
        os.path.join(parent_dir, path)
        for path in ['cuda/ub360_utils.cpp', 'cuda/ub360_utils_kernel.cu']],
    verbose=True)
# Rendering kernels (alpha-to-weight compositing, ray sampling).
render_utils_cuda = load(
    name='render_utils_cuda',
    sources=[
        os.path.join(parent_dir, path)
        for path in ['cuda/render_utils.cpp', 'cuda/render_utils_kernel.cu']],
    verbose=True)
'''Model'''
class Voxurf(torch.nn.Module):
"""
This module is modified from DirectVoxGO https://github.com/sunset1995/DirectVoxGO/blob/main/lib/dvgo.py
"""
def __init__(self, xyz_min, xyz_max,
             num_voxels=0, num_voxels_bg=0, num_voxels_base=0,
             alpha_init=None,
             nearest=False,
             mask_cache_path=None, mask_cache_thres=1e-3,
             fast_color_thres=0, bg_fast_color_thres=0,
             rgbnet_dim=0, rgbnet_direct=False, rgbnet_full_implicit=False,
             rgbnet_depth=3, rgbnet_width=128,
             posbase_pe=5, viewbase_pe=4,
             center_sdf=False, grad_feat=(1.0,), sdf_feat=(),
             use_layer_norm=False,
             grad_mode='interpolate',
             s_ratio=2000, s_start=0.2, s_learn=False, step_start=0,
             smooth_sdf=False,
             smooth_ksize=0, smooth_sigma=1,
             k_rgbnet_depth=3, k_res=False, k_posbase_pe=5, k_viewbase_pe=4,
             k_center_sdf=False, k_grad_feat=(1.0,), k_sdf_feat=(),
             smooth_scale=True, use_grad_norm=True,
             use_rgb_k=True, k_detach_1=True, k_detach_2=True,
             use_rgbnet_k0=False,
             bg_rgbnet_dim=12, bg_posbase_pe=5, bg_viewbase_pe=4,
             bg_rgbnet_width=128, bg_rgbnet_depth=3, tv_in_sphere=False,
             init_ball_scale=0.5, init_bg_density_fix=False, set_sphere_freq=20000,
             **kwargs):
    """Voxel-based SDF surface model (Voxurf) with an unbounded background.

    Builds an SDF voxel grid (foreground), a density voxel grid
    (background), per-grid feature volumes, and three MLP color heads
    (rgbnet, k_rgbnet, bg_rgbnet).

    Fix: the final-layer bias of ``k_rgbnet`` is now zero-initialized
    (previously ``self.rgbnet[-1].bias`` was zeroed a second time instead).
    """
    super(Voxurf, self).__init__()
    self.register_buffer('xyz_min', torch.Tensor(xyz_min))
    self.register_buffer('xyz_max', torch.Tensor(xyz_max))
    self.fast_color_thres = fast_color_thres
    self.bg_fast_color_thres = bg_fast_color_thres
    self.nearest = nearest
    self.set_sphere_freq = set_sphere_freq
    self.init_bg_density_fix = init_bg_density_fix
    self.tv_in_sphere = tv_in_sphere
    self.init_ball_scale = init_ball_scale
    # NeuS-style sharpness schedule parameters.
    self.s_ratio = s_ratio
    self.s_start = s_start
    self.s_learn = s_learn
    self.step_start = step_start
    self.s_val = nn.Parameter(torch.ones(1), requires_grad=s_learn).cuda()
    self.s_val.data *= s_start
    self.smooth_sdf = smooth_sdf
    self.sdf_init_mode = "ball_init"
    # determine based grid resolution
    self.num_voxels_base = num_voxels_base
    self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)
    # determine the density bias shift
    self.alpha_init = alpha_init
    self.act_shift = np.log(1/(1-alpha_init) - 1)
    print('dvgo: set density bias shift to', self.act_shift)
    # determine init grid resolution
    self._set_grid_resolution(num_voxels, num_voxels_bg)
    self.density = torch.nn.Parameter(torch.zeros([1, 1, *self.world_size]))
    self.bg_density = grid.create_grid(
        'DenseGrid', channels=1, world_size=self.world_size_bg,
        xyz_min=self.xyz_min, xyz_max=self.xyz_max)
    if self.sdf_init_mode == "ball_init":
        # Initialize the SDF as a sphere of radius init_ball_scale.
        self.sdf = grid.create_grid(
            'DenseGrid', channels=1, world_size=self.world_size,
            xyz_min=self.xyz_min, xyz_max=self.xyz_max)
        x_min, y_min, z_min = self.xyz_min.cpu().numpy()
        x_max, y_max, z_max = self.xyz_max.cpu().numpy()
        x, y, z = np.mgrid[x_min:x_max:self.world_size[0].item() * 1j, y_min:y_max:self.world_size[1].item() * 1j, z_min:z_max:self.world_size[2].item() * 1j]
        self.sdf.grid.data = torch.from_numpy((x ** 2 + y ** 2 + z ** 2) ** 0.5 - self.init_ball_scale).float()[None, None, ...]
    elif self.sdf_init_mode == "random":
        self.sdf = torch.nn.Parameter(torch.rand([1, 1, *self.world_size]) * 0.05)  # random initialization
        torch.nn.init.normal_(self.sdf, 0.0, 0.5)
    else:
        raise NotImplementedError
    self.init_smooth_conv(smooth_ksize, smooth_sigma)
    self.smooth_scale = smooth_scale
    # init color representation
    self.rgbnet_kwargs = {
        'rgbnet_dim': rgbnet_dim, 'rgbnet_direct': rgbnet_direct,
        'rgbnet_full_implicit': rgbnet_full_implicit,
        'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
        'posbase_pe': posbase_pe, 'viewbase_pe': viewbase_pe,
    }
    self.k0_dim = rgbnet_dim
    self.bg_k0_dim = bg_rgbnet_dim
    self.k0 = grid.create_grid(
        'DenseGrid', channels=self.k0_dim, world_size=self.world_size,
        xyz_min=self.xyz_min, xyz_max=self.xyz_max)
    self.bg_k0 = grid.create_grid(
        'DenseGrid', channels=self.bg_k0_dim, world_size=self.world_size_bg,
        xyz_min=self.xyz_min, xyz_max=self.xyz_max)
    self.rgbnet_direct = rgbnet_direct
    self.register_buffer('posfreq', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))
    self.register_buffer('viewfreq', torch.FloatTensor([(2**i) for i in range(viewbase_pe)]))
    self.use_xyz = posbase_pe >= 0
    self.use_view = viewbase_pe >= 0
    # --- first (geometry-aware) color MLP input dimension
    dim0 = 0
    if self.use_xyz:
        dim0 += (3 + 3 * posbase_pe * 2)
    if self.use_view:
        dim0 += (3 + 3 * viewbase_pe * 2)
    self.use_grad_norm = use_grad_norm
    self.center_sdf = center_sdf
    self.grad_feat = grad_feat
    self.sdf_feat = sdf_feat
    self.use_rgb_k = use_rgb_k
    self.k_detach_1 = k_detach_1
    self.k_detach_2 = k_detach_2
    self.use_rgbnet_k0 = use_rgbnet_k0
    self.use_layer_norm = use_layer_norm
    dim0 += len(self.grad_feat) * 3
    dim0 += len(self.sdf_feat) * 6
    if self.use_rgbnet_k0:
        dim0 += self.k0_dim
    if self.center_sdf:
        dim0 += 1
    if not self.use_layer_norm:
        self.rgbnet = nn.Sequential(
            nn.Linear(dim0, rgbnet_width), nn.ReLU(inplace=True),
            *[
                nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
                for _ in range(rgbnet_depth-2)
            ],
            nn.Linear(rgbnet_width, 3),
        )
    else:
        self.rgbnet = nn.Sequential(
            nn.Linear(dim0, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True),
            *[
                nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True))
                for _ in range(rgbnet_depth-2)
            ],
            nn.Linear(rgbnet_width, 3),
        )
    nn.init.constant_(self.rgbnet[-1].bias, 0)
    print('feature voxel grid', self.k0.grid.shape)
    print('mlp', self.rgbnet)
    # --- the second rgb net
    self.k_res = k_res
    self.k_center_sdf = k_center_sdf
    self.k_grad_feat = k_grad_feat
    self.k_sdf_feat = k_sdf_feat
    self.register_buffer('k_posfreq', torch.FloatTensor([(2**i) for i in range(k_posbase_pe)]))
    self.register_buffer('k_viewfreq', torch.FloatTensor([(2**i) for i in range(k_viewbase_pe)]))
    self.k_use_xyz = k_posbase_pe >= 0
    self.k_use_view = k_viewbase_pe >= 0
    k_dim0 = (3+3*k_posbase_pe*2) + (3+3*k_viewbase_pe*2) + self.k0_dim
    if self.k_res:
        k_dim0 += 3
    if self.k_center_sdf:
        k_dim0 += 1
    k_dim0 += len(self.k_grad_feat) * 3
    k_dim0 += len(self.k_sdf_feat) * 6
    if not self.use_layer_norm:
        self.k_rgbnet = nn.Sequential(
            nn.Linear(k_dim0, rgbnet_width), nn.ReLU(inplace=True),
            *[
                nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
                for _ in range(k_rgbnet_depth-2)
            ],
            nn.Linear(rgbnet_width, 3),
        )
    else:
        self.k_rgbnet = nn.Sequential(
            nn.Linear(k_dim0, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True),
            *[
                nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True))
                for _ in range(k_rgbnet_depth-2)
            ],
            nn.Linear(rgbnet_width, 3),
        )
    # Fix: zero the final bias of k_rgbnet (was self.rgbnet[-1].bias).
    nn.init.constant_(self.k_rgbnet[-1].bias, 0)
    print('feature voxel grid', self.bg_k0.grid.shape)
    print('k_rgbnet mlp', self.k_rgbnet)
    # --- background color MLP
    self.bg_rgbnet_width = bg_rgbnet_width
    self.bg_rgbnet_depth = bg_rgbnet_depth
    self.register_buffer('bg_posfreq', torch.FloatTensor([(2**i) for i in range(bg_posbase_pe)]))
    self.register_buffer('bg_viewfreq', torch.FloatTensor([(2**i) for i in range(bg_viewbase_pe)]))
    self.bg_use_xyz = bg_posbase_pe >= 0
    self.bg_use_view = bg_viewbase_pe >= 0
    bg_dim0 = (3+3*bg_posbase_pe*2) + (3+3*bg_viewbase_pe*2) + self.bg_k0_dim
    if not self.use_layer_norm:
        self.bg_rgbnet = nn.Sequential(
            nn.Linear(bg_dim0, bg_rgbnet_width), nn.ReLU(inplace=True),
            *[
                nn.Sequential(nn.Linear(bg_rgbnet_width, bg_rgbnet_width),
                              nn.ReLU(inplace=True))
                for _ in range(bg_rgbnet_depth - 2)
            ],
            nn.Linear(bg_rgbnet_width, 3),
        )
    else:
        # NOTE(review): this branch is identical to the one above — it adds
        # no LayerNorm, unlike rgbnet/k_rgbnet. Left unchanged to preserve
        # checkpoint compatibility; confirm whether LayerNorm was intended.
        self.bg_rgbnet = nn.Sequential(
            nn.Linear(bg_dim0, bg_rgbnet_width), nn.ReLU(inplace=True),
            *[
                nn.Sequential(nn.Linear(bg_rgbnet_width, bg_rgbnet_width),
                              nn.ReLU(inplace=True))
                for _ in range(bg_rgbnet_depth - 2)
            ],
            nn.Linear(bg_rgbnet_width, 3),
        )
    nn.init.constant_(self.bg_rgbnet[-1].bias, 0)
    print('bg mlp', self.bg_rgbnet)
    # Using the coarse geometry if provided (used to determine known free space and unknown space)
    # NOTE(review): the constructor arguments are deliberately overridden here,
    # disabling the mask cache regardless of what was passed in.
    mask_cache_path = None
    mask_cache_thres = None
    self.mask_cache_path = mask_cache_path
    self.mask_cache_thres = mask_cache_thres
    if mask_cache_path is not None and mask_cache_path:
        self.mask_cache = MaskCache(
            path=mask_cache_path,
            mask_cache_thres=mask_cache_thres).to(self.xyz_min.device)
        self._set_nonempty_mask()
    else:
        self.mask_cache = None
        self.nonempty_mask = None
    self._set_sphere_nonempty_mask()
    # grad conv to calculate gradient
    self.init_gradient_conv()
    print(" ", self.xyz_min)
    self.grad_mode = grad_mode
    self.global_step = 0
def init_gradient_conv(self, sigma = 0):
    """Build two frozen 3D convolutions:

    - ``grad_conv``: a 3-channel Sobel-like finite-difference operator that
      estimates the SDF gradient along x/y/z (scaled by the voxel size).
    - ``tv_smooth_conv``: a normalized smoothing kernel used for the
      smoothed-gradient TV regularizer.

    ``sigma`` interpolates between the plain [-1, 1] stencil and a
    distance-weighted (Sobel-like) kernel.
    """
    self.grad_conv = nn.Conv3d(1,3,(3,3,3),stride=(1,1,1), padding=(1, 1, 1), padding_mode='replicate')
    # fixme: a better operator?
    kernel = np.asarray([
        [[1,2,1],[2,4,2],[1,2,1]],
        [[2,4,2],[4,8,4],[2,4,2]],
        [[1,2,1],[2,4,2],[1,2,1]],
    ])
    # sigma controls the difference between naive [-1,1] and sobel kernel
    distance = np.zeros((3,3,3))
    for i in range(3):
        for j in range(3):
            for k in range(3):
                distance[i,j,k] = ((i-1)**2 + (j-1)**2 + (k-1)**2 - 1)
    kernel0 = kernel * np.exp(-distance * sigma)
    # normalize so the finite difference approximates a derivative per unit length
    kernel1 = kernel0 / ( kernel0[0].sum() * 2 * self.voxel_size.item())
    weight = torch.from_numpy(np.concatenate([kernel1[None] for _ in range(3)])).float()
    # turn each channel into a signed difference along one axis
    weight[0,1,:,:] *= 0
    weight[0,0,:,:] *= -1
    weight[1,:,1,:] *= 0
    weight[1,:,0,:] *= -1
    weight[2,:,:,1] *= 0
    weight[2,:,:,0] *= -1
    # print("- "*10 + "init gradient conv done" + " -"*10)
    self.grad_conv.weight.data = weight.unsqueeze(1).float()
    self.grad_conv.bias.data = torch.zeros(3)
    for param in self.grad_conv.parameters():
        param.requires_grad = False
    # smooth conv for TV
    self.tv_smooth_conv = nn.Conv3d(1, 1, (3, 3, 3), stride=1, padding=1, padding_mode='replicate')
    weight = torch.from_numpy(kernel0 / kernel0.sum()).float()
    self.tv_smooth_conv.weight.data = weight.unsqueeze(0).unsqueeze(0).float()
    self.tv_smooth_conv.bias.data = torch.zeros(1)
    for param in self.tv_smooth_conv.parameters():
        param.requires_grad = False
def _gaussian_3dconv(self, ksize=3, sigma=1):
x = np.arange(-(ksize//2),ksize//2 + 1,1)
y = np.arange(-(ksize//2),ksize//2 + 1,1)
z = np.arange(-(ksize//2),ksize//2 + 1,1)
xx, yy, zz = np.meshgrid(x,y,z)
kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
kernel = torch.from_numpy(kernel).to(self.sdf.grid)
m = nn.Conv3d(1,1,ksize,stride=1,padding=ksize//2, padding_mode='replicate')
m.weight.data = kernel[None, None, ...] / kernel.sum()
m.bias.data = torch.zeros(1)
for param in m.parameters():
param.requires_grad = False
# print(kernel)
return m
def init_smooth_conv_test_k3(self, ksize=3, sigma=0.4):
    """Create the ksize=3 test-time SDF smoothing kernel."""
    self.smooth_conv_test_k3 = self._gaussian_3dconv(ksize, sigma)
    banner = "init smooth conv test with ksize={} and sigma={}".format(ksize, sigma)
    print("- " * 10 + banner + " -" * 10)
def init_smooth_conv_test_k5(self, ksize=5, sigma=0.4):
    """Create the ksize=5 test-time SDF smoothing kernel."""
    self.smooth_conv_test_k5 = self._gaussian_3dconv(ksize, sigma)
    banner = "init smooth conv test with ksize={} and sigma={}".format(ksize, sigma)
    print("- " * 10 + banner + " -" * 10)
def init_smooth_conv(self, ksize=3, sigma=1):
    """Enable SDF smoothing when ksize > 0 and build the Gaussian kernel."""
    use_smoothing = ksize > 0
    self.smooth_sdf = use_smoothing
    if use_smoothing:
        self.smooth_conv = self._gaussian_3dconv(ksize, sigma)
        banner = "init smooth conv with ksize={} and sigma={}".format(ksize, sigma)
        print("- " * 10 + banner + " -" * 10)
def init_sdf_from_sdf(self, sdf0=None, smooth=False, reduce=1., ksize=3, sigma=1., zero2neg=True):
    """Initialize this model's SDF grid from a (possibly lower-resolution)
    source SDF, optionally Gaussian-smoothed and amplitude-reduced.

    Args:
        sdf0: source SDF grid tensor (1, 1, D, H, W).
        smooth: if True, blur the source before copying.
        reduce: amplitude divisor applied to the source values.
        ksize, sigma: Gaussian kernel parameters for the optional blur.
        zero2neg: unused here — TODO confirm whether it was meant to remap
            zero-valued voxels.
    """
    print("\n", "- "*3 + "initing sdf from sdf" + " -"*3, "\n")
    if sdf0.shape != self.sdf.grid.shape:
        sdf0 = F.interpolate(sdf0, size=tuple(self.world_size), mode='trilinear', align_corners=True)
    if smooth:
        m = self._gaussian_3dconv(ksize, sigma)
        sdf_data = m(sdf0 / reduce)
        # NOTE(review): sdf0 was already divided by `reduce` above, so this
        # divides by `reduce` twice (no-op for the default reduce=1.) — confirm.
        self.sdf.grid = torch.nn.Parameter(sdf_data).to(self.sdf.grid) / reduce
    else:
        self.sdf.grid.data = sdf0.to(self.sdf.grid) / reduce  # + self.act_shift
    if self.mask_cache is not None:
        self._set_nonempty_mask()
    if self.smooth_scale:
        # extra fixed-kernel blur to soften the upscaled SDF
        m = self._gaussian_3dconv(ksize=5, sigma=1)
        with torch.no_grad():
            self.sdf.grid = torch.nn.Parameter(m(self.sdf.grid.data)).cuda()
    self.gradient = self.neus_sdf_gradient()
def init_bg_density_from_bg_density(self, bg_density):
    """Initialize the background density grid from a source density grid,
    resampling to this model's resolution when shapes differ.

    ``init_bg_density_fix`` selects the correct target size
    (``world_size_bg``); the legacy behavior resamples to ``world_size``.
    """
    print("\n", "- "*3 + "initing bg_density from bg_density" + " -"*3, "\n")
    if bg_density.shape != self.bg_density.grid.shape:
        if self.init_bg_density_fix:
            bg_density = F.interpolate(bg_density, size=tuple(self.world_size_bg), mode='trilinear', align_corners=True)
        else:
            bg_density = F.interpolate(bg_density, size=tuple(self.world_size), mode='trilinear', align_corners=True)
    self.bg_density.grid.data = bg_density.to(self.bg_density.grid)
    if self.mask_cache is not None:
        self._set_nonempty_mask()
def _set_grid_resolution(self, num_voxels, num_voxels_bg=0):
# Determine grid resolution
if num_voxels_bg == 0:
num_voxels_bg = num_voxels
self.num_voxels = num_voxels
self.num_voxels_bg = num_voxels_bg
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.voxel_size_bg = ((self.xyz_max - self.xyz_min).prod() / num_voxels_bg).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.world_size_bg = ((self.xyz_max - self.xyz_min) / self.voxel_size_bg).long()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('voxel_size ', self.voxel_size)
print('voxel_size_bg ', self.voxel_size_bg)
print('world_size ', self.world_size)
print('world_size_bg ', self.world_size_bg)
print('voxel_size_base ', self.voxel_size_base)
print('voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
    """Constructor kwargs needed to re-instantiate this model (used for
    checkpoint serialization)."""
    out = {
        'xyz_min': self.xyz_min.cpu().numpy(),
        'xyz_max': self.xyz_max.cpu().numpy(),
        'num_voxels': self.num_voxels,
        'num_voxels_base': self.num_voxels_base,
        'alpha_init': self.alpha_init,
        'nearest': self.nearest,
        'mask_cache_path': self.mask_cache_path,
        'mask_cache_thres': self.mask_cache_thres,
        'fast_color_thres': self.fast_color_thres,
        'grad_feat': self.grad_feat,
        'sdf_feat': self.sdf_feat,
        'k_grad_feat': self.k_grad_feat,
        'k_sdf_feat': self.k_sdf_feat,
    }
    # color-network hyperparameters are stored as a ready-made dict
    out.update(self.rgbnet_kwargs)
    return out
def get_MaskCache_kwargs(self):
    """Kwargs for constructing a MaskCache consistent with this model."""
    return dict(
        xyz_min=self.xyz_min.cpu().numpy(),
        xyz_max=self.xyz_max.cpu().numpy(),
        act_shift=self.act_shift,
        voxel_size_ratio=self.voxel_size_ratio,
        nearest=self.nearest,
    )
@torch.no_grad()
def _set_nonempty_mask(self):
    """Mark grid points outside the cached coarse-stage occupancy as empty by
    pushing their SDF to +1 (i.e. outside the surface)."""
    # Find grid points that is inside nonempty (occupied) space
    self_grid_xyz = torch.stack(torch.meshgrid(
        torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
        torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
        torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
    ), -1)
    nonempty_mask = self.mask_cache(self_grid_xyz)[None,None].contiguous()
    # register_buffer only works the first time; afterwards assign in place
    if hasattr(self, 'nonempty_mask'):
        self.nonempty_mask = nonempty_mask
    else:
        self.register_buffer('nonempty_mask', nonempty_mask)
    # self.bg_density.grid[~self.nonempty_mask] = -100
    self.sdf.grid[~self.nonempty_mask] = 1
    print('- '*10, 'setting mask cache!', ' -'*10)
@torch.no_grad()
def _set_sphere_nonempty_mask(self):
    """Clamp the SDF to +1 (empty) for every grid point outside the unit
    sphere, restricting the surface to the foreground region."""
    self_grid_xyz = torch.stack(torch.meshgrid(
        torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
        torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
        torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
    ), -1)
    # inside-unit-sphere test on the voxel-center coordinates
    nonempty_mask = (self_grid_xyz[...,0] ** 2 + self_grid_xyz[...,1] ** 2 + self_grid_xyz[...,2] ** 2) < 1.
    nonempty_mask = nonempty_mask[None, None]
    self.sphere_mask = nonempty_mask
    self.sdf.grid[~self.sphere_mask] = 1
@torch.no_grad()
def maskout_near_cam_vox(self, cam_o, near):
    """Mark voxels closer than ``near`` to any camera origin as empty
    (SDF pushed to +1), since geometry cannot exist inside the near plane.

    Args:
        cam_o: (n_cams, 3) camera origins.
        near: near-plane distance threshold.
    """
    self_grid_xyz = torch.stack(torch.meshgrid(
        torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
        torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
        torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
    ), -1)
    nearest_dist = torch.stack([
        (self_grid_xyz.unsqueeze(-2) - co).pow(2).sum(-1).sqrt().amin(-1)
        for co in cam_o.split(100)  # for memory saving
    ]).amin(0)
    # self.bg_density.grid[nearest_dist[None,None] <= near] = -100
    self.sdf.grid[nearest_dist[None,None] <= near] = 1
    @torch.no_grad()
    def scale_volume_grid(self, num_voxels, num_voxels_bg=0):
        """Resample all voxel grids to a new resolution (progressive scaling).

        Args:
            num_voxels: target total voxel count for the foreground grids.
            num_voxels_bg: target count for the background grids
                (0 only skips the background log line).
        """
        print('scale_volume_grid start')
        ori_world_size = self.world_size
        self._set_grid_resolution(num_voxels, num_voxels_bg)
        print('scale_volume_grid scale world_size from', ori_world_size, 'to', self.world_size)
        if num_voxels_bg > 0:
            ori_world_size_bg = self.world_size_bg
            print('scale_volume_grid scale [background] world_size from', ori_world_size_bg, 'to', self.world_size_bg)
        self.sdf.scale_volume_grid(self.world_size)
        self.bg_density.scale_volume_grid(self.world_size_bg)
        if self.k0_dim > 0:
            self.k0.scale_volume_grid(self.world_size)
        if self.bg_k0_dim > 0:
            self.bg_k0.scale_volume_grid(self.world_size_bg)
        # NOTE(review): assumes a dense ``self.density`` nn.Parameter exists
        # alongside the grid wrappers above — confirm it is kept in sync.
        self.density = torch.nn.Parameter(
            F.interpolate(self.density.data, size=tuple(self.world_size), mode='trilinear', align_corners=True))
        if self.mask_cache is not None:
            self._set_nonempty_mask()
        self._set_sphere_nonempty_mask()
        print('scale_volume_grid finish')
def bg_density_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.bg_density.total_variation_add_grad(w, w, w, dense_mode)
def sdf_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.sdf.total_variation_add_grad(w, w, w, dense_mode)
def k0_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.k0.total_variation_add_grad(w, w, w, dense_mode)
def bg_k0_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.bg_k0.total_variation_add_grad(w, w, w, dense_mode)
    def density_total_variation(self, sdf_tv=0, smooth_grad_tv=0, bg_density_tv=0.):
        """Weighted sum of TV losses on the SDF grid, its gradient field, and
        the background density grid. Returns 0 when all weights are zero.

        Uses ``nonempty_mask`` when set, otherwise the unit-sphere mask.
        """
        nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
        # NOTE(review): this writes 1 into the mask tensor *in place*, which
        # also mutates self.sphere_mask / self.nonempty_mask — confirm intended.
        if not self.tv_in_sphere:
            nonempty_mask[...] = 1
        tv = 0
        if sdf_tv > 0:
            # Divide by voxel size so TV strength is resolution independent.
            tv += total_variation(self.sdf.grid, nonempty_mask) / 2 / self.voxel_size * sdf_tv
        if smooth_grad_tv > 0:
            # Penalize deviation of the gradient field from its blurred copy.
            smooth_tv_error = (self.tv_smooth_conv(self.gradient.permute(1,0,2,3,4)).detach() - self.gradient.permute(1,0,2,3,4))
            smooth_tv_error = smooth_tv_error[nonempty_mask.repeat(3,1,1,1,1)] ** 2
            tv += smooth_tv_error.mean() * smooth_grad_tv
        if bg_density_tv > 0:
            tv += total_variation(self.bg_density.grid) / 2 / self.voxel_size * bg_density_tv
        return tv
    def k0_total_variation(self, k0_tv=1., k0_grad_tv=0.):
        """Total-variation loss on the color-feature (k0) grid."""
        nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
        # NOTE(review): in-place mask overwrite, same caveat as density TV.
        if not self.tv_in_sphere:
            nonempty_mask[...] = 1
        if self.rgbnet is not None:
            # Features feed an MLP: regularize the raw grid values.
            v = self.k0.grid
        else:
            # Grid holds raw RGB logits: regularize the activated colors.
            v = torch.sigmoid(self.k0.grid)
        tv = 0
        if k0_tv > 0:
            tv += total_variation(v, nonempty_mask.repeat(1,v.shape[1],1,1,1))
        if k0_grad_tv > 0:
            raise NotImplementedError
        return tv
    def bg_k0_total_variation(self, bg_k0_tv=1., bg_k0_grad_tv=0.):
        """Total-variation loss on the background color-feature grid.

        Mirrors ``k0_total_variation`` but on ``self.bg_k0``.
        """
        nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
        # NOTE(review): in-place mask overwrite, same caveat as density TV.
        if not self.tv_in_sphere:
            nonempty_mask[...] = 1
        if self.rgbnet is not None:
            v = self.bg_k0.grid
        else:
            v = torch.sigmoid(self.bg_k0.grid)
        tv = 0
        if bg_k0_tv > 0:
            tv += total_variation(v, nonempty_mask.repeat(1,v.shape[1],1,1,1))
        if bg_k0_grad_tv > 0:
            raise NotImplementedError
        return tv
def activate_density(self, density, interval=None):
interval = interval if interval is not None else self.voxel_size_ratio
return 1 - torch.exp(-F.softplus(density + self.act_shift) * interval)
def activate_sdf(self, sdf, s=10):
return sdf
    def neus_sdf_gradient(self, mode=None, sdf=None):
        """Finite-difference gradient (1, 3, D, H, W) of the SDF grid.

        Modes:
            'interpolate' — central differences (border voxels left at zero).
            'grad_conv'   — fixed (non-trainable) convolution.
            'raw'         — forward differences (last slice left at zero).
        """
        if sdf is None:
            sdf = self.sdf.grid
        if mode is None:
            mode = self.grad_mode
        if mode == 'interpolate':
            gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
            gradient[:,0,1:-1,:,:] = (sdf[:,0,2:,:,:] - sdf[:,0,:-2,:,:]) / 2 / self.voxel_size
            gradient[:,1,:,1:-1,:] = (sdf[:,0,:,2:,:] - sdf[:,0,:,:-2,:]) / 2 / self.voxel_size
            gradient[:,2,:,:,1:-1] = (sdf[:,0,:,:,2:] - sdf[:,0,:,:,:-2]) / 2 / self.voxel_size
        elif mode == 'grad_conv':
            """"""
            # use sobel operator for gradient seems basically the same as the naive solution
            for param in self.grad_conv.parameters():
                assert not param.requires_grad
                pass
            gradient = self.grad_conv(sdf)
        elif mode == 'raw':
            gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
            gradient[:,0,:-1,:,:] = (sdf[:,0,1:,:,:] - sdf[:,0,:-1,:,:]) / self.voxel_size
            gradient[:,1,:,:-1,:] = (sdf[:,0,:,1:,:] - sdf[:,0,:,:-1,:]) / self.voxel_size
            gradient[:,2,:,:,:-1] = (sdf[:,0,:,:,1:] - sdf[:,0,:,:,:-1]) / self.voxel_size
        else:
            raise NotImplementedError
        return gradient
    def neus_alpha_from_sdf_scatter(self, viewdirs, ray_id, dist, sdf, gradients, global_step,
                                    is_train, use_mid=True):
        """NeuS alpha computation for flattened (scatter-indexed) samples.

        Estimates SDF at section end-points using the cosine between the view
        direction and the SDF gradient, then converts the scaled-sigmoid CDF
        difference into per-sample alpha.

        Returns:
            (s_val, alpha): current sharpness scalar and (M,) alphas.
        """
        if is_train:
            if not self.s_learn:
                # Anneal the sharpness s with the training step.
                s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
                self.s_val.data = torch.ones_like(self.s_val) * s_val
            else:
                s_val = self.s_val.item()
        else:
            s_val = 0
        dirs = viewdirs[ray_id]
        inv_s = torch.ones(1).cuda() / self.s_val
        assert use_mid
        if use_mid:
            true_cos = (dirs * gradients).sum(-1, keepdim=True)
            cos_anneal_ratio = 1.0
            iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
                         F.relu(-true_cos) * cos_anneal_ratio)  # always non-positive (M, 1)
            sdf = sdf.unsqueeze(-1)  # (M, 1)
            # dist is a constant in this implementation
            # Estimate signed distances at section points
            estimated_next_sdf = sdf + iter_cos * dist.reshape(-1, 1) * 0.5  # (M, 1)
            estimated_prev_sdf = sdf - iter_cos * dist.reshape(-1, 1) * 0.5  # (M, 1)
        else:
            estimated_next_sdf = torch.cat([sdf[..., 1:], sdf[..., -1:]], -1).reshape(-1, 1)
            estimated_prev_sdf = torch.cat([sdf[..., :1], sdf[..., :-1]], -1).reshape(-1, 1)
        prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s.reshape(-1, 1))
        next_cdf = torch.sigmoid(estimated_next_sdf * inv_s.reshape(-1, 1))
        p = prev_cdf - next_cdf
        c = prev_cdf
        # Epsilon keeps the ratio finite when transmittance saturates.
        alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0).squeeze()
        return s_val, alpha
    def neus_alpha_from_sdf(self, viewdirs, steps, sdf, gradients, global_step, is_train, use_mid=True):
        """Batched (per-ray) variant of the NeuS alpha computation.

        Operates on (batch, n_samples)-shaped tensors instead of flattened
        scatter arrays; section lengths are derived from the z-values in
        ``steps``.
        """
        ori_shape = viewdirs.shape
        n_samples = steps.shape[-1]
        # force s_val value to change with global step
        if is_train:
            batch_size = steps.shape[0]
            if not self.s_learn:
                s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
                self.s_val.data = torch.ones_like(self.s_val) * s_val
            else:
                s_val = self.s_val.item()
        else:
            # Eval path expects (n_img, n_ray, 3)-style inputs; flatten them.
            assert len(viewdirs.size()) > 2
            dirs = viewdirs.reshape(-1, 3)
            steps = steps.reshape(-1, n_samples)
            batch_size = dirs.shape[0]
            s_val = 0
        if steps.shape[0] == 1:
            steps = steps.repeat(batch_size,1)
        dirs = viewdirs.unsqueeze(-2)
        inv_s = torch.ones(1).cuda() / self.s_val # * torch.exp(-inv_s)
        inv_s = inv_s.expand(batch_size * n_samples, 1)
        if use_mid:
            true_cos = (dirs * gradients).sum(-1, keepdim=True)
            cos_anneal_ratio = 1.0
            iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
                         F.relu(-true_cos) * cos_anneal_ratio)  # always non-positive
            iter_cos = iter_cos.reshape(-1,1)
            sdf = sdf.reshape(-1, 1)
            # calculate dist from steps / z_vals
            dists = steps[..., 1:] - steps[..., :-1]
            # Pad the last section with the mean section length.
            dists = torch.cat([dists, torch.Tensor([dists.mean()]).expand(dists[..., :1].shape)], -1)
            # Estimate signed distances at section points
            estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5
            estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5
        else:
            estimated_next_sdf = torch.cat([sdf[...,1:], sdf[...,-1:]], -1).reshape(-1, 1)
            estimated_prev_sdf = torch.cat([sdf[...,:1], sdf[...,:-1]], -1).reshape(-1, 1)
        # when the interval is not the same, the inv_s should not be the same? or it should be
        prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
        next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
        p = prev_cdf - next_cdf
        c = prev_cdf
        alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0)
        if not is_train:
            alpha = alpha.reshape((ori_shape[0], ori_shape[1], n_samples))
        return s_val, alpha #, weights
def grid_sampler(self, xyz, *grids, mode=None, align_corners=True, sample_ret=True, sample_grad=False, displace=0.1, smooth=False):
'''Wrapper for the interp operation'''
if mode is None:
# bilinear is actually trilinear if 5D input is given to grid_sample
mode = 'nearest' if self.nearest else 'bilinear'
shape = xyz.shape[:-1]
xyz = xyz.reshape(1,1,1,-1,3)
if smooth:
grid = self.smooth_conv(grids[0])
grids[0] = grid
outs = []
if sample_ret:
ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
grid = grids[0]
ret = F.grid_sample(grid, ind_norm, mode=mode, align_corners=align_corners).reshape(
grid.shape[1],-1).T.reshape(*shape,grid.shape[1]).squeeze(-1)
outs.append(ret)
if sample_grad:
grid = grids[0]
feat, grad = self.sample_sdfs(xyz, grid, displace_list=[1.0], use_grad_norm=False)
feat = torch.cat([feat[:, 4:6], feat[:, 2:4], feat[:, 0:2]], dim=-1)
grad = torch.cat([grad[:, [2]], grad[:, [1]], grad[:, [0]]], dim=-1)
outs.append(grad)
outs.append(feat)
if len(outs) == 1:
return outs[0]
else:
return outs
    def sample_sdfs(self, xyz, *grids, displace_list, mode='bilinear', align_corners=True, use_grad_norm=False):
        """Sample SDF values at axis-aligned offsets around each query point.

        For every displacement d in ``displace_list`` the grid is sampled at
        the 6 axis neighbors (±d voxels along each grid axis); differences of
        opposite neighbors give a finite-difference gradient per axis.

        Returns:
            feat: (M, 6 * len(displace_list)) neighbor SDF values.
            grad: (M, 3 * len(displace_list)) gradients, L2-normalized over
                the axis dimension when ``use_grad_norm`` is set.
        """
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        grid = grids[0]
        # ind from xyz to zyx !!!!!
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        grid_size = grid.size()[-3:]
        size_factor_zyx = torch.tensor([grid_size[2], grid_size[1], grid_size[0]]).cuda()
        # Convert normalized coords to (fractional) voxel indices.
        ind = ((ind_norm + 1) / 2) * (size_factor_zyx - 1)
        # 6 axis-aligned unit offsets, scaled by each requested displacement.
        offset = torch.tensor([[-1, 0, 0], [1, 0, 0], [0, -1, 0], [0, 1, 0], [0, 0, -1], [0, 0, 1]]).cuda()
        displace = torch.tensor(displace_list).cuda()
        offset = offset[:, None, :] * displace[None, :, None]
        all_ind = ind.unsqueeze(-2) + offset.view(-1, 3)
        all_ind = all_ind.view(1, 1, 1, -1, 3)
        # Clamp neighbors to the grid so border queries stay valid.
        all_ind[..., 0] = all_ind[..., 0].clamp(min=0, max=size_factor_zyx[0] - 1)
        all_ind[..., 1] = all_ind[..., 1].clamp(min=0, max=size_factor_zyx[1] - 1)
        all_ind[..., 2] = all_ind[..., 2].clamp(min=0, max=size_factor_zyx[2] - 1)
        all_ind_norm = (all_ind / (size_factor_zyx-1)) * 2 - 1
        feat = F.grid_sample(grid, all_ind_norm, mode=mode, align_corners=align_corners)
        all_ind = all_ind.view(1, 1, 1, -1, 6, len(displace_list), 3)
        # Actual index distance between the +/- neighbors of each axis
        # (can be < 2d at grid borders because of the clamping above).
        diff = all_ind[:, :, :, :, 1::2, :, :] - all_ind[:, :, :, :, 0::2, :, :]
        diff, _ = diff.max(dim=-1)
        feat_ = feat.view(1, 1, 1, -1, 6, len(displace_list))
        feat_diff = feat_[:, :, :, :, 1::2, :] - feat_[:, :, :, :, 0::2, :]
        grad = feat_diff / diff / self.voxel_size
        feat = feat.view(shape[-1], 6, len(displace_list))
        grad = grad.view(shape[-1], 3, len(displace_list))
        if use_grad_norm:
            grad = grad / (grad.norm(dim=1, keepdim=True) + 1e-5)
        feat = feat.view(shape[-1], 6 * len(displace_list))
        grad = grad.view(shape[-1], 3 * len(displace_list))
        return feat, grad
    def sample_ray_cuda(self, rays_o, rays_d, near, far, stepsize, maskout=True, use_bg=False, **render_kwargs):
        '''Sample query points on rays (CUDA kernel path).
        All the output points are sorted from near to far.
        Input:
            rays_o, rayd_d: both in [N, 3] indicating ray configurations.
            near, far: the near and far distance of the rays.
            stepsize: the number of voxels of each sample step.
        Output:
            ray_pts: [M, 3] storing all the sampled points.
            ray_id: [M] the index of the ray of each point.
            step_id: [M] the i'th step on a ray of each point.
        '''
        far = 1e9  # the given far can be too small while rays stop when hitting scene bbox
        rays_o = rays_o.contiguous()
        rays_d = rays_d.contiguous()
        if not use_bg:
            stepdist = stepsize * self.voxel_size
        else:
            stepdist = stepsize * self.voxel_size_bg
        ray_pts, mask_outbbox, ray_id, step_id, N_steps, t_min, t_max = render_utils_cuda.sample_pts_on_rays(
            rays_o, rays_d, self.xyz_min, self.xyz_max, near, far, stepdist)
        # correct the cuda output N_steps, which could have a bias of 1 randomly
        N_steps = ray_id.unique(return_counts=True)[1]
        if maskout:
            # NOTE(review): for use_bg the *outside*-bbox samples are kept —
            # confirm this inversion is intended for background sampling.
            if not use_bg:
                mask_inbbox = ~mask_outbbox
            else:
                mask_inbbox = mask_outbbox
            ray_pts = ray_pts[mask_inbbox]
            ray_id = ray_id[mask_inbbox]
            step_id = step_id[mask_inbbox]
        return ray_pts, ray_id, step_id, mask_outbbox, N_steps
    def sample_ray_ori(self, rays_o, rays_d, near, far, stepsize, is_train=False, use_bg=False, **render_kwargs):
        '''Sample query points on rays (pure-PyTorch path).

        Returns:
            rays_pts: (..., N_samples, 3) sampled points.
            mask_outbbox: bool mask of samples outside the scene bbox (or on
                rays that miss it entirely).
            step: per-sample marching distances (before division by the ray
                direction norm).
        '''
        # 1. determine the maximum number of query points to cover all possible rays
        if use_bg:
            N_samples = int(np.linalg.norm(np.array(self.bg_density.grid.shape[2:])+1) / stepsize) + 1
        else:
            N_samples = int(np.linalg.norm(np.array(self.sdf.grid.shape[2:])+1) / stepsize) + 1
        # 2. determine the two end-points of ray bbox intersection
        vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
        rate_a = (self.xyz_max - rays_o) / vec
        rate_b = (self.xyz_min - rays_o) / vec
        t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
        t_max = torch.maximum(rate_a, rate_b).amin(-1).clamp(min=near, max=far)
        # 3. check wheter a raw intersect the bbox or not
        mask_outbbox = (t_max <= t_min)
        # 4. sample points on each ray
        rng = torch.arange(N_samples)[None].float()
        if is_train:
            # Jitter all samples of a ray by one shared random offset.
            rng = rng.repeat(rays_d.shape[-2],1)
            rng += torch.rand_like(rng[:,[0]])
        if use_bg:
            step = stepsize * self.voxel_size_bg * rng
        else:
            step = stepsize * self.voxel_size * rng
        interpx = (t_min[...,None] + step/rays_d.norm(dim=-1,keepdim=True))
        rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
        # 5. update mask for query points outside bbox
        mask_outbbox = mask_outbbox[...,None] | ((self.xyz_min>rays_pts) | (rays_pts>self.xyz_max)).any(dim=-1)
        return rays_pts, mask_outbbox, step
    def outside_sphere_trans(self, pts, pts_norm=None, filtered=False):
        """Inverse-sphere warp: map points to x / ||x||^2.

        With ``filtered=True`` all points are assumed to lie outside the unit
        sphere already and a single warped tensor is returned. Otherwise,
        outside points are warped, inside points are set to the sentinel -10,
        and (points, outside_mask) is returned.
        NOTE(review): the two call modes have different return arities.
        """
        # r^2 = x^2 + y^2 + z^2; x = x / r^2
        out_pts = pts.clone()
        if filtered:
            out_pts = out_pts / pts_norm ** 2
            return out_pts
        if pts_norm is None:
            pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True)
        inside_sphere = (pts_norm < 1.0)
        out_pts[~inside_sphere[...,0]] = out_pts[~inside_sphere[...,0]] / pts_norm[~inside_sphere[...,0]] ** 2
        out_pts[inside_sphere[...,0]] = -10
        return out_pts, ~inside_sphere
    def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):
        '''Volume rendering.

        Splits samples into an inside-unit-sphere foreground (NeuS SDF
        rendering) and an inverse-sphere-warped background (density
        rendering), composites both per ray, and optionally runs a second
        color MLP (``use_rgb_k``) whose output supersedes the first.

        Args:
            rays_o, rays_d: (N, 3) ray origins and directions.
            viewdirs: (N, 3) unit view directions.
            global_step: training step, or None at eval time.
        Returns:
            Dict with composited colors, weights, depth, gradients, s_val, etc.
        '''
        # Periodically re-apply the unit-sphere mask to the SDF grid.
        if global_step is not None:
            if global_step in [1, 100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 16000, 17000, 18000, 19000, 20000] or global_step % self.set_sphere_freq == 0:
                self._set_sphere_nonempty_mask()
        ret_dict = {}
        N = len(rays_o)
        # sample points on rays
        # inner_pts, inner_ray_id, inner_step_id, mask_outbbox, N_steps = self.sample_ray_cuda(
        #     rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, **render_kwargs)
        inner_pts, mask_outbbox, step = self.sample_ray_ori(
            rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, use_bg=False, **render_kwargs)
        inner_ray_id, inner_step_id = create_full_step_id(inner_pts.shape[:2])
        # Foreground: keep only samples strictly inside the unit sphere.
        pts_norm = torch.linalg.norm(inner_pts, ord=2, dim=-1, keepdim=True)
        inside_sphere = (pts_norm < 1.0).view(-1)
        inner_pts, inner_ray_id, inner_step_id = \
            inner_pts.view(-1, 3)[inside_sphere], inner_ray_id[inside_sphere], inner_step_id[inside_sphere]
        bg_render_kwargs = copy.deepcopy(render_kwargs)
        ''' old sample ray '''
        outer_pts_org, bg_mask_outbbox, bg_step = self.sample_ray_ori(
            rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, use_bg=False, **bg_render_kwargs)
        outer_ray_id, outer_step_id = create_full_step_id(outer_pts_org.shape[:2])
        bg_pts_norm = torch.linalg.norm(outer_pts_org, ord=2, dim=-1, keepdim=True)
        bg_inside_sphere = (bg_pts_norm < 1.0)[...,0]
        # Background: warp outside-sphere samples by x -> x / ||x||^2.
        outer_pts = self.outside_sphere_trans(outer_pts_org, bg_pts_norm, filtered=True)
        bg_mask = ~bg_inside_sphere
        # Drop warped samples whose cumulative spacing falls under half a step.
        dist_thres = self.voxel_size * render_kwargs['stepsize'] * 0.5
        dist = (outer_pts[:, 1:] - outer_pts[:, :-1]).norm(dim=-1)
        dist_mask = ub360_utils_cuda.cumdist_thres(dist, dist_thres)
        bg_mask[:,1:] &= dist_mask
        outer_pts, outer_ray_id, outer_step_id = \
            outer_pts[bg_mask], outer_ray_id[bg_mask.view(-1)], outer_step_id[bg_mask.view(-1)]
        outer_pts_org = outer_pts_org[bg_mask]
        if self.smooth_sdf:
            sdf_grid = self.smooth_conv(self.sdf.grid)
        else:
            sdf_grid = self.sdf.grid
        # SDF value + finite-difference gradient at each foreground sample.
        sdf, gradient, feat = self.grid_sampler(inner_pts, sdf_grid, sample_ret=True, sample_grad=True, displace=1.0)
        self.gradient = self.neus_sdf_gradient(sdf=sdf_grid)
        # NeuS-style alpha from SDF section estimates (constant step length).
        dist = render_kwargs['stepsize'] * self.voxel_size
        s_val, in_alpha = self.neus_alpha_from_sdf_scatter(viewdirs, inner_ray_id, dist, sdf, gradient, global_step=global_step,
                                                           is_train=global_step is not None, use_mid=True)
        in_weights, in_alphainv_last = Alphas2Weights.apply(in_alpha, inner_ray_id, N)
        # Prune low-weight foreground samples for speed.
        if self.fast_color_thres > 0:
            mask = in_weights > self.fast_color_thres
            in_weights = in_weights[mask]
            inner_pts = inner_pts[mask]
            inner_ray_id = inner_ray_id[mask]
            inner_step_id = inner_step_id[mask]
            in_alpha = in_alpha[mask]
            gradient = gradient[mask]
            sdf = sdf[mask]
        # assert self.separate_modelling
        # Background uses plain density rendering.
        bg_interval = bg_render_kwargs['stepsize'] * self.voxel_size_ratio
        bg_density = self.bg_density(outer_pts)
        bg_alpha = self.activate_density(bg_density, bg_interval)
        bg_weights, bg_alphainv_last = Alphas2Weights.apply(bg_alpha, outer_ray_id, N)
        # NOTE(review): the gate tests bg_fast_color_thres but the threshold
        # applied is fast_color_thres — confirm intended.
        if self.bg_fast_color_thres > 0:
            mask = bg_weights > self.fast_color_thres
            outer_pts = outer_pts[mask]
            outer_ray_id = outer_ray_id[mask]
            outer_step_id = outer_step_id[mask]
            bg_alpha = bg_alpha[mask]
            outer_pts_org = outer_pts_org[mask]
            bg_weights = bg_weights[mask]
        # ---- foreground color features ----
        rgb_feat = []
        k_rgb_feat = []
        if self.k0_dim > 0:
            k0 = self.k0(inner_pts)
            if self.use_rgbnet_k0:
                rgb_feat.append(k0)
            k_rgb_feat.append(k0)
        # Multi-scale SDF / gradient features around each sample.
        all_grad_inds = list(set(self.grad_feat + self.k_grad_feat))
        all_sdf_inds = list(set(self.sdf_feat + self.k_sdf_feat))
        assert all_grad_inds == all_sdf_inds
        if len(all_grad_inds) > 0:
            all_grad_inds = sorted(all_grad_inds)
            all_grad_inds_ = deepcopy(all_grad_inds)
            all_feat, all_grad = self.sample_sdfs(inner_pts, sdf_grid, displace_list=all_grad_inds_, use_grad_norm=self.use_grad_norm)
            rgb_feat.append(all_feat)
            rgb_feat.append(all_grad)
        else:
            all_feat, all_grad = None, None
        if self.use_xyz:
            # Positional encoding of normalized coordinates.
            rays_xyz = (inner_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
            xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
            xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
            rgb_feat.append(xyz_emb)
        if self.use_view:
            viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
            viewdirs_emb = torch.cat(
                [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
            rgb_feat.append(viewdirs_emb.flatten(0, -2)[inner_ray_id])
        if self.center_sdf:
            rgb_feat.append(sdf[:, None])
        rgb_feat = torch.cat(rgb_feat, -1)
        rgb_logit = self.rgbnet(rgb_feat)
        rgb = torch.sigmoid(rgb_logit)
        # outside
        bg_rgb_feat = []
        if self.bg_k0_dim > 0:
            bg_k0 = self.bg_k0(outer_pts)
            bg_rgb_feat.append(bg_k0)
        if self.bg_use_xyz:
            # Background embeds the *unwarped* coordinates.
            bg_rays_xyz = (outer_pts_org - self.xyz_min) / (self.xyz_max - self.xyz_min)
            bg_xyz_emb = (bg_rays_xyz.unsqueeze(-1) * self.bg_posfreq).flatten(-2)
            bg_xyz_emb = torch.cat(
                [bg_rays_xyz, bg_xyz_emb.sin(), bg_xyz_emb.cos()], -1)
            bg_rgb_feat.append(bg_xyz_emb)
        if self.bg_use_view:
            bg_viewdirs_emb = (viewdirs.unsqueeze(-1) * self.bg_viewfreq).flatten(-2)
            bg_viewdirs_emb = torch.cat(
                [viewdirs, bg_viewdirs_emb.sin(), bg_viewdirs_emb.cos()], -1)
            bg_rgb_feat.append(bg_viewdirs_emb.flatten(0, -2)[outer_ray_id])
        bg_rgb_feat = torch.cat(bg_rgb_feat, -1)
        bg_rgb_logit = self.bg_rgbnet(bg_rgb_feat)
        bg_rgb = torch.sigmoid(bg_rgb_logit)
        # ---- composite foreground + background per ray ----
        in_marched = segment_coo(
            src=(in_weights.unsqueeze(-1) * rgb),
            index=inner_ray_id, out=torch.zeros([N, 3]), reduce='sum')
        bg_marched = segment_coo(
            src=(bg_weights.unsqueeze(-1) * bg_rgb),
            index=outer_ray_id, out=torch.zeros([N, 3]), reduce='sum')
        cum_in_weights = segment_coo(
            src=(in_weights.unsqueeze(-1)),
            index=inner_ray_id, out=torch.zeros([N, 1]), reduce='sum')
        bg_marched = bg_marched.clamp(0, 1)
        in_marched = in_marched.clamp(0, 1)
        # Background is attenuated by the foreground's remaining transmittance.
        rgb_marched = in_marched + (1 - cum_in_weights) * bg_marched
        rgb_marched = rgb_marched.clamp(0, 1)
        in_marched = in_marched  # no-op (kept as in original)
        if self.use_rgb_k:
            # Second (refinement) color branch, optionally residual on the
            # first branch's logits.
            if self.k_use_xyz:
                k_rays_xyz = (inner_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
                k_xyz_emb = (k_rays_xyz.unsqueeze(-1) * self.k_posfreq).flatten(-2)
                k_xyz_emb = torch.cat([k_rays_xyz, k_xyz_emb.sin(), k_xyz_emb.cos()], -1)
                k_rgb_feat.append(k_xyz_emb)
            if self.k_use_view:
                k_viewdirs_emb = (viewdirs.unsqueeze(-1) * self.k_viewfreq).flatten(-2)
                k_viewdirs_emb = torch.cat(
                    [viewdirs, k_viewdirs_emb.sin(), k_viewdirs_emb.cos()], -1)
                k_rgb_feat.append(k_viewdirs_emb.flatten(0, -2)[inner_ray_id])
            if self.k_center_sdf:
                k_rgb_feat.append(sdf[:, None])
            k_rgb_feat.append(gradient)
            if self.k_res:
                color_feat = rgb_logit
                if self.k_detach_1:
                    k_rgb_feat.append(color_feat.detach())
                else:
                    k_rgb_feat.append(color_feat)
            k_rgb_feat = torch.cat(k_rgb_feat, -1)
            k_rgb_logit = self.k_rgbnet(k_rgb_feat)
            if self.k_detach_2:
                k_rgb_logit = rgb_logit.detach() + k_rgb_logit
            else:
                k_rgb_logit = rgb_logit + k_rgb_logit
            k_rgb = torch.sigmoid(k_rgb_logit)
            k_in_marched = segment_coo(
                src=(in_weights.unsqueeze(-1) * k_rgb),
                index=inner_ray_id, out=torch.zeros([N, 3]), reduce='sum')
            k_in_marched = k_in_marched.clamp(0, 1)
            k_rgb_marched = k_in_marched + (1 - cum_in_weights) * bg_marched
            k_rgb_marched = k_rgb_marched.clamp(0, 1)
            in_marched = k_in_marched
        else:
            k_rgb_marched = None
        # Ray marching
        if gradient is not None and render_kwargs.get('render_grad', False):
            normal = gradient / (gradient.norm(2, -1, keepdim=True) + 1e-6)
            normal_marched = segment_coo(
                src=(in_weights.unsqueeze(-1) * normal),
                index=inner_ray_id, out=torch.zeros([N, 3]), reduce='sum')
        else:
            normal_marched = None
        out_marched = bg_marched
        if render_kwargs.get('render_depth', False):
            with torch.no_grad():
                depth = segment_coo(
                    src=(in_weights * inner_step_id * dist),
                    index=inner_ray_id, out=torch.zeros([N]), reduce='sum')
            disp = 1 / depth
        else:
            depth = None
            disp = 0
        # NOTE(review): ``mask`` below is unbound when both pruning thresholds
        # are <= 0 — confirm thresholds are always positive in practice.
        ret_dict.update({
            'alphainv_cum': (1 - cum_in_weights)[..., 0],
            'weights': in_weights,
            'bg_weights': bg_weights,
            'pts_norm': pts_norm,
            'rgb_marched': rgb_marched,
            # 'k_rgb_marched': k_rgb_marched,
            'in_marched': in_marched,
            'out_marched': out_marched,
            'normal_marched': normal_marched,
            'raw_alpha': in_alpha,
            'raw_rgb': rgb,
            'depth': depth,
            'disp': disp,
            'mask': mask,
            'mask_outbbox':mask_outbbox,
            'gradient': gradient,
            "gradient_error": None,
            "s_val": s_val,
        })
        # When the refinement branch is active it provides the final color;
        # the first branch's result is kept under 'rgb_marched0'.
        if self.use_rgb_k:
            ret_dict.update({
                'rgb_marched': k_rgb_marched,
                'rgb_marched0': rgb_marched,
            })
        return ret_dict
    def mesh_color_forward(self, ray_pts, **kwargs):
        """Query vertex colors for mesh extraction.

        View directions are taken as the negated normalized SDF gradient
        (looking along the inward surface normal); the feature pipeline
        mirrors the foreground branch of ``forward``.
        """
        sdf_grid = self.smooth_conv(self.sdf.grid) if self.smooth_sdf else self.sdf.grid
        # self.gradient = self.neus_sdf_gradient(sdf=sdf_grid)
        # gradient = self.grid_sampler(ray_pts, self.gradient).reshape(-1, 3)
        sdf, gradient, feat = self.grid_sampler(ray_pts, sdf_grid, sample_ret=True, sample_grad=True, displace=1.0)
        normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
        # Synthesize a view direction from the surface normal.
        viewdirs = -normal
        rgb_feat = []
        k_rgb_feat = []
        if self.k0_dim > 0:
            k0 = self.k0(ray_pts)
            if self.use_rgbnet_k0:
                rgb_feat.append(k0)
            k_rgb_feat.append(k0)
        # Multi-scale SDF / gradient features, same as in forward().
        all_grad_inds = list(set(self.grad_feat + self.k_grad_feat))
        all_sdf_inds = list(set(self.sdf_feat + self.k_sdf_feat))
        assert all_grad_inds == all_sdf_inds
        if len(all_grad_inds) > 0:
            all_grad_inds = sorted(all_grad_inds)
            all_grad_inds_ = deepcopy(all_grad_inds)
            all_feat, all_grad = self.sample_sdfs(ray_pts, sdf_grid,
                                                  displace_list=all_grad_inds_,
                                                  use_grad_norm=self.use_grad_norm)
            rgb_feat.append(all_feat)
            rgb_feat.append(all_grad)
        if self.use_xyz:
            rays_xyz = (ray_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
            xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
            xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
            rgb_feat.append(xyz_emb)
        if self.use_view:
            viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
            viewdirs_emb = torch.cat(
                [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
            rgb_feat.append(viewdirs_emb.flatten(0, -2))
        if self.center_sdf:
            rgb_feat.append(sdf[:, None])
        rgb_feat = torch.cat(rgb_feat, -1)
        rgb_logit = self.rgbnet(rgb_feat)
        rgb = torch.sigmoid(rgb_logit)
        # Optional refinement branch; its sigmoid output replaces rgb.
        if self.use_rgb_k:
            if self.k_use_xyz:
                k_rays_xyz = (ray_pts - self.xyz_min) / (
                            self.xyz_max - self.xyz_min)
                k_xyz_emb = (k_rays_xyz.unsqueeze(-1) * self.k_posfreq).flatten(
                    -2)
                k_xyz_emb = torch.cat(
                    [k_rays_xyz, k_xyz_emb.sin(), k_xyz_emb.cos()], -1)
                k_rgb_feat.append(k_xyz_emb)
            if self.k_use_view:
                k_viewdirs_emb = (
                            viewdirs.unsqueeze(-1) * self.k_viewfreq).flatten(
                    -2)
                k_viewdirs_emb = torch.cat(
                    [viewdirs, k_viewdirs_emb.sin(), k_viewdirs_emb.cos()], -1)
                k_rgb_feat.append(k_viewdirs_emb.flatten(0, -2))
            if self.k_center_sdf:
                k_rgb_feat.append(sdf[:, None])
            k_rgb_feat.append(gradient)
            if self.k_res:
                color_feat = rgb_logit
                if self.k_detach_1:
                    k_rgb_feat.append(color_feat.detach())
                else:
                    k_rgb_feat.append(color_feat)
            k_rgb_feat = torch.cat(k_rgb_feat, -1)
            k_rgb_logit = self.k_rgbnet(k_rgb_feat)
            if self.k_detach_2:
                k_rgb_logit = rgb_logit.detach() + k_rgb_logit
            else:
                k_rgb_logit = rgb_logit + k_rgb_logit
            rgb = torch.sigmoid(k_rgb_logit)
        return rgb
    def extract_geometry(self, bound_min, bound_max, resolution=128, threshold=0.0, smooth=True, sigma=0.5, **kwargs):
        """Extract a mesh level set from the (optionally smoothed) SDF grid.

        The SDF is negated so the extractor's inside/outside convention
        matches. The trailing call resolves to the module-level
        ``extract_geometry`` helper (not recursion).
        """
        self._set_sphere_nonempty_mask()
        if self.smooth_sdf:
            sdf_grid = self.smooth_conv(self.sdf.grid)
        else:
            if smooth:
                # Lazily build a test-time Gaussian smoother (kernel 3).
                self.init_smooth_conv_test_k3(sigma=sigma)
                sdf_grid = self.smooth_conv_test_k3(self.sdf.grid)
            else:
                sdf_grid = self.sdf.grid
        query_func = lambda pts: self.grid_sampler(pts, - sdf_grid)
        if resolution is None:
            resolution = self.world_size[0]
        return extract_geometry(bound_min,
                                bound_max,
                                resolution=resolution,
                                threshold=threshold,
                                query_func=query_func)
def visualize_density_sdf(self, root='', iter=0, idxs=None):
if idxs is None:
if self.density.grid.shape[2] < 100:
idxs = [self.density.grid.hape[2] // 2]
else:
idxs = [60]
os.makedirs(os.path.join(root, "debug_figs"), exist_ok=True)
for i in idxs:
sdf_img = self.sdf.grid[0,0,i].cpu().detach().numpy()
sdf_img = (sdf_img + 1 / 2).clip(0,1) * 255
cv2.imwrite(os.path.join(root, "debug_figs/sdf_{}_{}.png".format(iter, i)), sdf_img)
    def visualize_weight(self, weight1, weight2, thrd=0.001):
        """Plot per-sample weight curves of the 100 rays with the largest
        total weight, saving one figure per ray as ``weight_<i>.png``.

        ``sort()[-1]`` selects the indices field of torch.sort's
        (values, indices) result; the last 100 entries are the top rays.
        """
        idxs = weight1.sum(-1).sort()[-1][-100:]
        for i in idxs:
            plt.figure()
            # Only show samples where weight1 exceeds the threshold.
            vis = weight1[i] > thrd
            plt.plot(weight1.detach().cpu().numpy()[i][vis])
            plt.plot(weight2.detach().cpu().numpy()[i][vis])
            plt.savefig("weight_{}.png".format(i))
''' Module for the searched coarse geometry
It supports query for the known free space and unknown space.
'''
class MaskCache(nn.Module):
    """Query-able cache of the coarse-stage density for known free space.

    Loads a trained coarse checkpoint and keeps a max-pooled copy of its
    density grid; ``forward`` reports which query points have activated
    alpha >= ``mask_cache_thres`` (i.e. are potentially occupied).
    """
    def __init__(self, path, mask_cache_thres, ks=3):
        super().__init__()
        st = torch.load(path)
        self.mask_cache_thres = mask_cache_thres
        self.register_buffer('xyz_min', torch.FloatTensor(st['MaskCache_kwargs']['xyz_min']))
        self.register_buffer('xyz_max', torch.FloatTensor(st['MaskCache_kwargs']['xyz_max']))
        # Max-pool so the mask is slightly dilated and does not clip geometry.
        self.register_buffer('density', F.max_pool3d(
            st['model_state_dict']['density'], kernel_size=ks, padding=ks//2, stride=1))
        self.act_shift = st['MaskCache_kwargs']['act_shift']
        self.voxel_size_ratio = st['MaskCache_kwargs']['voxel_size_ratio']
        self.nearest = st['MaskCache_kwargs'].get('nearest', False)
    @torch.no_grad()
    def forward(self, xyz):
        """Return a bool mask (same leading shape as xyz) of occupied points."""
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        # Normalize to [-1, 1] and flip xyz -> zyx for grid_sample.
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        # The two former branches duplicated the alpha computation; only the
        # interpolation mode differs (grid_sample's default is 'bilinear').
        mode = 'nearest' if self.nearest else 'bilinear'
        density = F.grid_sample(self.density, ind_norm, align_corners=True, mode=mode)
        alpha = 1 - torch.exp(-F.softplus(density + self.act_shift) * self.voxel_size_ratio)
        alpha = alpha.reshape(*shape)
        return (alpha >= self.mask_cache_thres)
''' Misc
'''
def cumprod_exclusive(p):
    """Exclusive cumulative product along the last dim (ones prepended).

    Output has one more element than the input along the last axis.
    Clamping at 1e-10 keeps late-training gradients numerically stable.
    """
    leading_ones = torch.ones_like(p[..., [0]])
    running = p.clamp_min(1e-10).cumprod(-1)
    return torch.cat([leading_ones, running], -1)
def get_ray_marching_ray(alpha):
    """Convert per-sample alphas into ray-marching weights.

    Returns (weights, alphainv_cum) where alphainv_cum is the exclusive
    cumulative transmittance (one element longer than alpha on the last
    axis); weights[i] = alpha[i] * prod_{j<i}(1 - alpha[j]).
    """
    transmittance = torch.cat(
        [torch.ones_like(alpha[..., [0]]),
         (1 - alpha).clamp_min(1e-10).cumprod(-1)], -1)
    weights = alpha * transmittance[..., :-1]
    return weights, transmittance
def total_variation(v, mask=None):
    """Mean absolute difference between neighboring voxels along dims 2-4.

    Args:
        v: (N, C, D, H, W) grid.
        mask: optional bool grid broadcast-compatible with v; a difference is
            counted only when both participating voxels are inside the mask.
    Returns:
        Scalar tensor: average of the three per-axis TV means.
    """
    # Use the fused Tensor.diff when available (torch >= 1.8). The previous
    # exact-string check ``torch.__version__ == '1.10.0'`` missed every other
    # release (e.g. 1.10.1); both paths compute identical values.
    if hasattr(torch.Tensor, 'diff'):
        tv2 = v.diff(dim=2).abs()
        tv3 = v.diff(dim=3).abs()
        tv4 = v.diff(dim=4).abs()
    else:
        tv2 = (v[:,:,1:,:,:] - v[:,:,:-1,:,:]).abs()
        tv3 = (v[:,:,:,1:,:] - v[:,:,:,:-1,:]).abs()
        tv4 = (v[:,:,:,:,1:] - v[:,:,:,:,:-1]).abs()
    if mask is not None:
        tv2 = tv2[mask[:,:,:-1] & mask[:,:,1:]]
        tv3 = tv3[mask[:,:,:,:-1] & mask[:,:,:,1:]]
        tv4 = tv4[mask[:,:,:,:,:-1] & mask[:,:,:,:,1:]]
    return (tv2.mean() + tv3.mean() + tv4.mean()) / 3
def total_variation_step2(v, mask=None):
    """Stride-2 total variation: mean |v[i+2] - v[i]| / 2 along dims 2-4.

    With ``mask``, a difference is counted only when both endpoints are
    inside the mask.
    """
    per_axis = []
    for dim in (2, 3, 4):
        span = v.shape[dim] - 2
        low = v.narrow(dim, 0, span)
        high = v.narrow(dim, 2, span)
        per_axis.append((high - low).abs() / 2)
    tv2, tv3, tv4 = per_axis
    if mask is not None:
        tv2 = tv2[mask[:,:,:-2] & mask[:,:,2:]]
        tv3 = tv3[mask[:,:,:,:-2] & mask[:,:,:,2:]]
        tv4 = tv4[mask[:,:,:,:,:-2] & mask[:,:,:,:,2:]]
    return (tv2.mean() + tv3.mean() + tv4.mean()) / 3
class Alphas2Weights(torch.autograd.Function):
    """Autograd wrapper around the CUDA alpha-compositing kernel.

    Converts per-sample alphas (flattened over rays, indexed by ``ray_id``)
    into ray-marching weights plus each ray's final transmittance.
    """
    @staticmethod
    def forward(ctx, alpha, ray_id, N):
        # T is the accumulated transmittance; i_start/i_end delimit each
        # ray's segment within the flattened sample arrays.
        weights, T, alphainv_last, i_start, i_end = render_utils_cuda.alpha2weight(alpha, ray_id, N)
        if alpha.requires_grad:
            ctx.save_for_backward(alpha, weights, T, alphainv_last, i_start, i_end)
            ctx.n_rays = N
        return weights, alphainv_last
    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_weights, grad_last):
        # Gradient flows only to alpha; ray_id and N get None.
        alpha, weights, T, alphainv_last, i_start, i_end = ctx.saved_tensors
        grad = render_utils_cuda.alpha2weight_backward(
                alpha, weights, T, alphainv_last,
                i_start, i_end, ctx.n_rays, grad_weights, grad_last)
        return grad, None, None
''' Ray and batch
'''
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
    """Generate per-pixel rays for a pinhole camera.

    Args:
        H, W: image size. K: 3x3 intrinsics. c2w: camera-to-world matrix.
        inverse_y: if True use +y/+z camera convention, else -y/-z.
        flip_x, flip_y: mirror the pixel grid horizontally/vertically.
        mode: sub-pixel sample position ('lefttop' | 'center' | 'random').
    Returns:
        (rays_o, rays_d): (H, W, 3) origins and (unnormalized) directions.
    """
    i, j = torch.meshgrid(
        torch.linspace(0, W-1, W, device=c2w.device),
        torch.linspace(0, H-1, H, device=c2w.device))  # pytorch's meshgrid has indexing='ij'
    i = i.t().float()
    j = j.t().float()
    if mode == 'lefttop':
        pass
    elif mode == 'center':
        i, j = i+0.5, j+0.5
    elif mode == 'random':
        i = i+torch.rand_like(i)
        j = j+torch.rand_like(j)
    else:
        raise NotImplementedError
    if flip_x:
        i = i.flip((1,))
    if flip_y:
        j = j.flip((0,))
    if inverse_y:
        dirs = torch.stack([(i-K[0][2])/K[0][0], (j-K[1][2])/K[1][1], torch.ones_like(i)], -1)
    else:
        dirs = torch.stack([(i-K[0][2])/K[0][0], -(j-K[1][2])/K[1][1], -torch.ones_like(i)], -1)
    # Rotate ray directions from camera frame to the world frame
    rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1)  # dot product, equals to: [c2w.dot(dir) for dir in dirs]
    # Translate camera frame's origin to the world frame. It is the origin of all rays.
    rays_o = c2w[:3,3].expand(rays_d.shape)
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Warp rays into normalized device coordinates (forward-facing scenes).

    Origins are first pushed onto the z = -near plane, then origins and
    directions are projected with the standard NeRF NDC formulas.
    """
    # Shift ray origins to the near plane.
    t_near = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t_near[..., None] * rays_d
    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    # Per-axis projection scales from the image plane geometry.
    sx = -1. / (W / (2. * focal))
    sy = -1. / (H / (2. * focal))
    o0 = sx * ox / oz
    o1 = sy * oy / oz
    o2 = 1. + 2. * near / oz
    d0 = sx * (dx / dz - ox / oz)
    d1 = sy * (dy / dz - oy / oz)
    d2 = -2. * near / oz
    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
    """Generate rays for one camera view plus unit-length view directions.

    The NDC warp is applied when ``ndc`` is set (forward-facing scenes);
    viewdirs are always normalized from the pre-warp directions.
    """
    origins, directions = get_rays(
        H, W, K, c2w, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y, mode=mode)
    viewdirs = directions / directions.norm(dim=-1, keepdim=True)
    if ndc:
        origins, directions = ndc_rays(H, W, K[0][0], 1., origins, directions)
    return origins, directions, viewdirs
@torch.no_grad()
def get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute per-pixel rays for all training images (same H, W, K).

    Returns (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz); ray tensors
    are (n_images, H, W, 3) on rgb_tr's device.
    """
    print('get_training_rays: start')
    assert len(np.unique(HW, axis=0)) == 1
    assert len(np.unique(Ks.reshape(len(Ks),-1), axis=0)) == 1
    assert len(rgb_tr) == len(train_poses) and len(rgb_tr) == len(Ks) and len(rgb_tr) == len(HW)
    H, W = HW[0]
    K = Ks[0]
    eps_time = time.time()
    rays_o_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    rays_d_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    viewdirs_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    # NOTE(review): imsz is a list of 1s here (one per image), unlike the
    # flatten variant where it holds pixel counts — confirm callers expect it.
    imsz = [1] * len(rgb_tr)
    for i, c2w in enumerate(train_poses):
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        rays_o_tr[i].copy_(rays_o.to(rgb_tr.device))
        rays_d_tr[i].copy_(rays_d.to(rgb_tr.device))
        viewdirs_tr[i].copy_(viewdirs.to(rgb_tr.device))
        del rays_o, rays_d, viewdirs
    eps_time = time.time() - eps_time
    print('get_training_rays: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Like get_training_rays, but flattens all views into [N, 3] tensors.

    Supports per-view resolutions/intrinsics. Returns
    (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz) with imsz listing the
    pixel count contributed by each view.
    """
    print('get_training_rays_flatten: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    eps_time = time.time()
    DEVICE = rgb_tr_ori[0].device
    # Total pixel count across all views — the flattened first dimension.
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        n = H * W
        # Fill the [top, top+n) slice of each flattened buffer in place.
        rgb_tr[top:top+n].copy_(img.flatten(0,1))
        rays_o_tr[top:top+n].copy_(rays_o.flatten(0,1).to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d.flatten(0,1).to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs.flatten(0,1).to(DEVICE))
        imsz.append(n)
        top += n
    # Every preallocated row must have been written exactly once.
    assert top == N
    eps_time = time.time() - eps_time
    print('get_training_rays_flatten: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_in_maskcache_sampling(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y, model, render_kwargs, rgbnet_sup_reduce=1):
    """Flattened training rays, keeping only pixels whose rays hit cached occupied space.

    For each view, rays are marched through the model's coarse mask cache and a
    pixel is kept when any sample along its ray lands in known-occupied space.
    Returns (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz) trimmed to the
    surviving pixels.
    """
    print('get_training_rays_in_maskcache_sampling: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    CHUNK = 64  # rows of the image processed per mask-cache query (memory bound)
    DEVICE = rgb_tr_ori[0].device
    eps_time = time.time()
    # Upper bound on kept pixels: preallocate for all, trim after filtering.
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    # rgb_tr_new_res = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        mask = torch.ones(img.shape[:2], device=DEVICE, dtype=torch.bool)
        for i in range(0, img.shape[0], CHUNK):
            rays_pts, mask_outbbox, _ = model.sample_ray_ori(
                rays_o=rays_o[i:i+CHUNK], rays_d=rays_d[i:i+CHUNK], **render_kwargs)
            # Mark in-bbox samples that the mask cache reports as empty.
            mask_outbbox[~mask_outbbox] |= (~model.mask_cache(rays_pts[~mask_outbbox]))
            # Keep a pixel iff at least one of its samples is in occupied space.
            mask[i:i+CHUNK] &= (~mask_outbbox).any(-1).to(DEVICE)
        # NOTE(review): n is a 0-dim tensor (mask.sum()), so imsz collects
        # tensors rather than ints here — confirm downstream consumers cope.
        n = mask.sum()
        rgb_tr[top:top+n].copy_(img[mask])
        # rgb_tr_new_res[top:top+n].copy_(img_new_res[mask])
        rays_o_tr[top:top+n].copy_(rays_o[mask].to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d[mask].to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs[mask].to(DEVICE))
        imsz.append(n)
        top += n
    print('get_training_rays_in_maskcache_sampling: ratio', top / N)
    # Trim the preallocated buffers down to the pixels actually kept.
    rgb_tr = rgb_tr[:top]
    rays_o_tr = rays_o_tr[:top]
    rays_d_tr = rays_d_tr[:top]
    viewdirs_tr = viewdirs_tr[:top]
    eps_time = time.time() - eps_time
    print('get_training_rays_in_maskcache_sampling: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def batch_indices_generator(N, BS):
    """Endlessly yield LongTensor index batches of size BS over range(N),
    reshuffling whenever fewer than BS unseen indices remain."""
    # np.random.permutation is used instead of torch.randperm: the latter was
    # observed to produce incorrect results on CUDA in the author's setup.
    def _fresh_perm():
        return torch.LongTensor(np.random.permutation(N))
    perm = _fresh_perm()
    cursor = 0
    while True:
        if cursor + BS > N:
            perm = _fresh_perm()
            cursor = 0
        yield perm[cursor:cursor + BS]
        cursor += BS
@functools.lru_cache(maxsize=128)
def create_full_step_id(shape):
    """Return flattened (ray_id, step_id) index tensors for a dense
    (n_rays, n_steps) sample grid; cached since shapes repeat across iterations."""
    n_rays, n_steps = shape[0], shape[1]
    # Row index repeated per step; step index tiled per ray.
    ray_id = torch.arange(n_rays).repeat_interleave(n_steps)
    step_id = torch.arange(n_steps).repeat(n_rays)
    return ray_id, step_id
| 65,267 | 43.61244 | 162 | py |
Voxurf | Voxurf-main/lib/voxurf_coarse.py | import os
import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import segment_coo
from torch.utils.cpp_extension import load
from . import grid
from lib.dvgo_ori import extract_geometry
# Directory containing this file; used to locate the CUDA extension sources.
parent_dir = os.path.dirname(os.path.abspath(__file__))
# JIT-compile the custom CUDA ray-marching/rendering kernels on import.
render_utils_cuda = load(
        name='render_utils_cuda',
        sources=[
            os.path.join(parent_dir, path)
            for path in ['cuda/render_utils.cpp', 'cuda/render_utils_kernel.cu']],
        verbose=True)
'''Model'''
class Voxurf(torch.nn.Module):
"""
This module is modified from DirectVoxGO https://github.com/sunset1995/DirectVoxGO/blob/main/lib/dvgo.py
"""
def __init__(self, xyz_min, xyz_max,
num_voxels=0, num_voxels_base=0,
alpha_init=None,
nearest=False,
mask_cache_path=None, mask_cache_thres=1e-3,
fast_color_thres=0,
rgbnet_dim=0, rgbnet_direct=False, rgbnet_full_implicit=False,
rgbnet_depth=3, rgbnet_width=128,
posbase_pe=5, viewbase_pe=4, geo_rgb_dim=3,
grad_mode='interpolate',
s_ratio=2000, s_start=0.2, s_learn=False, step_start=0,
smooth_ksize=0, smooth_sigma=1,
**kwargs):
super(Voxurf, self).__init__()
self.register_buffer('xyz_min', torch.Tensor(xyz_min))
self.register_buffer('xyz_max', torch.Tensor(xyz_max))
self.fast_color_thres = fast_color_thres
self.nearest = nearest
self.s_ratio = s_ratio
self.s_start = s_start
self.s_learn = s_learn
self.step_start = step_start
self.s_val = nn.Parameter(torch.ones(1), requires_grad=s_learn).cuda()
self.s_val.data *= s_start
self.sdf_init_mode = "ball_init"
# determine based grid resolution
self.num_voxels_base = num_voxels_base
self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)
# determine the density bias shift
self.alpha_init = alpha_init
self.act_shift = np.log(1/(1-alpha_init) - 1)
print('set density bias shift to', self.act_shift)
# determine init grid resolution
self._set_grid_resolution(num_voxels)
# init density voxel grid
self.density = torch.nn.Parameter(torch.zeros([1, 1, *self.world_size]))
self.sdf = grid.create_grid(
'DenseGrid', channels=1, world_size=self.world_size,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
if self.sdf_init_mode == "ball_init":
x, y, z = np.mgrid[-1.0:1.0:self.world_size[0].item() * 1j, -1.0:1.0:self.world_size[1].item() * 1j, -1.0:1.0:self.world_size[2].item() * 1j]
self.sdf.grid.data = torch.from_numpy((x ** 2 + y ** 2 + z ** 2) ** 0.5 -1).float()[None, None, ...]
elif self.sdf_init_mode == "random":
self.sdf.grid = torch.nn.Parameter(torch.rand([1, 1, *self.world_size]) * 0.05) # random initialization
torch.nn.init.normal_(self.sdf, 0.0, 0.5)
else:
raise NotImplementedError
self.init_smooth_conv(smooth_ksize, smooth_sigma)
# init color representation
self.rgbnet_kwargs = {
'rgbnet_dim': rgbnet_dim, 'rgbnet_direct': rgbnet_direct,
'rgbnet_full_implicit': rgbnet_full_implicit,
'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
'posbase_pe': posbase_pe, 'viewbase_pe': viewbase_pe,
}
self.rgbnet_full_implicit = rgbnet_full_implicit
if rgbnet_dim <= 0:
# color voxel grid (dvgo coarse stage)
self.k0_dim = 3
self.k0 = torch.nn.Parameter(torch.zeros([1, self.k0_dim, *self.world_size]))
self.rgbnet = None
else:
if self.rgbnet_full_implicit:
self.k0_dim = 0
else:
self.k0_dim = rgbnet_dim
self.k0 = grid.create_grid(
'DenseGrid', channels=self.k0_dim, world_size=self.world_size,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
self.rgbnet_direct = rgbnet_direct
self.register_buffer('posfreq', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))
self.register_buffer('viewfreq', torch.FloatTensor([(2**i) for i in range(viewbase_pe)]))
dim0 = (3+3*posbase_pe*2) + (3+3*viewbase_pe*2)
if rgbnet_direct:
dim0 += self.k0_dim
else:
dim0 += self.k0_dim-3
self.geo_rgb_dim = geo_rgb_dim
if self.geo_rgb_dim:
dim0 += self.geo_rgb_dim
self.rgbnet = nn.Sequential(
nn.Linear(dim0, rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
for _ in range(rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
nn.init.constant_(self.rgbnet[-1].bias, 0)
print('feature voxel grid', self.k0.grid.shape)
print('mlp', self.rgbnet)
# Using the coarse geometry if provided (used to determine known free space and unknown space)
self.mask_cache_path = mask_cache_path
self.mask_cache_thres = mask_cache_thres
if mask_cache_path is not None and mask_cache_path:
self.mask_cache = MaskCache(
path=mask_cache_path,
mask_cache_thres=mask_cache_thres).to(self.xyz_min.device)
self._set_nonempty_mask()
else:
self.mask_cache = None
self.nonempty_mask = None
# grad conv to calculate gradient
self.init_gradient_conv()
self.grad_mode = grad_mode
def init_gradient_conv(self, sigma = 0):
self.grad_conv = nn.Conv3d(1,3,(3,3,3),stride=(1,1,1), padding=(1, 1, 1), padding_mode='replicate')
kernel = np.asarray([
[[1,2,1],[2,4,2],[1,2,1]],
[[2,4,2],[4,8,4],[2,4,2]],
[[1,2,1],[2,4,2],[1,2,1]],
])
# sigma controls the difference between naive [-1,1] and sobel kernel
distance = np.zeros((3,3,3))
for i in range(3):
for j in range(3):
for k in range(3):
distance[i,j,k] = ((i-1)**2 + (j-1)**2 + (k-1)**2 - 1)
kernel0 = kernel * np.exp(-distance * sigma)
kernel1 = kernel0 / ( kernel0[0].sum() * 2 * self.voxel_size.item())
weight = torch.from_numpy(np.concatenate([kernel1[None] for _ in range(3)])).float()
weight[0,1,:,:] *= 0
weight[0,0,:,:] *= -1
weight[1,:,1,:] *= 0
weight[1,:,0,:] *= -1
weight[2,:,:,1] *= 0
weight[2,:,:,0] *= -1
self.grad_conv.weight.data = weight.unsqueeze(1).float()
self.grad_conv.bias.data = torch.zeros(3)
for param in self.grad_conv.parameters():
param.requires_grad = False
# smooth conv for TV
self.tv_smooth_conv = nn.Conv3d(1, 1, (3, 3, 3), stride=1, padding=1, padding_mode='replicate')
weight = torch.from_numpy(kernel0 / kernel0.sum()).float()
self.tv_smooth_conv.weight.data = weight.unsqueeze(0).unsqueeze(0).float()
self.tv_smooth_conv.bias.data = torch.zeros(1)
for param in self.tv_smooth_conv.parameters():
param.requires_grad = False
def _gaussian_3dconv(self, ksize=3, sigma=1):
x = np.arange(-(ksize//2),ksize//2 + 1,1)
y = np.arange(-(ksize//2),ksize//2 + 1,1)
z = np.arange(-(ksize//2),ksize//2 + 1,1)
xx, yy, zz = np.meshgrid(x,y,z)
kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
kernel = torch.from_numpy(kernel).to(self.sdf.grid)
m = nn.Conv3d(1,1,ksize,stride=1,padding=ksize//2, padding_mode='replicate')
m.weight.data = kernel[None, None, ...] / kernel.sum()
m.bias.data = torch.zeros(1)
for param in m.parameters():
param.requires_grad = False
return m
def init_smooth_conv(self, ksize=3, sigma=1):
self.smooth_sdf = ksize > 0
if self.smooth_sdf:
self.smooth_conv = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_sdf_from_density(self, smooth=False, reduce=1., ksize=3, sigma=1., zero2neg=True):
print("\n", "- "*3 + "initing sdf from density" + " -"*3, "\n")
self.s = torch.nn.Parameter(torch.ones(1)) * 10
if zero2neg:
self.density.data[self.density.data==0] = -100
if self.density.shape != self.sdf.grid.shape:
self.density.data = F.interpolate(self.density.data, size=tuple(self.world_size), mode='trilinear', align_corners=True)
if smooth:
# first build the smoothing kernel
m = self._gaussian_3dconv(ksize, sigma)
sdf_data = m(-torch.tanh(self.density.data) / reduce)
self.sdf.grid = torch.nn.Parameter(sdf_data)
else:
self.sdf.grid.data = -torch.tanh(self.density.data) / reduce # + self.act_shift
self.gradient = self.neus_sdf_gradient()
def _set_grid_resolution(self, num_voxels):
# Determine grid resolution
self.num_voxels = num_voxels
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('voxel_size ', self.voxel_size)
print('world_size ', self.world_size)
print('voxel_size_base ', self.voxel_size_base)
print('voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'nearest': self.nearest,
'mask_cache_path': self.mask_cache_path,
'mask_cache_thres': self.mask_cache_thres,
'fast_color_thres': self.fast_color_thres,
'geo_rgb_dim':self.geo_rgb_dim,
# 's_start': self.s_start,
# 's_ratio': self.s_ratio,
**self.rgbnet_kwargs,
}
def get_MaskCache_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'act_shift': self.act_shift,
'voxel_size_ratio': self.voxel_size_ratio,
'nearest': self.nearest
}
@torch.no_grad()
def _set_nonempty_mask(self):
# Find grid points that is inside nonempty (occupied) space
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
), -1)
nonempty_mask = self.mask_cache(self_grid_xyz)[None,None].contiguous()
if hasattr(self, 'nonempty_mask'):
self.nonempty_mask = nonempty_mask
else:
self.register_buffer('nonempty_mask', nonempty_mask)
self.density[~self.nonempty_mask] = -100
self.sdf.grid[~self.nonempty_mask] = 1
@torch.no_grad()
def maskout_near_cam_vox(self, cam_o, near):
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
), -1)
nearest_dist = torch.stack([
(self_grid_xyz.unsqueeze(-2) - co).pow(2).sum(-1).sqrt().amin(-1)
for co in cam_o.split(100) # for memory saving
]).amin(0)
self.density[nearest_dist[None,None] <= near] = -100
self.sdf.grid[nearest_dist[None,None] <= near] = 1
@torch.no_grad()
def scale_volume_grid(self, num_voxels):
print('scale_volume_grid start')
ori_world_size = self.world_size
self._set_grid_resolution(num_voxels)
print('scale_volume_grid scale world_size from', ori_world_size, 'to', self.world_size)
self.sdf.scale_volume_grid(self.world_size)
if self.k0_dim > 0:
self.k0.scale_volume_grid(self.world_size)
if self.mask_cache is not None:
self._set_nonempty_mask()
print('scale_volume_grid finish')
def density_total_variation(self, sdf_tv=0, smooth_grad_tv=0, sdf_thrd=0.999):
tv = 0
if sdf_tv > 0:
tv += total_variation(self.sdf.grid, self.nonempty_mask) / 2 / self.voxel_size * sdf_tv
if smooth_grad_tv > 0:
smooth_tv_error = (self.tv_smooth_conv(self.gradient.permute(1,0,2,3,4)).detach() - self.gradient.permute(1,0,2,3,4))
smooth_tv_error = smooth_tv_error[self.nonempty_mask.repeat(3,1,1,1,1)] ** 2
tv += smooth_tv_error.mean() * smooth_grad_tv
return tv
def k0_total_variation(self, k0_tv=1., k0_grad_tv=0.):
if self.rgbnet is not None:
v = self.k0.grid
else:
v = torch.sigmoid(self.k0.grid)
tv = 0
if k0_tv > 0:
tv += total_variation(v, self.nonempty_mask.repeat(1,v.shape[1],1,1,1))
if k0_grad_tv > 0:
raise NotImplementedError
return tv
def neus_sdf_gradient(self, mode=None, sdf=None):
if sdf is None:
sdf = self.sdf.grid
if mode is None:
mode = self.grad_mode
if mode == 'interpolate':
gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
gradient[:,0,1:-1,:,:] = (sdf[:,0,2:,:,:] - sdf[:,0,:-2,:,:]) / 2 / self.voxel_size
gradient[:,1,:,1:-1,:] = (sdf[:,0,:,2:,:] - sdf[:,0,:,:-2,:]) / 2 / self.voxel_size
gradient[:,2,:,:,1:-1] = (sdf[:,0,:,:,2:] - sdf[:,0,:,:,:-2]) / 2 / self.voxel_size
elif mode == 'grad_conv':
# use sobel operator for gradient seems basically the same as the naive solution
for param in self.grad_conv.parameters():
assert not param.requires_grad
pass
gradient = self.grad_conv(sdf)
elif mode == 'raw':
gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
gradient[:,0,:-1,:,:] = (sdf[:,0,1:,:,:] - sdf[:,0,:-1,:,:]) / self.voxel_size
gradient[:,1,:,:-1,:] = (sdf[:,0,:,1:,:] - sdf[:,0,:,:-1,:]) / self.voxel_size
gradient[:,2,:,:,:-1] = (sdf[:,0,:,:,1:] - sdf[:,0,:,:,:-1]) / self.voxel_size
else:
raise NotImplementedError
return gradient
def neus_alpha_from_sdf_scatter(self, viewdirs, ray_id, dist, sdf, gradients, global_step,
is_train, use_mid=True):
if is_train:
if not self.s_learn:
s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
self.s_val.data = torch.ones_like(self.s_val) * s_val
else:
s_val = self.s_val.item()
else:
s_val = 0
dirs = viewdirs[ray_id]
inv_s = torch.ones(1).cuda() / self.s_val
assert use_mid
if use_mid:
true_cos = (dirs * gradients).sum(-1, keepdim=True)
cos_anneal_ratio = 1.0
iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
F.relu(-true_cos) * cos_anneal_ratio) # always non-positive (M, 1)
sdf = sdf.unsqueeze(-1) # (M, 1)
# Estimate signed distances at section points
estimated_next_sdf = sdf + iter_cos * dist.reshape(-1, 1) * 0.5 # (M, 1)
estimated_prev_sdf = sdf - iter_cos * dist.reshape(-1, 1) * 0.5 # (M, 1)
else:
estimated_next_sdf = torch.cat([sdf[..., 1:], sdf[..., -1:]], -1).reshape(-1, 1)
estimated_prev_sdf = torch.cat([sdf[..., :1], sdf[..., :-1]], -1).reshape(-1, 1)
prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s.reshape(-1, 1))
next_cdf = torch.sigmoid(estimated_next_sdf * inv_s.reshape(-1, 1))
p = prev_cdf - next_cdf
c = prev_cdf
alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0).squeeze()
return s_val, alpha
def neus_alpha_from_sdf(self, viewdirs, steps, sdf, gradients, global_step, is_train, use_mid=True):
ori_shape = viewdirs.shape
n_samples = steps.shape[-1]
# force s_val value to change with global step
if is_train:
batch_size = steps.shape[0]
if not self.s_learn:
s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
self.s_val.data = torch.ones_like(self.s_val) * s_val
else:
s_val = self.s_val.item()
else:
dirs = viewdirs.reshape(-1, 3)
steps = steps.reshape(-1, n_samples)
batch_size = dirs.shape[0]
s_val = 0
if steps.shape[0] == 1:
steps = steps.repeat(batch_size,1)
dirs = viewdirs.unsqueeze(-2)
inv_s = torch.ones(1).cuda() / self.s_val # * torch.exp(-inv_s)
inv_s = inv_s.expand(batch_size * n_samples, 1)
if use_mid:
true_cos = (dirs * gradients).sum(-1, keepdim=True)
cos_anneal_ratio = 1.0
iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
F.relu(-true_cos) * cos_anneal_ratio) # always non-positive
iter_cos = iter_cos.reshape(-1,1)
sdf = sdf.reshape(-1, 1)
# calculate dist from steps / z_vals
dists = steps[..., 1:] - steps[..., :-1]
dists = torch.cat([dists, torch.Tensor([dists.mean()]).expand(dists[..., :1].shape)], -1)
# Estimate signed distances at section points
estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5
estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5
else:
estimated_next_sdf = torch.cat([sdf[...,1:], sdf[...,-1:]], -1).reshape(-1, 1)
estimated_prev_sdf = torch.cat([sdf[...,:1], sdf[...,:-1]], -1).reshape(-1, 1)
prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
p = prev_cdf - next_cdf
c = prev_cdf
alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0)
return s_val, alpha
def grid_sampler(self, xyz, *grids, mode=None, align_corners=True, smooth=False, displace=0.):
'''Wrapper for the interp operation'''
if mode is None:
# bilinear is actually trilinear if 5D input is given to grid_sample
mode = 'nearest' if self.nearest else 'bilinear'
shape = xyz.shape[:-1]
xyz = xyz.reshape(1,1,1,-1,3)
ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
if displace !=0:
ind_norm[...,:] += displace * self.voxel_size
# TODO: use `rearrange' to make it readable
if smooth:
grid = self.smooth_conv(grids[0])
else:
grid = grids[0]
ret_lst = F.grid_sample(grid, ind_norm, mode=mode, align_corners=align_corners
).reshape(grid.shape[1],-1).T.reshape(*shape,grid.shape[1]).squeeze()
return ret_lst
def sample_ray_cuda(self, rays_o, rays_d, near, far, stepsize, maskout=True, use_bg=False, **render_kwargs):
'''Sample query points on rays.
All the output points are sorted from near to far.
Input:
rays_o, rayd_d: both in [N, 3] indicating ray configurations.
near, far: the near and far distance of the rays.
stepsize: the number of voxels of each sample step.
Output:
ray_pts: [M, 3] storing all the sampled points.
ray_id: [M] the index of the ray of each point.
step_id: [M] the i'th step on a ray of each point.
'''
far = 1e9 # the given far can be too small while rays stop when hitting scene bbox
rays_o = rays_o.contiguous()
rays_d = rays_d.contiguous()
if not use_bg:
stepdist = stepsize * self.voxel_size
else:
stepdist = stepsize * self.voxel_size_bg
ray_pts, mask_outbbox, ray_id, step_id, N_steps, t_min, t_max = render_utils_cuda.sample_pts_on_rays(
rays_o, rays_d, self.xyz_min, self.xyz_max, near, far, stepdist)
# correct the cuda output N_steps, which could have a bias of 1 randomly
N_steps = ray_id.unique(return_counts=True)[1]
if maskout:
if not use_bg:
mask_inbbox = ~mask_outbbox
else:
mask_inbbox = mask_outbbox
ray_pts = ray_pts[mask_inbbox]
ray_id = ray_id[mask_inbbox]
step_id = step_id[mask_inbbox]
return ray_pts, ray_id, step_id, mask_outbbox, N_steps
def sample_ray_ori(self, rays_o, rays_d, near, far, stepsize, is_train=False, **render_kwargs):
'''Sample query points on rays'''
# 1. determine the maximum number of query points to cover all possible rays
N_samples = int(np.linalg.norm(np.array(self.sdf.grid.shape[2:])+1) / stepsize) + 1
# 2. determine the two end-points of ray bbox intersection
vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
rate_a = (self.xyz_max - rays_o) / vec
rate_b = (self.xyz_min - rays_o) / vec
t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
t_max = torch.maximum(rate_a, rate_b).amin(-1).clamp(min=near, max=far)
# 3. check wheter a raw intersect the bbox or not
mask_outbbox = (t_max <= t_min)
# 4. sample points on each ray
rng = torch.arange(N_samples)[None].float()
if is_train:
rng = rng.repeat(rays_d.shape[-2],1)
rng += torch.rand_like(rng[:,[0]])
step = stepsize * self.voxel_size * rng
interpx = (t_min[...,None] + step/rays_d.norm(dim=-1,keepdim=True))
rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
# 5. update mask for query points outside bbox
mask_outbbox = mask_outbbox[...,None] | ((self.xyz_min>rays_pts) | (rays_pts>self.xyz_max)).any(dim=-1)
return rays_pts, mask_outbbox, step
def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):
'''Volume rendering'''
ret_dict = {}
N = len(rays_o)
ray_pts, ray_id, step_id, mask_outbbox, N_steps = self.sample_ray_cuda(
rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, **render_kwargs)
# interval = render_kwargs['stepsize'] * self.voxel_size_ratio
# skip known free space
if self.mask_cache is not None:
mask = self.mask_cache(ray_pts)
ray_pts = ray_pts[mask]
ray_id = ray_id[mask]
step_id = step_id[mask]
mask_outbbox[~mask_outbbox] |= ~mask
sdf_grid = self.smooth_conv(self.sdf.grid) if self.smooth_sdf else self.sdf.grid
sdf = self.grid_sampler(ray_pts, sdf_grid)
self.gradient = self.neus_sdf_gradient(sdf=self.sdf.grid)
gradient = self.grid_sampler(ray_pts, self.gradient)
dist = render_kwargs['stepsize'] * self.voxel_size
s_val, alpha = self.neus_alpha_from_sdf_scatter(viewdirs, ray_id, dist, sdf, gradient, global_step=global_step,
is_train=global_step is not None, use_mid=True)
weights, alphainv_last = Alphas2Weights.apply(alpha, ray_id, N)
if self.fast_color_thres > 0:
mask = weights > self.fast_color_thres
ray_pts = ray_pts[mask]
ray_id = ray_id[mask]
step_id = step_id[mask]
alpha = alpha[mask]
gradient = gradient[mask]
weights, alphainv_last = Alphas2Weights.apply(alpha, ray_id, N)
rgb_feat = []
if not self.rgbnet_full_implicit:
k0 = self.k0(ray_pts)
rgb_feat.append(k0)
rays_xyz = (ray_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
rgb_feat.append(xyz_emb)
viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
viewdirs_emb = torch.cat(
[viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
rgb_feat.append(viewdirs_emb.flatten(0, -2)[ray_id])
rgb_feat = torch.cat(rgb_feat, -1)
if self.geo_rgb_dim == 3:
normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
rgb_feat = torch.cat([rgb_feat, normal], -1)
rgb_logit = self.rgbnet(rgb_feat)
rgb = torch.sigmoid(rgb_logit)
rgb_marched = segment_coo(
src=(weights.unsqueeze(-1) * rgb),
index=ray_id, out=torch.zeros([N, 3]), reduce='sum')
cum_weights = segment_coo(
src=(weights.unsqueeze(-1)),
index=ray_id, out=torch.zeros([N, 1]), reduce='sum')
# Ray marching
rgb_marched = rgb_marched + (1 - cum_weights) * render_kwargs['bg']
# rgb_marched = rgb_marched + alphainv_last.unsqueeze(-1) * render_kwargs['bg']
rgb_marched = rgb_marched.clamp(0, 1)
if gradient is not None and render_kwargs.get('render_grad', False):
normal = gradient / (gradient.norm(2, -1, keepdim=True) + 1e-6)
normal_marched = segment_coo(
src=(weights.unsqueeze(-1) * normal),
index=ray_id, out=torch.zeros([N, 3]), reduce='sum')
else:
normal_marched = None
if getattr(render_kwargs, 'render_depth', False):
with torch.no_grad():
depth = segment_coo(
src=(weights * step_id * dist),
index=ray_id, out=torch.zeros([N]), reduce='sum')
disp = 1 / depth
else:
depth = None
disp = None
ret_dict.update({
'alphainv_cum': alphainv_last,
'weights': weights,
'rgb_marched': rgb_marched,
'normal_marched': normal_marched,
'raw_alpha': alpha,
'raw_rgb': rgb,
'depth': depth,
'disp': disp,
'mask': mask,
'mask_outbbox':mask_outbbox,
'gradient': gradient,
"gradient_error": None,
"s_val": s_val
})
return ret_dict
def mesh_color_forward(self, ray_pts, **kwargs):
### coarse-stage geometry and texture are low in resolution
sdf_grid = self.smooth_conv(self.sdf.grid) if self.smooth_sdf else self.sdf.grid
self.gradient = self.neus_sdf_gradient(sdf=sdf_grid)
gradient = self.grid_sampler(ray_pts, self.gradient).reshape(-1, 3)
normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
viewdirs = -normal
rgb_feat = []
k0 = self.k0(ray_pts)
rgb_feat.append(k0)
rays_xyz = (ray_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
rgb_feat.append(xyz_emb)
viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
viewdirs_emb = torch.cat(
[viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
rgb_feat.append(viewdirs_emb.flatten(0, -2))
rgb_feat = torch.cat(rgb_feat, -1)
if self.geo_rgb_dim == 3:
rgb_feat = torch.cat([rgb_feat, normal], -1)
rgb_logit = self.rgbnet(rgb_feat)
rgb = torch.sigmoid(rgb_logit)
return rgb
def extract_geometry(self, bound_min, bound_max, resolution=128, threshold=0.0, **kwargs):
if self.smooth_sdf:
sdf_grid = self.smooth_conv(self.sdf.grid)
else:
sdf_grid = self.sdf.grid
# self._set_nonempty_mask()
query_func = lambda pts: self.grid_sampler(pts, -sdf_grid)
if resolution is None:
resolution = self.world_size[0]
return extract_geometry(bound_min,
bound_max,
resolution=resolution,
threshold=threshold,
query_func=query_func)
''' Module for the searched coarse geometry.
    It supports queries for both the known free space and the unknown space.
'''
class MaskCache(nn.Module):
    """Known-space cache: thresholds a coarse density grid loaded from a
    checkpoint to decide which query points lie in occupied space."""
    def __init__(self, path, mask_cache_thres, ks=3):
        super().__init__()
        ckpt = torch.load(path)
        cache_kwargs = ckpt['MaskCache_kwargs']
        self.mask_cache_thres = mask_cache_thres
        self.register_buffer('xyz_min', torch.FloatTensor(cache_kwargs['xyz_min']))
        self.register_buffer('xyz_max', torch.FloatTensor(cache_kwargs['xyz_max']))
        # Max-pool dilates the occupied region so borderline voxels survive.
        self.register_buffer('density', F.max_pool3d(
            ckpt['model_state_dict']['density'], kernel_size=ks, padding=ks//2, stride=1))
        self.act_shift = cache_kwargs['act_shift']
        self.voxel_size_ratio = cache_kwargs['voxel_size_ratio']
        self.nearest = cache_kwargs.get('nearest', False)

    @torch.no_grad()
    def forward(self, xyz):
        """Return a boolean mask: True where the cached alpha >= threshold."""
        out_shape = xyz.shape[:-1]
        pts = xyz.reshape(1, 1, 1, -1, 3)
        # Normalize into [-1, 1] and flip xyz -> zyx for grid_sample.
        ind_norm = ((pts - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        if self.nearest:
            density = F.grid_sample(self.density, ind_norm, align_corners=True, mode='nearest')
        else:
            density = F.grid_sample(self.density, ind_norm, align_corners=True)
        alpha = 1 - torch.exp(-F.softplus(density + self.act_shift) * self.voxel_size_ratio)
        alpha = alpha.reshape(*out_shape)
        return alpha >= self.mask_cache_thres
''' Misc
'''
def total_variation(v, mask=None):
    """Masked total variation of a 5D grid tensor along its spatial dims.

    Sums absolute finite differences of `v` along dims 2, 3 and 4, divides by 3
    and normalizes by the number of masked-in voxels.

    Args:
        v: tensor of shape [1, C, X, Y, Z].
        mask: optional boolean tensor of the same shape; a difference counts
            only when both of its endpoints are masked in, and the result is
            normalized by mask.sum().  Bug fix: the original crashed with
            AttributeError when mask was None despite the default; now falls
            back to normalizing by v.numel().

    Returns: scalar tensor.
    """
    # Tensor.diff exists since torch 1.8; the original compared
    # torch.__version__ == '1.10.0' exactly, silently taking the slower
    # slicing path on every other version. Both paths are numerically equal.
    if hasattr(v, 'diff'):
        tv2 = v.diff(dim=2).abs()
        tv3 = v.diff(dim=3).abs()
        tv4 = v.diff(dim=4).abs()
    else:
        tv2 = (v[:,:,1:,:,:] - v[:,:,:-1,:,:]).abs()
        tv3 = (v[:,:,:,1:,:] - v[:,:,:,:-1,:]).abs()
        tv4 = (v[:,:,:,:,1:] - v[:,:,:,:,:-1]).abs()
    if mask is not None:
        tv2 = tv2[mask[:,:,:-1] & mask[:,:,1:]]
        tv3 = tv3[mask[:,:,:,:-1] & mask[:,:,:,1:]]
        tv4 = tv4[mask[:,:,:,:,:-1] & mask[:,:,:,:,1:]]
        denom = mask.sum()
    else:
        denom = v.numel()
    return (tv2.sum() + tv3.sum() + tv4.sum()) / 3 / denom
class Alphas2Weights(torch.autograd.Function):
    """Custom autograd op converting per-sample alphas (grouped by ray_id)
    into volume-rendering weights via the CUDA kernel, with analytic backward."""
    @staticmethod
    def forward(ctx, alpha, ray_id, N):
        # T: running transmittance per sample; alphainv_last: leftover
        # transparency per ray; i_start/i_end: each ray's sample range.
        weights, T, alphainv_last, i_start, i_end = render_utils_cuda.alpha2weight(alpha, ray_id, N)
        if alpha.requires_grad:
            ctx.save_for_backward(alpha, weights, T, alphainv_last, i_start, i_end)
            ctx.n_rays = N
        return weights, alphainv_last
    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_weights, grad_last):
        # Gradient flows only to `alpha`; ray_id and N are non-differentiable.
        alpha, weights, T, alphainv_last, i_start, i_end = ctx.saved_tensors
        grad = render_utils_cuda.alpha2weight_backward(
                alpha, weights, T, alphainv_last,
                i_start, i_end, ctx.n_rays, grad_weights, grad_last)
        return grad, None, None
''' Ray and batch
'''
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
    """Build per-pixel world-space ray origins and directions for an HxW image.

    `mode` picks the sub-pixel sample point: 'lefttop', 'center', or 'random'.
    """
    # Pixel coordinate grids; meshgrid is 'ij'-ordered, transpose to (H, W).
    i, j = torch.meshgrid(
        torch.linspace(0, W-1, W, device=c2w.device),
        torch.linspace(0, H-1, H, device=c2w.device))  # pytorch's meshgrid has indexing='ij'
    i = i.t().float()
    j = j.t().float()
    if mode == 'center':
        i, j = i + 0.5, j + 0.5
    elif mode == 'random':
        i = i + torch.rand_like(i)
        j = j + torch.rand_like(j)
    elif mode != 'lefttop':
        raise NotImplementedError
    if flip_x:
        i = i.flip((1,))
    if flip_y:
        j = j.flip((0,))
    # Camera-space directions; axis sign conventions depend on inverse_y.
    if inverse_y:
        dirs = torch.stack([(i-K[0][2])/K[0][0], (j-K[1][2])/K[1][1], torch.ones_like(i)], -1)
    else:
        dirs = torch.stack([(i-K[0][2])/K[0][0], -(j-K[1][2])/K[1][1], -torch.ones_like(i)], -1)
    # Rotate ray directions from camera frame to the world frame
    # (dot product, equals to: [c2w.dot(dir) for dir in dirs]).
    rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    # Every ray shares the camera position as its origin.
    rays_o = c2w[:3, 3].expand(rays_d.shape)
    return rays_o, rays_d
def get_rays_np(H, W, K, c2w):
    """NumPy twin of get_rays: world-space ray origins/directions with the
    OpenGL convention (camera looks down -z, y up)."""
    xs, ys = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy')
    # y and z are negated per the OpenGL-style camera frame.
    dirs = np.stack([(xs - K[0][2]) / K[0][0], -(ys - K[1][2]) / K[1][1], -np.ones_like(xs)], -1)
    # Rotate ray directions from camera frame to the world frame
    # (dot product, equals to: [c2w.dot(dir) for dir in dirs]).
    rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    # Broadcast the camera position as the shared origin of every ray.
    rays_o = np.broadcast_to(c2w[:3, 3], np.shape(rays_d))
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Warp rays into normalized device coordinates (as in the original NeRF)."""
    # Advance each origin to its intersection with the z = -near plane.
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t[..., None] * rays_d

    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    # Perspective projection of origins and directions into NDC space.
    o0 = -1. / (W / (2. * focal)) * ox / oz
    o1 = -1. / (H / (2. * focal)) * oy / oz
    o2 = 1. + 2. * near / oz
    d0 = -1. / (W / (2. * focal)) * (dx / dz - ox / oz)
    d1 = -1. / (H / (2. * focal)) * (dy / dz - oy / oz)
    d2 = -2. * near / oz

    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
    """Rays plus unit view directions for one camera view, optionally in NDC.

    Returns (rays_o, rays_d, viewdirs); viewdirs are the pre-NDC directions
    normalized to unit length (used for view-dependent shading).
    """
    origins, dirs = get_rays(H, W, K, c2w, inverse_y=inverse_y,
                             flip_x=flip_x, flip_y=flip_y, mode=mode)
    viewdirs = dirs / dirs.norm(dim=-1, keepdim=True)
    if ndc:
        origins, dirs = ndc_rays(H, W, K[0][0], 1., origins, dirs)
    return origins, dirs, viewdirs
@torch.no_grad()
def get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute per-pixel rays for every training view, kept as [V, H, W, 3].

    Requires all views to share one resolution and one intrinsic matrix.
    Returns (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz) with the ray
    tensors allocated on rgb_tr's device.
    """
    print('get_training_rays: start')
    # All views must share the same (H, W) and the same intrinsics K.
    assert len(np.unique(HW, axis=0)) == 1
    assert len(np.unique(Ks.reshape(len(Ks),-1), axis=0)) == 1
    assert len(rgb_tr) == len(train_poses) and len(rgb_tr) == len(Ks) and len(rgb_tr) == len(HW)
    H, W = HW[0]
    K = Ks[0]
    eps_time = time.time()
    rays_o_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    rays_d_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    viewdirs_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    # NOTE(review): imsz is a list of 1s here, unlike the flattened variants
    # below where it holds per-view pixel counts — presumably only its length
    # matters to callers of this variant; confirm against call sites.
    imsz = [1] * len(rgb_tr)
    for i, c2w in enumerate(train_poses):
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
                H=H, W=W, K=K, c2w=c2w, ndc=ndc, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        # copy_ moves the per-view result into the preallocated buffers.
        rays_o_tr[i].copy_(rays_o.to(rgb_tr.device))
        rays_d_tr[i].copy_(rays_d.to(rgb_tr.device))
        viewdirs_tr[i].copy_(viewdirs.to(rgb_tr.device))
        del rays_o, rays_d, viewdirs
    eps_time = time.time() - eps_time
    print('get_training_rays: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute rays for all training views, flattened into [sum(H*W), 3].

    Unlike `get_training_rays`, per-view resolutions/intrinsics may differ.
    Returns (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz) where imsz
    lists the pixel count contributed by each view, in order.
    """
    print('get_training_rays_flatten: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    eps_time = time.time()
    DEVICE = rgb_tr_ori[0].device
    # Total number of pixels over all views.
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0  # write cursor into the flat buffers
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
                H=H, W=W, K=K, c2w=c2w, ndc=ndc,
                inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        n = H * W
        rgb_tr[top:top+n].copy_(img.flatten(0,1))
        rays_o_tr[top:top+n].copy_(rays_o.flatten(0,1).to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d.flatten(0,1).to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs.flatten(0,1).to(DEVICE))
        imsz.append(n)
        top += n
    assert top == N
    eps_time = time.time() - eps_time
    print('get_training_rays_flatten: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_in_maskcache_sampling(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y, model, render_kwargs):
    """Like `get_training_rays_flatten`, but keeps only rays that hit occupied space.

    A ray survives when at least one of its samples lies inside the scene
    bbox AND is not ruled out by the model's coarse mask cache; the flat
    buffers are then truncated to the surviving count.
    """
    print('get_training_rays_in_maskcache_sampling: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    CHUNK = 64  # rows of the image processed per mask-cache query
    DEVICE = rgb_tr_ori[0].device
    eps_time = time.time()
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    # Buffers are sized for the worst case (all rays kept) and truncated later.
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
                H=H, W=W, K=K, c2w=c2w, ndc=ndc,
                inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        mask = torch.ones(img.shape[:2], device=DEVICE, dtype=torch.bool)
        for i in range(0, img.shape[0], CHUNK):
            rays_pts, mask_outbbox, _ = model.sample_ray_ori(
                    rays_o=rays_o[i:i+CHUNK], rays_d=rays_d[i:i+CHUNK], **render_kwargs)
            # NOTE(review): for samples inside the bbox (~mask_outbbox), flip
            # them to "out" when the mask cache reports known-empty space —
            # verify this matches the reference DVGO implementation.
            mask_outbbox[~mask_outbbox] |= (~model.mask_cache(rays_pts[~mask_outbbox]))
            # Keep a pixel only if some sample along its ray remains "in".
            mask[i:i+CHUNK] &= (~mask_outbbox).any(-1).to(DEVICE)
        n = mask.sum()
        rgb_tr[top:top+n].copy_(img[mask])
        rays_o_tr[top:top+n].copy_(rays_o[mask].to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d[mask].to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs[mask].to(DEVICE))
        imsz.append(n)
        top += n
    print('get_training_rays_in_maskcache_sampling: ratio', top / N)
    # Truncate buffers to the rays actually kept.
    rgb_tr = rgb_tr[:top]
    rays_o_tr = rays_o_tr[:top]
    rays_d_tr = rays_d_tr[:top]
    viewdirs_tr = viewdirs_tr[:top]
    eps_time = time.time() - eps_time
    print('get_training_rays_in_maskcache_sampling: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def batch_indices_generator(N, BS):
# torch.randperm on cuda produce incorrect results in my machine
idx, top = torch.LongTensor(np.random.permutation(N)), 0
while True:
if top + BS > N:
idx, top = torch.LongTensor(np.random.permutation(N)), 0
yield idx[top:top+BS]
top += BS
# ===== Voxurf-main/lib/load_mobilebrick.py =====
import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import imageio
# This function is borrowed from IDR: https://github.com/lioryariv/idr
def load_K_Rt_from_P(filename, P=None):
    """Decompose a 3x4 projection matrix into intrinsics and a c2w pose.

    If P is None it is parsed from `filename` (space-separated rows, with an
    optional header line when the file has four lines). Returns
    (intrinsics, pose), both 4x4; intrinsics is normalized so K[2, 2] == 1.
    """
    if P is None:
        rows = open(filename).read().splitlines()
        if len(rows) == 4:
            rows = rows[1:]  # drop header line
        parsed = [[tok[0], tok[1], tok[2], tok[3]]
                  for tok in (row.split(" ") for row in rows)]
        P = np.asarray(parsed).astype(np.float32).squeeze()

    decomposed = cv.decomposeProjectionMatrix(P)
    K, R, t = decomposed[0], decomposed[1], decomposed[2]
    K = K / K[2, 2]

    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K

    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    # Camera center is the homogeneous t divided by its w component.
    pose[:3, 3] = (t[:3] / t[3])[:, 0]
    return intrinsics, pose
def load_mobilebrick_data(basedir, normalize=True, reso_level=2, mask=False, white_bg=False):
    """Load a MobileBrick scene: images, optional masks, cameras and splits.

    Parameters
    ----------
    basedir : scene root containing image/*.jpg, mask/*.png, cameras.npz
    normalize : apply the per-view scale matrices from cameras.npz
    reso_level : integer downsampling factor for images/masks/intrinsics
    mask, white_bg : composite images against a white/black background

    Returns (imgs, poses, render_poses, [H, W, focal], K, i_split,
    scale_mat0, masks); scale_mat0 and masks are None when unavailable.

    FIXES vs. original: no longer crashes with `scale_mats_np[0]` when
    normalize=False, and no longer calls np.stack on an empty mask list
    when the scene has no mask files (downstream already handles None).
    """
    rgb_paths = sorted(glob(os.path.join(basedir, 'image', '*jpg')))
    mask_paths = sorted(glob(os.path.join(basedir, 'mask', '*png')))
    render_cameras_name = 'cameras.npz'
    camera_dict = np.load(os.path.join(basedir, render_cameras_name))
    world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    if normalize:
        scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    else:
        scale_mats_np = None
    all_intrinsics = []
    all_poses = []
    all_imgs = []
    all_masks = []
    for i, (world_mat, im_name) in enumerate(zip(world_mats_np, rgb_paths)):
        P = world_mat @ scale_mats_np[i] if normalize else world_mat
        P = P[:3, :4]
        intrinsics, pose = load_K_Rt_from_P(None, P)
        all_intrinsics.append(intrinsics)
        all_poses.append(pose)
        if len(mask_paths) > 0:
            mask_ = (imageio.imread(mask_paths[i]) / 255.).astype(np.float32)
            if mask_.ndim == 3:
                all_masks.append(mask_[..., :3])
            else:
                all_masks.append(mask_[..., None])
        all_imgs.append((imageio.imread(im_name) / 255.).astype(np.float32))
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    H, W = imgs[0].shape[:2]
    K = all_intrinsics[0]
    focal = all_intrinsics[0][0, 0]
    print("Date original shape: ", H, W)
    # FIX: only stack when masks were actually found.
    masks = np.stack(all_masks, 0) if all_masks else None
    if mask:
        assert masks is not None
        bg = 1. if white_bg else 0.
        imgs = imgs * masks + bg * (1 - masks)
    if reso_level > 1:
        H, W = int(H / reso_level), int(W / reso_level)
        imgs = F.interpolate(torch.from_numpy(imgs).permute(0, 3, 1, 2), size=(H, W)).permute(0, 2, 3, 1).numpy()
        if masks is not None:
            masks = F.interpolate(torch.from_numpy(masks).permute(0, 3, 1, 2), size=(H, W)).permute(0, 2, 3, 1).numpy()
        K[:2] /= reso_level
        focal /= reso_level
    # Deterministic train/val/test split: every 8th image starting at 3.
    i_test = [i for i in range(len(imgs)) if (i - 3) % 8 == 0]
    i_val = i_test
    i_train = list(set(np.arange(len(imgs))) - set(i_test))
    i_split = [np.array(i_train), np.array(i_val), np.array(i_test)]
    render_poses = poses[i_split[-1]]
    # FIX: original unconditionally indexed scale_mats_np, which is None
    # when normalize=False.
    scale_mat0 = scale_mats_np[0] if scale_mats_np is not None else None
    return imgs, poses, render_poses, [H, W, focal], K, i_split, scale_mat0, masks
# ===== Voxurf-main/lib/utils.py =====
import os, math
import numpy as np
import scipy.signal
from typing import List, Optional
from torch import Tensor
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
import matplotlib.pyplot as plt
from plyfile import PlyData, PlyElement
import matplotlib.cm as cm
import matplotlib as matplotlib
import imageio
import logging
from torch.jit._builtins import math
from skimage import measure
import trimesh
from . import grid
def get_root_logger(log_level=logging.INFO, handlers=()):
    """Return the root logger at `log_level`, attaching any extra handlers.

    basicConfig is only applied the first time (when no handlers exist yet),
    so repeated calls do not clobber an existing configuration.
    """
    root = logging.getLogger()
    if not root.hasHandlers():
        logging.basicConfig(
            format='%(asctime)s - %(levelname)s - %(message)s',
            level=log_level)
    for extra in handlers:
        root.addHandler(extra)
    root.setLevel(log_level)
    return root
def file_backup(backup_dir, dir_list=(), conf_path=None):
    """Snapshot source code and config into `backup_dir`/recording.

    Copies every *.py file found in each directory of `dir_list` into
    backup_dir/recording/<dir_name>, and copies `conf_path` (if given)
    to backup_dir/recording/config.conf.

    FIX: the original body was pasted from a method — it referenced an
    undefined `self` and the undefined name `copyfile`, so any call raised
    NameError, and the `backup_dir` parameter was ignored.
    """
    import shutil  # local import: keeps the module's import block unchanged
    recording_dir = os.path.join(backup_dir, 'recording')
    os.makedirs(recording_dir, exist_ok=True)
    for dir_name in dir_list:
        cur_dir = os.path.join(recording_dir, dir_name)
        os.makedirs(cur_dir, exist_ok=True)
        for f_name in os.listdir(dir_name):
            if f_name[-3:] == '.py':
                shutil.copyfile(os.path.join(dir_name, f_name),
                                os.path.join(cur_dir, f_name))
    if conf_path is not None:
        shutil.copyfile(conf_path, os.path.join(recording_dir, 'config.conf'))
''' Misc
'''
def mse2psnr(x):
    """MSE (torch scalar/tensor) -> PSNR in dB, assuming values in [0, 1]."""
    return -10. * torch.log10(x)

def to8b(x):
    """Clip a float array to [0, 1] and quantize to uint8."""
    return (255 * np.clip(x, 0, 1)).astype(np.uint8)
''' Extend Adam to support per-voxel learning rate
'''
class Adam(torch.optim.Optimizer):
    """Adam optimizer extended with an optional per-voxel learning-rate scale.

    Identical to torch.optim.Adam except that `set_pervoxel_lr` installs a
    tensor of per-element rate multipliers; it is applied to any parameter
    whose shape matches (see the `adam` functional below).
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        # Per-element learning-rate multipliers; None disables the feature.
        self.per_lr = None
        super(Adam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Adam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def set_pervoxel_lr(self, count):
        """Scale each element's lr by its visit count, normalized to [0, 1]."""
        assert self.param_groups[0]['params'][0].shape == count.shape
        self.per_lr = count.float() / count.max()

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            max_exp_avg_sqs = []
            state_steps = []
            per_lrs = []
            beta1, beta2 = group['betas']

            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)
                    if p.grad.is_sparse:
                        raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                    grads.append(p.grad)
                    # Only parameters whose shape matches the per-voxel map
                    # receive a per-element learning rate.
                    if self.per_lr is not None and p.shape == self.per_lr.shape:
                        per_lrs.append(self.per_lr)
                    else:
                        per_lrs.append(None)
                    state = self.state[p]
                    # Lazy state initialization
                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

                    if group['amsgrad']:
                        max_exp_avg_sqs.append(state['max_exp_avg_sq'])

                    # update the steps for each param group update
                    state['step'] += 1
                    # record the step after step update
                    state_steps.append(state['step'])

            # Delegate the arithmetic to the functional `adam` below.
            adam(params_with_grad,
                 grads,
                 exp_avgs,
                 exp_avg_sqs,
                 max_exp_avg_sqs,
                 state_steps,
                 amsgrad=group['amsgrad'],
                 beta1=beta1,
                 beta2=beta2,
                 lr=group['lr'],
                 weight_decay=group['weight_decay'],
                 eps=group['eps'],
                 per_lrs=per_lrs)
        return loss
def adam(params: List[Tensor],
         grads: List[Tensor],
         exp_avgs: List[Tensor],
         exp_avg_sqs: List[Tensor],
         max_exp_avg_sqs: List[Tensor],
         state_steps: List[int],
         *,
         amsgrad: bool,
         beta1: float,
         beta2: float,
         lr: float,
         weight_decay: float,
         eps: float,
         per_lrs):
    """Functional Adam step over a list of parameters (cf. torch.optim.Adam),
    extended with an optional per-element learning-rate multiplier per
    parameter (`per_lrs[i]` is a tensor or None).

    Parameters and the moment buffers are updated in place.
    """
    for i, param in enumerate(params):
        grad = grads[i]
        avg = exp_avgs[i]
        avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]
        voxel_lr = per_lrs[i]

        bias_c1 = 1 - beta1 ** step_t
        bias_c2 = 1 - beta2 ** step_t

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        # First / second moment EMAs, updated in place.
        avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        if amsgrad:
            # Track the running maximum of the second moment and use it
            # for normalization (AMSGrad variant).
            torch.maximum(max_exp_avg_sqs[i], avg_sq, out=max_exp_avg_sqs[i])
            denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_c2)).add_(eps)
        else:
            denom = (avg_sq.sqrt() / math.sqrt(bias_c2)).add_(eps)

        scale = lr / bias_c1
        if voxel_lr is not None:
            param.addcdiv_(avg * voxel_lr, denom, value=-scale)
        else:
            param.addcdiv_(avg, denom, value=-scale)
def create_optimizer_or_freeze_model(model, cfg_train, global_step):
    """Build a per-group Adam optimizer from the `lrate_*` entries in cfg_train.

    Each key 'lrate_<name>' in cfg_train refers to a model attribute <name>.
    A positive decayed rate adds that attribute as its own parameter group;
    a non-positive rate freezes it (requires_grad = False). The decay is
    0.1 ** (global_step / (lrate_decay * 1000)).
    """
    decay_factor = 0.1 ** (global_step / (cfg_train.lrate_decay * 1000))
    param_group = []
    for key in cfg_train.keys():
        if not key.startswith('lrate_'):
            continue
        name = key[len('lrate_'):]
        if not hasattr(model, name):
            continue
        param = getattr(model, name)
        if param is None:
            print(f'create_optimizer_or_freeze_model: param {name} not exist')
            continue
        lr = getattr(cfg_train, f'lrate_{name}') * decay_factor
        if lr <= 0:
            print(f'create_optimizer_or_freeze_model: param {name} freeze')
            param.requires_grad = False
            continue
        print(f'create_optimizer_or_freeze_model: param {name} lr {lr}')
        if isinstance(param, nn.Module):
            param = param.parameters()
        param_group.append({'params': param, 'lr': lr, 'name': name})
    return Adam(param_group, betas=(0.9, 0.99))
''' Checkpoint utils
'''
def load_checkpoint(model, optimizer, ckpt_path, no_reload_optimizer, strict=True):
    """Restore model (and optionally optimizer) state from a checkpoint file.

    Returns (model, optimizer, global_step). If the checkpoint's first rgbnet
    layer is narrower than the model's (trained with fewer input feature
    channels), its weight matrix is zero-padded on the right to fit.

    FIXES vs. original: the rgbnet padding is now guarded, so models without
    an `rgbnet` attribute (or checkpoints without that key) no longer crash;
    the bare `except:` around the optimizer restore is narrowed to Exception
    (still re-raised as ValueError under strict, as callers expect).
    """
    ckpt = torch.load(ckpt_path)
    start = ckpt['global_step']
    state = ckpt['model_state_dict']
    if (hasattr(model, 'rgbnet') and 'rgbnet.0.weight' in state
            and model.rgbnet[0].weight.shape != state['rgbnet.0.weight'].shape):
        # Zero-pad the checkpoint's first-layer weight up to the model's width.
        tmp_weight = torch.zeros(model.rgbnet[0].weight.shape)
        h = state['rgbnet.0.weight'].shape[-1]
        tmp_weight[:, :h] = state['rgbnet.0.weight']
        state['rgbnet.0.weight'] = tmp_weight
    model.load_state_dict(state, strict=strict)
    if not no_reload_optimizer:
        try:
            optimizer.load_state_dict(ckpt['optimizer_state_dict'])
        except Exception as e:
            print("Failed to load optimizer state dict")
            if strict:
                raise ValueError from e  # keep the exception type callers catch
            print("Skip!")
    return model, optimizer, start
def load_grid_data(model, ckpt_path, deduce=1, name='density', return_raw=False):
    """Copy one grid tensor (e.g. 'density') from a checkpoint into `model`.

    Falls back to the '<name>.grid' key when the bare name is absent. With
    return_raw=True the raw tensor is returned instead of being written
    into the model. (`deduce` is accepted for signature compatibility.)
    """
    ckpt = torch.load(ckpt_path)
    target = getattr(model, name)
    print(">>> {} loaded from ".format(name), ckpt_path)
    key = name if name in ckpt['model_state_dict'] else name + '.grid'
    if return_raw:
        return ckpt['model_state_dict'][key]
    if isinstance(target, grid.DenseGrid):
        target.grid.data = ckpt['model_state_dict'][key]
    else:
        target.data = ckpt['model_state_dict'][key]
    return model
def load_weight_by_name(model, ckpt_path, deduce=1, name='density', return_raw=False):
    """Copy every checkpoint parameter whose name contains `name` into `model`.

    Matching is by substring over model.named_parameters(); parameters absent
    from the checkpoint are left untouched. (`deduce` and `return_raw` are
    accepted for signature compatibility.)
    """
    ckpt = torch.load(ckpt_path)
    state = ckpt['model_state_dict']
    for param_name, param in model.named_parameters():
        if name in param_name and param_name in state:
            param.data = state[param_name]
            print('load {} to model'.format(param_name))
    print(">>> data with name {} are loaded from ".format(name), ckpt_path)
    return model
def load_model(model_class, ckpt_path, new_kwargs=None, strict=False):
    """Instantiate model_class from a checkpoint's stored kwargs and load weights.

    Entries of `new_kwargs` override the stored constructor kwargs (changed
    values are logged). A strict state-dict load is attempted first; on
    failure, strict=True re-raises while strict=False retries non-strictly.
    """
    ckpt = torch.load(ckpt_path)
    if new_kwargs is not None:
        for key, val in new_kwargs.items():
            if key in ckpt['model_kwargs'] and ckpt['model_kwargs'][key] != val:
                print('updating {} from {} to {}'.format(key, ckpt['model_kwargs'][key], val))
        ckpt['model_kwargs'].update(new_kwargs)
    model = model_class(**ckpt['model_kwargs'])
    try:
        model.load_state_dict(ckpt['model_state_dict'], strict=True)
        print(">>> Checkpoint loaded successfully from {}".format(ckpt_path))
    except Exception as e:
        print(e)
        if strict:
            print(">>> Failed to load checkpoint correctly.")
            # Re-run the strict load so the original error propagates.
            model.load_state_dict(ckpt['model_state_dict'], strict=True)
        else:
            model.load_state_dict(ckpt['model_state_dict'], strict=False)
            print(">>> Checkpoint loaded without strict matching from {}".format(ckpt_path))
    return model
def color_map_color(value, cmap_name='coolwarm', vmin=0, vmax=1):
    """Map |value| through a matplotlib colormap; returns (N, 3) RGB in [0, 1].

    Note the absolute value: the sign of `value` is deliberately discarded
    before normalization.
    """
    normalize = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    colormap = cm.get_cmap(cmap_name)  # e.g. PiYG, coolwarm
    # The colormap returns RGBA; keep only the RGB channels.
    return colormap(normalize(abs(value)))[:, :3]
def analyze_point_cloud(filename=None, log_num=18, rand_offset=False,
                        query_func=None, scale_mats_np=None, save_root=''):
    """Subsample ~2**log_num points from a PLY file and (optionally) color
    them by the SDF returned by `query_func`, writing gt_pcd_eval.ply.

    Without `query_func` the subsampled points are returned instead.
    `scale_mats_np` (a single 4x4 scale matrix) maps the points into the
    normalized space the SDF expects.
    """
    plydata = PlyData.read(filename)
    num_points = 2 ** log_num
    # Uniform stride subsampling down to ~num_points vertices.
    skip = len(plydata['vertex']) // num_points
    idx = np.arange(len(plydata['vertex']))[::skip]
    if rand_offset:
        # Jitter every index (except the last) by a common random offset.
        rand = np.random.randint(skip)
        idx[:-1] += rand
    points = np.vstack([[v[0],v[1],v[2]] for v in plydata['vertex'][idx]])
    if query_func is None:
        return points
    if scale_mats_np is not None:
        # Undo the dataset's scale/translation before querying the SDF.
        point_ = (points - scale_mats_np[:3,3]) / scale_mats_np[0,0]
    else:
        point_ = points
    batch_size = 8192
    sdfs = []
    for i in range(int(np.ceil(len(points) / batch_size))):
        pts = torch.from_numpy(point_[i*batch_size : (i+1)*batch_size]).cuda()
        # NOTE(review): query_func apparently returns negated SDF values;
        # the extra minus restores the convention used below — confirm.
        sdf = -query_func(pts)
        sdfs.append(sdf.cpu().numpy())
    sdfs = np.hstack(sdfs)
    # Map SDF in [-1, 1] to [0, 1] for the colormap.
    colors = (color_map_color(sdfs * 0.5 + 0.5) * 255).astype(np.uint8)
    vertexs = np.array([tuple(v) for v in points], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    vertex_colors = np.array([tuple(v) for v in colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
    vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)
    for prop in vertexs.dtype.names:
        vertex_all[prop] = vertexs[prop]
    for prop in vertex_colors.dtype.names:
        vertex_all[prop] = vertex_colors[prop]
    el = PlyElement.describe(vertex_all, 'vertex')
    PlyData([el]).write(os.path.join(save_root, "gt_pcd_eval.ply"))
    print(">>> Points saved at {}".format(os.path.join(save_root, "gt_pcd_eval.ply")))
    return
def load_point_cloud(filename=None, log_num=17, rand_offset=False, load_normal=False, save_root=''):
    """Subsample a PLY point cloud, estimate/derive normals, and dump a
    normal-colored debug cloud (est_normal.ply) to `save_root`.

    WARNING: this is a debugging utility — it calls exit() before its
    return statement, terminating the process after writing the file.
    """
    plydata = PlyData.read(filename)
    num_points = 2 ** log_num
    # log_num <= 0 disables subsampling.
    if log_num > 0:
        skip = len(plydata['vertex']) // num_points
    else:
        skip = 1
    idx = np.arange(len(plydata['vertex']))[::skip]
    if rand_offset:
        rand = np.random.randint(skip)
        idx[:-1] += rand
    points = np.vstack([[v[0],v[1],v[2]] for v in plydata['vertex'][idx]])
    if load_normal:
        # Estimate normals with Open3D from a local neighborhood.
        import open3d as o3d
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        radius = 3
        # import ipdb; ipdb.set_trace()
        pcd.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=radius,
                                                              max_nn=30))
        normals = np.asarray(pcd.normals)
        # NOTE(review): both assignment targets are column 2 — because the
        # right-hand views are built first, the net effect copies column 1
        # into column 2. This looks like a typo for swapping columns 1 and
        # 2 — confirm the intended axis convention.
        normals[:,2], normals[:,2] = normals[:,1], normals[:,2]
    else:
        # Fallback: treat the unit direction from the origin as the normal.
        normals = points / np.linalg.norm(points, 2, -1, True)
    # Encode normals as RGB for visualization.
    colors = ((normals * 0.5 + 0.5) * 255).astype(np.uint8)
    vertexs = np.array([tuple(v) for v in points], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    vertex_colors = np.array([tuple(v) for v in colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
    vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)
    for prop in vertexs.dtype.names:
        vertex_all[prop] = vertexs[prop]
    for prop in vertex_colors.dtype.names:
        vertex_all[prop] = vertex_colors[prop]
    el = PlyElement.describe(vertex_all, 'vertex')
    PlyData([el]).write(os.path.join(save_root, "est_normal.ply"))
    print(">>> Points saved at {}".format(os.path.join(save_root, "est_normal.ply")))
    # NOTE(review): exit() makes the return below unreachable.
    exit()
    return
# def write_ply(points, normals=None, colors=None, save_root=''):
# # from plyfile import PlyData, PlyElement
# vertexs = np.array([tuple(v) for v in points], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
# if colors is not None:
# vertex_colors = np.array([tuple(v) for v in colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
# vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)
# for prop in vertex_colors.dtype.names:
# vertex_all[prop] = vertex_colors[prop]
# else:
# vertex_all = np.empty(len(vertexs), vertexs.dtype.descr)
# for prop in vertexs.dtype.names:
# vertex_all[prop] = vertexs[prop]
# el = PlyElement.describe(vertex_all, 'vertex')
# PlyData([el]).write(os.path.join(save_root, "tmp.ply"))
def write_ply(points, filename, colors=None, normals=None):
    """Write a point cloud to a binary PLY file.

    points: iterable of xyz triples; colors (optional): values in [0, 1],
    scaled by 255 and stored as uint8 red/green/blue; normals (optional):
    stored as float nx/ny/nz fields.
    """
    vertex = np.array([tuple(p) for p in points],
                      dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    num = len(vertex)
    descr = vertex.dtype.descr

    vertex_normal = None
    if normals is not None:
        vertex_normal = np.array([tuple(vec) for vec in normals],
                                 dtype=[('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4')])
        assert len(vertex_normal) == num
        descr = descr + vertex_normal.dtype.descr

    vertex_color = None
    if colors is not None:
        vertex_color = np.array([tuple(c * 255) for c in colors],
                                dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
        assert len(vertex_color) == num
        descr = descr + vertex_color.dtype.descr

    # Merge the per-attribute structured arrays into one record array.
    merged = np.empty(num, dtype=descr)
    for field in vertex.dtype.names:
        merged[field] = vertex[field]
    if vertex_normal is not None:
        for field in vertex_normal.dtype.names:
            merged[field] = vertex_normal[field]
    if vertex_color is not None:
        for field in vertex_color.dtype.names:
            merged[field] = vertex_color[field]

    PlyData([PlyElement.describe(merged, 'vertex')], text=False).write(filename)
def point_cloud_from_rays(ray_pts, weights, normals):
    # Unfinished debugging stub: drops straight into the ipdb debugger and
    # returns nothing. Do not call this on production code paths.
    import ipdb; ipdb.set_trace()
''' color space process methods
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class GradLayer(nn.Module):
    """Fixed (non-trainable) image-gradient filter for NCHW tensors.

    Produces a 2-channel output (vertical gradient, horizontal gradient).
    ksize selects the stencil: 0 = plain forward differences (no conv),
    1 = central differences (stored as a 3x3 kernel), 3 = 3x3 Sobel,
    5 = 5x5 Sobel-like. RGB inputs are converted to luma first.
    """
    def __init__(self, ksize=3):
        super(GradLayer, self).__init__()
        self.ksize = ksize
        if ksize == 0:
            # 2x2 forward-difference stencils (unused in forward(); the
            # ksize==0 branch there computes the differences directly).
            kernel_v = np.asarray(
                [[-1, 0],
                 [ 1, 0]])
            kernel_h = np.asarray(
                [[-1, 1],
                 [ 0, 0]])
        elif ksize == 1:
            # Central differences embedded in a 3x3 kernel.
            kernel_v = np.asarray(
                [[0, -1, 0],
                 [0, 0, 0],
                 [0, 1, 0]])
            kernel_h = np.asarray(
                [[0, 0, 0],
                 [-1, 0, 1],
                 [0, 0, 0]])
            self.ksize = 3  # conv padding below assumes the stored 3x3 size
        elif ksize == 3:
            # kernel_v = np.asarray(
            #     [[0, -1, 0],
            #      [0, 0, 0],
            #      [0, 1, 0]])
            # kernel_h = np.asarray(
            #     [[0, 0, 0],
            #      [-1, 0, 1],
            #      [0, 0, 0]])
            # sobel
            kernel_v = np.asarray(
                [[-1,-2,-1],
                 [0, 0, 0],
                 [1, 2, 1]])
            kernel_h = np.asarray(
                [[-1, 0, 1],
                 [-2, 0, 2],
                 [-1, 0, 1]])
        elif ksize == 5:
            kernel_v = np.asarray(
                [[-1, -4, -6, -4, -1],
                 [-2, -8, -12, -8, -2],
                 [ 0, 0, 0, 0, 0],
                 [ 2, 8, 12, 8, 2],
                 [ 1, 4, 6, 4, 1],
                 ])
            kernel_h = kernel_v.T
        else:
            raise NotImplementedError
        # Normalize by the L1 mass of the kernel and register as frozen weights.
        kernel_v = torch.FloatTensor(kernel_v/np.abs(kernel_v).sum()).unsqueeze(0).unsqueeze(0)
        kernel_h = torch.FloatTensor(kernel_h/np.abs(kernel_h).sum()).unsqueeze(0).unsqueeze(0)
        self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
        self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)

    def get_gray(self,x):
        '''
        Convert image to its gray one.
        '''
        # BT.601-style luma weights, scaled for the /256 normalization below.
        gray_coeffs = [65.738, 129.057, 25.064]
        convert = x.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
        x_gray = x.mul(convert).sum(dim=1)
        return x_gray.unsqueeze(1)

    def forward(self, x):
        """x: (N, C, H, W) with C in {1, 3}; returns (N, 2, H, W) gradients."""
        if x.shape[1] == 3:
            x = self.get_gray(x)

        if self.ksize == 0:
            # Forward differences, halved; first row/column stay zero.
            x_v = torch.zeros_like(x)
            x_h = torch.zeros_like(x)
            x_v[...,1:,:] = (x[...,1:,:] - x[...,:-1,:]) / 2
            x_h[...,1:] = (x[...,1:] - x[...,:-1]) / 2
        else:
            x_v = F.conv2d(x, self.weight_v, padding=self.ksize//2)
            x_h = F.conv2d(x, self.weight_h, padding=self.ksize//2)
        # x = torch.sqrt(torch.pow(x_v, 2) + torch.pow(x_h, 2) + 1e-6)
        x = torch.cat([x_v, x_h],1)
        return x
class GaussianLayer(nn.Module):
    """Fixed (non-trainable) Gaussian blur for single-channel NCHW tensors.

    Supports ksize 3 or 5; the kernel is normalized to unit sum so the blur
    preserves mean intensity. Zero padding keeps the spatial size unchanged.
    """
    def __init__(self, ksize=3):
        super(GaussianLayer, self).__init__()
        self.ksize = ksize
        if ksize == 3:
            taps = [[1, 2, 1],
                    [2, 4, 2],
                    [1, 2, 1]]
        elif ksize == 5:
            taps = [[1, 4, 7, 4, 1],
                    [4, 16, 26, 16, 4],
                    [7, 26, 41, 26, 7],
                    [4, 16, 26, 16, 4],
                    [1, 4, 7, 4, 1]]
        else:
            raise NotImplementedError
        taps = np.asarray(taps)
        kernel = torch.FloatTensor(taps / np.abs(taps).sum()).unsqueeze(0).unsqueeze(0)
        self.weight = nn.Parameter(data=kernel, requires_grad=False)

    def forward(self, x):
        return F.conv2d(x, self.weight, padding=self.ksize // 2)
def _gaussian_3dconv(ksize=3, sigma=1):
    """Build a frozen 3D Gaussian-smoothing Conv3d (on CUDA, replicate padding).

    The kernel is normalized to unit sum and the bias zeroed, so the layer
    acts as a pure low-pass filter over a [N, 1, D, H, W] grid.
    """
    offsets = np.arange(-(ksize // 2), ksize // 2 + 1, 1)
    xx, yy, zz = np.meshgrid(offsets, offsets, offsets)
    kernel = np.exp(-(xx ** 2 + yy ** 2 + zz ** 2) / (2 * sigma ** 2))
    kernel = torch.from_numpy(kernel).cuda()
    conv = nn.Conv3d(1, 1, ksize, stride=1, padding=ksize // 2, padding_mode='replicate')
    conv.weight.data = kernel[None, None, ...] / kernel.sum()
    conv.bias.data = torch.zeros(1)
    for p in conv.parameters():
        p.requires_grad = False
    # print(kernel)
    return conv
class GradLoss(nn.Module):
    """MSE loss between image gradients (via GradLayer), with optional
    Gaussian pre-smoothing of the ground-truth image.

    When `savedir` is passed to forward(), a side-by-side debug figure of
    the gray images and their v/h gradients is written under
    <savedir>/debug_figs.
    """
    def __init__(self, ksize=3, gaussian=True):
        super(GradLoss, self).__init__()
        self.loss = nn.MSELoss()
        self.grad_layer = GradLayer(ksize=ksize)
        self.gaussian = gaussian
        if self.gaussian:
            self.gaussian_layer = GaussianLayer(ksize=3)

    def forward(self, output, gt_img, savedir=''):
        # Only the ground truth is blurred; the prediction is left as-is.
        if self.gaussian:
            # output = self.gaussian_layer(output)
            gt_img = self.gaussian_layer(gt_img)
        output_grad = self.grad_layer(output)
        gt_grad = self.grad_layer(gt_img)
        loss = self.loss(output_grad, gt_grad)
        if savedir:
            # Gradients are scaled 5x for visibility in the debug figure.
            img1 = np.concatenate([to8b(gt_img.detach().cpu().numpy())[0,0][...,None],
                                   to8b(5*gt_grad.detach().cpu().numpy())[0,0][...,None],
                                   to8b(5*gt_grad.detach().cpu().numpy())[0,1][...,None]], axis=1)
            img2 = np.concatenate([to8b(output.detach().cpu().numpy())[0,0][...,None],
                                   to8b(5*output_grad.detach().cpu().numpy())[0,0][...,None],
                                   to8b(5*output_grad.detach().cpu().numpy())[0,1][...,None]], axis=1)
            img8 = np.concatenate([img1, img2], axis=0)
            if not os.path.exists(os.path.join(savedir, "debug_figs")):
                os.mkdir(os.path.join(savedir, "debug_figs"))
            imageio.imwrite(os.path.join(savedir, "debug_figs", "grad_module_{}.png".format(loss)), img8)
        return loss
def rgb_to_luminance(rgb, return_chromaticity=False, gamma_correction=False, lum_avg=1):
    """Rec.601 luma from an RGB array (channels on the last axis).

    Returns luminance with a trailing singleton axis, optionally together
    with chromaticity = rgb / (luminance + 1e-5). `gamma_correction` is
    currently unused (see the inherited TODO); `lum_avg` rescales the luma.
    """
    # todo: gamma correction?
    weights = (0.299, 0.587, 0.114)
    luminance = sum(w * rgb[..., c] for c, w in enumerate(weights))
    luminance = luminance / lum_avg
    if return_chromaticity:
        chromaticity = rgb / (luminance[..., None] + 1e-5)
        return luminance[..., None], chromaticity
    return luminance[..., None]
def get_sobel(img, ksize=3, thrd=0.1, g_ksize=0, d_ksize=0, suffix='', vis=False):
    """Sobel edge magnitude of an image, optionally Gaussian-smoothed and
    visualized to debug_figs/.

    NOTE(review): `thrd` and `d_ksize` are unused; and for multi-channel
    input the image is converted BGR->RGB (a channel reorder), not to
    grayscale, so Sobel runs per channel — confirm this is intended.
    """
    if img.shape[-1] > 1:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_sobel_x = cv2.Sobel(img, -1, 1, 0, ksize=ksize)
    img_sobel_y = cv2.Sobel(img, -1, 0, 1, ksize=ksize)
    # img_sobel_xy = cv2.Sobel(img, -1, 1, 1, ksize=ksize)
    # Combine |dx| and |dy| into a single edge-magnitude image.
    absx = cv2.convertScaleAbs(img_sobel_x)
    absy = cv2.convertScaleAbs(img_sobel_y)
    sobel = cv2.addWeighted(absx, 0.5, absy, 0.5,0)
    if g_ksize > 0:
        gaussian = cv2.GaussianBlur(sobel,(g_ksize,g_ksize),0)
    else:
        gaussian = sobel
    if vis:
        # Dump original / sobel / smoothed side by side for inspection.
        titles = ['Original', 'Sobel','gaussian']
        images = [img, sobel, gaussian]
        for i in range(len(titles)):
            plt.subplot(1, len(titles), i + 1), plt.imshow(images[i], 'gray')
            plt.title(titles[i])
            plt.xticks([]), plt.yticks([])
        plt.savefig("debug_figs/test_sobel_k{}_{}.jpg".format(ksize, suffix), dpi=200)
    return gaussian
def calc_grad(img, delta=1, padding=None, kernel=None):
    """Finite-difference gradients of an (H, W, C) image tensor.

    Returns an (H-delta, W-delta, 2C) tensor: the x-gradient channels
    followed by the y-gradient channels, cropped to their common valid
    region. Only padding=None is implemented; `kernel` is unused.
    """
    if padding is not None:
        raise NotImplementedError
    dx = (img[delta:] - img[:-delta]) / delta        # difference along rows
    dy = (img[:, delta:] - img[:, :-delta]) / delta  # difference along columns
    return torch.cat([dx[:, delta:], dy[delta:, :]], -1)
''' Evaluation metrics (ssim, lpips)
'''
def rgb_ssim(img0, img1, max_val,
             filter_size=11,
             filter_sigma=1.5,
             k1=0.01,
             k2=0.03,
             return_map=False):
    """SSIM between two HxWx3 images with values in [0, max_val].

    Modified from https://github.com/google/mipnerf/blob/16e73dfdb52044dcceb47cda5243a686391a6e0f/internal/math.py#L58
    Returns the mean SSIM, or the per-pixel SSIM map if return_map=True
    (borders are cropped by the 'valid' convolution).
    """
    assert len(img0.shape) == 3
    assert img0.shape[-1] == 3
    assert img0.shape == img1.shape

    # 1D Gaussian taps for a separable blur.
    half = filter_size // 2
    shift = (2 * half - filter_size + 1) / 2
    taps = np.exp(-0.5 * ((np.arange(filter_size) - half + shift) / filter_sigma) ** 2)
    taps /= np.sum(taps)

    def blur(z):
        # Per-channel separable Gaussian blur (faster than 2D convolution).
        def conv(ch, f):
            return scipy.signal.convolve2d(ch, f, mode='valid')
        return np.stack(
            [conv(conv(z[..., c], taps[:, None]), taps[None, :])
             for c in range(z.shape[-1])], -1)

    mu0 = blur(img0)
    mu1 = blur(img1)
    mu00, mu11, mu01 = mu0 * mu0, mu1 * mu1, mu0 * mu1
    # Variances clipped to be non-negative; covariance magnitude bounded
    # by sqrt(var0 * var1).
    sigma00 = np.maximum(0., blur(img0 ** 2) - mu00)
    sigma11 = np.maximum(0., blur(img1 ** 2) - mu11)
    sigma01 = blur(img0 * img1) - mu01
    sigma01 = np.sign(sigma01) * np.minimum(
        np.sqrt(sigma00 * sigma11), np.abs(sigma01))

    c1 = (k1 * max_val) ** 2
    c2 = (k2 * max_val) ** 2
    numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
    denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
    ssim_map = numer / denom
    return ssim_map if return_map else np.mean(ssim_map)
# Cache of constructed LPIPS networks, keyed by backbone name.
__LPIPS__ = {}
def init_lpips(net_name, device):
    """Construct an LPIPS perceptual-similarity scorer ('alex' or 'vgg')
    in eval mode on `device`."""
    assert net_name in ['alex', 'vgg']
    import lpips
    print(f'init_lpips: lpips_{net_name}')
    return lpips.LPIPS(net=net_name, version='0.1').eval().to(device)
def rgb_lpips(np_gt, np_im, net_name, device):
    """LPIPS distance between two HxWx3 numpy images in [0, 1]; lower is
    more similar. The backbone network is built lazily and cached."""
    if net_name not in __LPIPS__:
        __LPIPS__[net_name] = init_lpips(net_name, device)
    # LPIPS expects CHW tensors; normalize=True maps [0, 1] -> [-1, 1].
    gt = torch.from_numpy(np_gt).permute([2, 0, 1]).contiguous().to(device)
    im = torch.from_numpy(np_im).permute([2, 0, 1]).contiguous().to(device)
    return __LPIPS__[net_name](gt, im, normalize=True).item()
"""
Sampling strategies
"""
def up_sample(rays_o, rays_d, z_vals, sdf, n_importance, inv_s):
    """
    Up sampling give a fixed inv_s
    copied from neus

    rays_o/rays_d: (batch, 3); z_vals: (batch, n_samples) depths along each
    ray; sdf: SDF values at those samples. Estimates per-interval alphas
    from the SDF (sigmoid CDF with sharpness inv_s) and draws n_importance
    new depths from the resulting weights (deterministic, detached).
    """
    batch_size, n_samples = z_vals.shape
    pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]  # n_rays, n_samples, 3
    radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False)
    # Interval counts as inside the unit sphere if either endpoint is.
    inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0)
    sdf = sdf.reshape(batch_size, n_samples)
    prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]
    prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]
    mid_sdf = (prev_sdf + next_sdf) * 0.5
    # Directional derivative of the SDF along the ray, per interval.
    cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)

    # ----------------------------------------------------------------------------------------------------------
    # Use min value of [ cos, prev_cos ]
    # Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more
    # robust when meeting situations like below:
    #
    # SDF
    # ^
    # |\          -----x----...
    # | \        /
    # |  x      x
    # |---\----/-------------> 0 level
    # |    \  /
    # |     \/
    # |
    # ----------------------------------------------------------------------------------------------------------
    prev_cos_val = torch.cat([torch.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1)
    cos_val = torch.stack([prev_cos_val, cos_val], dim=-1)
    cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False)
    # Keep only descending (surface-approaching) intervals inside the sphere.
    cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere

    dist = (next_z_vals - prev_z_vals)
    # Linear SDF estimates at the interval endpoints from the midpoint value.
    prev_esti_sdf = mid_sdf - cos_val * dist * 0.5
    next_esti_sdf = mid_sdf + cos_val * dist * 0.5
    prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s)
    next_cdf = torch.sigmoid(next_esti_sdf * inv_s)
    alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)
    # Standard volume-rendering weights: alpha * transmittance.
    weights = alpha * torch.cumprod(
        torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]

    z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()
    return z_samples
def sample_pdf(bins, weights, n_samples, det=False):
# This implementation is from NeRF
# Get pdf
weights = weights + 1e-5 # prevent nans
pdf = weights / torch.sum(weights, -1, keepdim=True)
cdf = torch.cumsum(pdf, -1)
cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)
# Take uniform samples
if det:
u = torch.linspace(0. + 0.5 / n_samples, 1. - 0.5 / n_samples, steps=n_samples)
u = u.expand(list(cdf.shape[:-1]) + [n_samples])
else:
u = torch.rand(list(cdf.shape[:-1]) + [n_samples])
# Invert CDF
u = u.contiguous()
inds = torch.searchsorted(cdf, u, right=True)
below = torch.max(torch.zeros_like(inds - 1), inds - 1)
above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds)
inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2)
matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]]
cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
denom = (cdf_g[..., 1] - cdf_g[..., 0])
denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom)
t = (u - cdf_g[..., 0]) / denom
samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
return samples
"""
Ref-NeRF utils
"""
def pos_enc(x, min_deg, max_deg, append_identity=True):
    """NeRF-style sinusoidal positional encoding of `x`.

    Each input dimension is scaled by the frequencies 2**min_deg .. 2**(max_deg-1)
    and passed through sin at two phases (0 and pi/2, i.e. sin and cos).
    Note this indexes x.shape[1], so `x` is expected to be 2-D [batch, dim].
    """
    freqs = 2 ** torch.arange(min_deg, max_deg)
    flat_shape = x.shape[:-1] + (int(x.shape[1] * freqs.shape[0]),)
    scaled = (x[..., None, :] * freqs[:, None]).reshape(flat_shape)
    # sin(t) and sin(t + pi/2) == cos(t), concatenated along the feature axis.
    features = torch.sin(torch.cat([scaled, scaled + 0.5 * torch.pi], axis=-1))
    if append_identity:
        return torch.cat([x, features], axis=-1)
    return features
def generalized_binomial_coeff(a, k):
    """Compute the generalized binomial coefficient a(a-1)...(a-k+1) / k!.

    Args:
        a: real-valued (possibly non-integer) upper argument.
        k: non-negative integer number of factors.

    Returns:
        A float, the generalized binomial coefficient.
    """
    import math  # local import: the np.math alias was removed in NumPy 1.25
    return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
    """Compute associated Legendre polynomial coefficients.

    Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
    (l, m)th associated Legendre polynomial, P_l^m(cos(theta)).

    Args:
        l: associated Legendre polynomial degree.
        m: associated Legendre polynomial order.
        k: power of cos(theta).

    Returns:
        A float, the coefficient of the term corresponding to the inputs.
    """
    import math  # local import: the np.math alias was removed in NumPy 1.25
    return ((-1)**m * 2**l * math.factorial(l) / math.factorial(k) /
            math.factorial(l - k - m) *
            generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l))
def sph_harm_coeff(l, m, k):
    """Compute spherical harmonic coefficients.

    Combines the standard real spherical-harmonic normalisation factor with the
    associated Legendre coefficient for the cos^k term of P_l^m.
    """
    import math  # local import: the np.math alias was removed in NumPy 1.25
    return (np.sqrt(
        (2.0 * l + 1.0) * math.factorial(l - m) /
        (4.0 * np.pi * math.factorial(l + m))) * assoc_legendre_coeff(l, m, k))
def get_ml_array(deg_view):
    """Enumerate all (m, l) spherical-harmonic index pairs used by the encoding.

    Degrees are the powers of two l = 1, 2, ..., 2**(deg_view - 1); only
    nonnegative orders m are listed (real/imaginary parts are split later).

    Returns:
        A [2, n_pairs] numpy array; row 0 holds m values, row 1 holds l values.
    """
    pairs = [(m, 2 ** i) for i in range(deg_view) for m in range(2 ** i + 1)]
    return np.array(pairs).T
def generate_ide_fn(deg_view):
    """Generate integrated directional encoding (IDE) function (torch version).

    This function returns a function that computes the integrated directional
    encoding from Equations 6-8 of arxiv.org/abs/2112.03907.

    Args:
        deg_view: number of spherical harmonics degrees to use.

    Returns:
        A function for evaluating integrated directional encoding.

    Raises:
        ValueError: if deg_view is larger than 5.
    """
    if deg_view > 5:
        raise ValueError('Only deg_view of at most 5 is numerically stable.')
    ml_array = get_ml_array(deg_view)
    l_max = 2**(deg_view - 1)
    # Create a matrix corresponding to ml_array holding all coefficients, which,
    # when multiplied (from the right) by the z coordinate Vandermonde matrix,
    # results in the z component of the encoding.
    # NOTE(review): `mat` stays on CPU while ml_array is moved to CUDA below —
    # presumably xyz/kappa_inv arrive on CUDA; confirm device handling of `mat`.
    mat = torch.zeros((l_max + 1, ml_array.shape[1]))
    for i, (m, l) in enumerate(ml_array.T):
        for k in range(l - m + 1):
            mat[k, i] = sph_harm_coeff(l, m, k)
    # NOTE(review): hard-coded .cuda() makes this unusable on CPU-only machines.
    ml_array = torch.from_numpy(ml_array).cuda()
    def integrated_dir_enc_fn(xyz, kappa_inv):
        """Function returning integrated directional encoding (IDE).

        Args:
            xyz: [..., 3] array of Cartesian coordinates of directions.
            kappa_inv: [..., 1] reciprocal of the concentration parameter of
                the von Mises-Fisher distribution.

        Returns:
            An array with the resulting IDE (real parts then imaginary parts).
        """
        x = xyz[..., 0:1]
        y = xyz[..., 1:2]
        z = xyz[..., 2:3]
        # Compute z Vandermonde matrix (powers z^0 .. z^l_max).
        vmz = torch.cat([z**i for i in range(mat.shape[0])], axis=-1)
        # Compute x+iy Vandermonde matrix ((x+iy)^m for each order m).
        vmxy = torch.cat([(x + 1j * y)**m for m in ml_array[0, :]], axis=-1)
        # Get spherical harmonics.
        sph_harms = vmxy * torch.matmul(vmz, mat)
        # Apply attenuation function using the von Mises-Fisher distribution
        # concentration parameter, kappa (Eq. 8: exp(-sigma_l / kappa)).
        sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
        ide = sph_harms * torch.exp(-sigma * kappa_inv)
        # Split into real and imaginary parts and return.
        return torch.cat([torch.real(ide), torch.imag(ide)], axis=-1)
    return integrated_dir_enc_fn
def generate_enc_fn(mode, deg_view):
    """Return a directional-encoding function and its output dimensionality.

    Args:
        mode: 'pos_enc' for plain NeRF positional encoding, or 'ide' for the
            Ref-NeRF integrated directional encoding.
        deg_view: number of frequency octaves / spherical-harmonic degrees.

    Returns:
        (enc_fn, out_dim) where enc_fn(direction, kappa_inv) -> encoded features.

    Raises:
        NameError: if `mode` is not a supported encoding.
    """
    if mode == 'pos_enc':
        def dir_enc_fn(direction, _):
            # kappa_inv is ignored by the plain positional encoding.
            return pos_enc(
                direction, min_deg=0, max_deg=deg_view, append_identity=True)
        # 3 identity dims + (sin, cos) pair per octave per input dim.
        return dir_enc_fn, 3 + 3 * deg_view * 2
    elif mode == 'ide':
        # IDE output widths for deg_view = 1..4 (see generate_ide_fn).
        ide_dims = [4, 10, 20, 38]
        return generate_ide_fn(deg_view), ide_dims[deg_view - 1]
    else:
        # NameError kept for backward compatibility; now carries the bad mode.
        raise NameError('unknown directional encoding mode: {}'.format(mode))
# def generate_dir_enc_fn(deg_view):
# """Generate directional encoding (DE) function.
# Args:
# deg_view: number of spherical harmonics degrees to use.
# Returns:
# A function for evaluating directional encoding.
# """
# integrated_dir_enc_fn = generate_ide_fn(deg_view)
#
# def dir_enc_fn(xyz):
# """Function returning directional encoding (DE)."""
# return integrated_dir_enc_fn(xyz, torch.zeros_like(xyz[..., :1]))
#
# return dir_enc_fn
@torch.no_grad()
def get_surface_sliding(sdf, resolution=512, grid_boundary=[-1.1, 1.1], level=0):
    """Extract a surface mesh from an SDF via coarse-to-fine sliding-window marching cubes.

    The [grid_boundary]^3 cube is split into (resolution/512)^3 crops of 512^3
    samples each. Per crop, the SDF is evaluated on a 4-level point pyramid so
    that fine evaluations only happen near the (estimated) surface, then
    marching cubes is run and the crop meshes are concatenated.

    Args:
        sdf: callable mapping [N, 3] CUDA points to [N] signed distances.
        resolution: total grid resolution; must be a multiple of 512.
        grid_boundary: [min, max] extent used for all three axes.
        level: iso-level — NOTE(review): immediately overwritten to 0 below,
            so a non-zero argument is silently ignored; confirm intent.

    Returns:
        A trimesh.Trimesh with all crop meshes concatenated.
    """
    avg_pool_3d = torch.nn.AvgPool3d(2, stride=2)
    upsample = torch.nn.Upsample(scale_factor=2, mode='nearest')
    assert resolution % 512 == 0
    resN = resolution
    cropN = 512
    level = 0  # NOTE(review): overrides the `level` parameter (see docstring)
    N = resN // cropN
    grid_min = [grid_boundary[0], grid_boundary[0], grid_boundary[0]]
    grid_max = [grid_boundary[1], grid_boundary[1], grid_boundary[1]]
    # Crop boundaries along each axis.
    xs = np.linspace(grid_min[0], grid_max[0], N+1)
    ys = np.linspace(grid_min[1], grid_max[1], N+1)
    zs = np.linspace(grid_min[2], grid_max[2], N+1)
    print(xs)
    print(ys)
    print(zs)
    meshes = []
    for i in range(N):
        for j in range(N):
            for k in range(N):
                print(i, j, k)
                x_min, x_max = xs[i], xs[i+1]
                y_min, y_max = ys[j], ys[j+1]
                z_min, z_max = zs[k], zs[k+1]
                x = np.linspace(x_min, x_max, cropN)
                y = np.linspace(y_min, y_max, cropN)
                z = np.linspace(z_min, z_max, cropN)
                xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
                # NOTE(review): hard-coded .cuda() — CPU-only execution will fail.
                points = torch.tensor(np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T, dtype=torch.float).cuda()
                def evaluate(points):
                    # Chunked SDF evaluation to bound peak GPU memory.
                    z = []
                    for _, pnts in enumerate(torch.split(points, 100000, dim=0)):
                        z.append(sdf(pnts))
                    z = torch.cat(z, axis=0)
                    return z
                # Construct the point pyramid: full-res crop plus 3 average-pooled
                # levels, ordered coarsest first.
                points = points.reshape(cropN, cropN, cropN, 3).permute(3, 0, 1, 2)
                points_pyramid = [points]
                for _ in range(3):
                    points = avg_pool_3d(points[None])[0]
                    points_pyramid.append(points)
                points_pyramid = points_pyramid[::-1]
                # Evaluate the pyramid coarse-to-fine, refining only where the
                # coarser SDF magnitude is below a (halving) threshold.
                mask = None
                threshold = 2 * (x_max - x_min)/cropN * 8
                for pid, pts in enumerate(points_pyramid):
                    coarse_N = pts.shape[-1]
                    pts = pts.reshape(3, -1).permute(1, 0).contiguous()
                    if mask is None:
                        pts_sdf = evaluate(pts)
                    else:
                        mask = mask.reshape(-1)
                        pts_to_eval = pts[mask]
                        #import pdb; pdb.set_trace()
                        if pts_to_eval.shape[0] > 0:
                            pts_sdf_eval = evaluate(pts_to_eval.contiguous())
                            pts_sdf[mask] = pts_sdf_eval
                        print("ratio", pts_to_eval.shape[0] / pts.shape[0])
                    if pid < 3:
                        # Update mask and upsample the coarse SDF to the next level.
                        mask = torch.abs(pts_sdf) < threshold
                        mask = mask.reshape(coarse_N, coarse_N, coarse_N)[None, None]
                        mask = upsample(mask.float()).bool()
                        pts_sdf = pts_sdf.reshape(coarse_N, coarse_N, coarse_N)[None, None]
                        pts_sdf = upsample(pts_sdf)
                        pts_sdf = pts_sdf.reshape(-1)
                    threshold /= 2.
                z = pts_sdf.detach().cpu().numpy()
                # Skip crops whose SDF never crosses the iso-level.
                if (not (np.min(z) > level or np.max(z) < level)):
                    z = z.astype(np.float32)
                    verts, faces, normals, values = measure.marching_cubes(
                        volume=z.reshape(cropN, cropN, cropN), #.transpose([1, 0, 2]),
                        level=level,
                        spacing=(
                            (x_max - x_min)/(cropN-1),
                            (y_max - y_min)/(cropN-1),
                            (z_max - z_min)/(cropN-1) ))
                    print(np.array([x_min, y_min, z_min]))
                    print(verts.min(), verts.max())
                    # Shift vertices from crop-local to world coordinates.
                    verts = verts + np.array([x_min, y_min, z_min])
                    print(verts.min(), verts.max())
                    meshcrop = trimesh.Trimesh(verts, faces, normals)
                    #meshcrop.export(f"{i}_{j}_{k}.ply")
                    meshes.append(meshcrop)
    combined = trimesh.util.concatenate(meshes)
    return combined
# copy from MiDaS
def compute_scale_and_shift(prediction, target, mask):
    """Per-image least-squares scale and shift aligning `prediction` to `target`.

    Solves min_{s, t} sum(mask * (s * prediction + t - target)^2) independently
    for each batch element, via the 2x2 normal equations. Singular systems
    (zero determinant) yield scale = shift = 0.

    Args:
        prediction, target, mask: [batch, H, W] tensors.

    Returns:
        (scale, shift): two [batch] tensors.
    """
    # Normal-equation system A x = b with A = [[a_00, a_01], [a_01, a_11]].
    a_00 = (mask * prediction * prediction).sum(dim=(1, 2))
    a_01 = (mask * prediction).sum(dim=(1, 2))
    a_11 = mask.sum(dim=(1, 2))
    b_0 = (mask * prediction * target).sum(dim=(1, 2))
    b_1 = (mask * target).sum(dim=(1, 2))

    scale = torch.zeros_like(b_0)
    shift = torch.zeros_like(b_1)

    # Invert the 2x2 system via its adjugate, only where it is non-singular.
    det = a_00 * a_11 - a_01 * a_01
    solvable = det.nonzero()
    scale[solvable] = (a_11[solvable] * b_0[solvable] - a_01[solvable] * b_1[solvable]) / det[solvable]
    shift[solvable] = (-a_01[solvable] * b_0[solvable] + a_00[solvable] * b_1[solvable]) / det[solvable]
    return scale, shift
def reduction_batch_based(image_loss, M):
    """Average the summed per-image losses over all valid pixels in the batch.

    Args:
        image_loss: [batch] per-image summed losses.
        M: [batch] per-image valid-pixel counts.

    Returns:
        Scalar tensor, or 0 when no pixel in the batch is valid.
    """
    total_valid = torch.sum(M)
    # Guard against division by zero: an all-invalid batch also has zero loss.
    if total_valid == 0:
        return 0
    return torch.sum(image_loss) / total_valid
def reduction_image_based(image_loss, M):
    """Mean over images of each image's average per-pixel loss.

    Images with no valid pixels (M == 0) keep their loss value untouched
    (which is 0 by construction upstream). Note: `image_loss` is modified
    in place, matching the original MiDaS implementation.
    """
    has_valid = M.nonzero()
    image_loss[has_valid] = image_loss[has_valid] / M[has_valid]
    return torch.mean(image_loss)
def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked squared-error loss, reduced by `reduction`.

    The per-image loss is sum(mask * (prediction - target)^2); the reduction
    normalises by 2 * (#valid pixels), as in MiDaS.
    """
    residual = prediction - target
    per_image = torch.sum(mask * residual * residual, (1, 2))
    valid_counts = torch.sum(mask, (1, 2))
    return reduction(per_image, 2 * valid_counts)
def gradient_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked first-order gradient-matching loss (MiDaS regulariser).

    Penalises the absolute horizontal and vertical finite differences of the
    masked residual, counting a difference only where both neighbouring
    pixels are valid.
    """
    valid_counts = torch.sum(mask, (1, 2))
    masked_diff = mask * (prediction - target)

    # Horizontal differences; valid only where both adjacent pixels are valid.
    grad_x = torch.abs(masked_diff[:, :, 1:] - masked_diff[:, :, :-1])
    grad_x = grad_x * (mask[:, :, 1:] * mask[:, :, :-1])

    # Vertical differences.
    grad_y = torch.abs(masked_diff[:, 1:, :] - masked_diff[:, :-1, :])
    grad_y = grad_y * (mask[:, 1:, :] * mask[:, :-1, :])

    per_image = torch.sum(grad_x, (1, 2)) + torch.sum(grad_y, (1, 2))
    return reduction(per_image, valid_counts)
class MSELoss(nn.Module):
    """Masked MSE loss with a configurable reduction.

    `reduction='batch-based'` averages over all valid pixels in the batch;
    any other value selects the image-based reduction.
    """

    def __init__(self, reduction='batch-based'):
        super().__init__()
        self.__reduction = (
            reduction_batch_based if reduction == 'batch-based'
            else reduction_image_based)

    def forward(self, prediction, target, mask):
        return mse_loss(prediction, target, mask, reduction=self.__reduction)
class GradientLoss(nn.Module):
    """Multi-scale gradient-matching loss (MiDaS).

    Sums `gradient_loss` over `scales` octaves, subsampling the inputs by a
    stride of 2**scale at each level.
    """

    def __init__(self, scales=4, reduction='batch-based'):
        super().__init__()
        self.__reduction = (
            reduction_batch_based if reduction == 'batch-based'
            else reduction_image_based)
        self.__scales = scales

    def forward(self, prediction, target, mask):
        total = 0
        for scale in range(self.__scales):
            step = 2 ** scale
            total += gradient_loss(
                prediction[:, ::step, ::step],
                target[:, ::step, ::step],
                mask[:, ::step, ::step],
                reduction=self.__reduction)
        return total
class ScaleAndShiftInvariantLoss(nn.Module):
    """MiDaS scale-and-shift-invariant loss: masked MSE after per-batch
    least-squares alignment, plus an optional multi-scale gradient regulariser.

    Args:
        alpha: weight of the gradient regulariser (skipped when <= 0).
        scales: number of octaves for the gradient term.
        reduction: 'batch-based' or image-based reduction (see MSELoss).
        ema_scale_shift: if True, keep an exponential moving average of the
            shared scale/shift in registered buffers (only used together with
            share_scale_shift=True in forward).
        momentum: EMA momentum for the scale/shift buffers.
        detach_scale_shift: if True, stop gradients through the fitted
            scale/shift before applying them.
    """
    def __init__(self, alpha=0.5, scales=4, reduction='batch-based', ema_scale_shift=False, momentum=0.9, detach_scale_shift=False):
        super().__init__()
        self.__data_loss = MSELoss(reduction=reduction)
        self.__regularization_loss = GradientLoss(scales=scales, reduction=reduction)
        self.__alpha = alpha
        # Last aligned prediction, exposed via the `prediction_ssi` property.
        self.__prediction_ssi = None
        self.ema_scale_shift = ema_scale_shift
        self.detach_scale_shift = detach_scale_shift
        self.momentum = momentum
        if self.ema_scale_shift:
            # Buffers start at 0, which doubles as the "uninitialised" marker.
            self.register_buffer('scale', torch.tensor([0]).float())
            self.register_buffer('shift', torch.tensor([0]).float())
    def forward(self, prediction, target, mask, share_scale_shift=False):
        if share_scale_shift:
            # Fit a single scale/shift over the whole batch by flattening it
            # into one pseudo-image.
            prediction_ = prediction.view(1, -1, prediction.size(-1))
            target_ = target.view(1, -1, target.size(-1))
            mask_ = mask.view(1, -1, mask.size(-1))
            scale_, shift_ = compute_scale_and_shift(prediction_, target_, mask_)
            if self.detach_scale_shift:
                scale_ = scale_.detach()
                shift_ = shift_.detach()
            if self.ema_scale_shift:
                # Seed the EMA buffers on first use (they start at exactly 0);
                # the blend below is then a no-op on that first step.
                if self.scale.item() == 0:
                    self.scale.data = scale_
                if self.shift.item() == 0:
                    self.shift.data = shift_
                self.scale.data = self.momentum * self.scale.data + (1 - self.momentum) * scale_
                self.shift.data = self.momentum * self.shift.data + (1 - self.momentum) * shift_
                scale = self.scale.expand(prediction.size(0))
                shift = self.shift.expand(prediction.size(0))
            else:
                scale = scale_.expand(prediction.size(0))
                shift = shift_.expand(prediction.size(0))
        else:
            # Independent per-image alignment.
            scale, shift = compute_scale_and_shift(prediction, target, mask)
        self.__prediction_ssi = scale.view(-1, 1, 1) * prediction + shift.view(-1, 1, 1)
        total = self.__data_loss(self.__prediction_ssi, target, mask)
        if self.__alpha > 0:
            total += self.__alpha * self.__regularization_loss(self.__prediction_ssi, target, mask)
        return total
    def __get_prediction_ssi(self):
        return self.__prediction_ssi
    prediction_ssi = property(__get_prediction_ssi)
# end copy | 45,758 | 36.848635 | 132 | py |
Voxurf | Voxurf-main/lib/ref_utils.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
# from . import math
# import jax.numpy as jnp
import numpy as np
import torch
def reflect(viewdirs, normals):
    """Reflect view directions about normals.

    The reflection of a vector v about a unit vector n is a vector u such that
    dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
    equations is u = 2 dot(n, v) n - v.

    Args:
        viewdirs: [..., 3] array of view directions.
        normals: [..., 3] array of normal directions (assumed unit vectors).

    Returns:
        [..., 3] array of reflection directions.
    """
    # The original used `jnp`, whose import is commented out at the top of this
    # module, so calling it raised NameError. Written with array methods so it
    # works for both numpy arrays and torch tensors.
    dots = (normals * viewdirs).sum(-1)
    return 2.0 * dots[..., None] * normals - viewdirs
def l2_normalize(x, eps=np.finfo(np.float32).eps):
    """Normalize x to unit length along the last axis.

    `eps` bounds the squared norm away from zero so an all-zero vector does
    not cause a division by zero. (The original default evaluated
    `jnp.finfo(...)` at definition time with the jax import commented out,
    which made this module fail to import.)
    """
    return x / np.sqrt(np.maximum(np.sum(x ** 2, axis=-1, keepdims=True), eps))
def compute_weighted_mae(weights, normals, normals_gt):
    """Compute the weighted mean angular error in degrees (unit normals assumed).

    Ported from the unusable `jnp` version (the jax import is commented out in
    this module, so the original raised NameError when called).
    """
    # Clip dot products just inside [-1, 1] so arccos never receives an
    # out-of-range value due to floating-point round-off.
    one_eps = 1 - np.finfo(np.float32).eps
    cos_angle = np.clip((normals * normals_gt).sum(-1), -one_eps, one_eps)
    return (weights * np.arccos(cos_angle)).sum() / weights.sum() * 180.0 / np.pi
def generalized_binomial_coeff(a, k):
    """Compute the generalized binomial coefficient a(a-1)...(a-k+1) / k!."""
    import math  # local import: the np.math alias was removed in NumPy 1.25
    return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
    """Compute associated Legendre polynomial coefficients.

    Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
    (l, m)th associated Legendre polynomial, P_l^m(cos(theta)).

    Args:
        l: associated Legendre polynomial degree.
        m: associated Legendre polynomial order.
        k: power of cos(theta).

    Returns:
        A float, the coefficient of the term corresponding to the inputs.
    """
    import math  # local import: the np.math alias was removed in NumPy 1.25
    return ((-1)**m * 2**l * math.factorial(l) / math.factorial(k) /
            math.factorial(l - k - m) *
            generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l))
def sph_harm_coeff(l, m, k):
    """Compute spherical harmonic coefficients (normalisation x Legendre term)."""
    import math  # local import: the np.math alias was removed in NumPy 1.25
    return (np.sqrt(
        (2.0 * l + 1.0) * math.factorial(l - m) /
        (4.0 * np.pi * math.factorial(l + m))) * assoc_legendre_coeff(l, m, k))
def get_ml_array(deg_view):
    """List every (m, l) pair used by the encoding as a [2, n_pairs] array.

    The degrees are l = 2**i for i in [0, deg_view); only nonnegative orders m
    are included, since real and imaginary parts are split downstream.
    """
    ml_pairs = [(m, 2 ** i) for i in range(deg_view) for m in range(2 ** i + 1)]
    return np.array(ml_pairs).T
def generate_ide_fn(deg_view):
    """Generate integrated directional encoding (IDE) function.

    This function returns a function that computes the integrated directional
    encoding from Equations 6-8 of arxiv.org/abs/2112.03907.

    The original body used `jnp` and a jax `math.matmul` helper whose imports
    are commented out at the top of this module, so calling the returned
    closure raised NameError; it is ported here to NumPy (which this module
    does import) with identical mathematics.

    Args:
        deg_view: number of spherical harmonics degrees to use.

    Returns:
        A function for evaluating integrated directional encoding.

    Raises:
        ValueError: if deg_view is larger than 5.
    """
    if deg_view > 5:
        raise ValueError('Only deg_view of at most 5 is numerically stable.')
    ml_array = get_ml_array(deg_view)
    l_max = 2**(deg_view - 1)
    # Create a matrix corresponding to ml_array holding all coefficients, which,
    # when multiplied (from the right) by the z coordinate Vandermonde matrix,
    # results in the z component of the encoding.
    mat = np.zeros((l_max + 1, ml_array.shape[1]))
    for i, (m, l) in enumerate(ml_array.T):
        for k in range(l - m + 1):
            mat[k, i] = sph_harm_coeff(l, m, k)

    def integrated_dir_enc_fn(xyz, kappa_inv):
        """Function returning integrated directional encoding (IDE).

        Args:
            xyz: [..., 3] array of Cartesian coordinates of directions.
            kappa_inv: [..., 1] reciprocal of the concentration parameter of
                the von Mises-Fisher distribution.

        Returns:
            An array with the resulting IDE (real parts then imaginary parts).
        """
        x = xyz[..., 0:1]
        y = xyz[..., 1:2]
        z = xyz[..., 2:3]
        # Compute z Vandermonde matrix (powers z^0 .. z^l_max).
        vmz = np.concatenate([z**i for i in range(mat.shape[0])], axis=-1)
        # Compute x+iy Vandermonde matrix.
        vmxy = np.concatenate([(x + 1j * y)**m for m in ml_array[0, :]], axis=-1)
        # Get spherical harmonics.
        sph_harms = vmxy * np.matmul(vmz, mat)
        # Apply attenuation function using the von Mises-Fisher distribution
        # concentration parameter, kappa.
        sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
        ide = sph_harms * np.exp(-sigma * kappa_inv)
        # Split into real and imaginary parts and return.
        return np.concatenate([np.real(ide), np.imag(ide)], axis=-1)

    return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
    """Generate a (non-integrated) directional encoding (DE) function.

    Args:
        deg_view: number of spherical harmonics degrees to use.

    Returns:
        A function for evaluating directional encoding.
    """
    ide_fn = generate_ide_fn(deg_view)

    def dir_enc_fn(xyz):
        """Directional encoding: the IDE evaluated with zero kappa_inv."""
        return ide_fn(xyz, torch.zeros_like(xyz[..., :1]))

    return dir_enc_fn
return dir_enc_fn | 6,003 | 35.609756 | 84 | py |
Voxurf | Voxurf-main/lib/dtu_eval.py | # NeuralWarp All rights reseved to Thales LAS and ENPC.
#
# This code is freely available for academic use only and Provided “as is” without any warranty.
#
# Modification are allowed for academic research provided that the following conditions are met :
# * Redistributions of source code or any format must retain the above copyright notice and this list of conditions.
# * Neither the name of Thales LAS and ENPC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# adapted from https://github.com/jzhangbs/DTUeval-python
import numpy as np
import sklearn.neighbors as skln
from tqdm import tqdm
from scipy.io import loadmat
import multiprocessing as mp
import trimesh
import os
def sample_single_tri(input_):
    """Sample interior points of one triangle on a regular barycentric grid.

    Args:
        input_: tuple (n1, n2, v1, v2, tri_vert) where n1/n2 are (float)
            sample counts along each edge, v1/v2 are [1, 3] edge vectors and
            tri_vert is the [1, 3] base vertex.

    Returns:
        [m, 3] array of sampled points strictly inside the triangle.
    """
    n1, n2, v1, v2, tri_vert = input_
    # Grid of cell centres, normalised to [0, 1] along each edge direction.
    grid = np.mgrid[:n1 + 1, :n2 + 1]
    grid += 0.5
    grid[0] /= max(n1, 1e-7)
    grid[1] /= max(n2, 1e-7)
    grid = np.transpose(grid, (1, 2, 0))
    # Keep barycentric pairs strictly inside the triangle (u + v < 1).
    bary = grid[grid.sum(axis=-1) < 1]
    return v1 * bary[:, :1] + v2 * bary[:, 1:] + tri_vert
def write_vis_pcd(file, points, colors):
    """Write a coloured point cloud to `file` using Open3D.

    Args:
        file: output path (format inferred by Open3D, e.g. .ply).
        points: [n, 3] array of point positions.
        colors: [n, 3] array of RGB colours in [0, 1].
    """
    import open3d as o3d  # local import keeps open3d an optional dependency
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(points)
    cloud.colors = o3d.utility.Vector3dVector(colors)
    o3d.io.write_point_cloud(file, cloud)
def eval(in_file, scene, eval_dir, dataset_dir=os.path.join('data', 'DTU'), suffix="", max_dist = 20, use_o3d=False, runtime=False):
    """DTU mesh evaluation (adapted from jzhangbs/DTUeval-python).

    Samples a dense point cloud from the mesh at `in_file`, downsamples it,
    masks it against the per-scan observation mask, and computes chamfer-style
    accuracy (data->stl) and completeness (stl->data) against the ground-truth
    STL point cloud, writing error visualisations and a result text file.

    NOTE(review): this shadows the builtin `eval`; callers import it by name.
    It also writes a hard-coded "tmp.ply" in the current working directory.

    Args:
        in_file: path to the mesh to evaluate.
        scene: DTU scan id (converted to int).
        eval_dir: output directory for result/vis files.
        dataset_dir: DTU root containing ObsMask/ and Points/stl/.
        suffix: appended to output file names.
        max_dist: distances above this are ignored in the means.
        use_o3d: load with open3d and write vis point clouds; else trimesh.
        runtime: faster, coarser settings (larger downsample radius,
            subsampled ground truth).

    Returns:
        (mean_d2s, mean_s2d, overall) — accuracy, completeness, and their mean.
    """
    # default dtu values
    scene = int(scene)
    patch = 60
    thresh = 0.2  # downsample density (radius for the poisson-disk-like filter)
    # max_dist = 20
    if runtime:
        thresh = 0.5
    pbar = tqdm(total=9)
    pbar.set_description('read data mesh')
    if use_o3d:
        import open3d as o3d
        # a liiiittle bit difference in two loaders, which could be ignored temporally
        # use open3d
        data_mesh = o3d.io.read_triangle_mesh(str(in_file))
        data_mesh.remove_unreferenced_vertices()
        mp.freeze_support()
        vertices = np.asarray(data_mesh.vertices)
        triangles = np.asarray(data_mesh.triangles)
        tri_vert = vertices[triangles]
    else:
        # use trimesh
        data_mesh = trimesh.load(in_file)
        data_mesh.remove_unreferenced_vertices()
        vertices = np.asarray(data_mesh.vertices)
        triangles = np.asarray(data_mesh.faces)
        tri_vert = vertices[triangles]
    pbar.update(1)
    pbar.set_description('sample pcd from mesh')
    # Per-triangle edge vectors/lengths; degenerate (zero-area) faces dropped.
    v1 = tri_vert[:, 1] - tri_vert[:, 0]
    v2 = tri_vert[:, 2] - tri_vert[:, 0]
    l1 = np.linalg.norm(v1, axis=-1, keepdims=True)
    l2 = np.linalg.norm(v2, axis=-1, keepdims=True)
    area2 = np.linalg.norm(np.cross(v1, v2), axis=-1, keepdims=True)
    non_zero_area = (area2 > 0)[:, 0]
    l1, l2, area2, v1, v2, tri_vert = [
        arr[non_zero_area] for arr in [l1, l2, area2, v1, v2, tri_vert]
    ]
    # Sample counts per edge so point spacing roughly matches `thresh`.
    thr = thresh * np.sqrt(l1 * l2 / area2)
    n1 = np.floor(l1 / thr)
    n2 = np.floor(l2 / thr)
    with mp.Pool() as mp_pool:
        new_pts = mp_pool.map(sample_single_tri,
                              ((n1[i, 0], n2[i, 0], v1[i:i + 1], v2[i:i + 1], tri_vert[i:i + 1, 0]) for i in
                               range(len(n1))), chunksize=1024)
    new_pts = np.concatenate(new_pts, axis=0)
    data_pcd = np.concatenate([vertices, new_pts], axis=0)
    pbar.update(1)
    pbar.set_description('random shuffle pcd index')
    shuffle_rng = np.random.default_rng()
    shuffle_rng.shuffle(data_pcd, axis=0)
    pbar.update(1)
    pbar.set_description('downsample pcd')
    # Greedy radius-based downsampling: keep a point, drop all its neighbours
    # within `thresh`.
    nn_engine = skln.NearestNeighbors(n_neighbors=1, radius=thresh, algorithm='kd_tree', n_jobs=-1)
    nn_engine.fit(data_pcd)
    rnn_idxs = nn_engine.radius_neighbors(data_pcd, radius=thresh, return_distance=False)
    mask = np.ones(data_pcd.shape[0], dtype=np.bool_)
    for curr, idxs in enumerate(rnn_idxs):
        if mask[curr]:
            mask[idxs] = 0
            mask[curr] = 1
    data_down = data_pcd[mask]
    # NOTE(review): writes to the current working directory, not eval_dir.
    trimesh.PointCloud(data_down).export("tmp.ply", "ply")
    pbar.update(1)
    pbar.set_description('masking data pcd')
    # Keep only points inside the official DTU observation mask volume.
    obs_mask_file = loadmat(os.path.join(dataset_dir, 'ObsMask', f'ObsMask{scene}_10.mat'))
    ObsMask, BB, Res = [obs_mask_file[attr] for attr in ['ObsMask', 'BB', 'Res']]
    BB = BB.astype(np.float32)
    inbound = ((data_down >= BB[:1] - patch) & (data_down < BB[1:] + patch * 2)).sum(axis=-1) == 3
    data_in = data_down[inbound]
    data_grid = np.around((data_in - BB[:1]) / Res).astype(np.int32)
    grid_inbound = ((data_grid >= 0) & (data_grid < np.expand_dims(ObsMask.shape, 0))).sum(axis=-1) == 3
    data_grid_in = data_grid[grid_inbound]
    in_obs = ObsMask[data_grid_in[:, 0], data_grid_in[:, 1], data_grid_in[:, 2]].astype(np.bool_)
    data_in_obs = data_in[grid_inbound][in_obs]
    pbar.update(1)
    pbar.set_description('read STL pcd')
    if use_o3d:
        # use open3d
        stl_pcd = o3d.io.read_point_cloud(os.path.join(dataset_dir, 'Points', 'stl', f'stl{scene:03}_total.ply'))
        stl = np.asarray(stl_pcd.points)
    else:
        # use trimesh
        stl = trimesh.load(os.path.join(dataset_dir, 'Points', 'stl', f'stl{scene:03}_total.ply'))
    if runtime:
        # Subsample the ground truth to ~2x the evaluated point count.
        num_gt = data_in_obs.shape[0] * 2
        skip = stl.shape[0] // num_gt
        stl = stl[::skip]
    else:
        stl = stl[::1]
    pbar.update(1)
    pbar.set_description('compute data2stl')
    # Accuracy: mean nearest-neighbour distance from mesh samples to STL,
    # ignoring outliers beyond max_dist.
    nn_engine.fit(stl)
    dist_d2s, idx_d2s = nn_engine.kneighbors(data_in_obs, n_neighbors=1, return_distance=True)
    mean_d2s = dist_d2s[dist_d2s < max_dist].mean()
    pbar.update(1)
    pbar.set_description('compute stl2data')
    # Completeness: STL points above the scan's ground plane to mesh samples.
    ground_plane = loadmat(os.path.join(dataset_dir, 'ObsMask', f'Plane{scene}.mat'))['P']
    stl_hom = np.concatenate([stl, np.ones_like(stl[:, :1])], -1)
    above = (ground_plane.reshape((1, 4)) * stl_hom).sum(-1) > 0
    stl_above = stl[above]
    nn_engine.fit(data_in)
    dist_s2d, idx_s2d = nn_engine.kneighbors(stl_above, n_neighbors=1, return_distance=True)
    mean_s2d = dist_s2d[dist_s2d < max_dist].mean()
    pbar.update(1)
    pbar.set_description('visualize error')
    # Colour-code errors: white (0) -> red (vis_dist), green for outliers,
    # blue for points excluded from the evaluation.
    vis_dist = 1
    R = np.array([[1, 0, 0]], dtype=np.float64)
    G = np.array([[0, 1, 0]], dtype=np.float64)
    B = np.array([[0, 0, 1]], dtype=np.float64)
    W = np.array([[1, 1, 1]], dtype=np.float64)
    data_color = np.tile(B, (data_down.shape[0], 1))
    data_alpha = dist_d2s.clip(max=vis_dist) / vis_dist
    data_color[np.where(inbound)[0][grid_inbound][in_obs]] = R * data_alpha + W * (1 - data_alpha)
    data_color[np.where(inbound)[0][grid_inbound][in_obs][dist_d2s[:, 0] >= max_dist]] = G
    stl_color = np.tile(B, (stl.shape[0], 1))
    stl_alpha = dist_s2d.clip(max=vis_dist) / vis_dist
    stl_color[np.where(above)[0]] = R * stl_alpha + W * (1 - stl_alpha)
    stl_color[np.where(above)[0][dist_s2d[:, 0] >= max_dist]] = G
    if use_o3d:
        write_vis_pcd(f'{eval_dir}/vis_{scene:03}_d2s{suffix}.ply', data_down, data_color)
        write_vis_pcd(f'{eval_dir}/vis_{scene:03}_s2d{suffix}.ply', stl, stl_color)
    pbar.update(1)
    pbar.close()
    over_all = (mean_d2s + mean_s2d) / 2
    # print(" [ d2s: {:.3f} | s2d: {:.3f} | mean: {:.3f} ]".format(mean_d2s, mean_s2d, over_all))
    with open(f'{eval_dir}/result{suffix}.txt', 'w') as f:
        f.write(f'{mean_d2s} {mean_s2d} {over_all}')
    return mean_d2s, mean_s2d, over_all
| 7,399 | 38.361702 | 193 | py |
Voxurf | Voxurf-main/lib/load_co3d.py | import os
import json
import gzip
import glob
import torch
import numpy as np
import imageio
import torch.nn.functional as F
import cv2
def load_co3d_data(cfg):
    """Load one CO3D sequence: images, masks, poses, intrinsics and splits.

    Args:
        cfg: config with attributes `annot_path` (gzipped frame-annotation
            json), `split_path` (train/test split json), `sequence_name` and
            `datadir` (image/mask root).

    Returns:
        (imgs, masks, poses, render_poses, [H, W, focal], Ks, i_split) where
        poses are camera-to-world 4x4 matrices, Ks are per-frame 3x3
        intrinsics, and i_split = [train_ids, test_ids, render_ids].
    """
    # Load frame annotations for the requested sequence only.
    with gzip.open(cfg.annot_path, 'rt', encoding='utf8') as zipfile:
        annot = [v for v in json.load(zipfile) if v['sequence_name'] == cfg.sequence_name]
    with open(cfg.split_path) as f:
        split = json.load(f)
    # Split keys containing 'known' are training frames; the rest are test.
    train_im_path = set()
    test_im_path = set()
    for k, lst in split.items():
        for v in lst:
            if v[0] == cfg.sequence_name:
                if 'known' in k:
                    train_im_path.add(v[-1])
                else:
                    test_im_path.add(v[-1])
    assert len(annot) == len(train_im_path) + len(test_im_path), 'Mismatch: '\
            f'{len(annot)} == {len(train_im_path) + len(test_im_path)}'
    # Load per-frame data, skipping frames with empty masks.
    imgs = []
    masks = []
    poses = []
    Ks = []
    i_split = [[], []]
    remove_empty_masks_cnt = [0, 0]
    for i, meta in enumerate(annot):
        im_fname = meta['image']['path']
        assert im_fname in train_im_path or im_fname in test_im_path
        sid = 0 if im_fname in train_im_path else 1
        if meta['mask']['mass'] == 0:
            remove_empty_masks_cnt[sid] += 1
            continue
        im_path = os.path.join(cfg.datadir, im_fname)
        mask_path = os.path.join(cfg.datadir, meta['mask']['path'])
        mask = imageio.imread(mask_path) / 255.
        if mask.max() < 0.5:
            remove_empty_masks_cnt[sid] += 1
            continue
        # Build camera-to-world pose by inverting the annotated [R|T] extrinsics.
        Rt = np.concatenate([meta['viewpoint']['R'], np.array(meta['viewpoint']['T'])[:,None]], 1)
        pose = np.linalg.inv(np.concatenate([Rt, [[0,0,0,1]]]))
        imgs.append(imageio.imread(im_path) / 255.)
        masks.append(mask)
        poses.append(pose)
        assert imgs[-1].shape[:2] == tuple(meta['image']['size'])
        # Convert CO3D's NDC-style principal point / focal length to pixels.
        half_image_size_wh = np.float32(meta['image']['size'][::-1]) * 0.5
        principal_point = np.float32(meta['viewpoint']['principal_point'])
        focal_length = np.float32(meta['viewpoint']['focal_length'])
        principal_point_px = -1.0 * (principal_point - 1.0) * half_image_size_wh
        focal_length_px = focal_length * half_image_size_wh
        Ks.append(np.array([
            [focal_length_px[0], 0, principal_point_px[0]],
            [0, focal_length_px[1], principal_point_px[1]],
            [0, 0, 1],
        ]))
        i_split[sid].append(len(imgs)-1)
    if sum(remove_empty_masks_cnt) > 0:
        print('load_co3d_data: removed %d train / %d test due to empty mask' % tuple(remove_empty_masks_cnt))
    print(f'load_co3d_data: num images {len(i_split[0])} train / {len(i_split[1])} test')
    imgs = np.array(imgs)
    masks = np.array(masks)
    poses = np.stack(poses, 0)
    Ks = np.stack(Ks, 0)
    # Render poses mirror the test split.
    render_poses = poses[i_split[-1]]
    i_split.append(i_split[-1])
    # Visualization hwf: mean image size and mean focal length.
    H, W = np.array([im.shape[:2] for im in imgs]).mean(0).astype(int)
    focal = Ks[:,[0,1],[0,1]].mean()
    return imgs, masks, poses, render_poses, [H, W, focal], Ks, i_split
| 3,135 | 35.465116 | 109 | py |
Voxurf | Voxurf-main/lib/load_tankstemple.py | import os
import glob
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
def load_tankstemple_data(basedir):
    """Load a Tanks and Temples scene (pose/*.txt + rgb/*.png layout).

    The leading digit of each rgb filename selects the split (0 = train,
    1 = test). The training split is then reduced to the 50 cameras nearest
    to the first pose ("delete single side" heuristic below).

    Returns:
        (imgs, poses, render_poses, [H, W, focal], K, i_split) with
        i_split = [train_ids, test_ids, render_ids].
    """
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
    all_poses = []
    all_imgs = []
    i_split = [[], []]
    for i, (pose_path, rgb_path) in enumerate(zip(pose_paths, rgb_paths)):
        # First character of the filename encodes the split index.
        i_set = int(os.path.split(rgb_path)[-1][0])
        all_poses.append(np.loadtxt(pose_path).astype(np.float32))
        all_imgs.append((imageio.imread(rgb_path) / 255.).astype(np.float32))
        i_split[i_set].append(i)
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    i_split.append(i_split[-1])
    path_intrinsics = os.path.join(basedir, 'intrinsics.txt')
    H, W = imgs[0].shape[:2]
    K = np.loadtxt(path_intrinsics)
    focal = float(K[0,0])
    """ delete single side """
    # Keep the 50 training cameras closest to the first camera position.
    ref_pos = poses[0][:,-1]
    dist = ((poses[:,:,-1] - ref_pos[None]) ** 2).sum(-1)
    i_select = np.argsort(dist)[:50]
    i_split[0] = i_select.tolist()
    # Use an explicit fly-through trajectory when provided, else test poses.
    path_traj = os.path.join(basedir, 'test_traj.txt')
    if os.path.isfile(path_traj):
        render_poses = torch.Tensor(np.loadtxt(path_traj).reshape(-1,4,4).astype(np.float32))
    else:
        render_poses = poses[i_split[-1]]
    return imgs, poses, render_poses, [H, W, focal], K, i_split
def normalize(x):
    """Return `x` scaled to unit Euclidean length."""
    length = np.linalg.norm(x)
    return x / length
def load_tankstemple_data_bound(basedir, movie_render_kwargs=None):
    """Load a Tanks and Temples scene and build a circular fly-through path.

    Same data layout as load_tankstemple_data (pose/*.txt + rgb/*.png, leading
    filename digit selecting the split), but render poses are generated on a
    circle around the camera centroid instead of reusing the test poses.

    Args:
        basedir: scene directory.
        movie_render_kwargs: optional dict with keys 'scale_r', 'shift_x',
            'shift_y', 'shift_z', 'pitch_deg', 'flip_up_vec' controlling the
            generated orbit. (Previously a mutable `{}` default argument —
            replaced with the None sentinel; behavior is unchanged.)

    Returns:
        (imgs, poses, render_poses, [H, W, focal], K, i_split).
    """
    if movie_render_kwargs is None:
        movie_render_kwargs = {}
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
    all_poses = []
    all_imgs = []
    i_split = [[], []]
    for i, (pose_path, rgb_path) in enumerate(zip(pose_paths, rgb_paths)):
        # First character of the filename encodes the split index.
        i_set = int(os.path.split(rgb_path)[-1][0])
        all_poses.append(np.loadtxt(pose_path).astype(np.float32))
        all_imgs.append((imageio.imread(rgb_path) / 255.).astype(np.float32))
        i_split[i_set].append(i)
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    i_split.append(i_split[-1])
    path_intrinsics = os.path.join(basedir, 'intrinsics.txt')
    H, W = imgs[0].shape[:2]
    K = np.loadtxt(path_intrinsics)
    focal = float(K[0,0])
    ### generate spiral poses for rendering fly-through movie
    centroid = poses[:,:3,3].mean(0)
    radcircle = movie_render_kwargs.get('scale_r', 1.0) * np.linalg.norm(poses[:,:3,3] - centroid, axis=-1).mean()
    centroid[0] += movie_render_kwargs.get('shift_x', 0)
    centroid[1] += movie_render_kwargs.get('shift_y', 0)
    centroid[2] += movie_render_kwargs.get('shift_z', 0)
    new_up_rad = movie_render_kwargs.get('pitch_deg', 0) * np.pi / 180
    target_y = radcircle * np.tan(new_up_rad)
    render_poses = []
    for th in np.linspace(0., 2.*np.pi, 200):
        camorigin = np.array([radcircle * np.cos(th), 0, radcircle * np.sin(th)])
        if movie_render_kwargs.get('flip_up_vec', False):
            up = np.array([0,-1.,0])
        else:
            up = np.array([0,1.,0])
        # Orthonormal camera basis looking at the centroid.
        vec2 = normalize(camorigin)
        vec0 = normalize(np.cross(vec2, up))
        vec1 = normalize(np.cross(vec2, vec0))
        pos = camorigin + centroid
        # rotate to align with new pitch rotation
        lookat = -vec2
        lookat[1] = target_y
        lookat = normalize(lookat)
        lookat *= -1
        vec2 = -lookat
        vec1 = normalize(np.cross(vec2, vec0))
        p = np.stack([vec0, vec1, vec2, pos], 1)
        render_poses.append(p)
    render_poses = np.stack(render_poses, 0)
    render_poses = np.concatenate([render_poses, np.broadcast_to(poses[0,:3,-1:], render_poses[:,:3,-1:].shape)], -1)
    return imgs, poses, render_poses, [H, W, focal], K, i_split
| 3,813 | 32.752212 | 117 | py |
Voxurf | Voxurf-main/lib/load_scannet.py | import os
import torch
import torch.nn.functional as F
import numpy as np
from glob import glob
import cv2
import random
import imageio
import skimage
def load_rgb(path, normalize_rgb = False):
    """Load an image as a float32 array scaled to [0, 1].

    NOTE(review): `normalize_rgb` is currently ignored — its [-1, 1]
    rescaling is commented out below; confirm whether any caller relies
    on passing True.
    """
    img = imageio.imread(path)
    # img_as_float32 maps integer images to [0, 1].
    img = skimage.img_as_float32(img)
    # if normalize_rgb: # [-1,1] --> [0,1]
    #     img -= 0.5
    #     img *= 2.
    return img
def load_K_Rt_from_P(filename, P=None):
    """Decompose a 3x4 projection matrix into intrinsics and camera-to-world pose.

    Args:
        filename: text file containing the projection matrix (read only when
            P is None); an optional header line is skipped.
        P: optional [3, 4] projection matrix, bypassing the file read.

    Returns:
        (intrinsics, pose): 4x4 intrinsic matrix (K embedded in the top-left
        3x3, normalised so K[2, 2] == 1) and a 4x4 camera-to-world pose.
    """
    if P is None:
        # Fix: the original leaked the file handle via a bare open().
        with open(filename) as f:
            lines = f.read().splitlines()
        if len(lines) == 4:
            lines = lines[1:]
        # Keep the first four whitespace-separated tokens of each row.
        lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)]
        P = np.asarray(lines).astype(np.float32).squeeze()
    out = cv2.decomposeProjectionMatrix(P)
    K = out[0]
    R = out[1]
    t = out[2]
    # Normalise the intrinsic matrix so K[2, 2] == 1.
    K = K/K[2,2]
    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K
    # cv2 returns the world-to-camera rotation and a homogeneous camera
    # centre; assemble the camera-to-world pose from them.
    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    pose[:3,3] = (t[:3] / t[3])[:,0]
    return intrinsics, pose
def glob_imgs(path):
    """Collect all image files (png/jpg in either case) directly under `path`."""
    patterns = ('*.png', '*.jpg', '*.JPEG', '*.JPG')
    found = []
    for pattern in patterns:
        found.extend(glob(os.path.join(path, pattern)))
    return found
def glob_data(data_dir):
    """Return the sorted list of paths matching the glob pattern `data_dir`."""
    return sorted(glob(data_dir))
def load_scannet_data(data_dir, img_res=[384, 384], center_crop_type='no_crop', use_mask=False, num_views=-1):
# instance_dir = os.path.join(data_dir, 'scan{0}'.format(scan_id))
instance_dir = data_dir
total_pixels = img_res[0] * img_res[1]
img_res = img_res
num_views = num_views
assert num_views in [-1, 3, 6, 9]
assert os.path.exists(instance_dir), "Data directory is empty"
image_paths = glob_data(os.path.join('{0}'.format(instance_dir), "*_rgb.png"))
depth_paths = glob_data(os.path.join('{0}'.format(instance_dir), "*_depth.npy"))
normal_paths = glob_data(os.path.join('{0}'.format(instance_dir), "*_normal.npy"))
# mask is only used in the replica dataset as some monocular depth predictions have very large error and we ignore it
if use_mask:
mask_paths = glob_data(os.path.join('{0}'.format(instance_dir), "*_mask.npy"))
else:
mask_paths = None
n_images = len(image_paths)
cam_file = '{0}/cameras.npz'.format(instance_dir)
camera_dict = np.load(cam_file)
scale_mats = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(n_images)]
world_mats = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(n_images)]
# cam_file_2 = '{0}/cameras_sphere.npz'.format(instance_dir)
# camera_dict_2 = np.load(cam_file_2)
# scale_mats_2 = [camera_dict_2['scale_mat_%d' % idx].astype(np.float32) for idx in range(n_images)]
# world_mats_2 = [camera_dict_2['world_mat_%d' % idx].astype(np.float32) for idx in range(n_images)]
# for i in range(n_images):
# assert np.sum(np.abs(scale_mats[i] - scale_mats_2[i])) == 0
# assert np.sum(np.abs(world_mats[i] - world_mats_2[i])) == 0
intrinsics_all = []
pose_all = []
for scale_mat, world_mat in zip(scale_mats, world_mats):
P = world_mat @ scale_mat
P = P[:3, :4]
intrinsics, pose = load_K_Rt_from_P(None, P)
# because we do resize and center crop 384x384 when using omnidata model, we need to adjust the camera intrinsic accordingly
if center_crop_type == 'center_crop_for_replica':
scale = 384 / 680
offset = (1200 - 680 ) * 0.5
intrinsics[0, 2] -= offset
intrinsics[:2, :] *= scale
elif center_crop_type == 'center_crop_for_tnt':
scale = 384 / 540
offset = (960 - 540) * 0.5
intrinsics[0, 2] -= offset
intrinsics[:2, :] *= scale
elif center_crop_type == 'center_crop_for_dtu':
scale = 384 / 1200
offset = (1600 - 1200) * 0.5
intrinsics[0, 2] -= offset
intrinsics[:2, :] *= scale
elif center_crop_type == 'padded_for_dtu':
scale = 384 / 1200
offset = 0
intrinsics[0, 2] -= offset
intrinsics[:2, :] *= scale
elif center_crop_type == 'no_crop': # for scannet dataset, we already adjust the camera intrinsic duing preprocessing so nothing to be done here
pass
else:
raise NotImplementedError
intrinsics_all.append(intrinsics)
pose_all.append(pose)
rgb_images = []
for path in image_paths:
rgb = load_rgb(path)
rgb_images.append(rgb)
imgs = np.stack(rgb_images, 0)
poses = np.stack(pose_all, 0)
K = np.stack(intrinsics_all, 0)
K = intrinsics_all[0]
H, W = imgs[0].shape[:2]
focal = intrinsics_all[0][0,0]
depth_images = []
normal_images = []
for dpath, npath in zip(depth_paths, normal_paths):
depth = np.load(dpath)
depth_images.append(depth)
normal = np.load(npath)
# important as the output of omnidata is normalized
normal = normal * 2. - 1.
normal = np.transpose(normal, (1,2,0))
normal_images.append(normal)
depth_images = np.stack(depth_images, 0)
normal_images = np.stack(normal_images, 0)
# load mask
mask_images = []
if mask_paths is None:
for rgb in rgb_images:
mask = np.ones_like(rgb[:, :, :1])
mask_images.append(mask)
else:
for path in mask_paths:
mask = np.load(path)
mask_images.append(mask)
masks = np.stack(mask_images, 0)
i_split = [np.array(np.arange(len(imgs))), np.array(np.arange(0, len(imgs), 10)), np.array(np.arange(0, len(imgs), 10))]
render_poses = poses[i_split[-1]]
return imgs, poses, render_poses, [H, W, focal], K, i_split, scale_mats[0], masks, depth_images, normal_images
# if __name__ == "__main__":
# load_scannet_data('/mnt/petrelfs/wangjiaqi/VoxurF-new/data/scannet/scan1/')
| 6,003 | 31.106952 | 153 | py |
Voxurf | Voxurf-main/lib/voxurf_fine.py | import os
import time
import numpy as np
from copy import deepcopy
import cv2
import math
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.dvgo_ori import extract_geometry
from torch_scatter import segment_coo
from . import grid
from torch.utils.cpp_extension import load
parent_dir = os.path.dirname(os.path.abspath(__file__))
render_utils_cuda = load(
name='render_utils_cuda',
sources=[
os.path.join(parent_dir, path)
for path in [os.path.join('cuda', 'render_utils.cpp'), os.path.join('cuda', 'render_utils_kernel.cu')]],
verbose=True)
'''Model'''
class Voxurf(torch.nn.Module):
"""
This module is modified from DirectVoxGO https://github.com/sunset1995/DirectVoxGO/blob/main/lib/dvgo.py
"""
def __init__(self, xyz_min, xyz_max,
num_voxels=0, num_voxels_base=0,
alpha_init=None,
nearest=False,
mask_cache_path=None, mask_cache_thres=1e-3,
fast_color_thres=0,
rgbnet_dim=0, rgbnet_direct=False, rgbnet_full_implicit=False,
rgbnet_depth=3, rgbnet_width=128,
posbase_pe=5, viewbase_pe=4,
center_sdf=False, grad_feat=(1.0,), sdf_feat=(),
use_layer_norm=False,
grad_mode='interpolate',
s_ratio=2000, s_start=0.2, s_learn=False, step_start=0,
smooth_sdf=False,
smooth_ksize=0, smooth_sigma=1,
k_rgbnet_depth=3, k_res=False, k_posbase_pe=5, k_viewbase_pe=4,
k_center_sdf=False, k_grad_feat=(1.0,), k_sdf_feat=(),
smooth_scale=True, use_grad_norm=True,
use_rgb_k=True, k_detach_1=True, k_detach_2=True,
use_rgbnet_k0=False,
**kwargs):
super(Voxurf, self).__init__()
self.register_buffer('xyz_min', torch.Tensor(xyz_min))
self.register_buffer('xyz_max', torch.Tensor(xyz_max))
self.fast_color_thres = fast_color_thres
self.nearest = nearest
self.smooth_scale = smooth_scale
self.s_ratio = s_ratio
self.s_start = s_start
self.s_learn = s_learn
self.step_start = step_start
self.s_val = nn.Parameter(torch.ones(1), requires_grad=s_learn).cuda()
self.s_val.data *= s_start
self.smooth_sdf = smooth_sdf
self.sdf_init_mode = "ball_init"
# determine based grid resolution
self.num_voxels_base = num_voxels_base
self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)
# determine the density bias shift
self.alpha_init = alpha_init
self.act_shift = np.log(1/(1-alpha_init) - 1)
print('dvgo: set density bias shift to', self.act_shift)
# determine init grid resolution
self._set_grid_resolution(num_voxels)
# init density voxel grid
self.density = torch.nn.Parameter(torch.zeros([1, 1, *self.world_size]))
if self.sdf_init_mode == "ball_init":
self.sdf = grid.create_grid(
'DenseGrid', channels=1, world_size=self.world_size,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
x, y, z = np.mgrid[-1.0:1.0:self.world_size[0].item() * 1j, -1.0:1.0:self.world_size[1].item() * 1j, -1.0:1.0:self.world_size[2].item() * 1j]
self.sdf.grid.data = torch.from_numpy((x ** 2 + y ** 2 + z ** 2) ** 0.5 - 1).float()[None, None, ...]
elif self.sdf_init_mode == "random":
self.sdf = torch.nn.Parameter(torch.rand([1, 1, *self.world_size]) * 0.05) # random initialization
torch.nn.init.normal_(self.sdf, 0.0, 0.5)
else:
raise NotImplementedError
self.init_smooth_conv(smooth_ksize, smooth_sigma)
# init color representation
self.rgbnet_kwargs = {
'rgbnet_dim': rgbnet_dim, 'rgbnet_direct': rgbnet_direct,
'rgbnet_full_implicit': rgbnet_full_implicit,
'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
'posbase_pe': posbase_pe, 'viewbase_pe': viewbase_pe,
}
if rgbnet_dim <= 0:
# color voxel grid (dvgo coarse stage)
self.k0_dim = 3
self.rgbnet = None
else:
self.k0_dim = rgbnet_dim
self.k0 = grid.create_grid(
'DenseGrid', channels=self.k0_dim, world_size=self.world_size,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
self.rgbnet_direct = rgbnet_direct
self.register_buffer('posfreq', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))
self.register_buffer('viewfreq', torch.FloatTensor([(2**i) for i in range(viewbase_pe)]))
dim0 = (3+3*posbase_pe*2) + (3+3*viewbase_pe*2)
self.use_grad_norm = use_grad_norm
self.center_sdf = center_sdf
self.grad_feat = grad_feat
self.sdf_feat = sdf_feat
self.use_rgb_k = use_rgb_k
self.k_detach_1 = k_detach_1
self.k_detach_2 = k_detach_2
self.use_rgbnet_k0 = use_rgbnet_k0
self.use_layer_norm = use_layer_norm
dim0 += len(self.grad_feat) * 3
dim0 += len(self.sdf_feat) * 6
if self.use_rgbnet_k0:
dim0 += self.k0_dim
if self.center_sdf:
dim0 += 1
if not self.use_layer_norm:
self.rgbnet = nn.Sequential(
nn.Linear(dim0, rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
for _ in range(rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
else:
self.rgbnet = nn.Sequential(
nn.Linear(dim0, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True))
for _ in range(rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
nn.init.constant_(self.rgbnet[-1].bias, 0)
print('feature voxel grid', self.k0.grid.shape)
print('mlp', self.rgbnet)
# the second rgb net
self.k_res = k_res
self.k_center_sdf = k_center_sdf
self.k_grad_feat = k_grad_feat
self.k_sdf_feat = k_sdf_feat
self.register_buffer('k_posfreq', torch.FloatTensor([(2**i) for i in range(k_posbase_pe)]))
self.register_buffer('k_viewfreq', torch.FloatTensor([(2**i) for i in range(k_viewbase_pe)]))
k_dim0 = (3+3*k_posbase_pe*2) + (3+3*k_viewbase_pe*2) + self.k0_dim
if self.k_res:
k_dim0 += 3
if self.k_center_sdf:
k_dim0 += 1
k_dim0 += len(self.k_grad_feat) * 3
k_dim0 += len(self.k_sdf_feat) * 6
if not self.use_layer_norm:
self.k_rgbnet = nn.Sequential(
nn.Linear(k_dim0, rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
for _ in range(k_rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
else:
self.k_rgbnet = nn.Sequential(
nn.Linear(k_dim0, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True))
for _ in range(k_rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
nn.init.constant_(self.rgbnet[-1].bias, 0)
print('feature voxel grid', self.k0.grid.shape)
print('k_rgbnet mlp', self.k_rgbnet)
# Using the coarse geometry if provided (used to determine known free space and unknown space)
self.mask_cache_path = mask_cache_path
self.mask_cache_thres = mask_cache_thres
if mask_cache_path is not None and mask_cache_path:
self.mask_cache = MaskCache(
path=mask_cache_path,
mask_cache_thres=mask_cache_thres).to(self.xyz_min.device)
self._set_nonempty_mask()
else:
self.mask_cache = None
self.nonempty_mask = None
# grad conv to calculate gradient
self.init_gradient_conv()
self.grad_mode = grad_mode
def init_gradient_conv(self, sigma = 0):
self.grad_conv = nn.Conv3d(1,3,(3,3,3),stride=(1,1,1), padding=(1, 1, 1), padding_mode='replicate')
# fixme: a better operator?
kernel = np.asarray([
[[1,2,1],[2,4,2],[1,2,1]],
[[2,4,2],[4,8,4],[2,4,2]],
[[1,2,1],[2,4,2],[1,2,1]],
])
# sigma controls the difference between naive [-1,1] and sobel kernel
distance = np.zeros((3,3,3))
for i in range(3):
for j in range(3):
for k in range(3):
distance[i,j,k] = ((i-1)**2 + (j-1)**2 + (k-1)**2 - 1)
kernel0 = kernel * np.exp(-distance * sigma)
kernel1 = kernel0 / ( kernel0[0].sum() * 2 * self.voxel_size.item())
weight = torch.from_numpy(np.concatenate([kernel1[None] for _ in range(3)])).float()
weight[0,1,:,:] *= 0
weight[0,0,:,:] *= -1
weight[1,:,1,:] *= 0
weight[1,:,0,:] *= -1
weight[2,:,:,1] *= 0
weight[2,:,:,0] *= -1
self.grad_conv.weight.data = weight.unsqueeze(1).float()
self.grad_conv.bias.data = torch.zeros(3)
for param in self.grad_conv.parameters():
param.requires_grad = False
# smooth conv for TV
self.tv_smooth_conv = nn.Conv3d(1, 1, (3, 3, 3), stride=1, padding=1, padding_mode='replicate')
weight = torch.from_numpy(kernel0 / kernel0.sum()).float()
self.tv_smooth_conv.weight.data = weight.unsqueeze(0).unsqueeze(0).float()
self.tv_smooth_conv.bias.data = torch.zeros(1)
for param in self.tv_smooth_conv.parameters():
param.requires_grad = False
self.mask_kernel = weight.view(1, -1).float().cuda()
def _gaussian_3dconv(self, ksize=3, sigma=1):
x = np.arange(-(ksize//2),ksize//2 + 1,1)
y = np.arange(-(ksize//2),ksize//2 + 1,1)
z = np.arange(-(ksize//2),ksize//2 + 1,1)
xx, yy, zz = np.meshgrid(x,y,z)
kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
kernel = torch.from_numpy(kernel).to(self.sdf.grid)
m = nn.Conv3d(1,1,ksize,stride=1,padding=ksize//2, padding_mode='replicate')
m.weight.data = kernel[None, None, ...] / kernel.sum()
m.bias.data = torch.zeros(1)
for param in m.parameters():
param.requires_grad = False
return m
def init_smooth_conv_test_k3(self, ksize=3, sigma=0.4):
self.smooth_conv_test_k3 = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv test with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_smooth_conv_test_k5(self, ksize=5, sigma=0.4):
self.smooth_conv_test_k5 = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv test with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_smooth_conv(self, ksize=3, sigma=1):
self.smooth_sdf = ksize > 0
if self.smooth_sdf:
self.smooth_conv = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_feature_smooth_conv(self, ksize=3, sigma=1):
self.smooth_feature = ksize > 0
if self.smooth_feature:
self.feature_smooth_conv = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init feature smooth conv with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_sdf_from_sdf(self, sdf0=None, smooth=False, reduce=1., ksize=3, sigma=1., zero2neg=True):
print("\n", "- "*3 + "initing sdf from sdf" + " -"*3, "\n")
if sdf0.shape != self.sdf.grid.shape:
sdf0 = F.interpolate(sdf0, size=tuple(self.world_size), mode='trilinear', align_corners=True)
if smooth:
m = self._gaussian_3dconv(ksize, sigma)
sdf_data = m(sdf0 / reduce)
self.sdf.grid = torch.nn.Parameter(sdf_data).to(self.sdf.grid) / reduce
else:
self.sdf.grid.data = sdf0.to(self.sdf.grid) / reduce # + self.act_shift
if self.mask_cache is not None:
self._set_nonempty_mask()
if self.smooth_scale:
m = self._gaussian_3dconv(ksize=5, sigma=1)
with torch.no_grad():
self.sdf.grid = torch.nn.Parameter(m(self.sdf.grid.data)).cuda()
self.gradient = self.neus_sdf_gradient()
def init_sdf_from_density(self, smooth=False, reduce=1., ksize=3, sigma=1., zero2neg=True):
print("\n", "- "*3 + "initing sdf from density" + " -"*3, "\n")
self.s = torch.nn.Parameter(torch.ones(1)) * 10
if zero2neg:
self.density.data[self.density.data==0] = -100
if self.density.shape != self.sdf.grid.shape:
self.density.data = F.interpolate(self.density.data, size=tuple(self.world_size), mode='trilinear', align_corners=True)
if smooth:
m = self._gaussian_3dconv(ksize, sigma)
sdf_data = m(-torch.tanh(self.density.data) / reduce)
self.sdf.grid = torch.nn.Parameter(sdf_data)
else:
self.sdf.grid.data = -torch.tanh(self.density.data) / reduce # + self.act_shift
self.gradient = self.neus_sdf_gradient()
def _set_grid_resolution(self, num_voxels):
# Determine grid resolution
self.num_voxels = num_voxels
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('dvgo: voxel_size ', self.voxel_size)
print('dvgo: world_size ', self.world_size)
print('dvgo: voxel_size_base ', self.voxel_size_base)
print('dvgo: voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'nearest': self.nearest,
'mask_cache_path': self.mask_cache_path,
'mask_cache_thres': self.mask_cache_thres,
'fast_color_thres': self.fast_color_thres,
'grad_feat': self.grad_feat,
'sdf_feat': self.sdf_feat,
'k_grad_feat': self.k_grad_feat,
'k_sdf_feat': self.k_sdf_feat,
**self.rgbnet_kwargs,
}
def get_MaskCache_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'act_shift': self.act_shift,
'voxel_size_ratio': self.voxel_size_ratio,
'nearest': self.nearest
}
@torch.no_grad()
def _set_nonempty_mask(self):
# Find grid points that is inside nonempty (occupied) space
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.density.shape[2]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.density.shape[3]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.density.shape[4]),
), -1)
nonempty_mask = self.mask_cache(self_grid_xyz)[None,None].contiguous()
if hasattr(self, 'nonempty_mask'):
self.nonempty_mask = nonempty_mask
else:
self.register_buffer('nonempty_mask', nonempty_mask)
self.density[~self.nonempty_mask] = -100
self.sdf.grid[~self.nonempty_mask] = 1
@torch.no_grad()
def maskout_near_cam_vox(self, cam_o, near):
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.density.shape[2]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.density.shape[3]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.density.shape[4]),
), -1)
nearest_dist = torch.stack([
(self_grid_xyz.unsqueeze(-2) - co).pow(2).sum(-1).sqrt().amin(-1)
for co in cam_o.split(100) # for memory saving
]).amin(0)
self.density[nearest_dist[None,None] <= near] = -100
self.sdf.grid[nearest_dist[None,None] <= near] = 1
@torch.no_grad()
def scale_volume_grid(self, num_voxels):
print('dvgo: scale_volume_grid start')
ori_world_size = self.world_size
self._set_grid_resolution(num_voxels)
print('dvgo: scale_volume_grid scale world_size from', ori_world_size, 'to', self.world_size)
self.density = torch.nn.Parameter(
F.interpolate(self.density.data, size=tuple(self.world_size), mode='trilinear', align_corners=True))
self.sdf.scale_volume_grid(self.world_size)
self.k0.scale_volume_grid(self.world_size)
if self.mask_cache is not None:
self._set_nonempty_mask()
print('dvgo: scale_volume_grid finish')
def density_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.density.total_variation_add_grad(w, w, w, dense_mode)
def sdf_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.sdf.total_variation_add_grad(w, w, w, dense_mode)
def k0_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.k0.total_variation_add_grad(w, w, w, dense_mode)
def density_total_variation(self, sdf_tv=0, smooth_grad_tv=0, grad_tv=0, smooth_sdf_tv=0):
t1 = time.time()
tv = 0
if sdf_tv > 0:
tv += total_variation(self.sdf.grid, self.nonempty_mask) / 2 / self.voxel_size * sdf_tv
if smooth_grad_tv > 0:
smooth_tv_error = (self.tv_smooth_conv(self.gradient.permute(1,0,2,3,4)).detach() - self.gradient.permute(1,0,2,3,4))
smooth_tv_error = smooth_tv_error[self.nonempty_mask.repeat(3,1,1,1,1)] ** 2
tv += smooth_tv_error.mean() * smooth_grad_tv
return tv
def k0_total_variation(self, k0_tv=1., k0_grad_tv=0.):
if self.rgbnet is not None:
v = self.k0
else:
v = torch.sigmoid(self.k0)
tv = 0
if k0_tv > 0:
tv += total_variation(v, self.nonempty_mask.repeat(1,v.shape[1],1,1,1))
if k0_grad_tv > 0:
raise NotImplementedError
return tv
def activate_density(self, density, interval=None):
interval = interval if interval is not None else self.voxel_size_ratio
return 1 - torch.exp(-F.softplus(density + self.act_shift) * interval)
def neus_sdf_gradient(self, mode=None, sdf=None):
# the gradient grid from the sdf grid
if sdf is None:
sdf = self.sdf.grid
if mode is None:
mode = self.grad_mode
if mode == 'interpolate':
gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
gradient[:,0,1:-1,:,:] = (sdf[:,0,2:,:,:] - sdf[:,0,:-2,:,:]) / 2 / self.voxel_size
gradient[:,1,:,1:-1,:] = (sdf[:,0,:,2:,:] - sdf[:,0,:,:-2,:]) / 2 / self.voxel_size
gradient[:,2,:,:,1:-1] = (sdf[:,0,:,:,2:] - sdf[:,0,:,:,:-2]) / 2 / self.voxel_size
elif mode == 'grad_conv':
gradient = self.grad_conv(sdf)
elif mode == 'raw':
gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
gradient[:,0,:-1,:,:] = (sdf[:,0,1:,:,:] - sdf[:,0,:-1,:,:]) / self.voxel_size
gradient[:,1,:,:-1,:] = (sdf[:,0,:,1:,:] - sdf[:,0,:,:-1,:]) / self.voxel_size
gradient[:,2,:,:,:-1] = (sdf[:,0,:,:,1:] - sdf[:,0,:,:,:-1]) / self.voxel_size
else:
raise NotImplementedError
return gradient
def neus_alpha_from_sdf_scatter(self, viewdirs, ray_id, dist, sdf, gradients, global_step,
is_train, use_mid=True):
# force s_val value to change with global step
if is_train:
if not self.s_learn:
s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
self.s_val.data = torch.ones_like(self.s_val) * s_val
else:
s_val = self.s_val.item()
else:
s_val = 0
dirs = viewdirs[ray_id]
inv_s = torch.ones(1).cuda() / self.s_val
assert use_mid
if use_mid:
true_cos = (dirs * gradients).sum(-1, keepdim=True)
cos_anneal_ratio = 1.0
iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
F.relu(-true_cos) * cos_anneal_ratio) # always non-positive (M, 1)
sdf = sdf.unsqueeze(-1) # (M, 1)
# dist is a constant in this impelmentation
# Estimate signed distances at section points
estimated_next_sdf = sdf + iter_cos * dist.reshape(-1, 1) * 0.5 # (M, 1)
estimated_prev_sdf = sdf - iter_cos * dist.reshape(-1, 1) * 0.5 # (M, 1)
else:
estimated_next_sdf = torch.cat([sdf[..., 1:], sdf[..., -1:]], -1).reshape(-1, 1)
estimated_prev_sdf = torch.cat([sdf[..., :1], sdf[..., :-1]], -1).reshape(-1, 1)
prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s.reshape(-1, 1))
next_cdf = torch.sigmoid(estimated_next_sdf * inv_s.reshape(-1, 1))
p = prev_cdf - next_cdf
c = prev_cdf
alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0).squeeze()
return s_val, alpha
def grid_sampler(self, xyz, *grids, mode=None, align_corners=True, sample_ret=True, sample_grad=False, displace=0.1, smooth=False):
'''Wrapper for the interp operation'''
if mode is None:
# bilinear is actually trilinear if 5D input is given to grid_sample
mode = 'nearest' if self.nearest else 'bilinear'
shape = xyz.shape[:-1]
xyz = xyz.reshape(1,1,1,-1,3)
if smooth:
grid = self.smooth_conv(grids[0])
grids[0] = grid
outs = []
if sample_ret:
ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
grid = grids[0]
ret = F.grid_sample(grid, ind_norm, mode=mode, align_corners=align_corners).reshape(
grid.shape[1],-1).T.reshape(*shape,grid.shape[1]).squeeze(-1)
outs.append(ret)
if sample_grad:
grid = grids[0]
feat, grad = self.sample_sdfs(xyz, grid, displace_list=[1.0], use_grad_norm=False)
feat = torch.cat([feat[:, 4:6], feat[:, 2:4], feat[:, 0:2]], dim=-1)
grad = torch.cat([grad[:, [2]], grad[:, [1]], grad[:, [0]]], dim=-1)
outs.append(grad)
outs.append(feat)
if len(outs) == 1:
return outs[0]
else:
return outs
def sample_sdfs(self, xyz, *grids, displace_list, mode='bilinear', align_corners=True, use_grad_norm=False):
shape = xyz.shape[:-1]
xyz = xyz.reshape(1,1,1,-1,3)
grid = grids[0]
# ind from xyz to zyx !!!!!
ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
grid_size = grid.size()[-3:]
size_factor_zyx = torch.tensor([grid_size[2], grid_size[1], grid_size[0]]).cuda()
ind = ((ind_norm + 1) / 2) * (size_factor_zyx - 1)
offset = torch.tensor([[-1, 0, 0], [1, 0, 0], [0, -1, 0], [0, 1, 0], [0, 0, -1], [0, 0, 1]]).cuda()
displace = torch.tensor(displace_list).cuda()
offset = offset[:, None, :] * displace[None, :, None]
all_ind = ind.unsqueeze(-2) + offset.view(-1, 3)
all_ind = all_ind.view(1, 1, 1, -1, 3)
all_ind[..., 0] = all_ind[..., 0].clamp(min=0, max=size_factor_zyx[0] - 1)
all_ind[..., 1] = all_ind[..., 1].clamp(min=0, max=size_factor_zyx[1] - 1)
all_ind[..., 2] = all_ind[..., 2].clamp(min=0, max=size_factor_zyx[2] - 1)
all_ind_norm = (all_ind / (size_factor_zyx-1)) * 2 - 1
feat = F.grid_sample(grid, all_ind_norm, mode=mode, align_corners=align_corners)
all_ind = all_ind.view(1, 1, 1, -1, 6, len(displace_list), 3)
diff = all_ind[:, :, :, :, 1::2, :, :] - all_ind[:, :, :, :, 0::2, :, :]
diff, _ = diff.max(dim=-1)
feat_ = feat.view(1, 1, 1, -1, 6, len(displace_list))
feat_diff = feat_[:, :, :, :, 1::2, :] - feat_[:, :, :, :, 0::2, :]
grad = feat_diff / diff / self.voxel_size
feat = feat.view(shape[-1], 6, len(displace_list))
grad = grad.view(shape[-1], 3, len(displace_list))
if use_grad_norm:
grad = grad / (grad.norm(dim=1, keepdim=True) + 1e-5)
feat = feat.view(shape[-1], 6 * len(displace_list))
grad = grad.view(shape[-1], 3 * len(displace_list))
return feat, grad
def hit_coarse_geo(self, rays_o, rays_d, near, far, stepsize, **render_kwargs):
'''Check whether the rays hit the solved coarse geometry or not'''
far = 1e9 # the given far can be too small while rays stop when hitting scene bbox
shape = rays_o.shape[:-1]
rays_o = rays_o.reshape(-1, 3).contiguous()
rays_d = rays_d.reshape(-1, 3).contiguous()
stepdist = stepsize * self.voxel_size
ray_pts, mask_outbbox, ray_id = render_utils_cuda.sample_pts_on_rays(
rays_o, rays_d, self.xyz_min, self.xyz_max, near, far, stepdist)[:3]
mask_inbbox = ~mask_outbbox
hit = torch.zeros([len(rays_o)], dtype=torch.bool)
hit[ray_id[mask_inbbox][self.mask_cache(ray_pts[mask_inbbox])]] = 1
return hit.reshape(shape)
def sample_ray(self, rays_o, rays_d, near, far, stepsize, **render_kwargs):
'''Sample query points on rays.
All the output points are sorted from near to far.
Input:
rays_o, rayd_d: both in [N, 3] indicating ray configurations.
near, far: the near and far distance of the rays.
stepsize: the number of voxels of each sample step.
Output:
ray_pts: [M, 3] storing all the sampled points.
ray_id: [M] the index of the ray of each point.
step_id: [M] the i'th step on a ray of each point.
'''
far = 1e9 # the given far can be too small while rays stop when hitting scene bbox
rays_o = rays_o.contiguous()
rays_d = rays_d.contiguous()
stepdist = stepsize * self.voxel_size
ray_pts, mask_outbbox, ray_id, step_id, N_steps, t_min, t_max = render_utils_cuda.sample_pts_on_rays(
rays_o, rays_d, self.xyz_min, self.xyz_max, near, far, stepdist)
# correct the cuda output N_steps, which could have a bias of 1 randomly
N_steps = ray_id.unique(return_counts=True)[1]
mask_inbbox = ~mask_outbbox
ray_pts = ray_pts[mask_inbbox]
ray_id = ray_id[mask_inbbox]
step_id = step_id[mask_inbbox]
return ray_pts, ray_id, step_id, mask_outbbox, N_steps
def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):
'''Volume rendering'''
ret_dict = {}
N = len(rays_o)
ray_pts, ray_id, step_id, mask_outbbox, N_steps = self.sample_ray(
rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, **render_kwargs)
# interval = render_kwargs['stepsize'] * self.voxel_size_ratio
gradient, gradient_error = None, None
if self.mask_cache is not None:
mask = self.mask_cache(ray_pts)
ray_pts = ray_pts[mask]
ray_id = ray_id[mask]
step_id = step_id[mask]
mask_outbbox[~mask_outbbox] |= ~mask
sdf_grid = self.smooth_conv(self.sdf.grid) if self.smooth_sdf else self.sdf.grid
sdf, gradient, feat = self.grid_sampler(ray_pts, sdf_grid, sample_ret=True, sample_grad=True, displace=1.0)
dist = render_kwargs['stepsize'] * self.voxel_size
s_val, alpha = self.neus_alpha_from_sdf_scatter(viewdirs, ray_id, dist, sdf, gradient, global_step=global_step,
is_train=global_step is not None, use_mid=True)
mask = None
if self.fast_color_thres > 0:
mask = (alpha > self.fast_color_thres)
alpha = alpha[mask]
ray_id = ray_id[mask]
ray_pts = ray_pts[mask]
step_id = step_id[mask]
gradient = gradient[mask] # merge to sample once
sdf = sdf[mask]
# compute accumulated transmittance
if ray_id.ndim == 2:
print(mask, alpha, ray_id)
mask = mask.squeeze()
alpha = alpha.squeeze()
ray_id = ray_id.squeeze()
ray_pts = ray_pts.squeeze()
step_id = step_id.squeeze()
gradient = gradient.squeeze()
sdf = sdf.squeeze()
weights, alphainv_last = Alphas2Weights.apply(alpha, ray_id, N)
if self.fast_color_thres > 0:
mask = (weights > self.fast_color_thres)
weights = weights[mask]
alpha = alpha[mask]
ray_pts = ray_pts[mask]
ray_id = ray_id[mask]
step_id = step_id[mask]
gradient = gradient[mask]
sdf = sdf[mask]
k0 = self.k0(ray_pts)
all_grad_inds = list(set(self.grad_feat + self.k_grad_feat))
all_sdf_inds = list(set(self.sdf_feat + self.k_sdf_feat))
assert all_grad_inds == all_sdf_inds
if len(all_grad_inds) > 0:
all_grad_inds = sorted(all_grad_inds)
all_grad_inds_ = deepcopy(all_grad_inds)
all_feat, all_grad = self.sample_sdfs(ray_pts, sdf_grid, displace_list=all_grad_inds_, use_grad_norm=self.use_grad_norm)
else:
all_feat, all_grad = None, None
self.gradient = self.neus_sdf_gradient()
viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
viewdirs_emb = torch.cat([viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
rays_xyz = (ray_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
if self.use_rgbnet_k0:
rgb_feat = torch.cat([
k0, xyz_emb, viewdirs_emb.flatten(0, -2)[ray_id]
], -1)
else:
rgb_feat = torch.cat([
xyz_emb, viewdirs_emb.flatten(0, -2)[ray_id]
], -1)
hierarchical_feats = []
if self.center_sdf:
hierarchical_feats.append(sdf[:, None])
if len(all_grad_inds) > 0:
hierarchical_feats.append(all_feat)
hierarchical_feats.append(all_grad)
if len(hierarchical_feats) > 0:
rgb_feat = torch.cat([rgb_feat, *hierarchical_feats], dim=-1)
rgb_logit = self.rgbnet(rgb_feat)
rgb = torch.sigmoid(rgb_logit)
if self.use_rgb_k:
k_xyz_emb = (rays_xyz.unsqueeze(-1) * self.k_posfreq).flatten(-2)
k_xyz_emb = torch.cat([rays_xyz, k_xyz_emb.sin(), k_xyz_emb.cos()], -1)
k_viewdirs_emb = (viewdirs.unsqueeze(-1) * self.k_viewfreq).flatten(-2)
k_viewdirs_emb = torch.cat([viewdirs, k_viewdirs_emb.sin(), k_viewdirs_emb.cos()], -1)
k_rgb_feat = torch.cat([
k0, k_xyz_emb, k_viewdirs_emb.flatten(0, -2)[ray_id]
], -1)
assert len(self.k_grad_feat) == 1 and self.k_grad_feat[0] == 1.0
assert len(self.k_sdf_feat) == 0
all_feats_ = [gradient]
if self.k_center_sdf:
all_feats_.append(sdf[:, None])
if len(all_feats_) > 0:
all_feats_ = torch.cat(all_feats_, dim=-1)
k_rgb_feat = torch.cat([k_rgb_feat, all_feats_], dim=-1)
if self.k_res:
color_feat = rgb_logit
if self.k_detach_1:
k_rgb_feat = torch.cat([k_rgb_feat, color_feat.detach()], dim=-1)
else:
k_rgb_feat = torch.cat([k_rgb_feat, color_feat], dim=-1)
if self.k_detach_2:
k_rgb_logit = rgb_logit.detach() + self.k_rgbnet(k_rgb_feat)
else:
k_rgb_logit = rgb_logit + self.k_rgbnet(k_rgb_feat)
k_rgb = torch.sigmoid(k_rgb_logit)
k_rgb_marched = segment_coo(
src=(weights.unsqueeze(-1) * k_rgb),
index=ray_id, out=torch.zeros([N, 3]), reduce='sum') + alphainv_last[..., None] * render_kwargs['bg']
k_rgb_marched = k_rgb_marched.clamp(0, 1)
else:
k_rgb_marched = None
# Ray marching
rgb_marched = segment_coo(
src=(weights.unsqueeze(-1) * rgb),
index=ray_id, out=torch.zeros([N, 3]), reduce='sum') + alphainv_last[..., None] * render_kwargs['bg']
if gradient is not None and render_kwargs.get('render_grad', False):
normal = gradient / (gradient.norm(2, -1, keepdim=True) + 1e-6)
normal_marched = segment_coo(
src=(weights.unsqueeze(-1) * normal),
index=ray_id, out=torch.zeros([N, 3]), reduce='sum')
else:
normal_marched = None
if render_kwargs.get('render_depth', False):
with torch.no_grad():
depth = segment_coo(
src=(weights * step_id * dist), index=ray_id, out=torch.zeros([N]), reduce='sum')
disp = 1 / depth
else:
depth = None
disp = 0
ret_dict.update({
'alphainv_cum': alphainv_last,
'weights': weights,
'rgb_marched': rgb_marched,
# 'k_rgb_marched': k_rgb_marched,
'normal_marched': normal_marched,
'raw_alpha': alpha,
'raw_rgb': rgb,
'depth': depth,
'disp': disp,
'mask': mask,
'mask_outbbox':mask_outbbox,
'gradient': gradient,
"gradient_error": gradient_error,
"s_val": s_val,
})
if self.use_rgb_k:
ret_dict.update({
'rgb_marched': k_rgb_marched,
'rgb_marched0': rgb_marched,
})
return ret_dict
def mesh_color_forward(self, ray_pts, **kwargs):
    """Predict RGB colors for mesh vertices (texture extraction).

    Each point is viewed along the direction opposite its SDF normal
    (viewdirs = -normal), then pushed through the same two-stage color
    pipeline as rendering: the base rgbnet plus, when `use_rgb_k` is
    enabled, the residual k_rgbnet refinement.

    @ray_pts: [M, 3] world-space query points (mesh vertices).
    Returns: [M, 3] sigmoid-activated RGB.
    """
    # Optionally pre-smooth the SDF grid before sampling.
    sdf_grid = self.smooth_conv(
        self.sdf.grid) if self.smooth_sdf else self.sdf.grid
    # self.gradient = self.neus_sdf_gradient()
    sdf, gradient, feat = self.grid_sampler(ray_pts, sdf_grid, sample_ret=True, sample_grad=True, displace=1.0)
    normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
    # Synthetic view direction: look straight into the surface.
    viewdirs = -normal
    k0 = self.k0(ray_pts)
    # Union of displacement radii required by base and residual branches.
    all_grad_inds = list(set(self.grad_feat + self.k_grad_feat))
    all_sdf_inds = list(set(self.sdf_feat + self.k_sdf_feat))
    assert all_grad_inds == all_sdf_inds
    if len(all_grad_inds) > 0:
        all_grad_inds = sorted(all_grad_inds)
        all_grad_inds_ = deepcopy(all_grad_inds)
        all_feat, all_grad = self.sample_sdfs(ray_pts, sdf_grid,
                                              displace_list=all_grad_inds_,
                                              use_grad_norm=self.use_grad_norm)
    else:
        all_feat, all_grad = None, None
    # Sin/cos frequency embeddings of view direction and normalized position.
    viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
    viewdirs_emb = torch.cat(
        [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
    rays_xyz = (ray_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
    xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
    xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
    if self.use_rgbnet_k0:
        rgb_feat = torch.cat([
            k0, xyz_emb, viewdirs_emb.flatten(0, -2)
        ], -1)
    else:
        rgb_feat = torch.cat([
            xyz_emb, viewdirs_emb.flatten(0, -2)
        ], -1)
    # Multi-scale SDF / gradient features around the query point.
    hierarchical_feats = []
    if self.center_sdf:
        hierarchical_feats.append(sdf[:, None])
    if len(all_grad_inds) > 0:
        hierarchical_feats.append(all_feat)
        hierarchical_feats.append(all_grad)
    if len(hierarchical_feats) > 0:
        rgb_feat = torch.cat([rgb_feat, *hierarchical_feats], dim=-1)
    rgb_logit = self.rgbnet(rgb_feat)
    rgb = torch.sigmoid(rgb_logit)
    if self.use_rgb_k:
        # Residual color branch with its own frequency embeddings.
        k_xyz_emb = (rays_xyz.unsqueeze(-1) * self.k_posfreq).flatten(-2)
        k_xyz_emb = torch.cat([rays_xyz, k_xyz_emb.sin(), k_xyz_emb.cos()],
                              -1)
        k_viewdirs_emb = (viewdirs.unsqueeze(-1) * self.k_viewfreq).flatten(
            -2)
        k_viewdirs_emb = torch.cat(
            [viewdirs, k_viewdirs_emb.sin(), k_viewdirs_emb.cos()], -1)
        k_rgb_feat = torch.cat([
            k0, k_xyz_emb, k_viewdirs_emb.flatten(0, -2)
        ], -1)
        # Residual branch only supports the raw gradient feature (radius 1.0).
        assert len(self.k_grad_feat) == 1 and self.k_grad_feat[0] == 1.0
        assert len(self.k_sdf_feat) == 0
        all_feats_ = [gradient]
        if self.k_center_sdf:
            all_feats_.append(sdf[:, None])
        if len(all_feats_) > 0:
            all_feats_ = torch.cat(all_feats_, dim=-1)
            k_rgb_feat = torch.cat([k_rgb_feat, all_feats_], dim=-1)
        if self.k_res:
            # Optionally feed the base logits into the residual net,
            # detached (k_detach_1) or with gradients flowing.
            color_feat = rgb_logit
            if self.k_detach_1:
                k_rgb_feat = torch.cat([k_rgb_feat, color_feat.detach()], dim=-1)
            else:
                k_rgb_feat = torch.cat([k_rgb_feat, color_feat], dim=-1)
        # Residual added to base logits; k_detach_2 blocks gradients to the base.
        if self.k_detach_2:
            k_rgb_logit = rgb_logit.detach() + self.k_rgbnet(k_rgb_feat)
        else:
            k_rgb_logit = rgb_logit + self.k_rgbnet(k_rgb_feat)
        rgb = torch.sigmoid(k_rgb_logit)
    return rgb
def extract_geometry(self, bound_min, bound_max, resolution=128, threshold=0.0, smooth=True, sigma=0.5, **kwargs):
    """Extract a mesh from the (optionally smoothed) SDF grid.

    Delegates to the module-level `extract_geometry` helper; inside this
    method that global name is not shadowed by the method itself.
    """
    if self.smooth_sdf:
        # Training-time smoothing kernel is already configured.
        sdf_grid = self.smooth_conv(self.sdf.grid)
    elif smooth:
        # Build a fresh k=3 smoothing conv used only for extraction.
        self.init_smooth_conv_test_k3(sigma=sigma)
        sdf_grid = self.smooth_conv_test_k3(self.sdf.grid)
    else:
        sdf_grid = self.sdf.grid

    def query_func(pts):
        # Sign-flipped SDF; presumably to match the extractor's
        # threshold convention — confirm against extract_geometry helper.
        return self.grid_sampler(pts, - sdf_grid)

    if resolution is None:
        resolution = self.world_size[0]
    return extract_geometry(bound_min,
                            bound_max,
                            resolution=resolution,
                            threshold=threshold,
                            query_func=query_func)
''' Module for the searched coarse geometry
It supports query for the known free space and unknown space.
'''
class MaskCache(nn.Module):
    """Occupancy cache distilled from a saved coarse density grid.

    Loads a checkpoint containing 'MaskCache_kwargs' and
    'model_state_dict', max-pools the stored density (presumably to keep
    the cache conservative — confirm), and answers boolean occupancy
    queries by thresholding the derived alpha.
    """

    def __init__(self, path, mask_cache_thres, ks=3):
        super().__init__()
        ckpt = torch.load(path)
        meta = ckpt['MaskCache_kwargs']
        self.mask_cache_thres = mask_cache_thres
        self.register_buffer('xyz_min', torch.FloatTensor(meta['xyz_min']))
        self.register_buffer('xyz_max', torch.FloatTensor(meta['xyz_max']))
        pooled = F.max_pool3d(
            ckpt['model_state_dict']['density'], kernel_size=ks, padding=ks//2, stride=1)
        self.register_buffer('density', pooled)
        self.act_shift = meta['act_shift']
        self.voxel_size_ratio = meta['voxel_size_ratio']
        self.nearest = meta.get('nearest', False)

    @torch.no_grad()
    def forward(self, xyz):
        """Return a boolean mask of points whose cached alpha clears the threshold."""
        lead_shape = xyz.shape[:-1]
        pts = xyz.reshape(1, 1, 1, -1, 3)
        # Normalize to [-1, 1] and flip xyz -> zyx for grid_sample's convention.
        ind_norm = ((pts - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        sample_mode = 'nearest' if self.nearest else 'bilinear'
        density = F.grid_sample(self.density, ind_norm, align_corners=True, mode=sample_mode)
        alpha = 1 - torch.exp(-F.softplus(density + self.act_shift) * self.voxel_size_ratio)
        return alpha.reshape(*lead_shape) >= self.mask_cache_thres
''' Misc
'''
def cumprod_exclusive(p):
    """Exclusive cumulative product along the last dim, prefixed with 1.

    Output has one more element than `p` on the last axis.
    """
    # Original note: training slows down late on without clamping at 1e-10.
    running = p.clamp_min(1e-10).cumprod(-1)
    lead_one = torch.ones_like(p[..., [0]])
    return torch.cat((lead_one, running), dim=-1)
def get_ray_marching_ray(alpha):
    """Convert per-step alphas into volume-rendering weights.

    Returns (weights, transmittance) where transmittance has one extra
    trailing entry (the leftover background transparency).
    """
    transmittance = cumprod_exclusive(1 - alpha)
    weights = alpha * transmittance[..., :-1]
    return weights, transmittance
def total_variation(v, mask=None):
    """Mean absolute difference between adjacent voxels, averaged over axes.

    @v: [N, C, X, Y, Z] grid.
    @mask: optional boolean grid broadcastable to v; only differences whose
        both endpoints lie inside the mask contribute.
    Returns a scalar tensor.
    """
    # Bug fix: the original gated Tensor.diff behind
    # `torch.__version__ == '1.10.0'`, an exact-string match that fails on
    # every other version (1.10.1, 1.11, ...). Feature-detect instead; both
    # branches compute identical values, so behavior is unchanged.
    if hasattr(v, 'diff'):
        tv2 = v.diff(dim=2).abs()
        tv3 = v.diff(dim=3).abs()
        tv4 = v.diff(dim=4).abs()
    else:
        tv2 = (v[:, :, 1:, :, :] - v[:, :, :-1, :, :]).abs()
        tv3 = (v[:, :, :, 1:, :] - v[:, :, :, :-1, :]).abs()
        tv4 = (v[:, :, :, :, 1:] - v[:, :, :, :, :-1]).abs()
    if mask is not None:
        # Keep only differences whose two endpoints are both masked in.
        tv2 = tv2[mask[:, :, :-1] & mask[:, :, 1:]]
        tv3 = tv3[mask[:, :, :, :-1] & mask[:, :, :, 1:]]
        tv4 = tv4[mask[:, :, :, :, :-1] & mask[:, :, :, :, 1:]]
    return (tv2.mean() + tv3.mean() + tv4.mean()) / 3
def total_variation_step2(v, mask=None):
    """Total variation over voxel pairs two steps apart.

    Same contract as total_variation, but compares v[i+2] against v[i]
    and halves the difference so its scale matches the one-step version.
    """
    def _axis_tv(dim):
        hi = [slice(None)] * 5
        lo = [slice(None)] * 5
        hi[dim] = slice(2, None)
        lo[dim] = slice(None, -2)
        diff = (v[tuple(hi)] - v[tuple(lo)]).abs() / 2
        if mask is not None:
            # Only pairs with both endpoints inside the mask contribute.
            diff = diff[mask[tuple(hi)] & mask[tuple(lo)]]
        return diff.mean()

    return (_axis_tv(2) + _axis_tv(3) + _axis_tv(4)) / 3
class Alphas2Weights(torch.autograd.Function):
    """Custom autograd op: flat per-sample alphas -> compositing weights.

    Wraps the fused CUDA kernel. `ray_id` maps each flat sample to its ray
    and `N` is the number of rays; returns (weights, alphainv_last), the
    latter being the leftover transmittance per ray.
    """
    @staticmethod
    def forward(ctx, alpha, ray_id, N):
        weights, T, alphainv_last, i_start, i_end = render_utils_cuda.alpha2weight(alpha, ray_id, N)
        # Save intermediates only when a backward pass can actually occur.
        if alpha.requires_grad:
            ctx.save_for_backward(alpha, weights, T, alphainv_last, i_start, i_end)
            ctx.n_rays = N
        return weights, alphainv_last
    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_weights, grad_last):
        alpha, weights, T, alphainv_last, i_start, i_end = ctx.saved_tensors
        grad = render_utils_cuda.alpha2weight_backward(
            alpha, weights, T, alphainv_last,
            i_start, i_end, ctx.n_rays, grad_weights, grad_last)
        # Gradient only w.r.t. alpha; ray_id and N are non-differentiable.
        return grad, None, None
''' Ray and batch
'''
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
    """Generate per-pixel ray origins and directions for one camera.

    @H, W: image size. @K: 3x3 intrinsics (indexable as K[i][j]).
    @c2w: camera-to-world matrix (at least [3, 4]).
    @inverse_y: OpenCV-style (y-down, z-forward) when True, otherwise
        OpenGL-style (y-up, z-back).
    @mode: sample pixel corners ('lefttop'), centers ('center'),
        or jittered positions ('random').
    Returns (rays_o, rays_d), each [H, W, 3].
    """
    xs, ys = torch.meshgrid(
        torch.linspace(0, W-1, W, device=c2w.device),
        torch.linspace(0, H-1, H, device=c2w.device))  # pytorch's meshgrid has indexing='ij'
    xs = xs.t().float()  # [H, W] pixel column index
    ys = ys.t().float()  # [H, W] pixel row index
    if mode == 'lefttop':
        pass
    elif mode == 'center':
        xs = xs + 0.5
        ys = ys + 0.5
    elif mode == 'random':
        xs = xs + torch.rand_like(xs)
        ys = ys + torch.rand_like(ys)
    else:
        raise NotImplementedError
    if flip_x:
        xs = xs.flip((1,))
    if flip_y:
        ys = ys.flip((0,))
    if inverse_y:
        dirs = torch.stack([(xs-K[0][2])/K[0][0], (ys-K[1][2])/K[1][1], torch.ones_like(xs)], -1)
    else:
        dirs = torch.stack([(xs-K[0][2])/K[0][0], -(ys-K[1][2])/K[1][1], -torch.ones_like(xs)], -1)
    # Rotate camera-frame directions into the world frame:
    # per-pixel c2w[:3,:3] @ dir.
    rays_d = (dirs.unsqueeze(-2) * c2w[:3, :3]).sum(-1)
    # Every ray starts at the camera center.
    rays_o = c2w[:3, 3].expand(rays_d.shape)
    return rays_o, rays_d
def get_rays_np(H, W, K, c2w):
    """NumPy twin of get_rays for the OpenGL-style (y-up, z-back) convention."""
    cols, rows = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy')
    dirs = np.stack([(cols-K[0][2])/K[0][0], -(rows-K[1][2])/K[1][1], -np.ones_like(cols)], -1)
    # Rotate camera-frame directions into the world frame (per-pixel c2w @ dir).
    rays_d = (dirs[..., None, :] * c2w[:3, :3]).sum(-1)
    # Every ray starts at the camera center.
    rays_o = np.broadcast_to(c2w[:3, 3], rays_d.shape)
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Warp rays into NDC space (NeRF forward-facing convention)."""
    # Slide each origin along its ray onto the near plane.
    t_near = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t_near[..., None] * rays_d
    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    # Perspective projection of origins and directions.
    o0 = -1. / (W / (2. * focal)) * ox / oz
    o1 = -1. / (H / (2. * focal)) * oy / oz
    o2 = 1. + 2. * near / oz
    d0 = -1. / (W / (2. * focal)) * (dx / dz - ox / oz)
    d1 = -1. / (H / (2. * focal)) * (dy / dz - oy / oz)
    d2 = -2. * near / oz
    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
    """Build rays for a single camera view, plus unit view directions.

    When ndc is set, origins/directions are warped to NDC space while
    viewdirs keep the pre-warp unit directions.
    """
    rays_o, rays_d = get_rays(
        H, W, K, c2w, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y, mode=mode)
    viewdirs = rays_d / rays_d.norm(dim=-1, keepdim=True)
    if ndc:
        # Forward-facing warp with the near plane at 1.
        rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d)
    return rays_o, rays_d, viewdirs
@torch.no_grad()
def get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute per-view ray buffers for training (shared H, W and K).

    Returns (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz); the ray
    buffers are [n_views, H, W, 3] and imsz is a list of ones per view.
    """
    print('get_training_rays: start')
    # All views must share one resolution and one intrinsic matrix.
    assert len(np.unique(HW, axis=0)) == 1
    assert len(np.unique(Ks.reshape(len(Ks), -1), axis=0)) == 1
    assert len(rgb_tr) == len(train_poses) and len(rgb_tr) == len(Ks) and len(rgb_tr) == len(HW)
    H, W = HW[0]
    K = Ks[0]
    t0 = time.time()
    n_views = len(rgb_tr)
    rays_o_tr = torch.zeros([n_views, H, W, 3], device=rgb_tr.device)
    rays_d_tr = torch.zeros([n_views, H, W, 3], device=rgb_tr.device)
    viewdirs_tr = torch.zeros([n_views, H, W, 3], device=rgb_tr.device)
    imsz = [1] * n_views
    for view_idx, c2w in enumerate(train_poses):
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        rays_o_tr[view_idx].copy_(rays_o.to(rgb_tr.device))
        rays_d_tr[view_idx].copy_(rays_d.to(rgb_tr.device))
        viewdirs_tr[view_idx].copy_(viewdirs.to(rgb_tr.device))
        del rays_o, rays_d, viewdirs
    eps_time = time.time() - t0
    print('get_training_rays: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute rays for all training views, flattened into one [N, 3] batch.

    Unlike get_training_rays, per-view resolutions and intrinsics may differ.
    Returns (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz) where imsz
    lists the pixel count contributed by each view.
    """
    print('get_training_rays_flatten: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    eps_time = time.time()
    DEVICE = rgb_tr_ori[0].device
    # Total pixels over all views; buffers are preallocated and filled in place.
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0  # write cursor into the flattened buffers
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        n = H * W
        rgb_tr[top:top+n].copy_(img.flatten(0,1))
        rays_o_tr[top:top+n].copy_(rays_o.flatten(0,1).to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d.flatten(0,1).to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs.flatten(0,1).to(DEVICE))
        imsz.append(n)
        top += n
    assert top == N
    eps_time = time.time() - eps_time
    print('get_training_rays_flatten: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def get_training_rays_in_maskcache_sampling(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y, model, render_kwargs, rgbnet_sup_reduce=1):
    """Like get_training_rays_flatten, but keep only pixels whose rays hit
    the coarse geometry (model.hit_coarse_geo), dropping sure-background rays.

    Buffers are preallocated for all N pixels, then truncated to the kept
    count. NOTE(review): rgbnet_sup_reduce is accepted but unused here.
    """
    print('get_training_rays_in_maskcache_sampling: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    CHUNK = 64  # rows of pixels tested against the coarse geometry per call
    DEVICE = rgb_tr_ori[0].device
    eps_time = time.time()
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0  # write cursor into the flattened buffers
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        mask = torch.ones(img.shape[:2], device=DEVICE, dtype=torch.bool)
        for i in range(0, img.shape[0], CHUNK):
            mask[i:i+CHUNK] = model.hit_coarse_geo(
                rays_o=rays_o[i:i+CHUNK], rays_d=rays_d[i:i+CHUNK], **render_kwargs).to(DEVICE)
        # NOTE(review): mask.sum() is a 0-dim tensor, so imsz holds tensors,
        # not plain ints — confirm downstream consumers expect that.
        n = mask.sum()
        rgb_tr[top:top+n].copy_(img[mask])
        rays_o_tr[top:top+n].copy_(rays_o[mask].to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d[mask].to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs[mask].to(DEVICE))
        imsz.append(n)
        top += n
    print('get_training_rays_in_maskcache_sampling: ratio', top / N)
    # Truncate the preallocated buffers to the number of kept pixels.
    rgb_tr = rgb_tr[:top]
    rays_o_tr = rays_o_tr[:top]
    rays_d_tr = rays_d_tr[:top]
    viewdirs_tr = viewdirs_tr[:top]
    eps_time = time.time() - eps_time
    print('get_training_rays_in_maskcache_sampling: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def batch_indices_generator(N, BS):
    """Endlessly yield LongTensor index batches of size BS over range(N).

    A fresh permutation is drawn whenever fewer than BS indices remain,
    so leftover tail indices of a pass are discarded.
    """
    def _fresh_perm():
        # CPU numpy permutation: per the original author, torch.randperm
        # on cuda produced incorrect results on their machine.
        return torch.LongTensor(np.random.permutation(N))

    perm = _fresh_perm()
    cursor = 0
    while True:
        if cursor + BS > N:
            perm = _fresh_perm()
            cursor = 0
        yield perm[cursor:cursor + BS]
        cursor += BS
| 51,680 | 42.871817 | 153 | py |
Voxurf | Voxurf-main/lib/load_nerfpp.py | '''
Modify from
https://github.com/Kai-46/nerfplusplus/blob/master/data_loader_split.py
'''
import os
import glob
import scipy
import imageio
import numpy as np
import torch
########################################################################################################################
# camera coordinate system: x-->right, y-->down, z-->scene (opencv/colmap convention)
# poses is camera-to-world
########################################################################################################################
def find_files(dir, exts):
    """Collect files in *dir* matching any glob pattern in *exts*, sorted.

    Returns an empty list when *dir* does not exist or nothing matches.
    """
    if not os.path.isdir(dir):
        return []
    matched = []
    for pattern in exts:
        matched.extend(glob.glob(os.path.join(dir, pattern)))
    if len(matched) > 0:
        matched = sorted(matched)
    return matched
def load_data_split(split_dir, skip=1, try_load_min_depth=True, only_img_files=False):
    """Collect per-frame file paths for one NeRF++ data split directory.

    Expects subfolders `intrinsics/`, `pose/`, `rgb/` and optionally
    `mask/` and `min_depth/`. Every list is subsampled by `skip`; missing
    optional entries are padded with None so all lists stay aligned.
    """
    def parse_txt(filename):
        # 4x4 row-major matrix stored as 16 whitespace-separated numbers.
        # NOTE(review): defined but unused inside this function.
        assert os.path.isfile(filename)
        nums = open(filename).read().split()
        return np.array([float(x) for x in nums]).reshape([4, 4]).astype(np.float32)

    if only_img_files:
        img_files = find_files('{}/rgb'.format(split_dir), exts=['*.png', '*.jpg'])
        return img_files
    # camera parameters files
    intrinsics_files = find_files('{}/intrinsics'.format(split_dir), exts=['*.txt'])
    pose_files = find_files('{}/pose'.format(split_dir), exts=['*.txt'])
    intrinsics_files = intrinsics_files[::skip]
    pose_files = pose_files[::skip]
    cam_cnt = len(pose_files)
    # img files
    img_files = find_files('{}/rgb'.format(split_dir), exts=['*.png', '*.jpg'])
    if len(img_files) > 0:
        img_files = img_files[::skip]
        assert(len(img_files) == cam_cnt)
    else:
        img_files = [None, ] * cam_cnt
    # mask files
    mask_files = find_files('{}/mask'.format(split_dir), exts=['*.png', '*.jpg'])
    if len(mask_files) > 0:
        mask_files = mask_files[::skip]
        assert(len(mask_files) == cam_cnt)
    else:
        mask_files = [None, ] * cam_cnt
    # min depth files
    mindepth_files = find_files('{}/min_depth'.format(split_dir), exts=['*.png', '*.jpg'])
    if try_load_min_depth and len(mindepth_files) > 0:
        mindepth_files = mindepth_files[::skip]
        assert(len(mindepth_files) == cam_cnt)
    else:
        mindepth_files = [None, ] * cam_cnt
    return intrinsics_files, pose_files, img_files, mask_files, mindepth_files
def rerotate_poses(poses, render_poses):
    """Rotate all poses so the camera rig's least-variance axis maps to -y.

    @poses, @render_poses: [M, 4, 4] camera-to-world matrices.
    Returns rotated copies of both (inputs are not modified).
    """
    # Bug fix: the module only does `import scipy`, which does not
    # guarantee that the scipy.spatial.transform submodule is importable
    # as an attribute. Import it explicitly here instead.
    from scipy.spatial.transform import Rotation

    poses = np.copy(poses)
    centroid = poses[:, :3, 3].mean(0)
    poses[:, :3, 3] = poses[:, :3, 3] - centroid
    # PCA of camera centers: the eigenvector with the smallest eigenvalue
    # is the direction the cameras vary least along (the rig's "up").
    x = poses[:, :3, 3]
    mu = x.mean(0)
    cov = np.cov((x - mu).T)
    # NOTE(review): eig on a symmetric covariance; eigh would guarantee
    # real-valued output, kept as eig to preserve original behavior.
    ev, eig = np.linalg.eig(cov)
    cams_up = eig[:, np.argmin(ev)]
    if cams_up[1] < 0:
        cams_up = -cams_up
    # Rotation matrix that aligns cams_up with [0, -1, 0].
    R = Rotation.align_vectors([[0, -1, 0]], cams_up[None])[0].as_matrix()
    # Apply rotation about the centroid, then restore the translation.
    poses[:, :3, :3] = R @ poses[:, :3, :3]
    poses[:, :3, [3]] = R @ poses[:, :3, [3]]
    poses[:, :3, 3] = poses[:, :3, 3] + centroid
    render_poses = np.copy(render_poses)
    render_poses[:, :3, 3] = render_poses[:, :3, 3] - centroid
    render_poses[:, :3, :3] = R @ render_poses[:, :3, :3]
    render_poses[:, :3, [3]] = R @ render_poses[:, :3, [3]]
    render_poses[:, :3, 3] = render_poses[:, :3, 3] + centroid
    return poses, render_poses
def load_nerfpp_data(basedir, rerotate=True):
    """Load a NeRF++-format dataset: train/test splits plus camera-path poses.

    Assumes every image shares one 4x4 intrinsic file. Returns
    (imgs, poses, render_poses, [H, W, focal], K, i_split) with images
    scaled to [0, 1] and i_split = [train_ids, test_ids, test_ids].
    """
    tr_K, tr_c2w, tr_im_path = load_data_split(os.path.join(basedir, 'train'))[:3]
    te_K, te_c2w, te_im_path = load_data_split(os.path.join(basedir, 'test'))[:3]
    assert len(tr_K) == len(tr_c2w) and len(tr_K) == len(tr_im_path)
    assert len(te_K) == len(te_c2w) and len(te_K) == len(te_im_path)
    # Determine split id list
    i_split = [[], []]
    i = 0
    for _ in tr_c2w:
        i_split[0].append(i)
        i += 1
    for _ in te_c2w:
        i_split[1].append(i)
        i += 1
    # Load camera intrinsics. Assume all images share a intrinsic.
    K_flatten = np.loadtxt(tr_K[0])
    for path in tr_K:
        assert np.allclose(np.loadtxt(path), K_flatten)
    for path in te_K:
        assert np.allclose(np.loadtxt(path), K_flatten)
    K = K_flatten.reshape(4,4)[:3,:3]
    # Load camera poses
    poses = []
    for path in tr_c2w:
        poses.append(np.loadtxt(path).reshape(4,4))
    for path in te_c2w:
        poses.append(np.loadtxt(path).reshape(4,4))
    # Load images
    imgs = []
    for path in tr_im_path:
        imgs.append(imageio.imread(path) / 255.)
    for path in te_im_path:
        imgs.append(imageio.imread(path) / 255.)
    # Bundle all data
    imgs = np.stack(imgs, 0)
    poses = np.stack(poses, 0)
    # The validation split reuses the test ids.
    i_split.append(i_split[1])
    H, W = imgs.shape[1:3]
    focal = K[[0,1], [0,1]].mean()
    # Generate movie trajectory
    render_poses_path = sorted(glob.glob(os.path.join(basedir, 'camera_path', 'pose', '*txt')))
    render_poses = []
    for path in render_poses_path:
        render_poses.append(np.loadtxt(path).reshape(4,4))
    render_poses = np.array(render_poses)
    render_K = np.loadtxt(glob.glob(os.path.join(basedir, 'camera_path', 'intrinsics', '*txt'))[0]).reshape(4,4)[:3,:3]
    # Rescale the video-path poses so its intrinsics match the dataset's.
    render_poses[:,:,0] *= K[0,0] / render_K[0,0]
    render_poses[:,:,1] *= K[1,1] / render_K[1,1]
    if rerotate:
        poses, render_poses = rerotate_poses(poses, render_poses)
    render_poses = torch.Tensor(render_poses)
    return imgs, poses, render_poses, [H, W, focal], K, i_split
| 5,638 | 33.175758 | 120 | py |
Voxurf | Voxurf-main/lib/grid.py | import os
import time
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.cpp_extension import load
# Directory containing this file; the CUDA sources live in its `cuda/` subfolder.
parent_dir = os.path.dirname(os.path.abspath(__file__))

# JIT-compile the CUDA extensions on import (torch caches the build).
# render_utils_cuda: ray/alpha compositing kernels (e.g. maskcache_lookup).
render_utils_cuda = load(
    name='render_utils_cuda',
    sources=[
        os.path.join(parent_dir, path)
        for path in [os.path.join('cuda', 'render_utils.cpp'), os.path.join('cuda', 'render_utils_kernel.cu')]],
    verbose=True)

# total_variation_cuda: fused total-variation gradient kernels used by DenseGrid.
total_variation_cuda = load(
    name='total_variation_cuda',
    sources=[
        os.path.join(parent_dir, path)
        for path in [os.path.join('cuda', 'total_variation.cpp'), os.path.join('cuda', 'total_variation_kernel.cu')]],
    verbose=True)
def create_grid(type, **kwargs):
    """Instantiate a grid backend by name ('DenseGrid' or 'TensoRFGrid')."""
    if type == 'DenseGrid':
        grid_cls = DenseGrid
    elif type == 'TensoRFGrid':
        grid_cls = TensoRFGrid
    else:
        raise NotImplementedError
    return grid_cls(**kwargs)
''' Dense 3D grid
'''
class DenseGrid(nn.Module):
    """Plain dense voxel grid queried with trilinear interpolation.

    Grid layout is [1, channels, *world_size]; queries are world
    coordinates inside the [xyz_min, xyz_max] bounding box.
    """
    def __init__(self, channels, world_size, xyz_min, xyz_max, **kwargs):
        super(DenseGrid, self).__init__()
        self.channels = channels
        self.world_size = world_size
        self.register_buffer('xyz_min', torch.Tensor(xyz_min))
        self.register_buffer('xyz_max', torch.Tensor(xyz_max))
        self.grid = nn.Parameter(torch.zeros([1, channels, *world_size]))

    def forward(self, xyz):
        '''
        xyz: global coordinates to query
        '''
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        # Normalize to [-1, 1]; flip to zyx for grid_sample's convention.
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        out = F.grid_sample(self.grid, ind_norm, mode='bilinear', align_corners=True)
        out = out.reshape(self.channels,-1).T.reshape(*shape,self.channels)
        if self.channels == 1:
            # Single-channel grids are returned with the channel dim squeezed.
            out = out.squeeze(-1)
        return out

    def scale_volume_grid(self, new_world_size):
        # Progressive scaling: resample the learned grid at a new resolution.
        if self.channels == 0:
            self.grid = nn.Parameter(torch.zeros([1, self.channels, *new_world_size]))
        else:
            self.grid = nn.Parameter(
                F.interpolate(self.grid.data, size=tuple(new_world_size), mode='trilinear', align_corners=True))

    def total_variation_add_grad(self, wx, wy, wz, dense_mode, mask=None):
        '''Add gradients by total variation loss in-place'''
        if mask is None:
            total_variation_cuda.total_variation_add_grad(
                self.grid, self.grid.grad, wx, wy, wz, dense_mode)
        else:
            mask = mask.detach()
            # Broadcast a single-channel mask over all grid channels.
            if self.grid.size(1) > 1 and mask.size() != self.grid.size():
                mask = mask.repeat(1, self.grid.size(1), 1, 1, 1).contiguous()
            assert mask.size() == self.grid.size()
            total_variation_cuda.total_variation_add_grad_new(
                self.grid, self.grid.grad, mask.float(), wx, wy, wz, dense_mode)

    def get_dense_grid(self):
        return self.grid

    @torch.no_grad()
    def __isub__(self, val):
        # In-place `grid -= val` shortcut (no autograd tracking).
        self.grid.data -= val
        return self

    def extra_repr(self):
        return f'channels={self.channels}, world_size={self.world_size.tolist()}'
''' Vector-Matrix decomposited grid
See TensoRF: Tensorial Radiance Fields (https://arxiv.org/abs/2203.09517)
'''
class TensoRFGrid(nn.Module):
    """Vector-Matrix decomposed grid (TensoRF): three planes plus three
    complementary line vectors, optionally mixed into `channels` outputs
    by a feature matrix f_vec.
    """
    def __init__(self, channels, world_size, xyz_min, xyz_max, config):
        super(TensoRFGrid, self).__init__()
        self.channels = channels
        self.world_size = world_size
        self.config = config
        self.register_buffer('xyz_min', torch.Tensor(xyz_min))
        self.register_buffer('xyz_max', torch.Tensor(xyz_max))
        X, Y, Z = world_size
        R = config['n_comp']
        # The xy-plane / z-vector pair may use its own rank.
        Rxy = config.get('n_comp_xy', R)
        self.xy_plane = nn.Parameter(torch.randn([1, Rxy, X, Y]) * 0.1)
        self.xz_plane = nn.Parameter(torch.randn([1, R, X, Z]) * 0.1)
        self.yz_plane = nn.Parameter(torch.randn([1, R, Y, Z]) * 0.1)
        self.x_vec = nn.Parameter(torch.randn([1, R, X, 1]) * 0.1)
        self.y_vec = nn.Parameter(torch.randn([1, R, Y, 1]) * 0.1)
        self.z_vec = nn.Parameter(torch.randn([1, Rxy, Z, 1]) * 0.1)
        if self.channels > 1:
            # Mixing matrix from concatenated components to output channels.
            self.f_vec = nn.Parameter(torch.ones([R+R+Rxy, channels]))
            nn.init.kaiming_uniform_(self.f_vec, a=np.sqrt(5))

    def forward(self, xyz):
        '''
        xyz: global coordinates to query
        '''
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,-1,3)
        ind_norm = (xyz - self.xyz_min) / (self.xyz_max - self.xyz_min) * 2 - 1
        # Pad a fourth zero coordinate so line vectors (width 1) can also
        # be looked up via 2D grid_sample.
        ind_norm = torch.cat([ind_norm, torch.zeros_like(ind_norm[...,[0]])], dim=-1)
        if self.channels > 1:
            out = compute_tensorf_feat(
                self.xy_plane, self.xz_plane, self.yz_plane,
                self.x_vec, self.y_vec, self.z_vec, self.f_vec, ind_norm)
            out = out.reshape(*shape,self.channels)
        else:
            out = compute_tensorf_val(
                self.xy_plane, self.xz_plane, self.yz_plane,
                self.x_vec, self.y_vec, self.z_vec, ind_norm)
            out = out.reshape(*shape)
        return out

    def scale_volume_grid(self, new_world_size):
        # Progressive scaling: resample every factor at the new resolution.
        if self.channels == 0:
            return
        X, Y, Z = new_world_size
        self.xy_plane = nn.Parameter(F.interpolate(self.xy_plane.data, size=[X,Y], mode='bilinear', align_corners=True))
        self.xz_plane = nn.Parameter(F.interpolate(self.xz_plane.data, size=[X,Z], mode='bilinear', align_corners=True))
        self.yz_plane = nn.Parameter(F.interpolate(self.yz_plane.data, size=[Y,Z], mode='bilinear', align_corners=True))
        self.x_vec = nn.Parameter(F.interpolate(self.x_vec.data, size=[X,1], mode='bilinear', align_corners=True))
        self.y_vec = nn.Parameter(F.interpolate(self.y_vec.data, size=[Y,1], mode='bilinear', align_corners=True))
        self.z_vec = nn.Parameter(F.interpolate(self.z_vec.data, size=[Z,1], mode='bilinear', align_corners=True))

    def total_variation_add_grad(self, wx, wy, wz, dense_mode):
        '''Add gradients by total variation loss in-place'''
        # Smooth-L1 TV over each factor; backward() accumulates into .grad.
        loss = wx * F.smooth_l1_loss(self.xy_plane[:,:,1:], self.xy_plane[:,:,:-1], reduction='sum') +\
               wy * F.smooth_l1_loss(self.xy_plane[:,:,:,1:], self.xy_plane[:,:,:,:-1], reduction='sum') +\
               wx * F.smooth_l1_loss(self.xz_plane[:,:,1:], self.xz_plane[:,:,:-1], reduction='sum') +\
               wz * F.smooth_l1_loss(self.xz_plane[:,:,:,1:], self.xz_plane[:,:,:,:-1], reduction='sum') +\
               wy * F.smooth_l1_loss(self.yz_plane[:,:,1:], self.yz_plane[:,:,:-1], reduction='sum') +\
               wz * F.smooth_l1_loss(self.yz_plane[:,:,:,1:], self.yz_plane[:,:,:,:-1], reduction='sum') +\
               wx * F.smooth_l1_loss(self.x_vec[:,:,1:], self.x_vec[:,:,:-1], reduction='sum') +\
               wy * F.smooth_l1_loss(self.y_vec[:,:,1:], self.y_vec[:,:,:-1], reduction='sum') +\
               wz * F.smooth_l1_loss(self.z_vec[:,:,1:], self.z_vec[:,:,:-1], reduction='sum')
        loss /= 6
        loss.backward()

    def get_dense_grid(self):
        # Expand the factorized representation into a full dense grid.
        if self.channels > 1:
            feat = torch.cat([
                torch.einsum('rxy,rz->rxyz', self.xy_plane[0], self.z_vec[0,:,:,0]),
                torch.einsum('rxz,ry->rxyz', self.xz_plane[0], self.y_vec[0,:,:,0]),
                torch.einsum('ryz,rx->rxyz', self.yz_plane[0], self.x_vec[0,:,:,0]),
            ])
            grid = torch.einsum('rxyz,rc->cxyz', feat, self.f_vec)[None]
        else:
            grid = torch.einsum('rxy,rz->xyz', self.xy_plane[0], self.z_vec[0,:,:,0]) + \
                   torch.einsum('rxz,ry->xyz', self.xz_plane[0], self.y_vec[0,:,:,0]) + \
                   torch.einsum('ryz,rx->xyz', self.yz_plane[0], self.x_vec[0,:,:,0])
            grid = grid[None,None]
        return grid

    def extra_repr(self):
        return f'channels={self.channels}, world_size={self.world_size.tolist()}, n_comp={self.config["n_comp"]}'
def compute_tensorf_feat(xy_plane, xz_plane, yz_plane, x_vec, y_vec, z_vec, f_vec, ind_norm):
    """Gather multi-channel VM-decomposition features at the query points.

    @ind_norm: [1, 1, M, 4] normalized coords (x, y, z, 0).
    Returns [M, channels] features after mixing with f_vec.
    """
    def _interp(grid, cols):
        # 2D bilinear lookup, transposed to [n_pts, n_comp].
        return F.grid_sample(grid, ind_norm[:, :, :, cols], mode='bilinear', align_corners=True).flatten(0, 2).T

    xy_feat = _interp(xy_plane, [1, 0])
    xz_feat = _interp(xz_plane, [2, 0])
    yz_feat = _interp(yz_plane, [2, 1])
    x_feat = _interp(x_vec, [3, 0])
    y_feat = _interp(y_vec, [3, 1])
    z_feat = _interp(z_vec, [3, 2])
    # Plane times complementary line, concatenated over the three pairs.
    gathered = torch.cat([
        xy_feat * z_feat,
        xz_feat * y_feat,
        yz_feat * x_feat,
    ], dim=-1)
    return torch.mm(gathered, f_vec)
def compute_tensorf_val(xy_plane, xz_plane, yz_plane, x_vec, y_vec, z_vec, ind_norm):
    """Scalar (single-channel) VM-decomposition lookup.

    @ind_norm: [1, 1, M, 4] normalized coords (x, y, z, 0).
    Returns a [M] tensor.
    """
    def _interp(grid, cols):
        # 2D bilinear lookup, transposed to [n_pts, n_comp].
        return F.grid_sample(grid, ind_norm[:, :, :, cols], mode='bilinear', align_corners=True).flatten(0, 2).T

    xy_feat = _interp(xy_plane, [1, 0])
    xz_feat = _interp(xz_plane, [2, 0])
    yz_feat = _interp(yz_plane, [2, 1])
    x_feat = _interp(x_vec, [3, 0])
    y_feat = _interp(y_vec, [3, 1])
    z_feat = _interp(z_vec, [3, 2])
    # Sum of rank-R plane x line products yields the scalar field value.
    return (xy_feat * z_feat).sum(-1) + (xz_feat * y_feat).sum(-1) + (yz_feat * x_feat).sum(-1)
''' Mask grid
It supports query for the known free space and unknown space.
'''
class MaskGrid(nn.Module):
    """Binary occupancy grid for skipping known free space.

    Either distilled from a saved coarse checkpoint (`path`) by
    thresholding the pooled density's alpha, or built directly from a
    boolean `mask` with its bounding box [xyz_min, xyz_max].
    """
    def __init__(self, path=None, mask_cache_thres=None, mask=None, xyz_min=None, xyz_max=None):
        super(MaskGrid, self).__init__()
        if path is not None:
            st = torch.load(path)
            self.mask_cache_thres = mask_cache_thres
            # Max-pool the density; presumably dilates occupied regions so
            # the mask stays conservative — confirm.
            density = F.max_pool3d(st['model_state_dict']['density.grid'], kernel_size=3, padding=1, stride=1)
            alpha = 1 - torch.exp(-F.softplus(density + st['model_state_dict']['act_shift']) * st['model_kwargs']['voxel_size_ratio'])
            mask = (alpha >= self.mask_cache_thres).squeeze(0).squeeze(0)
            xyz_min = torch.Tensor(st['model_kwargs']['xyz_min'])
            xyz_max = torch.Tensor(st['model_kwargs']['xyz_max'])
        else:
            mask = mask.bool()
            xyz_min = torch.Tensor(xyz_min)
            xyz_max = torch.Tensor(xyz_max)
        self.register_buffer('mask', mask)
        xyz_len = xyz_max - xyz_min
        # Affine map from world xyz to fractional grid indices (ijk).
        self.register_buffer('xyz2ijk_scale', (torch.Tensor(list(mask.shape)) - 1) / xyz_len)
        self.register_buffer('xyz2ijk_shift', -xyz_min * self.xyz2ijk_scale)

    @torch.no_grad()
    def forward(self, xyz):
        '''Skip known free space.
        @xyz: [..., 3] the xyz in global coordinate.
        '''
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(-1, 3)
        mask = render_utils_cuda.maskcache_lookup(self.mask, xyz, self.xyz2ijk_scale, self.xyz2ijk_shift)
        mask = mask.reshape(shape)
        return mask

    def extra_repr(self):
        # Bug fix: the braces were missing, so the literal text
        # 'mask.shape=list(self.mask.shape)' was printed instead of the shape.
        return f'mask.shape={list(self.mask.shape)}'
| 11,636 | 46.11336 | 134 | py |
Voxurf | Voxurf-main/lib/load_data.py | import numpy as np
import os
from .load_blender import load_blender_data
from .load_nsvf import load_nsvf_data
from .load_blendedmvs import load_blendedmvs_data
from .load_tankstemple import load_tankstemple_data, load_tankstemple_data_bound
from .load_nerfpp import load_nerfpp_data
from .load_deepvoxels import load_dv_data
from .load_dtu import load_dtu_data
from .load_volsdf_bmvs import load_vbmvs_data
from .load_co3d import load_co3d_data
from .load_scannet import load_scannet_data
from .load_llff import load_llff_data
from .load_mobilebrick import load_mobilebrick_data
def load_data(args, reso_level=2, train_all=True, wmask=True, white_bg=True):
    """Load a dataset selected by ``args.dataset_type`` into a unified dict.

    Args:
        args: parsed config namespace; which attributes are read depends on
            the dataset type (``datadir``, ``white_bkgd``, ``testskip``, ...).
        reso_level: downsampling level forwarded to loaders that support it.
        train_all: if True, some loaders put every image into the train split.
        wmask: if True, guarantee a foreground ``masks`` entry in the result.
        white_bg: forwarded to loaders that composite onto white.

    Returns:
        dict with intrinsics (``hwf``/``HW``/``Ks``), ``near``/``far`` bounds,
        index splits, camera poses, render poses, images and optional
        ``depths``/``masks``/``scale_mats_np`` (plus ``normals`` for scannet).
    """
    print("[ resolution level {} | train all {} | wmask {} | white_bg {}]".format(reso_level, train_all, wmask, white_bg))
    K, depths = None, None
    scale_mats_np = None
    masks = None

    if args.dataset_type == 'llff':
        images, depths, poses, bds, render_poses, i_test = load_llff_data(
            args.datadir, args.factor,
            recenter=True, bd_factor=.75,
            spherify=args.spherify,
            load_depths=args.load_depths)
        hwf = poses[0,:3,-1]
        poses = poses[:,:3,:4]
        print('Loaded llff', images.shape, render_poses.shape, hwf, args.datadir)
        if not isinstance(i_test, list):
            i_test = [i_test]

        # Every llffhold-th image is held out for val/test.
        if args.llffhold > 0:
            print('Auto LLFF holdout,', args.llffhold)
            i_test = np.arange(images.shape[0])[::args.llffhold]

        i_val = i_test
        i_train = np.array([i for i in np.arange(int(images.shape[0])) if
                            (i not in i_test and i not in i_val)])

        print('DEFINING BOUNDS')
        if args.ndc:
            near = 0.
            far = 1.
        else:
            near = np.ndarray.min(bds) * .9
            far = np.ndarray.max(bds) * 1.
        print('NEAR FAR', near, far)

    elif args.dataset_type == 'blender':
        images, poses, render_poses, hwf, i_split = load_blender_data(args.datadir, args.half_res, args.testskip)
        print('Loaded blender', images.shape, render_poses.shape, hwf, args.datadir)
        i_train, i_val, i_test = i_split
        near, far = 2., 6.

        # Composite RGBA onto a white or black background.
        if images.shape[-1] == 4:
            if args.white_bkgd:
                images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
            else:
                images = images[...,:3]*images[...,-1:]

    elif args.dataset_type == 'blendedmvs':
        images, poses, render_poses, hwf, K, i_split = load_blendedmvs_data(args.datadir)
        print('Loaded blendedmvs', images.shape, render_poses.shape, hwf, args.datadir)
        i_train, i_val, i_test = i_split

        near, far = inward_nearfar_heuristic(poses[i_train, :3, 3])
        assert images.shape[-1] == 3

    elif args.dataset_type == 'dtu':
        images, poses, render_poses, hwf, K, i_split, scale_mats_np, masks = load_dtu_data(args.datadir, reso_level=reso_level, mask=wmask, white_bg=white_bg)
        print('Loaded dtu', images.shape, render_poses.shape, hwf, args.datadir)
        i_train, i_val, i_test = i_split
        if train_all:
            i_train = np.arange(int(images.shape[0]))

        near, far = inward_nearfar_heuristic(poses[i_train, :3, 3])
        assert images.shape[-1] == 3

    elif args.dataset_type == 'scannet':
        images, poses, render_poses, hwf, K, i_split, scale_mats_np, masks, depths, normals = load_scannet_data(args.datadir)
        print('Loaded scannet', images.shape, render_poses.shape, hwf, args.datadir)
        i_train, i_val, i_test = i_split
        if train_all:
            i_train = np.arange(int(images.shape[0]))

        near, far = inward_nearfar_heuristic(poses[i_train, :3, 3])
        assert images.shape[-1] == 3

    elif args.dataset_type == 'volsdf_bmvs':
        images, poses, render_poses, hwf, K, i_split = load_vbmvs_data(args.datadir)
        print('Loaded dtu', images.shape, render_poses.shape, hwf, args.datadir)
        i_train, i_val, i_test = i_split

        near, far = inward_nearfar_heuristic(poses[i_train, :3, 3])
        assert images.shape[-1] == 3

    elif args.dataset_type == 'tankstemple':
        images, poses, render_poses, hwf, K, i_split = load_tankstemple_data_bound(
            args.datadir, movie_render_kwargs=args.movie_render_kwargs)
        print('Loaded tankstemple', images.shape, render_poses.shape, hwf,
              args.datadir)
        i_train, i_val, i_test = i_split

        near, far = inward_nearfar_heuristic(poses[i_train, :3, 3], ratio=0)

        if images.shape[-1] == 4:
            if args.white_bkgd:
                images = images[..., :3] * images[..., -1:] + (
                        1. - images[..., -1:])
            else:
                images = images[..., :3] * images[..., -1:]

    elif args.dataset_type == 'nsvf':
        images, poses, render_poses, hwf, i_split = load_nsvf_data(args.datadir)
        print('Loaded nsvf', images.shape, render_poses.shape, hwf, args.datadir)
        i_train, i_val, i_test = i_split

        near, far = inward_nearfar_heuristic(poses[i_train, :3, 3])

        if images.shape[-1] == 4:
            if args.white_bkgd:
                images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
            else:
                images = images[...,:3]*images[...,-1:]

    elif args.dataset_type == 'deepvoxels':
        # The last path component of datadir is the scene name for this loader.
        args.scene = args.datadir.split(os.sep)[-1]
        args.datadir = os.path.join(*args.datadir.split(os.sep)[:-1])
        images, poses, render_poses, hwf, i_split = load_dv_data(scene=args.scene, basedir=args.datadir, testskip=args.testskip)
        print('Loaded deepvoxels', images.shape, render_poses.shape, hwf, args.datadir)
        i_train, i_val, i_test = i_split

        # Cameras lie roughly on a hemisphere; bound by mean radius +/- 1.
        hemi_R = np.mean(np.linalg.norm(poses[:,:3,-1], axis=-1))
        near = hemi_R - 1
        far = hemi_R + 1
        assert args.white_bkgd
        assert images.shape[-1] == 3

    elif args.dataset_type == 'co3d':
        # each image can be in different shapes and intrinsics
        images, masks, poses, render_poses, hwf, K, i_split = load_co3d_data(args)
        print('Loaded co3d', args.datadir, args.annot_path, args.sequence_name)
        i_train, i_val, i_test = i_split

        near, far = inward_nearfar_heuristic(poses[i_train, :3, 3], ratio=0)

        for i in range(len(images)):
            if args.white_bkgd:
                images[i] = images[i] * masks[i][...,None] + (1.-masks[i][...,None])
            else:
                images[i] = images[i] * masks[i][...,None]

    elif args.dataset_type == 'nerfpp':
        images, poses, render_poses, hwf, K, i_split = load_nerfpp_data(args.datadir)
        print('Loaded nerf_pp', images.shape, hwf, args.datadir)
        i_train, i_val, i_test = i_split

        near_clip, far = inward_nearfar_heuristic(poses[i_train, :3, 3], ratio=0.02)
        near = 0

    elif args.dataset_type == "mobile_brick":
        images, poses, render_poses, hwf, K, i_split, scale_mats_np, masks = load_mobilebrick_data(args.datadir, reso_level=reso_level, mask=wmask, white_bg=white_bg)
        print('Loaded mobile_brick', images.shape, render_poses.shape, hwf, args.datadir)
        i_train, i_val, i_test = i_split
        if train_all:
            i_train = np.arange(int(images.shape[0]))

        near, far = inward_nearfar_heuristic(poses[i_train, :3, 3])
        assert images.shape[-1] == 3

    else:
        raise NotImplementedError(f'Unknown dataset type {args.dataset_type} exiting')

    # Cast intrinsics to right types
    H, W, focal = hwf
    H, W = int(H), int(W)
    hwf = [H, W, focal]
    HW = np.array([im.shape[:2] for im in images])
    # Object-dtype arrays mean the per-image shapes differ (e.g. co3d).
    irregular_shape = (images.dtype is np.dtype('object'))

    # Default pinhole intrinsics with the principal point at the image center.
    if K is None:
        K = np.array([
            [focal, 0, 0.5*W],
            [0, focal, 0.5*H],
            [0, 0, 1]
        ])

    # Broadcast a single K to one intrinsic matrix per pose.
    if len(K.shape) == 2:
        Ks = K[None].repeat(len(poses), axis=0)
    else:
        Ks = K

    # Keep only the 3x4 pose part (drop a trailing hwf column if present).
    render_poses = render_poses[...,:4]

    print("Split: train {} | validate {} | test {}".format(
        len(i_train), len(i_val), len(i_test)))
    print('near, far: ', near, far)

    # Fallback mask: pixels whose mean channel intensity is nonzero.
    if wmask and masks is None:
        masks = images.mean(-1) > 0

    data_dict = dict(
        hwf=hwf, HW=HW, Ks=Ks, near=near, far=far,
        i_train=i_train, i_val=i_val, i_test=i_test,
        poses=poses, render_poses=render_poses,
        images=images, depths=depths,
        irregular_shape=irregular_shape,
        scale_mats_np=scale_mats_np,
        masks=masks
    )
    if args.dataset_type == 'scannet':
        data_dict['depths'] = depths
        data_dict['normals'] = normals
    return data_dict
def inward_nearfar_heuristic(cam_o, ratio=0.05):
    """Heuristic near/far bounds for inward-facing captures.

    ``far`` is the largest pairwise distance between camera origins;
    ``near`` is that distance scaled by ``ratio``.
    """
    pairwise = np.linalg.norm(cam_o[:, None] - cam_o, axis=-1)
    farthest = pairwise.max()
    return ratio * farthest, farthest
| 8,870 | 37.737991 | 166 | py |
Voxurf | Voxurf-main/lib/load_blender.py | import os
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
# Rigid-body building blocks for spherical camera paths (4x4 float32 tensors).

def trans_t(t):
    """Translation by ``t`` along +z."""
    m = torch.eye(4)
    m[2, 3] = t
    return m.float()

def rot_phi(phi):
    """Rotation by ``phi`` radians about the x axis."""
    c, s = np.cos(phi), np.sin(phi)
    m = torch.eye(4)
    m[1, 1], m[1, 2] = c, -s
    m[2, 1], m[2, 2] = s, c
    return m.float()

def rot_theta(th):
    """Rotation by ``th`` radians about the y axis."""
    c, s = np.cos(th), np.sin(th)
    m = torch.eye(4)
    m[0, 0], m[0, 2] = c, -s
    m[2, 0], m[2, 2] = s, c
    return m.float()
def pose_spherical(theta, phi, radius):
    """Camera-to-world pose on a sphere of ``radius`` (angles in degrees)."""
    # Axis-swap that converts to the dataset's world convention.
    flip = torch.Tensor(np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
    pose = trans_t(radius)
    pose = rot_phi(phi / 180. * np.pi) @ pose
    pose = rot_theta(theta / 180. * np.pi) @ pose
    return flip @ pose
def load_blender_data(basedir, half_res=False, testskip=1):
    """Load a synthetic NeRF (blender) scene.

    Returns RGBA images scaled to [0, 1], 4x4 camera-to-world poses, a ring
    of render poses, ``[H, W, focal]``, and the train/val/test index split.
    ``testskip`` subsamples the val/test frames; ``half_res`` halves the
    resolution (and focal length).
    """
    splits = ['train', 'val', 'test']
    metas = {}
    for s in splits:
        with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:
            metas[s] = json.load(fp)

    all_imgs = []
    all_poses = []
    counts = [0]
    for s in splits:
        meta = metas[s]
        imgs = []
        poses = []
        if s=='train' or testskip==0:
            skip = 1
        else:
            skip = testskip

        for frame in meta['frames'][::skip]:
            fname = os.path.join(basedir, frame['file_path'] + '.png')
            imgs.append(imageio.imread(fname))
            poses.append(np.array(frame['transform_matrix']))
        imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA)
        poses = np.array(poses).astype(np.float32)
        # Running image count marks the boundaries between splits.
        counts.append(counts[-1] + imgs.shape[0])
        all_imgs.append(imgs)
        all_poses.append(poses)

    i_split = [np.arange(counts[i], counts[i+1]) for i in range(3)]

    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate(all_poses, 0)

    H, W = imgs[0].shape[:2]
    # ``meta`` still refers to the last split here; the field-of-view is
    # presumably identical across splits — TODO confirm for new datasets.
    camera_angle_x = float(meta['camera_angle_x'])
    focal = .5 * W / np.tan(.5 * camera_angle_x)

    # 40 poses on a circle at elevation -30 degrees, radius 4.
    render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)

    if half_res:
        H = H//2
        W = W//2
        focal = focal/2.

        imgs_half_res = np.zeros((imgs.shape[0], H, W, 4))
        for i, img in enumerate(imgs):
            imgs_half_res[i] = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA)
        imgs = imgs_half_res
        # imgs = tf.image.resize_area(imgs, [400, 400]).numpy()

    return imgs, poses, render_poses, [H, W, focal], i_split
| 2,553 | 27.065934 | 115 | py |
Voxurf | Voxurf-main/lib/load_llff.py | import numpy as np
import os, imageio
import glob
import shutil
########## Slightly modified version of LLFF data loading code
########## see https://github.com/Fyusion/LLFF for original
def imread(f):
    """Read an image; disable gamma correction for PNG files."""
    kwargs = {'ignoregamma': True} if f.endswith('png') else {}
    return imageio.imread(f, **kwargs)
def depthread(path):
    """Read a COLMAP-style binary depth map.

    The file starts with a text header ``"<width>&<height>&<channels>&"``
    followed immediately by raw little-endian float32 data.
    """
    with open(path, "rb") as fid:
        # Parse the three header integers from the first line.
        width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
                                                usecols=(0, 1, 2), dtype=int)
        # Rewind and skip forward past the third '&' so the file position
        # sits exactly at the start of the binary payload.
        fid.seek(0)
        num_delimiter = 0
        byte = fid.read(1)
        while True:
            if byte == b"&":
                num_delimiter += 1
                if num_delimiter >= 3:
                    break
            byte = fid.read(1)
        array = np.fromfile(fid, np.float32)
    # Data is stored column-major; reshape then swap to (height, width, ...).
    array = array.reshape((width, height, channels), order="F")
    return np.transpose(array, (1, 0, 2)).squeeze()
def _minify(basedir, factors=[], resolutions=[]):
    """Create downsampled copies of ``basedir/images`` with ImageMagick.

    For each integer in ``factors`` an ``images_<r>`` directory is produced;
    for each ``[height, width]`` in ``resolutions`` an ``images_<w>x<h>``
    directory. Existing directories are reused. (The list defaults are never
    mutated, so the mutable-default is harmless here.)
    """
    needtoload = False
    for r in factors:
        imgdir = os.path.join(basedir, 'images_{}'.format(r))
        if not os.path.exists(imgdir):
            needtoload = True
    for r in resolutions:
        imgdir = os.path.join(basedir, 'images_{}x{}'.format(r[1], r[0]))
        if not os.path.exists(imgdir):
            needtoload = True
    if not needtoload:
        return

    from shutil import copy
    from subprocess import check_output

    imgdir = os.path.join(basedir, 'images')
    imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))]
    imgs = [f for f in imgs if any([f.endswith(ex) for ex in ['JPG', 'jpg', 'png', 'jpeg', 'PNG']])]
    imgdir_orig = imgdir

    wd = os.getcwd()

    for r in factors + resolutions:
        if isinstance(r, int):
            name = 'images_{}'.format(r)
            resizearg = '{}%'.format(100./r)
        else:
            name = 'images_{}x{}'.format(r[1], r[0])
            resizearg = '{}x{}'.format(r[1], r[0])
        imgdir = os.path.join(basedir, name)
        if os.path.exists(imgdir):
            continue

        print('Minifying', r, basedir)

        # Copy the originals into the target dir, then resize them in place.
        os.makedirs(imgdir)
        [shutil.copy(f, os.path.join(imgdir, f.split(os.sep)[-1])) for f in glob.glob(os.path.join(imgdir_orig, '*'))]

        ext = imgs[0].split('.')[-1]
        # mogrify rewrites every image in the cwd; we chdir there and back.
        args = ' '.join(['mogrify', '-resize', resizearg, '-format', 'png', '*.{}'.format(ext)])
        print(args)
        os.chdir(imgdir)
        check_output(args, shell=True)
        os.chdir(wd)

        # mogrify -format png leaves the originals behind; remove them.
        if ext != 'png':
            [os.remove(f) for f in glob.glob(os.path.join(imgdir, f'*.{ext}'))]
            print('Removed duplicates')
        print('Done')
def _load_data(basedir, factor=None, width=None, height=None, load_imgs=True, load_depths=False):
    """Load LLFF ``poses_bounds.npy`` plus (optionally downsampled) images.

    Returns ``poses`` (3x5xN), ``bds`` (2xN), and — depending on the flags —
    the image stack and depth maps. Returns ``None`` early if the expected
    image directory is missing or counts mismatch.
    """
    poses_arr = np.load(os.path.join(basedir, 'poses_bounds.npy'))
    # Each row is a flattened 3x5 pose (rotation|translation|hwf) + 2 bounds.
    poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1,2,0])
    bds = poses_arr[:, -2:].transpose([1,0])

    img0 = [os.path.join(basedir, 'images', f) for f in sorted(os.listdir(os.path.join(basedir, 'images'))) \
            if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')][0]
    sh = imageio.imread(img0).shape

    sfx = ''

    if factor is not None and factor != 1:
        sfx = '_{}'.format(factor)
        _minify(basedir, factors=[factor])
        factor = factor  # no-op kept from the original code
    elif height is not None:
        factor = sh[0] / float(height)
        width = int(sh[1] / factor)
        _minify(basedir, resolutions=[[height, width]])
        sfx = '_{}x{}'.format(width, height)
    elif width is not None:
        factor = sh[1] / float(width)
        height = int(sh[0] / factor)
        _minify(basedir, resolutions=[[height, width]])
        sfx = '_{}x{}'.format(width, height)
    else:
        factor = 1

    imgdir = os.path.join(basedir, 'images' + sfx)
    if not os.path.exists(imgdir):
        print( imgdir, 'does not exist, returning' )
        return

    imgfiles = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir)) if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')]
    if poses.shape[-1] != len(imgfiles):
        print( 'Mismatch between imgs {} and poses {} !!!!'.format(len(imgfiles), poses.shape[-1]) )
        return

    # Update the stored H, W and focal length to match the loaded resolution.
    sh = imageio.imread(imgfiles[0]).shape
    poses[:2, 4, :] = np.array(sh[:2]).reshape([2, 1])
    poses[2, 4, :] = poses[2, 4, :] * 1./factor

    if not load_imgs:
        return poses, bds

    # (duplicate assignment kept from the original code; it is harmless)
    imgs = imgs = [imread(f)[...,:3]/255. for f in imgfiles]
    imgs = np.stack(imgs, -1)

    print('Loaded image data', imgs.shape, poses[:,-1,0])

    if not load_depths:
        return poses, bds, imgs

    depthdir = os.path.join(basedir, 'stereo', 'depth_maps')
    assert os.path.exists(depthdir), f'Dir not found: {depthdir}'
    depthfiles = [os.path.join(depthdir, f) for f in sorted(os.listdir(depthdir)) if f.endswith('.geometric.bin')]
    assert poses.shape[-1] == len(depthfiles), 'Mismatch between imgs {} and poses {} !!!!'.format(len(depthfiles), poses.shape[-1])

    depths = [depthread(f) for f in depthfiles]
    depths = np.stack(depths, -1)
    print('Loaded depth data', depths.shape)
    return poses, bds, imgs, depths
def normalize(x):
    """Return ``x`` scaled to unit Euclidean length."""
    length = np.linalg.norm(x)
    return x / length
def viewmatrix(z, up, pos):
    """Build a 3x4 camera-to-world matrix from a forward axis, an up hint
    and a camera position (columns: right, up, forward, position)."""
    def _unit(v):
        return v / np.linalg.norm(v)
    forward = _unit(z)
    right = _unit(np.cross(up, forward))
    true_up = _unit(np.cross(forward, right))
    return np.stack([right, true_up, forward, pos], 1)
def ptstocam(pts, c2w):
    """Transform world-space points into the camera frame of ``c2w``."""
    rel = pts - c2w[:3, 3]
    return np.matmul(c2w[:3, :3].T, rel[..., np.newaxis])[..., 0]
def poses_avg(poses):
    """Average camera pose (3x5, hwf column included) over all frames."""
    hwf = poses[0, :3, -1:]
    centroid = poses[:, :3, 3].mean(0)
    forward = normalize(poses[:, :3, 2].sum(0))
    up = poses[:, :3, 1].sum(0)
    return np.concatenate([viewmatrix(forward, up, centroid), hwf], 1)
def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, rots, N):
    """Generate ``N`` poses along a spiral around the average pose ``c2w``,
    completing ``rots`` turns; each camera looks at a point ``focal`` ahead."""
    out = []
    rads = np.array(list(rads) + [1.])
    hwf = c2w[:, 4:5]

    for theta in np.linspace(0., 2. * np.pi * rots, N + 1)[:-1]:
        cam_pos = np.dot(c2w[:3, :4], np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.]) * rads)
        lookdir = normalize(cam_pos - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.])))
        out.append(np.concatenate([viewmatrix(lookdir, up, cam_pos), hwf], 1))
    return out
def recenter_poses(poses):
    """Re-express all poses relative to their average pose."""
    recentered = poses + 0  # copy; the hwf column passes through untouched
    bottom_row = np.reshape([0, 0, 0, 1.], [1, 4])
    avg = np.concatenate([poses_avg(poses)[:3, :4], bottom_row], -2)
    bottoms = np.tile(np.reshape(bottom_row, [1, 1, 4]), [poses.shape[0], 1, 1])
    homog = np.concatenate([poses[:, :3, :4], bottoms], -2)
    homog = np.linalg.inv(avg) @ homog
    recentered[:, :3, :4] = homog[:, :3, :4]
    return recentered
#####################
def spherify_poses(poses, bds, depths):
    """Recenter and rescale poses onto a unit sphere; build a circular path.

    Returns the reset poses, the new circular render poses, and the bounds
    and depths rescaled by the same factor.
    """
    # Append a [0,0,0,1] bottom row to each 3x4 pose.
    p34_to_44 = lambda p : np.concatenate([p, np.tile(np.reshape(np.eye(4)[-1,:], [1,1,4]), [p.shape[0], 1,1])], 1)

    rays_d = poses[:,:3,2:3]
    rays_o = poses[:,:3,3:4]

    def min_line_dist(rays_o, rays_d):
        # Least-squares point closest to all camera optical axes.
        A_i = np.eye(3) - rays_d * np.transpose(rays_d, [0,2,1])
        b_i = -A_i @ rays_o
        pt_mindist = np.squeeze(-np.linalg.inv((np.transpose(A_i, [0,2,1]) @ A_i).mean(0)) @ (b_i).mean(0))
        return pt_mindist

    pt_mindist = min_line_dist(rays_o, rays_d)

    center = pt_mindist
    up = (poses[:,:3,3] - center).mean(0)

    # Orthonormal frame centered at the scene center, z along the mean up.
    vec0 = normalize(up)
    vec1 = normalize(np.cross([.1,.2,.3], vec0))
    vec2 = normalize(np.cross(vec0, vec1))
    pos = center
    c2w = np.stack([vec1, vec2, vec0, pos], 1)

    poses_reset = np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:,:3,:4])

    rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:,:3,3]), -1)))

    # Rescale everything so the mean camera radius is 1 (in-place updates).
    sc = 1./rad
    poses_reset[:,:3,3] *= sc
    bds *= sc
    rad *= sc
    depths *= sc

    centroid = np.mean(poses_reset[:,:3,3], 0)
    zh = centroid[2]
    radcircle = np.sqrt(rad**2-zh**2)
    new_poses = []

    # Circle of 120 render cameras at height zh, all looking inward.
    for th in np.linspace(0.,2.*np.pi, 120):
        camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh])
        up = np.array([0,0,-1.])

        vec2 = normalize(camorigin)
        vec0 = normalize(np.cross(vec2, up))
        vec1 = normalize(np.cross(vec2, vec0))
        pos = camorigin
        p = np.stack([vec0, vec1, vec2, pos], 1)

        new_poses.append(p)

    new_poses = np.stack(new_poses, 0)

    # Re-attach the hwf column taken from the first input pose.
    new_poses = np.concatenate([new_poses, np.broadcast_to(poses[0,:3,-1:], new_poses[:,:3,-1:].shape)], -1)
    poses_reset = np.concatenate([poses_reset[:,:3,:4], np.broadcast_to(poses[0,:3,-1:], poses_reset[:,:3,-1:].shape)], -1)

    return poses_reset, new_poses, bds, depths
def load_llff_data(basedir, factor=8, recenter=True, bd_factor=.75, spherify=False, path_zflat=False, load_depths=False):
    """Load an LLFF scene and prepare render poses.

    Returns ``images``, ``depths`` (or 0 when not loaded), ``poses`` (Nx3x5),
    ``bds`` (Nx2), ``render_poses`` (spiral or spherified circle), and the
    index of the view closest to the average pose (used as holdout).
    """
    poses, bds, imgs, *depths = _load_data(basedir, factor=factor, load_depths=load_depths) # factor=8 downsamples original imgs by 8x
    print('Loaded', basedir, bds.min(), bds.max())
    if load_depths:
        depths = depths[0]
    else:
        depths = 0

    # Correct rotation matrix ordering and move variable dim to axis 0
    poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
    poses = np.moveaxis(poses, -1, 0).astype(np.float32)
    imgs = np.moveaxis(imgs, -1, 0).astype(np.float32)
    images = imgs
    bds = np.moveaxis(bds, -1, 0).astype(np.float32)

    # Rescale if bd_factor is provided
    sc = 1. if bd_factor is None else 1./(bds.min() * bd_factor)
    poses[:,:3,3] *= sc
    bds *= sc
    depths *= sc

    if recenter:
        poses = recenter_poses(poses)

    if spherify:
        poses, render_poses, bds, depths = spherify_poses(poses, bds, depths)

    else:
        c2w = poses_avg(poses)
        print('recentered', c2w.shape)
        print(c2w[:3,:4])

        ## Get spiral
        # Get average pose
        up = normalize(poses[:, :3, 1].sum(0))

        # Find a reasonable "focus depth" for this dataset
        close_depth, inf_depth = bds.min()*.9, bds.max()*5.
        dt = .75
        mean_dz = 1./(((1.-dt)/close_depth + dt/inf_depth))
        focal = mean_dz

        # Get radii for spiral path
        shrink_factor = .8
        zdelta = close_depth * .2
        tt = poses[:,:3,3] # ptstocam(poses[:3,3,:].T, c2w).T
        rads = np.percentile(np.abs(tt), 90, 0)
        c2w_path = c2w
        N_views = 120
        N_rots = 2
        if path_zflat:
            # zloc = np.percentile(tt, 10, 0)[2]
            zloc = -close_depth * .1
            c2w_path[:3,3] = c2w_path[:3,3] + zloc * c2w_path[:3,2]
            rads[2] = 0.
            N_rots = 1
            N_views/=2

        # Generate poses for spiral path
        render_poses = render_path_spiral(c2w_path, up, rads, focal, zdelta, zrate=.5, rots=N_rots, N=N_views)

    render_poses = np.array(render_poses).astype(np.float32)

    c2w = poses_avg(poses)
    print('Data:')
    print(poses.shape, images.shape, bds.shape)

    # Hold out the view whose position is closest to the average pose.
    dists = np.sum(np.square(c2w[:3,3] - poses[:,:3,3]), -1)
    i_test = np.argmin(dists)
    print('HOLDOUT view is', i_test)

    images = images.astype(np.float32)
    poses = poses.astype(np.float32)
    return images, depths, poses, bds, render_poses, i_test
| 11,205 | 31.108883 | 139 | py |
Voxurf | Voxurf-main/lib/load_deepvoxels.py | import os
import numpy as np
import imageio
def load_dv_data(scene='cube', basedir='/data/deepvoxels', testskip=1):
    """Load a DeepVoxels scene.

    Reads intrinsics from ``train/<scene>/intrinsics.txt`` and images/poses
    from the ``train``, ``validation`` and ``test`` subtrees. Returns images,
    poses, render poses (the test poses), ``[H, W, focal]`` and the split.
    """

    def parse_intrinsics(filepath, trgt_sidelength, invert_y=False):
        # Get camera intrinsics
        with open(filepath, 'r') as file:
            f, cx, cy = list(map(float, file.readline().split()))[:3]
            grid_barycenter = np.array(list(map(float, file.readline().split())))
            near_plane = float(file.readline())
            scale = float(file.readline())
            height, width = map(float, file.readline().split())

            try:
                world2cam_poses = int(file.readline())
            except ValueError:
                world2cam_poses = None

        if world2cam_poses is None:
            world2cam_poses = False

        world2cam_poses = bool(world2cam_poses)

        print(cx,cy,f,height,width)

        # Rescale intrinsics from the stored resolution to trgt_sidelength.
        cx = cx / width * trgt_sidelength
        cy = cy / height * trgt_sidelength
        f = trgt_sidelength / height * f

        fx = f
        if invert_y:
            fy = -f
        else:
            fy = f

        # Build the intrinsic matrices
        full_intrinsic = np.array([[fx, 0., cx, 0.],
                                   [0., fy, cy, 0],
                                   [0., 0, 1, 0],
                                   [0, 0, 0, 1]])

        return full_intrinsic, grid_barycenter, scale, near_plane, world2cam_poses

    def load_pose(filename):
        # A pose file is 16 whitespace-separated floats (row-major 4x4).
        assert os.path.isfile(filename)
        nums = open(filename).read().split()
        return np.array([float(x) for x in nums]).reshape([4,4]).astype(np.float32)

    H = 512
    W = 512
    deepvoxels_base = '{}/train/{}/'.format(basedir, scene)

    full_intrinsic, grid_barycenter, scale, near_plane, world2cam_poses = parse_intrinsics(os.path.join(deepvoxels_base, 'intrinsics.txt'), H)
    print(full_intrinsic, grid_barycenter, scale, near_plane, world2cam_poses)

    focal = full_intrinsic[0,0]
    print(H, W, focal)

    def dir2poses(posedir):
        # Load all pose files in a directory and flip the y/z axes.
        poses = np.stack([load_pose(os.path.join(posedir, f)) for f in sorted(os.listdir(posedir)) if f.endswith('txt')], 0)
        transf = np.array([
            [1,0,0,0],
            [0,-1,0,0],
            [0,0,-1,0],
            [0,0,0,1.],
        ])
        poses = poses @ transf
        poses = poses[:,:3,:4].astype(np.float32)
        return poses

    posedir = os.path.join(deepvoxels_base, 'pose')
    poses = dir2poses(posedir)
    testposes = dir2poses('{}/test/{}/pose'.format(basedir, scene))
    testposes = testposes[::testskip]
    valposes = dir2poses('{}/validation/{}/pose'.format(basedir, scene))
    valposes = valposes[::testskip]

    imgfiles = [f for f in sorted(os.listdir(os.path.join(deepvoxels_base, 'rgb'))) if f.endswith('png')]
    imgs = np.stack([imageio.imread(os.path.join(deepvoxels_base, 'rgb', f))/255. for f in imgfiles], 0).astype(np.float32)

    testimgd = '{}/test/{}/rgb'.format(basedir, scene)
    imgfiles = [f for f in sorted(os.listdir(testimgd)) if f.endswith('png')]
    testimgs = np.stack([imageio.imread(os.path.join(testimgd, f))/255. for f in imgfiles[::testskip]], 0).astype(np.float32)

    valimgd = '{}/validation/{}/rgb'.format(basedir, scene)
    imgfiles = [f for f in sorted(os.listdir(valimgd)) if f.endswith('png')]
    valimgs = np.stack([imageio.imread(os.path.join(valimgd, f))/255. for f in imgfiles[::testskip]], 0).astype(np.float32)

    # Order is train, val, test — both for the split indices and the stacks.
    all_imgs = [imgs, valimgs, testimgs]
    counts = [0] + [x.shape[0] for x in all_imgs]
    counts = np.cumsum(counts)
    i_split = [np.arange(counts[i], counts[i+1]) for i in range(3)]

    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate([poses, valposes, testposes], 0)

    render_poses = testposes

    print(poses.shape, imgs.shape)

    return imgs, poses, render_poses, [H, W, focal], i_split
| 3,844 | 34.601852 | 142 | py |
Voxurf | Voxurf-main/lib/load_volsdf_bmvs.py | import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import imageio
# This function is borrowed from IDR: https://github.com/lioryariv/idr
def load_K_Rt_from_P(filename, P=None):
    """Decompose a 3x4 projection matrix into intrinsics and camera pose.

    When ``P`` is None it is parsed from ``filename`` (space-separated rows,
    skipping one header line if the file has four lines). Returns a 4x4
    intrinsics matrix (normalized so K[2,2] == 1) and a 4x4 pose.
    """
    if P is None:
        rows = open(filename).read().splitlines()
        if len(rows) == 4:
            rows = rows[1:]
        # Keep only the first four tokens of each row.
        rows = [[tok[0], tok[1], tok[2], tok[3]] for tok in (row.split(" ") for row in rows)]
        P = np.asarray(rows).astype(np.float32).squeeze()

    K, R, t = cv.decomposeProjectionMatrix(P)[:3]
    K = K / K[2, 2]

    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K

    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    pose[:3, 3] = (t[:3] / t[3])[:, 0]

    return intrinsics, pose
def load_vbmvs_data(basedir, normallize=False, reso_level=1, mask=False):
    """Load a VolSDF-style BlendedMVS scene (``cameras.npz`` + image/mask dirs).

    The ``normallize`` spelling is kept as-is for caller compatibility; when
    True, world matrices are composed with the per-view scale matrices.
    Every 6th view doubles as the val/test split and the render path.
    """
    rgb_paths = sorted(glob(os.path.join(basedir, 'image', '*jpg')))
    mask_paths = sorted(glob(os.path.join(basedir, 'mask', '*png')))

    render_cameras_name = 'cameras.npz'
    camera_dict = np.load(os.path.join(basedir, render_cameras_name))
    world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    if normallize:
        scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    else:
        scale_mats_np = None

    all_intrinsics = []
    all_poses = []
    all_imgs = []
    all_masks = []
    for i, (world_mat, im_name) in enumerate(zip(world_mats_np, rgb_paths)):
        if normallize:
            P = world_mat @ scale_mats_np[i]
        else:
            P = world_mat
        P = P[:3, :4]
        intrinsics, pose = load_K_Rt_from_P(None, P)
        all_intrinsics.append(intrinsics)
        all_poses.append(pose)
        # all_poses.append(P)
        if len(mask_paths) > 0:
            all_masks.append((imageio.imread(mask_paths[i]) / 255.).astype(np.float32))
        # all_imgs.append(cv.imread(im_name)/255)
        all_imgs.append((imageio.imread(im_name) / 255.).astype(np.float32))
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    # Apply the foreground mask directly to the pixels when requested.
    if mask:
        assert len(mask_paths) > 0
        masks = np.stack(all_masks, 0)
        imgs = imgs * masks
    H, W = imgs[0].shape[:2]
    if reso_level > 1:
        H, W = H//reso_level, W//reso_level
        imgs = F.interpolate(torch.from_numpy(imgs).permute(0,3,1,2), size=(H, W)).permute(0,2,3,1).numpy()
    # Intrinsics are assumed shared across views; the first one is used.
    K = all_intrinsics[0]
    focal = all_intrinsics[0][0,0] / reso_level
    i_split = [np.arange(len(imgs)), np.arange(len(imgs))[::6], np.arange(len(imgs))[::6]]
    render_poses = poses[i_split[-1]]
    return imgs, poses, render_poses, [H, W, focal], K, i_split
tesserocr | tesserocr-master/setup.py | import codecs
import errno
import glob
import itertools
import logging
import os
import re
import subprocess
import sys
from os.path import abspath, dirname
from os.path import join as pjoin
from os.path import split as psplit
from setuptools import setup
from setuptools.command.build_ext import build_ext
from setuptools.extension import Extension
# Module-level build logger; DEBUG env var raises verbosity.
_LOGGER = logging.getLogger()
if os.environ.get('DEBUG'):
    _LOGGER.setLevel(logging.DEBUG)
else:
    _LOGGER.setLevel(logging.INFO)
_LOGGER.addHandler(logging.StreamHandler(sys.stderr))

# Oldest tesseract release this wrapper supports.
_TESSERACT_MIN_VERSION = '3.04.00'
# Filled by make_extension(); consumed by my_build_ext.finalize_options().
_CYTHON_COMPILE_TIME_ENV = None

# find_version from pip https://github.com/pypa/pip/blob/1.5.6/setup.py#L33
here = abspath(dirname(__file__))

# Per-compiler extra flags applied when tesseract >= 3.05.02.
# NOTE(review): '/std:c11' looks unusual for an MSVC C++ build (documented
# values are e.g. '/std:c++14') — confirm the intended flag.
EXTRA_COMPILE_ARGS = {
    'msvc': ['/std:c11', '-DUSE_STD_NAMESPACE'],
    'gcc': ['-std=c++11', '-DUSE_STD_NAMESPACE'],
}
def read(*parts):
    """Return the text of the file located at ``here``/``parts``."""
    path = pjoin(here, *parts)
    return codecs.open(path, 'r').read()
def find_version(*file_paths):
    """Extract the ``__version__`` string from a source file.

    Raises:
        RuntimeError: when no ``__version__`` assignment is found.
    """
    contents = read(*file_paths)
    match = re.search('^__version__ = [\'"]([^\'"]*)[\'"]', contents, re.M)
    if match is None:
        raise RuntimeError('Unable to find version string.')
    return match.group(1)
if sys.version_info >= (3, 0):
def _read_string(s):
return s.decode('UTF-8')
else:
def _read_string(s):
return s
def major_version(version):
    """Return the leading component of a dotted version string as an int."""
    major = int(version.split('.')[0])
    _LOGGER.info('Tesseract major version %s', major)
    return major
def version_to_int(version):
    """Encode a tesseract version string as a single comparable integer.

    The three numeric components are zero-padded to two digits each and the
    concatenation is read as hexadecimal. Alpha/beta releases subtract 2/1
    so pre-releases sort below the final release, and the pre-release number
    becomes a trailing two-digit field.
    """
    prerelease = None
    penalty = 0
    # Subtracts a certain amount from the version number to differentiate
    # between alpha, beta and release versions.
    for tag, offset in (('alpha', 2), ('beta', 1)):
        if tag in version:
            prerelease = version.split(tag)[1]
            penalty = offset
            break
    core = re.search(r'((?:\d+\.)+\d+)', version).group()
    # Pad each group to two digits; tolerate missing trailing groups.
    parts = (core.split('.') + [0, 0])[:3]
    digits = '{:02}{:02}{:02}'.format(*map(int, parts))
    digits = str(int(digits, 10) - penalty)
    # Two-digit sub-version field for alpha/beta release numbers.
    suffix = '00'
    if prerelease:
        num = re.search(r'(?:\d+)', prerelease).group()
        suffix = '{:02}'.format(int((num.split('-') + [0, 0])[:1][0]))
    digits += suffix
    return int(digits, 16)
def package_config():
    """Use pkg-config to get library build parameters and tesseract version.

    Raises:
        Exception: when pkg-config reports tesseract missing or too old.
    """
    # First make sure tesseract exists and meets the minimum version.
    p = subprocess.Popen(
        [
            'pkg-config',
            '--exists',
            '--atleast-version={}'.format(_TESSERACT_MIN_VERSION),
            '--print-errors',
            'tesseract',
        ],
        stderr=subprocess.PIPE,
    )
    _, error = p.communicate()
    if p.returncode != 0:
        if isinstance(error, bytes):
            error = error.decode()
        raise Exception(error)
    # Collect link/compile flags for tesseract and leptonica.
    p = subprocess.Popen(
        ['pkg-config', '--libs', '--cflags', 'tesseract'], stdout=subprocess.PIPE
    )
    output, _ = p.communicate()
    flags = _read_string(output).strip().split()
    p = subprocess.Popen(
        ['pkg-config', '--libs', '--cflags', 'lept'], stdout=subprocess.PIPE
    )
    output, _ = p.communicate()
    flags2 = _read_string(output).strip().split()
    # Map -L/-I/-l flags to distutils Extension keyword arguments.
    options = {'-L': 'library_dirs', '-I': 'include_dirs', '-l': 'libraries'}
    config = {'library_dirs': [], 'include_dirs': [], 'libraries': []}
    for f in itertools.chain(flags, flags2):
        try:
            opt = options[f[:2]]
        except KeyError:
            continue
        val = f[2:]
        # Use the parent dir when the include path ends in the package name.
        if opt == 'include_dirs' and psplit(val)[1].strip(os.sep) in (
            'leptonica',
            'tesseract',
        ):
            val = dirname(val)
        config[opt] += [val]
    p = subprocess.Popen(
        ['pkg-config', '--modversion', 'tesseract'], stdout=subprocess.PIPE
    )
    version, _ = p.communicate()
    version = _read_string(version).strip()
    _LOGGER.info('Supporting tesseract v%s', version)
    config['compile_time_env'] = {
        'TESSERACT_MAJOR_VERSION': major_version(version),
        'TESSERACT_VERSION': version_to_int(version)
    }
    _LOGGER.info('Configs from pkg-config: %s', config)
    return config
def find_library(pattern, path_list, version=''):
    """Glob ``pattern`` under each directory in ``path_list``.

    Debug libraries (``*d.lib``) are dropped; when ``version`` is given,
    only paths containing that substring are kept.
    """
    matches = []
    for directory in path_list:
        matches.extend(glob.glob(os.path.join(directory, pattern)))
    # ignore debug library
    candidates = [m for m in matches if not m.endswith('d.lib')]
    if version:
        candidates = [m for m in candidates if version in m]
    return candidates
def get_tesseract_version():
    """Try to extract version from tesseract otherwise default min version.

    Runs ``tesseract -v`` and parses the version; on Windows additionally
    resolves library/include paths from the LIBPATH and INCLUDE env vars.

    Raises:
        RuntimeError: on win32 when tesseract/leptonica .lib files are not
            found in LIBPATH.
    """
    config = {'libraries': ['tesseract', 'lept']}
    try:
        p = subprocess.Popen(
            ['tesseract', '-v'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
        )
        stdout_version, version = p.communicate()
        version = _read_string(version).strip()
        # Some builds print the version on stdout instead of stderr.
        if version == '':
            version = _read_string(stdout_version).strip()
        version_match = re.search(r'^tesseract ((?:\d+\.)+\d+).*', version, re.M)
        if version_match:
            version = version_match.group(1)
        else:
            _LOGGER.warning(
                'Failed to extract tesseract version number from: %s', version
            )
            version = _TESSERACT_MIN_VERSION
    except OSError as e:
        _LOGGER.warning('Failed to extract tesseract version from executable: %s', e)
        version = _TESSERACT_MIN_VERSION
    _LOGGER.info('Supporting tesseract v%s', version)
    config['compile_time_env'] = {
        'TESSERACT_MAJOR_VERSION': major_version(version),
        'TESSERACT_VERSION': version_to_int(version)
    }
    if sys.platform == 'win32':
        libpaths = os.getenv('LIBPATH', None)
        if libpaths:
            libpaths = list(filter(None, libpaths.split(';')))
        else:
            libpaths = []
        # e.g. '4.1.1' -> '41', the suffix used in Windows .lib names.
        if version:
            lib_version = ''.join(version.split('.')[:2])
        else:
            lib_version = None
        tess_lib = find_library('tesseract*.lib', libpaths, lib_version)
        if len(tess_lib) >= 1:
            # Highest-sorting match wins; strip dir and extension.
            base = os.path.basename(sorted(tess_lib, reverse=True)[0])
            tess_lib = os.path.splitext(base)[0]
        else:
            error = 'Tesseract library not found in LIBPATH: {}'.format(libpaths)
            raise RuntimeError(error)
        lept_lib = find_library('lept*.lib', libpaths)
        if len(lept_lib) >= 1:
            base = os.path.basename(sorted(lept_lib, reverse=True)[0])
            lept_lib = os.path.splitext(base)[0]
        else:
            error = 'Leptonica library not found in LIBPATH: {}'.format(libpaths)
            raise RuntimeError(error)
        includepaths = os.getenv('INCLUDE', None)
        if includepaths:
            includepaths = list(filter(None, includepaths.split(';')))
        else:
            includepaths = []
        config['libraries'] = [tess_lib, lept_lib]
        config['library_dirs'] = libpaths
        config['include_dirs'] = includepaths
    _LOGGER.info('Building with configs: %s', config)
    return config
def get_build_args():
    """Return build parameters, preferring pkg-config over the tesseract binary."""
    try:
        build = package_config()
    except OSError as exc:
        # A missing pkg-config binary (ENOENT) is expected; stay quiet then.
        if exc.errno != errno.ENOENT:
            _LOGGER.warning('Failed to run pkg-config: %s', exc)
        build = get_tesseract_version()
    except Exception as exc:
        _LOGGER.warning(
            'pkg-config failed to find tesseract/leptonica libraries: %s', exc
        )
        build = get_tesseract_version()
    _LOGGER.debug('build parameters: %s', build)
    return build
def make_extension():
    """Build the tesserocr Extension and stash the Cython compile-time env."""
    global _CYTHON_COMPILE_TIME_ENV
    build_args = get_build_args()
    # compile_time_env is consumed by cythonize (see my_build_ext), not by
    # Extension, so it is popped out of the kwargs here.
    _CYTHON_COMPILE_TIME_ENV = build_args.pop('compile_time_env')
    return Extension(
        'tesserocr', sources=['tesserocr.pyx'], language='c++', **build_args
    )
class my_build_ext(build_ext, object):
    """build_ext subclass that injects compiler flags and runs cythonize."""

    def build_extensions(self):
        # Pick per-compiler flags; anything that is not MSVC gets the gcc set.
        compiler = self.compiler.compiler_type
        _LOGGER.info('Detected compiler: %s', compiler)
        extra_args = EXTRA_COMPILE_ARGS.get(compiler, EXTRA_COMPILE_ARGS['gcc'])

        if isinstance(_CYTHON_COMPILE_TIME_ENV, dict):
            version = _CYTHON_COMPILE_TIME_ENV.get('TESSERACT_VERSION', 0)
        else:
            version = 0
        for extension in self.extensions:
            if version >= 0x3050200:
                _LOGGER.debug('tesseract >= 03.05.02 requires c++11 compiler support')
                extension.extra_compile_args = extra_args
        build_ext.build_extensions(self)

    def finalize_options(self):
        # Cythonize the .pyx sources here so the compile-time env gathered by
        # make_extension() is available before compilation starts.
        from Cython.Build.Dependencies import cythonize

        self.distribution.ext_modules[:] = cythonize(
            self.distribution.ext_modules, compile_time_env=_CYTHON_COMPILE_TIME_ENV
        )
        super(my_build_ext, self).finalize_options()
# Package metadata and build wiring: make_extension() supplies the C++
# extension configuration and my_build_ext cythonizes tesserocr.pyx.
setup(
    name='tesserocr',
    version=find_version('tesserocr.pyx'),
    description='A simple, Pillow-friendly, Python wrapper around '
    'tesseract-ocr API using Cython',
    long_description=read('README.rst'),
    long_description_content_type='text/x-rst',
    url='https://github.com/sirfz/tesserocr',
    author='Fayez Zouheiry',
    author_email='iamfayez@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Multimedia :: Graphics :: Capture :: Scanners',
        'Topic :: Multimedia :: Graphics :: Graphics Conversion',
        'Topic :: Scientific/Engineering :: Image Recognition',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Programming Language :: Cython',
    ],
    keywords='Tesseract,tesseract-ocr,OCR,optical character recognition,'
    'PIL,Pillow,Cython',
    # custom build_ext that runs cythonize with the detected compile-time env
    cmdclass={'build_ext': my_build_ext},
    ext_modules=[make_extension()],
    test_suite='tests',
    setup_requires=['Cython>=0.23'],
)
| 11,241 | 32.75976 | 87 | py |
tesserocr | tesserocr-master/tests/test_api.py | import unittest
import re
import os.path
import tesserocr
try:
from PIL import Image
pil_installed = True
except ImportError:
pil_installed = False
def version_to_int(version):
    """Convert a tesseract version string to a comparable integer.

    Packs two hex digits per version component plus a two-digit subversion
    suffix, e.g. "3.05.02" -> 0x3050200.  Alpha/beta releases rank below the
    corresponding final release (the packed base is reduced by 2 or 1).
    """
    subversion = None
    subtrahend = 0
    # Subtracts a certain amount from the version number to differentiate
    # between alpha, beta and release versions.
    if "alpha" in version:
        subversion = version.split("alpha")[1]
        subtrahend = 2
    elif "beta" in version:
        subversion = version.split("beta")[1]
        subtrahend = 1
    version = re.search(r"((?:\d+\.)+\d+)", version).group()
    # Split the groups on ".", take only the first one, and print each
    # group with leading 0 if needed. To be safe, also handle cases where
    # an extra group is added to the version string, or if one or two
    # groups are dropped.
    version_groups = (version.split(".") + [0, 0])[:3]
    version_str = "{:02}{:02}{:02}".format(*map(int, version_groups))
    version_str = str(int(version_str, 10) - subtrahend)
    # Adds a 2 digit subversion number for the subversion release.
    subversion_str = "00"
    if subversion:
        match = re.search(r"\d+", subversion)
        # Guard against alpha/beta suffixes without any digits: re.search
        # returns None there and calling .group() on it would crash.
        if match is not None:
            subversion_groups = (match.group().split("-") + [0, 0])[:1]
            subversion_str = "{:02}".format(*map(int, subversion_groups))
    version_str += subversion_str
    return int(version_str, 16)
_TESSERACT_VERSION = version_to_int(tesserocr.PyTessBaseAPI.Version())
class TestTessBaseApi(unittest.TestCase):
    """End-to-end tests for tesserocr.PyTessBaseAPI using the bundled eurotext.png."""
    # resolved relative to this test module so the suite works from any CWD
    _test_dir = os.path.abspath(os.path.dirname(__file__))
    _image_file = os.path.join(_test_dir, "eurotext.png")
    def setUp(self):
        """Open the test image (when Pillow is available) and init the API."""
        if pil_installed:
            with open(self._image_file, "rb") as f:
                self._image = Image.open(f)
                # force-load pixel data while the file handle is still open
                self._image.load()
        self._api = tesserocr.PyTessBaseAPI(init=True)
    def tearDown(self):
        """Release the image and the underlying tesseract handle."""
        if pil_installed:
            self._image.close()
        self._api.End()
    def test_context_manager(self):
        """Test context manager behavior"""
        with self._api as api:
            self.assertIs(api, self._api)
            api.SetImageFile(self._image_file)
            self.assertEqual(api.GetUTF8Text(), self._api.GetUTF8Text())
        # assert api has Ended
        self.assertRaises(RuntimeError, self._api.GetUTF8Text)
    def test_init_full(self):
        """Test InitFull."""
        # check default settings
        self.assertEqual(self._api.GetVariableAsString("file_type"), ".tif")
        self.assertEqual(self._api.GetVariableAsString("edges_childarea"), "0.5")
        # use box.train config variables
        configs = ["box.train"]
        # change edges_childarea
        vars_ = {"edges_childarea": "0.7"}
        self._api.End()
        self._api.InitFull(configs=configs, variables=vars_)
        # assert file_type from box.train and custom edges_childarea
        self.assertEqual(self._api.GetVariableAsString("file_type"), ".bl")
        self.assertEqual(self._api.GetVariableAsString("edges_childarea"), "0.7")
        # reset back to default
        self._api.End()
        self._api.Init()
    def test_init(self):
        """Test Init calls with different lang and oem."""
        self._api.Init(lang="eng+osd")
        self.assertEqual(self._api.GetInitLanguagesAsString(), "eng+osd")
        self._api.Init(lang="eng")
        self.assertEqual(self._api.GetInitLanguagesAsString(), "eng")
        self._api.Init(oem=tesserocr.OEM.TESSERACT_ONLY)
        self.assertEqual(self._api.oem(), tesserocr.OEM.TESSERACT_ONLY)
    @unittest.skipIf(not pil_installed, "Pillow not installed")
    def test_image(self):
        """Test SetImage and GetUTF8Text."""
        self._api.SetImage(self._image)
        text = self._api.GetUTF8Text()
        self.assertIn("quick", text)
        # module-level helper must agree with the API object
        text2 = tesserocr.image_to_text(self._image)
        self.assertEqual(text, text2)
    def test_image_file(self):
        """Test SetImageFile and GetUTF8Text."""
        self._api.SetImageFile(self._image_file)
        text = self._api.GetUTF8Text()
        self.assertIn("quick", text)
        text2 = tesserocr.file_to_text(self._image_file)
        self.assertEqual(text, text2)
    @unittest.skipIf(not pil_installed, "Pillow not installed")
    def test_thresholded_image(self):
        """Test GetThresholdedImage and GetThresholdedImageScaleFactor."""
        orig_size = self._image.size
        self._api.SetImage(self._image)
        image = self._api.GetThresholdedImage()
        self.assertIsNot(image, None)
        self.assertIsInstance(image, Image.Image)
        self.assertEqual(image.size, orig_size)
        self.assertEqual(self._api.GetThresholdedImageScaleFactor(), 1)
    def test_page_seg_mode(self):
        """Test SetPageSegMode and GetPageSegMode."""
        self._api.SetPageSegMode(tesserocr.PSM.SINGLE_WORD)
        self.assertEqual(self._api.GetPageSegMode(), tesserocr.PSM.SINGLE_WORD)
        self._api.SetPageSegMode(tesserocr.PSM.AUTO)
        self.assertEqual(self._api.GetPageSegMode(), tesserocr.PSM.AUTO)
    def test_data_path(self):
        """Test GetDatapath and Init with an invalid data path."""
        path = self._api.GetDatapath()
        self._api.End()
        self.assertRaises(
            RuntimeError, self._api.Init, path=(self._test_dir + os.path.sep)
        )  # no tessdata
        # tesseract >= 4 reports/accepts the tessdata dir itself; older
        # versions expect its parent directory
        if _TESSERACT_VERSION >= 0x3999800:
            new_path = path
        else:
            new_path = os.path.abspath(os.path.join(path, os.path.pardir)) + os.path.sep
        self._api.End()
        self._api.Init(new_path)
        self.assertEqual(self._api.GetDatapath(), path)
    def test_langs(self):
        """Test get langs methods."""
        self._api.Init(lang="eng")
        lang = self._api.GetInitLanguagesAsString()
        self.assertEqual(lang, "eng")
        langs = self._api.GetLoadedLanguages()
        self.assertEqual(langs, ["eng"])
        self.assertIn("eng", self._api.GetAvailableLanguages())
    def test_variables(self):
        """Test SetVariable and GetVariableAsString."""
        self._api.SetVariable("debug_file", "/dev/null")
        self.assertEqual(self._api.GetVariableAsString("debug_file"), "/dev/null")
    @unittest.skipIf(not pil_installed, "Pillow not installed")
    def test_rectangle(self):
        """Test SetRectangle."""
        self._api.SetImage(self._image)
        self._api.SetRectangle(0, 0, 100, 43)
        thresh = self._api.GetThresholdedImage()
        self.assertEqual(thresh.size, (100, 43))
    def test_word_confidences(self):
        """Test AllWordConfidences and MapWordConfidences."""
        self._api.SetImageFile(self._image_file)
        # before Recognize() there are no words yet
        words = self._api.AllWords()
        self.assertEqual(words, [])
        self._api.Recognize()
        words = self._api.AllWords()
        confidences = self._api.AllWordConfidences()
        self.assertEqual(len(words), len(confidences))
        mapped_confidences = self._api.MapWordConfidences()
        self.assertEqual([v[0] for v in mapped_confidences], words)
        self.assertEqual([v[1] for v in mapped_confidences], confidences)
    @unittest.skipIf(_TESSERACT_VERSION < 0x4000000, "tesseract < 4")
    def test_LSTM_choices(self):
        """Test GetBestLSTMSymbolChoices."""
        self._api.SetVariable("lstm_choice_mode", "2")
        self._api.SetImageFile(self._image_file)
        self._api.Recognize()
        LSTM_choices = self._api.GetBestLSTMSymbolChoices()
        words = self._api.AllWords()
        self.assertEqual(len(words), len(LSTM_choices))
        # reassemble each word from the top symbol of every timestep and
        # check it matches the recognized word
        for choice, word in zip(LSTM_choices, words):
            chosen_word = ""
            for timestep in choice:
                for alternative in timestep:
                    self.assertGreaterEqual(alternative[1], 0.0)
                    self.assertLessEqual(alternative[1], 2.0)
                chosen_symbol = timestep[0][0]
                if chosen_symbol != " ":
                    chosen_word += chosen_symbol
            self.assertEqual(chosen_word, word)
    @unittest.skipIf(_TESSERACT_VERSION < 0x4000000, "tesseract < 4")
    def test_result_iterator(self):
        """Test result iterator."""
        self._api.SetImageFile(self._image_file)
        self._api.Recognize()
        it = self._api.GetIterator()
        level = tesserocr.RIL.WORD
        for i, w in enumerate(tesserocr.iterate_level(it, level)):
            text = w.GetUTF8Text(level)
            blanks = w.BlanksBeforeWord()
            if i == 0:
                self.assertEqual(text, "The")
                self.assertEqual(blanks, 0)
            elif i == 1:
                self.assertEqual(text, "(quick)")
                self.assertEqual(blanks, 1)
            else:
                break
    def test_detect_os(self):
        """Test DetectOS and DetectOrientationScript (tesseract v4+)."""
        self._api.SetPageSegMode(tesserocr.PSM.OSD_ONLY)
        self._api.SetImageFile(self._image_file)
        orientation = self._api.DetectOS()
        # NOTE(review): wrapping assertIn calls in all() is unusual but each
        # assertion still executes while the generator is consumed.
        all(
            self.assertIn(k, orientation)
            for k in ["sconfidence", "oconfidence", "script", "orientation"]
        )
        self.assertEqual(orientation["orientation"], 0)
        # this is sorted alphabetically!
        languages = tesserocr.get_languages()[1]
        self.assertLess(orientation["script"], len(languages))
        # therefore does not work
        # script_name = languages[orientation["script"]]
        # self.assertEqual(script_name, 'Latin') # cannot test: not reliable
        if _TESSERACT_VERSION >= 0x3999800:
            orientation = self._api.DetectOrientationScript()
            all(
                self.assertIn(k, orientation)
                for k in ["orient_deg", "orient_conf", "script_name", "script_conf"]
            )
            self.assertEqual(orientation["orient_deg"], 0)
            self.assertEqual(orientation["script_name"], "Latin")
    def test_clear(self):
        """Test Clear."""
        self._api.SetImageFile(self._image_file)
        self._api.GetUTF8Text()
        self._api.Clear()
        self.assertRaises(RuntimeError, self._api.GetUTF8Text)
    def test_end(self):
        """Test End."""
        self._api.End()
        self._api.SetImageFile(self._image_file)
        self.assertRaises(RuntimeError, self._api.GetUTF8Text)
    @unittest.skipIf(not pil_installed, "Pillow not installed")
    def test_empty_getcomponents(self):
        """GetComponentImages on a blank image returns an empty result."""
        self._api.Init()
        image = Image.new("RGB", (100, 100), (1, 1, 1))
        self._api.SetImage(image)
        result = self._api.GetComponentImages(tesserocr.RIL.TEXTLINE, True)
        # Test if empty
        self.assertFalse(result)
    @unittest.skipIf(not pil_installed, "Pillow not installed")
    def test_empty_small_getcomponents(self):
        """GetComponentImages on a 1x1 image returns an empty result."""
        self._api.Init()
        image = Image.new("RGB", (1, 1), (1, 1, 1))
        self._api.SetImage(image)
        result = self._api.GetComponentImages(tesserocr.RIL.TEXTLINE, True)
        # Test if empty
        self.assertFalse(result)
    def test_layout_getcomponents(self):
        """GetComponentImages finds a large text block in the test image."""
        self._api.Init()
        self._api.SetImageFile(self._image_file)
        result = self._api.GetComponentImages(tesserocr.RIL.BLOCK, True)
        # Test if not empty
        self.assertTrue(result)
        _, xywh, _, _ = result[0]  # bbox of largest
        self.assertIn("w", xywh)
        self.assertIn("h", xywh)
        area = xywh["w"] * xywh["h"]
        # Test if the largest block is quite large
        self.assertGreater(area, 400000)
    def test_layout_boundingbox(self):
        """AnalyseLayout yields a large bounding box for the main block."""
        self._api.Init()
        self._api.SetImageFile(self._image_file)
        layout = self._api.AnalyseLayout()
        # Test if not empty
        self.assertTrue(layout)
        self.assertFalse(layout.Empty(tesserocr.RIL.BLOCK))
        result = layout.BoundingBox(tesserocr.RIL.BLOCK)  # bbox of largest
        self.assertIsNot(result, None)
        x0, y0, x1, y1 = result
        area = (x1 - x0) * (y1 - y0)
        # Test if the largest block is quite large
        self.assertGreater(area, 400000)
    def test_layout_blockpolygon(self):
        """AnalyseLayout yields a block polygon covering a large area."""
        self._api.Init()
        self._api.SetImageFile(self._image_file)
        layout = self._api.AnalyseLayout()
        # Test if not empty
        self.assertTrue(layout)
        self.assertFalse(layout.Empty(tesserocr.RIL.BLOCK))
        result = layout.BlockPolygon()  # polygon of largest
        # Test if not empty
        self.assertIsNot(result, None)
        # Test there are at least 4 contour points
        self.assertGreaterEqual(len(result), 4)
        xs, ys = zip(*result)
        x0, y0, x1, y1 = min(xs), min(ys), max(xs), max(ys)
        area = (x1 - x0) * (y1 - y0)
        # Test if the largest block is quite large
        self.assertGreater(area, 400000)
    def test_recognize(self):
        """Test Recognize with and without timeout."""
        self._api.SetImageFile(self._image_file)
        # timeout after 1 milliseconds (likely)
        res = self._api.Recognize(1)
        self.assertFalse(res)
        self._api.SetImageFile(self._image_file)
        # timeout after 10 seconds (unlikely)
        res = self._api.Recognize(10000)
        self.assertTrue(res)
        self._api.SetImageFile(self._image_file)
        # no timeout
        res = self._api.Recognize()
        self.assertTrue(res)
    @unittest.skipIf(_TESSERACT_VERSION < 0x3040100, "tesseract < 4")
    def test_row_attributes(self):
        """RowAttributes exposes float row metrics after recognition."""
        self._api.SetImageFile(self._image_file)
        self._api.Recognize()
        it = self._api.GetIterator()
        attrs = it.RowAttributes()
        self.assertIsInstance(attrs["row_height"], float)
        self.assertIsInstance(attrs["ascenders"], float)
        self.assertIsInstance(attrs["descenders"], float)
# Allow running this test module directly: python tests/test_api.py
if __name__ == "__main__":
    unittest.main()
| 13,971 | 38.357746 | 88 | py |
tesserocr | tesserocr-master/tests/__init__.py | 0 | 0 | 0 | py | |
Dialogue-to-Video-Retrieval | Dialogue-to-Video-Retrieval-main/modeling.py | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import CLIPProcessor, CLIPModel
from torch import Tensor
from dataclasses import dataclass
from typing import Dict
from typing import Iterable, Optional
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch import nn
# from .transcribe import transcribe as transcribe_function
# from .decoding import detect_language as detect_language_function, decode as decode_function
class Linear(nn.Linear):
    """nn.Linear that casts its parameters to the input's dtype on the fly."""
    def forward(self, x: Tensor) -> Tensor:
        weight = self.weight.to(x.dtype)
        bias = None if self.bias is None else self.bias.to(x.dtype)
        return F.linear(x, weight, bias)
class AADV(nn.Module):
    """Dialogue-to-video retrieval model built on a CLIP backbone.

    Encodes sampled video frames (with temporal position embeddings and
    temporal self-attention) and a dialogue/text query, projects both into a
    shared space, and returns the normalized features plus the learned
    temperature for the contrastive logits.
    """
    def __init__(self, config, CLIPConfig):
        super().__init__()
        self.config = config
        # one learned position per sampled frame index
        self.temporal_position_embeddings = nn.Embedding(config.n_frames, config.transformer_width)
        self.clip = CLIPModel(CLIPConfig)
        attn_dropout = 0.1
        is_add_bias_kv = True
        is_add_zero_attn = True
        # self-attention across the frame (time) axis
        self.temporal_self_attention = nn.MultiheadAttention(config.transformer_width, config.attention_heads,
                                                             dropout=attn_dropout,
                                                             add_bias_kv=is_add_bias_kv,
                                                             add_zero_attn=is_add_zero_attn)
        # NOTE(review): query_multi_attention is only used by commented-out
        # code in forward(); confirm it is still needed before training.
        self.query_multi_attention = nn.MultiheadAttention(config.transformer_width, config.attention_heads,
                                                           dropout=attn_dropout,
                                                           add_bias_kv=is_add_bias_kv,
                                                           add_zero_attn=is_add_zero_attn)
        # self-attention across dialogue turns
        self.dialogue_multi_attention = nn.MultiheadAttention(config.transformer_width, config.attention_heads,
                                                              dropout=attn_dropout,
                                                              add_bias_kv=is_add_bias_kv,
                                                              add_zero_attn=is_add_zero_attn)
        # text query attends over the dialogue-turn sequence
        self.text_to_dialogue_attention = nn.MultiheadAttention(config.transformer_width, config.attention_heads,
                                                                dropout=attn_dropout,
                                                                add_bias_kv=is_add_bias_kv,
                                                                add_zero_attn=is_add_zero_attn)
        self.gru = nn.GRU(config.transformer_width, config.transformer_width, num_layers=1)
        self.combine_video_and_all_frame = nn.Linear(in_features=config.transformer_width * 2,
                                                     out_features=config.transformer_width)
        self.combine_init = nn.Linear(in_features=config.transformer_width, out_features=config.transformer_width)
        self.transform_all_frame_to_hidden = nn.Linear(in_features=512, out_features=config.transformer_width)
        self.fuse_image= nn.Linear(in_features=config.transformer_width * 2,
                                   out_features=config.transformer_width)
        # projections into the shared multimodal space
        self.video_to_multimodal = nn.Linear(in_features=config.transformer_width,
                                             out_features=config.transformer_width)
        self.text_to_multimodal = nn.Linear(in_features=config.transformer_width, out_features=config.transformer_width)
        # CLIP-style learnable temperature, initialized to ln(1/0.07)
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        self.softmax = nn.Softmax(dim=-1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.loss_fct = CrossEntropyLoss()
    def forward(self, inputs):
        """
        :param inputs:
            image_frames: (B x 20) x N
            audio: B x 2 x 10000
            summary: B x 77
            script: B x 77
            dialog: B x 10 x 77
            all_ans: B x 10 x 77
            all_frames: B x 768/512
        :return: tuple (image_features, normalized video features,
            normalized text features, exp(logit_scale)); the contrastive
            loss is computed by the caller.
        """
        # pre-injection of all_frame feature
        image_features = self.encode_image(inputs['image_frames'])
        if self.config.search_key in ['script', 'summary']:
            # plain text query: mask out zero-padded token positions
            attn_mask = 1 - (inputs[self.config.search_key] == 0).long()
            text_features = self.clip.get_text_features(inputs[self.config.search_key], attention_mask=attn_mask)
        else:
            # encoding dialogue query
            text_features = self.encode_dialogue_query(inputs[self.config.search_key],
                                                       inputs[self.config.dialog_feature_key])
        # r_text_features = text_features.unsqueeze(0).repeat(image_features.size(1), 1, 1)  # added repeat
        # text weighted representation of image representation
        # query_to_image_attn = self.query_multi_attention(r_text_features.transpose(0, 1).contiguous(),
        #                                                  image_features, image_features)[0].transpose(0, 1).contiguous()
        # video_features = query_to_image_attn
        image_features = image_features.transpose(0, 1).contiguous()
        # mean-pool over the frame axis to get a single video vector
        video_features = torch.sum(image_features, dim=1) / image_features.size(1)
        n_video_features = torch.nn.functional.normalize(self.video_to_multimodal(video_features), p=2, dim=-1)
        n_text_features = torch.nn.functional.normalize(self.text_to_multimodal(text_features), p=2, dim=-1)
        logit_scale = self.logit_scale.exp()
        # # original multiply
        # logits = torch.mm(logit_scale * n_video_features, n_text_features.t())
        # # text weighted multiply
        # n_text_features = n_text_features.unsqueeze(1)
        # logits = torch.bmm(logit_scale * n_video_features.transpose(0, 1).contiguous(),
        #                    n_text_features.transpose(1, 2).contiguous()).squeeze(-1)
        # labels = torch.tensor([i for i in range(text_features.size(0))], dtype=torch.long,
        #                       device=self.config.device)
        # loss_i = self.loss_fct(logits, labels)
        # loss_e = self.loss_fct(logits.t(), labels)
        # loss = (loss_i + loss_e) / 2
        return image_features, n_video_features, n_text_features, logit_scale
    def encode_image(self, images):
        """Encode a flattened batch of frames, shape (B * n_frames, ...).

        Adds per-frame temporal position embeddings and applies temporal
        self-attention; returns features shaped (n_frames, B, width).
        """
        image_features = self.clip.get_image_features(images)
        # frame indices 0..n_frames-1, repeated for every video in the batch
        temporal_pos = torch.tensor(
            [[i for i in range(self.config.n_frames)] for j in range(images.size(0) // self.config.n_frames)],
            dtype=torch.int, device=self.config.device).view(-1)
        frame_temporal_pos_embed = self.temporal_position_embeddings(temporal_pos)
        image_features = (image_features + frame_temporal_pos_embed).view(images.size(0) // self.config.n_frames,
                                                                          self.config.n_frames, -1)
        # (B, n_frames, width) -> (n_frames, B, width) for nn.MultiheadAttention
        image_features = image_features.transpose(0, 1).contiguous()
        self_attn_image_features = self.temporal_self_attention(image_features, image_features, image_features)[0]
        return self_attn_image_features
    def encode_dialogue_query(self, dialogues, text_desc):
        """Fuse dialogue turns (B x 10 x 77 token ids) with a text query.

        Each turn is CLIP-encoded, mixed by self-attention and a GRU, then
        attended by the text description; returns (B, width) features.
        """
        # keep only the first `dialog_runs` turns
        dialogues = dialogues[:, :self.config.dialog_runs, :]
        d_input = dialogues.reshape(dialogues.size(0) * dialogues.size(1), -1).contiguous()
        d_attn_mask = 1 - (d_input == 0).long()
        dialogue_features = self.clip.get_text_features(d_input, attention_mask=d_attn_mask). \
            view(dialogues.size(0), dialogues.size(1), -1)
        # (B, turns, width) -> (turns, B, width)
        dialogue_features = dialogue_features.transpose(0, 1).contiguous()
        dialogue_features = self.dialogue_multi_attention(dialogue_features, dialogue_features, dialogue_features)[0]
        dialogue_features, _ = self.gru(dialogue_features)
        text_features = self.clip.get_text_features(text_desc, attention_mask=(1 - (text_desc == 0).long()))
        # text query attends over the dialogue-turn sequence
        dialogue_features = self.text_to_dialogue_attention(text_features.unsqueeze(1).transpose(0, 1).contiguous(),
                                                            dialogue_features, dialogue_features)[0].squeeze(0)
        text_features = self.combine_init(dialogue_features + text_features)
        # text_features = self.combine_init(torch.cat([text_features, dialogue_features], dim=-1))
        return text_features
@dataclass
class ModelDimensions:
    """Size hyperparameters for a Whisper-style audio/text model.

    NOTE(review): only the audio fields have a visible consumer in this
    module (AudioEncoder); the text fields presumably configure a decoder
    defined elsewhere -- confirm before relying on them.
    """
    # audio encoder: mel bins, context length, width, heads, layers
    n_mels: int
    n_audio_ctx: int
    n_audio_state: int
    n_audio_head: int
    n_audio_layer: int
    # text side: vocabulary size, context length, width, heads, layers
    n_vocab: int
    n_text_ctx: int
    n_text_state: int
    n_text_head: int
    n_text_layer: int
class LayerNorm(nn.LayerNorm):
    """LayerNorm that normalizes in float32, then restores the input dtype."""
    def forward(self, x: Tensor) -> Tensor:
        input_dtype = x.dtype
        normalized = super().forward(x.float())
        return normalized.type(input_dtype)
class Linear(nn.Linear):
    # NOTE(review): this class is byte-for-byte identical to the `Linear`
    # defined earlier in this module and silently shadows it; confirm the
    # duplication is intentional before removing either copy.
    def forward(self, x: Tensor) -> Tensor:
        """Apply the linear layer with weight/bias cast to x's dtype."""
        return F.linear(
            x, self.weight.to(x.dtype), None if self.bias is None else self.bias.to(x.dtype)
        )
class Conv1d(nn.Conv1d):
    """nn.Conv1d that casts weight and bias to the input's dtype."""
    def _conv_forward(self, x: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor:
        cast_weight = weight.to(x.dtype)
        cast_bias = bias.to(x.dtype) if bias is not None else None
        return super()._conv_forward(x, cast_weight, cast_bias)
def sinusoids(length, channels, max_timescale=10000):
    """Returns sinusoids for positional embedding, shape (length, channels).

    The first channels//2 columns are sines, the rest cosines, over
    geometrically spaced timescales from 1 up to max_timescale.
    """
    assert channels % 2 == 0
    half = channels // 2
    decay = np.log(max_timescale) / (half - 1)
    frequencies = torch.exp(-decay * torch.arange(half))
    angles = torch.arange(length)[:, np.newaxis] * frequencies[np.newaxis, :]
    return torch.cat([angles.sin(), angles.cos()], dim=1)
class MultiHeadAttention(nn.Module):
    """Multi-head attention with optional cached cross-attention keys/values.

    The kv_cache dict (when supplied) is keyed by the key/value Linear
    modules themselves; external hooks are expected to populate it.
    """
    def __init__(self, n_state: int, n_head: int):
        super().__init__()
        self.n_head = n_head
        self.query = Linear(n_state, n_state)
        # the key projection deliberately has no bias
        self.key = Linear(n_state, n_state, bias=False)
        self.value = Linear(n_state, n_state)
        self.out = Linear(n_state, n_state)
    def forward(
        self,
        x: Tensor,
        xa: Optional[Tensor] = None,
        mask: Optional[Tensor] = None,
        kv_cache: Optional[dict] = None,
    ):
        """Self-attention over x, or cross-attention onto xa when given."""
        q = self.query(x)
        if kv_cache is None or xa is None or self.key not in kv_cache:
            # hooks, if installed (i.e. kv_cache is not None), will prepend the cached kv tensors;
            # otherwise, perform key/value projections for self- or cross-attention as usual.
            k = self.key(x if xa is None else xa)
            v = self.value(x if xa is None else xa)
        else:
            # for cross-attention, calculate keys and values once and reuse in subsequent calls.
            k = kv_cache[self.key]
            v = kv_cache[self.value]
        wv = self.qkv_attention(q, k, v, mask)
        return self.out(wv)
    def qkv_attention(self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None):
        """Scaled dot-product attention over n_head heads.

        q and k are each pre-scaled by head_dim ** -0.25, so their product
        carries the usual 1/sqrt(head_dim) factor.
        """
        n_batch, n_ctx, n_state = q.shape
        scale = (n_state // self.n_head) ** -0.25
        q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) * scale
        k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 3, 1) * scale
        v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)
        qk = q @ k
        if mask is not None:
            # additive mask, cropped to the current context length
            qk = qk + mask[:n_ctx, :n_ctx]
        # softmax in float32 for stability, then cast back to q's dtype
        w = F.softmax(qk.float(), dim=-1).to(q.dtype)
        return (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2)
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: self-attention, optional cross-attention, MLP."""
    def __init__(self, n_state: int, n_head: int, cross_attention: bool = False):
        super().__init__()
        # submodule creation order is kept stable for reproducible init
        self.attn = MultiHeadAttention(n_state, n_head)
        self.attn_ln = LayerNorm(n_state)
        self.cross_attn = MultiHeadAttention(n_state, n_head) if cross_attention else None
        self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
        hidden = n_state * 4
        self.mlp = nn.Sequential(Linear(n_state, hidden), nn.GELU(), Linear(hidden, n_state))
        self.mlp_ln = LayerNorm(n_state)
    def forward(
        self,
        x: Tensor,
        xa: Optional[Tensor] = None,
        mask: Optional[Tensor] = None,
        kv_cache: Optional[dict] = None,
    ):
        """Apply the block; xa feeds cross-attention when it is enabled."""
        x = x + self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)
        if self.cross_attn is not None:
            x = x + self.cross_attn(self.cross_attn_ln(x), xa, kv_cache=kv_cache)
        return x + self.mlp(self.mlp_ln(x))
class AudioEncoder(nn.Module):
    """Whisper-style audio encoder: two convs over the mel spectrogram
    followed by a stack of self-attention blocks and a final LayerNorm."""
    def __init__(self, n_mels: int, n_ctx: int, n_state: int, n_head: int, n_layer: int):
        super().__init__()
        self.conv1 = Conv1d(n_mels, n_state, kernel_size=3, padding=1)
        # stride 2 halves the time axis
        self.conv2 = Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1)
        # fixed (non-learned) sinusoidal positions, stored as a buffer
        self.register_buffer("positional_embedding", sinusoids(n_ctx, n_state))
        self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList(
            [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
        )
        self.ln_post = LayerNorm(n_state)
    def forward(self, x: Tensor):
        """
        x : torch.Tensor, shape = (batch_size, n_mels, n_ctx)
        the mel spectrogram of the audio
        """
        x = F.gelu(self.conv1(x))
        x = F.gelu(self.conv2(x))
        # (batch, n_state, time) -> (batch, time, n_state)
        x = x.permute(0, 2, 1)
        assert x.shape[1:] == self.positional_embedding.shape, "incorrect audio shape"
        x = (x + self.positional_embedding).to(x.dtype)
        for block in self.blocks:
            x = block(x)
        x = self.ln_post(x)
        return x
class Bottleneck(nn.Module):
    """ResNet bottleneck (1x1 -> 3x3 -> 1x1) with CLIP's anti-alias tweaks.

    All convolutions use stride 1; when stride > 1, downsampling happens via
    an AvgPool2d after the second conv, and the shortcut gets an avgpool
    prepended to its projection.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the
        # second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = nn.ReLU(inplace=True)
        self.stride = stride
        needs_projection = stride > 1 or inplanes != planes * Bottleneck.expansion
        if needs_projection:
            # downsampling layer is prepended with an avgpool, and the
            # subsequent convolution has stride 1
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))
        else:
            self.downsample = None
    def forward(self, x: torch.Tensor):
        """Apply the bottleneck; the shortcut is projected when needed."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu3(out)
class AttentionPool2d(nn.Module):
    """CLIP's attention pooling: a mean token attends over all spatial
    positions, replacing global average pooling."""
    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # positions for spacial_dim**2 patches plus the prepended mean token
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads
    def forward(self, x):
        """Pool an NCHW feature map to a single (N, output_dim) vector."""
        x = x.flatten(start_dim=2).permute(2, 0, 1)  # NCHW -> (HW)NC
        # prepend the spatial mean as the query token
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # query is only the mean token (x[:1]); keys/values span all tokens
        x, _ = F.multi_head_attention_forward(
            query=x[:1], key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        return x.squeeze(0)
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """
    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution
        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.relu3 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(2)
        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
    def _make_layer(self, planes, blocks, stride=1):
        """Build one residual stage: first block may downsample (stride)."""
        layers = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Encode an image batch into (N, output_dim) pooled features."""
        def stem(x):
            x = self.relu1(self.bn1(self.conv1(x)))
            x = self.relu2(self.bn2(self.conv2(x)))
            x = self.relu3(self.bn3(self.conv3(x)))
            x = self.avgpool(x)
            return x
        # match the stem's parameter dtype (e.g. when the model is halved)
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)
        return x
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class Transformer(nn.Module):
    """Stack of `layers` ResidualAttentionBlock modules of width `width`."""
    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        # NOTE(review): ResidualAttentionBlock's third positional parameter
        # is `cross_attention` (a bool), not an attention mask -- a mask
        # tensor passed here would be misinterpreted, and a multi-element
        # tensor raises on the truthiness check in its __init__. Confirm
        # callers only ever pass attn_mask=None, or thread the mask through
        # forward() instead.
        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
    def forward(self, x: torch.Tensor):
        """Run the input through every residual block in sequence."""
        return self.resblocks(x)
class VisionTransformer(nn.Module):
    """CLIP's ViT image encoder: patchify with a conv, prepend a class
    token, run a transformer, and project the class token's output."""
    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # non-overlapping patch embedding (stride == kernel == patch_size)
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        # one position per patch plus the class token
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
    def forward(self, x: torch.Tensor):
        """Encode an image batch into (N, output_dim) features."""
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # prepend the learned class token to every sequence in the batch
        x = torch.cat(
            [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
             x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        # only the class token (position 0) is used as the image embedding
        x = self.ln_post(x[:, 0, :])
        if self.proj is not None:
            x = x @ self.proj
        return x
class CLIP(nn.Module):
    """Contrastive Language-Image Pre-training model.

    Pairs an image encoder (ModifiedResNet when ``vision_layers`` is a
    tuple/list, otherwise a VisionTransformer) with a causal text
    transformer; both encoders project into a shared ``embed_dim`` space.
    """

    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        super().__init__()
        self.context_length = context_length
        # A tuple/list of stage depths selects the ResNet visual backbone;
        # a single int selects the ViT backbone.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim
            )
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask()
        )
        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)
        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # Learnable temperature for the contrastive logits, init ln(1/0.07).
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        self.initialize_parameters()

    def initialize_parameters(self):
        """Initialize embedding, attention-pool and transformer weights in place."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
            # Zero the final BN gain of every residual block (bn3.weight).
            for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)
        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        """Return an additive causal mask of shape (context_length, context_length)."""
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        # Mirror the dtype of the visual stem so fp16 conversion propagates.
        return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        """Encode a batch of preprocessed images into the joint embedding space."""
        return self.visual(image.type(self.dtype))

    def encode_text(self, text):
        """Encode a batch of token-id sequences into the joint embedding space."""
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)
        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return x

    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text) cosine-similarity logits."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)
        # normalized features
        image_features = image_features / image_features.norm(dim=1, keepdim=True)
        text_features = text_features / text_features.norm(dim=1, keepdim=True)
        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()
        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Cast the applicable parameters of *model* to fp16, in place."""
    def _to_half(module):
        # Convolution / linear layers: weight plus optional bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()
        # Multi-head attention stores its projections as raw attributes.
        if isinstance(module, nn.MultiheadAttention):
            attn_attrs = ["in_proj_weight", "q_proj_weight", "k_proj_weight", "v_proj_weight",
                          "in_proj_bias", "bias_k", "bias_v"]
            for attr in attn_attrs:
                tensor = getattr(module, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()
        # CLIP-specific projection parameters hang directly off some modules.
        for name in ("text_projection", "proj"):
            attr = getattr(module, name, None)
            if attr is not None:
                attr.data = attr.data.half()

    model.apply(_to_half)
def build_model(state_dict: dict):
    """Instantiate a CLIP model whose hyper-parameters are inferred from a
    checkpoint ``state_dict``, load the weights and return it in eval mode."""
    is_vit = "visual.proj" in state_dict
    if is_vit:
        # ViT backbone: read width/patch size from the stem conv, depth from
        # the number of attention blocks, resolution from the positional grid.
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len([k for k in state_dict.keys()
                             if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet backbone: count the blocks of each of the four stages.
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in
                        [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        image_resolution = output_width * 32
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
    )
    # Drop bookkeeping entries that are not parameters before loading.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]
    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| 29,954 | 40.146978 | 122 | py |
Dialogue-to-Video-Retrieval | Dialogue-to-Video-Retrieval-main/data_preprocess.py | from tqdm import tqdm
import json
import codecs
import requests
import pandas as pd
from transformers import BertTokenizer, AutoTokenizer
from os import listdir
from os.path import isfile, join
import torch
import numpy as np
import random
def json_load(x):
    """Load and return the JSON content of the UTF-8 file at path *x*."""
    # def + context manager instead of the previous lambda: PEP 8 (E731) and
    # the lambda form leaked the open file handle.
    with codecs.open(x, 'r', encoding='utf-8') as f:
        return json.load(f)


def json_dump(d, p):
    """Write *d* as pretty-printed, non-ASCII-escaped JSON to path *p*."""
    with codecs.open(p, 'w', 'utf-8') as f:
        json.dump(d, f, indent=2, ensure_ascii=False)
def draw_samples(lis, ratio):
    """Randomly draw items from *lis*.

    *ratio* > 1 is an absolute sample count; otherwise it is a fraction of
    ``len(lis)``. Sampling is with replacement only when the requested count
    exceeds the list length.
    """
    count = ratio if ratio > 1 else int(ratio * len(lis))
    with_replacement = count > len(lis)
    chosen = np.random.choice(len(lis), count, replace=with_replacement)
    return [lis[i] for i in chosen]
def inspect_avsd():
    """Ad-hoc inspection helper: report BERT-token lengths of the AVSD
    validation summaries and scripts (prints the count of entries >= 60
    tokens for each field)."""
    dir = 'data/avsd/avsd_val.json'
    js = json_load(dir)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    def measure_avg_len(examples, key):
        """Print how many examples[*][key] texts are >= 60 BERT tokens and
        return the average token length.

        NOTE(review): empty/None entries are skipped from the sum but the
        average still divides by len(examples) — confirm this denominator
        is intended.
        """
        lens = 0
        overlong = 0
        for e in examples:
            e = examples[e]  # examples is a dict keyed by video id
            if e[key] is None or len(e[key]) == 0:
                continue
            te = tokenizer.tokenize(e[key])
            if len(te) >= 60:
                overlong += 1
            lens += len(te)
        print(overlong)
        return lens / len(examples)

    # Results are computed for side-effect printing; the averages themselves
    # are currently unused.
    avg_len_sum = measure_avg_len(js, 'summary')
    avg_len_script = measure_avg_len(js, 'script')
    return
def extract_audio_from_video():
    """Extract the audio track of every AVSD video into a sibling .wav file."""
    import moviepy.editor as mp
    video_dir = 'data/avsd/videos/'
    video_files = [name for name in listdir(video_dir) if isfile(join(video_dir, name))]
    for video_file in tqdm(video_files):
        media = mp.VideoFileClip(video_dir + video_file)
        stem = video_file.split('.')[0]
        media.audio.write_audiofile('data/avsd/audios/{}.wav'.format(stem))
    return
def sample_frames_from_video():
    """Sample 60 evenly spaced frames from every AVSD video and write them
    out as ``frames/<video>_<i>.jpg``."""
    # Importing all necessary libraries
    import cv2
    path = 'data/avsd/videos/'
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    frames_per_video = 60
    for f in tqdm(onlyfiles):
        # Read the video from specified path
        cam = cv2.VideoCapture(path + f)
        # frame
        currentframe = 0
        all_frames = []
        # Decode every frame of the video into memory first.
        while (True):
            # reading from frame
            ret, frame = cam.read()
            if ret:
                all_frames.append(frame)
                currentframe += 1
            else:
                break
        lens = len(all_frames)
        if lens >= frames_per_video:
            # Enough frames: take one every `interval` frames, clamping the
            # indices and forcing the last sample to be the final frame.
            interval = lens // frames_per_video
            frame_ind = [i * interval for i in range(frames_per_video)]
            for i in range(len(frame_ind)):
                if frame_ind[i] >= lens:
                    frame_ind[i] = lens - 1
            frame_ind[-1] = lens - 1
            sampled_frames = [all_frames[i] for i in frame_ind]
        else:
            # Too few frames: sample indices with replacement (via
            # draw_samples) to pad up to frames_per_video.
            sampled_frames = sorted(draw_samples([i for i in range(len(all_frames))], frames_per_video))
            sampled_frames = [all_frames[i] for i in sampled_frames]
        for ind, frame in enumerate(sampled_frames):
            cv2.imwrite('data/avsd/frames/{}_{}.jpg'.format(f.split('.')[0], str(ind)), frame)
        # Release all space and windows once done
        cam.release()
        cv2.destroyAllWindows()
def preprocess_avsd_to_tensor_dataset():
    """Tokenize AVSD metadata (summary, script, dialogue Q/A) with the CLIP
    tokenizer and pickle per-split caches plus the ordered video-name lists.

    Frames/audio are NOT embedded here: each example stores only its index
    (frames are loaded lazily at train/eval time).
    """
    import clip
    import torch
    from transformers import AutoTokenizer, AutoFeatureExtractor
    import pickle
    image_dir = 'data/avsd/frames/'
    audio_dir = 'data/avsd/audios/'
    train_metadata_dir = 'data/avsd/avsd_train.json'
    val_metadata_dir = 'data/avsd/avsd_val.json'
    test_metadata_dir = 'data/avsd/avsd_test.json'
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load("ViT-L/14@336px", device=device)
    torch.random.manual_seed(0)

    def read_image_and_audio(metadata_dir, split='train'):
        """Build and pickle the cache for one split; record the video order."""
        metadata = json_load(metadata_dir)
        all_video_names = []
        all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog = [], [], [], [], [], []
        for ind, key in enumerate(tqdm(metadata)):
            md = metadata[key]
            all_video_names.append(key)
            '''
            Abandoned due to significant use of memory
            '''
            # all_frames = []
            # frame_index = sorted(draw_samples([i for i in range(20)], 10))
            # for ind in frame_index:
            #     frame = preprocess(Image.open('{}{}_{}.jpg'.format(image_dir, key, str(ind))))
            #     all_frames.append(frame)
            # all_frames = torch.cat(all_frames, dim=0)
            # Placeholder: store only this example's index instead of pixels.
            all_frames = torch.tensor([ind], dtype=torch.int)
            # Fall back to the script when no summary exists.
            summary = md['summary'] if md['summary'] is not None else md['script']
            script = md['script']
            t_summary = clip.tokenize(summary, context_length=77, truncate=True)
            t_script = clip.tokenize(script, context_length=77, truncate=True)
            # Each dialogue turn is tokenized as "question answer".
            all_t_q = []
            for dialog in md['data']:
                q = dialog['question'] + ' ' + dialog['answer']
                t_q = clip.tokenize(q, context_length=77, truncate=True)
                all_t_q.append(t_q)
            all_t_q = torch.cat(all_t_q, dim=0)
            all_t_ans = []
            for dialog in md['data']:
                ans = dialog['answer']
                t_ans = clip.tokenize(ans, context_length=77, truncate=True)
                all_t_ans.append(t_ans)
            all_t_ans = torch.cat(all_t_ans, dim=0)
            all_images.append(all_frames)
            # NOTE(review): the audio slot reuses the frame-index placeholder;
            # confirm real audio features are intentionally not cached here.
            all_audios.append(all_frames)
            all_summaries.append(t_summary)
            all_scripts.append(t_script)
            all_dialogs.append(all_t_q)
            all_ans_in_dialog.append(all_t_ans)
        pickle.dump(
            [all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog],
            open('data/avsd/{}.cache'.format(split), "wb"), protocol=4)
        video_names = {'split': split, 'data': all_video_names}
        json_dump(video_names, 'data/avsd/{}_video_names.json'.format(split))

    read_image_and_audio(train_metadata_dir, split='train')
    read_image_and_audio(val_metadata_dir, split='val')
    read_image_and_audio(test_metadata_dir, split='test')
def process_dialogs():
    """Split the dialog CSVs into per-task (NLU/DST/NLG) .source/.target
    line files, stripping the first ten special tokens from each text."""
    special_tokens = json_load('../dialog/additional_special_tokens.json')

    def filtering(line):
        # Remove the first 10 special tokens from the text.
        for sp in special_tokens['additional_special_tokens'][:10]:
            line = line.replace(sp, '')
        return line

    def output_dialogs_by_task(task_key, split):
        """Write <history + input> / <target> pairs of one task and split."""
        data_dir = '../dialog/{}.csv'.format(split)
        if split == 'dev':
            split = 'val'  # output files use 'val' naming for the dev split
        df = pd.read_csv(data_dir)
        hist = list(df['history'])
        inputs = list(df['input'])
        target = list(df['target'])
        tasks = list(df['task'])
        source_lines, target_lines = [], []
        for h, inp, targ, task in zip(hist, inputs, target, tasks):
            if task == task_key:
                if str(h) == 'nan':  # pandas NaN for an empty history cell
                    h = ''
                line = filtering(str(h) + ' ' + str(inp))
                targf = filtering(targ)
                # Skip pairs where either side is whitespace-only.
                if line.replace(' ', '') == '' or targf.replace(' ', '') == '':
                    continue
                source_lines.append(line)
                target_lines.append(str(targf))
        with open('../dialog/dialog-task/{}/{}.source'.format(task_key, split), 'w') as f:
            for line in source_lines:
                f.writelines(line.replace('\n', '').strip() + '\n')
        with open('../dialog/dialog-task/{}/{}.target'.format(task_key, split), 'w') as f:
            for line in target_lines:
                f.writelines(line.replace('\n', '').strip() + '\n')

    task_keys = ['NLU', 'DST', 'NLG']
    splits = ['train', 'dev', 'test']
    for tk in task_keys:
        for sp in splits:
            output_dialogs_by_task(tk, sp)
def sample_frames_from_video_for_val_set():
    """Sample 60 frames per Charades video into videos/frames/, resuming a
    previously interrupted run (files before index 9722 are skipped)."""
    # Importing all necessary libraries
    import cv2
    path = 'data/avsd/videos/Charades_v1/'
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    video_names = json_load("data/avsd/val_video_names.json")['data']
    video_names = {vn: 0 for vn in video_names}
    frames_per_video = 60
    for ind, f in tqdm(enumerate(onlyfiles)):
        # Resume hack: skip files already processed in an earlier run.
        if ind <= 9722:
            continue
        vn = f.split('.')[0]
        # NOTE(review): this skips videos that ARE in the val-name list,
        # which contradicts the function name — verify the condition is not
        # meant to be `vn not in video_names`.
        if vn in video_names:
            continue
        # Read the video from specified path
        cam = cv2.VideoCapture(path + f)
        # frame
        currentframe = 0
        all_frames = []
        while True:
            # reading from frame
            ret, frame = cam.read()
            if ret:
                all_frames.append(frame)
                currentframe += 1
            else:
                break
        lens = len(all_frames)
        if lens >= frames_per_video:
            # Evenly spaced sampling with index clamping (unlike
            # sample_frames_from_video, the last frame is not forced here).
            interval = lens // frames_per_video
            frame_ind = [i * interval for i in range(frames_per_video)]
            for i in range(len(frame_ind)):
                if frame_ind[i] >= lens:
                    frame_ind[i] = lens - 1
            sampled_frames = [all_frames[i] for i in frame_ind]
        else:
            # Too few frames: pad by sampling indices with replacement.
            sampled_frames = sorted(draw_samples([i for i in range(len(all_frames))], frames_per_video))
            sampled_frames = [all_frames[i] for i in sampled_frames]
        for ind, frame in enumerate(sampled_frames):
            cv2.imwrite('data/avsd/videos/frames/{}_{}.jpg'.format(f.split('.')[0], str(ind)), frame)
        # Release all space and windows once done
        cam.release()
        cv2.destroyAllWindows()
def retokenize_avsd_to_tensor_dataset():
    """Rebuild the train/val pickle caches, iterating in the order recorded
    in the saved ``*_video_names.json`` files (unlike the initial
    preprocessing, which iterates the raw metadata)."""
    import clip
    import torch
    from transformers import AutoTokenizer
    import pickle
    image_dir = 'data/avsd/videos/frames/'
    audio_dir = 'data/avsd/videos/audios/'
    train_metadata_dir = 'data/avsd/avsd_train.json'
    val_metadata_dir = 'data/avsd/avsd_val.json'
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load("ViT-L/14@336px", device=device)
    tokenizer = AutoTokenizer.from_pretrained('openai/clip-vit-base-patch16')

    def read_image_and_audio(metadata_dir, split='train'):
        """Tokenize one split following the stored video-name order and
        pickle the resulting cache (audio list stays empty here)."""
        metadata = json_load(metadata_dir)
        video_names = json_load('data/avsd/{}_video_names.json'.format(split))
        all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog = [], [], [], [], [], []
        for ind, key in enumerate(tqdm(video_names)):
            md = metadata[key]
            '''
            Abandoned due to significant use of memory
            '''
            # all_frames = []
            # frame_index = sorted(draw_samples([i for i in range(20)], 10))
            # for ind in frame_index:
            #     frame = preprocess(Image.open('{}{}_{}.jpg'.format(image_dir, key, str(ind))))
            #     all_frames.append(frame)
            # all_frames = torch.cat(all_frames, dim=0)
            # Placeholder: store only this example's index instead of pixels.
            all_frames = torch.tensor([ind], dtype=torch.int)
            # Fall back to the script when no summary exists.
            summary = md['summary'] if md['summary'] is not None else md['script']
            script = md['script']
            t_summary = clip.tokenize(summary, context_length=77, truncate=True)
            t_script = clip.tokenize(script, context_length=77, truncate=True)
            # Each dialogue turn is tokenized as "question answer".
            all_t_q = []
            for dialog in md['data']:
                q = dialog['question'] + ' ' + dialog['answer']
                t_q = clip.tokenize(q, context_length=77, truncate=True)
                all_t_q.append(t_q)
            all_t_q = torch.cat(all_t_q, dim=0)
            all_t_ans = []
            for dialog in md['data']:
                ans = dialog['answer']
                t_ans = clip.tokenize(ans, context_length=77, truncate=True)
                all_t_ans.append(t_ans)
            all_t_ans = torch.cat(all_t_ans, dim=0)
            all_images.append(all_frames)
            all_summaries.append(t_summary)
            all_scripts.append(t_script)
            all_dialogs.append(all_t_q)
            all_ans_in_dialog.append(all_t_ans)
        pickle.dump(
            [all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog],
            open('data/avsd/{}.cache'.format(split), "wb"), protocol=4)

    read_image_and_audio(train_metadata_dir, split='train')
    read_image_and_audio(val_metadata_dir, split='val')
def positionalencoding1d(d_model, length):
    """Build a sinusoidal positional-encoding matrix.

    :param d_model: dimension of the model (must be even)
    :param length: length of positions
    :return: length*d_model position matrix
    """
    import math
    if d_model % 2 != 0:
        raise ValueError("Cannot use sin/cos positional encoding with "
                         "odd dim (got dim={:d})".format(d_model))
    positions = torch.arange(0, length).unsqueeze(1).float()
    # Geometric frequency ladder: 10000^(-2i/d_model) for even dims i.
    frequencies = torch.exp(torch.arange(0, d_model, 2, dtype=torch.float) *
                            -(math.log(10000.0) / d_model))
    encoding = torch.zeros(length, d_model)
    encoding[:, 0::2] = torch.sin(positions * frequencies)
    encoding[:, 1::2] = torch.cos(positions * frequencies)
    return encoding
def resize_images():
    """Resize the AVSD frames used by training/evaluation to fit in 336x336.

    Only frames whose index appears in the train (12 of 60) or eval
    (12 of 200) sampling index sets are resized; results are written to the
    sibling 'frames_resize' directory.
    """
    from PIL import Image
    path = 'data/avsd/videos/frames/'
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    # Frame indices sampled at train time (indices_t) and eval time (indices_e).
    indices_t = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 51, 59]
    indices_e = [0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 170, 199]
    indices = set(indices_e + indices_t)
    for f in tqdm(onlyfiles):
        # Filenames are '<video>_<frameindex>.jpg'.
        # NOTE(review): split('_')[1] assumes video names contain no '_';
        # verify against the actual Charades video ids.
        ind = int(f.replace('.jpg', '').split('_')[1])
        if ind not in indices:
            continue
        image = Image.open(path + f)
        image.thumbnail((336, 336))  # in-place resize, preserves aspect ratio
        image.save(path.replace('frames', 'frames_resize') + f)
if __name__ == '__main__':
    # Full preprocessing pipeline: build the tokenized tensor caches, then
    # sample frames and extract audio from the raw AVSD videos.
    preprocess_avsd_to_tensor_dataset()
    sample_frames_from_video()
    extract_audio_from_video()
| 13,897 | 32.570048 | 115 | py |
Dialogue-to-Video-Retrieval | Dialogue-to-Video-Retrieval-main/run_dialogue_to_video_retrieval.py | """ running training and evaluation code for dialogue-to-video retrieval
Created by Chenyang Lyu
"""
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import TensorDataset
from transformers import CLIPProcessor, CLIPModel, CLIPConfig
import torch.distributed as dist
from torch.nn import CrossEntropyLoss
import argparse
import sklearn.metrics as metric
import glob
import logging
import os
import random
import numpy as np
import json
import pickle
import codecs
from PIL import Image
from tqdm import tqdm, trange
from sklearn.metrics import top_k_accuracy_score
from transformers import (
WEIGHTS_NAME,
AdamW,
get_linear_schedule_with_warmup,
)
from modeling import AADV
import clip
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)


def json_load(x):
    """Load and return the JSON content of the UTF-8 file at path *x*."""
    # def + context manager instead of the previous lambda: PEP 8 (E731) and
    # the lambda form leaked the open file handle.
    with codecs.open(x, 'r', encoding='utf-8') as f:
        return json.load(f)


def json_dump(d, p):
    """Write *d* as pretty-printed, non-ASCII-escaped JSON to path *p*."""
    with codecs.open(p, 'w', 'utf-8') as f:
        json.dump(d, f, indent=2, ensure_ascii=False)
def set_seed(args):
    """Make runs reproducible by seeding every RNG in use from ``args.seed``."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def draw_samples(lis, ratio):
    """Randomly draw items from *lis*.

    *ratio* > 1 is an absolute sample count; otherwise it is a fraction of
    ``len(lis)``. Sampling is with replacement only when the requested count
    exceeds the list length.
    """
    count = ratio if ratio > 1 else int(ratio * len(lis))
    with_replacement = count > len(lis)
    chosen = np.random.choice(len(lis), count, replace=with_replacement)
    return [lis[i] for i in chosen]
def train(args, model, train_dataset, preprocess, val_set=None):
    """Train the retrieval model with a symmetric in-batch contrastive loss.

    ``train_dataset`` is a (TensorDataset, video-name metadata) tuple: the
    TensorDataset stores frame *indices* plus tokenized text, and the JPEG
    frames are loaded lazily per batch via ``preprocess``.
    Returns (global_step, mean training loss per optimizer step).
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    train_dataset, train_video_names = train_dataset
    args.train_batch_size = args.train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    t_total = (len(train_dataloader) * args.num_train_epochs) // args.gradient_accumulation_steps
    # Prepare optimizer for training
    # Bias and LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_group_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0
        }
    ]
    optimizer = AdamW(optimizer_group_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_steps * t_total),
                                                num_training_steps=t_total)
    loss_fct = CrossEntropyLoss()
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
        )
    # Train
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size)
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    steps_trained_in_current_epoch = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            # Skip past any already trained steps
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
            batch = tuple(t.to(args.device) for t in batch)
            # batch[0] holds the per-video indices; load the sampled JPEG
            # frames for each and stack to [batch, n_frames, C, H, W].
            train_video_ind = list(batch[0].cpu().numpy())
            all_image_frames = []
            for vid in train_video_ind:
                _all_image_frames = []
                for vfi in args.train_frame_ind:
                    frame = preprocess(
                        Image.open('{}{}_{}.jpg'.format(args.image_dir, train_video_names['data'][vid], str(vfi))))
                    _all_image_frames.append(frame.unsqueeze(0))
                _all_image_frames = torch.cat(_all_image_frames, dim=0).unsqueeze(0)
                all_image_frames.append(_all_image_frames)
            all_image_frames = torch.cat(all_image_frames, dim=0).to(args.device)
            inputs = {'image_frames': all_image_frames,
                      'audio': batch[1],
                      'summary': batch[2],
                      'script': batch[3],
                      'dialog': batch[4],
                      'ans_in_dialog': batch[5],
                      }
            image_features, n_video_features, n_text_features, logit_scale = model(inputs)
            # DataParallel gathers one logit_scale per replica; keep the first.
            logit_scale = logit_scale[0] if args.n_gpu > 1 else logit_scale
            logits = torch.mm(logit_scale * n_video_features, n_text_features.t())
            # In-batch contrastive labels: the i-th video matches the i-th text.
            labels = torch.tensor([i for i in range(n_text_features.size(0))], dtype=torch.long,
                                  device=args.device)
            # Symmetric loss over both retrieval directions.
            loss_i = loss_fct(logits, labels)
            loss_e = loss_fct(logits.t(), labels)
            loss = (loss_i + loss_e) / 2
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    if args.local_rank == -1 and global_step % args.eval_steps == 0 and val_set is not None:  # Only evaluate when single GPU otherwise metrics may not average well
                        evaluate(args, model, preprocess, val_set)
                    # Checkpoint is overwritten at every logging interval.
                    model_to_save = model.module if hasattr(model,
                                                            'module') else model  # Take care of distributed/parallel training
                    logger.info("Saving model checkpoint to %s", args.output_dir)
                    torch.save(model_to_save.state_dict(), args.output_dir + 'model.pt')
                    loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                    learning_rate_scalar = scheduler.get_last_lr()[0]
                    logs['learning_rate'] = learning_rate_scalar
                    logs['loss'] = loss_scalar
                    logging_loss = tr_loss
                    for key, value in logs.items():
                        tb_writer.add_scalar(key, value, global_step)
                    print(json.dumps({**logs, **{'step': global_step}}))
                    # if args.save_steps > 0 and global_step % args.save_steps == 0:
                    #     # Save model checkpoint
                    #     output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    #     if not os.path.exists(output_dir):
                    #         os.makedirs(output_dir)
                    #     model_to_save = model if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    #     model_to_save.save_pretrained(output_dir)
                    #     torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    #     logger.info("Saving model checkpoint to %s", output_dir)
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    # Guard against division by zero when no optimizer step was taken.
    global_step = 1 if global_step == 0 else global_step
    return global_step, tr_loss / global_step
def evaluate(args, model, preprocess, eval_dataset, prefix=""):
    """Evaluate dialogue-to-video retrieval: encode every video and query,
    build the full similarity matrix and print top-k accuracy and ranks.

    ``eval_dataset`` is a (TensorDataset, video-name metadata) tuple; frames
    are loaded lazily per batch, mirroring ``train``.
    """
    eval_dataset, eval_video_names = eval_dataset
    args._eval_batch_size = args.eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args._eval_batch_size)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
        model_module = model.module
    else:
        model_module = model
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    all_image_features = []
    all_text_features = []
    with torch.no_grad():
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model_module.eval()
            batch = tuple(t.to(args.device) for t in batch)
            # batch[0] holds the per-video indices; load the sampled frames.
            eval_video_ind = list(batch[0].cpu().numpy())
            all_image_frames = []
            for vid in eval_video_ind:
                _all_image_frames = []
                for vfi in args.eval_frame_ind:
                    frame = preprocess(
                        Image.open('{}{}_{}.jpg'.format(args.image_dir, eval_video_names['data'][vid], str(vfi))))
                    _all_image_frames.append(frame.unsqueeze(0))
                _all_image_frames = torch.cat(_all_image_frames, dim=0).unsqueeze(0)
                all_image_frames.append(_all_image_frames)
            all_image_frames = torch.cat(all_image_frames, dim=0).to(args.device)
            inputs = {'image_frames': all_image_frames,
                      'audio': batch[1],
                      'summary': batch[2],
                      'script': batch[3],
                      'dialog': batch[4],
                      'ans_in_dialog': batch[5],
                      }
            # image encoding without self-attention
            all_image_features.append(model_module.encode_image(inputs['image_frames']).transpose(0, 1))
            # 'script'/'summary' queries use the plain CLIP text encoder;
            # dialogue queries go through the dialogue-query encoder.
            if args.search_key in ['script', 'summary']:
                all_text_features.append(model_module.clip.get_text_features(inputs[args.search_key]))
            else:
                all_text_features.append(model_module.encode_dialogue_query(inputs[args.search_key],
                                                                            inputs[args.dialog_feature_key]))
            # print(all_image_features[-1].size(), all_text_features[-1].size())
    all_image_features = torch.cat(all_image_features, dim=0)
    # Mean-pool the per-frame features into one embedding per video.
    all_video_features = torch.sum(all_image_features, dim=1) / all_image_features.size(1)
    all_text_features = torch.cat(all_text_features, dim=0)
    # r_text_features = all_text_features.unsqueeze(0).repeat(all_text_features.size(0), 1, 1)  # added repeat
    # r_text_features = r_text_features.transpose(0, 1)
    # all_image_features = all_image_features.transpose(0, 1)
    # all_image_features = model_module.query_multi_attention(r_text_features,
    #                                                         all_image_features,
    #                                                         all_image_features)[0].transpose(0, 1).to('cuda')
    # model.to('cuda')
    # # with l2 norm
    t_video_features = torch.nn.functional.normalize(model_module.video_to_multimodal(all_video_features), p=2,
                                                     dim=-1)
    t_text_features = torch.nn.functional.normalize(model_module.text_to_multimodal(all_text_features), p=2, dim=-1)
    # # without l2 norm
    # t_video_features = model.video_to_multimodal(all_image_features)
    # t_text_features = model.text_to_multimodal(all_text_features)
    logit_scale = model_module.logit_scale.exp()
    # original multiply
    logits = torch.mm(logit_scale * t_video_features, t_text_features.t())
    # text weighted multiply
    # t_text_features = t_text_features.unsqueeze(1)
    # logits = torch.bmm(logit_scale * t_video_features.transpose(0, 1),
    #                    t_text_features.transpose(1, 2)).squeeze(-1)
    logits = logits.cpu().numpy()
    labels = [i for i in range(t_video_features.size(0))]
    # NOTE(review): logits rows are videos and columns are texts; confirm the
    # row-wise top-k scoring matches the intended retrieval direction.
    top_1 = top_k_accuracy_score(labels, logits, k=1)
    top_5 = top_k_accuracy_score(labels, logits, k=5)
    top_10 = top_k_accuracy_score(labels, logits, k=10)
    print('Metrics: top-1: {}, top-5: {}, top-10: {}'.format(str(round(100 * top_1, 2)),
                                                             str(round(100 * top_5, 2)),
                                                             str(round(100 * top_10, 2))))
    evaluate_rank(logits, labels)
    return
def evaluate_rank(sim_matrix, labels):
    """Compute and print the median/mean 1-based rank of the correct item.

    :param sim_matrix: iterable of per-query similarity rows (higher = better)
    :param labels: index of the ground-truth item for each row
    :return: (median_rank, mean_rank) — previously the metrics were only
        printed; returning them keeps callers backward compatible while
        making the values usable programmatically.
    """
    ranks = []
    for logits, label in zip(sim_matrix, labels):
        # Sort candidate indices by similarity, descending. sorted() is
        # stable, so ties keep index order — same behavior as the previous
        # dict-items sort, without building a throwaway dict.
        rank_list = sorted(range(len(logits)), key=lambda i: logits[i], reverse=True)
        ranks.append(rank_list.index(label) + 1)
    median_rank, mean_rank = np.median(ranks), np.mean(ranks)
    print('Metrics: median rank: {}, mean rank: {}'.format(str(median_rank), str(mean_rank)))
    return median_rank, mean_rank
def main():
    """Entry point for dialog-to-video retrieval training/evaluation.

    Parses CLI arguments, loads the cached AVSD tensors, builds the AADV
    model on top of a pretrained CLIP backbone, then runs training and/or
    evaluation according to ``--do_train`` / ``--do_eval``.

    Side effects: writes model checkpoint and args under
    ``trained_models/dialog_to_video_retrieval/...``; reads cached data
    from ``data/avsd/``; supports single-GPU and distributed (NCCL) runs.
    """
    # ---- CLI arguments ----
    parser = argparse.ArgumentParser()
    parser.add_argument("--task_name", type=str, default="avsd",
                        help="the name of the training task (the dataset name)")
    parser.add_argument("--model_size", type=str, default="16",
                        help="the size of pre-trained CLIP model (ViT-16 or ViT-32)")
    parser.add_argument("--num_train_epochs", type=int, default=10,
                        help="the numebr of training epochs")
    parser.add_argument("--do_train", action="store_true",
                        help="whether to train the model or not")
    parser.add_argument("--do_eval", action="store_true",
                        help="whether to evaluate the model or not")
    parser.add_argument("--weight_decay", type=float, default=0.0,
                        help="the weight decay rate")
    parser.add_argument("--learning_rate", type=float, default=1e-5,
                        help="the learning rate used to train the model")
    parser.add_argument("--warmup_steps", type=float, default=0.0,
                        help="the warm_up step rate")
    parser.add_argument("--seed", type=int, default=0,
                        help="the random seed used in model initialization and dataloader")
    parser.add_argument("--train_batch_size", type=int, default=16,
                        help="the batch size used in training")
    parser.add_argument("--eval_batch_size", type=int, default=16,
                        help="the batch size used in evaluation")
    parser.add_argument("--logging_steps", type=int, default=50,
                        help="the logging steps")
    parser.add_argument("--eval_steps", type=int, default=500,
                        help="conduct evaluation every eval_steps")
    parser.add_argument("--device", type=int, default=0,
                        help="the device id used for training and evaluation")
    parser.add_argument("--n_gpu", type=int, default=1,
                        help="number of gpus being used")
    parser.add_argument("--attention_heads", type=int, default=8,
                        help="the attention heads used in multi head attention function")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
                        help="number of updates steps to accumulate before performing a backward/update pass")
    parser.add_argument("--search_key", type=str, default="dialog",
                        help="the key used in retrieval")
    parser.add_argument("--dialog_feature_key", type=str, default="summary",
                        help="the key used in dialog feature fusion")
    parser.add_argument("--n_frames", type=int, default=6,
                        help="the frames sampled from each video in training")
    parser.add_argument("--eval_n_frames", type=int, default=6,
                        help="the frames sampled from each video in evaluation")
    parser.add_argument("--all_frame_feature_ratio", type=float, default=1.0,
                        help="the coefficient used to multiply with all frame features in training")
    parser.add_argument("--eval_all_frame_feature_ratio", type=float, default=1.0,
                        help="the coefficient used to multiply with all frame features in evaluation")
    parser.add_argument("--dialog_runs", type=int, default=10,
                        help="the runs of dialog query used in training and evaluation")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--image_dir", type=str, default="data/avsd/frames/",
                        help="the directory used to store video frames.")
    parser.add_argument("--clip_model_name", type=str, default="openai/clip-vit-base-patch16",
                        help="the name for the CLIP model used in training.")
    parser.add_argument("--clip_processor_name", type=str, default="ViT-B/16",
                        help="the name for the CLIP processor used in training.")
    args, _ = parser.parse_known_args()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    args.adam_epsilon = 1e-8
    args.max_grad_norm = 5.0
    set_seed(args) # Added here for reproductibility (even between python 2 and 3)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
    clip_model_name = args.clip_model_name # openai/clip-vit-large-patch14-336
    clip_processor_name = args.clip_processor_name # "ViT-L/14@336px"
    clip_config = CLIPConfig.from_pretrained(clip_model_name)
    args.transformer_width = clip_config.projection_dim
    args.audio_feature_dim = 20000
    # Total cached frames per video for train (t_frames) and eval (e_frames).
    args.t_frames = 60
    args.e_frames = 60
    # ---- Evenly-spaced frame indices for training, clamped to the last frame ----
    interval = args.t_frames // args.n_frames
    frame_ind = [i * interval for i in range(args.n_frames)]
    for i in range(len(frame_ind)):
        if frame_ind[i] >= args.t_frames:
            frame_ind[i] = args.t_frames - 1
    # Force the final sample to be the last available frame.
    frame_ind[-1] = args.t_frames - 1
    args.train_frame_ind = frame_ind
    # randomly sampled index
    # args.frame_ind = draw_samples([i for i in range(args.t_frames)], args.n_frames)
    # args.eval_n_frames = 30
    # ---- Frame indices for evaluation ----
    # NOTE(review): the stride below divides by n_frames but the loop runs
    # eval_n_frames times — if eval_n_frames != n_frames the spacing looks
    # inconsistent; confirm whether this is intentional.
    interval = args.e_frames // args.n_frames
    frame_ind = [i * interval for i in range(args.eval_n_frames)]
    for i in range(len(frame_ind)):
        if frame_ind[i] >= args.e_frames:
            frame_ind[i] = args.e_frames - 1
    frame_ind[-1] = args.e_frames - 1
    args.eval_frame_ind = frame_ind
    # args.eval_frame_ind = args.train_frame_ind
    # For the 'large' frame cache the first n_frames are used directly.
    if 'large' in args.image_dir:
        args.train_frame_ind = [i for i in range(args.n_frames)]
        args.eval_frame_ind = [i for i in range(args.n_frames)]
    # ---- Output directory encodes the run configuration ----
    model_prefix = 'video_retrieval'
    args.output_dir = 'trained_models/dialog_to_video_retrieval/' \
                      '{}_{}_epochs-{}_lr-{}'.format(model_prefix,
                                                     args.search_key,
                                                     str(args.num_train_epochs),
                                                     str(args.learning_rate))
    if args.local_rank in [-1, 0]:
        print(args.output_dir)
    # ---- Load pre-extracted tensors for train/val/test splits ----
    data_dirs = ["data/avsd/train.cache", "data/avsd/val.cache", "data/avsd/test.cache"]
    video_names = ["data/avsd/train_video_names.json", "data/avsd/val_video_names.json", "data/avsd/test_video_names.json"]
    all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog = pickle.load(
        open(data_dirs[0], 'rb'))
    train_dataset = TensorDataset(torch.cat(all_images, dim=0),
                                  torch.cat([audio.unsqueeze(0) for audio in all_audios], dim=0),
                                  torch.cat(all_summaries, dim=0), torch.cat(all_scripts, dim=0),
                                  torch.cat([dialog.unsqueeze(0) for dialog in all_dialogs], dim=0),
                                  torch.cat([ans.unsqueeze(0) for ans in all_ans_in_dialog], dim=0))
    # Each split is a (TensorDataset, video-name mapping) pair.
    train_dataset = (train_dataset, json_load(video_names[0]))
    all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog = pickle.load(
        open(data_dirs[1], 'rb'))
    val_video_names = json_load(video_names[1])
    val_dataset = TensorDataset(torch.cat(all_images, dim=0),
                                torch.cat([audio.unsqueeze(0) for audio in all_audios], dim=0),
                                torch.cat(all_summaries, dim=0),
                                torch.cat(all_scripts, dim=0),
                                torch.cat([dialog.unsqueeze(0) for dialog in all_dialogs], dim=0),
                                torch.cat([ans.unsqueeze(0) for ans in all_ans_in_dialog], dim=0))
    val_dataset = (val_dataset, {'data': val_video_names['data']})
    all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog = pickle.load(
        open(data_dirs[2], 'rb'))
    test_video_names = json_load(video_names[2])
    test_dataset = TensorDataset(torch.cat(all_images, dim=0),
                                 torch.cat([audio.unsqueeze(0)
                                            for audio in all_audios], dim=0),
                                 torch.cat(all_summaries, dim=0),
                                 torch.cat(all_scripts, dim=0),
                                 torch.cat([dialog.unsqueeze(0)
                                            for dialog in all_dialogs], dim=0),
                                 torch.cat([ans.unsqueeze(0)
                                            for ans in all_ans_in_dialog], dim=0))
    test_dataset = (test_dataset, {'data': test_video_names['data']})
    if args.local_rank == 0:
        torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
    # Only the CLIP preprocessing transform is kept; the model weights are freed.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    clip_model, preprocess = clip.load(clip_processor_name, device=device)
    del clip_model
    logger.info("Training/evaluation parameters %s", args)
    # Training
    if args.do_train:
        model = AADV(args, clip_config)
        model.clip = CLIPModel.from_pretrained(clip_model_name)
        model.to(args.device)
        global_step, tr_loss = train(args, model, train_dataset, preprocess, val_set=val_dataset)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        model_to_save = model.module if hasattr(model,
                                                'module') else model # Take care of distributed/parallel training
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # NOTE(review): output_dir has no trailing separator, so this writes
        # '<output_dir>model.pt' next to (not inside) the created directory;
        # the load below uses the same pattern, so they stay consistent.
        torch.save(model_to_save.state_dict(), args.output_dir + 'model.pt')
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
    # Evaluation
    if args.do_eval and args.local_rank in [-1, 0]:
        checkpoints = [args.output_dir]
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            model = AADV(args, clip_config)
            model.load_state_dict(torch.load(checkpoint + 'model.pt'))
            model.to(args.device)
            evaluate(args, model, preprocess, val_dataset)
            evaluate(args, model, preprocess, test_dataset)
    return
if __name__ == '__main__':
    # Script entry point: run training/evaluation as selected by CLI flags.
    main()
| 25,440 | 46.025878 | 180 | py |
PLRDiff | PLRDiff-main/test_single.py | import argparse
import os
import numpy as np
import torch as th
import torch.nn.functional as nF
from pathlib import Path
from guided_diffusion import utils
from guided_diffusion.create import create_model_and_diffusion_RS
import scipy.io as sio
from collections import OrderedDict
from os.path import join
from skimage.metrics import peak_signal_noise_ratio as PSNR
from skimage.metrics import structural_similarity as SSIM
from guided_diffusion.core import imresize
from math import sqrt, log
def blur_kernel(shape, var):
    """Build a normalized 2-D Gaussian blur kernel.

    Args:
        shape (int): odd side length of the square kernel.
        var (float): Gaussian standard deviation (despite the name: it is
            used as sigma in ``exp(-r^2 / (2*var^2))``; the caller passes
            ``sqrt(4**2 / (8*log(2)))``, i.e. a sigma).

    Returns:
        np.ndarray: float32 array of shape ``(shape, shape)`` summing to 1,
        peaked at the center.

    Raises:
        ValueError: if ``shape`` is even (the kernel needs a unique center).
    """
    if shape % 2 != 1:
        # Was `assert shape%2==1`, which silently disappears under `python -O`;
        # raise explicitly so invalid input always fails.
        raise ValueError("blur_kernel: shape must be odd, got {}".format(shape))
    mu = (shape - 1) // 2  # center index
    XX, YY = np.meshgrid(np.arange(shape), np.arange(shape))
    out = np.exp((-(XX - mu) ** 2 - (YY - mu) ** 2) / (2 * var ** 2))
    return np.float32(out / out.sum())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--baseconfig', type=str, default='base.json',
help='JSON file for creating model and diffusion')
parser.add_argument('-gpu', '--gpu_ids', type=str, default="0")
parser.add_argument('-dr', '--dataroot', type=str, default='') # dataroot with
parser.add_argument('-bs', '--batch_size', type=int, default=1)
parser.add_argument('-sr', '--savedir', type=str, default='./results')
parser.add_argument('-eta1', '--eta1', type=float, default=1) # trade off parameters 1
parser.add_argument('-eta2', '--eta2', type=float, default=2) # trade off parameters 2
parser.add_argument('-seed', '--seed', type=int, default=0)
parser.add_argument('-dn', '--dataname', type=str, default='') # dataname: used for save output
parser.add_argument('-step', '--step', type=int, default=1000) # diffusion steps
parser.add_argument('-scale', '--scale', type=int, default=4) # downsample scale
parser.add_argument('-ks', '--kernelsize', type=int, default=9) # kernel size
parser.add_argument('-sig', '--sig', type=float, default=None) # kernel variance
parser.add_argument('-sn', '--samplenum', type=int, default=1) # sample number
parser.add_argument('-rs', '--resume_state', type=str, default='') # path: pretrained model
## parse configs
args = parser.parse_args()
opt = utils.parse(args)
opt = utils.dict_to_nonedict(opt)
opt['diffusion']['diffusion_steps'] = opt['step']
device = th.device("cuda")
## create model and diffusion process
model, diffusion = create_model_and_diffusion_RS(opt)
## seed
seeed = opt['seed']
print(seeed)
np.random.seed(seeed)
th.manual_seed(seeed)
th.cuda.manual_seed(seeed)
## load model
load_path = opt['resume_state']
gen_path = '{}_gen.pth'.format(load_path)
cks = th.load(gen_path)
new_cks = OrderedDict()
for k, v in cks.items():
newkey = k[11:] if k.startswith('denoise_fn.') else k
new_cks[newkey] = v
model.load_state_dict(new_cks, strict=False)
model.to(device)
model.eval()
## params
param = dict()
param['scale'] = opt['scale']
param['eta1'] = opt['eta1']
param['eta2'] = opt['eta2']
k_s = opt['kernelsize']
if opt['sig'] is None:
sig = sqrt(4**2/(8*log(2)))
else:
sig = opt['sig']
## img
data = sio.loadmat(opt['dataroot'])
HRMS = th.from_numpy(np.float32(data['HRMS']))
ms, Ch = HRMS.shape[0], HRMS.shape[-1]
HRMS = HRMS.permute(2,0,1).unsqueeze(0)
param['k_s'] = k_s
kernel = blur_kernel(k_s, sig)
kernel = th.from_numpy(kernel).repeat(Ch,1,1,1)
param['kernel'] = kernel.to(device)
if opt['dataname'] == 'Chikusei':
param['Band'] = th.Tensor([31,63,95]).type(th.int).to(device)
elif opt['dataname'] == 'Houston':
param['Band'] = th.Tensor([35,71,107]).type(th.int).to(device)
elif opt['dataname'] == 'Pavia':
param['Band'] = th.Tensor([25,51,77]).type(th.int).to(device)
PH = th.from_numpy(np.float32(data['H'])).unsqueeze(0).unsqueeze(-1)
param['PH'] = PH.to(device)
PAN = th.from_numpy(np.float32(data['PAN'])).unsqueeze(0).unsqueeze(0)
LRMS = nF.conv2d(HRMS, kernel.to(HRMS.device), padding=int((k_s - 1)/2), groups=Ch)
LRMS = imresize(LRMS, 1/opt['scale'])
model_condition = {'LRMS': LRMS.to(device), 'PAN': PAN.to(device)}
out_path = Path(join(opt['savedir'], str(opt['scale'])+"_"+str(opt['kernelsize'])+"_"+str(sig)))
out_path.mkdir(parents=True, exist_ok=True)
Rr = 3 # spectral dimensironality of subspace
dname = opt['dataname']
for j in range(opt['samplenum']):
sample,E = diffusion.p_sample_loop(
model,
(1, Ch, ms, ms),
Rr = Rr,
clip_denoised=True,
model_condition=model_condition,
param=param,
save_root=out_path,
progress=True,
)
sample = (sample + 1)/2
im_out = th.matmul(E, sample.reshape(opt['batch_size'], Rr, -1)).reshape(opt['batch_size'], Ch, ms, ms)
im_out = im_out.cpu().squeeze(0).permute(1,2,0).numpy()
HRMS = HRMS.squeeze(0).permute(1,2,0).numpy()
LRMS = LRMS.squeeze(0).permute(1,2,0).numpy()
PAN = PAN.squeeze(0).permute(1,2,0).numpy()
A = sample.cpu().squeeze(0).permute(1,2,0).numpy()
E = E.cpu().squeeze(0).numpy()
psnr = 0
for i in range(Ch):
psnr += PSNR(HRMS, im_out)
psnr /= Ch
ssim = SSIM(HRMS, im_out)
## save output
sio.savemat(join(out_path, opt['dataname']+str(opt['step'])+".mat"), {'Rec': im_out, 'HRMS': HRMS, 'LRMS': LRMS, 'PAN':PAN, 'E':E, 'A':A})
print(f"{dname:s} \t PSNR: \t {psnr:.2f} \t SSIM: \t {ssim:.4f} \n")
| 5,697 | 35.292994 | 146 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.