| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
class DataUnreadableError(Exception):
pass
| 2301_81045437/openpilot | tools/lib/exceptions.py | Python | mit | 45 |
import os
import socket
from urllib.parse import urlparse
from openpilot.tools.lib.url_file import URLFile
DATA_ENDPOINT = os.getenv("DATA_ENDPOINT", "http://data-raw.comma.internal/")
def internal_source_available():
try:
parsed = urlparse(DATA_ENDPOINT)
hostname = parsed.hostname
port = parsed.port or 80
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((hostname, port))
return True
except (socket.gaierror, ConnectionRefusedError):
pass
return False
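# "cd:/" is shorthand for the internal comma data endpoint; resolve_name() expands it to a full URL.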
def resolve_name(fn):
if fn.startswith("cd:/"):
return fn.replace("cd:/", DATA_ENDPOINT)
return fn
def file_exists(fn):
fn = resolve_name(fn)
if fn.startswith(("http://", "https://")):
return URLFile(fn).get_length_online() != -1
return os.path.exists(fn)
def FileReader(fn, debug=False):
fn = resolve_name(fn)
if fn.startswith(("http://", "https://")):
return URLFile(fn, debug=debug)
return open(fn, "rb")
| 2301_81045437/openpilot | tools/lib/filereader.py | Python | mit | 955 |
import json
import os
import pickle
import struct
import subprocess
import threading
from enum import IntEnum
from functools import wraps
import numpy as np
from lru import LRU
import _io
from openpilot.tools.lib.cache import cache_path_for_file_path, DEFAULT_CACHE_DIR
from openpilot.tools.lib.exceptions import DataUnreadableError
from openpilot.tools.lib.vidindex import hevc_index
from openpilot.common.file_helpers import atomic_write_in_dir
from openpilot.tools.lib.filereader import FileReader, resolve_name
HEVC_SLICE_B = 0
HEVC_SLICE_P = 1
HEVC_SLICE_I = 2
class GOPReader:
def get_gop(self, num):
# returns (start_frame_num, num_frames, frames_to_skip, gop_data)
raise NotImplementedError
class DoNothingContextManager:
def __enter__(self):
return self
def __exit__(self, *x):
pass
class FrameType(IntEnum):
raw = 1
h265_stream = 2
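# Identify the frame container from its first four bytes: 0x00c01200 marks the raw
# camera format, while an Annex B start code (0x00000001) marks an H.265 bitstream.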
def fingerprint_video(fn):
with FileReader(fn) as f:
header = f.read(4)
if len(header) == 0:
raise DataUnreadableError(f"{fn} is empty")
elif header == b"\x00\xc0\x12\x00":
return FrameType.raw
elif header == b"\x00\x00\x00\x01":
if 'hevc' in fn:
return FrameType.h265_stream
else:
raise NotImplementedError(fn)
else:
raise NotImplementedError(fn)
def ffprobe(fn, fmt=None):
fn = resolve_name(fn)
cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", "-show_format", "-show_streams"]
if fmt:
cmd += ["-f", fmt]
cmd += ["-i", "-"]
try:
with FileReader(fn) as f:
ffprobe_output = subprocess.check_output(cmd, input=f.read(4096))
except subprocess.CalledProcessError as e:
raise DataUnreadableError(fn) from e
return json.loads(ffprobe_output)
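# Decorator that memoizes the wrapped function's result in a pickle file derived from the
# source file path. Pass no_cache=True to bypass it, or cache_dir=... to relocate it.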
def cache_fn(func):
@wraps(func)
def cache_inner(fn, *args, **kwargs):
if kwargs.pop('no_cache', None):
cache_path = None
else:
cache_dir = kwargs.pop('cache_dir', DEFAULT_CACHE_DIR)
cache_path = cache_path_for_file_path(fn, cache_dir)
if cache_path and os.path.exists(cache_path):
with open(cache_path, "rb") as cache_file:
cache_value = pickle.load(cache_file)
else:
cache_value = func(fn, *args, **kwargs)
if cache_path:
with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as cache_file:
pickle.dump(cache_value, cache_file, -1)
return cache_value
return cache_inner
@cache_fn
def index_stream(fn, ft):
if ft != FrameType.h265_stream:
raise NotImplementedError("Only h265 supported")
frame_types, dat_len, prefix = hevc_index(fn)
index = np.array(frame_types + [(0xFFFFFFFF, dat_len)], dtype=np.uint32)
probe = ffprobe(fn, "hevc")
return {
'index': index,
'global_prefix': prefix,
'probe': probe
}
def get_video_index(fn, frame_type, cache_dir=DEFAULT_CACHE_DIR):
return index_stream(fn, frame_type, cache_dir=cache_dir)
def read_file_check_size(f, sz, cookie):
buff = bytearray(sz)
bytes_read = f.readinto(buff)
assert bytes_read == sz, (bytes_read, sz)
return buff
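# RGB -> YUV conversion with BT.601-style luma coefficients; the chroma (U/V) planes are
# box-averaged over 2x2 blocks (4:2:0 subsampling) and offset by +128.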
def rgb24toyuv(rgb):
yuv_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],
[-0.14714119, -0.28886916, 0.43601035 ],
[ 0.61497538, -0.51496512, -0.10001026 ]])
img = np.dot(rgb.reshape(-1, 3), yuv_from_rgb.T).reshape(rgb.shape)
ys = img[:, :, 0]
us = (img[::2, ::2, 1] + img[1::2, ::2, 1] + img[::2, 1::2, 1] + img[1::2, 1::2, 1]) / 4 + 128
vs = (img[::2, ::2, 2] + img[1::2, ::2, 2] + img[::2, 1::2, 2] + img[1::2, 1::2, 2]) / 4 + 128
return ys, us, vs
def rgb24toyuv420(rgb):
ys, us, vs = rgb24toyuv(rgb)
y_len = rgb.shape[0] * rgb.shape[1]
uv_len = y_len // 4
yuv420 = np.empty(y_len + 2 * uv_len, dtype=rgb.dtype)
yuv420[:y_len] = ys.reshape(-1)
yuv420[y_len:y_len + uv_len] = us.reshape(-1)
yuv420[y_len + uv_len:y_len + 2 * uv_len] = vs.reshape(-1)
return yuv420.clip(0, 255).astype('uint8')
def rgb24tonv12(rgb):
ys, us, vs = rgb24toyuv(rgb)
y_len = rgb.shape[0] * rgb.shape[1]
uv_len = y_len // 4
nv12 = np.empty(y_len + 2 * uv_len, dtype=rgb.dtype)
nv12[:y_len] = ys.reshape(-1)
nv12[y_len::2] = us.reshape(-1)
nv12[y_len+1::2] = vs.reshape(-1)
return nv12.clip(0, 255).astype('uint8')
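# Decode a raw bitstream to raw frames by piping it through ffmpeg. "-flags2 showall"
# makes ffmpeg also emit frames that precede the first keyframe, which callers skip later.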
def decompress_video_data(rawdat, vid_fmt, w, h, pix_fmt):
threads = os.getenv("FFMPEG_THREADS", "0")
cuda = os.getenv("FFMPEG_CUDA", "0") == "1"
args = ["ffmpeg", "-v", "quiet",
"-threads", threads,
"-hwaccel", "none" if not cuda else "cuda",
"-c:v", "hevc",
"-vsync", "0",
"-f", vid_fmt,
"-flags2", "showall",
"-i", "-",
"-threads", threads,
"-f", "rawvideo",
"-pix_fmt", pix_fmt,
"-"]
dat = subprocess.check_output(args, input=rawdat)
if pix_fmt == "rgb24":
ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, h, w, 3)
elif pix_fmt in ("nv12", "yuv420p"):
ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, (h*w*3//2))
elif pix_fmt == "yuv444p":
ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, 3, h, w)
else:
raise NotImplementedError
return ret
class BaseFrameReader:
# properties: frame_type, frame_count, w, h
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
pass
def get(self, num, count=1, pix_fmt="yuv420p"):
raise NotImplementedError
def FrameReader(fn, cache_dir=DEFAULT_CACHE_DIR, readahead=False, readbehind=False, index_data=None):
frame_type = fingerprint_video(fn)
if frame_type == FrameType.raw:
return RawFrameReader(fn)
elif frame_type in (FrameType.h265_stream,):
if not index_data:
index_data = get_video_index(fn, frame_type, cache_dir)
return StreamFrameReader(fn, frame_type, index_data, readahead=readahead, readbehind=readbehind)
else:
raise NotImplementedError(frame_type)
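# Fixed-record raw camera file: each frame is stored as a 4-byte length prefix followed by
# the frame bytes, so frame i lives at offset (lenn+4)*i + 4.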
class RawData:
def __init__(self, f):
self.f = _io.FileIO(f, 'rb')
self.lenn = struct.unpack("I", self.f.read(4))[0]
self.count = os.path.getsize(f) // (self.lenn+4)
def read(self, i):
self.f.seek((self.lenn+4)*i + 4)
return self.f.read(self.lenn)
class RawFrameReader(BaseFrameReader):
def __init__(self, fn):
# raw camera
self.fn = fn
self.frame_type = FrameType.raw
self.rawfile = RawData(self.fn)
self.frame_count = self.rawfile.count
self.w, self.h = 640, 480
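# Cheap half-resolution demosaic of the 1280x960 mosaic: two opposite corner sites of each
# 2x2 cell are taken directly and the diagonal pair (the green sites in a typical Bayer
# layout) is averaged.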
def load_and_debayer(self, img):
img = np.frombuffer(img, dtype='uint8').reshape(960, 1280)
cimg = np.dstack([img[0::2, 1::2], ((img[0::2, 0::2].astype("uint16") + img[1::2, 1::2].astype("uint16")) >> 1).astype("uint8"), img[1::2, 0::2]])
return cimg
def get(self, num, count=1, pix_fmt="yuv420p"):
assert self.frame_count is not None
assert num+count <= self.frame_count
if pix_fmt not in ("nv12", "yuv420p", "rgb24"):
raise ValueError(f"Unsupported pixel format {pix_fmt!r}")
app = []
for i in range(num, num+count):
dat = self.rawfile.read(i)
rgb_dat = self.load_and_debayer(dat)
if pix_fmt == "rgb24":
app.append(rgb_dat)
elif pix_fmt == "nv12":
app.append(rgb24tonv12(rgb_dat))
elif pix_fmt == "yuv420p":
app.append(rgb24toyuv420(rgb_dat))
else:
raise NotImplementedError
return app
class VideoStreamDecompressor:
def __init__(self, fn, vid_fmt, w, h, pix_fmt):
self.fn = fn
self.vid_fmt = vid_fmt
self.w = w
self.h = h
self.pix_fmt = pix_fmt
if pix_fmt in ("nv12", "yuv420p"):
self.out_size = w*h*3//2 # yuv420p
elif pix_fmt in ("rgb24", "yuv444p"):
self.out_size = w*h*3
else:
raise NotImplementedError
self.proc = None
self.t = threading.Thread(target=self.write_thread)
self.t.daemon = True
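# Feeder thread: streams the (possibly remote) file into ffmpeg's stdin in 1 MB chunks so
# reading the source and decoding can overlap.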
def write_thread(self):
try:
with FileReader(self.fn) as f:
while True:
r = f.read(1024*1024)
if len(r) == 0:
break
self.proc.stdin.write(r)
except BrokenPipeError:
pass
finally:
self.proc.stdin.close()
def read(self):
threads = os.getenv("FFMPEG_THREADS", "0")
cuda = os.getenv("FFMPEG_CUDA", "0") == "1"
cmd = [
"ffmpeg",
"-threads", threads,
"-hwaccel", "none" if not cuda else "cuda",
"-c:v", "hevc",
# "-avioflags", "direct",
"-analyzeduration", "0",
"-probesize", "32",
"-flush_packets", "0",
# "-fflags", "nobuffer",
"-vsync", "0",
"-f", self.vid_fmt,
"-i", "pipe:0",
"-threads", threads,
"-f", "rawvideo",
"-pix_fmt", self.pix_fmt,
"pipe:1"
]
self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
try:
self.t.start()
while True:
dat = self.proc.stdout.read(self.out_size)
if len(dat) == 0:
break
assert len(dat) == self.out_size
if self.pix_fmt == "rgb24":
ret = np.frombuffer(dat, dtype=np.uint8).reshape((self.h, self.w, 3))
elif self.pix_fmt in ("yuv420p", "nv12"):
ret = np.frombuffer(dat, dtype=np.uint8)
elif self.pix_fmt == "yuv444p":
ret = np.frombuffer(dat, dtype=np.uint8).reshape((3, self.h, self.w))
else:
raise RuntimeError(f"unknown pix_fmt: {self.pix_fmt}")
yield ret
result_code = self.proc.wait()
assert result_code == 0, result_code
finally:
self.proc.kill()
self.t.join()
class StreamGOPReader(GOPReader):
def __init__(self, fn, frame_type, index_data):
assert frame_type == FrameType.h265_stream
self.fn = fn
self.frame_type = frame_type
self.frame_count = None
self.w, self.h = None, None
self.index = index_data['index']
self.prefix = index_data['global_prefix']
probe = index_data['probe']
self.prefix_frame_data = None
self.num_prefix_frames = 0
self.vid_fmt = "hevc"
i = 0
while i < self.index.shape[0] and self.index[i, 0] != HEVC_SLICE_I:
i += 1
self.first_iframe = i
assert self.first_iframe == 0
self.frame_count = len(self.index) - 1
self.w = probe['streams'][0]['width']
self.h = probe['streams'][0]['height']
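# Walk backwards from the requested frame to the I-frame that opens its GOP, and forwards
# to the frame that opens the next GOP; returns both frame indices and their byte offsets.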
def _lookup_gop(self, num):
frame_b = num
while frame_b > 0 and self.index[frame_b, 0] != HEVC_SLICE_I:
frame_b -= 1
frame_e = num + 1
while frame_e < (len(self.index) - 1) and self.index[frame_e, 0] != HEVC_SLICE_I:
frame_e += 1
offset_b = self.index[frame_b, 1]
offset_e = self.index[frame_e, 1]
return (frame_b, frame_e, offset_b, offset_e)
def get_gop(self, num):
frame_b, frame_e, offset_b, offset_e = self._lookup_gop(num)
assert frame_b <= num < frame_e
num_frames = frame_e - frame_b
with FileReader(self.fn) as f:
f.seek(offset_b)
rawdat = f.read(offset_e - offset_b)
if num < self.first_iframe:
assert self.prefix_frame_data
rawdat = self.prefix_frame_data + rawdat
rawdat = self.prefix + rawdat
skip_frames = 0
if num < self.first_iframe:
skip_frames = self.num_prefix_frames
return frame_b, num_frames, skip_frames, rawdat
class GOPFrameReader(BaseFrameReader):
#FrameReader with caching and readahead for formats that are group-of-picture based
def __init__(self, readahead=False, readbehind=False):
self.open_ = True
self.readahead = readahead
self.readbehind = readbehind
self.frame_cache = LRU(64)
if self.readahead:
self.cache_lock = threading.RLock()
self.readahead_last = None
self.readahead_len = 30
self.readahead_c = threading.Condition()
self.readahead_thread = threading.Thread(target=self._readahead_thread)
self.readahead_thread.daemon = True
self.readahead_thread.start()
else:
self.cache_lock = DoNothingContextManager()
def close(self):
if not self.open_:
return
self.open_ = False
if self.readahead:
self.readahead_c.acquire()
self.readahead_c.notify()
self.readahead_c.release()
self.readahead_thread.join()
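# Background decoder: woken after each get(), it decodes the following readahead_len
# frames (or the preceding ones when readbehind is set) into the shared LRU cache.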
def _readahead_thread(self):
while True:
self.readahead_c.acquire()
try:
if not self.open_:
break
self.readahead_c.wait()
finally:
self.readahead_c.release()
if not self.open_:
break
assert self.readahead_last
num, pix_fmt = self.readahead_last
if self.readbehind:
for k in range(num - 1, max(0, num - self.readahead_len), -1):
self._get_one(k, pix_fmt)
else:
for k in range(num, min(self.frame_count, num + self.readahead_len)):
self._get_one(k, pix_fmt)
def _get_one(self, num, pix_fmt):
assert num < self.frame_count
if (num, pix_fmt) in self.frame_cache:
return self.frame_cache[(num, pix_fmt)]
with self.cache_lock:
if (num, pix_fmt) in self.frame_cache:
return self.frame_cache[(num, pix_fmt)]
frame_b, num_frames, skip_frames, rawdat = self.get_gop(num)
ret = decompress_video_data(rawdat, self.vid_fmt, self.w, self.h, pix_fmt)
ret = ret[skip_frames:]
assert ret.shape[0] == num_frames
for i in range(ret.shape[0]):
self.frame_cache[(frame_b+i, pix_fmt)] = ret[i]
return self.frame_cache[(num, pix_fmt)]
def get(self, num, count=1, pix_fmt="yuv420p"):
assert self.frame_count is not None
if num + count > self.frame_count:
raise ValueError(f"{num + count} > {self.frame_count}")
if pix_fmt not in ("nv12", "yuv420p", "rgb24", "yuv444p"):
raise ValueError(f"Unsupported pixel format {pix_fmt!r}")
ret = [self._get_one(num + i, pix_fmt) for i in range(count)]
if self.readahead:
self.readahead_last = (num+count, pix_fmt)
self.readahead_c.acquire()
self.readahead_c.notify()
self.readahead_c.release()
return ret
class StreamFrameReader(StreamGOPReader, GOPFrameReader):
def __init__(self, fn, frame_type, index_data, readahead=False, readbehind=False):
StreamGOPReader.__init__(self, fn, frame_type, index_data)
GOPFrameReader.__init__(self, readahead, readbehind)
def GOPFrameIterator(gop_reader, pix_fmt):
dec = VideoStreamDecompressor(gop_reader.fn, gop_reader.vid_fmt, gop_reader.w, gop_reader.h, pix_fmt)
yield from dec.read()
def FrameIterator(fn, pix_fmt, **kwargs):
fr = FrameReader(fn, **kwargs)
if isinstance(fr, GOPReader):
yield from GOPFrameIterator(fr, pix_fmt)
else:
for i in range(fr.frame_count):
yield fr.get(i, pix_fmt=pix_fmt)[0]
| 2301_81045437/openpilot | tools/lib/framereader.py | Python | mit | 14,860 |
import bz2
# regex patterns
class RE:
DONGLE_ID = r'(?P<dongle_id>[a-f0-9]{16})'
TIMESTAMP = r'(?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}--[0-9]{2}-[0-9]{2}-[0-9]{2})'
LOG_ID_V2 = r'(?P<count>[a-f0-9]{8})--(?P<uid>[a-z0-9]{10})'
LOG_ID = fr'(?P<log_id>(?:{TIMESTAMP}|{LOG_ID_V2}))'
ROUTE_NAME = fr'(?P<route_name>{DONGLE_ID}[|_/]{LOG_ID})'
SEGMENT_NAME = fr'{ROUTE_NAME}(?:--|/)(?P<segment_num>[0-9]+)'
INDEX = r'-?[0-9]+'
SLICE = fr'(?P<start>{INDEX})?:?(?P<end>{INDEX})?:?(?P<step>{INDEX})?'
SEGMENT_RANGE = fr'{ROUTE_NAME}(?:(--|/)(?P<slice>({SLICE})))?(?:/(?P<selector>([qras])))?'
BOOTLOG_NAME = ROUTE_NAME
EXPLORER_FILE = fr'^(?P<segment_name>{SEGMENT_NAME})--(?P<file_name>[a-z]+\.[a-z0-9]+)$'
OP_SEGMENT_DIR = fr'^(?P<segment_name>{SEGMENT_NAME})$'
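# Example (demo route, segment index 5 chosen for illustration):
# "a2a0ccea32023010|2023-07-27--13-01-19--5" matches SEGMENT_NAME with
# dongle_id "a2a0ccea32023010", timestamp "2023-07-27--13-01-19" and segment_num "5".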
def save_log(dest, log_msgs, compress=True):
dat = b"".join(msg.as_builder().to_bytes() for msg in log_msgs)
if compress:
dat = bz2.compress(dat)
with open(dest, "wb") as f:
f.write(dat)
| 2301_81045437/openpilot | tools/lib/helpers.py | Python | mit | 989 |
#!/usr/bin/env python
import sys
import termios
import atexit
from select import select
STDIN_FD = sys.stdin.fileno()
class KBHit:
def __init__(self) -> None:
''' Creates a KBHit object for non-blocking keyboard input on a POSIX terminal.
'''
self.set_kbhit_terminal()
def set_kbhit_terminal(self) -> None:
''' Save the old terminal settings for later restoration, then clear the ICANON & ECHO flags.
'''
# Save the terminal settings
self.old_term = termios.tcgetattr(STDIN_FD)
self.new_term = self.old_term.copy()
# New terminal setting unbuffered
self.new_term[3] &= ~(termios.ICANON | termios.ECHO)
termios.tcsetattr(STDIN_FD, termios.TCSAFLUSH, self.new_term)
# Support normal-terminal reset at exit
atexit.register(self.set_normal_term)
def set_normal_term(self) -> None:
''' Restores the saved terminal settings.
'''
termios.tcsetattr(STDIN_FD, termios.TCSAFLUSH, self.old_term)
@staticmethod
def getch() -> str:
''' Returns a keyboard character after kbhit() has been called.
Should not be called in the same program as getarrow().
'''
return sys.stdin.read(1)
@staticmethod
def getarrow() -> int:
''' Returns an arrow-key code after kbhit() has been called. Codes are
0 : up
1 : right
2 : down
3 : left
Should not be called in the same program as getch().
'''
c = sys.stdin.read(3)[2]
vals = [65, 67, 66, 68]
return vals.index(ord(c))
@staticmethod
def kbhit():
''' Returns True if keyboard character was hit, False otherwise.
'''
return select([sys.stdin], [], [], 0)[0] != []
# Test
if __name__ == "__main__":
kb = KBHit()
print('Hit any key, or ESC to exit')
while True:
if kb.kbhit():
c = kb.getch()
if c == '\x1b': # ESC
break
print(c)
kb.set_normal_term()
| 2301_81045437/openpilot | tools/lib/kbhit.py | Python | mit | 1,873 |
import os
from cereal import log as capnp_log, messaging
from cereal.services import SERVICE_LIST
from openpilot.tools.lib.logreader import LogIterable, RawLogIterable
ALL_SERVICES = list(SERVICE_LIST.keys())
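# Subscribe to the requested services and yield raw capnp-encoded messages as they arrive;
# a non-local address switches messaging to the ZMQ backend (ZMQ=1).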
def raw_live_logreader(services: list[str] = ALL_SERVICES, addr: str = '127.0.0.1') -> RawLogIterable:
if addr != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
poller = messaging.Poller()
for m in services:
messaging.sub_sock(m, poller, addr=addr)
while True:
polld = poller.poll(100)
for sock in polld:
msg = sock.receive()
yield msg
def live_logreader(services: list[str] = ALL_SERVICES, addr: str = '127.0.0.1') -> LogIterable:
for m in raw_live_logreader(services, addr):
with capnp_log.Event.from_bytes(m) as evt:
yield evt
| 2301_81045437/openpilot | tools/lib/live_logreader.py | Python | mit | 830 |
#!/usr/bin/env python3
import bz2
from functools import partial
import multiprocessing
import capnp
import enum
import os
import pathlib
import sys
import tqdm
import urllib.parse
import warnings
from collections.abc import Callable, Iterable, Iterator
from urllib.parse import parse_qs, urlparse
from cereal import log as capnp_log
from openpilot.common.swaglog import cloudlog
from openpilot.tools.lib.comma_car_segments import get_url as get_comma_segments_url
from openpilot.tools.lib.openpilotci import get_url
from openpilot.tools.lib.filereader import FileReader, file_exists, internal_source_available
from openpilot.tools.lib.route import Route, SegmentRange
LogMessage = type[capnp._DynamicStructReader]
LogIterable = Iterable[LogMessage]
RawLogIterable = Iterable[bytes]
class _LogFileReader:
def __init__(self, fn, canonicalize=True, only_union_types=False, sort_by_time=False, dat=None):
self.data_version = None
self._only_union_types = only_union_types
ext = None
if not dat:
_, ext = os.path.splitext(urllib.parse.urlparse(fn).path)
if ext not in ('', '.bz2'):
# old rlogs weren't bz2 compressed
raise Exception(f"unknown extension {ext}")
with FileReader(fn) as f:
dat = f.read()
if ext == ".bz2" or dat.startswith(b'BZh9'):
dat = bz2.decompress(dat)
ents = capnp_log.Event.read_multiple_bytes(dat)
_ents = []
try:
for e in ents:
_ents.append(e)
except capnp.KjException:
warnings.warn("Corrupted events detected", RuntimeWarning, stacklevel=1)
self._ents = list(sorted(_ents, key=lambda x: x.logMonoTime) if sort_by_time else _ents)
self._ts = [x.logMonoTime for x in self._ents]
def __iter__(self) -> Iterator[capnp._DynamicStructReader]:
for ent in self._ents:
if self._only_union_types:
try:
ent.which()
yield ent
except capnp.lib.capnp.KjException:
pass
else:
yield ent
class ReadMode(enum.StrEnum):
RLOG = "r" # only read rlogs
QLOG = "q" # only read qlogs
SANITIZED = "s" # read from the commaCarSegments database
AUTO = "a" # default to rlogs, fallback to qlogs
AUTO_INTERACTIVE = "i" # default to rlogs, fallback to qlogs with a prompt from the user
LogPath = str | None
LogPaths = list[LogPath]
ValidFileCallable = Callable[[LogPath], bool]
Source = Callable[[SegmentRange, ReadMode], LogPaths]
InternalUnavailableException = Exception("Internal source not available")
def default_valid_file(fn: LogPath) -> bool:
return fn is not None and file_exists(fn)
def auto_strategy(rlog_paths: LogPaths, qlog_paths: LogPaths, interactive: bool, valid_file: ValidFileCallable) -> LogPaths:
# auto select logs based on availability
if any(rlog is None or not valid_file(rlog) for rlog in rlog_paths) and all(qlog is not None and valid_file(qlog) for qlog in qlog_paths):
if interactive:
if input("Some rlogs were not found, would you like to fallback to qlogs for those segments? (y/n) ").lower() != "y":
return rlog_paths
else:
cloudlog.warning("Some rlogs were not found, falling back to qlogs for those segments...")
return [rlog if valid_file(rlog) else (qlog if valid_file(qlog) else None)
for (rlog, qlog) in zip(rlog_paths, qlog_paths, strict=True)]
return rlog_paths
def apply_strategy(mode: ReadMode, rlog_paths: LogPaths, qlog_paths: LogPaths, valid_file: ValidFileCallable = default_valid_file) -> LogPaths:
if mode == ReadMode.RLOG:
return rlog_paths
elif mode == ReadMode.QLOG:
return qlog_paths
elif mode == ReadMode.AUTO:
return auto_strategy(rlog_paths, qlog_paths, False, valid_file)
elif mode == ReadMode.AUTO_INTERACTIVE:
return auto_strategy(rlog_paths, qlog_paths, True, valid_file)
raise Exception(f"invalid mode: {mode}")
def comma_api_source(sr: SegmentRange, mode: ReadMode) -> LogPaths:
route = Route(sr.route_name)
rlog_paths = [route.log_paths()[seg] for seg in sr.seg_idxs]
qlog_paths = [route.qlog_paths()[seg] for seg in sr.seg_idxs]
# comma api will have already checked if the file exists
def valid_file(fn):
return fn is not None
return apply_strategy(mode, rlog_paths, qlog_paths, valid_file=valid_file)
def internal_source(sr: SegmentRange, mode: ReadMode) -> LogPaths:
if not internal_source_available():
raise InternalUnavailableException
def get_internal_url(sr: SegmentRange, seg, file):
return f"cd:/{sr.dongle_id}/{sr.timestamp}/{seg}/{file}.bz2"
rlog_paths = [get_internal_url(sr, seg, "rlog") for seg in sr.seg_idxs]
qlog_paths = [get_internal_url(sr, seg, "qlog") for seg in sr.seg_idxs]
return apply_strategy(mode, rlog_paths, qlog_paths)
def openpilotci_source(sr: SegmentRange, mode: ReadMode) -> LogPaths:
rlog_paths = [get_url(sr.route_name, seg, "rlog") for seg in sr.seg_idxs]
qlog_paths = [get_url(sr.route_name, seg, "qlog") for seg in sr.seg_idxs]
return apply_strategy(mode, rlog_paths, qlog_paths)
def comma_car_segments_source(sr: SegmentRange, mode=ReadMode.RLOG) -> LogPaths:
return [get_comma_segments_url(sr.route_name, seg) for seg in sr.seg_idxs]
def direct_source(file_or_url: str) -> LogPaths:
return [file_or_url]
def get_invalid_files(files):
for f in files:
if f is None or not file_exists(f):
yield f
def check_source(source: Source, *args) -> LogPaths:
files = source(*args)
assert next(get_invalid_files(files), False) is False
return files
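# Try sources in order (internal, openpilotci, comma_api, comma_car_segments) and return
# the first whose files all exist; for the AUTO modes, first probe every source for
# complete rlogs before falling back.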
def auto_source(sr: SegmentRange, mode=ReadMode.RLOG) -> LogPaths:
if mode == ReadMode.SANITIZED:
return comma_car_segments_source(sr, mode)
SOURCES: list[Source] = [internal_source, openpilotci_source, comma_api_source, comma_car_segments_source,]
exceptions = []
# for automatic fallback modes, auto_source needs to first check if rlogs exist for any source
if mode in [ReadMode.AUTO, ReadMode.AUTO_INTERACTIVE]:
for source in SOURCES:
try:
return check_source(source, sr, ReadMode.RLOG)
except Exception:
pass
# Automatically determine viable source
for source in SOURCES:
try:
return check_source(source, sr, mode)
except Exception as e:
exceptions.append(e)
raise Exception(f"auto_source could not find any valid source, exceptions for sources: {exceptions}")
def parse_useradmin(identifier: str):
if "useradmin.comma.ai" in identifier:
query = parse_qs(urlparse(identifier).query)
return query["onebox"][0]
return None
def parse_cabana(identifier: str):
if "cabana.comma.ai" in identifier:
query = parse_qs(urlparse(identifier).query)
return query["route"][0]
return None
def parse_direct(identifier: str):
if identifier.startswith(("http://", "https://", "cd:/")) or pathlib.Path(identifier).exists():
return identifier
return None
def parse_indirect(identifier: str):
parsed = parse_useradmin(identifier) or parse_cabana(identifier)
if parsed is not None:
return parsed, comma_api_source, True
return identifier, None, False
class LogReader:
def _parse_identifiers(self, identifier: str | list[str]):
if isinstance(identifier, list):
return [i for j in identifier for i in self._parse_identifiers(j)]
parsed, source, is_indirect = parse_indirect(identifier)
if not is_indirect:
direct_parsed = parse_direct(identifier)
if direct_parsed is not None:
return direct_source(identifier)
sr = SegmentRange(parsed)
mode = self.default_mode if sr.selector is None else ReadMode(sr.selector)
source = self.default_source if source is None else source
identifiers = source(sr, mode)
invalid_count = len(list(get_invalid_files(identifiers)))
assert invalid_count == 0, f"{invalid_count}/{len(identifiers)} invalid log(s) found; please ensure all logs \
are uploaded, or use the '/a' selector at the end of the route name to automatically fall back to qlogs."
return identifiers
def __init__(self, identifier: str | list[str], default_mode: ReadMode = ReadMode.RLOG,
default_source=auto_source, sort_by_time=False, only_union_types=False):
self.default_mode = default_mode
self.default_source = default_source
self.identifier = identifier
self.sort_by_time = sort_by_time
self.only_union_types = only_union_types
self.__lrs: dict[int, _LogFileReader] = {}
self.reset()
def _get_lr(self, i):
if i not in self.__lrs:
self.__lrs[i] = _LogFileReader(self.logreader_identifiers[i], sort_by_time=self.sort_by_time, only_union_types=self.only_union_types)
return self.__lrs[i]
def __iter__(self):
for i in range(len(self.logreader_identifiers)):
yield from self._get_lr(i)
def _run_on_segment(self, func, i):
return func(self._get_lr(i))
def run_across_segments(self, num_processes, func):
with multiprocessing.Pool(num_processes) as pool:
ret = []
num_segs = len(self.logreader_identifiers)
for p in tqdm.tqdm(pool.imap(partial(self._run_on_segment, func), range(num_segs)), total=num_segs):
ret.extend(p)
return ret
def reset(self):
self.logreader_identifiers = self._parse_identifiers(self.identifier)
@staticmethod
def from_bytes(dat):
return _LogFileReader("", dat=dat)
def filter(self, msg_type: str):
return (getattr(m, m.which()) for m in filter(lambda m: m.which() == msg_type, self))
def first(self, msg_type: str):
return next(self.filter(msg_type), None)
if __name__ == "__main__":
import codecs
# capnproto <= 0.8.0 throws errors converting byte data to string
# below line catches those errors and replaces the bytes with \x__
codecs.register_error("strict", codecs.backslashreplace_errors)
log_path = sys.argv[1]
lr = LogReader(log_path, sort_by_time=True)
for msg in lr:
print(msg)
| 2301_81045437/openpilot | tools/lib/logreader.py | Python | mit | 9,865 |
from openpilot.tools.lib.openpilotcontainers import OpenpilotCIContainer
def get_url(*args, **kwargs):
return OpenpilotCIContainer.get_url(*args, **kwargs)
def upload_file(*args, **kwargs):
return OpenpilotCIContainer.upload_file(*args, **kwargs)
def upload_bytes(*args, **kwargs):
return OpenpilotCIContainer.upload_bytes(*args, **kwargs)
BASE_URL = OpenpilotCIContainer.BASE_URL
| 2301_81045437/openpilot | tools/lib/openpilotci.py | Python | mit | 391 |
#!/usr/bin/env python3
from openpilot.tools.lib.azure_container import AzureContainer
OpenpilotCIContainer = AzureContainer("commadataci", "openpilotci")
DataCIContainer = AzureContainer("commadataci", "commadataci")
DataProdContainer = AzureContainer("commadata2", "commadata2")
| 2301_81045437/openpilot | tools/lib/openpilotcontainers.py | Python | mit | 281 |
import os
import re
from functools import cache
from urllib.parse import urlparse
from collections import defaultdict
from itertools import chain
from openpilot.tools.lib.auth_config import get_token
from openpilot.tools.lib.api import CommaApi
from openpilot.tools.lib.helpers import RE
QLOG_FILENAMES = ['qlog', 'qlog.bz2']
QCAMERA_FILENAMES = ['qcamera.ts']
LOG_FILENAMES = ['rlog', 'rlog.bz2', 'raw_log.bz2']
CAMERA_FILENAMES = ['fcamera.hevc', 'video.hevc']
DCAMERA_FILENAMES = ['dcamera.hevc']
ECAMERA_FILENAMES = ['ecamera.hevc']
class Route:
def __init__(self, name, data_dir=None):
self._name = RouteName(name)
self.files = None
if data_dir is not None:
self._segments = self._get_segments_local(data_dir)
else:
self._segments = self._get_segments_remote()
self.max_seg_number = self._segments[-1].name.segment_num
@property
def name(self):
return self._name
@property
def segments(self):
return self._segments
def log_paths(self):
log_path_by_seg_num = {s.name.segment_num: s.log_path for s in self._segments}
return [log_path_by_seg_num.get(i, None) for i in range(self.max_seg_number + 1)]
def qlog_paths(self):
qlog_path_by_seg_num = {s.name.segment_num: s.qlog_path for s in self._segments}
return [qlog_path_by_seg_num.get(i, None) for i in range(self.max_seg_number + 1)]
def camera_paths(self):
camera_path_by_seg_num = {s.name.segment_num: s.camera_path for s in self._segments}
return [camera_path_by_seg_num.get(i, None) for i in range(self.max_seg_number + 1)]
def dcamera_paths(self):
dcamera_path_by_seg_num = {s.name.segment_num: s.dcamera_path for s in self._segments}
return [dcamera_path_by_seg_num.get(i, None) for i in range(self.max_seg_number + 1)]
def ecamera_paths(self):
ecamera_path_by_seg_num = {s.name.segment_num: s.ecamera_path for s in self._segments}
return [ecamera_path_by_seg_num.get(i, None) for i in range(self.max_seg_number + 1)]
def qcamera_paths(self):
qcamera_path_by_seg_num = {s.name.segment_num: s.qcamera_path for s in self._segments}
return [qcamera_path_by_seg_num.get(i, None) for i in range(self.max_seg_number + 1)]
# TODO: refactor this, it's super repetitive
def _get_segments_remote(self):
api = CommaApi(get_token())
route_files = api.get('v1/route/' + self.name.canonical_name + '/files')
self.files = list(chain.from_iterable(route_files.values()))
segments = {}
for url in self.files:
_, dongle_id, time_str, segment_num, fn = urlparse(url).path.rsplit('/', maxsplit=4)
segment_name = f'{dongle_id}|{time_str}--{segment_num}'
if segments.get(segment_name):
segments[segment_name] = Segment(
segment_name,
url if fn in LOG_FILENAMES else segments[segment_name].log_path,
url if fn in QLOG_FILENAMES else segments[segment_name].qlog_path,
url if fn in CAMERA_FILENAMES else segments[segment_name].camera_path,
url if fn in DCAMERA_FILENAMES else segments[segment_name].dcamera_path,
url if fn in ECAMERA_FILENAMES else segments[segment_name].ecamera_path,
url if fn in QCAMERA_FILENAMES else segments[segment_name].qcamera_path,
)
else:
segments[segment_name] = Segment(
segment_name,
url if fn in LOG_FILENAMES else None,
url if fn in QLOG_FILENAMES else None,
url if fn in CAMERA_FILENAMES else None,
url if fn in DCAMERA_FILENAMES else None,
url if fn in ECAMERA_FILENAMES else None,
url if fn in QCAMERA_FILENAMES else None,
)
return sorted(segments.values(), key=lambda seg: seg.name.segment_num)
def _get_segments_local(self, data_dir):
files = os.listdir(data_dir)
segment_files = defaultdict(list)
for f in files:
fullpath = os.path.join(data_dir, f)
explorer_match = re.match(RE.EXPLORER_FILE, f)
op_match = re.match(RE.OP_SEGMENT_DIR, f)
if explorer_match:
segment_name = explorer_match.group('segment_name')
fn = explorer_match.group('file_name')
if segment_name.replace('_', '|').startswith(self.name.canonical_name):
segment_files[segment_name].append((fullpath, fn))
elif op_match and os.path.isdir(fullpath):
segment_name = op_match.group('segment_name')
if segment_name.startswith(self.name.canonical_name):
for seg_f in os.listdir(fullpath):
segment_files[segment_name].append((os.path.join(fullpath, seg_f), seg_f))
elif f == self.name.canonical_name:
for seg_num in os.listdir(fullpath):
if not seg_num.isdigit():
continue
segment_name = f'{self.name.canonical_name}--{seg_num}'
for seg_f in os.listdir(os.path.join(fullpath, seg_num)):
segment_files[segment_name].append((os.path.join(fullpath, seg_num, seg_f), seg_f))
segments = []
for segment, files in segment_files.items():
try:
log_path = next(path for path, filename in files if filename in LOG_FILENAMES)
except StopIteration:
log_path = None
try:
qlog_path = next(path for path, filename in files if filename in QLOG_FILENAMES)
except StopIteration:
qlog_path = None
try:
camera_path = next(path for path, filename in files if filename in CAMERA_FILENAMES)
except StopIteration:
camera_path = None
try:
dcamera_path = next(path for path, filename in files if filename in DCAMERA_FILENAMES)
except StopIteration:
dcamera_path = None
try:
ecamera_path = next(path for path, filename in files if filename in ECAMERA_FILENAMES)
except StopIteration:
ecamera_path = None
try:
qcamera_path = next(path for path, filename in files if filename in QCAMERA_FILENAMES)
except StopIteration:
qcamera_path = None
segments.append(Segment(segment, log_path, qlog_path, camera_path, dcamera_path, ecamera_path, qcamera_path))
if len(segments) == 0:
raise ValueError(f'Could not find segments for route {self.name.canonical_name} in data directory {data_dir}')
return sorted(segments, key=lambda seg: seg.name.segment_num)
class Segment:
def __init__(self, name, log_path, qlog_path, camera_path, dcamera_path, ecamera_path, qcamera_path):
self._name = SegmentName(name)
self.log_path = log_path
self.qlog_path = qlog_path
self.camera_path = camera_path
self.dcamera_path = dcamera_path
self.ecamera_path = ecamera_path
self.qcamera_path = qcamera_path
@property
def name(self):
return self._name
class RouteName:
def __init__(self, name_str: str):
self._name_str = name_str
delim = next(c for c in self._name_str if c in ("|", "/"))
self._dongle_id, self._time_str = self._name_str.split(delim)
assert len(self._dongle_id) == 16, self._name_str
assert len(self._time_str) == 20, self._name_str
self._canonical_name = f"{self._dongle_id}|{self._time_str}"
@property
def canonical_name(self) -> str: return self._canonical_name
@property
def dongle_id(self) -> str: return self._dongle_id
@property
def time_str(self) -> str: return self._time_str
def __str__(self) -> str: return self._canonical_name
class SegmentName:
# TODO: add constructor that takes dongle_id, time_str, segment_num and then create instances
# of this class instead of manually constructing a segment name (use canonical_name prop instead)
def __init__(self, name_str: str, allow_route_name=False):
data_dir_path_separator_index = name_str.rsplit("|", 1)[0].rfind("/")
use_data_dir = (data_dir_path_separator_index != -1) and ("|" in name_str)
self._name_str = name_str[data_dir_path_separator_index + 1:] if use_data_dir else name_str
self._data_dir = name_str[:data_dir_path_separator_index] if use_data_dir else None
seg_num_delim = "--" if self._name_str.count("--") == 2 else "/"
name_parts = self._name_str.rsplit(seg_num_delim, 1)
if allow_route_name and len(name_parts) == 1:
name_parts.append("-1") # no segment number
self._route_name = RouteName(name_parts[0])
self._num = int(name_parts[1])
self._canonical_name = f"{self._route_name._dongle_id}|{self._route_name._time_str}--{self._num}"
@property
def canonical_name(self) -> str: return self._canonical_name
@property
def dongle_id(self) -> str: return self._route_name.dongle_id
@property
def time_str(self) -> str: return self._route_name.time_str
@property
def segment_num(self) -> int: return self._num
@property
def route_name(self) -> RouteName: return self._route_name
@property
def data_dir(self) -> str | None: return self._data_dir
def __str__(self) -> str: return self._canonical_name
@cache
def get_max_seg_number_cached(sr: 'SegmentRange') -> int:
try:
api = CommaApi(get_token())
max_seg_number = api.get("/v1/route/" + sr.route_name.replace("/", "|"))["maxqlog"]
assert isinstance(max_seg_number, int)
return max_seg_number
except Exception as e:
raise Exception("unable to get max_segment_number. ensure you have access to this route or the route is public.") from e
class SegmentRange:
def __init__(self, segment_range: str):
m = re.fullmatch(RE.SEGMENT_RANGE, segment_range)
assert m is not None, f"Segment range is not valid {segment_range}"
self.m = m
@property
def route_name(self) -> str:
return self.m.group("route_name")
@property
def dongle_id(self) -> str:
return self.m.group("dongle_id")
@property
def timestamp(self) -> str:
return self.m.group("timestamp")
@property
def log_id(self) -> str:
return self.m.group("log_id")
@property
def slice(self) -> str:
return self.m.group("slice") or ""
@property
def selector(self) -> str | None:
return self.m.group("selector")
@property
def seg_idxs(self) -> list[int]:
m = re.fullmatch(RE.SLICE, self.slice)
assert m is not None, f"Invalid slice: {self.slice}"
start, end, step = (None if s is None else int(s) for s in m.groups())
# one segment specified
if start is not None and end is None and ':' not in self.slice:
if start < 0:
start += get_max_seg_number_cached(self) + 1
return [start]
s = slice(start, end, step)
# no specified end or using relative indexing, need number of segments
if end is None or end < 0 or (start is not None and start < 0):
return list(range(get_max_seg_number_cached(self) + 1))[s]
else:
return list(range(end + 1))[s]
def __str__(self) -> str:
return f"{self.dongle_id}/{self.log_id}" + (f"/{self.slice}" if self.slice else "") + (f"/{self.selector}" if self.selector else "")
def __repr__(self) -> str:
return self.__str__()
| 2301_81045437/openpilot | tools/lib/route.py | Python | mit | 10,899 |
# Utilities for sanitizing routes of only essential data for testing car ports and doing validation.
from openpilot.tools.lib.logreader import LogIterable, LogMessage
def sanitize_vin(vin: str):
# (last 6 digits of vin are serial number https://en.wikipedia.org/wiki/Vehicle_identification_number)
VIN_SENSITIVE = 6
return vin[:-VIN_SENSITIVE] + "X" * VIN_SENSITIVE
def sanitize_msg(msg: LogMessage) -> LogMessage:
if msg.which() == "carParams":
msg = msg.as_builder()
msg.carParams.carVin = sanitize_vin(msg.carParams.carVin)
msg = msg.as_reader()
return msg
PRESERVE_SERVICES = ["can", "carParams", "pandaStates", "pandaStateDEPRECATED"]
def sanitize(lr: LogIterable) -> LogIterable:
filtered = filter(lambda msg: msg.which() in PRESERVE_SERVICES, lr)
sanitized = map(sanitize_msg, filtered)
return sanitized
| 2301_81045437/openpilot | tools/lib/sanitizer.py | Python | mit | 848 |
import logging
import os
import socket
import time
from hashlib import sha256
from urllib3 import PoolManager, Retry
from urllib3.response import BaseHTTPResponse
from urllib3.util import Timeout
from openpilot.common.file_helpers import atomic_write_in_dir
from openpilot.system.hardware.hw import Paths
# Cache chunk size
K = 1000
CHUNK_SIZE = 1000 * K
logging.getLogger("urllib3").setLevel(logging.WARNING)
def hash_256(link: str) -> str:
hsh = str(sha256((link.split("?")[0]).encode('utf-8')).hexdigest())
return hsh
class URLFileException(Exception):
pass
class URLFile:
_pool_manager: PoolManager|None = None
@staticmethod
def reset() -> None:
URLFile._pool_manager = None
@staticmethod
def pool_manager() -> PoolManager:
if URLFile._pool_manager is None:
socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),]
retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[409, 429, 503, 504])
URLFile._pool_manager = PoolManager(num_pools=10, maxsize=100, socket_options=socket_options, retries=retries)
return URLFile._pool_manager
def __init__(self, url: str, timeout: int=10, debug: bool=False, cache: bool|None=None):
self._url = url
self._timeout = Timeout(connect=timeout, read=timeout)
self._pos = 0
self._length: int|None = None
self._debug = debug
# Downloads are forced (cache disabled) by default; set FILEREADER_CACHE=1 to enable the on-disk cache. An explicit cache argument overrides the environment.
self._force_download = not int(os.environ.get("FILEREADER_CACHE", "0"))
if cache is not None:
self._force_download = not cache
if not self._force_download:
os.makedirs(Paths.download_cache_root(), exist_ok=True)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
pass
def _request(self, method: str, url: str, headers: dict[str, str]|None=None) -> BaseHTTPResponse:
return URLFile.pool_manager().request(method, url, timeout=self._timeout, headers=headers)
def get_length_online(self) -> int:
response = self._request('HEAD', self._url)
if not (200 <= response.status <= 299):
return -1
length = response.headers.get('content-length', 0)
return int(length)
def get_length(self) -> int:
if self._length is not None:
return self._length
file_length_path = os.path.join(Paths.download_cache_root(), hash_256(self._url) + "_length")
if not self._force_download and os.path.exists(file_length_path):
with open(file_length_path) as file_length:
content = file_length.read()
self._length = int(content)
return self._length
self._length = self.get_length_online()
if not self._force_download and self._length != -1:
with atomic_write_in_dir(file_length_path, mode="w") as file_length:
file_length.write(str(self._length))
return self._length
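# Cached read path: fetch the file in fixed CHUNK_SIZE pieces, store each chunk on disk
# keyed by URL hash and chunk number, and stitch the requested byte range from the chunks.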
def read(self, ll: int|None=None) -> bytes:
if self._force_download:
return self.read_aux(ll=ll)
file_begin = self._pos
file_end = self._pos + ll if ll is not None else self.get_length()
assert file_end != -1, f"Remote file is empty or doesn't exist: {self._url}"
# We have to align with the chunks we store. Position is the beginning of the latest chunk that starts at or before our read offset
position = (file_begin // CHUNK_SIZE) * CHUNK_SIZE
response = b""
while True:
self._pos = position
chunk_number = self._pos // CHUNK_SIZE
file_name = hash_256(self._url) + "_" + str(chunk_number)
full_path = os.path.join(Paths.download_cache_root(), str(file_name))
data = None
# If we don't have a file, download it
if not os.path.exists(full_path):
data = self.read_aux(ll=CHUNK_SIZE)
with atomic_write_in_dir(full_path, mode="wb") as new_cached_file:
new_cached_file.write(data)
else:
with open(full_path, "rb") as cached_file:
data = cached_file.read()
response += data[max(0, file_begin - position): min(CHUNK_SIZE, file_end - position)]
position += CHUNK_SIZE
if position >= file_end:
self._pos = file_end
return response
def read_aux(self, ll: int|None=None) -> bytes:
download_range = False
headers = {}
if self._pos != 0 or ll is not None:
if ll is None:
end = self.get_length() - 1
else:
end = min(self._pos + ll, self.get_length()) - 1
if self._pos >= end:
return b""
headers['Range'] = f"bytes={self._pos}-{end}"
download_range = True
if self._debug:
t1 = time.time()
response = self._request('GET', self._url, headers=headers)
ret = response.data
if self._debug:
t2 = time.time()
if t2 - t1 > 0.1:
print(f"get {self._url} {headers!r} {t2 - t1:.3f} slow")
response_code = response.status
if response_code == 416: # Requested Range Not Satisfiable
raise URLFileException(f"Error, range out of bounds {response_code} {headers} ({self._url}): {repr(ret)[:500]}")
if download_range and response_code != 206: # Partial Content
raise URLFileException(f"Error, requested range but got unexpected response {response_code} {headers} ({self._url}): {repr(ret)[:500]}")
if (not download_range) and response_code != 200: # OK
raise URLFileException(f"Error {response_code} {headers} ({self._url}): {repr(ret)[:500]}")
self._pos += len(ret)
return ret
def seek(self, pos:int) -> None:
self._pos = pos
@property
def name(self) -> str:
return self._url
os.register_at_fork(after_in_child=URLFile.reset)
| 2301_81045437/openpilot | tools/lib/url_file.py | Python | mit | 5,619 |
#!/usr/bin/env python3
import argparse
import os
import struct
from enum import IntEnum
from openpilot.tools.lib.filereader import FileReader
DEBUG = int(os.getenv("DEBUG", "0"))
# compare to ffmpeg parsing
# ffmpeg -i <input.hevc> -c copy -bsf:v trace_headers -f null - 2>&1 | grep -B4 -A32 '] 0 '
# H.265 specification
# https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-H.265-201802-S!!PDF-E&type=items
NAL_UNIT_START_CODE = b"\x00\x00\x01"
NAL_UNIT_START_CODE_SIZE = len(NAL_UNIT_START_CODE)
NAL_UNIT_HEADER_SIZE = 2
class HevcNalUnitType(IntEnum):
TRAIL_N = 0 # RBSP structure: slice_segment_layer_rbsp( )
TRAIL_R = 1 # RBSP structure: slice_segment_layer_rbsp( )
TSA_N = 2 # RBSP structure: slice_segment_layer_rbsp( )
TSA_R = 3 # RBSP structure: slice_segment_layer_rbsp( )
STSA_N = 4 # RBSP structure: slice_segment_layer_rbsp( )
STSA_R = 5 # RBSP structure: slice_segment_layer_rbsp( )
RADL_N = 6 # RBSP structure: slice_segment_layer_rbsp( )
RADL_R = 7 # RBSP structure: slice_segment_layer_rbsp( )
RASL_N = 8 # RBSP structure: slice_segment_layer_rbsp( )
RASL_R = 9 # RBSP structure: slice_segment_layer_rbsp( )
RSV_VCL_N10 = 10
RSV_VCL_R11 = 11
RSV_VCL_N12 = 12
RSV_VCL_R13 = 13
RSV_VCL_N14 = 14
RSV_VCL_R15 = 15
BLA_W_LP = 16 # RBSP structure: slice_segment_layer_rbsp( )
BLA_W_RADL = 17 # RBSP structure: slice_segment_layer_rbsp( )
BLA_N_LP = 18 # RBSP structure: slice_segment_layer_rbsp( )
IDR_W_RADL = 19 # RBSP structure: slice_segment_layer_rbsp( )
IDR_N_LP = 20 # RBSP structure: slice_segment_layer_rbsp( )
CRA_NUT = 21 # RBSP structure: slice_segment_layer_rbsp( )
RSV_IRAP_VCL22 = 22
RSV_IRAP_VCL23 = 23
RSV_VCL24 = 24
RSV_VCL25 = 25
RSV_VCL26 = 26
RSV_VCL27 = 27
RSV_VCL28 = 28
RSV_VCL29 = 29
RSV_VCL30 = 30
RSV_VCL31 = 31
VPS_NUT = 32 # RBSP structure: video_parameter_set_rbsp( )
SPS_NUT = 33 # RBSP structure: seq_parameter_set_rbsp( )
PPS_NUT = 34 # RBSP structure: pic_parameter_set_rbsp( )
AUD_NUT = 35
EOS_NUT = 36
EOB_NUT = 37
FD_NUT = 38
PREFIX_SEI_NUT = 39
SUFFIX_SEI_NUT = 40
RSV_NVCL41 = 41
RSV_NVCL42 = 42
RSV_NVCL43 = 43
RSV_NVCL44 = 44
RSV_NVCL45 = 45
RSV_NVCL46 = 46
RSV_NVCL47 = 47
UNSPEC48 = 48
UNSPEC49 = 49
UNSPEC50 = 50
UNSPEC51 = 51
UNSPEC52 = 52
UNSPEC53 = 53
UNSPEC54 = 54
UNSPEC55 = 55
UNSPEC56 = 56
UNSPEC57 = 57
UNSPEC58 = 58
UNSPEC59 = 59
UNSPEC60 = 60
UNSPEC61 = 61
UNSPEC62 = 62
UNSPEC63 = 63
# B.2.2 Byte stream NAL unit semantics
# - The nal_unit_type within the nal_unit( ) syntax structure is equal to VPS_NUT, SPS_NUT or PPS_NUT.
# - The byte stream NAL unit syntax structure contains the first NAL unit of an access unit in decoding
# order, as specified in clause 7.4.2.4.4.
HEVC_PARAMETER_SET_NAL_UNITS = (
HevcNalUnitType.VPS_NUT,
HevcNalUnitType.SPS_NUT,
HevcNalUnitType.PPS_NUT,
)
# 3.29 coded slice segment NAL unit: A NAL unit that has nal_unit_type in the range of TRAIL_N to RASL_R,
# inclusive, or in the range of BLA_W_LP to RSV_IRAP_VCL23, inclusive, which indicates that the NAL unit
# contains a coded slice segment
HEVC_CODED_SLICE_SEGMENT_NAL_UNITS = (
HevcNalUnitType.TRAIL_N,
HevcNalUnitType.TRAIL_R,
HevcNalUnitType.TSA_N,
HevcNalUnitType.TSA_R,
HevcNalUnitType.STSA_N,
HevcNalUnitType.STSA_R,
HevcNalUnitType.RADL_N,
HevcNalUnitType.RADL_R,
HevcNalUnitType.RASL_N,
HevcNalUnitType.RASL_R,
HevcNalUnitType.BLA_W_LP,
HevcNalUnitType.BLA_W_RADL,
HevcNalUnitType.BLA_N_LP,
HevcNalUnitType.IDR_W_RADL,
HevcNalUnitType.IDR_N_LP,
HevcNalUnitType.CRA_NUT,
)
class VideoFileInvalid(Exception):
pass
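# Unsigned Exp-Golomb ("ue(v)") decoder: after skipping skip_bits, count bits up to and
# including the first 1 (prefix_len), then read prefix_len-1 suffix bits;
# value = 2**(prefix_len-1) - 1 + suffix, and size is the total bit count consumed.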
def get_ue(dat: bytes, start_idx: int, skip_bits: int) -> tuple[int, int]:
prefix_val = 0
prefix_len = 0
suffix_val = 0
suffix_len = 0
i = start_idx
while i < len(dat):
j = 7
while j >= 0:
if skip_bits > 0:
skip_bits -= 1
elif prefix_val == 0:
prefix_val = (dat[i] >> j) & 1
prefix_len += 1
else:
suffix_val = (suffix_val << 1) | ((dat[i] >> j) & 1)
suffix_len += 1
j -= 1
if prefix_val == 1 and prefix_len - 1 == suffix_len:
val = 2**(prefix_len-1) - 1 + suffix_val
size = prefix_len + suffix_len
return val, size
i += 1
raise VideoFileInvalid("invalid exponential-golomb code")
def require_nal_unit_start(dat: bytes, nal_unit_start: int) -> None:
if nal_unit_start < 1:
raise ValueError("start index must be greater than zero")
if dat[nal_unit_start:nal_unit_start + NAL_UNIT_START_CODE_SIZE] != NAL_UNIT_START_CODE:
raise VideoFileInvalid("data must begin with start code")
def get_hevc_nal_unit_length(dat: bytes, nal_unit_start: int) -> int:
try:
pos = dat.index(NAL_UNIT_START_CODE, nal_unit_start + NAL_UNIT_START_CODE_SIZE)
except ValueError:
pos = -1
# length of NAL unit is byte count up to next NAL unit start index
nal_unit_len = (pos if pos != -1 else len(dat)) - nal_unit_start
if DEBUG:
print(" nal_unit_len:", nal_unit_len)
return nal_unit_len
def get_hevc_nal_unit_type(dat: bytes, nal_unit_start: int) -> HevcNalUnitType:
# 7.3.1.2 NAL unit header syntax
# nal_unit_header( ) { // descriptor
# forbidden_zero_bit f(1)
# nal_unit_type u(6)
# nuh_layer_id u(6)
# nuh_temporal_id_plus1 u(3)
# }
header_start = nal_unit_start + NAL_UNIT_START_CODE_SIZE
nal_unit_header = dat[header_start:header_start + NAL_UNIT_HEADER_SIZE]
if len(nal_unit_header) != 2:
raise VideoFileInvalid("data too short to contain nal unit header")
nal_unit_type = HevcNalUnitType((nal_unit_header[0] >> 1) & 0x3F)
if DEBUG:
print(" nal_unit_type:", nal_unit_type.name, f"({nal_unit_type.value})")
return nal_unit_type
def get_hevc_slice_type(dat: bytes, nal_unit_start: int, nal_unit_type: HevcNalUnitType) -> tuple[int, bool]:
# 7.3.2.9 Slice segment layer RBSP syntax
# slice_segment_layer_rbsp( ) {
# slice_segment_header( )
# slice_segment_data( )
# rbsp_slice_segment_trailing_bits( )
# }
# ...
# 7.3.6.1 General slice segment header syntax
# slice_segment_header( ) { // descriptor
# first_slice_segment_in_pic_flag u(1)
# if( nal_unit_type >= BLA_W_LP && nal_unit_type <= RSV_IRAP_VCL23 )
# no_output_of_prior_pics_flag u(1)
# slice_pic_parameter_set_id ue(v)
# if( !first_slice_segment_in_pic_flag ) {
# if( dependent_slice_segments_enabled_flag )
# dependent_slice_segment_flag u(1)
# slice_segment_address u(v)
# }
# if( !dependent_slice_segment_flag ) {
# for( i = 0; i < num_extra_slice_header_bits; i++ )
# slice_reserved_flag[ i ] u(1)
# slice_type ue(v)
# ...
rbsp_start = nal_unit_start + NAL_UNIT_START_CODE_SIZE + NAL_UNIT_HEADER_SIZE
skip_bits = 0
# 7.4.7.1 General slice segment header semantics
# first_slice_segment_in_pic_flag equal to 1 specifies that the slice segment is the first slice segment of the picture in
# decoding order. first_slice_segment_in_pic_flag equal to 0 specifies that the slice segment is not the first slice segment
# of the picture in decoding order.
is_first_slice = dat[rbsp_start] >> 7 & 1 == 1
if not is_first_slice:
# TODO: parse dependent_slice_segment_flag and slice_segment_address and get real slice_type
# for now since we don't use it return -1 for slice_type
return (-1, is_first_slice)
skip_bits += 1 # skip past first_slice_segment_in_pic_flag
if nal_unit_type >= HevcNalUnitType.BLA_W_LP and nal_unit_type <= HevcNalUnitType.RSV_IRAP_VCL23:
# 7.4.7.1 General slice segment header semantics
# no_output_of_prior_pics_flag affects the output of previously-decoded pictures in the decoded picture buffer after the
# decoding of an IDR or a BLA picture that is not the first picture in the bitstream as specified in Annex C.
skip_bits += 1 # skip past no_output_of_prior_pics_flag
# 7.4.7.1 General slice segment header semantics
# slice_pic_parameter_set_id specifies the value of pps_pic_parameter_set_id for the PPS in use.
# The value of slice_pic_parameter_set_id shall be in the range of 0 to 63, inclusive.
_, size = get_ue(dat, rbsp_start, skip_bits)
skip_bits += size # skip past slice_pic_parameter_set_id
# 7.4.3.3.1 General picture parameter set RBSP semantics
# num_extra_slice_header_bits specifies the number of extra slice header bits that are present in the slice header RBSP
# for coded pictures referring to the PPS. The value of num_extra_slice_header_bits shall be in the range of 0 to 2, inclusive,
# in bitstreams conforming to this version of this Specification. Other values for num_extra_slice_header_bits are reserved
# for future use by ITU-T | ISO/IEC. However, decoders shall allow num_extra_slice_header_bits to have any value.
# TODO: get from PPS_NUT pic_parameter_set_rbsp( ) for corresponding slice_pic_parameter_set_id
num_extra_slice_header_bits = 0
skip_bits += num_extra_slice_header_bits
# 7.4.7.1 General slice segment header semantics
# slice_type specifies the coding type of the slice according to Table 7-7.
# Table 7-7 - Name association to slice_type
# slice_type | Name of slice_type
# 0 | B (B slice)
# 1 | P (P slice)
# 2 | I (I slice)
# unsigned integer 0-th order Exp-Golomb-coded syntax element with the left bit first
slice_type, _ = get_ue(dat, rbsp_start, skip_bits)
if DEBUG:
print(" slice_type:", slice_type, f"(first slice: {is_first_slice})")
if slice_type > 2:
raise VideoFileInvalid("slice_type must be 0, 1, or 2")
return slice_type, is_first_slice
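# Walk the Annex B stream NAL unit by NAL unit: accumulate VPS/SPS/PPS bytes into a global
# prefix and record (slice_type, byte offset) for the first slice of every frame.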
def hevc_index(hevc_file_name: str, allow_corrupt: bool=False) -> tuple[list, int, bytes]:
with FileReader(hevc_file_name) as f:
dat = f.read()
if len(dat) < NAL_UNIT_START_CODE_SIZE + 1:
raise VideoFileInvalid("data is too short")
if dat[0] != 0x00:
raise VideoFileInvalid("first byte must be 0x00")
prefix_dat = b""
frame_types = list()
i = 1 # skip past first byte 0x00
try:
while i < len(dat):
require_nal_unit_start(dat, i)
nal_unit_len = get_hevc_nal_unit_length(dat, i)
nal_unit_type = get_hevc_nal_unit_type(dat, i)
if nal_unit_type in HEVC_PARAMETER_SET_NAL_UNITS:
prefix_dat += dat[i:i+nal_unit_len]
elif nal_unit_type in HEVC_CODED_SLICE_SEGMENT_NAL_UNITS:
slice_type, is_first_slice = get_hevc_slice_type(dat, i, nal_unit_type)
if is_first_slice:
frame_types.append((slice_type, i))
i += nal_unit_len
except Exception as e:
if not allow_corrupt:
raise
print(f"ERROR: NAL unit skipped @ {i}\n", str(e))
return frame_types, len(dat), prefix_dat
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("input_file", type=str)
parser.add_argument("output_prefix_file", type=str)
parser.add_argument("output_index_file", type=str)
args = parser.parse_args()
frame_types, dat_len, prefix_dat = hevc_index(args.input_file)
with open(args.output_prefix_file, "wb") as f:
f.write(prefix_dat)
with open(args.output_index_file, "wb") as f:
for ft, fp in frame_types:
f.write(struct.pack("<II", ft, fp))
f.write(struct.pack("<II", 0xFFFFFFFF, dat_len))
if __name__ == "__main__":
main()
| 2301_81045437/openpilot | tools/lib/vidindex.py | Python | mit | 11,840 |
#!/usr/bin/env bash
set -e
if [ -z "$SKIP_PROMPT" ]; then
echo "--------------- macOS support ---------------"
echo "Running openpilot natively on macOS is not officially supported."
echo "It might build, some parts of it might work, but it's not fully tested, so there might be some issues."
echo
echo "Check out devcontainers for a seamless experience (see tools/README.md)."
echo "-------------------------------------------------"
echo -n "Are you sure you want to continue? [y/N] "
read -r response
if [[ ! "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
exit 1
fi
fi
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
ROOT="$(cd $DIR/../ && pwd)"
ARCH=$(uname -m)
if [[ $SHELL == "/bin/zsh" ]]; then
RC_FILE="$HOME/.zshrc"
elif [[ $SHELL == "/bin/bash" ]]; then
RC_FILE="$HOME/.bash_profile"
fi
# Install brew if required
if [[ $(command -v brew) == "" ]]; then
echo "Installing Hombrew"
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
echo "[ ] installed brew t=$SECONDS"
# make brew available now
if [[ $ARCH == "x86_64" ]]; then
echo 'eval "$(/usr/local/homebrew/bin/brew shellenv)"' >> $RC_FILE
eval "$(/usr/local/homebrew/bin/brew shellenv)"
else
echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' >> $RC_FILE
eval "$(/opt/homebrew/bin/brew shellenv)"
fi
fi
brew bundle --file=- <<-EOS
brew "catch2"
brew "cmake"
brew "cppcheck"
brew "git-lfs"
brew "zlib"
brew "bzip2"
brew "capnp"
brew "coreutils"
brew "eigen"
brew "ffmpeg"
brew "glfw"
brew "libarchive"
brew "libusb"
brew "libtool"
brew "llvm"
brew "openssl@3.0"
brew "pyenv"
brew "pyenv-virtualenv"
brew "qt@5"
brew "zeromq"
cask "gcc-arm-embedded"
brew "portaudio"
EOS
echo "[ ] finished brew install t=$SECONDS"
BREW_PREFIX=$(brew --prefix)
# archive backend tools for pip dependencies
export LDFLAGS="$LDFLAGS -L${BREW_PREFIX}/opt/zlib/lib"
export LDFLAGS="$LDFLAGS -L${BREW_PREFIX}/opt/bzip2/lib"
export CPPFLAGS="$CPPFLAGS -I${BREW_PREFIX}/opt/zlib/include"
export CPPFLAGS="$CPPFLAGS -I${BREW_PREFIX}/opt/bzip2/include"
# pycurl curl/openssl backend dependencies
export LDFLAGS="$LDFLAGS -L${BREW_PREFIX}/opt/openssl@3/lib"
export CPPFLAGS="$CPPFLAGS -I${BREW_PREFIX}/opt/openssl@3/include"
export PYCURL_CURL_CONFIG=/usr/bin/curl-config
export PYCURL_SSL_LIBRARY=openssl
# install python dependencies
$DIR/install_python_dependencies.sh
echo "[ ] installed python dependencies t=$SECONDS"
# brew does not link qt5 by default
# check if qt5 can be linked, if not, prompt the user to link it
QT_BIN_LOCATION="$(command -v lupdate || :)"
if [ -n "$QT_BIN_LOCATION" ]; then
# if qt6 is linked, prompt the user to unlink it and link the right version
QT_BIN_VERSION="$(lupdate -version | awk '{print $NF}')"
if [[ ! "$QT_BIN_VERSION" =~ 5\.[0-9]+\.[0-9]+ ]]; then
echo
echo "lupdate/lrelease available at PATH is $QT_BIN_VERSION"
if [[ "$QT_BIN_LOCATION" == "$(brew --prefix)/"* ]]; then
echo "Run the following command to link qt5:"
echo "brew unlink qt@6 && brew link qt@5"
else
echo "Remove conflicting qt entries from PATH and run the following command to link qt5:"
echo "brew link qt@5"
fi
fi
else
brew link qt@5
fi
echo
echo "---- OPENPILOT SETUP DONE ----"
echo "Open a new shell or configure your active shell env by running:"
echo "source $RC_FILE"
|
2301_81045437/openpilot
|
tools/mac_setup.sh
|
Shell
|
mit
| 3,432
|
#!/usr/bin/env python3
import os
import sys
import platform
import shutil
import subprocess
import tarfile
import tempfile
import requests
import argparse
from functools import partial
from openpilot.common.basedir import BASEDIR
from openpilot.selfdrive.car.fingerprints import MIGRATION
from openpilot.tools.lib.helpers import save_log
from openpilot.tools.lib.logreader import LogReader, ReadMode
juggle_dir = os.path.dirname(os.path.realpath(__file__))
DEMO_ROUTE = "a2a0ccea32023010|2023-07-27--13-01-19"
RELEASES_URL = "https://github.com/commaai/PlotJuggler/releases/download/latest"
INSTALL_DIR = os.path.join(juggle_dir, "bin")
PLOTJUGGLER_BIN = os.path.join(juggle_dir, "bin/plotjuggler")
MINIMUM_PLOTJUGGLER_VERSION = (3, 5, 2)
MAX_STREAMING_BUFFER_SIZE = 1000
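# Example usage (a sketch; run from tools/plotjuggler):
#   ./juggle.py --demo
#   ./juggle.py --can "a2a0ccea32023010|2023-07-27--13-01-19"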
def install():
m = f"{platform.system()}-{platform.machine()}"
supported = ("Linux-x86_64", "Linux-aarch64", "Darwin-arm64", "Darwin-x86_64")
if m not in supported:
raise Exception(f"Unsupported platform: '{m}'. Supported platforms: {supported}")
if os.path.exists(INSTALL_DIR):
shutil.rmtree(INSTALL_DIR)
os.mkdir(INSTALL_DIR)
url = os.path.join(RELEASES_URL, m + ".tar.gz")
with requests.get(url, stream=True, timeout=10) as r, tempfile.NamedTemporaryFile() as tmp:
r.raise_for_status()
with open(tmp.name, 'wb') as tmpf:
for chunk in r.iter_content(chunk_size=1024 * 1024):
tmpf.write(chunk)
with tarfile.open(tmp.name) as tar:
tar.extractall(path=INSTALL_DIR)
def get_plotjuggler_version():
out = subprocess.check_output([PLOTJUGGLER_BIN, "-v"], encoding="utf-8").strip()
version = out.split(" ")[1]
return tuple(map(int, version.split(".")))
def start_juggler(fn=None, dbc=None, layout=None, route_or_segment_name=None):
env = os.environ.copy()
env["BASEDIR"] = BASEDIR
env["PATH"] = f"{INSTALL_DIR}:{os.getenv('PATH', '')}"
if dbc:
env["DBC_NAME"] = dbc
extra_args = ""
if fn is not None:
extra_args += f" -d {fn}"
if layout is not None:
extra_args += f" -l {layout}"
if route_or_segment_name is not None:
extra_args += f" --window_title \"{route_or_segment_name}\""
cmd = f'{PLOTJUGGLER_BIN} --buffer_size {MAX_STREAMING_BUFFER_SIZE} --plugin_folders {INSTALL_DIR}{extra_args}'
subprocess.call(cmd, shell=True, env=env, cwd=juggle_dir)
def process(can, lr):
return [d for d in lr if can or d.which() not in ['can', 'sendcan']]
def juggle_route(route_or_segment_name, can, layout, dbc=None):
sr = LogReader(route_or_segment_name, default_mode=ReadMode.AUTO_INTERACTIVE)
all_data = sr.run_across_segments(24, partial(process, can))
# Infer DBC name from logs
if dbc is None:
for cp in [m for m in all_data if m.which() == 'carParams']:
try:
DBC = __import__(f"openpilot.selfdrive.car.{cp.carParams.carName}.values", fromlist=['DBC']).DBC
fingerprint = cp.carParams.carFingerprint
dbc = DBC[MIGRATION.get(fingerprint, fingerprint)]['pt']
except Exception:
pass
break
with tempfile.NamedTemporaryFile(suffix='.rlog', dir=juggle_dir) as tmp:
save_log(tmp.name, all_data, compress=False)
del all_data
start_juggler(tmp.name, dbc, layout, route_or_segment_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="A helper to run PlotJuggler on openpilot routes",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--demo", action="store_true", help="Use the demo route instead of providing one")
parser.add_argument("--can", action="store_true", help="Parse CAN data")
parser.add_argument("--stream", action="store_true", help="Start PlotJuggler in streaming mode")
parser.add_argument("--layout", nargs='?', help="Run PlotJuggler with a pre-defined layout")
parser.add_argument("--install", action="store_true", help="Install or update PlotJuggler + plugins")
parser.add_argument("--dbc", help="Set the DBC name to load for parsing CAN data. If not set, the DBC will be automatically inferred from the logs.")
parser.add_argument("route_or_segment_name", nargs='?', help="The route or segment name to plot (cabana share URL accepted)")
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
args = parser.parse_args()
if args.install:
install()
sys.exit()
if not os.path.exists(PLOTJUGGLER_BIN):
print("PlotJuggler is missing. Downloading...")
install()
if get_plotjuggler_version() < MINIMUM_PLOTJUGGLER_VERSION:
print("PlotJuggler is out of date. Installing update...")
install()
if args.stream:
start_juggler(layout=args.layout)
else:
route_or_segment_name = DEMO_ROUTE if args.demo else args.route_or_segment_name.strip()
juggle_route(route_or_segment_name, args.can, args.layout, args.dbc)
|
2301_81045437/openpilot
|
tools/plotjuggler/juggle.py
|
Python
|
mit
| 4,827
|
#!/usr/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd $DIR
if [ ! -d "$DIR/clpeak" ]; then
git clone https://github.com/krrishnarraj/clpeak.git
cd clpeak
git fetch
git checkout ec2d3e70e1abc7738b81f9277c7af79d89b2133b
git reset --hard origin/master
git submodule update --init --recursive --remote
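  # run_continuously.patch presumably keeps clpeak looping so sustained GPU load can be profiled (patch contents not shown here)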
git apply ../run_continuously.patch
fi
cd clpeak
mkdir -p build
cd build
cmake ..
cmake --build .
|
2301_81045437/openpilot
|
tools/profiling/clpeak/build.sh
|
Shell
|
mit
| 457
|
#!/usr/bin/bash
set -e
cd /sys/kernel/tracing
echo 1 > tracing_on
echo boot > trace_clock
echo 1000 > buffer_size_kb
# /sys/kernel/tracing/available_events
echo 1 > events/irq/enable
echo 1 > events/sched/enable
echo 1 > events/kgsl/enable
echo 1 > events/camera/enable
echo 1 > events/workqueue/enable
echo > trace
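# clear the ring buffer, record for 5 seconds, then stop tracing and copy the result out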
sleep 5
echo 0 > tracing_on
cp trace /tmp/trace
chown comma: /tmp/trace
echo /tmp/trace
|
2301_81045437/openpilot
|
tools/profiling/ftrace.sh
|
Shell
|
mit
| 409
|
#!/bin/bash
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
cd $DIR
if [ ! -d palanteer ]; then
git clone https://github.com/dfeneyrou/palanteer
pip install wheel
sudo apt install libunwind-dev libdw-dev
fi
cd palanteer
git pull
mkdir -p build
cd build
cmake .. -DCMAKE_BUILD_TYPE=Release
make -j$(nproc)
pip install --force-reinstall python/dist/palanteer*.whl
cp bin/palanteer $DIR/viewer
|
2301_81045437/openpilot
|
tools/profiling/palanteer/setup.sh
|
Shell
|
mit
| 430
|
#!/usr/bin/bash
if [ ! -d perfetto ]; then
git clone https://android.googlesource.com/platform/external/perfetto/
fi
cd perfetto
tools/install-build-deps --linux-arm
tools/gn gen --args='is_debug=false target_os="linux" target_cpu="arm64"' out/linux
tools/ninja -C out/linux tracebox traced traced_probes perfetto
|
2301_81045437/openpilot
|
tools/profiling/perfetto/build.sh
|
Shell
|
mit
| 319
|
#!/usr/bin/bash
DEST=tici:/data/openpilot/selfdrive/debug/profiling/perfetto
scp -r perfetto/out/linux/tracebox $DEST
scp -r perfetto/test/configs $DEST
|
2301_81045437/openpilot
|
tools/profiling/perfetto/copy.sh
|
Shell
|
mit
| 155
|
#!/usr/bin/bash
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)"
cd $DIR
OUT=trace_
sudo ./tracebox -o $OUT --txt -c configs/scheduling.cfg
sudo chown $USER:$USER $OUT
|
2301_81045437/openpilot
|
tools/profiling/perfetto/record.sh
|
Shell
|
mit
| 184
|
#!/usr/bin/bash
curl -LO https://get.perfetto.dev/trace_processor
chmod +x ./trace_processor
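# --httpd serves a local query UI for the downloaded trace (typically at http://localhost:9001)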
./trace_processor --httpd
|
2301_81045437/openpilot
|
tools/profiling/perfetto/server.sh
|
Shell
|
mit
| 121
|
#!/usr/bin/bash
scp tici:/data/openpilot/selfdrive/debug/profiling/perfetto/trace_* .
|
2301_81045437/openpilot
|
tools/profiling/perfetto/traces.sh
|
Shell
|
mit
| 149
|
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"
# find process with name passed in (excluding this process)
for PID in $(pgrep -f $1); do
if [ "$PID" != "$$" ]; then
ps -p $PID -o args
TRACE_PID=$PID
break
fi
done
if [ -z "$TRACE_PID" ]; then
echo "could not find PID for $1"
exit 1
fi
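# sample the target process for 5 seconds with py-spy, then open the resulting flamegraph SVG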
sudo env PATH=$PATH py-spy record -d 5 -o /tmp/perf$TRACE_PID.svg -p $TRACE_PID &&
google-chrome /tmp/perf$TRACE_PID.svg
|
2301_81045437/openpilot
|
tools/profiling/py-spy/profile.sh
|
Shell
|
mit
| 431
|
#!/bin/bash
# TODO: there's probably a better way to do this
cd SnapdragonProfiler/service
mv android real_android
ln -s agl/ android
|
2301_81045437/openpilot
|
tools/profiling/snapdragon/setup-agnos.sh
|
Shell
|
mit
| 136
|
#!/bin/bash
# install depends
sudo apt update
sudo apt-get install libc++1 libc++abi1 default-jre android-tools-adb gtk-sharp2
# setup mono
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
sudo apt install apt-transport-https ca-certificates
echo "deb https://download.mono-project.com/repo/ubuntu stable-xenial main" | sudo tee /etc/apt/sources.list.d/mono-official-stable.list
sudo apt update
sudo apt-get install -y mono-complete
echo "Setup successful, you should now be able to run the profiler with cd SnapdragonProfiler and ./run_sdp.sh"
|
2301_81045437/openpilot
|
tools/profiling/snapdragon/setup-profiler.sh
|
Shell
|
mit
| 610
|
#!/usr/bin/bash
set -e
RUBYOPT="-W0" irqtop -d1 -R
|
2301_81045437/openpilot
|
tools/profiling/watch-irqs.sh
|
Shell
|
mit
| 52
|
Import('env', 'qt_env', 'arch', 'common', 'messaging', 'visionipc', 'cereal')
base_frameworks = qt_env['FRAMEWORKS']
base_libs = [common, messaging, cereal, visionipc, 'zmq',
'capnp', 'kj', 'm', 'ssl', 'crypto', 'pthread', 'qt_util'] + qt_env["LIBS"]
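# OpenCL ships as a system framework on macOS; elsewhere it links as a plain library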
if arch == "Darwin":
base_frameworks.append('OpenCL')
else:
base_libs.append('OpenCL')
replay_lib_src = ["replay.cc", "consoleui.cc", "camera.cc", "filereader.cc", "logreader.cc", "framereader.cc", "route.cc", "util.cc"]
replay_lib = qt_env.Library("qt_replay", replay_lib_src, LIBS=base_libs, FRAMEWORKS=base_frameworks)
Export('replay_lib')
replay_libs = [replay_lib, 'avutil', 'avcodec', 'avformat', 'bz2', 'curl', 'yuv', 'ncurses'] + base_libs
qt_env.Program("replay", ["main.cc"], LIBS=replay_libs, FRAMEWORKS=base_frameworks)
if GetOption('extras'):
qt_env.Program('tests/test_replay', ['tests/test_runner.cc', 'tests/test_replay.cc'], LIBS=[replay_libs, base_libs])
|
2301_81045437/openpilot
|
tools/replay/SConscript
|
Python
|
mit
| 947
|
#include "tools/replay/camera.h"
#include <capnp/dynamic.h>
#include <cassert>
#include "third_party/linux/include/msm_media_info.h"
#include "tools/replay/util.h"
const int BUFFER_COUNT = 40;
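// Query the VENUS (Qualcomm video hardware) stride/scanline alignment for an NV12
// image of the given size, so replayed buffers match the alignment used on device.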
std::tuple<size_t, size_t, size_t> get_nv12_info(int width, int height) {
int nv12_width = VENUS_Y_STRIDE(COLOR_FMT_NV12, width);
int nv12_height = VENUS_Y_SCANLINES(COLOR_FMT_NV12, height);
assert(nv12_width == VENUS_UV_STRIDE(COLOR_FMT_NV12, width));
assert(nv12_height / 2 == VENUS_UV_SCANLINES(COLOR_FMT_NV12, height));
size_t nv12_buffer_size = 2346 * nv12_width; // comes from v4l2_format.fmt.pix_mp.plane_fmt[0].sizeimage
return {nv12_width, nv12_height, nv12_buffer_size};
}
CameraServer::CameraServer(std::pair<int, int> camera_size[MAX_CAMERAS]) {
for (int i = 0; i < MAX_CAMERAS; ++i) {
std::tie(cameras_[i].width, cameras_[i].height) = camera_size[i];
}
startVipcServer();
}
CameraServer::~CameraServer() {
for (auto &cam : cameras_) {
if (cam.thread.joinable()) {
cam.queue.push({});
cam.thread.join();
}
}
vipc_server_.reset(nullptr);
}
void CameraServer::startVipcServer() {
vipc_server_.reset(new VisionIpcServer("camerad"));
for (auto &cam : cameras_) {
cam.cached_buf.clear();
if (cam.width > 0 && cam.height > 0) {
rInfo("camera[%d] frame size %dx%d", cam.type, cam.width, cam.height);
auto [nv12_width, nv12_height, nv12_buffer_size] = get_nv12_info(cam.width, cam.height);
vipc_server_->create_buffers_with_sizes(cam.stream_type, BUFFER_COUNT, false, cam.width, cam.height,
nv12_buffer_size, nv12_width, nv12_width * nv12_height);
if (!cam.thread.joinable()) {
cam.thread = std::thread(&CameraServer::cameraThread, this, std::ref(cam));
}
}
}
vipc_server_->start_listener();
}
void CameraServer::cameraThread(Camera &cam) {
while (true) {
const auto [fr, event] = cam.queue.pop();
if (!fr) break;
capnp::FlatArrayMessageReader reader(event->data);
auto evt = reader.getRoot<cereal::Event>();
auto eidx = capnp::AnyStruct::Reader(evt).getPointerSection()[0].getAs<cereal::EncodeIndex>();
if (eidx.getType() != cereal::EncodeIndex::Type::FULL_H_E_V_C) continue;
int segment_id = eidx.getSegmentId();
uint32_t frame_id = eidx.getFrameId();
if (auto yuv = getFrame(cam, fr, segment_id, frame_id)) {
VisionIpcBufExtra extra = {
.frame_id = frame_id,
.timestamp_sof = eidx.getTimestampSof(),
.timestamp_eof = eidx.getTimestampEof(),
};
vipc_server_->send(yuv, &extra);
} else {
rError("camera[%d] failed to get frame: %lu", cam.type, segment_id);
}
// Prefetch the next frame
getFrame(cam, fr, segment_id + 1, frame_id + 1);
--publishing_;
}
}
VisionBuf *CameraServer::getFrame(Camera &cam, FrameReader *fr, int32_t segment_id, uint32_t frame_id) {
// Check if the frame is cached
auto buf_it = std::find_if(cam.cached_buf.begin(), cam.cached_buf.end(),
[frame_id](VisionBuf *buf) { return buf->get_frame_id() == frame_id; });
if (buf_it != cam.cached_buf.end()) return *buf_it;
VisionBuf *yuv_buf = vipc_server_->get_buffer(cam.stream_type);
if (fr->get(segment_id, yuv_buf)) {
yuv_buf->set_frame_id(frame_id);
cam.cached_buf.insert(yuv_buf);
return yuv_buf;
}
return nullptr;
}
void CameraServer::pushFrame(CameraType type, FrameReader *fr, const Event *event) {
auto &cam = cameras_[type];
if (cam.width != fr->width || cam.height != fr->height) {
cam.width = fr->width;
cam.height = fr->height;
waitForSent();
startVipcServer();
}
++publishing_;
cam.queue.push({fr, event});
}
void CameraServer::waitForSent() {
while (publishing_ > 0) {
std::this_thread::yield();
}
}
|
2301_81045437/openpilot
|
tools/replay/camera.cc
|
C++
|
mit
| 3,855
|
#pragma once
#include <memory>
#include <set>
#include <tuple>
#include <utility>
#include "cereal/visionipc/visionipc_server.h"
#include "common/queue.h"
#include "tools/replay/framereader.h"
#include "tools/replay/logreader.h"
std::tuple<size_t, size_t, size_t> get_nv12_info(int width, int height);
class CameraServer {
public:
CameraServer(std::pair<int, int> camera_size[MAX_CAMERAS] = nullptr);
~CameraServer();
void pushFrame(CameraType type, FrameReader* fr, const Event *event);
void waitForSent();
protected:
struct Camera {
CameraType type;
VisionStreamType stream_type;
int width;
int height;
std::thread thread;
SafeQueue<std::pair<FrameReader*, const Event *>> queue;
std::set<VisionBuf *> cached_buf;
};
void startVipcServer();
void cameraThread(Camera &cam);
VisionBuf *getFrame(Camera &cam, FrameReader *fr, int32_t segment_id, uint32_t frame_id);
Camera cameras_[MAX_CAMERAS] = {
{.type = RoadCam, .stream_type = VISION_STREAM_ROAD},
{.type = DriverCam, .stream_type = VISION_STREAM_DRIVER},
{.type = WideRoadCam, .stream_type = VISION_STREAM_WIDE_ROAD},
};
std::atomic<int> publishing_ = 0;
std::unique_ptr<VisionIpcServer> vipc_server_;
};
|
2301_81045437/openpilot
|
tools/replay/camera.h
|
C++
|
mit
| 1,238
|
#!/usr/bin/env python3
import argparse
import os
import time
import usb1
import threading
os.environ['FILEREADER_CACHE'] = '1'
from openpilot.common.realtime import config_realtime_process, Ratekeeper, DT_CTRL
from openpilot.selfdrive.boardd.boardd import can_capnp_to_can_list
from openpilot.tools.lib.logreader import LogReader
from panda import PandaJungle
# set both to cycle power or ignition
PWR_ON = int(os.getenv("PWR_ON", "0"))
PWR_OFF = int(os.getenv("PWR_OFF", "0"))
IGN_ON = int(os.getenv("ON", "0"))
IGN_OFF = int(os.getenv("OFF", "0"))
ENABLE_IGN = IGN_ON > 0 and IGN_OFF > 0
ENABLE_PWR = PWR_ON > 0 and PWR_OFF > 0
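# e.g. PWR_ON=10 PWR_OFF=2 ./can_replay.py <route> cycles panda power; ON/OFF do the same for ignition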
def send_thread(j: PandaJungle, flock):
if "FLASH" in os.environ:
with flock:
j.flash()
j.reset()
for i in [0, 1, 2, 3, 0xFFFF]:
j.can_clear(i)
j.set_can_speed_kbps(i, 500)
j.set_ignition(True)
j.set_panda_power(True)
j.set_can_loopback(False)
rk = Ratekeeper(1 / DT_CTRL, print_delay_threshold=None)
while True:
# handle cycling
if ENABLE_PWR:
i = (rk.frame*DT_CTRL) % (PWR_ON + PWR_OFF) < PWR_ON
j.set_panda_power(i)
if ENABLE_IGN:
i = (rk.frame*DT_CTRL) % (IGN_ON + IGN_OFF) < IGN_ON
j.set_ignition(i)
snd = CAN_MSGS[rk.frame % len(CAN_MSGS)]
snd = list(filter(lambda x: x[-1] <= 2, snd))
try:
j.can_send_many(snd)
except usb1.USBErrorTimeout:
# timeout is fine, just means the CAN TX buffer is full
pass
# Drain panda message buffer
j.can_recv()
rk.keep_time()
def connect():
config_realtime_process(3, 55)
serials = {}
flashing_lock = threading.Lock()
while True:
# look for new devices
for s in PandaJungle.list():
if s not in serials:
print("starting send thread for", s)
serials[s] = threading.Thread(target=send_thread, args=(PandaJungle(s), flashing_lock))
serials[s].start()
# try to join all send threads
cur_serials = serials.copy()
for s, t in cur_serials.items():
if t is not None:
t.join(0.01)
if not t.is_alive():
del serials[s]
time.sleep(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Replay CAN messages from a route to all connected pandas and jungles in a loop.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("route_or_segment_name", nargs='?', help="The route or segment name to replay. If not specified, a default public route will be used.")
args = parser.parse_args()
def process(lr):
return [can_capnp_to_can_list(m.can) for m in lr if m.which() == 'can']
print("Loading log...")
if args.route_or_segment_name is None:
args.route_or_segment_name = "77611a1fac303767/2020-03-24--09-50-38/1:3"
sr = LogReader(args.route_or_segment_name)
CP = sr.first("carParams")
print(f"carFingerprint (for hardcoding fingerprint): '{CP.carFingerprint}'")
CAN_MSGS = sr.run_across_segments(24, process)
print("Finished loading...")
if ENABLE_PWR:
print(f"Cycling power: on for {PWR_ON}s, off for {PWR_OFF}s")
if ENABLE_IGN:
print(f"Cycling ignition: on for {IGN_ON}s, off for {IGN_OFF}s")
connect()
|
2301_81045437/openpilot
|
tools/replay/can_replay.py
|
Python
|
mit
| 3,214
|
#include "tools/replay/consoleui.h"
#include <initializer_list>
#include <string>
#include <tuple>
#include <utility>
#include <QApplication>
#include "common/util.h"
#include "common/version.h"
namespace {
const int BORDER_SIZE = 3;
const std::initializer_list<std::pair<std::string, std::string>> keyboard_shortcuts[] = {
{
{"s", "+10s"},
{"shift+s", "-10s"},
{"m", "+60s"},
{"shift+m", "-60s"},
{"space", "Pause/Resume"},
{"e", "Next Engagement"},
{"d", "Next Disengagement"},
{"t", "Next User Tag"},
{"i", "Next Info"},
{"w", "Next Warning"},
{"c", "Next Critical"},
},
{
{"enter", "Enter seek request"},
{"+/-", "Playback speed"},
{"q", "Exit"},
},
};
enum Color {
Default,
Debug,
Yellow,
Green,
Red,
Cyan,
BrightWhite,
Engaged,
Disengaged,
};
void add_str(WINDOW *w, const char *str, Color color = Color::Default, bool bold = false) {
if (color != Color::Default) wattron(w, COLOR_PAIR(color));
if (bold) wattron(w, A_BOLD);
waddstr(w, str);
if (bold) wattroff(w, A_BOLD);
if (color != Color::Default) wattroff(w, COLOR_PAIR(color));
}
} // namespace
ConsoleUI::ConsoleUI(Replay *replay, QObject *parent) : QObject(parent), sm({"carState", "liveParameters"}), replay(replay) {
// Initialize curses
initscr();
clear();
curs_set(false);
cbreak(); // Line buffering disabled. pass on everything
noecho();
keypad(stdscr, true);
nodelay(stdscr, true); // non-blocking getchar()
// Initialize all the colors. https://www.ditig.com/256-colors-cheat-sheet
start_color();
init_pair(Color::Debug, 246, COLOR_BLACK); // #949494
init_pair(Color::Yellow, 184, COLOR_BLACK);
init_pair(Color::Red, COLOR_RED, COLOR_BLACK);
init_pair(Color::Cyan, COLOR_CYAN, COLOR_BLACK);
init_pair(Color::BrightWhite, 15, COLOR_BLACK);
init_pair(Color::Disengaged, COLOR_BLUE, COLOR_BLUE);
init_pair(Color::Engaged, 28, 28);
init_pair(Color::Green, 34, COLOR_BLACK);
initWindows();
qRegisterMetaType<uint64_t>("uint64_t");
qRegisterMetaType<ReplyMsgType>("ReplyMsgType");
installMessageHandler([this](ReplyMsgType type, const std::string msg) {
emit logMessageSignal(type, QString::fromStdString(msg));
});
installDownloadProgressHandler([this](uint64_t cur, uint64_t total, bool success) {
emit updateProgressBarSignal(cur, total, success);
});
QObject::connect(replay, &Replay::streamStarted, this, &ConsoleUI::updateSummary);
  QObject::connect(&notifier, SIGNAL(activated(int)), SLOT(readyRead()));
QObject::connect(this, &ConsoleUI::updateProgressBarSignal, this, &ConsoleUI::updateProgressBar);
QObject::connect(this, &ConsoleUI::logMessageSignal, this, &ConsoleUI::logMessage);
sm_timer.callOnTimeout(this, &ConsoleUI::updateStatus);
sm_timer.start(100);
getch_timer.start(1000, this);
readyRead();
}
ConsoleUI::~ConsoleUI() {
endwin();
}
void ConsoleUI::initWindows() {
getmaxyx(stdscr, max_height, max_width);
w.fill(nullptr);
w[Win::Title] = newwin(1, max_width, 0, 0);
w[Win::Stats] = newwin(2, max_width - 2 * BORDER_SIZE, 2, BORDER_SIZE);
w[Win::Timeline] = newwin(4, max_width - 2 * BORDER_SIZE, 5, BORDER_SIZE);
w[Win::TimelineDesc] = newwin(1, 100, 10, BORDER_SIZE);
w[Win::CarState] = newwin(3, 100, 12, BORDER_SIZE);
w[Win::DownloadBar] = newwin(1, 100, 16, BORDER_SIZE);
if (int log_height = max_height - 27; log_height > 4) {
w[Win::LogBorder] = newwin(log_height, max_width - 2 * (BORDER_SIZE - 1), 17, BORDER_SIZE - 1);
box(w[Win::LogBorder], 0, 0);
w[Win::Log] = newwin(log_height - 2, max_width - 2 * BORDER_SIZE, 18, BORDER_SIZE);
scrollok(w[Win::Log], true);
}
w[Win::Help] = newwin(5, max_width - (2 * BORDER_SIZE), max_height - 6, BORDER_SIZE);
// set the title bar
wbkgd(w[Win::Title], A_REVERSE);
mvwprintw(w[Win::Title], 0, 3, "openpilot replay %s", COMMA_VERSION);
// show windows on the real screen
refresh();
displayTimelineDesc();
displayHelp();
updateSummary();
updateTimeline();
for (auto win : w) {
if (win) wrefresh(win);
}
}
void ConsoleUI::timerEvent(QTimerEvent *ev) {
if (ev->timerId() != getch_timer.timerId()) return;
if (is_term_resized(max_height, max_width)) {
for (auto win : w) {
if (win) delwin(win);
}
endwin();
clear();
refresh();
initWindows();
rWarning("resize term %dx%d", max_height, max_width);
}
updateTimeline();
}
void ConsoleUI::updateStatus() {
auto write_item = [this](int y, int x, const char *key, const std::string &value, const std::string &unit,
bool bold = false, Color color = Color::BrightWhite) {
auto win = w[Win::CarState];
wmove(win, y, x);
add_str(win, key);
add_str(win, value.c_str(), color, bold);
add_str(win, unit.c_str());
};
static const std::pair<const char *, Color> status_text[] = {
{"loading...", Color::Red},
{"playing", Color::Green},
{"paused...", Color::Yellow},
};
sm.update(0);
if (status != Status::Paused) {
auto events = replay->events();
uint64_t current_mono_time = replay->routeStartTime() + replay->currentSeconds() * 1e9;
bool playing = !events->empty() && events->back().mono_time > current_mono_time;
status = playing ? Status::Playing : Status::Waiting;
}
auto [status_str, status_color] = status_text[status];
write_item(0, 0, "STATUS: ", status_str, " ", false, status_color);
std::string current_segment = " - " + std::to_string((int)(replay->currentSeconds() / 60));
write_item(0, 25, "TIME: ", replay->currentDateTime().toString("ddd MMMM dd hh:mm:ss").toStdString(), current_segment, true);
auto p = sm["liveParameters"].getLiveParameters();
write_item(1, 0, "STIFFNESS: ", util::string_format("%.2f %%", p.getStiffnessFactor() * 100), " ");
write_item(1, 25, "SPEED: ", util::string_format("%.2f", sm["carState"].getCarState().getVEgo()), " m/s");
write_item(2, 0, "STEER RATIO: ", util::string_format("%.2f", p.getSteerRatio()), "");
auto angle_offsets = util::string_format("%.2f|%.2f", p.getAngleOffsetAverageDeg(), p.getAngleOffsetDeg());
write_item(2, 25, "ANGLE OFFSET(AVG|INSTANT): ", angle_offsets, " deg");
wrefresh(w[Win::CarState]);
}
void ConsoleUI::displayHelp() {
for (int i = 0; i < std::size(keyboard_shortcuts); ++i) {
wmove(w[Win::Help], i * 2, 0);
for (auto &[key, desc] : keyboard_shortcuts[i]) {
wattron(w[Win::Help], A_REVERSE);
waddstr(w[Win::Help], (' ' + key + ' ').c_str());
wattroff(w[Win::Help], A_REVERSE);
waddstr(w[Win::Help], (' ' + desc + ' ').c_str());
}
}
wrefresh(w[Win::Help]);
}
void ConsoleUI::displayTimelineDesc() {
std::tuple<Color, const char *, bool> indicators[]{
{Color::Engaged, " Engaged ", false},
{Color::Disengaged, " Disengaged ", false},
{Color::Green, " Info ", true},
{Color::Yellow, " Warning ", true},
{Color::Red, " Critical ", true},
{Color::Cyan, " User Tag ", true},
};
for (auto [color, name, bold] : indicators) {
add_str(w[Win::TimelineDesc], "__", color, bold);
add_str(w[Win::TimelineDesc], name);
}
}
void ConsoleUI::logMessage(ReplyMsgType type, const QString &msg) {
if (auto win = w[Win::Log]) {
Color color = Color::Default;
if (type == ReplyMsgType::Debug) {
color = Color::Debug;
} else if (type == ReplyMsgType::Warning) {
color = Color::Yellow;
} else if (type == ReplyMsgType::Critical) {
color = Color::Red;
}
add_str(win, qPrintable(msg + "\n"), color);
wrefresh(win);
}
}
void ConsoleUI::updateProgressBar(uint64_t cur, uint64_t total, bool success) {
werase(w[Win::DownloadBar]);
if (success && cur < total) {
const int width = 35;
const float progress = cur / (double)total;
const int pos = width * progress;
wprintw(w[Win::DownloadBar], "Downloading [%s>%s] %d%% %s", std::string(pos, '=').c_str(),
std::string(width - pos, ' ').c_str(), int(progress * 100.0), formattedDataSize(total).c_str());
}
wrefresh(w[Win::DownloadBar]);
}
void ConsoleUI::updateSummary() {
const auto &route = replay->route();
mvwprintw(w[Win::Stats], 0, 0, "Route: %s, %lu segments", qPrintable(route->name()), route->segments().size());
mvwprintw(w[Win::Stats], 1, 0, "Car Fingerprint: %s", replay->carFingerprint().c_str());
wrefresh(w[Win::Stats]);
}
void ConsoleUI::updateTimeline() {
auto win = w[Win::Timeline];
int width = getmaxx(win);
werase(win);
wattron(win, COLOR_PAIR(Color::Disengaged));
mvwhline(win, 1, 0, ' ', width);
mvwhline(win, 2, 0, ' ', width);
wattroff(win, COLOR_PAIR(Color::Disengaged));
const int total_sec = replay->totalSeconds();
for (auto [begin, end, type] : replay->getTimeline()) {
int start_pos = (begin / total_sec) * width;
int end_pos = (end / total_sec) * width;
if (type == TimelineType::Engaged) {
mvwchgat(win, 1, start_pos, end_pos - start_pos + 1, A_COLOR, Color::Engaged, NULL);
mvwchgat(win, 2, start_pos, end_pos - start_pos + 1, A_COLOR, Color::Engaged, NULL);
} else if (type == TimelineType::UserFlag) {
mvwchgat(win, 3, start_pos, end_pos - start_pos + 1, ACS_S3, Color::Cyan, NULL);
} else {
auto color_id = Color::Green;
if (type != TimelineType::AlertInfo) {
color_id = type == TimelineType::AlertWarning ? Color::Yellow : Color::Red;
}
mvwchgat(win, 3, start_pos, end_pos - start_pos + 1, ACS_S3, color_id, NULL);
}
}
int cur_pos = ((double)replay->currentSeconds() / total_sec) * width;
wattron(win, COLOR_PAIR(Color::BrightWhite));
mvwaddch(win, 0, cur_pos, ACS_VLINE);
mvwaddch(win, 3, cur_pos, ACS_VLINE);
wattroff(win, COLOR_PAIR(Color::BrightWhite));
wrefresh(win);
}
void ConsoleUI::readyRead() {
int c;
while ((c = getch()) != ERR) {
handleKey(c);
}
}
void ConsoleUI::pauseReplay(bool pause) {
replay->pause(pause);
status = pause ? Status::Paused : Status::Waiting;
}
void ConsoleUI::handleKey(char c) {
if (c == '\n') {
    // pause the replay and switch getch() to blocking mode
pauseReplay(true);
updateStatus();
getch_timer.stop();
curs_set(true);
nodelay(stdscr, false);
// Wait for user input
rWarning("Waiting for input...");
int y = getmaxy(stdscr) - 9;
move(y, BORDER_SIZE);
add_str(stdscr, "Enter seek request: ", Color::BrightWhite, true);
refresh();
// Seek to choice
echo();
int choice = 0;
scanw((char *)"%d", &choice);
noecho();
pauseReplay(false);
replay->seekTo(choice, false);
// Clean up and turn off the blocking mode
move(y, 0);
clrtoeol();
nodelay(stdscr, true);
curs_set(false);
refresh();
getch_timer.start(1000, this);
} else if (c == '+' || c == '=') {
auto it = std::upper_bound(speed_array.begin(), speed_array.end(), replay->getSpeed());
if (it != speed_array.end()) {
rWarning("playback speed: %.1fx", *it);
replay->setSpeed(*it);
}
} else if (c == '_' || c == '-') {
auto it = std::lower_bound(speed_array.begin(), speed_array.end(), replay->getSpeed());
if (it != speed_array.begin()) {
auto prev = std::prev(it);
rWarning("playback speed: %.1fx", *prev);
replay->setSpeed(*prev);
}
} else if (c == 'e') {
replay->seekToFlag(FindFlag::nextEngagement);
} else if (c == 'd') {
replay->seekToFlag(FindFlag::nextDisEngagement);
} else if (c == 't') {
replay->seekToFlag(FindFlag::nextUserFlag);
} else if (c == 'i') {
replay->seekToFlag(FindFlag::nextInfo);
} else if (c == 'w') {
replay->seekToFlag(FindFlag::nextWarning);
} else if (c == 'c') {
replay->seekToFlag(FindFlag::nextCritical);
} else if (c == 'm') {
replay->seekTo(+60, true);
} else if (c == 'M') {
replay->seekTo(-60, true);
} else if (c == 's') {
replay->seekTo(+10, true);
} else if (c == 'S') {
replay->seekTo(-10, true);
} else if (c == ' ') {
pauseReplay(!replay->isPaused());
} else if (c == 'q' || c == 'Q') {
qApp->exit();
}
}
|
2301_81045437/openpilot
|
tools/replay/consoleui.cc
|
C++
|
mit
| 12,114
|
#pragma once
#include <array>
#include <QBasicTimer>
#include <QObject>
#include <QSocketNotifier>
#include <QTimer>
#include <QTimerEvent>
#include "tools/replay/replay.h"
#include <ncurses.h>
class ConsoleUI : public QObject {
Q_OBJECT
public:
ConsoleUI(Replay *replay, QObject *parent = 0);
~ConsoleUI();
inline static const std::array speed_array = {0.2f, 0.5f, 1.0f, 2.0f, 3.0f};
private:
void initWindows();
void handleKey(char c);
void displayHelp();
void displayTimelineDesc();
void updateTimeline();
void updateSummary();
void updateStatus();
void pauseReplay(bool pause);
enum Status { Waiting, Playing, Paused };
enum Win { Title, Stats, Log, LogBorder, DownloadBar, Timeline, TimelineDesc, Help, CarState, Max};
std::array<WINDOW*, Win::Max> w{};
SubMaster sm;
Replay *replay;
QBasicTimer getch_timer;
QTimer sm_timer;
QSocketNotifier notifier{0, QSocketNotifier::Read, this};
int max_width, max_height;
Status status = Status::Waiting;
signals:
void updateProgressBarSignal(uint64_t cur, uint64_t total, bool success);
void logMessageSignal(ReplyMsgType type, const QString &msg);
private slots:
void readyRead();
void timerEvent(QTimerEvent *ev);
void updateProgressBar(uint64_t cur, uint64_t total, bool success);
void logMessage(ReplyMsgType type, const QString &msg);
};
|
2301_81045437/openpilot
|
tools/replay/consoleui.h
|
C++
|
mit
| 1,352
|
#include "tools/replay/filereader.h"
#include <fstream>
#include "common/util.h"
#include "system/hardware/hw.h"
#include "tools/replay/util.h"
std::string cacheFilePath(const std::string &url) {
static std::string cache_path = [] {
const std::string comma_cache = Path::download_cache_root();
util::create_directories(comma_cache, 0755);
return comma_cache.back() == '/' ? comma_cache : comma_cache + "/";
}();
return cache_path + sha256(getUrlWithoutQuery(url));
}
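// Read a file, serving remote (https) URLs from the local cache when enabled
// and downloading them (with retries) otherwise.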
std::string FileReader::read(const std::string &file, std::atomic<bool> *abort) {
const bool is_remote = file.find("https://") == 0;
const std::string local_file = is_remote ? cacheFilePath(file) : file;
std::string result;
if ((!is_remote || cache_to_local_) && util::file_exists(local_file)) {
result = util::read_file(local_file);
} else if (is_remote) {
result = download(file, abort);
if (cache_to_local_ && !result.empty()) {
std::ofstream fs(local_file, std::ios::binary | std::ios::out);
fs.write(result.data(), result.size());
}
}
return result;
}
std::string FileReader::download(const std::string &url, std::atomic<bool> *abort) {
for (int i = 0; i <= max_retries_ && !(abort && *abort); ++i) {
if (i > 0) {
rWarning("download failed, retrying %d", i);
util::sleep_for(3000);
}
std::string result = httpGet(url, chunk_size_, abort);
if (!result.empty()) {
return result;
}
}
return {};
}
|
2301_81045437/openpilot
|
tools/replay/filereader.cc
|
C++
|
mit
| 1,478
|
#pragma once
#include <atomic>
#include <string>
class FileReader {
public:
FileReader(bool cache_to_local, size_t chunk_size = 0, int retries = 3)
: cache_to_local_(cache_to_local), chunk_size_(chunk_size), max_retries_(retries) {}
virtual ~FileReader() {}
std::string read(const std::string &file, std::atomic<bool> *abort = nullptr);
private:
std::string download(const std::string &url, std::atomic<bool> *abort);
size_t chunk_size_;
int max_retries_;
bool cache_to_local_;
};
std::string cacheFilePath(const std::string &url);
|
2301_81045437/openpilot
|
tools/replay/filereader.h
|
C++
|
mit
| 556
|
#include "tools/replay/framereader.h"
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include "common/util.h"
#include "third_party/libyuv/include/libyuv.h"
#include "tools/replay/util.h"
#ifdef __APPLE__
#define HW_DEVICE_TYPE AV_HWDEVICE_TYPE_VIDEOTOOLBOX
#define HW_PIX_FMT AV_PIX_FMT_VIDEOTOOLBOX
#else
#define HW_DEVICE_TYPE AV_HWDEVICE_TYPE_CUDA
#define HW_PIX_FMT AV_PIX_FMT_CUDA
#endif
namespace {
enum AVPixelFormat get_hw_format(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts) {
enum AVPixelFormat *hw_pix_fmt = reinterpret_cast<enum AVPixelFormat *>(ctx->opaque);
for (const enum AVPixelFormat *p = pix_fmts; *p != -1; p++) {
if (*p == *hw_pix_fmt) return *p;
}
rWarning("Please run replay with the --no-hw-decoder flag!");
*hw_pix_fmt = AV_PIX_FMT_NONE;
return AV_PIX_FMT_YUV420P;
}
struct DecoderManager {
VideoDecoder *acquire(CameraType type, AVCodecParameters *codecpar, bool hw_decoder) {
auto key = std::tuple(type, codecpar->width, codecpar->height);
std::unique_lock lock(mutex_);
if (auto it = decoders_.find(key); it != decoders_.end()) {
return it->second.get();
}
auto decoder = std::make_unique<VideoDecoder>();
if (!decoder->open(codecpar, hw_decoder)) {
decoder.reset(nullptr);
}
decoders_[key] = std::move(decoder);
return decoders_[key].get();
}
std::mutex mutex_;
std::map<std::tuple<CameraType, int, int>, std::unique_ptr<VideoDecoder>> decoders_;
};
DecoderManager decoder_manager;
} // namespace
FrameReader::FrameReader() {
av_log_set_level(AV_LOG_QUIET);
}
FrameReader::~FrameReader() {
if (input_ctx) avformat_close_input(&input_ctx);
}
bool FrameReader::load(CameraType type, const std::string &url, bool no_hw_decoder, std::atomic<bool> *abort, bool local_cache, int chunk_size, int retries) {
auto local_file_path = url.find("https://") == 0 ? cacheFilePath(url) : url;
if (!util::file_exists(local_file_path)) {
FileReader f(local_cache, chunk_size, retries);
if (f.read(url, abort).empty()) {
return false;
}
}
return loadFromFile(type, local_file_path, no_hw_decoder, abort);
}
bool FrameReader::loadFromFile(CameraType type, const std::string &file, bool no_hw_decoder, std::atomic<bool> *abort) {
if (avformat_open_input(&input_ctx, file.c_str(), nullptr, nullptr) != 0 ||
avformat_find_stream_info(input_ctx, nullptr) < 0) {
rError("Failed to open input file or find video stream");
return false;
}
input_ctx->probesize = 10 * 1024 * 1024; // 10MB
decoder_ = decoder_manager.acquire(type, input_ctx->streams[0]->codecpar, !no_hw_decoder);
if (!decoder_) {
return false;
}
width = decoder_->width;
height = decoder_->height;
AVPacket pkt;
packets_info.reserve(60 * 20); // 20fps, one minute
while (!(abort && *abort) && av_read_frame(input_ctx, &pkt) == 0) {
packets_info.emplace_back(PacketInfo{.flags = pkt.flags, .pos = pkt.pos});
av_packet_unref(&pkt);
}
avio_seek(input_ctx->pb, 0, SEEK_SET);
return !packets_info.empty();
}
bool FrameReader::get(int idx, VisionBuf *buf) {
if (!buf || idx < 0 || idx >= packets_info.size()) {
return false;
}
return decoder_->decode(this, idx, buf);
}
// class VideoDecoder
VideoDecoder::VideoDecoder() {
av_frame_ = av_frame_alloc();
hw_frame_ = av_frame_alloc();
}
VideoDecoder::~VideoDecoder() {
if (hw_device_ctx) av_buffer_unref(&hw_device_ctx);
if (decoder_ctx) avcodec_free_context(&decoder_ctx);
av_frame_free(&av_frame_);
av_frame_free(&hw_frame_);
}
bool VideoDecoder::open(AVCodecParameters *codecpar, bool hw_decoder) {
const AVCodec *decoder = avcodec_find_decoder(codecpar->codec_id);
if (!decoder) return false;
decoder_ctx = avcodec_alloc_context3(decoder);
if (!decoder_ctx || avcodec_parameters_to_context(decoder_ctx, codecpar) != 0) {
rError("Failed to allocate or initialize codec context");
return false;
}
width = (decoder_ctx->width + 3) & ~3;
height = decoder_ctx->height;
if (hw_decoder && !initHardwareDecoder(HW_DEVICE_TYPE)) {
rWarning("No device with hardware decoder found. fallback to CPU decoding.");
}
if (avcodec_open2(decoder_ctx, decoder, nullptr) < 0) {
rError("Failed to open codec");
return false;
}
return true;
}
bool VideoDecoder::initHardwareDecoder(AVHWDeviceType hw_device_type) {
const AVCodecHWConfig *config = nullptr;
for (int i = 0; (config = avcodec_get_hw_config(decoder_ctx->codec, i)) != nullptr; i++) {
if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX && config->device_type == hw_device_type) {
hw_pix_fmt = config->pix_fmt;
break;
}
}
if (!config) {
rWarning("Hardware configuration not found");
return false;
}
int ret = av_hwdevice_ctx_create(&hw_device_ctx, hw_device_type, nullptr, nullptr, 0);
if (ret < 0) {
hw_pix_fmt = AV_PIX_FMT_NONE;
rWarning("Failed to create specified HW device %d.", ret);
return false;
}
decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
decoder_ctx->opaque = &hw_pix_fmt;
decoder_ctx->get_format = get_hw_format;
return true;
}
bool VideoDecoder::decode(FrameReader *reader, int idx, VisionBuf *buf) {
int from_idx = idx;
if (idx != reader->prev_idx + 1) {
// seeking to the nearest key frame
for (int i = idx; i >= 0; --i) {
if (reader->packets_info[i].flags & AV_PKT_FLAG_KEY) {
from_idx = i;
break;
}
}
avio_seek(reader->input_ctx->pb, reader->packets_info[from_idx].pos, SEEK_SET);
}
reader->prev_idx = idx;
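  // decode forward from the chosen key frame; only the requested frame is copied into the output buffer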
bool result = false;
AVPacket pkt;
for (int i = from_idx; i <= idx; ++i) {
if (av_read_frame(reader->input_ctx, &pkt) == 0) {
AVFrame *f = decodeFrame(&pkt);
if (f && i == idx) {
result = copyBuffer(f, buf);
}
av_packet_unref(&pkt);
}
}
return result;
}
AVFrame *VideoDecoder::decodeFrame(AVPacket *pkt) {
int ret = avcodec_send_packet(decoder_ctx, pkt);
if (ret < 0) {
rError("Error sending a packet for decoding: %d", ret);
return nullptr;
}
ret = avcodec_receive_frame(decoder_ctx, av_frame_);
if (ret != 0) {
rError("avcodec_receive_frame error: %d", ret);
return nullptr;
}
if (av_frame_->format == hw_pix_fmt && av_hwframe_transfer_data(hw_frame_, av_frame_, 0) < 0) {
rError("error transferring frame data from GPU to CPU");
return nullptr;
}
return (av_frame_->format == hw_pix_fmt) ? hw_frame_ : av_frame_;
}
bool VideoDecoder::copyBuffer(AVFrame *f, VisionBuf *buf) {
if (hw_pix_fmt == HW_PIX_FMT) {
for (int i = 0; i < height/2; i++) {
memcpy(buf->y + (i*2 + 0)*buf->stride, f->data[0] + (i*2 + 0)*f->linesize[0], width);
memcpy(buf->y + (i*2 + 1)*buf->stride, f->data[0] + (i*2 + 1)*f->linesize[0], width);
memcpy(buf->uv + i*buf->stride, f->data[1] + i*f->linesize[1], width);
}
} else {
libyuv::I420ToNV12(f->data[0], f->linesize[0],
f->data[1], f->linesize[1],
f->data[2], f->linesize[2],
buf->y, buf->stride,
buf->uv, buf->stride,
width, height);
}
return true;
}
|
2301_81045437/openpilot
|
tools/replay/framereader.cc
|
C++
|
mit
| 7,237
|
#pragma once
#include <string>
#include <vector>
#include "cereal/visionipc/visionbuf.h"
#include "system/camerad/cameras/camera_common.h"
#include "tools/replay/filereader.h"
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
class VideoDecoder;
class FrameReader {
public:
FrameReader();
~FrameReader();
bool load(CameraType type, const std::string &url, bool no_hw_decoder = false, std::atomic<bool> *abort = nullptr, bool local_cache = false,
int chunk_size = -1, int retries = 0);
bool loadFromFile(CameraType type, const std::string &file, bool no_hw_decoder = false, std::atomic<bool> *abort = nullptr);
bool get(int idx, VisionBuf *buf);
size_t getFrameCount() const { return packets_info.size(); }
int width = 0, height = 0;
VideoDecoder *decoder_ = nullptr;
AVFormatContext *input_ctx = nullptr;
int prev_idx = -1;
struct PacketInfo {
int flags;
int64_t pos;
};
std::vector<PacketInfo> packets_info;
};
class VideoDecoder {
public:
VideoDecoder();
~VideoDecoder();
bool open(AVCodecParameters *codecpar, bool hw_decoder);
bool decode(FrameReader *reader, int idx, VisionBuf *buf);
int width = 0, height = 0;
private:
bool initHardwareDecoder(AVHWDeviceType hw_device_type);
AVFrame *decodeFrame(AVPacket *pkt);
bool copyBuffer(AVFrame *f, VisionBuf *buf);
AVFrame *av_frame_, *hw_frame_;
AVCodecContext *decoder_ctx = nullptr;
AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE;
AVBufferRef *hw_device_ctx = nullptr;
};
|
2301_81045437/openpilot
|
tools/replay/framereader.h
|
C++
|
mit
| 1,533
|
import itertools
from typing import Any
import matplotlib.pyplot as plt
import numpy as np
import pygame
from matplotlib.backends.backend_agg import FigureCanvasAgg
from openpilot.common.transformations.camera import get_view_frame_from_calib_frame
from openpilot.selfdrive.controls.radard import RADAR_TO_CAMERA
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
class UIParams:
lidar_x, lidar_y, lidar_zoom = 384, 960, 6
lidar_car_x, lidar_car_y = lidar_x / 2., lidar_y / 1.1
car_hwidth = 1.7272 / 2 * lidar_zoom
car_front = 2.6924 * lidar_zoom
car_back = 1.8796 * lidar_zoom
car_color = 110
UP = UIParams
METER_WIDTH = 20
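# Calibration projects car-frame points into image coordinates: rotate into the
# view frame with the extrinsics, apply the camera intrinsics, then divide by
# depth for the perspective projection.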
class Calibration:
def __init__(self, num_px, rpy, intrinsic, calib_scale):
self.intrinsic = intrinsic
self.extrinsics_matrix = get_view_frame_from_calib_frame(rpy[0], rpy[1], rpy[2], 0.0)[:,:3]
self.zoom = calib_scale
def car_space_to_ff(self, x, y, z):
car_space_projective = np.column_stack((x, y, z)).T
ep = self.extrinsics_matrix.dot(car_space_projective)
kep = self.intrinsic.dot(ep)
return (kep[:-1, :] / kep[-1, :]).T
def car_space_to_bb(self, x, y, z):
pts = self.car_space_to_ff(x, y, z)
return pts / self.zoom
_COLOR_CACHE : dict[tuple[int, int, int], Any] = {}
def find_color(lidar_surface, color):
if color in _COLOR_CACHE:
return _COLOR_CACHE[color]
tcolor = 0
ret = 255
for x in lidar_surface.get_palette():
if x[0:3] == color:
ret = tcolor
break
tcolor += 1
_COLOR_CACHE[color] = ret
return ret
def to_topdown_pt(y, x):
px, py = x * UP.lidar_zoom + UP.lidar_car_x, -y * UP.lidar_zoom + UP.lidar_car_y
if px > 0 and py > 0 and px < UP.lidar_x and py < UP.lidar_y:
return int(px), int(py)
return -1, -1
def draw_path(path, color, img, calibration, top_down, lid_color=None, z_off=0):
x, y, z = np.asarray(path.x), np.asarray(path.y), np.asarray(path.z) + z_off
pts = calibration.car_space_to_bb(x, y, z)
pts = np.round(pts).astype(int)
# draw lidar path point on lidar
# find color in 8 bit
if lid_color is not None and top_down is not None:
tcolor = find_color(top_down[0], lid_color)
for i in range(len(x)):
px, py = to_topdown_pt(x[i], y[i])
if px != -1:
top_down[1][px, py] = tcolor
height, width = img.shape[:2]
for x, y in pts:
if 1 < x < width - 1 and 1 < y < height - 1:
for a, b in itertools.permutations([-1, 0, -1], 2):
img[y + a, x + b] = color
def init_plots(arr, name_to_arr_idx, plot_xlims, plot_ylims, plot_names, plot_colors, plot_styles):
color_palette = { "r": (1, 0, 0),
"g": (0, 1, 0),
"b": (0, 0, 1),
"k": (0, 0, 0),
"y": (1, 1, 0),
"p": (0, 1, 1),
"m": (1, 0, 1)}
dpi = 90
fig = plt.figure(figsize=(575 / dpi, 600 / dpi), dpi=dpi)
canvas = FigureCanvasAgg(fig)
fig.set_facecolor((0.2, 0.2, 0.2))
axs = []
for pn in range(len(plot_ylims)):
ax = fig.add_subplot(len(plot_ylims), 1, len(axs)+1)
ax.set_xlim(plot_xlims[pn][0], plot_xlims[pn][1])
ax.set_ylim(plot_ylims[pn][0], plot_ylims[pn][1])
ax.patch.set_facecolor((0.4, 0.4, 0.4))
axs.append(ax)
plots, idxs, plot_select = [], [], []
for i, pl_list in enumerate(plot_names):
for j, item in enumerate(pl_list):
plot, = axs[i].plot(arr[:, name_to_arr_idx[item]],
label=item,
color=color_palette[plot_colors[i][j]],
linestyle=plot_styles[i][j])
plots.append(plot)
idxs.append(name_to_arr_idx[item])
plot_select.append(i)
axs[i].set_title(", ".join(f"{nm} ({cl})"
for (nm, cl) in zip(pl_list, plot_colors[i], strict=False)), fontsize=10)
axs[i].tick_params(axis="x", colors="white")
axs[i].tick_params(axis="y", colors="white")
axs[i].title.set_color("white")
if i < len(plot_ylims) - 1:
axs[i].set_xticks([])
canvas.draw()
def draw_plots(arr):
for ax in axs:
ax.draw_artist(ax.patch)
for i in range(len(plots)):
plots[i].set_ydata(arr[:, idxs[i]])
axs[plot_select[i]].draw_artist(plots[i])
raw_data = canvas.buffer_rgba()
plot_surface = pygame.image.frombuffer(raw_data, canvas.get_width_height(), "RGBA").convert()
return plot_surface
return draw_plots
def pygame_modules_have_loaded():
return pygame.display.get_init() and pygame.font.get_init()
def plot_model(m, img, calibration, top_down):
if calibration is None or top_down is None:
return
for lead in m.leadsV3:
if lead.prob < 0.5:
continue
x, y = lead.x[0], lead.y[0]
x_std = lead.xStd[0]
x -= RADAR_TO_CAMERA
_, py_top = to_topdown_pt(x + x_std, y)
px, py_bottom = to_topdown_pt(x - x_std, y)
top_down[1][int(round(px - 4)):int(round(px + 4)), py_top:py_bottom] = find_color(top_down[0], YELLOW)
for path, prob, _ in zip(m.laneLines, m.laneLineProbs, m.laneLineStds, strict=True):
color = (0, int(255 * prob), 0)
draw_path(path, color, img, calibration, top_down, YELLOW)
for edge, std in zip(m.roadEdges, m.roadEdgeStds, strict=True):
prob = max(1 - std, 0)
color = (int(255 * prob), 0, 0)
draw_path(edge, color, img, calibration, top_down, RED)
color = (255, 0, 0)
draw_path(m.position, color, img, calibration, top_down, RED, 1.22)
def plot_lead(rs, top_down):
for lead in [rs.leadOne, rs.leadTwo]:
if not lead.status:
continue
x = lead.dRel
px_left, py = to_topdown_pt(x, -10)
px_right, _ = to_topdown_pt(x, 10)
top_down[1][px_left:px_right, py] = find_color(top_down[0], RED)
def maybe_update_radar_points(lt, lid_overlay):
ar_pts = []
if lt is not None:
ar_pts = {}
for track in lt:
ar_pts[track.trackId] = [track.dRel, track.yRel, track.vRel, track.aRel, track.oncoming, track.stationary]
for ids, pt in ar_pts.items():
# negative here since radar is left positive
px, py = to_topdown_pt(pt[0], -pt[1])
if px != -1:
if pt[-1]:
color = 240
elif pt[-2]:
color = 230
else:
color = 255
if int(ids) == 1:
lid_overlay[px - 2:px + 2, py - 10:py + 10] = 100
else:
lid_overlay[px - 2:px + 2, py - 2:py + 2] = color
def get_blank_lid_overlay(UP):
lid_overlay = np.zeros((UP.lidar_x, UP.lidar_y), 'uint8')
# Draw the car.
lid_overlay[int(round(UP.lidar_car_x - UP.car_hwidth)):int(
round(UP.lidar_car_x + UP.car_hwidth)), int(round(UP.lidar_car_y -
UP.car_front))] = UP.car_color
lid_overlay[int(round(UP.lidar_car_x - UP.car_hwidth)):int(
round(UP.lidar_car_x + UP.car_hwidth)), int(round(UP.lidar_car_y +
UP.car_back))] = UP.car_color
lid_overlay[int(round(UP.lidar_car_x - UP.car_hwidth)), int(
round(UP.lidar_car_y - UP.car_front)):int(round(
UP.lidar_car_y + UP.car_back))] = UP.car_color
lid_overlay[int(round(UP.lidar_car_x + UP.car_hwidth)), int(
round(UP.lidar_car_y - UP.car_front)):int(round(
UP.lidar_car_y + UP.car_back))] = UP.car_color
return lid_overlay
|
2301_81045437/openpilot
|
tools/replay/lib/ui_helpers.py
|
Python
|
mit
| 7,331
|
#include "tools/replay/logreader.h"
#include <algorithm>
#include <utility>
#include "tools/replay/filereader.h"
#include "tools/replay/util.h"
bool LogReader::load(const std::string &url, std::atomic<bool> *abort, bool local_cache, int chunk_size, int retries) {
std::string data = FileReader(local_cache, chunk_size, retries).read(url, abort);
if (!data.empty() && url.find(".bz2") != std::string::npos)
data = decompressBZ2(data, abort);
bool success = !data.empty() && load(data.data(), data.size(), abort);
if (filters_.empty())
raw_ = std::move(data);
return success;
}
bool LogReader::load(const char *data, size_t size, std::atomic<bool> *abort) {
try {
events.reserve(65000);
kj::ArrayPtr<const capnp::word> words((const capnp::word *)data, size / sizeof(capnp::word));
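    // an rlog is a sequence of concatenated capnp messages; read them one at a time, advancing the word pointer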
while (words.size() > 0 && !(abort && *abort)) {
capnp::FlatArrayMessageReader reader(words);
auto event = reader.getRoot<cereal::Event>();
auto which = event.which();
auto event_data = kj::arrayPtr(words.begin(), reader.getEnd());
words = kj::arrayPtr(reader.getEnd(), words.end());
if (!filters_.empty()) {
if (which >= filters_.size() || !filters_[which])
continue;
auto buf = buffer_.allocate(event_data.size() * sizeof(capnp::word));
memcpy(buf, event_data.begin(), event_data.size() * sizeof(capnp::word));
event_data = kj::arrayPtr((const capnp::word *)buf, event_data.size());
}
uint64_t mono_time = event.getLogMonoTime();
const Event &evt = events.emplace_back(which, mono_time, event_data);
// Add encodeIdx packet again as a frame packet for the video stream
if (evt.which == cereal::Event::ROAD_ENCODE_IDX ||
evt.which == cereal::Event::DRIVER_ENCODE_IDX ||
evt.which == cereal::Event::WIDE_ROAD_ENCODE_IDX) {
auto idx = capnp::AnyStruct::Reader(event).getPointerSection()[0].getAs<cereal::EncodeIndex>();
if (uint64_t sof = idx.getTimestampSof()) {
mono_time = sof;
}
events.emplace_back(which, mono_time, event_data, idx.getSegmentNum());
}
}
} catch (const kj::Exception &e) {
rWarning("Failed to parse log : %s.\nRetrieved %zu events from corrupt log", e.getDescription().cStr(), events.size());
}
if (!events.empty() && !(abort && *abort)) {
events.shrink_to_fit();
std::sort(events.begin(), events.end());
return true;
}
return false;
}
|
2301_81045437/openpilot
|
tools/replay/logreader.cc
|
C++
|
mit
| 2,480
|
#pragma once
#include <string>
#include <vector>
#include "cereal/gen/cpp/log.capnp.h"
#include "system/camerad/cameras/camera_common.h"
#include "tools/replay/util.h"
const CameraType ALL_CAMERAS[] = {RoadCam, DriverCam, WideRoadCam};
const int MAX_CAMERAS = std::size(ALL_CAMERAS);
class Event {
public:
Event(cereal::Event::Which which, uint64_t mono_time, const kj::ArrayPtr<const capnp::word> &data, int eidx_segnum = -1)
: which(which), mono_time(mono_time), data(data), eidx_segnum(eidx_segnum) {}
bool operator<(const Event &other) const {
return mono_time < other.mono_time || (mono_time == other.mono_time && which < other.which);
}
uint64_t mono_time;
cereal::Event::Which which;
kj::ArrayPtr<const capnp::word> data;
int32_t eidx_segnum;
};
class LogReader {
public:
LogReader(const std::vector<bool> &filters = {}) { filters_ = filters; }
bool load(const std::string &url, std::atomic<bool> *abort = nullptr,
bool local_cache = false, int chunk_size = -1, int retries = 0);
bool load(const char *data, size_t size, std::atomic<bool> *abort = nullptr);
std::vector<Event> events;
private:
std::string raw_;
std::vector<bool> filters_;
MonotonicBuffer buffer_{1024 * 1024};
};
|
2301_81045437/openpilot
|
tools/replay/logreader.h
|
C++
|
mit
| 1,244
|
#include <QApplication>
#include <QCommandLineParser>
#include "common/prefix.h"
#include "tools/replay/consoleui.h"
#include "tools/replay/replay.h"
int main(int argc, char *argv[]) {
#ifdef __APPLE__
// With all sockets opened, we might hit the default limit of 256 on macOS
util::set_file_descriptor_limit(1024);
#endif
QCoreApplication app(argc, argv);
const std::tuple<QString, REPLAY_FLAGS, QString> flags[] = {
{"dcam", REPLAY_FLAG_DCAM, "load driver camera"},
{"ecam", REPLAY_FLAG_ECAM, "load wide road camera"},
{"no-loop", REPLAY_FLAG_NO_LOOP, "stop at the end of the route"},
{"no-cache", REPLAY_FLAG_NO_FILE_CACHE, "turn off local cache"},
{"qcam", REPLAY_FLAG_QCAMERA, "load qcamera"},
{"no-hw-decoder", REPLAY_FLAG_NO_HW_DECODER, "disable HW video decoding"},
{"no-vipc", REPLAY_FLAG_NO_VIPC, "do not output video"},
{"all", REPLAY_FLAG_ALL_SERVICES, "do output all messages including uiDebug, userFlag"
". this may causes issues when used along with UI"}
};
QCommandLineParser parser;
parser.setApplicationDescription("Mock openpilot components by publishing logged messages.");
parser.addHelpOption();
parser.addPositionalArgument("route", "the drive to replay. find your drives at connect.comma.ai");
parser.addOption({{"a", "allow"}, "whitelist of services to send", "allow"});
parser.addOption({{"b", "block"}, "blacklist of services to send", "block"});
parser.addOption({{"c", "cache"}, "cache <n> segments in memory. default is 5", "n"});
parser.addOption({{"s", "start"}, "start from <seconds>", "seconds"});
parser.addOption({"x", QString("playback <speed>. between %1 - %2")
.arg(ConsoleUI::speed_array.front()).arg(ConsoleUI::speed_array.back()), "speed"});
parser.addOption({"demo", "use a demo route instead of providing your own"});
parser.addOption({"data_dir", "local directory with routes", "data_dir"});
parser.addOption({"prefix", "set OPENPILOT_PREFIX", "prefix"});
for (auto &[name, _, desc] : flags) {
parser.addOption({name, desc});
}
parser.process(app);
const QStringList args = parser.positionalArguments();
if (args.empty() && !parser.isSet("demo")) {
parser.showHelp();
}
const QString route = args.empty() ? DEMO_ROUTE : args.first();
QStringList allow = parser.value("allow").isEmpty() ? QStringList{} : parser.value("allow").split(",");
QStringList block = parser.value("block").isEmpty() ? QStringList{} : parser.value("block").split(",");
uint32_t replay_flags = REPLAY_FLAG_NONE;
for (const auto &[name, flag, _] : flags) {
if (parser.isSet(name)) {
replay_flags |= flag;
}
}
std::unique_ptr<OpenpilotPrefix> op_prefix;
auto prefix = parser.value("prefix");
if (!prefix.isEmpty()) {
op_prefix.reset(new OpenpilotPrefix(prefix.toStdString()));
}
Replay *replay = new Replay(route, allow, block, nullptr, replay_flags, parser.value("data_dir"), &app);
if (!parser.value("c").isEmpty()) {
replay->setSegmentCacheLimit(parser.value("c").toInt());
}
if (!parser.value("x").isEmpty()) {
replay->setSpeed(std::clamp(parser.value("x").toFloat(),
ConsoleUI::speed_array.front(), ConsoleUI::speed_array.back()));
}
if (!replay->load()) {
return 0;
}
ConsoleUI console_ui(replay);
replay->start(parser.value("start").toInt());
return app.exec();
}
|
2301_81045437/openpilot
|
tools/replay/main.cc
|
C++
|
mit
| 3,465
|
#include "tools/replay/replay.h"
#include <QDebug>
#include <QtConcurrent>
#include <capnp/dynamic.h>
#include <csignal>
#include "cereal/services.h"
#include "common/params.h"
#include "common/timing.h"
#include "tools/replay/util.h"
static void interrupt_sleep_handler(int signal) {}
Replay::Replay(QString route, QStringList allow, QStringList block, SubMaster *sm_,
uint32_t flags, QString data_dir, QObject *parent) : sm(sm_), flags_(flags), QObject(parent) {
// Register signal handler for SIGUSR1
std::signal(SIGUSR1, interrupt_sleep_handler);
if (!(flags_ & REPLAY_FLAG_ALL_SERVICES)) {
block << "uiDebug" << "userFlag";
}
auto event_struct = capnp::Schema::from<cereal::Event>().asStruct();
sockets_.resize(event_struct.getUnionFields().size());
for (const auto &[name, _] : services) {
if (!block.contains(name.c_str()) && (allow.empty() || allow.contains(name.c_str()))) {
uint16_t which = event_struct.getFieldByName(name).getProto().getDiscriminantValue();
sockets_[which] = name.c_str();
}
}
if (!allow.isEmpty()) {
for (int i = 0; i < sockets_.size(); ++i) {
filters_.push_back(i == cereal::Event::Which::INIT_DATA || i == cereal::Event::Which::CAR_PARAMS || sockets_[i]);
}
}
std::vector<const char *> s;
std::copy_if(sockets_.begin(), sockets_.end(), std::back_inserter(s),
[](const char *name) { return name != nullptr; });
qDebug() << "services " << s;
qDebug() << "loading route " << route;
if (sm == nullptr) {
pm = std::make_unique<PubMaster>(s);
}
route_ = std::make_unique<Route>(route, data_dir);
}
Replay::~Replay() {
if (!stream_thread_ && segments_.empty()) return;
rInfo("shutdown: in progress...");
if (stream_thread_ != nullptr) {
    exit_ = true;
paused_ = true;
stream_cv_.notify_one();
stream_thread_->quit();
stream_thread_->wait();
delete stream_thread_;
}
timeline_future.waitForFinished();
rInfo("shutdown: done");
}
bool Replay::load() {
if (!route_->load()) {
qCritical() << "failed to load route" << route_->name()
<< "from" << (route_->dir().isEmpty() ? "server" : route_->dir());
return false;
}
for (auto &[n, f] : route_->segments()) {
bool has_log = !f.rlog.isEmpty() || !f.qlog.isEmpty();
bool has_video = !f.road_cam.isEmpty() || !f.qcamera.isEmpty();
if (has_log && (has_video || hasFlag(REPLAY_FLAG_NO_VIPC))) {
segments_.insert({n, nullptr});
}
}
if (segments_.empty()) {
qCritical() << "no valid segments in route" << route_->name();
return false;
}
rInfo("load route %s with %zu valid segments", qPrintable(route_->name()), segments_.size());
return true;
}
void Replay::start(int seconds) {
seekTo(route_->identifier().begin_segment * 60 + seconds, false);
}
void Replay::updateEvents(const std::function<bool()> &update_events_function) {
pauseStreamThread();
{
std::unique_lock lk(stream_lock_);
events_ready_ = update_events_function();
paused_ = user_paused_;
}
stream_cv_.notify_one();
}
void Replay::seekTo(double seconds, bool relative) {
updateEvents([&]() {
seeking_to_seconds_ = relative ? seconds + currentSeconds() : seconds;
    seeking_to_seconds_ = std::max(0.0, seeking_to_seconds_);
int target_segment = (int)seeking_to_seconds_ / 60;
if (segments_.count(target_segment) == 0) {
rWarning("can't seek to %d s segment %d is invalid", (int)seeking_to_seconds_, target_segment);
return true;
}
rInfo("seeking to %d s, segment %d", (int)seeking_to_seconds_, target_segment);
current_segment_ = target_segment;
cur_mono_time_ = route_start_ts_ + seeking_to_seconds_ * 1e9;
bool segment_merged = isSegmentMerged(target_segment);
if (segment_merged) {
emit seekedTo(seeking_to_seconds_);
// Reset seeking_to_seconds_ to indicate completion of seek
seeking_to_seconds_ = -1;
}
return segment_merged;
});
updateSegmentsCache();
}
void Replay::seekToFlag(FindFlag flag) {
if (auto next = find(flag)) {
seekTo(*next - 2, false); // seek to 2 seconds before next
}
}
void Replay::buildTimeline() {
uint64_t engaged_begin = 0;
bool engaged = false;
auto alert_status = cereal::ControlsState::AlertStatus::NORMAL;
auto alert_size = cereal::ControlsState::AlertSize::NONE;
uint64_t alert_begin = 0;
std::string alert_type;
const TimelineType timeline_types[] = {
[(int)cereal::ControlsState::AlertStatus::NORMAL] = TimelineType::AlertInfo,
[(int)cereal::ControlsState::AlertStatus::USER_PROMPT] = TimelineType::AlertWarning,
[(int)cereal::ControlsState::AlertStatus::CRITICAL] = TimelineType::AlertCritical,
};
const auto &route_segments = route_->segments();
for (auto it = route_segments.cbegin(); it != route_segments.cend() && !exit_; ++it) {
std::shared_ptr<LogReader> log(new LogReader());
if (!log->load(it->second.qlog.toStdString(), &exit_, !hasFlag(REPLAY_FLAG_NO_FILE_CACHE), 0, 3)) continue;
for (const Event &e : log->events) {
if (e.which == cereal::Event::Which::CONTROLS_STATE) {
capnp::FlatArrayMessageReader reader(e.data);
auto event = reader.getRoot<cereal::Event>();
auto cs = event.getControlsState();
if (engaged != cs.getEnabled()) {
if (engaged) {
std::lock_guard lk(timeline_lock);
timeline.push_back({toSeconds(engaged_begin), toSeconds(e.mono_time), TimelineType::Engaged});
}
engaged_begin = e.mono_time;
engaged = cs.getEnabled();
}
if (alert_type != cs.getAlertType().cStr() || alert_status != cs.getAlertStatus()) {
if (!alert_type.empty() && alert_size != cereal::ControlsState::AlertSize::NONE) {
std::lock_guard lk(timeline_lock);
timeline.push_back({toSeconds(alert_begin), toSeconds(e.mono_time), timeline_types[(int)alert_status]});
}
alert_begin = e.mono_time;
alert_type = cs.getAlertType().cStr();
alert_size = cs.getAlertSize();
alert_status = cs.getAlertStatus();
}
} else if (e.which == cereal::Event::Which::USER_FLAG) {
std::lock_guard lk(timeline_lock);
timeline.push_back({toSeconds(e.mono_time), toSeconds(e.mono_time), TimelineType::UserFlag});
}
}
    // Sort under the timeline lock: getTimeline() may be copying it concurrently.
    {
      std::lock_guard lk(timeline_lock);
      std::sort(timeline.begin(), timeline.end(), [](auto &l, auto &r) { return std::get<2>(l) < std::get<2>(r); });
    }
emit qLogLoaded(it->first, log);
}
}
std::optional<uint64_t> Replay::find(FindFlag flag) {
int cur_ts = currentSeconds();
for (auto [start_ts, end_ts, type] : getTimeline()) {
if (type == TimelineType::Engaged) {
if (flag == FindFlag::nextEngagement && start_ts > cur_ts) {
return start_ts;
} else if (flag == FindFlag::nextDisEngagement && end_ts > cur_ts) {
return end_ts;
}
} else if (start_ts > cur_ts) {
if ((flag == FindFlag::nextUserFlag && type == TimelineType::UserFlag) ||
(flag == FindFlag::nextInfo && type == TimelineType::AlertInfo) ||
(flag == FindFlag::nextWarning && type == TimelineType::AlertWarning) ||
(flag == FindFlag::nextCritical && type == TimelineType::AlertCritical)) {
return start_ts;
}
}
}
return std::nullopt;
}
void Replay::pause(bool pause) {
if (user_paused_ != pause) {
pauseStreamThread();
{
std::unique_lock lk(stream_lock_);
rWarning("%s at %.2f s", pause ? "paused..." : "resuming", currentSeconds());
paused_ = user_paused_ = pause;
}
stream_cv_.notify_one();
}
}
void Replay::pauseStreamThread() {
paused_ = true;
// Send SIGUSR1 to interrupt clock_nanosleep
if (stream_thread_ && stream_thread_id) {
pthread_kill(stream_thread_id, SIGUSR1);
}
}
void Replay::segmentLoadFinished(bool success) {
if (!success) {
Segment *seg = qobject_cast<Segment *>(sender());
rWarning("failed to load segment %d, removing it from current replay list", seg->seg_num);
updateEvents([&]() {
segments_.erase(seg->seg_num);
return !segments_.empty();
});
}
updateSegmentsCache();
}
void Replay::updateSegmentsCache() {
auto cur = segments_.lower_bound(current_segment_.load());
if (cur == segments_.end()) return;
// Calculate the range of segments to load
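  // The window [begin, end) spans up to segment_cache_limit entries roughly
  // centered on the current segment, clamped at both ends of the map so a
  // full-sized window is kept even when the current segment is near an edge.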
auto begin = std::prev(cur, std::min<int>(segment_cache_limit / 2, std::distance(segments_.begin(), cur)));
auto end = std::next(begin, std::min<int>(segment_cache_limit, std::distance(begin, segments_.end())));
begin = std::prev(end, std::min<int>(segment_cache_limit, std::distance(segments_.begin(), end)));
loadSegmentInRange(begin, cur, end);
mergeSegments(begin, end);
  // Free segments outside the current segment window.
std::for_each(segments_.begin(), begin, [](auto &e) { e.second.reset(nullptr); });
std::for_each(end, segments_.end(), [](auto &e) { e.second.reset(nullptr); });
// start stream thread
const auto &cur_segment = cur->second;
if (stream_thread_ == nullptr && cur_segment->isLoaded()) {
startStream(cur_segment.get());
emit streamStarted();
}
}
void Replay::loadSegmentInRange(SegmentMap::iterator begin, SegmentMap::iterator cur, SegmentMap::iterator end) {
auto loadNext = [this](auto begin, auto end) {
auto it = std::find_if(begin, end, [](const auto &seg_it) { return !seg_it.second || !seg_it.second->isLoaded(); });
if (it != end && !it->second) {
rDebug("loading segment %d...", it->first);
it->second = std::make_unique<Segment>(it->first, route_->at(it->first), flags_, filters_);
QObject::connect(it->second.get(), &Segment::loadFinished, this, &Replay::segmentLoadFinished);
return true;
}
return false;
};
// Load forward segments, then try reverse
if (!loadNext(cur, end)) {
loadNext(std::make_reverse_iterator(cur), segments_.rend());
}
}
void Replay::mergeSegments(const SegmentMap::iterator &begin, const SegmentMap::iterator &end) {
std::set<int> segments_to_merge;
size_t new_events_size = 0;
for (auto it = begin; it != end; ++it) {
if (it->second && it->second->isLoaded()) {
segments_to_merge.insert(it->first);
new_events_size += it->second->log->events.size();
}
}
if (segments_to_merge == merged_segments_) return;
rDebug("merge segments %s", std::accumulate(segments_to_merge.begin(), segments_to_merge.end(), std::string{},
[](auto & a, int b) { return a + (a.empty() ? "" : ", ") + std::to_string(b); }).c_str());
std::vector<Event> new_events;
new_events.reserve(new_events_size);
// Merge events from segments_to_merge into new_events
for (int n : segments_to_merge) {
size_t size = new_events.size();
const auto &events = segments_.at(n)->log->events;
std::copy_if(events.begin(), events.end(), std::back_inserter(new_events),
[this](const Event &e) { return e.which < sockets_.size() && sockets_[e.which] != nullptr; });
std::inplace_merge(new_events.begin(), new_events.begin() + size, new_events.end());
}
if (stream_thread_) {
emit segmentsMerged();
}
updateEvents([&]() {
events_.swap(new_events);
merged_segments_ = segments_to_merge;
// Check if seeking is in progress
int target_segment = int(seeking_to_seconds_ / 60);
if (seeking_to_seconds_ >= 0 && segments_to_merge.count(target_segment) > 0) {
emit seekedTo(seeking_to_seconds_);
seeking_to_seconds_ = -1; // Reset seeking_to_seconds_ to indicate completion of seek
}
// Wake up the stream thread if the current segment is loaded or invalid.
return isSegmentMerged(current_segment_) || (segments_.count(current_segment_) == 0);
});
}
void Replay::startStream(const Segment *cur_segment) {
const auto &events = cur_segment->log->events;
route_start_ts_ = events.front().mono_time;
cur_mono_time_ += route_start_ts_ - 1;
// get datetime from INIT_DATA, fallback to datetime in the route name
route_date_time_ = route()->datetime();
auto it = std::find_if(events.cbegin(), events.cend(),
[](const Event &e) { return e.which == cereal::Event::Which::INIT_DATA; });
if (it != events.cend()) {
capnp::FlatArrayMessageReader reader(it->data);
auto event = reader.getRoot<cereal::Event>();
uint64_t wall_time = event.getInitData().getWallTimeNanos();
if (wall_time > 0) {
route_date_time_ = QDateTime::fromMSecsSinceEpoch(wall_time / 1e6);
}
}
// write CarParams
it = std::find_if(events.begin(), events.end(), [](const Event &e) { return e.which == cereal::Event::Which::CAR_PARAMS; });
if (it != events.end()) {
capnp::FlatArrayMessageReader reader(it->data);
auto event = reader.getRoot<cereal::Event>();
car_fingerprint_ = event.getCarParams().getCarFingerprint();
capnp::MallocMessageBuilder builder;
builder.setRoot(event.getCarParams());
auto words = capnp::messageToFlatArray(builder);
auto bytes = words.asBytes();
Params().put("CarParams", (const char *)bytes.begin(), bytes.size());
Params().put("CarParamsPersistent", (const char *)bytes.begin(), bytes.size());
} else {
rWarning("failed to read CarParams from current segment");
}
// start camera server
if (!hasFlag(REPLAY_FLAG_NO_VIPC)) {
std::pair<int, int> camera_size[MAX_CAMERAS] = {};
for (auto type : ALL_CAMERAS) {
if (auto &fr = cur_segment->frames[type]) {
camera_size[type] = {fr->width, fr->height};
}
}
camera_server_ = std::make_unique<CameraServer>(camera_size);
}
emit segmentsMerged();
// start stream thread
stream_thread_ = new QThread();
  QObject::connect(stream_thread_, &QThread::started, [this]() { streamThread(); });
stream_thread_->start();
timeline_future = QtConcurrent::run(this, &Replay::buildTimeline);
}
void Replay::publishMessage(const Event *e) {
if (event_filter && event_filter(e, filter_opaque)) return;
if (sm == nullptr) {
auto bytes = e->data.asBytes();
int ret = pm->send(sockets_[e->which], (capnp::byte *)bytes.begin(), bytes.size());
if (ret == -1) {
rWarning("stop publishing %s due to multiple publishers error", sockets_[e->which]);
sockets_[e->which] = nullptr;
}
} else {
capnp::FlatArrayMessageReader reader(e->data);
auto event = reader.getRoot<cereal::Event>();
sm->update_msgs(nanos_since_boot(), {{sockets_[e->which], event}});
}
}
void Replay::publishFrame(const Event *e) {
CameraType cam;
switch (e->which) {
case cereal::Event::ROAD_ENCODE_IDX: cam = RoadCam; break;
case cereal::Event::DRIVER_ENCODE_IDX: cam = DriverCam; break;
case cereal::Event::WIDE_ROAD_ENCODE_IDX: cam = WideRoadCam; break;
default: return; // Invalid event type
}
if ((cam == DriverCam && !hasFlag(REPLAY_FLAG_DCAM)) || (cam == WideRoadCam && !hasFlag(REPLAY_FLAG_ECAM)))
    return;  // Camera is disabled
if (isSegmentMerged(e->eidx_segnum)) {
auto &segment = segments_.at(e->eidx_segnum);
if (auto &frame = segment->frames[cam]; frame) {
camera_server_->pushFrame(cam, frame.get(), e);
}
}
}
void Replay::streamThread() {
stream_thread_id = pthread_self();
cereal::Event::Which cur_which = cereal::Event::Which::INIT_DATA;
std::unique_lock lk(stream_lock_);
while (true) {
    stream_cv_.wait(lk, [=]() { return exit_ || (events_ready_ && !paused_); });
if (exit_) break;
Event event(cur_which, cur_mono_time_, {});
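    // Probe event: events_ are kept sorted, so (assuming Event compares by
    // mono_time, then which) upper_bound resumes publishing with the first
    // event strictly after the last one published.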
auto first = std::upper_bound(events_.cbegin(), events_.cend(), event);
if (first == events_.cend()) {
rInfo("waiting for events...");
events_ready_ = false;
continue;
}
auto it = publishEvents(first, events_.cend());
// Ensure frames are sent before unlocking to prevent race conditions
if (camera_server_) {
camera_server_->waitForSent();
}
if (it != events_.cend()) {
cur_which = it->which;
} else if (!hasFlag(REPLAY_FLAG_NO_LOOP)) {
// Check for loop end and restart if necessary
int last_segment = segments_.rbegin()->first;
if (current_segment_ >= last_segment && isSegmentMerged(last_segment)) {
rInfo("reaches the end of route, restart from beginning");
QMetaObject::invokeMethod(this, std::bind(&Replay::seekTo, this, 0, false), Qt::QueuedConnection);
}
}
}
}
std::vector<Event>::const_iterator Replay::publishEvents(std::vector<Event>::const_iterator first,
std::vector<Event>::const_iterator last) {
uint64_t evt_start_ts = cur_mono_time_;
uint64_t loop_start_ts = nanos_since_boot();
double prev_replay_speed = speed_;
for (; !paused_ && first != last; ++first) {
const Event &evt = *first;
int segment = toSeconds(evt.mono_time) / 60;
if (current_segment_ != segment) {
current_segment_ = segment;
QMetaObject::invokeMethod(this, &Replay::updateSegmentsCache, Qt::QueuedConnection);
}
// Skip events if socket is not present
if (!sockets_[evt.which]) continue;
const uint64_t current_nanos = nanos_since_boot();
const int64_t time_diff = (evt.mono_time - evt_start_ts) / speed_ - (current_nanos - loop_start_ts);
// Reset timestamps for potential synchronization issues:
// - A negative time_diff may indicate slow execution or system wake-up,
// - A time_diff exceeding 1 second suggests a skipped segment.
if ((time_diff < -1e9 || time_diff >= 1e9) || speed_ != prev_replay_speed) {
evt_start_ts = evt.mono_time;
loop_start_ts = current_nanos;
prev_replay_speed = speed_;
} else if (time_diff > 0) {
precise_nano_sleep(time_diff);
}
if (paused_) break;
cur_mono_time_ = evt.mono_time;
if (evt.eidx_segnum == -1) {
publishMessage(&evt);
} else if (camera_server_) {
if (speed_ > 1.0) {
camera_server_->waitForSent();
}
publishFrame(&evt);
}
}
return first;
}
|
2301_81045437/openpilot
|
tools/replay/replay.cc
|
C++
|
mit
| 18,050
|
#pragma once
#include <algorithm>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <tuple>
#include <vector>
#include <utility>
#include <QThread>
#include "tools/replay/camera.h"
#include "tools/replay/route.h"
const QString DEMO_ROUTE = "a2a0ccea32023010|2023-07-27--13-01-19";
// one segment uses about 100 MB of memory
constexpr int MIN_SEGMENTS_CACHE = 5;
enum REPLAY_FLAGS {
REPLAY_FLAG_NONE = 0x0000,
REPLAY_FLAG_DCAM = 0x0002,
REPLAY_FLAG_ECAM = 0x0004,
REPLAY_FLAG_NO_LOOP = 0x0010,
REPLAY_FLAG_NO_FILE_CACHE = 0x0020,
REPLAY_FLAG_QCAMERA = 0x0040,
REPLAY_FLAG_NO_HW_DECODER = 0x0100,
REPLAY_FLAG_NO_VIPC = 0x0400,
REPLAY_FLAG_ALL_SERVICES = 0x0800,
};
enum class FindFlag {
nextEngagement,
nextDisEngagement,
nextUserFlag,
nextInfo,
nextWarning,
nextCritical
};
enum class TimelineType { None, Engaged, AlertInfo, AlertWarning, AlertCritical, UserFlag };
typedef bool (*replayEventFilter)(const Event *, void *);
Q_DECLARE_METATYPE(std::shared_ptr<LogReader>);
class Replay : public QObject {
Q_OBJECT
public:
Replay(QString route, QStringList allow, QStringList block, SubMaster *sm = nullptr,
         uint32_t flags = REPLAY_FLAG_NONE, QString data_dir = "", QObject *parent = nullptr);
~Replay();
bool load();
void start(int seconds = 0);
void pause(bool pause);
void seekToFlag(FindFlag flag);
void seekTo(double seconds, bool relative);
inline bool isPaused() const { return user_paused_; }
  // The filter is called on the streaming thread; return quickly to avoid blocking streaming.
  // The filter must return true if the event should be filtered out (dropped),
  // and false otherwise.
inline void installEventFilter(replayEventFilter filter, void *opaque) {
filter_opaque = opaque;
event_filter = filter;
}
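  // Illustrative sketch (not part of the original header): a filter that drops
  // road encoder index events; the opaque pointer is forwarded to the filter unchanged.
  //   static bool dropRoadEncodeIdx(const Event *e, void *opaque) {
  //     return e->which == cereal::Event::Which::ROAD_ENCODE_IDX;
  //   }
  //   replay->installEventFilter(dropRoadEncodeIdx, nullptr);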
inline int segmentCacheLimit() const { return segment_cache_limit; }
inline void setSegmentCacheLimit(int n) { segment_cache_limit = std::max(MIN_SEGMENTS_CACHE, n); }
inline bool hasFlag(REPLAY_FLAGS flag) const { return flags_ & flag; }
inline void addFlag(REPLAY_FLAGS flag) { flags_ |= flag; }
inline void removeFlag(REPLAY_FLAGS flag) { flags_ &= ~flag; }
inline const Route* route() const { return route_.get(); }
inline double currentSeconds() const { return double(cur_mono_time_ - route_start_ts_) / 1e9; }
inline QDateTime routeDateTime() const { return route_date_time_; }
inline QDateTime currentDateTime() const { return route_date_time_.addSecs(currentSeconds()); }
inline uint64_t routeStartTime() const { return route_start_ts_; }
inline double toSeconds(uint64_t mono_time) const { return (mono_time - route_start_ts_) / 1e9; }
inline int totalSeconds() const { return (!segments_.empty()) ? (segments_.rbegin()->first + 1) * 60 : 0; }
inline void setSpeed(float speed) { speed_ = speed; }
inline float getSpeed() const { return speed_; }
inline const std::vector<Event> *events() const { return &events_; }
inline const std::map<int, std::unique_ptr<Segment>> &segments() const { return segments_; }
inline const std::string &carFingerprint() const { return car_fingerprint_; }
inline const std::vector<std::tuple<double, double, TimelineType>> getTimeline() {
std::lock_guard lk(timeline_lock);
return timeline;
}
signals:
void streamStarted();
void segmentsMerged();
void seekedTo(double sec);
void qLogLoaded(int segnum, std::shared_ptr<LogReader> qlog);
protected slots:
void segmentLoadFinished(bool success);
protected:
typedef std::map<int, std::unique_ptr<Segment>> SegmentMap;
std::optional<uint64_t> find(FindFlag flag);
void pauseStreamThread();
void startStream(const Segment *cur_segment);
void streamThread();
void updateSegmentsCache();
void loadSegmentInRange(SegmentMap::iterator begin, SegmentMap::iterator cur, SegmentMap::iterator end);
void mergeSegments(const SegmentMap::iterator &begin, const SegmentMap::iterator &end);
void updateEvents(const std::function<bool()>& update_events_function);
std::vector<Event>::const_iterator publishEvents(std::vector<Event>::const_iterator first,
std::vector<Event>::const_iterator last);
void publishMessage(const Event *e);
void publishFrame(const Event *e);
void buildTimeline();
inline bool isSegmentMerged(int n) const { return merged_segments_.count(n) > 0; }
pthread_t stream_thread_id = 0;
QThread *stream_thread_ = nullptr;
std::mutex stream_lock_;
bool user_paused_ = false;
std::condition_variable stream_cv_;
std::atomic<int> current_segment_ = 0;
double seeking_to_seconds_ = -1;
SegmentMap segments_;
// the following variables must be protected with stream_lock_
std::atomic<bool> exit_ = false;
std::atomic<bool> paused_ = false;
bool events_ready_ = false;
QDateTime route_date_time_;
uint64_t route_start_ts_ = 0;
std::atomic<uint64_t> cur_mono_time_ = 0;
std::vector<Event> events_;
std::set<int> merged_segments_;
// messaging
SubMaster *sm = nullptr;
std::unique_ptr<PubMaster> pm;
std::vector<const char*> sockets_;
std::vector<bool> filters_;
std::unique_ptr<Route> route_;
std::unique_ptr<CameraServer> camera_server_;
std::atomic<uint32_t> flags_ = REPLAY_FLAG_NONE;
std::mutex timeline_lock;
QFuture<void> timeline_future;
std::vector<std::tuple<double, double, TimelineType>> timeline;
std::string car_fingerprint_;
std::atomic<float> speed_ = 1.0;
replayEventFilter event_filter = nullptr;
void *filter_opaque = nullptr;
int segment_cache_limit = MIN_SEGMENTS_CACHE;
};
|
2301_81045437/openpilot
|
tools/replay/replay.h
|
C++
|
mit
| 5,638
|
#include "tools/replay/route.h"
#include <QDir>
#include <QEventLoop>
#include <QJsonArray>
#include <QJsonDocument>
#include <QRegularExpression>
#include <QtConcurrent>
#include <array>
#include "selfdrive/ui/qt/api.h"
#include "system/hardware/hw.h"
#include "tools/replay/replay.h"
#include "tools/replay/util.h"
Route::Route(const QString &route, const QString &data_dir) : data_dir_(data_dir) {
route_ = parseRoute(route);
}
RouteIdentifier Route::parseRoute(const QString &str) {
RouteIdentifier identifier = {};
QRegularExpression rx(R"(^((?<dongle_id>[a-z0-9]{16})[|_/])?(?<timestamp>.{20})((?<separator>--|/)(?<range>((-?\d+(:(-?\d+)?)?)|(:-?\d+))))?$)");
if (auto match = rx.match(str); match.hasMatch()) {
identifier.dongle_id = match.captured("dongle_id");
identifier.timestamp = match.captured("timestamp");
identifier.str = identifier.dongle_id + "|" + identifier.timestamp;
auto range_str = match.captured("range");
if (auto separator = match.captured("separator"); separator == "/" && !range_str.isEmpty()) {
auto range = range_str.split(":");
identifier.begin_segment = identifier.end_segment = range[0].toInt();
if (range.size() == 2) {
identifier.end_segment = range[1].isEmpty() ? -1 : range[1].toInt();
}
} else if (separator == "--") {
identifier.begin_segment = range_str.toInt();
}
}
return identifier;
}
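// Illustrative examples of strings accepted by the regex above (formats assumed
// from its capture groups):
//   "a2a0ccea32023010|2023-07-27--13-01-19"      -> whole route
//   "a2a0ccea32023010|2023-07-27--13-01-19--3"   -> begin at segment 3
//   "a2a0ccea32023010/2023-07-27--13-01-19/2:5"  -> segments 2 through 5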
bool Route::load() {
if (route_.str.isEmpty() || (data_dir_.isEmpty() && route_.dongle_id.isEmpty())) {
rInfo("invalid route format");
return false;
}
date_time_ = QDateTime::fromString(route_.timestamp, "yyyy-MM-dd--HH-mm-ss");
bool ret = data_dir_.isEmpty() ? loadFromServer() : loadFromLocal();
if (ret) {
if (route_.begin_segment == -1) route_.begin_segment = segments_.rbegin()->first;
if (route_.end_segment == -1) route_.end_segment = segments_.rbegin()->first;
for (auto it = segments_.begin(); it != segments_.end(); /**/) {
if (it->first < route_.begin_segment || it->first > route_.end_segment) {
it = segments_.erase(it);
} else {
++it;
}
}
}
return !segments_.empty();
}
bool Route::loadFromServer(int retries) {
for (int i = 1; i <= retries; ++i) {
QString result;
QEventLoop loop;
HttpRequest http(nullptr, !Hardware::PC());
QObject::connect(&http, &HttpRequest::requestDone, [&loop, &result](const QString &json, bool success, QNetworkReply::NetworkError err) {
result = json;
loop.exit((int)err);
});
http.sendRequest(CommaApi::BASE_URL + "/v1/route/" + route_.str + "/files");
auto err = (QNetworkReply::NetworkError)loop.exec();
if (err == QNetworkReply::NoError) {
return loadFromJson(result);
} else if (err == QNetworkReply::ContentAccessDenied || err == QNetworkReply::AuthenticationRequiredError) {
rWarning(">> Unauthorized. Authenticate with tools/lib/auth.py <<");
return false;
}
rWarning("Retrying %d/%d", i, retries);
util::sleep_for(3000);
}
return false;
}
bool Route::loadFromJson(const QString &json) {
QRegExp rx(R"(\/(\d+)\/)");
for (const auto &value : QJsonDocument::fromJson(json.trimmed().toUtf8()).object()) {
for (const auto &url : value.toArray()) {
QString url_str = url.toString();
if (rx.indexIn(url_str) != -1) {
addFileToSegment(rx.cap(1).toInt(), url_str);
}
}
}
return !segments_.empty();
}
bool Route::loadFromLocal() {
QDirIterator it(data_dir_, {QString("%1--*").arg(route_.timestamp)}, QDir::Dirs | QDir::NoDotAndDotDot);
while (it.hasNext()) {
QString segment = it.next();
const int seg_num = segment.mid(segment.lastIndexOf("--") + 2).toInt();
QDir segment_dir(segment);
for (const auto &f : segment_dir.entryList(QDir::Files)) {
addFileToSegment(seg_num, segment_dir.absoluteFilePath(f));
}
}
return !segments_.empty();
}
void Route::addFileToSegment(int n, const QString &file) {
QString name = QUrl(file).fileName();
const int pos = name.lastIndexOf("--");
name = pos != -1 ? name.mid(pos + 2) : name;
if (name == "rlog.bz2" || name == "rlog") {
segments_[n].rlog = file;
} else if (name == "qlog.bz2" || name == "qlog") {
segments_[n].qlog = file;
} else if (name == "fcamera.hevc") {
segments_[n].road_cam = file;
} else if (name == "dcamera.hevc") {
segments_[n].driver_cam = file;
} else if (name == "ecamera.hevc") {
segments_[n].wide_road_cam = file;
} else if (name == "qcamera.ts") {
segments_[n].qcamera = file;
}
}
// class Segment
Segment::Segment(int n, const SegmentFile &files, uint32_t flags, const std::vector<bool> &filters)
: seg_num(n), flags(flags), filters_(filters) {
  // [RoadCam, DriverCam, WideRoadCam, log]; falls back to qcamera/qlog
const std::array file_list = {
(flags & REPLAY_FLAG_QCAMERA) || files.road_cam.isEmpty() ? files.qcamera : files.road_cam,
flags & REPLAY_FLAG_DCAM ? files.driver_cam : "",
flags & REPLAY_FLAG_ECAM ? files.wide_road_cam : "",
files.rlog.isEmpty() ? files.qlog : files.rlog,
};
  for (int i = 0; i < (int)file_list.size(); ++i) {
if (!file_list[i].isEmpty() && (!(flags & REPLAY_FLAG_NO_VIPC) || i >= MAX_CAMERAS)) {
++loading_;
synchronizer_.addFuture(QtConcurrent::run(this, &Segment::loadFile, i, file_list[i].toStdString()));
}
}
}
Segment::~Segment() {
disconnect();
abort_ = true;
synchronizer_.setCancelOnWait(true);
synchronizer_.waitForFinished();
}
void Segment::loadFile(int id, const std::string file) {
const bool local_cache = !(flags & REPLAY_FLAG_NO_FILE_CACHE);
bool success = false;
if (id < MAX_CAMERAS) {
frames[id] = std::make_unique<FrameReader>();
success = frames[id]->load((CameraType)id, file, flags & REPLAY_FLAG_NO_HW_DECODER, &abort_, local_cache, 20 * 1024 * 1024, 3);
} else {
log = std::make_unique<LogReader>(filters_);
success = log->load(file, &abort_, local_cache, 0, 3);
}
if (!success) {
// abort all loading jobs.
abort_ = true;
}
if (--loading_ == 0) {
emit loadFinished(!abort_);
}
}
|
2301_81045437/openpilot
|
tools/replay/route.cc
|
C++
|
mit
| 6,149
|
#pragma once
#include <map>
#include <memory>
#include <string>
#include <vector>
#include <QDateTime>
#include <QFutureSynchronizer>
#include "tools/replay/framereader.h"
#include "tools/replay/logreader.h"
#include "tools/replay/util.h"
struct RouteIdentifier {
QString dongle_id;
QString timestamp;
int begin_segment = 0;
int end_segment = -1;
QString str;
};
struct SegmentFile {
QString rlog;
QString qlog;
QString road_cam;
QString driver_cam;
QString wide_road_cam;
QString qcamera;
};
class Route {
public:
Route(const QString &route, const QString &data_dir = {});
bool load();
inline const QString &name() const { return route_.str; }
inline const QDateTime datetime() const { return date_time_; }
inline const QString &dir() const { return data_dir_; }
inline const RouteIdentifier &identifier() const { return route_; }
inline const std::map<int, SegmentFile> &segments() const { return segments_; }
inline const SegmentFile &at(int n) { return segments_.at(n); }
static RouteIdentifier parseRoute(const QString &str);
protected:
bool loadFromLocal();
bool loadFromServer(int retries = 3);
bool loadFromJson(const QString &json);
void addFileToSegment(int seg_num, const QString &file);
RouteIdentifier route_ = {};
QString data_dir_;
std::map<int, SegmentFile> segments_;
QDateTime date_time_;
};
class Segment : public QObject {
Q_OBJECT
public:
Segment(int n, const SegmentFile &files, uint32_t flags, const std::vector<bool> &filters = {});
~Segment();
inline bool isLoaded() const { return !loading_ && !abort_; }
const int seg_num = 0;
std::unique_ptr<LogReader> log;
std::unique_ptr<FrameReader> frames[MAX_CAMERAS] = {};
signals:
void loadFinished(bool success);
protected:
void loadFile(int id, const std::string file);
std::atomic<bool> abort_ = false;
std::atomic<int> loading_ = 0;
QFutureSynchronizer<void> synchronizer_;
uint32_t flags;
std::vector<bool> filters_;
};
|
2301_81045437/openpilot
|
tools/replay/route.h
|
C++
|
mit
| 1,991
|
#include <chrono>
#include <thread>
#include <QEventLoop>
#include "catch2/catch.hpp"
#include "common/util.h"
#include "tools/replay/replay.h"
#include "tools/replay/util.h"
const std::string TEST_RLOG_URL = "https://commadataci.blob.core.windows.net/openpilotci/0c94aa1e1296d7c6/2021-05-05--19-48-37/0/rlog.bz2";
const std::string TEST_RLOG_CHECKSUM = "5b966d4bb21a100a8c4e59195faeb741b975ccbe268211765efd1763d892bfb3";
const int TEST_REPLAY_SEGMENTS = std::getenv("TEST_REPLAY_SEGMENTS") ? atoi(std::getenv("TEST_REPLAY_SEGMENTS")) : 1;
bool download_to_file(const std::string &url, const std::string &local_file, int chunk_size = 5 * 1024 * 1024, int retries = 3) {
do {
if (httpDownload(url, local_file, chunk_size)) {
return true;
}
std::this_thread::sleep_for(std::chrono::milliseconds(500));
} while (--retries >= 0);
return false;
}
TEST_CASE("httpMultiPartDownload") {
char filename[] = "/tmp/XXXXXX";
close(mkstemp(filename));
const size_t chunk_size = 5 * 1024 * 1024;
std::string content;
SECTION("download to file") {
REQUIRE(download_to_file(TEST_RLOG_URL, filename, chunk_size));
content = util::read_file(filename);
}
SECTION("download to buffer") {
for (int i = 0; i < 3 && content.empty(); ++i) {
content = httpGet(TEST_RLOG_URL, chunk_size);
std::this_thread::sleep_for(std::chrono::milliseconds(500));
}
REQUIRE(!content.empty());
}
REQUIRE(content.size() == 9112651);
REQUIRE(sha256(content) == TEST_RLOG_CHECKSUM);
}
TEST_CASE("FileReader") {
auto enable_local_cache = GENERATE(true, false);
std::string cache_file = cacheFilePath(TEST_RLOG_URL);
system(("rm " + cache_file + " -f").c_str());
FileReader reader(enable_local_cache);
std::string content = reader.read(TEST_RLOG_URL);
REQUIRE(sha256(content) == TEST_RLOG_CHECKSUM);
if (enable_local_cache) {
REQUIRE(sha256(util::read_file(cache_file)) == TEST_RLOG_CHECKSUM);
} else {
REQUIRE(util::file_exists(cache_file) == false);
}
}
TEST_CASE("LogReader") {
SECTION("corrupt log") {
FileReader reader(true);
std::string corrupt_content = reader.read(TEST_RLOG_URL);
corrupt_content.resize(corrupt_content.length() / 2);
corrupt_content = decompressBZ2(corrupt_content);
LogReader log;
REQUIRE(log.load(corrupt_content.data(), corrupt_content.size()));
REQUIRE(log.events.size() > 0);
}
}
void read_segment(int n, const SegmentFile &segment_file, uint32_t flags) {
QEventLoop loop;
Segment segment(n, segment_file, flags);
QObject::connect(&segment, &Segment::loadFinished, [&]() {
REQUIRE(segment.isLoaded() == true);
REQUIRE(segment.log != nullptr);
REQUIRE(segment.frames[RoadCam] != nullptr);
if (flags & REPLAY_FLAG_DCAM) {
REQUIRE(segment.frames[DriverCam] != nullptr);
}
if (flags & REPLAY_FLAG_ECAM) {
REQUIRE(segment.frames[WideRoadCam] != nullptr);
}
// test LogReader & FrameReader
REQUIRE(segment.log->events.size() > 0);
REQUIRE(std::is_sorted(segment.log->events.begin(), segment.log->events.end()));
for (auto cam : ALL_CAMERAS) {
auto &fr = segment.frames[cam];
if (!fr) continue;
if (cam == RoadCam || cam == WideRoadCam) {
REQUIRE(fr->getFrameCount() == 1200);
}
auto [nv12_width, nv12_height, nv12_buffer_size] = get_nv12_info(fr->width, fr->height);
VisionBuf buf;
buf.allocate(nv12_buffer_size);
buf.init_yuv(fr->width, fr->height, nv12_width, nv12_width * nv12_height);
// sequence get 100 frames
for (int i = 0; i < 100; ++i) {
REQUIRE(fr->get(i, &buf));
}
}
loop.quit();
});
loop.exec();
}
std::string download_demo_route() {
static std::string data_dir;
if (data_dir == "") {
char tmp_path[] = "/tmp/root_XXXXXX";
data_dir = mkdtemp(tmp_path);
Route remote_route(DEMO_ROUTE);
assert(remote_route.load());
// Create a local route from remote for testing
const std::string route_name = DEMO_ROUTE.mid(17).toStdString();
for (int i = 0; i < 2; ++i) {
std::string log_path = util::string_format("%s/%s--%d/", data_dir.c_str(), route_name.c_str(), i);
util::create_directories(log_path, 0755);
REQUIRE(download_to_file(remote_route.at(i).rlog.toStdString(), log_path + "rlog.bz2"));
REQUIRE(download_to_file(remote_route.at(i).qcamera.toStdString(), log_path + "qcamera.ts"));
}
}
return data_dir;
}
TEST_CASE("Local route") {
std::string data_dir = download_demo_route();
auto flags = GENERATE(0, REPLAY_FLAG_QCAMERA);
Route route(DEMO_ROUTE, QString::fromStdString(data_dir));
REQUIRE(route.load());
REQUIRE(route.segments().size() == 2);
for (int i = 0; i < TEST_REPLAY_SEGMENTS; ++i) {
read_segment(i, route.at(i), flags);
}
}
TEST_CASE("Remote route") {
auto flags = GENERATE(0, REPLAY_FLAG_QCAMERA);
Route route(DEMO_ROUTE);
REQUIRE(route.load());
REQUIRE(route.segments().size() == 13);
for (int i = 0; i < TEST_REPLAY_SEGMENTS; ++i) {
read_segment(i, route.at(i), flags);
}
}
TEST_CASE("seek_to") {
QEventLoop loop;
int seek_to = util::random_int(0, 2 * 59);
Replay replay(DEMO_ROUTE, {}, {}, nullptr, REPLAY_FLAG_NO_VIPC);
QObject::connect(&replay, &Replay::seekedTo, [&](double sec) {
INFO("seek to " << seek_to << "s seeked to" << sec);
REQUIRE(sec >= seek_to);
loop.quit();
});
REQUIRE(replay.load());
replay.start();
replay.seekTo(seek_to, false);
loop.exec();
}
|
2301_81045437/openpilot
|
tools/replay/tests/test_replay.cc
|
C++
|
mit
| 5,517
|
#define CATCH_CONFIG_RUNNER
#include "catch2/catch.hpp"
#include <QCoreApplication>
int main(int argc, char **argv) {
// unit tests for Qt
QCoreApplication app(argc, argv);
const int res = Catch::Session().run(argc, argv);
return (res < 0xff ? res : 0xff);
}
|
2301_81045437/openpilot
|
tools/replay/tests/test_runner.cc
|
C++
|
mit
| 268
|
#!/usr/bin/env python3
import argparse
import os
import sys
import cv2
import numpy as np
import pygame
import cereal.messaging as messaging
from openpilot.common.numpy_fast import clip
from openpilot.common.basedir import BASEDIR
from openpilot.common.transformations.camera import DEVICE_CAMERAS
from openpilot.tools.replay.lib.ui_helpers import (UP,
BLACK, GREEN,
YELLOW, Calibration,
get_blank_lid_overlay, init_plots,
maybe_update_radar_points, plot_lead,
plot_model,
pygame_modules_have_loaded)
from cereal.visionipc import VisionIpcClient, VisionStreamType
os.environ['BASEDIR'] = BASEDIR
ANGLE_SCALE = 5.0
def ui_thread(addr):
cv2.setNumThreads(1)
pygame.init()
pygame.font.init()
assert pygame_modules_have_loaded()
disp_info = pygame.display.Info()
max_height = disp_info.current_h
hor_mode = os.getenv("HORIZONTAL") is not None
  hor_mode = hor_mode or max_height < 960 + 300
if hor_mode:
size = (640+384+640, 960)
write_x = 5
write_y = 680
else:
size = (640+384, 960+300)
write_x = 645
write_y = 970
pygame.display.set_caption("openpilot debug UI")
screen = pygame.display.set_mode(size, pygame.DOUBLEBUF)
alert1_font = pygame.font.SysFont("arial", 30)
alert2_font = pygame.font.SysFont("arial", 20)
info_font = pygame.font.SysFont("arial", 15)
camera_surface = pygame.surface.Surface((640, 480), 0, 24).convert()
top_down_surface = pygame.surface.Surface((UP.lidar_x, UP.lidar_y), 0, 8)
sm = messaging.SubMaster(['carState', 'longitudinalPlan', 'carControl', 'radarState', 'liveCalibration', 'controlsState',
'liveTracks', 'modelV2', 'liveParameters', 'roadCameraState'], addr=addr)
img = np.zeros((480, 640, 3), dtype='uint8')
imgff = None
num_px = 0
calibration = None
lid_overlay_blank = get_blank_lid_overlay(UP)
# plots
name_to_arr_idx = { "gas": 0,
"computer_gas": 1,
"user_brake": 2,
"computer_brake": 3,
"v_ego": 4,
"v_pid": 5,
"angle_steers_des": 6,
"angle_steers": 7,
"angle_steers_k": 8,
"steer_torque": 9,
"v_override": 10,
"v_cruise": 11,
"a_ego": 12,
"a_target": 13}
plot_arr = np.zeros((100, len(name_to_arr_idx.values())))
plot_xlims = [(0, plot_arr.shape[0]), (0, plot_arr.shape[0]), (0, plot_arr.shape[0]), (0, plot_arr.shape[0])]
plot_ylims = [(-0.1, 1.1), (-ANGLE_SCALE, ANGLE_SCALE), (0., 75.), (-3.0, 2.0)]
plot_names = [["gas", "computer_gas", "user_brake", "computer_brake"],
["angle_steers", "angle_steers_des", "angle_steers_k", "steer_torque"],
["v_ego", "v_override", "v_pid", "v_cruise"],
["a_ego", "a_target"]]
plot_colors = [["b", "b", "g", "r", "y"],
["b", "g", "y", "r"],
["b", "g", "r", "y"],
["b", "r"]]
plot_styles = [["-", "-", "-", "-", "-"],
["-", "-", "-", "-"],
["-", "-", "-", "-"],
["-", "-"]]
draw_plots = init_plots(plot_arr, name_to_arr_idx, plot_xlims, plot_ylims, plot_names, plot_colors, plot_styles)
vipc_client = VisionIpcClient("camerad", VisionStreamType.VISION_STREAM_ROAD, True)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
screen.fill((64, 64, 64))
lid_overlay = lid_overlay_blank.copy()
top_down = top_down_surface, lid_overlay
# ***** frame *****
if not vipc_client.is_connected():
vipc_client.connect(True)
yuv_img_raw = vipc_client.recv()
if yuv_img_raw is None or not yuv_img_raw.data.any():
continue
sm.update(0)
camera = DEVICE_CAMERAS[("tici", str(sm['roadCameraState'].sensor))]
imgff = np.frombuffer(yuv_img_raw.data, dtype=np.uint8).reshape((len(yuv_img_raw.data) // vipc_client.stride, vipc_client.stride))
num_px = vipc_client.width * vipc_client.height
rgb = cv2.cvtColor(imgff[:vipc_client.height * 3 // 2, :vipc_client.width], cv2.COLOR_YUV2RGB_NV12)
qcam = "QCAM" in os.environ
bb_scale = (528 if qcam else camera.fcam.width) / 640.
calib_scale = camera.fcam.width / 640.
zoom_matrix = np.asarray([
[bb_scale, 0., 0.],
[0., bb_scale, 0.],
[0., 0., 1.]])
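    # WARP_INVERSE_MAP means zoom_matrix maps output (640x480 preview) pixels back
    # to source pixels, so this scales the camera frame down by bb_scale.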
cv2.warpAffine(rgb, zoom_matrix[:2], (img.shape[1], img.shape[0]), dst=img, flags=cv2.WARP_INVERSE_MAP)
intrinsic_matrix = camera.fcam.intrinsics
w = sm['controlsState'].lateralControlState.which()
if w == 'lqrStateDEPRECATED':
angle_steers_k = sm['controlsState'].lateralControlState.lqrStateDEPRECATED.steeringAngleDeg
elif w == 'indiState':
angle_steers_k = sm['controlsState'].lateralControlState.indiState.steeringAngleDeg
else:
angle_steers_k = np.inf
plot_arr[:-1] = plot_arr[1:]
plot_arr[-1, name_to_arr_idx['angle_steers']] = sm['carState'].steeringAngleDeg
plot_arr[-1, name_to_arr_idx['angle_steers_des']] = sm['carControl'].actuators.steeringAngleDeg
plot_arr[-1, name_to_arr_idx['angle_steers_k']] = angle_steers_k
plot_arr[-1, name_to_arr_idx['gas']] = sm['carState'].gas
# TODO gas is deprecated
plot_arr[-1, name_to_arr_idx['computer_gas']] = clip(sm['carControl'].actuators.accel/4.0, 0.0, 1.0)
plot_arr[-1, name_to_arr_idx['user_brake']] = sm['carState'].brake
plot_arr[-1, name_to_arr_idx['steer_torque']] = sm['carControl'].actuators.steer * ANGLE_SCALE
# TODO brake is deprecated
plot_arr[-1, name_to_arr_idx['computer_brake']] = clip(-sm['carControl'].actuators.accel/4.0, 0.0, 1.0)
plot_arr[-1, name_to_arr_idx['v_ego']] = sm['carState'].vEgo
plot_arr[-1, name_to_arr_idx['v_pid']] = sm['controlsState'].vPid
plot_arr[-1, name_to_arr_idx['v_cruise']] = sm['carState'].cruiseState.speed
plot_arr[-1, name_to_arr_idx['a_ego']] = sm['carState'].aEgo
if len(sm['longitudinalPlan'].accels):
plot_arr[-1, name_to_arr_idx['a_target']] = sm['longitudinalPlan'].accels[0]
if sm.recv_frame['modelV2']:
plot_model(sm['modelV2'], img, calibration, top_down)
if sm.recv_frame['radarState']:
plot_lead(sm['radarState'], top_down)
# draw all radar points
maybe_update_radar_points(sm['liveTracks'], top_down[1])
if sm.updated['liveCalibration'] and num_px:
rpyCalib = np.asarray(sm['liveCalibration'].rpyCalib)
calibration = Calibration(num_px, rpyCalib, intrinsic_matrix, calib_scale)
# *** blits ***
pygame.surfarray.blit_array(camera_surface, img.swapaxes(0, 1))
screen.blit(camera_surface, (0, 0))
# display alerts
alert_line1 = alert1_font.render(sm['controlsState'].alertText1, True, (255, 0, 0))
alert_line2 = alert2_font.render(sm['controlsState'].alertText2, True, (255, 0, 0))
screen.blit(alert_line1, (180, 150))
screen.blit(alert_line2, (180, 190))
if hor_mode:
screen.blit(draw_plots(plot_arr), (640+384, 0))
else:
screen.blit(draw_plots(plot_arr), (0, 600))
pygame.surfarray.blit_array(*top_down)
screen.blit(top_down[0], (640, 0))
SPACING = 25
lines = [
info_font.render("ENABLED", True, GREEN if sm['controlsState'].enabled else BLACK),
info_font.render("SPEED: " + str(round(sm['carState'].vEgo, 1)) + " m/s", True, YELLOW),
info_font.render("LONG CONTROL STATE: " + str(sm['controlsState'].longControlState), True, YELLOW),
info_font.render("LONG MPC SOURCE: " + str(sm['longitudinalPlan'].longitudinalPlanSource), True, YELLOW),
None,
info_font.render("ANGLE OFFSET (AVG): " + str(round(sm['liveParameters'].angleOffsetAverageDeg, 2)) + " deg", True, YELLOW),
info_font.render("ANGLE OFFSET (INSTANT): " + str(round(sm['liveParameters'].angleOffsetDeg, 2)) + " deg", True, YELLOW),
info_font.render("STIFFNESS: " + str(round(sm['liveParameters'].stiffnessFactor * 100., 2)) + " %", True, YELLOW),
info_font.render("STEER RATIO: " + str(round(sm['liveParameters'].steerRatio, 2)), True, YELLOW)
]
for i, line in enumerate(lines):
if line is not None:
screen.blit(line, (write_x, write_y + i * SPACING))
# this takes time...vsync or something
pygame.display.flip()
def get_arg_parser():
parser = argparse.ArgumentParser(
description="Show replay data in a UI.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("ip_address", nargs="?", default="127.0.0.1",
help="The ip address on which to receive zmq messages.")
parser.add_argument("--frame-address", default=None,
help="The frame address (fully qualified ZMQ endpoint for frames) on which to receive zmq messages.")
return parser
if __name__ == "__main__":
args = get_arg_parser().parse_args(sys.argv[1:])
if args.ip_address != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
ui_thread(args.ip_address)
|
2301_81045437/openpilot
|
tools/replay/ui.py
|
Python
|
mit
| 9,396
|
#!/usr/bin/env python3
import argparse
import bisect
import select
import sys
import termios
import time
import tty
from collections import defaultdict
import cereal.messaging as messaging
from openpilot.tools.lib.framereader import FrameReader
from openpilot.tools.lib.logreader import LogReader
from openpilot.tools.lib.openpilotci import get_url
IGNORE = ['initData', 'sentinel']
def input_ready():
return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])
def replay(route, segment, loop):
route = route.replace('|', '/')
lr = LogReader(get_url(route, segment))
fr = FrameReader(get_url(route, segment, "fcamera"), readahead=True)
# Build mapping from frameId to segmentId from roadEncodeIdx, type == fullHEVC
msgs = [m for m in lr if m.which() not in IGNORE]
msgs = sorted(msgs, key=lambda m: m.logMonoTime)
times = [m.logMonoTime for m in msgs]
frame_idx = {m.roadEncodeIdx.frameId: m.roadEncodeIdx.segmentId for m in msgs if m.which() == 'roadEncodeIdx' and m.roadEncodeIdx.type == 'fullHEVC'}
socks = {}
lag = 0.0
i = 0
max_i = len(msgs) - 2
while True:
msg = msgs[i].as_builder()
next_msg = msgs[i + 1]
start_time = time.time()
w = msg.which()
if w == 'roadCameraState':
try:
img = fr.get(frame_idx[msg.roadCameraState.frameId], pix_fmt="rgb24")
img = img[0][:, :, ::-1] # Convert RGB to BGR, which is what the camera outputs
msg.roadCameraState.image = img.flatten().tobytes()
except (KeyError, ValueError):
pass
if w not in socks:
socks[w] = messaging.pub_sock(w)
try:
if socks[w]:
socks[w].send(msg.to_bytes())
except messaging.messaging_pyx.MultiplePublishersError:
socks[w] = None
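    # Pace playback: accumulate the log-time delta between messages, subtract the
    # wall-clock time spent this iteration, and sleep off any positive remainder.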
lag += (next_msg.logMonoTime - msg.logMonoTime) / 1e9
lag -= time.time() - start_time
dt = max(lag, 0.0)
lag -= dt
time.sleep(dt)
if lag < -1.0 and i % 1000 == 0:
print(f"{-lag:.2f} s behind")
if input_ready():
key = sys.stdin.read(1)
# Handle pause
if key == " ":
while True:
if input_ready() and sys.stdin.read(1) == " ":
break
time.sleep(0.01)
      # Handle seek: 's' jumps forward 10 s, 'S' jumps back 10 s; other keys leave dt at 0
dt = defaultdict(int, s=10, S=-10)[key]
new_time = msgs[i].logMonoTime + dt * 1e9
i = bisect.bisect_left(times, new_time)
i = (i + 1) % max_i if loop else min(i + 1, max_i)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--loop", action='store_true')
parser.add_argument("route")
parser.add_argument("segment")
args = parser.parse_args()
orig_settings = termios.tcgetattr(sys.stdin)
tty.setcbreak(sys.stdin)
try:
replay(args.route, args.segment, args.loop)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)
except Exception:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)
raise
|
2301_81045437/openpilot
|
tools/replay/unlog_ci_segment.py
|
Python
|
mit
| 2,921
|
#include "tools/replay/util.h"
#include <bzlib.h>
#include <curl/curl.h>
#include <openssl/sha.h>
#include <cassert>
#include <algorithm>
#include <cmath>
#include <cstdarg>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <mutex>
#include <numeric>
#include <utility>
#include "common/timing.h"
#include "common/util.h"
ReplayMessageHandler message_handler = nullptr;
void installMessageHandler(ReplayMessageHandler handler) { message_handler = handler; }
void logMessage(ReplyMsgType type, const char *fmt, ...) {
static std::mutex lock;
std::lock_guard lk(lock);
char *msg_buf = nullptr;
va_list args;
va_start(args, fmt);
int ret = vasprintf(&msg_buf, fmt, args);
va_end(args);
if (ret <= 0 || !msg_buf) return;
if (message_handler) {
message_handler(type, msg_buf);
} else {
if (type == ReplyMsgType::Debug) {
std::cout << "\033[38;5;248m" << msg_buf << "\033[00m" << std::endl;
} else if (type == ReplyMsgType::Warning) {
std::cout << "\033[38;5;227m" << msg_buf << "\033[00m" << std::endl;
} else if (type == ReplyMsgType::Critical) {
std::cout << "\033[38;5;196m" << msg_buf << "\033[00m" << std::endl;
} else {
std::cout << msg_buf << std::endl;
}
}
free(msg_buf);
}
namespace {
struct CURLGlobalInitializer {
CURLGlobalInitializer() { curl_global_init(CURL_GLOBAL_DEFAULT); }
~CURLGlobalInitializer() { curl_global_cleanup(); }
};
static CURLGlobalInitializer curl_initializer;
template <class T>
struct MultiPartWriter {
T *buf;
size_t *total_written;
size_t offset;
size_t end;
size_t write(char *data, size_t size, size_t count) {
size_t bytes = size * count;
if ((offset + bytes) > end) return 0;
if constexpr (std::is_same<T, std::string>::value) {
memcpy(buf->data() + offset, data, bytes);
} else if constexpr (std::is_same<T, std::ofstream>::value) {
buf->seekp(offset);
buf->write(data, bytes);
}
offset += bytes;
*total_written += bytes;
return bytes;
}
};
template <class T>
size_t write_cb(char *data, size_t size, size_t count, void *userp) {
auto w = (MultiPartWriter<T> *)userp;
return w->write(data, size, count);
}
size_t dummy_write_cb(char *data, size_t size, size_t count, void *userp) { return size * count; }
struct DownloadStats {
void installDownloadProgressHandler(DownloadProgressHandler handler) {
std::lock_guard lk(lock);
download_progress_handler = handler;
}
void add(const std::string &url, uint64_t total_bytes) {
std::lock_guard lk(lock);
items[url] = {0, total_bytes};
}
void remove(const std::string &url) {
std::lock_guard lk(lock);
items.erase(url);
}
void update(const std::string &url, uint64_t downloaded, bool success = true) {
std::lock_guard lk(lock);
items[url].first = downloaded;
    auto stat = std::accumulate(items.begin(), items.end(), std::pair<uint64_t, uint64_t>{}, [=](auto &a, auto &b){
return std::pair{a.first + b.second.first, a.second + b.second.second};
});
double tm = millis_since_boot();
if (download_progress_handler && ((tm - prev_tm) > 500 || !success || stat.first >= stat.second)) {
download_progress_handler(stat.first, stat.second, success);
prev_tm = tm;
}
}
std::mutex lock;
std::map<std::string, std::pair<uint64_t, uint64_t>> items;
double prev_tm = 0;
DownloadProgressHandler download_progress_handler = nullptr;
};
static DownloadStats download_stats;
} // namespace
void installDownloadProgressHandler(DownloadProgressHandler handler) {
download_stats.installDownloadProgressHandler(handler);
}
std::string formattedDataSize(size_t size) {
if (size < 1024) {
return std::to_string(size) + " B";
} else if (size < 1024 * 1024) {
return util::string_format("%.2f KB", (float)size / 1024);
} else {
return util::string_format("%.2f MB", (float)size / (1024 * 1024));
}
}
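// Issue a header-only (NOBODY) request and return the reported Content-Length,
// or 0 on failure or abort.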
size_t getRemoteFileSize(const std::string &url, std::atomic<bool> *abort) {
CURL *curl = curl_easy_init();
  if (!curl) return 0;  // callers treat 0 as failure; -1 would wrap to SIZE_MAX
curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, dummy_write_cb);
curl_easy_setopt(curl, CURLOPT_HEADER, 1);
curl_easy_setopt(curl, CURLOPT_NOBODY, 1);
CURLM *cm = curl_multi_init();
curl_multi_add_handle(cm, curl);
int still_running = 1;
while (still_running > 0 && !(abort && *abort)) {
CURLMcode mc = curl_multi_perform(cm, &still_running);
if (mc != CURLM_OK) break;
if (still_running > 0) {
curl_multi_wait(cm, nullptr, 0, 1000, nullptr);
}
}
double content_length = -1;
curl_easy_getinfo(curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &content_length);
curl_multi_remove_handle(cm, curl);
curl_easy_cleanup(curl);
curl_multi_cleanup(cm);
return content_length > 0 ? (size_t)content_length : 0;
}
std::string getUrlWithoutQuery(const std::string &url) {
size_t idx = url.find("?");
return (idx == std::string::npos ? url : url.substr(0, idx));
}
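// Download `url` into `buf` (a std::string or std::ofstream) using up to five
// parallel HTTP range requests of roughly `chunk_size` bytes each; progress is
// reported through the installed DownloadProgressHandler.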
template <class T>
bool httpDownload(const std::string &url, T &buf, size_t chunk_size, size_t content_length, std::atomic<bool> *abort) {
download_stats.add(url, content_length);
int parts = 1;
if (chunk_size > 0 && content_length > 10 * 1024 * 1024) {
parts = std::nearbyint(content_length / (float)chunk_size);
parts = std::clamp(parts, 1, 5);
}
CURLM *cm = curl_multi_init();
size_t written = 0;
std::map<CURL *, MultiPartWriter<T>> writers;
  const size_t part_size = content_length / parts;
for (int i = 0; i < parts; ++i) {
CURL *eh = curl_easy_init();
writers[eh] = {
.buf = &buf,
.total_written = &written,
.offset = (size_t)(i * part_size),
.end = i == parts - 1 ? content_length : (i + 1) * part_size,
};
curl_easy_setopt(eh, CURLOPT_WRITEFUNCTION, write_cb<T>);
curl_easy_setopt(eh, CURLOPT_WRITEDATA, (void *)(&writers[eh]));
curl_easy_setopt(eh, CURLOPT_URL, url.c_str());
    curl_easy_setopt(eh, CURLOPT_RANGE, util::string_format("%zu-%zu", writers[eh].offset, writers[eh].end - 1).c_str());
curl_easy_setopt(eh, CURLOPT_HTTPGET, 1);
curl_easy_setopt(eh, CURLOPT_NOSIGNAL, 1);
curl_easy_setopt(eh, CURLOPT_FOLLOWLOCATION, 1);
curl_multi_add_handle(cm, eh);
}
int still_running = 1;
size_t prev_written = 0;
while (still_running > 0 && !(abort && *abort)) {
CURLMcode mc = curl_multi_perform(cm, &still_running);
if (mc != CURLM_OK) {
break;
}
if (still_running > 0) {
curl_multi_wait(cm, nullptr, 0, 1000, nullptr);
}
if (((written - prev_written) / (double)content_length) >= 0.01) {
download_stats.update(url, written);
prev_written = written;
}
}
CURLMsg *msg;
int msgs_left = -1;
int complete = 0;
while ((msg = curl_multi_info_read(cm, &msgs_left)) && !(abort && *abort)) {
if (msg->msg == CURLMSG_DONE) {
if (msg->data.result == CURLE_OK) {
long res_status = 0;
curl_easy_getinfo(msg->easy_handle, CURLINFO_RESPONSE_CODE, &res_status);
if (res_status == 206) {
complete++;
} else {
rWarning("Download failed: http error code: %d", res_status);
}
} else {
rWarning("Download failed: connection failure: %d", msg->data.result);
}
}
}
bool success = complete == parts;
download_stats.update(url, written, success);
download_stats.remove(url);
for (const auto &[e, w] : writers) {
curl_multi_remove_handle(cm, e);
curl_easy_cleanup(e);
}
curl_multi_cleanup(cm);
return success;
}
std::string httpGet(const std::string &url, size_t chunk_size, std::atomic<bool> *abort) {
size_t size = getRemoteFileSize(url, abort);
if (size == 0) return {};
std::string result(size, '\0');
return httpDownload(url, result, chunk_size, size, abort) ? result : "";
}
bool httpDownload(const std::string &url, const std::string &file, size_t chunk_size, std::atomic<bool> *abort) {
size_t size = getRemoteFileSize(url, abort);
if (size == 0) return false;
std::ofstream of(file, std::ios::binary | std::ios::out);
of.seekp(size - 1).write("\0", 1);
return httpDownload(url, of, chunk_size, size, abort);
}
std::string decompressBZ2(const std::string &in, std::atomic<bool> *abort) {
return decompressBZ2((std::byte *)in.data(), in.size(), abort);
}
std::string decompressBZ2(const std::byte *in, size_t in_size, std::atomic<bool> *abort) {
if (in_size == 0) return {};
bz_stream strm = {};
int bzerror = BZ2_bzDecompressInit(&strm, 0, 0);
assert(bzerror == BZ_OK);
strm.next_in = (char *)in;
strm.avail_in = in_size;
std::string out(in_size * 5, '\0');
do {
strm.next_out = (char *)(&out[strm.total_out_lo32]);
strm.avail_out = out.size() - strm.total_out_lo32;
const char *prev_write_pos = strm.next_out;
bzerror = BZ2_bzDecompress(&strm);
if (bzerror == BZ_OK && prev_write_pos == strm.next_out) {
// content is corrupt
bzerror = BZ_STREAM_END;
rWarning("decompressBZ2 error : content is corrupt");
break;
}
if (bzerror == BZ_OK && strm.avail_in > 0 && strm.avail_out == 0) {
out.resize(out.size() * 2);
}
} while (bzerror == BZ_OK && !(abort && *abort));
BZ2_bzDecompressEnd(&strm);
if (bzerror == BZ_STREAM_END && !(abort && *abort)) {
out.resize(strm.total_out_lo32);
out.shrink_to_fit();
return out;
}
return {};
}
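// Usage sketch (illustrative): decompress a downloaded rlog.bz2 in memory.
//   std::string raw = httpGet(url);        // compressed bytes
//   std::string log = decompressBZ2(raw);  // empty string on failure/abort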
void precise_nano_sleep(int64_t nanoseconds) {
#ifdef __APPLE__
const long estimate_ns = 1 * 1e6; // 1ms
struct timespec req = {.tv_nsec = estimate_ns};
uint64_t start_sleep = nanos_since_boot();
while (nanoseconds > estimate_ns) {
nanosleep(&req, nullptr);
uint64_t end_sleep = nanos_since_boot();
nanoseconds -= (end_sleep - start_sleep);
start_sleep = end_sleep;
}
// spin wait
if (nanoseconds > 0) {
while ((nanos_since_boot() - start_sleep) <= nanoseconds) {
std::this_thread::yield();
}
}
#else
struct timespec req, rem;
  req.tv_sec = nanoseconds / (int64_t)1e9;
req.tv_nsec = nanoseconds % (int64_t)1e9;
while (clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem) && errno == EINTR) {
// Retry sleep if interrupted by a signal
req = rem;
}
#endif
}
std::string sha256(const std::string &str) {
unsigned char hash[SHA256_DIGEST_LENGTH];
SHA256_CTX sha256;
SHA256_Init(&sha256);
SHA256_Update(&sha256, str.c_str(), str.size());
SHA256_Final(hash, &sha256);
return util::hexdump(hash, SHA256_DIGEST_LENGTH);
}
// MonotonicBuffer
void *MonotonicBuffer::allocate(size_t bytes, size_t alignment) {
assert(bytes > 0);
void *p = std::align(alignment, bytes, current_buf, available);
if (p == nullptr) {
available = next_buffer_size = std::max(next_buffer_size, bytes);
current_buf = buffers.emplace_back(std::aligned_alloc(alignment, next_buffer_size));
next_buffer_size *= growth_factor;
p = current_buf;
}
current_buf = (char *)current_buf + bytes;
available -= bytes;
return p;
}
MonotonicBuffer::~MonotonicBuffer() {
for (auto buf : buffers) {
free(buf);
}
}
|
2301_81045437/openpilot
|
tools/replay/util.cc
|
C++
|
mit
| 11,155
|
#pragma once
#include <atomic>
#include <deque>
#include <functional>
#include <string>
enum class ReplyMsgType {
Info,
Debug,
Warning,
Critical
};
typedef std::function<void(ReplyMsgType type, const std::string &msg)> ReplayMessageHandler;
void installMessageHandler(ReplayMessageHandler);
void logMessage(ReplyMsgType type, const char* fmt, ...);
#define rInfo(fmt, ...) ::logMessage(ReplyMsgType::Info, fmt, ## __VA_ARGS__)
#define rDebug(fmt, ...) ::logMessage(ReplyMsgType::Debug, fmt, ## __VA_ARGS__)
#define rWarning(fmt, ...) ::logMessage(ReplyMsgType::Warning, fmt, ## __VA_ARGS__)
#define rError(fmt, ...) ::logMessage(ReplyMsgType::Critical, fmt, ## __VA_ARGS__)
class MonotonicBuffer {
public:
MonotonicBuffer(size_t initial_size) : next_buffer_size(initial_size) {}
~MonotonicBuffer();
void *allocate(size_t bytes, size_t alignment = 16ul);
void deallocate(void *p) {}
private:
void *current_buf = nullptr;
size_t next_buffer_size = 0;
size_t available = 0;
std::deque<void *> buffers;
static constexpr float growth_factor = 1.5;
};
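// Usage sketch (illustrative): a bump allocator; individual deallocate() calls
// are no-ops and all memory is released when the buffer is destroyed.
//   MonotonicBuffer buf(1024 * 1024);
//   void *p = buf.allocate(64);  // 16-byte aligned by default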
std::string sha256(const std::string &str);
void precise_nano_sleep(int64_t nanoseconds);
std::string decompressBZ2(const std::string &in, std::atomic<bool> *abort = nullptr);
std::string decompressBZ2(const std::byte *in, size_t in_size, std::atomic<bool> *abort = nullptr);
std::string getUrlWithoutQuery(const std::string &url);
size_t getRemoteFileSize(const std::string &url, std::atomic<bool> *abort = nullptr);
std::string httpGet(const std::string &url, size_t chunk_size = 0, std::atomic<bool> *abort = nullptr);
typedef std::function<void(uint64_t cur, uint64_t total, bool success)> DownloadProgressHandler;
void installDownloadProgressHandler(DownloadProgressHandler);
bool httpDownload(const std::string &url, const std::string &file, size_t chunk_size = 0, std::atomic<bool> *abort = nullptr);
std::string formattedDataSize(size_t size);
|
2301_81045437/openpilot
|
tools/replay/util.h
|
C++
|
mit
| 1,936
|
#!/usr/bin/env python3
import sys
import argparse
import multiprocessing
import rerun as rr
import rerun.blueprint as rrb
from functools import partial
from openpilot.tools.lib.logreader import LogReader
from cereal.services import SERVICE_LIST
NUM_CPUS = multiprocessing.cpu_count()
DEMO_ROUTE = "a2a0ccea32023010|2023-07-27--13-01-19"
def log_msg(msg, parent_key=''):
stack = [(msg, parent_key)]
while stack:
current_msg, current_parent_key = stack.pop()
if isinstance(current_msg, list):
for index, item in enumerate(current_msg):
new_key = f"{current_parent_key}/{index}"
if isinstance(item, (int, float)):
rr.log(str(new_key), rr.Scalar(item))
elif isinstance(item, dict):
stack.append((item, new_key))
elif isinstance(current_msg, dict):
for key, value in current_msg.items():
new_key = f"{current_parent_key}/{key}"
if isinstance(value, (int, float)):
rr.log(str(new_key), rr.Scalar(value))
elif isinstance(value, dict):
stack.append((value, new_key))
elif isinstance(value, list):
for index, item in enumerate(value):
if isinstance(item, (int, float)):
rr.log(f"{new_key}/{index}", rr.Scalar(item))
else:
pass # Not a plottable value
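# e.g. log_msg({"a": {"b": 1.5}, "c": [2, 3]}, "carState") emits scalars at the
# entity paths "carState/a/b", "carState/c/0" and "carState/c/1"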
def createBlueprint():
blueprint = None
timeSeriesViews = []
for topic in sorted(SERVICE_LIST.keys()):
timeSeriesViews.append(rrb.TimeSeriesView(name=topic, origin=f"/{topic}/", visible=False))
rr.log(topic, rr.SeriesLine(name=topic), timeless=True)
blueprint = rrb.Blueprint(rrb.Grid(rrb.Vertical(*timeSeriesViews,rrb.SelectionPanel(expanded=False),rrb.TimePanel(expanded=False)),
rrb.Spatial2DView(name="thumbnail", origin="/thumbnail")))
return blueprint
def log_thumbnail(thumbnailMsg):
bytesImgData = thumbnailMsg.get('thumbnail')
rr.log("/thumbnail", rr.ImageEncoded(contents=bytesImgData))
@rr.shutdown_at_exit
def process(lr):
rr.init("rerun_test")
rr.connect()
ret = []
for msg in lr:
ret.append(msg)
rr.set_time_nanos("TIMELINE", msg.logMonoTime)
if msg.which() != "thumbnail":
log_msg(msg.to_dict()[msg.which()], msg.which())
else:
log_thumbnail(msg.to_dict()[msg.which()])
return ret
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="A helper to run rerun on openpilot routes",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--demo", action="store_true", help="Use the demo route instead of providing one")
parser.add_argument("route_or_segment_name", nargs='?', help="The route or segment name to plot")
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
args = parser.parse_args()
blueprint = createBlueprint()
rr.init("rerun_test", default_blueprint=blueprint)
rr.spawn(connect=False) # child processes stream data to Viewer
route_or_segment_name = DEMO_ROUTE if args.demo else args.route_or_segment_name.strip()
print("Getting route log paths")
lr = LogReader(route_or_segment_name)
  lr.run_across_segments(NUM_CPUS, process)
|
2301_81045437/openpilot
|
tools/rerun/run.py
|
Python
|
mit
| 3,200
|
#!/usr/bin/env python3
import sys
if len(sys.argv) < 4:
print(f"{sys.argv[0]} <route> <segment> <frame number> [front|wide|driver]")
print('example: ./fetch_image_from_route.py "02c45f73a2e5c6e9|2020-06-01--18-03-08" 3 500 driver')
exit(0)
cameras = {
"front": "cameras",
"wide": "ecameras",
"driver": "dcameras"
}
import requests
from PIL import Image
from openpilot.tools.lib.auth_config import get_token
from openpilot.tools.lib.framereader import FrameReader
jwt = get_token()
route = sys.argv[1]
segment = int(sys.argv[2])
frame = int(sys.argv[3])
camera = cameras[sys.argv[4]] if len(sys.argv) > 4 and sys.argv[4] in cameras else "cameras"
url = f'https://api.commadotai.com/v1/route/{route}/files'
r = requests.get(url, headers={"Authorization": f"JWT {jwt}"}, timeout=10)
assert r.status_code == 200
print("got api response")
segments = r.json()[camera]
if segment >= len(segments):
  raise Exception(f"segment {segment} not found, got {len(segments)} segments")
fr = FrameReader(segments[segment])
if frame >= fr.frame_count:
  raise Exception(f"frame {frame} not found, got {fr.frame_count} frames")
im = Image.fromarray(fr.get(frame, count=1, pix_fmt="rgb24")[0])
fn = f"uxxx_{route.replace('|', '_')}_{segment}_{frame}.png"
im.save(fn)
print(f"saved {fn}")
|
2301_81045437/openpilot
|
tools/scripts/fetch_image_from_route.py
|
Python
|
mit
| 1,302
|
#!/usr/bin/env python
import argparse
import os
import sys
from openpilot.common.basedir import BASEDIR
from openpilot.tools.lib.logreader import LogReader
os.environ['BASEDIR'] = BASEDIR
def get_arg_parser():
  parser = argparse.ArgumentParser(
    description="Unlog ubloxRaw messages and save them to a file",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument("route", type=(lambda x: x.replace("#", "|")), nargs="?",
                      help="The route whose messages will be published.")
  parser.add_argument("--out_path", nargs='?', default='/data/ubloxRaw.stream',
                      help="Output file path for the raw capnp stream")
return parser
def main():
  args = get_arg_parser().parse_args(sys.argv[1:])
  lr = LogReader(args.route)
  i = 0
  with open(args.out_path, 'wb') as f:
    for msg in lr:
      smsg = msg.as_builder()
      if smsg.which() == 'ubloxRaw':
        f.write(smsg.to_bytes())
        i += 1
  print('All done')
  print(f'Wrote {i} msgs')
if __name__ == "__main__":
main()
|
2301_81045437/openpilot
|
tools/scripts/save_ubloxraw_stream.py
|
Python
|
mit
| 1,177
|
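A minimal sketch of reading the stream back, assuming the cereal schema is importable (read_multiple_bytes is pycapnp's multi-message reader, the same one openpilot's LogReader builds on):
from cereal import log
with open('/data/ubloxRaw.stream', 'rb') as f:
  data = f.read()
# each f.write() above appended one framed capnp Event, so the whole file
# parses back with capnp's multi-message reader
for ev in log.Event.read_multiple_bytes(data):
  print(ev.logMonoTime, len(ev.ubloxRaw))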
#!/usr/bin/env python3
import sys
import requests
from openpilot.common.params import Params
if __name__ == "__main__":
if len(sys.argv) < 2:
print(f"{sys.argv[0]} <github username>")
exit(1)
username = sys.argv[1]
keys = requests.get(f"https://github.com/{username}.keys", timeout=10)
if keys.status_code == 200:
params = Params()
params.put_bool("SshEnabled", True)
params.put("GithubSshKeys", keys.text)
params.put("GithubUsername", username)
print("Setup ssh keys successfully")
else:
print("Error getting public keys from github")
|
2301_81045437/openpilot
|
tools/scripts/setup_ssh_keys.py
|
Python
|
mit
| 583
|
#!/bin/bash
while true; do
if ls /dev/serial/by-id/usb-FTDI_FT230X* 2> /dev/null; then
sudo screen /dev/serial/by-id/usb-FTDI_FT230X* 115200
fi
sleep 0.005
done
|
2301_81045437/openpilot
|
tools/serial/connect.sh
|
Shell
|
mit
| 172
|
import signal
import threading
import functools
from collections import namedtuple
from enum import Enum
from multiprocessing import Process, Queue, Value
from abc import ABC, abstractmethod
from openpilot.common.params import Params
from openpilot.common.numpy_fast import clip
from openpilot.common.realtime import Ratekeeper
from openpilot.selfdrive.test.helpers import set_params_enabled
from openpilot.selfdrive.car.honda.values import CruiseButtons
from openpilot.tools.sim.lib.common import SimulatorState, World
from openpilot.tools.sim.lib.simulated_car import SimulatedCar
from openpilot.tools.sim.lib.simulated_sensors import SimulatedSensors
QueueMessage = namedtuple("QueueMessage", ["type", "info"], defaults=[None])
class QueueMessageType(Enum):
START_STATUS = 0
CONTROL_COMMAND = 1
TERMINATION_INFO = 2
CLOSE_STATUS = 3
def control_cmd_gen(cmd: str):
return QueueMessage(QueueMessageType.CONTROL_COMMAND, cmd)
def rk_loop(function, hz, exit_event: threading.Event):
rk = Ratekeeper(hz, None)
while not exit_event.is_set():
function()
rk.keep_time()
class SimulatorBridge(ABC):
TICKS_PER_FRAME = 5
def __init__(self, dual_camera, high_quality):
set_params_enabled()
self.params = Params()
self.params.put_bool("ExperimentalLongitudinalEnabled", True)
self.rk = Ratekeeper(100, None)
self.dual_camera = dual_camera
self.high_quality = high_quality
self._exit_event = threading.Event()
self._threads = []
self._keep_alive = True
self.started = Value('i', False)
signal.signal(signal.SIGTERM, self._on_shutdown)
self._exit = threading.Event()
self.simulator_state = SimulatorState()
self.world: World | None = None
self.past_startup_engaged = False
self.startup_button_prev = True
self.test_run = False
  def _on_shutdown(self, signum, frame):
self.shutdown()
def shutdown(self):
self._keep_alive = False
def bridge_keep_alive(self, q: Queue, retries: int):
try:
self._run(q)
finally:
self.close("bridge terminated")
def close(self, reason):
self.started.value = False
self._exit_event.set()
if self.world is not None:
self.world.close(reason)
def run(self, queue, retries=-1):
bridge_p = Process(name="bridge", target=self.bridge_keep_alive, args=(queue, retries))
bridge_p.start()
return bridge_p
def print_status(self):
print(
f"""
State:
Ignition: {self.simulator_state.ignition} Engaged: {self.simulator_state.is_engaged}
""")
@abstractmethod
def spawn_world(self, q: Queue) -> World:
pass
def _run(self, q: Queue):
self.world = self.spawn_world(q)
self.simulated_car = SimulatedCar()
self.simulated_sensors = SimulatedSensors(self.dual_camera)
self.simulated_car_thread = threading.Thread(target=rk_loop, args=(functools.partial(self.simulated_car.update, self.simulator_state),
100, self._exit_event))
self.simulated_car_thread.start()
self.simulated_camera_thread = threading.Thread(target=rk_loop, args=(functools.partial(self.simulated_sensors.send_camera_images, self.world),
20, self._exit_event))
self.simulated_camera_thread.start()
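    # the CAN thread runs at 100Hz and the camera thread at 20Hz, matching the rates openpilot expects from real hardware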
# Simulation tends to be slow in the initial steps. This prevents lagging later
for _ in range(20):
self.world.tick()
while self._keep_alive:
throttle_out = steer_out = brake_out = 0.0
throttle_op = steer_op = brake_op = 0.0
self.simulator_state.cruise_button = 0
self.simulator_state.left_blinker = False
self.simulator_state.right_blinker = False
throttle_manual = steer_manual = brake_manual = 0.
# Read manual controls
if not q.empty():
message = q.get()
if message.type == QueueMessageType.CONTROL_COMMAND:
m = message.info.split('_')
if m[0] == "steer":
steer_manual = float(m[1])
elif m[0] == "throttle":
throttle_manual = float(m[1])
elif m[0] == "brake":
brake_manual = float(m[1])
elif m[0] == "cruise":
if m[1] == "down":
self.simulator_state.cruise_button = CruiseButtons.DECEL_SET
elif m[1] == "up":
self.simulator_state.cruise_button = CruiseButtons.RES_ACCEL
elif m[1] == "cancel":
self.simulator_state.cruise_button = CruiseButtons.CANCEL
elif m[1] == "main":
self.simulator_state.cruise_button = CruiseButtons.MAIN
elif m[0] == "blinker":
if m[1] == "left":
self.simulator_state.left_blinker = True
elif m[1] == "right":
self.simulator_state.right_blinker = True
elif m[0] == "ignition":
self.simulator_state.ignition = not self.simulator_state.ignition
elif m[0] == "reset":
self.world.reset()
elif m[0] == "quit":
break
self.simulator_state.user_brake = brake_manual
self.simulator_state.user_gas = throttle_manual
self.simulator_state.user_torque = steer_manual * -10000
steer_manual = steer_manual * -40
# Update openpilot on current sensor state
self.simulated_sensors.update(self.simulator_state, self.world)
self.simulated_car.sm.update(0)
controlsState = self.simulated_car.sm['controlsState']
self.simulator_state.is_engaged = controlsState.active
if self.simulator_state.is_engaged:
throttle_op = clip(self.simulated_car.sm['carControl'].actuators.accel / 1.6, 0.0, 1.0)
brake_op = clip(-self.simulated_car.sm['carControl'].actuators.accel / 4.0, 0.0, 1.0)
steer_op = self.simulated_car.sm['carControl'].actuators.steeringAngleDeg
self.past_startup_engaged = True
elif not self.past_startup_engaged and controlsState.engageable:
self.simulator_state.cruise_button = CruiseButtons.DECEL_SET if self.startup_button_prev else CruiseButtons.MAIN # force engagement on startup
self.startup_button_prev = not self.startup_button_prev
throttle_out = throttle_op if self.simulator_state.is_engaged else throttle_manual
brake_out = brake_op if self.simulator_state.is_engaged else brake_manual
steer_out = steer_op if self.simulator_state.is_engaged else steer_manual
self.world.apply_controls(steer_out, throttle_out, brake_out)
self.world.read_state()
self.world.read_sensors(self.simulator_state)
if self.world.exit_event.is_set():
self.shutdown()
if self.rk.frame % self.TICKS_PER_FRAME == 0:
self.world.tick()
self.world.read_cameras()
# don't print during test, so no print/IO Block between OP and metadrive processes
if not self.test_run and self.rk.frame % 25 == 0:
self.print_status()
self.started.value = True
self.rk.keep_time()
|
2301_81045437/openpilot
|
tools/sim/bridge/common.py
|
Python
|
mit
| 7,036
|
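The bridge's control protocol is just underscore-delimited command strings wrapped in QueueMessage; a minimal sketch of driving it headlessly, assuming the MetaDrive bridge defined below (openpilot itself must be launched separately, e.g. via launch_openpilot.sh):
from multiprocessing import Queue
from openpilot.tools.sim.bridge.common import control_cmd_gen
from openpilot.tools.sim.bridge.metadrive.metadrive_bridge import MetaDriveBridge
q = Queue()
bridge = MetaDriveBridge(dual_camera=False, high_quality=False)
proc = bridge.run(q)                    # bridge loop runs in a child process
q.put(control_cmd_gen("cruise_down"))   # request engagement, same as pressing '2'
q.put(control_cmd_gen("steer_-0.15"))   # small left steer, same as 'a'
q.put(control_cmd_gen("quit"))          # leave the control loop
proc.join()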
import math
from multiprocessing import Queue
from metadrive.component.sensors.base_camera import _cuda_enable
from metadrive.component.map.pg_map import MapGenerateMethod
from openpilot.tools.sim.bridge.common import SimulatorBridge
from openpilot.tools.sim.bridge.metadrive.metadrive_common import RGBCameraRoad, RGBCameraWide
from openpilot.tools.sim.bridge.metadrive.metadrive_world import MetaDriveWorld
from openpilot.tools.sim.lib.camerad import W, H
def straight_block(length):
return {
"id": "S",
"pre_block_socket_index": 0,
"length": length
}
def curve_block(length, angle=45, direction=0):
return {
"id": "C",
"pre_block_socket_index": 0,
"length": length,
"radius": length,
"angle": angle,
"dir": direction
}
def create_map(track_size=60):
curve_len = track_size * 2
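  # four straights joined by four 90-degree curves, so the generated track closes into a loop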
return dict(
type=MapGenerateMethod.PG_MAP_FILE,
lane_num=2,
lane_width=4.5,
config=[
None,
straight_block(track_size),
curve_block(curve_len, 90),
straight_block(track_size),
curve_block(curve_len, 90),
straight_block(track_size),
curve_block(curve_len, 90),
straight_block(track_size),
curve_block(curve_len, 90),
]
)
class MetaDriveBridge(SimulatorBridge):
TICKS_PER_FRAME = 5
def __init__(self, dual_camera, high_quality, test_duration=math.inf, test_run=False):
super().__init__(dual_camera, high_quality)
self.should_render = False
self.test_run = test_run
self.test_duration = test_duration if self.test_run else math.inf
def spawn_world(self, queue: Queue):
sensors = {
"rgb_road": (RGBCameraRoad, W, H, )
}
if self.dual_camera:
sensors["rgb_wide"] = (RGBCameraWide, W, H)
config = dict(
use_render=self.should_render,
vehicle_config=dict(
enable_reverse=False,
image_source="rgb_road",
),
sensors=sensors,
image_on_cuda=_cuda_enable,
image_observation=True,
interface_panel=[],
out_of_route_done=False,
on_continuous_line_done=False,
crash_vehicle_done=False,
crash_object_done=False,
arrive_dest_done=False,
traffic_density=0.0, # traffic is incredibly expensive
map_config=create_map(),
decision_repeat=1,
physics_world_step_size=self.TICKS_PER_FRAME/100,
preload_models=False
)
return MetaDriveWorld(queue, config, self.test_duration, self.test_run, self.dual_camera)
|
2301_81045437/openpilot
|
tools/sim/bridge/metadrive/metadrive_bridge.py
|
Python
|
mit
| 2,474
|
import numpy as np
from metadrive.component.sensors.rgb_camera import RGBCamera
from panda3d.core import Texture, GraphicsOutput
class CopyRamRGBCamera(RGBCamera):
"""Camera which copies its content into RAM during the render process, for faster image grabbing."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cpu_texture = Texture()
self.buffer.addRenderTexture(self.cpu_texture, GraphicsOutput.RTMCopyRam)
def get_rgb_array_cpu(self):
origin_img = self.cpu_texture
img = np.frombuffer(origin_img.getRamImage().getData(), dtype=np.uint8)
img = img.reshape((origin_img.getYSize(), origin_img.getXSize(), -1))
img = img[:,:,:3] # RGBA to RGB
# img = np.swapaxes(img, 1, 0)
img = img[::-1] # Flip on vertical axis
return img
class RGBCameraWide(CopyRamRGBCamera):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
lens = self.get_lens()
lens.setFov(120)
lens.setNear(0.1)
class RGBCameraRoad(CopyRamRGBCamera):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
lens = self.get_lens()
lens.setFov(40)
lens.setNear(0.1)
|
2301_81045437/openpilot
|
tools/sim/bridge/metadrive/metadrive_common.py
|
Python
|
mit
| 1,179
|
import math
import time
import numpy as np
from collections import namedtuple
from panda3d.core import Vec3
from multiprocessing.connection import Connection
from metadrive.engine.core.engine_core import EngineCore
from metadrive.engine.core.image_buffer import ImageBuffer
from metadrive.envs.metadrive_env import MetaDriveEnv
from metadrive.obs.image_obs import ImageObservation
from openpilot.common.realtime import Ratekeeper
from openpilot.tools.sim.lib.common import vec3
from openpilot.tools.sim.lib.camerad import W, H
C3_POSITION = Vec3(0.0, 0, 1.22)
C3_HPR = Vec3(0, 0,0)
metadrive_simulation_state = namedtuple("metadrive_simulation_state", ["running", "done", "done_info"])
metadrive_vehicle_state = namedtuple("metadrive_vehicle_state", ["velocity", "position", "bearing", "steering_angle"])
def apply_metadrive_patches(arrive_dest_done=True):
# By default, metadrive won't try to use cuda images unless it's used as a sensor for vehicles, so patch that in
def add_image_sensor_patched(self, name: str, cls, args):
    if self.global_config["image_on_cuda"]:  # and name == self.global_config["vehicle_config"]["image_source"]
sensor = cls(*args, self, cuda=True)
else:
sensor = cls(*args, self, cuda=False)
assert isinstance(sensor, ImageBuffer), "This API is for adding image sensor"
self.sensors[name] = sensor
EngineCore.add_image_sensor = add_image_sensor_patched
# we aren't going to use the built-in observation stack, so disable it to save time
def observe_patched(self, *args, **kwargs):
return self.state
ImageObservation.observe = observe_patched
# disable destination, we want to loop forever
def arrive_destination_patch(self, *args, **kwargs):
return False
if not arrive_dest_done:
MetaDriveEnv._is_arrive_destination = arrive_destination_patch
def metadrive_process(dual_camera: bool, config: dict, camera_array, wide_camera_array, image_lock,
controls_recv: Connection, simulation_state_send: Connection, vehicle_state_send: Connection,
exit_event, op_engaged, test_duration, test_run):
arrive_dest_done = config.pop("arrive_dest_done", True)
apply_metadrive_patches(arrive_dest_done)
road_image = np.frombuffer(camera_array.get_obj(), dtype=np.uint8).reshape((H, W, 3))
if dual_camera:
assert wide_camera_array is not None
wide_road_image = np.frombuffer(wide_camera_array.get_obj(), dtype=np.uint8).reshape((H, W, 3))
env = MetaDriveEnv(config)
def get_current_lane_info(vehicle):
_, lane_info, on_lane = vehicle.navigation._get_current_lane(vehicle)
lane_idx = lane_info[2] if lane_info is not None else None
return lane_idx, on_lane
def reset():
env.reset()
env.vehicle.config["max_speed_km_h"] = 1000
lane_idx_prev, _ = get_current_lane_info(env.vehicle)
simulation_state = metadrive_simulation_state(
running=True,
done=False,
done_info=None,
)
simulation_state_send.send(simulation_state)
return lane_idx_prev
lane_idx_prev = reset()
start_time = None
def get_cam_as_rgb(cam):
cam = env.engine.sensors[cam]
cam.get_cam().reparentTo(env.vehicle.origin)
cam.get_cam().setPos(C3_POSITION)
cam.get_cam().setHpr(C3_HPR)
img = cam.perceive(to_float=False)
    if not isinstance(img, np.ndarray):
img = img.get() # convert cupy array to numpy
return img
rk = Ratekeeper(100, None)
steer_ratio = 8
vc = [0,0]
while not exit_event.is_set():
vehicle_state = metadrive_vehicle_state(
velocity=vec3(x=float(env.vehicle.velocity[0]), y=float(env.vehicle.velocity[1]), z=0),
position=env.vehicle.position,
bearing=float(math.degrees(env.vehicle.heading_theta)),
steering_angle=env.vehicle.steering * env.vehicle.MAX_STEERING
)
vehicle_state_send.send(vehicle_state)
if controls_recv.poll(0):
while controls_recv.poll(0):
steer_angle, gas, should_reset = controls_recv.recv()
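        # openpilot commands a steering-wheel angle in degrees; metadrive wants a normalized
        # wheel position, so scale down by MAX_STEERING and an assumed steering ratio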
steer_metadrive = steer_angle * 1 / (env.vehicle.MAX_STEERING * steer_ratio)
steer_metadrive = np.clip(steer_metadrive, -1, 1)
vc = [steer_metadrive, gas]
if should_reset:
lane_idx_prev = reset()
start_time = None
is_engaged = op_engaged.is_set()
if is_engaged and start_time is None:
start_time = time.monotonic()
if rk.frame % 5 == 0:
_, _, terminated, _, _ = env.step(vc)
    timeout = start_time is not None and time.monotonic() - start_time >= test_duration
lane_idx_curr, on_lane = get_current_lane_info(env.vehicle)
out_of_lane = lane_idx_curr != lane_idx_prev or not on_lane
lane_idx_prev = lane_idx_curr
if terminated or ((out_of_lane or timeout) and test_run):
if terminated:
done_result = env.done_function("default_agent")
elif out_of_lane:
done_result = (True, {"out_of_lane" : True})
elif timeout:
done_result = (True, {"timeout" : True})
simulation_state = metadrive_simulation_state(
running=False,
done=done_result[0],
done_info=done_result[1],
)
simulation_state_send.send(simulation_state)
if dual_camera:
wide_road_image[...] = get_cam_as_rgb("rgb_wide")
road_image[...] = get_cam_as_rgb("rgb_road")
image_lock.release()
rk.keep_time()
|
2301_81045437/openpilot
|
tools/sim/bridge/metadrive/metadrive_process.py
|
Python
|
mit
| 5,406
|
import ctypes
import functools
import multiprocessing
import numpy as np
import time
from multiprocessing import Pipe, Array
from openpilot.tools.sim.bridge.common import QueueMessage, QueueMessageType
from openpilot.tools.sim.bridge.metadrive.metadrive_process import (metadrive_process, metadrive_simulation_state,
metadrive_vehicle_state)
from openpilot.tools.sim.lib.common import SimulatorState, World
from openpilot.tools.sim.lib.camerad import W, H
class MetaDriveWorld(World):
def __init__(self, status_q, config, test_duration, test_run, dual_camera=False):
super().__init__(dual_camera)
self.status_q = status_q
self.camera_array = Array(ctypes.c_uint8, W*H*3)
self.road_image = np.frombuffer(self.camera_array.get_obj(), dtype=np.uint8).reshape((H, W, 3))
self.wide_camera_array = None
if dual_camera:
self.wide_camera_array = Array(ctypes.c_uint8, W*H*3)
self.wide_road_image = np.frombuffer(self.wide_camera_array.get_obj(), dtype=np.uint8).reshape((H, W, 3))
self.controls_send, self.controls_recv = Pipe()
self.simulation_state_send, self.simulation_state_recv = Pipe()
self.vehicle_state_send, self.vehicle_state_recv = Pipe()
self.exit_event = multiprocessing.Event()
self.op_engaged = multiprocessing.Event()
self.test_run = test_run
self.first_engage = None
self.last_check_timestamp = 0
self.distance_moved = 0
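    # watchdog state: read_sensors() terminates the run if the car hasn't moved within a 30s window while engaged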
self.metadrive_process = multiprocessing.Process(name="metadrive process", target=
functools.partial(metadrive_process, dual_camera, config,
self.camera_array, self.wide_camera_array, self.image_lock,
self.controls_recv, self.simulation_state_send,
self.vehicle_state_send, self.exit_event, self.op_engaged, test_duration, self.test_run))
self.metadrive_process.start()
self.status_q.put(QueueMessage(QueueMessageType.START_STATUS, "starting"))
print("----------------------------------------------------------")
print("---- Spawning Metadrive world, this might take awhile ----")
print("----------------------------------------------------------")
self.vehicle_last_pos = self.vehicle_state_recv.recv().position # wait for a state message to ensure metadrive is launched
self.status_q.put(QueueMessage(QueueMessageType.START_STATUS, "started"))
self.steer_ratio = 15
self.vc = [0.0,0.0]
self.reset_time = 0
self.should_reset = False
def apply_controls(self, steer_angle, throttle_out, brake_out):
if (time.monotonic() - self.reset_time) > 2:
self.vc[0] = steer_angle
if throttle_out:
self.vc[1] = throttle_out
else:
self.vc[1] = -brake_out
else:
self.vc[0] = 0
self.vc[1] = 0
self.controls_send.send([*self.vc, self.should_reset])
self.should_reset = False
def read_state(self):
while self.simulation_state_recv.poll(0):
md_state: metadrive_simulation_state = self.simulation_state_recv.recv()
if md_state.done:
self.status_q.put(QueueMessage(QueueMessageType.TERMINATION_INFO, md_state.done_info))
self.exit_event.set()
def read_sensors(self, state: SimulatorState):
while self.vehicle_state_recv.poll(0):
md_vehicle: metadrive_vehicle_state = self.vehicle_state_recv.recv()
curr_pos = md_vehicle.position
state.velocity = md_vehicle.velocity
state.bearing = md_vehicle.bearing
state.steering_angle = md_vehicle.steering_angle
state.gps.from_xy(curr_pos)
state.valid = True
is_engaged = state.is_engaged
if is_engaged and self.first_engage is None:
self.first_engage = time.monotonic()
self.op_engaged.set()
# check moving 5 seconds after engaged, doesn't move right away
after_engaged_check = is_engaged and time.monotonic() - self.first_engage >= 5 and self.test_run
x_dist = abs(curr_pos[0] - self.vehicle_last_pos[0])
y_dist = abs(curr_pos[1] - self.vehicle_last_pos[1])
dist_threshold = 1
      if x_dist >= dist_threshold or y_dist >= dist_threshold:  # position jitters slightly even at standstill; only movement beyond the threshold counts as moving
self.distance_moved += x_dist + y_dist
time_check_threshold = 30
current_time = time.monotonic()
since_last_check = current_time - self.last_check_timestamp
if since_last_check >= time_check_threshold:
if after_engaged_check and self.distance_moved == 0:
self.status_q.put(QueueMessage(QueueMessageType.TERMINATION_INFO, {"vehicle_not_moving" : True}))
self.exit_event.set()
self.last_check_timestamp = current_time
self.distance_moved = 0
self.vehicle_last_pos = curr_pos
def read_cameras(self):
pass
def tick(self):
pass
def reset(self):
self.should_reset = True
def close(self, reason: str):
self.status_q.put(QueueMessage(QueueMessageType.CLOSE_STATUS, reason))
self.exit_event.set()
self.metadrive_process.join()
|
2301_81045437/openpilot
|
tools/sim/bridge/metadrive/metadrive_world.py
|
Python
|
mit
| 5,199
|
#!/bin/bash
export PASSIVE="0"
export NOBOARD="1"
export SIMULATION="1"
export SKIP_FW_QUERY="1"
export FINGERPRINT="HONDA_CIVIC_2022"
export BLOCK="${BLOCK},camerad,loggerd,encoderd,micd,logmessaged"
if [[ "$CI" ]]; then
# TODO: offscreen UI should work
export BLOCK="${BLOCK},ui"
fi
python -c "from openpilot.selfdrive.test.helpers import set_params_enabled; set_params_enabled()"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
OPENPILOT_DIR="$SCRIPT_DIR/../.."
cd "$OPENPILOT_DIR/system/manager" && exec ./manager.py
|
2301_81045437/openpilot
|
tools/sim/launch_openpilot.sh
|
Shell
|
mit
| 573
|
import numpy as np
import os
import pyopencl as cl
import pyopencl.array as cl_array
from cereal.visionipc import VisionIpcServer, VisionStreamType
from cereal import messaging
from openpilot.common.basedir import BASEDIR
from openpilot.tools.sim.lib.common import W, H
class Camerad:
"""Simulates the camerad daemon"""
def __init__(self, dual_camera):
self.pm = messaging.PubMaster(['roadCameraState', 'wideRoadCameraState'])
self.frame_road_id = 0
self.frame_wide_id = 0
self.vipc_server = VisionIpcServer("camerad")
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 5, False, W, H)
if dual_camera:
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 5, False, W, H)
self.vipc_server.start_listener()
# set up for pyopencl rgb to yuv conversion
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
cl_arg = f" -DHEIGHT={H} -DWIDTH={W} -DRGB_STRIDE={W * 3} -DUV_WIDTH={W // 2} -DUV_HEIGHT={H // 2} -DRGB_SIZE={W * H} -DCL_DEBUG "
kernel_fn = os.path.join(BASEDIR, "tools/sim/rgb_to_nv12.cl")
with open(kernel_fn) as f:
prg = cl.Program(self.ctx, f.read()).build(cl_arg)
self.krnl = prg.rgb_to_nv12
self.Wdiv4 = W // 4 if (W % 4 == 0) else (W + (4 - W % 4)) // 4
self.Hdiv4 = H // 4 if (H % 4 == 0) else (H + (4 - H % 4)) // 4
def cam_send_yuv_road(self, yuv):
self._send_yuv(yuv, self.frame_road_id, 'roadCameraState', VisionStreamType.VISION_STREAM_ROAD)
self.frame_road_id += 1
def cam_send_yuv_wide_road(self, yuv):
self._send_yuv(yuv, self.frame_wide_id, 'wideRoadCameraState', VisionStreamType.VISION_STREAM_WIDE_ROAD)
self.frame_wide_id += 1
# Returns: yuv bytes
def rgb_to_yuv(self, rgb):
assert rgb.shape == (H, W, 3), f"{rgb.shape}"
assert rgb.dtype == np.uint8
rgb_cl = cl_array.to_device(self.queue, rgb)
yuv_cl = cl_array.empty_like(rgb_cl)
self.krnl(self.queue, (self.Wdiv4, self.Hdiv4), None, rgb_cl.data, yuv_cl.data).wait()
yuv = np.resize(yuv_cl.get(), rgb.size // 2)
return yuv.data.tobytes()
def _send_yuv(self, yuv, frame_id, pub_type, yuv_type):
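    # synthesize SOF/EOF timestamps assuming a fixed 20fps (50ms per frame)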
eof = int(frame_id * 0.05 * 1e9)
self.vipc_server.send(yuv_type, yuv, frame_id, eof, eof)
dat = messaging.new_message(pub_type, valid=True)
msg = {
"frameId": frame_id,
"transform": [1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0]
}
setattr(dat, pub_type, msg)
self.pm.send(pub_type, dat)
|
2301_81045437/openpilot
|
tools/sim/lib/camerad.py
|
Python
|
mit
| 2,547
|
import math
import multiprocessing
import numpy as np
from abc import ABC, abstractmethod
from collections import namedtuple
W, H = 1928, 1208
vec3 = namedtuple("vec3", ["x", "y", "z"])
class GPSState:
def __init__(self):
self.latitude = 0
self.longitude = 0
self.altitude = 0
def from_xy(self, xy):
"""Simulates a lat/lon from an xy coordinate on a plane, for simple simlation. TODO: proper global projection?"""
BASE_LAT = 32.75308505188913
BASE_LON = -117.2095393365393
    DEG_TO_METERS = 100000  # roughly meters per degree of lat/lon at this latitude
self.latitude = float(BASE_LAT + xy[0] / DEG_TO_METERS)
self.longitude = float(BASE_LON + xy[1] / DEG_TO_METERS)
self.altitude = 0
class IMUState:
def __init__(self):
self.accelerometer: vec3 = vec3(0,0,0)
self.gyroscope: vec3 = vec3(0,0,0)
self.bearing: float = 0
class SimulatorState:
def __init__(self):
self.valid = False
self.is_engaged = False
self.ignition = True
self.velocity: vec3 = None
self.bearing: float = 0
self.gps = GPSState()
self.imu = IMUState()
self.steering_angle: float = 0
self.user_gas: float = 0
self.user_brake: float = 0
self.user_torque: float = 0
self.cruise_button = 0
self.left_blinker = False
self.right_blinker = False
@property
def speed(self):
return math.sqrt(self.velocity.x ** 2 + self.velocity.y ** 2 + self.velocity.z ** 2)
class World(ABC):
def __init__(self, dual_camera):
self.dual_camera = dual_camera
self.image_lock = multiprocessing.Semaphore(value=0)
self.road_image = np.zeros((H, W, 3), dtype=np.uint8)
self.wide_road_image = np.zeros((H, W, 3), dtype=np.uint8)
self.exit_event = multiprocessing.Event()
@abstractmethod
def apply_controls(self, steer_sim, throttle_out, brake_out):
pass
@abstractmethod
def tick(self):
pass
@abstractmethod
def read_state(self):
pass
@abstractmethod
def read_sensors(self, simulator_state: SimulatorState):
pass
@abstractmethod
def read_cameras(self):
pass
@abstractmethod
def close(self, reason: str):
pass
@abstractmethod
def reset(self):
pass
|
2301_81045437/openpilot
|
tools/sim/lib/common.py
|
Python
|
mit
| 2,156
|
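A quick sanity check of the planar GPS mapping above (rough numbers; it is not a real projection):
gps = GPSState()
gps.from_xy((100, -50))
# +100 m along x adds 100/100000 = 0.001 deg of latitude; -50 m along y
# subtracts 0.0005 deg of longitude
print(gps.latitude, gps.longitude)  # ~32.754085, ~-117.210039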
import sys
import termios
import time
from multiprocessing import Queue
from termios import (BRKINT, CS8, CSIZE, ECHO, ICANON, ICRNL, IEXTEN, INPCK,
ISTRIP, IXON, PARENB, VMIN, VTIME)
from typing import NoReturn
from openpilot.tools.sim.bridge.common import QueueMessage, control_cmd_gen
# Indexes for termios list.
IFLAG = 0
OFLAG = 1
CFLAG = 2
LFLAG = 3
ISPEED = 4
OSPEED = 5
CC = 6
KEYBOARD_HELP = """
| key | functionality |
|------|-----------------------|
| 1 | Cruise Resume / Accel |
| 2 | Cruise Set / Decel |
| 3 | Cruise Cancel |
| r | Reset Simulation |
| i | Toggle Ignition |
| q | Exit all |
| wasd | Control manually |
"""
def getch() -> str:
STDIN_FD = sys.stdin.fileno()
old_settings = termios.tcgetattr(STDIN_FD)
try:
    # switch the terminal to a raw-like mode: no echo, no line buffering, read one byte at a time
mode = old_settings.copy()
mode[IFLAG] &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON)
#mode[OFLAG] &= ~(OPOST)
mode[CFLAG] &= ~(CSIZE | PARENB)
mode[CFLAG] |= CS8
mode[LFLAG] &= ~(ECHO | ICANON | IEXTEN)
mode[CC][VMIN] = 1
mode[CC][VTIME] = 0
termios.tcsetattr(STDIN_FD, termios.TCSAFLUSH, mode)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(STDIN_FD, termios.TCSADRAIN, old_settings)
return ch
def print_keyboard_help():
print(f"Keyboard Commands:\n{KEYBOARD_HELP}")
def keyboard_poll_thread(q: 'Queue[QueueMessage]'):
print_keyboard_help()
while True:
c = getch()
if c == '1':
q.put(control_cmd_gen("cruise_up"))
elif c == '2':
q.put(control_cmd_gen("cruise_down"))
elif c == '3':
q.put(control_cmd_gen("cruise_cancel"))
elif c == 'w':
q.put(control_cmd_gen(f"throttle_{1.0}"))
elif c == 'a':
q.put(control_cmd_gen(f"steer_{-0.15}"))
elif c == 's':
q.put(control_cmd_gen(f"brake_{1.0}"))
elif c == 'd':
q.put(control_cmd_gen(f"steer_{0.15}"))
elif c == 'z':
q.put(control_cmd_gen("blinker_left"))
elif c == 'x':
q.put(control_cmd_gen("blinker_right"))
elif c == 'i':
q.put(control_cmd_gen("ignition"))
elif c == 'r':
q.put(control_cmd_gen("reset"))
elif c == 'q':
q.put(control_cmd_gen("quit"))
break
else:
print_keyboard_help()
def test(q: 'Queue[QueueMessage]') -> NoReturn:
while True:
print([q.get_nowait() for _ in range(q.qsize())] or None)
time.sleep(0.25)
if __name__ == '__main__':
from multiprocessing import Process, Queue
q: 'Queue[QueueMessage]' = Queue()
p = Process(target=test, args=(q,))
p.daemon = True
p.start()
keyboard_poll_thread(q)
|
2301_81045437/openpilot
|
tools/sim/lib/keyboard_ctrl.py
|
Python
|
mit
| 2,639
|
#!/usr/bin/env python3
# set up wheel
import array
import os
import struct
from fcntl import ioctl
from typing import NoReturn
from openpilot.tools.sim.bridge.common import QueueMessage, control_cmd_gen
# Iterate over the joystick devices.
print('Available devices:')
for fn in os.listdir('/dev/input'):
if fn.startswith('js'):
print(f' /dev/input/{fn}')
# We'll store the states here.
axis_states: dict[str, float] = {}
button_states: dict[str, float] = {}
# These constants were borrowed from linux/input.h
axis_names = {
0x00 : 'x',
0x01 : 'y',
0x02 : 'z',
0x03 : 'rx',
0x04 : 'ry',
0x05 : 'rz',
  0x06 : 'throttle',
0x07 : 'rudder',
0x08 : 'wheel',
0x09 : 'gas',
0x0a : 'brake',
0x10 : 'hat0x',
0x11 : 'hat0y',
0x12 : 'hat1x',
0x13 : 'hat1y',
0x14 : 'hat2x',
0x15 : 'hat2y',
0x16 : 'hat3x',
0x17 : 'hat3y',
0x18 : 'pressure',
0x19 : 'distance',
0x1a : 'tilt_x',
0x1b : 'tilt_y',
0x1c : 'tool_width',
0x20 : 'volume',
0x28 : 'misc',
}
button_names = {
0x120 : 'trigger',
0x121 : 'thumb',
0x122 : 'thumb2',
0x123 : 'top',
0x124 : 'top2',
0x125 : 'pinkie',
0x126 : 'base',
0x127 : 'base2',
0x128 : 'base3',
0x129 : 'base4',
0x12a : 'base5',
0x12b : 'base6',
0x12f : 'dead',
0x130 : 'a',
0x131 : 'b',
0x132 : 'c',
0x133 : 'x',
0x134 : 'y',
0x135 : 'z',
0x136 : 'tl',
0x137 : 'tr',
0x138 : 'tl2',
0x139 : 'tr2',
0x13a : 'select',
0x13b : 'start',
0x13c : 'mode',
0x13d : 'thumbl',
0x13e : 'thumbr',
0x220 : 'dpad_up',
0x221 : 'dpad_down',
0x222 : 'dpad_left',
0x223 : 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0 : 'dpad_left',
0x2c1 : 'dpad_right',
0x2c2 : 'dpad_up',
0x2c3 : 'dpad_down',
}
axis_name_list: list[str] = []
button_name_list: list[str] = []
def wheel_poll_thread(q: 'Queue[QueueMessage]') -> NoReturn:
# Open the joystick device.
fn = '/dev/input/js0'
print(f'Opening {fn}...')
jsdev = open(fn, 'rb')
# Get the device name.
#buf = bytearray(63)
buf = array.array('B', [0] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes().rstrip(b'\x00').decode('utf-8')
print(f'Device name: {js_name}')
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf) # JSIOCGAXMAP
for _axis in buf[:num_axes]:
axis_name = axis_names.get(_axis, f'unknown(0x{_axis:02x})')
axis_name_list.append(axis_name)
axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = button_names.get(btn, f'unknown(0x{btn:03x})')
button_name_list.append(btn_name)
button_states[btn_name] = 0
print('%d axes found: %s' % (num_axes, ', '.join(axis_name_list)))
print('%d buttons found: %s' % (num_buttons, ', '.join(button_name_list)))
# Enable FF
import evdev
from evdev import ecodes, InputDevice
device = evdev.list_devices()[0]
evtdev = InputDevice(device)
val = 24000
evtdev.write(ecodes.EV_FF, ecodes.FF_AUTOCENTER, val)
while True:
evbuf = jsdev.read(8)
value, mtype, number = struct.unpack('4xhBB', evbuf)
# print(mtype, number, value)
if mtype & 0x02: # wheel & paddles
axis = axis_name_list[number]
if axis == "z": # gas
fvalue = value / 32767.0
axis_states[axis] = fvalue
normalized = (1 - fvalue) * 50
q.put(control_cmd_gen(f"throttle_{normalized:f}"))
elif axis == "rz": # brake
fvalue = value / 32767.0
axis_states[axis] = fvalue
normalized = (1 - fvalue) * 50
q.put(control_cmd_gen(f"brake_{normalized:f}"))
elif axis == "x": # steer angle
fvalue = value / 32767.0
axis_states[axis] = fvalue
normalized = fvalue
q.put(control_cmd_gen(f"steer_{normalized:f}"))
elif mtype & 0x01: # buttons
if value == 1: # press down
if number in [0, 19]: # X
q.put(control_cmd_gen("cruise_down"))
elif number in [3, 18]: # triangle
q.put(control_cmd_gen("cruise_up"))
elif number in [1, 6]: # square
q.put(control_cmd_gen("cruise_cancel"))
elif number in [10, 21]: # R3
q.put(control_cmd_gen("reverse_switch"))
if __name__ == '__main__':
from multiprocessing import Process, Queue
  q: 'Queue[QueueMessage]' = Queue()
p = Process(target=wheel_poll_thread, args=(q,))
p.start()
|
2301_81045437/openpilot
|
tools/sim/lib/manual_ctrl.py
|
Python
|
mit
| 4,726
|
import cereal.messaging as messaging
from opendbc.can.packer import CANPacker
from opendbc.can.parser import CANParser
from openpilot.common.params import Params
from openpilot.selfdrive.boardd.boardd_api_impl import can_list_to_can_capnp
from openpilot.tools.sim.lib.common import SimulatorState
from panda.python import Panda
class SimulatedCar:
"""Simulates a honda civic 2022 (panda state + can messages) to OpenPilot"""
packer = CANPacker("honda_civic_ex_2022_can_generated")
def __init__(self):
self.pm = messaging.PubMaster(['can', 'pandaStates'])
self.sm = messaging.SubMaster(['carControl', 'controlsState', 'carParams'])
self.cp = self.get_car_can_parser()
self.idx = 0
self.params = Params()
self.obd_multiplexing = False
@staticmethod
def get_car_can_parser():
    dbc_f = 'honda_civic_ex_2022_can_generated'
    return CANParser(dbc_f, [], 0)
def send_can_messages(self, simulator_state: SimulatorState):
if not simulator_state.valid:
return
msg = []
# *** powertrain bus ***
speed = simulator_state.speed * 3.6 # convert m/s to kph
msg.append(self.packer.make_can_msg("ENGINE_DATA", 0, {"XMISSION_SPEED": speed}))
msg.append(self.packer.make_can_msg("WHEEL_SPEEDS", 0, {
"WHEEL_SPEED_FL": speed,
"WHEEL_SPEED_FR": speed,
"WHEEL_SPEED_RL": speed,
"WHEEL_SPEED_RR": speed
}))
msg.append(self.packer.make_can_msg("SCM_BUTTONS", 0, {"CRUISE_BUTTONS": simulator_state.cruise_button}))
msg.append(self.packer.make_can_msg("GEARBOX", 0, {"GEAR": 4, "GEAR_SHIFTER": 8}))
msg.append(self.packer.make_can_msg("GAS_PEDAL_2", 0, {}))
msg.append(self.packer.make_can_msg("SEATBELT_STATUS", 0, {"SEATBELT_DRIVER_LATCHED": 1}))
msg.append(self.packer.make_can_msg("STEER_STATUS", 0, {"STEER_TORQUE_SENSOR": simulator_state.user_torque}))
msg.append(self.packer.make_can_msg("STEERING_SENSORS", 0, {"STEER_ANGLE": simulator_state.steering_angle}))
msg.append(self.packer.make_can_msg("VSA_STATUS", 0, {}))
msg.append(self.packer.make_can_msg("STANDSTILL", 0, {"WHEELS_MOVING": 1 if simulator_state.speed >= 1.0 else 0}))
msg.append(self.packer.make_can_msg("STEER_MOTOR_TORQUE", 0, {}))
msg.append(self.packer.make_can_msg("EPB_STATUS", 0, {}))
msg.append(self.packer.make_can_msg("DOORS_STATUS", 0, {}))
msg.append(self.packer.make_can_msg("CRUISE_PARAMS", 0, {}))
msg.append(self.packer.make_can_msg("CRUISE", 0, {}))
msg.append(self.packer.make_can_msg("CRUISE_FAULT_STATUS", 0, {}))
msg.append(self.packer.make_can_msg("SCM_FEEDBACK", 0,
{
"MAIN_ON": 1,
"LEFT_BLINKER": simulator_state.left_blinker,
"RIGHT_BLINKER": simulator_state.right_blinker
}))
msg.append(self.packer.make_can_msg("POWERTRAIN_DATA", 0,
{
"ACC_STATUS": int(simulator_state.is_engaged),
"PEDAL_GAS": simulator_state.user_gas,
"BRAKE_PRESSED": simulator_state.user_brake > 0
}))
msg.append(self.packer.make_can_msg("CAR_SPEED", 0, {}))
# *** cam bus ***
msg.append(self.packer.make_can_msg("STEERING_CONTROL", 2, {}))
msg.append(self.packer.make_can_msg("ACC_HUD", 2, {}))
msg.append(self.packer.make_can_msg("LKAS_HUD", 2, {}))
self.pm.send('can', can_list_to_can_capnp(msg))
def send_panda_state(self, simulator_state):
self.sm.update(0)
if self.params.get_bool("ObdMultiplexingEnabled") != self.obd_multiplexing:
self.obd_multiplexing = not self.obd_multiplexing
self.params.put_bool("ObdMultiplexingChanged", True)
dat = messaging.new_message('pandaStates', 1)
dat.valid = True
dat.pandaStates[0] = {
'ignitionLine': simulator_state.ignition,
'pandaType': "blackPanda",
'controlsAllowed': True,
'safetyModel': 'hondaBosch',
'alternativeExperience': self.sm["carParams"].alternativeExperience,
'safetyParam': Panda.FLAG_HONDA_RADARLESS | Panda.FLAG_HONDA_BOSCH_LONG,
}
self.pm.send('pandaStates', dat)
def update(self, simulator_state: SimulatorState):
self.send_can_messages(simulator_state)
if self.idx % 50 == 0: # only send panda states at 2hz
self.send_panda_state(simulator_state)
self.idx += 1
|
2301_81045437/openpilot
|
tools/sim/lib/simulated_car.py
|
Python
|
mit
| 4,566
|
import time
from cereal import log
import cereal.messaging as messaging
from openpilot.common.realtime import DT_DMON
from openpilot.tools.sim.lib.camerad import Camerad
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from openpilot.tools.sim.lib.common import World, SimulatorState
class SimulatedSensors:
"""Simulates the C3 sensors (acc, gyro, gps, peripherals, dm state, cameras) to OpenPilot"""
def __init__(self, dual_camera=False):
self.pm = messaging.PubMaster(['accelerometer', 'gyroscope', 'gpsLocationExternal', 'driverStateV2', 'driverMonitoringState', 'peripheralState'])
self.camerad = Camerad(dual_camera=dual_camera)
self.last_perp_update = 0
self.last_dmon_update = 0
def send_imu_message(self, simulator_state: 'SimulatorState'):
for _ in range(5):
dat = messaging.new_message('accelerometer', valid=True)
dat.accelerometer.sensor = 4
dat.accelerometer.type = 0x10
dat.accelerometer.timestamp = dat.logMonoTime # TODO: use the IMU timestamp
dat.accelerometer.init('acceleration')
dat.accelerometer.acceleration.v = [simulator_state.imu.accelerometer.x, simulator_state.imu.accelerometer.y, simulator_state.imu.accelerometer.z]
self.pm.send('accelerometer', dat)
# copied these numbers from locationd
dat = messaging.new_message('gyroscope', valid=True)
dat.gyroscope.sensor = 5
dat.gyroscope.type = 0x10
dat.gyroscope.timestamp = dat.logMonoTime # TODO: use the IMU timestamp
dat.gyroscope.init('gyroUncalibrated')
dat.gyroscope.gyroUncalibrated.v = [simulator_state.imu.gyroscope.x, simulator_state.imu.gyroscope.y, simulator_state.imu.gyroscope.z]
self.pm.send('gyroscope', dat)
def send_gps_message(self, simulator_state: 'SimulatorState'):
if not simulator_state.valid:
return
# transform from vel to NED
velNED = [
-simulator_state.velocity.y,
simulator_state.velocity.x,
simulator_state.velocity.z,
]
for _ in range(10):
dat = messaging.new_message('gpsLocationExternal', valid=True)
dat.gpsLocationExternal = {
"unixTimestampMillis": int(time.time() * 1000),
"flags": 1, # valid fix
"horizontalAccuracy": 1.0,
"verticalAccuracy": 1.0,
"speedAccuracy": 0.1,
"bearingAccuracyDeg": 0.1,
"vNED": velNED,
"bearingDeg": simulator_state.imu.bearing,
"latitude": simulator_state.gps.latitude,
"longitude": simulator_state.gps.longitude,
"altitude": simulator_state.gps.altitude,
"speed": simulator_state.speed,
"source": log.GpsLocationData.SensorSource.ublox,
}
self.pm.send('gpsLocationExternal', dat)
def send_peripheral_state(self):
dat = messaging.new_message('peripheralState')
dat.valid = True
dat.peripheralState = {
'pandaType': log.PandaState.PandaType.blackPanda,
'voltage': 12000,
'current': 5678,
'fanSpeedRpm': 1000
}
self.pm.send('peripheralState', dat)
def send_fake_driver_monitoring(self):
# dmonitoringmodeld output
dat = messaging.new_message('driverStateV2')
dat.driverStateV2.leftDriverData.faceOrientation = [0., 0., 0.]
dat.driverStateV2.leftDriverData.faceProb = 1.0
dat.driverStateV2.rightDriverData.faceOrientation = [0., 0., 0.]
dat.driverStateV2.rightDriverData.faceProb = 1.0
self.pm.send('driverStateV2', dat)
# dmonitoringd output
dat = messaging.new_message('driverMonitoringState', valid=True)
dat.driverMonitoringState = {
"faceDetected": True,
"isDistracted": False,
"awarenessStatus": 1.,
}
self.pm.send('driverMonitoringState', dat)
def send_camera_images(self, world: 'World'):
world.image_lock.acquire()
yuv = self.camerad.rgb_to_yuv(world.road_image)
self.camerad.cam_send_yuv_road(yuv)
if world.dual_camera:
yuv = self.camerad.rgb_to_yuv(world.wide_road_image)
self.camerad.cam_send_yuv_wide_road(yuv)
def update(self, simulator_state: 'SimulatorState', world: 'World'):
now = time.time()
self.send_imu_message(simulator_state)
self.send_gps_message(simulator_state)
if (now - self.last_dmon_update) > DT_DMON/2:
self.send_fake_driver_monitoring()
self.last_dmon_update = now
if (now - self.last_perp_update) > 0.25:
self.send_peripheral_state()
self.last_perp_update = now
|
2301_81045437/openpilot
|
tools/sim/lib/simulated_sensors.py
|
Python
|
mit
| 4,441
|
#define RGB_TO_Y(r, g, b) ((((mul24(b, 13) + mul24(g, 65) + mul24(r, 33)) + 64) >> 7) + 16)
#define RGB_TO_U(r, g, b) ((mul24(b, 56) - mul24(g, 37) - mul24(r, 19) + 0x8080) >> 8)
#define RGB_TO_V(r, g, b) ((mul24(r, 56) - mul24(g, 47) - mul24(b, 9) + 0x8080) >> 8)
#define AVERAGE(x, y, z, w) ((convert_ushort(x) + convert_ushort(y) + convert_ushort(z) + convert_ushort(w) + 1) >> 1)
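// AVERAGE of a 2x2 block only shifts by 1, so the result stays at 2x scale;
// the U/V coefficients above are half the usual BT.601 constants to compensate.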
inline void convert_2_ys(__global uchar * out_yuv, int yi, const uchar8 rgbs1) {
uchar2 yy = (uchar2)(
RGB_TO_Y(rgbs1.s2, rgbs1.s1, rgbs1.s0),
RGB_TO_Y(rgbs1.s5, rgbs1.s4, rgbs1.s3)
);
#ifdef CL_DEBUG
if(yi >= RGB_SIZE)
printf("Y vector2 overflow, %d > %d\n", yi, RGB_SIZE);
#endif
vstore2(yy, 0, out_yuv + yi);
}
inline void convert_4_ys(__global uchar * out_yuv, int yi, const uchar8 rgbs1, const uchar8 rgbs3) {
const uchar4 yy = (uchar4)(
RGB_TO_Y(rgbs1.s2, rgbs1.s1, rgbs1.s0),
RGB_TO_Y(rgbs1.s5, rgbs1.s4, rgbs1.s3),
RGB_TO_Y(rgbs3.s0, rgbs1.s7, rgbs1.s6),
RGB_TO_Y(rgbs3.s3, rgbs3.s2, rgbs3.s1)
);
#ifdef CL_DEBUG
if(yi > RGB_SIZE - 4)
printf("Y vector4 overflow, %d > %d\n", yi, RGB_SIZE - 4);
#endif
vstore4(yy, 0, out_yuv + yi);
}
inline void convert_uv(__global uchar * out_yuv, int uvi,
const uchar8 rgbs1, const uchar8 rgbs2) {
// U & V: average of 2x2 pixels square
const short ab = AVERAGE(rgbs1.s0, rgbs1.s3, rgbs2.s0, rgbs2.s3);
const short ag = AVERAGE(rgbs1.s1, rgbs1.s4, rgbs2.s1, rgbs2.s4);
const short ar = AVERAGE(rgbs1.s2, rgbs1.s5, rgbs2.s2, rgbs2.s5);
#ifdef CL_DEBUG
if(uvi >= RGB_SIZE + RGB_SIZE / 2)
printf("UV overflow, %d >= %d\n", uvi, RGB_SIZE + RGB_SIZE / 2);
#endif
out_yuv[uvi] = RGB_TO_U(ar, ag, ab);
out_yuv[uvi+1] = RGB_TO_V(ar, ag, ab);
}
inline void convert_2_uvs(__global uchar * out_yuv, int uvi,
const uchar8 rgbs1, const uchar8 rgbs2, const uchar8 rgbs3, const uchar8 rgbs4) {
// U & V: average of 2x2 pixels square
const short ab1 = AVERAGE(rgbs1.s0, rgbs1.s3, rgbs2.s0, rgbs2.s3);
const short ag1 = AVERAGE(rgbs1.s1, rgbs1.s4, rgbs2.s1, rgbs2.s4);
const short ar1 = AVERAGE(rgbs1.s2, rgbs1.s5, rgbs2.s2, rgbs2.s5);
const short ab2 = AVERAGE(rgbs1.s6, rgbs3.s1, rgbs2.s6, rgbs4.s1);
const short ag2 = AVERAGE(rgbs1.s7, rgbs3.s2, rgbs2.s7, rgbs4.s2);
const short ar2 = AVERAGE(rgbs3.s0, rgbs3.s3, rgbs4.s0, rgbs4.s3);
uchar4 uv = (uchar4)(
RGB_TO_U(ar1, ag1, ab1),
RGB_TO_V(ar1, ag1, ab1),
RGB_TO_U(ar2, ag2, ab2),
RGB_TO_V(ar2, ag2, ab2)
);
#ifdef CL_DEBUG1
if(uvi > RGB_SIZE + RGB_SIZE / 2 - 4)
printf("UV2 overflow, %d >= %d\n", uvi, RGB_SIZE + RGB_SIZE / 2 - 2);
#endif
vstore4(uv, 0, out_yuv + uvi);
}
__kernel void rgb_to_nv12(__global uchar const * const rgb,
__global uchar * out_yuv)
{
const int dx = get_global_id(0);
const int dy = get_global_id(1);
const int col = mul24(dx, 4); // Current column in rgb image
const int row = mul24(dy, 4); // Current row in rgb image
const int bgri_start = mad24(row, RGB_STRIDE, mul24(col, 3)); // Start offset of rgb data being converted
const int yi_start = mad24(row, WIDTH, col); // Start offset in the target yuv buffer
int uvi = mad24(row / 2, WIDTH, RGB_SIZE + col);
int num_col = min(WIDTH - col, 4);
int num_row = min(HEIGHT - row, 4);
if(num_row == 4) {
const uchar8 rgbs0_0 = vload8(0, rgb + bgri_start);
const uchar8 rgbs0_1 = vload8(0, rgb + bgri_start + 8);
const uchar8 rgbs1_0 = vload8(0, rgb + bgri_start + RGB_STRIDE);
const uchar8 rgbs1_1 = vload8(0, rgb + bgri_start + RGB_STRIDE + 8);
const uchar8 rgbs2_0 = vload8(0, rgb + bgri_start + RGB_STRIDE * 2);
const uchar8 rgbs2_1 = vload8(0, rgb + bgri_start + RGB_STRIDE * 2 + 8);
const uchar8 rgbs3_0 = vload8(0, rgb + bgri_start + RGB_STRIDE * 3);
const uchar8 rgbs3_1 = vload8(0, rgb + bgri_start + RGB_STRIDE * 3 + 8);
if(num_col == 4) {
convert_4_ys(out_yuv, yi_start, rgbs0_0, rgbs0_1);
convert_4_ys(out_yuv, yi_start + WIDTH, rgbs1_0, rgbs1_1);
convert_4_ys(out_yuv, yi_start + WIDTH * 2, rgbs2_0, rgbs2_1);
convert_4_ys(out_yuv, yi_start + WIDTH * 3, rgbs3_0, rgbs3_1);
convert_2_uvs(out_yuv, uvi, rgbs0_0, rgbs1_0, rgbs0_1, rgbs1_1);
convert_2_uvs(out_yuv, uvi + WIDTH, rgbs2_0, rgbs3_0, rgbs2_1, rgbs3_1);
} else if(num_col == 2) {
convert_2_ys(out_yuv, yi_start, rgbs0_0);
convert_2_ys(out_yuv, yi_start + WIDTH, rgbs1_0);
convert_2_ys(out_yuv, yi_start + WIDTH * 2, rgbs2_0);
convert_2_ys(out_yuv, yi_start + WIDTH * 3, rgbs3_0);
convert_uv(out_yuv, uvi, rgbs0_0, rgbs1_0);
convert_uv(out_yuv, uvi + WIDTH, rgbs2_0, rgbs3_0);
}
} else {
const uchar8 rgbs0_0 = vload8(0, rgb + bgri_start);
const uchar8 rgbs0_1 = vload8(0, rgb + bgri_start + 8);
const uchar8 rgbs1_0 = vload8(0, rgb + bgri_start + RGB_STRIDE);
const uchar8 rgbs1_1 = vload8(0, rgb + bgri_start + RGB_STRIDE + 8);
if(num_col == 4) {
convert_4_ys(out_yuv, yi_start, rgbs0_0, rgbs0_1);
convert_4_ys(out_yuv, yi_start + WIDTH, rgbs1_0, rgbs1_1);
convert_2_uvs(out_yuv, uvi, rgbs0_0, rgbs1_0, rgbs0_1, rgbs1_1);
} else if(num_col == 2) {
convert_2_ys(out_yuv, yi_start, rgbs0_0);
convert_2_ys(out_yuv, yi_start + WIDTH, rgbs1_0);
convert_uv(out_yuv, uvi, rgbs0_0, rgbs1_0);
}
}
}
|
2301_81045437/openpilot
|
tools/sim/rgb_to_nv12.cl
|
OpenCL
|
mit
| 5,378
|
#!/usr/bin/env python
import argparse
from typing import Any
from multiprocessing import Queue
from openpilot.tools.sim.bridge.metadrive.metadrive_bridge import MetaDriveBridge
def create_bridge(dual_camera, high_quality):
queue: Any = Queue()
simulator_bridge = MetaDriveBridge(dual_camera, high_quality)
simulator_process = simulator_bridge.run(queue)
return queue, simulator_process, simulator_bridge
def main():
_, simulator_process, _ = create_bridge(True, False)
simulator_process.join()
def parse_args(add_args=None):
parser = argparse.ArgumentParser(description='Bridge between the simulator and openpilot.')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--high_quality', action='store_true')
parser.add_argument('--dual_camera', action='store_true')
return parser.parse_args(add_args)
if __name__ == "__main__":
args = parse_args()
queue, simulator_process, simulator_bridge = create_bridge(args.dual_camera, args.high_quality)
if args.joystick:
# start input poll for joystick
from openpilot.tools.sim.lib.manual_ctrl import wheel_poll_thread
wheel_poll_thread(queue)
else:
# start input poll for keyboard
from openpilot.tools.sim.lib.keyboard_ctrl import keyboard_poll_thread
keyboard_poll_thread(queue)
simulator_bridge.shutdown()
simulator_process.join()
|
2301_81045437/openpilot
|
tools/sim/run_bridge.py
|
Python
|
mit
| 1,371
|
#!/bin/bash
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)"
cd "$DIR"
OPENPILOT_DIR="/tmp/openpilot"
if [[ -n "$MOUNT_OPENPILOT" ]]; then
OPENPILOT_DIR="$(dirname $(dirname $DIR))"
EXTRA_ARGS="-v $OPENPILOT_DIR:$OPENPILOT_DIR -e PYTHONPATH=$OPENPILOT_DIR:$PYTHONPATH"
fi
if [[ "$CI" ]]; then
CMD="CI=1 ${OPENPILOT_DIR}/tools/sim/tests/test_metadrive_integration.py"
else
# expose X to the container
xhost +local:root
docker pull ghcr.io/commaai/openpilot-sim:latest
CMD="./tmux_script.sh $*"
EXTRA_ARGS="${EXTRA_ARGS} -it"
fi
docker kill openpilot_client || true
docker run --net=host \
--name openpilot_client \
--rm \
--gpus all \
--device=/dev/dri:/dev/dri \
--device=/dev/input:/dev/input \
-v /tmp/.X11-unix:/tmp/.X11-unix \
--shm-size 1G \
-e DISPLAY=$DISPLAY \
-e QT_X11_NO_MITSHM=1 \
-w "$OPENPILOT_DIR/tools/sim" \
$EXTRA_ARGS \
ghcr.io/commaai/openpilot-sim:latest \
/bin/bash -c "$CMD"
|
2301_81045437/openpilot
|
tools/sim/start_openpilot_docker.sh
|
Shell
|
mit
| 958
|
import pytest
def pytest_addoption(parser):
  parser.addoption("--test_duration", action="store", default=60, type=int, help="Seconds to run the MetaDrive test drive")
@pytest.fixture
def test_duration(request):
return request.config.getoption("--test_duration")
|
2301_81045437/openpilot
|
tools/sim/tests/conftest.py
|
Python
|
mit
| 258
|
#!/usr/bin/env python3
# type: ignore
import os
import time
import argparse
import signal
from collections import defaultdict
import cereal.messaging as messaging
from openpilot.tools.lib.logreader import LogReader
def sigint_handler(signal, frame):
exit(0)
signal.signal(signal.SIGINT, sigint_handler)
class SteeringAccuracyTool:
all_groups = {"germany": (45, "45 - up m/s // 162 - up km/h // 101 - up mph"),
"veryfast": (35, "35 - 45 m/s // 126 - 162 km/h // 78 - 101 mph"),
"fast": (25, "25 - 35 m/s // 90 - 126 km/h // 56 - 78 mph"),
"medium": (15, "15 - 25 m/s // 54 - 90 km/h // 34 - 56 mph"),
"slow": (5, " 5 - 15 m/s // 18 - 54 km/h // 11 - 34 mph"),
"crawl": (0, " 0 - 5 m/s // 0 - 18 km/h // 0 - 11 mph")}
def __init__(self, args):
self.msg_cnt = 0
self.cnt = 0
self.total_error = 0
if args.group == "all":
self.display_groups = self.all_groups.keys()
elif args.group in self.all_groups.keys():
self.display_groups = [args.group]
else:
raise ValueError("invalid speed group, see help")
self.speed_group_stats = {}
for group in self.all_groups:
self.speed_group_stats[group] = defaultdict(lambda: {'err': 0, "cnt": 0, "=": 0, "+": 0, "-": 0, "steer": 0, "limited": 0, "saturated": 0, "dpp": 0})
def update(self, sm):
self.msg_cnt += 1
lateralControlState = sm['controlsState'].lateralControlState
control_type = list(lateralControlState.to_dict().keys())[0]
control_state = lateralControlState.__getattr__(control_type)
v_ego = sm['carState'].vEgo
active = sm['controlsState'].active
steer = sm['carOutput'].actuatorsOutput.steer
standstill = sm['carState'].standstill
    steer_limited = abs(sm['carControl'].actuators.steer - sm['carOutput'].actuatorsOutput.steer) > 1e-2
overriding = sm['carState'].steeringPressed
changing_lanes = sm['modelV2'].meta.laneChangeState != 0
model_points = sm['modelV2'].position.y
# must be engaged, not at standstill, not overriding steering, and not changing lanes
if active and not standstill and not overriding and not changing_lanes:
self.cnt += 1
# wait 5 seconds after engage / standstill / override / lane change
if self.cnt >= 500:
actual_angle = control_state.steeringAngleDeg
desired_angle = control_state.steeringAngleDesiredDeg
# calculate error before rounding, then round for stats grouping
angle_error = abs(desired_angle - actual_angle)
actual_angle = round(actual_angle, 1)
desired_angle = round(desired_angle, 1)
angle_error = round(angle_error, 2)
angle_abs = int(abs(round(desired_angle, 0)))
for group, group_props in self.all_groups.items():
if v_ego > group_props[0]:
# collect stats
self.speed_group_stats[group][angle_abs]["cnt"] += 1
self.speed_group_stats[group][angle_abs]["err"] += angle_error
self.speed_group_stats[group][angle_abs]["steer"] += abs(steer)
if len(model_points):
self.speed_group_stats[group][angle_abs]["dpp"] += abs(model_points[0])
if steer_limited:
self.speed_group_stats[group][angle_abs]["limited"] += 1
if control_state.saturated:
self.speed_group_stats[group][angle_abs]["saturated"] += 1
if actual_angle == desired_angle:
self.speed_group_stats[group][angle_abs]["="] += 1
else:
if desired_angle == 0.:
overshoot = True
else:
overshoot = desired_angle < actual_angle if desired_angle > 0. else desired_angle > actual_angle
self.speed_group_stats[group][angle_abs]["+" if overshoot else "-"] += 1
break
else:
self.cnt = 0
if self.msg_cnt % 100 == 0:
print(chr(27) + "[2J")
if self.cnt != 0:
print("COLLECTING ...\n")
else:
print("DISABLED (not active, standstill, steering override, or lane change)\n")
for group in self.display_groups:
if len(self.speed_group_stats[group]) > 0:
print(f"speed group: {group:10s} {self.all_groups[group][1]:>96s}")
print(f" {'-'*118}")
for k in sorted(self.speed_group_stats[group].keys()):
v = self.speed_group_stats[group][k]
print(f' {k:#2}° | actuator:{int(v["steer"] / v["cnt"] * 100):#3}% ' +
f'| error: {round(v["err"] / v["cnt"], 2):2.2f}° | -:{int(v["-"] / v["cnt"] * 100):#3}% ' +
f'| =:{int(v["="] / v["cnt"] * 100):#3}% | +:{int(v["+"] / v["cnt"] * 100):#3}% | lim:{v["limited"]:#5} ' +
f'| sat:{v["saturated"]:#5} | path dev: {round(v["dpp"] / v["cnt"], 2):2.2f}m | total: {v["cnt"]:#5}')
print("")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Steering accuracy measurement tool')
parser.add_argument('--route', help="route name")
parser.add_argument('--addr', default='127.0.0.1', help="IP address for optional ZMQ listener, default to msgq")
parser.add_argument('--group', default='all', help="speed group to display, [crawl|slow|medium|fast|veryfast|germany|all], default to all")
parser.add_argument('--cache', default=False, action='store_true', help="use cached data, default to False")
args = parser.parse_args()
if args.cache:
os.environ['FILEREADER_CACHE'] = '1'
tool = SteeringAccuracyTool(args)
if args.route is not None:
print(f"loading {args.route}...")
lr = LogReader(args.route, sort_by_time=True)
sm = {}
    for msg in lr:
      if msg.which() == 'carState':
        sm['carState'] = msg.carState
      elif msg.which() == 'carControl':
        sm['carControl'] = msg.carControl
      elif msg.which() == 'carOutput':
        sm['carOutput'] = msg.carOutput
      elif msg.which() == 'controlsState':
        sm['controlsState'] = msg.controlsState
      elif msg.which() == 'modelV2':
        sm['modelV2'] = msg.modelV2
      if msg.which() == 'carControl' and all(k in sm for k in ('carState', 'carOutput', 'controlsState', 'modelV2')):
        tool.update(sm)
else:
if args.addr != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
carControl = messaging.sub_sock('carControl', addr=args.addr, conflate=True)
sm = messaging.SubMaster(['carState', 'carControl', 'carOutput', 'controlsState', 'modelV2'], addr=args.addr)
time.sleep(1) # Make sure all submaster data is available before going further
print("waiting for messages...")
while messaging.recv_one(carControl):
sm.update()
tool.update(sm)
|
2301_81045437/openpilot
|
tools/tuning/measure_steering_accuracy.py
|
Python
|
mit
| 6,706
|
#!/usr/bin/env bash
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# NOTE: this is used in a docker build, so do not run any scripts here.
$DIR/install_ubuntu_dependencies.sh
$DIR/install_python_dependencies.sh
echo
echo "---- OPENPILOT SETUP DONE ----"
echo "Open a new shell or configure your active shell env by running:"
echo "source ~/.bashrc"
|
2301_81045437/openpilot
|
tools/ubuntu_setup.sh
|
Shell
|
mit
| 385
|
import cv2 as cv
import numpy as np
class Camera:
def __init__(self, cam_type_state, stream_type, camera_id):
try:
camera_id = int(camera_id)
except ValueError: # allow strings, ex: /dev/video0
pass
self.cam_type_state = cam_type_state
self.stream_type = stream_type
self.cur_frame_id = 0
self.cap = cv.VideoCapture(camera_id)
    self.W = int(self.cap.get(cv.CAP_PROP_FRAME_WIDTH))
    self.H = int(self.cap.get(cv.CAP_PROP_FRAME_HEIGHT))
  @classmethod
  def bgr2nv12(cls, bgr):
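    # I420 lays out Y, then the full U plane, then the full V plane; NV12 keeps
    # the same Y plane but interleaves U and V. The bottom third of the I420
    # buffer holds both chroma planes, so split it in two and zip them together.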
yuv = cv.cvtColor(bgr, cv.COLOR_BGR2YUV_I420)
uv_row_cnt = yuv.shape[0] // 3
uv_plane = np.transpose(yuv[uv_row_cnt * 2:].reshape(2, -1), [1, 0])
yuv[uv_row_cnt * 2:] = uv_plane.reshape(uv_row_cnt, -1)
return yuv
def read_frames(self):
while True:
      sts, frame = self.cap.read()
if not sts:
break
yuv = Camera.bgr2nv12(frame)
yield yuv.data.tobytes()
self.cap.release()
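# Minimal smoke test, assuming a webcam is available at index 0. The
# stream_type argument is unused inside this class, so None is fine here.
# An NV12 frame is W*H*1.5 bytes: a full-resolution Y plane plus an
# interleaved UV plane at quarter resolution per channel.
if __name__ == "__main__":
  cam = Camera("roadCameraState", None, 0)
  for buf in cam.read_frames():
    assert len(buf) == int(cam.W * cam.H * 1.5)
    print(f"got a {int(cam.W)}x{int(cam.H)} NV12 frame ({len(buf)} bytes)")
    break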
|
2301_81045437/openpilot
|
tools/webcam/camera.py
|
Python
|
mit
| 945
|
#!/usr/bin/env python3
import threading
import os
from collections import namedtuple
from cereal.visionipc import VisionIpcServer, VisionStreamType
from cereal import messaging
from openpilot.tools.webcam.camera import Camera
from openpilot.common.realtime import Ratekeeper
DUAL_CAM = os.getenv("DUAL_CAMERA")
CameraType = namedtuple("CameraType", ["msg_name", "stream_type", "cam_id"])
CAMERAS = [
CameraType("roadCameraState", VisionStreamType.VISION_STREAM_ROAD, os.getenv("CAMERA_ROAD_ID", "0")),
CameraType("driverCameraState", VisionStreamType.VISION_STREAM_DRIVER, os.getenv("CAMERA_DRIVER_ID", "1")),
]
if DUAL_CAM:
CAMERAS.append(CameraType("wideRoadCameraState", VisionStreamType.VISION_STREAM_WIDE_ROAD, DUAL_CAM))
class Camerad:
def __init__(self):
self.pm = messaging.PubMaster([c.msg_name for c in CAMERAS])
self.vipc_server = VisionIpcServer("camerad")
self.cameras = []
for c in CAMERAS:
cam = Camera(c.msg_name, c.stream_type, c.cam_id)
assert cam.cap.isOpened(), f"Can't find camera {c}"
self.cameras.append(cam)
self.vipc_server.create_buffers(c.stream_type, 20, False, cam.W, cam.H)
self.vipc_server.start_listener()
def _send_yuv(self, yuv, frame_id, pub_type, yuv_type):
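    # frames are paced at 20 fps, so synthesize timestamps 50 ms (0.05 s) apart, in nanoseconds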
eof = int(frame_id * 0.05 * 1e9)
self.vipc_server.send(yuv_type, yuv, frame_id, eof, eof)
dat = messaging.new_message(pub_type, valid=True)
msg = {
"frameId": frame_id,
"transform": [1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0]
}
setattr(dat, pub_type, msg)
self.pm.send(pub_type, dat)
def camera_runner(self, cam):
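    # pace publishing at 20 Hz to match the 50 ms frame timestamps above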
rk = Ratekeeper(20, None)
while cam.cap.isOpened():
for yuv in cam.read_frames():
self._send_yuv(yuv, cam.cur_frame_id, cam.cam_type_state, cam.stream_type)
cam.cur_frame_id += 1
rk.keep_time()
def run(self):
threads = []
for cam in self.cameras:
cam_thread = threading.Thread(target=self.camera_runner, args=(cam,))
cam_thread.start()
threads.append(cam_thread)
for t in threads:
t.join()
def main():
camerad = Camerad()
camerad.run()
if __name__ == "__main__":
main()
|
2301_81045437/openpilot
|
tools/webcam/camerad.py
|
Python
|
mit
| 2,212
|
#!/bin/bash
# set BLOCK so manager.py skips the stock camerad; this script provides it instead
export BLOCK="${BLOCK},camerad"
export USE_WEBCAM="1"
# Change camera index according to your setting
export CAMERA_ROAD_ID="0"
export CAMERA_DRIVER_ID="1"
export DUAL_CAMERA="2" # camera index for wide road camera
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
$DIR/camerad.py
|
2301_81045437/openpilot
|
tools/webcam/start_camerad.sh
|
Shell
|
mit
| 360
|
<!DOCTYPE html>
<html lang="ch">
<head>
<meta charset="UTF-8">
<title>IKUN Studio</title>
<style>
*{
box-sizing: border-box;
}
body{
margin: 0;
font-family: Arial;
padding: 10px;
background: #f1f1f1;
}
.header{
background-color: #F1F1F1;
text-align: center;
padding: 30px;
}
.topnav{
overflow: hidden;
background-color: #333;
}
.topnav a{
float: left;
display: block;
color: #f2f2f2;
text-align: center;
padding: 14px 16px;
text-decoration: none;
transition-duration: 0.5s;
}
.artbottom{
display: inline-block;
border-radius: 4px;
background-color: #333;
border: none;
color: #FFFFFF;
text-align: center;
font-size: 28px;
padding: 20px;
width: 250px;
transition: all 0.5s;
cursor: pointer;
margin: 5px;
}
.artbottom span{
cursor: pointer;
display: inline-block;
position: relative;
transition: 0.5s;
}
.artbottom span:after{
content: '»';
position: absolute;
opacity: 0;
top: 0;
right: -20px;
transition: 0.5s;
}
.artbottom:hover span{
padding-right: 25px;
}
.artbottom:hover span:after{
opacity: 1;
right: 0;
}
.topnav a:hover{
background-color: #ddd;
color: black;
font-size: 20px;
}
.leftcolumn{
float: left;
width: 75%;
}
.rightcolumn{
float: left;
width: 25%;
background-color: #f1f1f1;
padding-left: 20px;
}
.row:after{
content: "";
display: table;
clear: both;
}
.card{
background-color: white;
padding: 20px;
margin-top: 20px;
border-radius: 12px;
}
.footer{
background-color: #F1F1F1;
            text-align: center;
padding: 20px;
margin-top: 20px;
}
@media screen and (max-width: 1300px) {
.leftcolumn, .rightcolumn {
width: 100%;
padding: 0;
}
}
@media screen and (max-width: 400px) {
.topnav a {
float: none;
width: 100%;
}
}
</style>
</head>
<body>
<div class="header">
<h1>IKUN Studio<sub style="font-size: 15px;">爱坤工作室</sub></h1>
<p>主页</p>
</div>
<div class="topnav">
<a href="main.html">● <sty style="color: chartreuse;">主页</sty></a>
<a href="#">文章</a>
<a href="#">工具</a>
</div>
<div class="row">
<div class="leftcolumn">
<div class="card">
<h2>css基础</h2>
<h5>2025年6月1日</h5>
<p>毕</p>
<p>毕</p>
</div>
<div class="card">
<h2>css与html布局</h2>
<h5>2025年6月1日</h5>
<p>毕</p>
</div>
</div>
<div class="rightcolumn">
<div class="card">
<h2>关于我</h2>
<p>
我是爱坤
</p>
</div>
<div class="card">
<h3>热门文章</h3>
<div class="artbottom"><span>文章1</span></div>
<div class="artbottom"><span>文章2</span></div>
<div class="artbottom"><span>文章3</span></div>
</div>
<div class="card">
<h3>联系我</h3>
<p>
QQ:3825157084
<br>
Email:yinchenen0415@outlook.com
</p>
</div>
</div>
</div>
<div class="footer">
IKUN Studio : yinchenen & snaped © 版权所有
</div>
</body>
</html>
|
2301_81414198/IKUN
|
main.html
|
HTML
|
unknown
| 5,308
|
import streamlit as st
import pandas as pd
# Set the page title
st.set_page_config(page_title="公司信息查询系统", layout="wide")
st.title("公司信息查询系统")
# Read the Excel file
@st.cache_data
def load_data():
df = pd.read_excel("output.xlsx")
return df
# Load the data
df = load_data()
# Create the search box
search_term = st.text_input("请输入公司简称进行搜索:")
# Search logic
if search_term:
    # fuzzy match on the company short name
result = df[df["公司简称"].str.contains(search_term, case=False, na=False)]
if len(result) > 0:
st.success(f"找到 {len(result)} 条匹配记录")
        # show the matching rows
st.dataframe(result, use_container_width=True)
else:
st.warning("未找到匹配的记录")
# Show summary statistics
st.subheader("数据统计")
st.write(f"总记录数:{len(df)}")
|
2301_81200836/classmateCode
|
app.py
|
Python
|
unknown
| 884
|
package com.example.a20231015;
import android.content.Context;
import androidx.test.platform.app.InstrumentationRegistry;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
/**
* Instrumented test, which will execute on an Android device.
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
@Test
public void useAppContext() {
// Context of the app under test.
Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
assertEquals("com.example.a20231015", appContext.getPackageName());
}
}
|
2301_77966824/learning-android-studio
|
2024/09/20231015/app/src/androidTest/java/com/example/a20231015/ExampleInstrumentedTest.java
|
Java
|
unknown
| 756
|
package com.example.a20231015;
import androidx.appcompat.app.AppCompatActivity;
import android.os.Bundle;
public class MainActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
}
}
|
2301_77966824/learning-android-studio
|
2024/09/20231015/app/src/main/java/com/example/a20231015/MainActivity.java
|
Java
|
unknown
| 334
|
package com.example.themyno1;
import android.content.Context;
import androidx.test.platform.app.InstrumentationRegistry;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
/**
* Instrumented test, which will execute on an Android device.
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
@Test
public void useAppContext() {
// Context of the app under test.
Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
assertEquals("com.example.themyno1", appContext.getPackageName());
}
}
|
2301_77966824/learning-android-studio
|
2024/09/no1/app/src/androidTest/java/com/example/themyno1/ExampleInstrumentedTest.java
|
Java
|
unknown
| 754
|
package com.example.themyno1;
import androidx.appcompat.app.AppCompatActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.Toast;
public class MainActivity extends AppCompatActivity {
private Button btn1;
private ImageView img_icon;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_login);
        //2. Bind views
btn1 = findViewById(R.id.btn_login);
img_icon = findViewById(R.id.img_icon);
        // Set the click listener
btn1.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
                // On click: show a success toast and swap the icon
Toast.makeText(MainActivity.this, "登陆成功", Toast.LENGTH_SHORT).show();
img_icon.setImageResource(R.drawable.head);
}
});
}
}
|
2301_77966824/learning-android-studio
|
2024/09/no1/app/src/main/java/com/example/themyno1/MainActivity.java
|
Java
|
unknown
| 1,001
|
package com.example.themyno1;
import androidx.appcompat.app.AppCompatActivity;
import android.annotation.SuppressLint;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.Toast;
public class login extends AppCompatActivity {
    // Declare views
private Button loginButton;
private ImageView iconImageView;
    // Declare the input fields
private EditText usernameEditText;
private EditText passwordEditText;
@SuppressLint("MissingInflatedId")
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_login);
        // Bind views
loginButton = findViewById(R.id.btn_login);
iconImageView = findViewById(R.id.img_icon);
usernameEditText = findViewById(R.id.Edit_name);
passwordEditText = findViewById(R.id.Edit_password);
        // Set the click listener
loginButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
String username = usernameEditText.getText().toString();
String password = passwordEditText.getText().toString();
if (!isValidUsername(username)) {
Toast.makeText(login.this, "请输入正确的用户名!!!", Toast.LENGTH_SHORT).show();
} else if (!isValidPassword(password)) {
Toast.makeText(login.this, "请输入正确的密码!!!", Toast.LENGTH_SHORT).show();
} else {
Toast.makeText(login.this, "恭喜" + username + "登录成功了!" + "你的密码是:" + password, Toast.LENGTH_SHORT).show();
}
}
});
}
private boolean isValidUsername(String username) {
return "test".equals(username);
}
private boolean isValidPassword(String password) {
return "123456".equals(password);
}
}
|
2301_77966824/learning-android-studio
|
2024/09/no1/app/src/main/java/com/example/themyno1/login.java
|
Java
|
unknown
| 2,074
|
package com.example.a10_30_01;
import android.content.Context;
import androidx.test.platform.app.InstrumentationRegistry;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
/**
* Instrumented test, which will execute on an Android device.
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
@Test
public void useAppContext() {
// Context of the app under test.
Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
assertEquals("com.example.a10_30_01", appContext.getPackageName());
}
}
|
2301_77966824/learning-android-studio
|
2024/10/10_30_01/app/src/androidTest/java/com/example/a10_30_01/ExampleInstrumentedTest.java
|
Java
|
unknown
| 756
|
package com.example.a10_30_01;
import androidx.appcompat.app.AppCompatActivity;
import android.os.Bundle;
public class MainActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
}
}
|
2301_77966824/learning-android-studio
|
2024/10/10_30_01/app/src/main/java/com/example/a10_30_01/MainActivity.java
|
Java
|
unknown
| 334
|
package com.example.a10_30_01;
import androidx.appcompat.app.AppCompatActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.Toast;
public class login extends AppCompatActivity {
    //1. Declare views
    private Button btn1;
    // private: accessible only inside this class
    // public: accessible from anywhere
    // protected: accessible to subclasses and the same package
    private ImageView img_icon;
    // Declare the input fields
    private EditText Edit_name;
    private EditText Edit_password;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_login);
        //2. Bind views
btn1 = findViewById(R.id.btn_login);
img_icon = findViewById(R.id.img_icon);
Edit_name = findViewById(R.id.Edit_name);
Edit_password = findViewById(R.id.Edit_password);
        // Set the click listener
btn1.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
                // On click: swap the icon, then validate the inputs
                //Toast.makeText(login.this, "恭喜你,登陆成功!", Toast.LENGTH_SHORT).show();
                img_icon.setImageResource(R.drawable.cancer);
                // grab the typed values; getText() returns the content, toString() converts it to a String
                String name = Edit_name.getText().toString();
                String password = Edit_password.getText().toString();
// if (name.equals("test")) {
// //姓名输入正确,进行密码判断
// if (password.equals("123456")) {
// //提醒用户登录成功,跳转页面
// Toast.makeText(MainActivity.this, "恭喜" + name + "登录成功了!" + "你的密码是:" + password, Toast.LENGTH_SHORT).show();
// } else {
// //提醒用户密码输入有误
// Toast.makeText(MainActivity.this, "请输入正确的密码!!!", Toast.LENGTH_SHORT).show();
// }
// } else {
// //用户输入的姓名与test不匹配,提醒用户姓名输入有误
// Toast.makeText(MainActivity.this, "请输入正确的用户名!!!", Toast.LENGTH_SHORT).show();
// }
if (!name.equals("test")) {
//通过!取反,判断与test不匹配的情况,提醒用户输入有误
Toast.makeText(MainActivity.this, "请输入正确的用户名!!!", Toast.LENGTH_SHORT).show();
} else if (!password.equals("123456")) {
//通过!取反,判断与123456不匹配的情况,提醒用户密码输入有误
Toast.makeText(MainActivity.this, "请输入正确的密码!!!", Toast.LENGTH_SHORT).show();
} else {
Toast.makeText(MainActivity.this, "恭喜" + name + "登录成功了!" + "你的密码是:" + password, Toast.LENGTH_SHORT).show();
}
}
});
}
}
|
2301_77966824/learning-android-studio
|
2024/10/10_30_01/app/src/main/java/com/example/a10_30_01/login.java
|
Java
|
unknown
| 3,420
|
package com.example.a2024_10_15_01;
import android.content.Context;
import androidx.test.platform.app.InstrumentationRegistry;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
/**
* Instrumented test, which will execute on an Android device.
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
@Test
public void useAppContext() {
// Context of the app under test.
Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
assertEquals("com.example.a2024_10_15_01", appContext.getPackageName());
}
}
|
2301_77966824/learning-android-studio
|
2024/10/2024_10_15_01/app/src/androidTest/java/com/example/a2024_10_15_01/ExampleInstrumentedTest.java
|
Java
|
unknown
| 766
|
package com.example.a2024_10_15_01;
import androidx.appcompat.app.AppCompatActivity;
import android.os.Bundle;
public class MainActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
}
}
|
2301_77966824/learning-android-studio
|
2024/10/2024_10_15_01/app/src/main/java/com/example/a2024_10_15_01/MainActivity.java
|
Java
|
unknown
| 339
|
package com.example.food;
import android.content.Context;
import androidx.test.platform.app.InstrumentationRegistry;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
/**
* Instrumented test, which will execute on an Android device.
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
@Test
public void useAppContext() {
// Context of the app under test.
Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
assertEquals("com.example.food", appContext.getPackageName());
}
}
|
2301_77966824/learning-android-studio
|
2024/10/Food/app/src/androidTest/java/com/example/food/ExampleInstrumentedTest.java
|
Java
|
unknown
| 746
|
package com.example.food;
import android.content.res.Resources;
import android.graphics.drawable.Drawable;
import android.os.Build;
import android.os.Bundle;
import android.view.Window;
import android.widget.RadioButton;
import android.widget.RadioGroup;
import android.widget.Toast;
import androidx.appcompat.app.AppCompatActivity;
public class MainActivity extends AppCompatActivity {
private RadioGroup myradioGroup;
private RadioButton rbutton1, rbutton2, rbutton3, rbutton4;
private Resources res;
private Drawable iconHomeTrue, iconHomeFalse, iconCommunityTrue, iconCommunityFalse, iconOrderTrue, iconOrderFalse, iconMeTrue, iconMeFalse;
private int fontColorFalse, fontColorTrue;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
getSupportActionBar().hide();
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
Window window = getWindow();
window.setStatusBarColor(getResources().getColor(android.R.color.white));
}
initview();
navigation();
}
private void initview() {
myradioGroup = findViewById(R.id.main_radioGroup);
rbutton1 = findViewById(R.id.main_radio0);
rbutton2 = findViewById(R.id.main_radio1);
rbutton3 = findViewById(R.id.main_radio2);
rbutton4 = findViewById(R.id.main_radio3);
res = getResources();
iconHomeTrue = res.getDrawable(R.drawable.icon_home_true);
iconHomeFalse = res.getDrawable(R.drawable.icon_home_false);
iconCommunityTrue = res.getDrawable(R.drawable.icon_community_true);
iconCommunityFalse = res.getDrawable(R.drawable.icon_community_false);
iconOrderTrue = res.getDrawable(R.drawable.icon_order_true);
iconOrderFalse = res.getDrawable(R.drawable.icon_order_false);
iconMeTrue = res.getDrawable(R.drawable.icon_me_true);
iconMeFalse = res.getDrawable(R.drawable.icon_me_false);
fontColorFalse = res.getColor(R.color.navigation_false);
fontColorTrue = res.getColor(R.color.p_green);
setAllImage();
}
private void setAllImage() {
rbutton1.setCompoundDrawablesWithIntrinsicBounds(null, iconHomeFalse, null, null);
rbutton2.setCompoundDrawablesWithIntrinsicBounds(null, iconCommunityFalse, null, null);
rbutton3.setCompoundDrawablesWithIntrinsicBounds(null, iconOrderFalse, null, null);
rbutton4.setCompoundDrawablesWithIntrinsicBounds(null, iconMeFalse, null, null);
}
private void navigation() {
myradioGroup.setOnCheckedChangeListener((radioGroup, checkedId) -> {
setAllImage(); // Reset all images to false state
RadioButton checkedButton = null;
switch (checkedId) {
case R.id.main_radio0:
checkedButton = rbutton1;
break;
case R.id.main_radio1:
checkedButton = rbutton2;
break;
case R.id.main_radio2:
checkedButton = rbutton3;
break;
case R.id.main_radio3:
checkedButton = rbutton4;
break;
}
if (checkedButton != null) {
checkedButton.setTextColor(fontColorTrue);
switch (checkedId) {
case R.id.main_radio0:
checkedButton.setCompoundDrawablesWithIntrinsicBounds(null, iconHomeTrue, null, null);
Toast.makeText(MainActivity.this, "首页", Toast.LENGTH_SHORT).show();
break;
case R.id.main_radio1:
checkedButton.setCompoundDrawablesWithIntrinsicBounds(null, iconCommunityTrue, null, null);
Toast.makeText(MainActivity.this, "吃货驾到", Toast.LENGTH_SHORT).show();
break;
case R.id.main_radio2:
checkedButton.setCompoundDrawablesWithIntrinsicBounds(null, iconOrderTrue, null, null);
Toast.makeText(MainActivity.this, "我的订单", Toast.LENGTH_SHORT).show();
break;
case R.id.main_radio3:
checkedButton.setCompoundDrawablesWithIntrinsicBounds(null, iconMeTrue, null, null);
Toast.makeText(MainActivity.this, "个人中心", Toast.LENGTH_SHORT).show();
break;
}
}
});
}
}
|
2301_77966824/learning-android-studio
|
2024/10/Food/app/src/main/java/com/example/food/MainActivity.java
|
Java
|
unknown
| 4,661
|
package com.example.my10_30;
import android.content.Context;
import androidx.test.platform.app.InstrumentationRegistry;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
/**
* Instrumented test, which will execute on an Android device.
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
@Test
public void useAppContext() {
// Context of the app under test.
Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
assertEquals("com.example.my10_30", appContext.getPackageName());
}
}
|
2301_77966824/learning-android-studio
|
2024/10/My10_30/app/src/androidTest/java/com/example/my10_30/ExampleInstrumentedTest.java
|
Java
|
unknown
| 752
|
package com.example.my10_30;
import androidx.appcompat.app.AppCompatActivity;
import android.os.Bundle;
public class MainActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
}
}
|
2301_77966824/learning-android-studio
|
2024/10/My10_30/app/src/main/java/com/example/my10_30/MainActivity.java
|
Java
|
unknown
| 332
|
package com.example.myapplication;
import android.content.Context;
import androidx.test.platform.app.InstrumentationRegistry;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
/**
* Instrumented test, which will execute on an Android device.
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
@Test
public void useAppContext() {
// Context of the app under test.
Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
assertEquals("com.example.myapplication", appContext.getPackageName());
}
}
|
2301_77966824/learning-android-studio
|
2024/10/MyApplication/app/src/androidTest/java/com/example/myapplication/ExampleInstrumentedTest.java
|
Java
|
unknown
| 764
|
package com.example.myapplication;
import androidx.appcompat.app.AppCompatActivity;
import android.os.Bundle;
public class MainActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
}
}
|
2301_77966824/learning-android-studio
|
2024/10/MyApplication/app/src/main/java/com/example/myapplication/MainActivity.java
|
Java
|
unknown
| 338
|
package com.example.themyno1;
import android.content.Context;
import androidx.test.platform.app.InstrumentationRegistry;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
/**
* Instrumented test, which will execute on an Android device.
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
@Test
public void useAppContext() {
// Context of the app under test.
Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
assertEquals("com.example.themyno1", appContext.getPackageName());
}
}
|
2301_77966824/learning-android-studio
|
2024/10/themyno1/app/src/androidTest/java/com/example/themyno1/ExampleInstrumentedTest.java
|
Java
|
unknown
| 754
|
package com.example.themyno1;
import androidx.appcompat.app.AppCompatActivity;
import android.annotation.SuppressLint;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.Toast;
public class MainActivity extends AppCompatActivity {
    // Declare views
private Button loginButton;
private ImageView iconImageView;
    // Declare the input fields
private EditText usernameEditText;
private EditText passwordEditText;
@SuppressLint("MissingInflatedId")
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_login);
        // Bind views
loginButton = findViewById(R.id.btn_login);
iconImageView = findViewById(R.id.img_icon);
usernameEditText = findViewById(R.id.Edit_name);
passwordEditText = findViewById(R.id.Edit_password);
        // Set the click listener
loginButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
String username = usernameEditText.getText().toString();
String password = passwordEditText.getText().toString();
if (!isValidUsername(username)) {
Toast.makeText(MainActivity.this, "请输入正确的用户名!!!", Toast.LENGTH_SHORT).show();
} else if (!isValidPassword(password)) {
Toast.makeText(MainActivity.this, "请输入正确的密码!!!", Toast.LENGTH_SHORT).show();
} else {
Toast.makeText(MainActivity.this, "恭喜" + username + "登录成功了!" + "你的密码是:" + password, Toast.LENGTH_SHORT).show();
}
}
});
}
private boolean isValidUsername(String username) {
return "test".equals(username);
}
private boolean isValidPassword(String password) {
return "123456".equals(password);
}
}
|
2301_77966824/learning-android-studio
|
2024/10/themyno1/app/src/main/java/com/example/themyno1/MainActivity.java
|
Java
|
unknown
| 2,102
|
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
export default [
{ ignores: ['dist'] },
{
files: ['**/*.{js,jsx}'],
languageOptions: {
ecmaVersion: 2020,
globals: globals.browser,
parserOptions: {
ecmaVersion: 'latest',
ecmaFeatures: { jsx: true },
sourceType: 'module',
},
},
plugins: {
'react-hooks': reactHooks,
'react-refresh': reactRefresh,
},
rules: {
...js.configs.recommended.rules,
...reactHooks.configs.recommended.rules,
'no-unused-vars': ['error', { varsIgnorePattern: '^[A-Z_]' }],
'react-refresh/only-export-components': [
'warn',
{ allowConstantExport: true },
],
},
},
]
|
2301_81295389/student-java-web-frontend
|
eslint.config.js
|
JavaScript
|
unknown
| 844
|
<!doctype html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta name="description" content="学生信息管理系统" />
<title>学生信息管理系统</title>
<link rel="icon" href="./public/logo.jpg" type="image/x-icon" />
</head>
<body>
<div id="root"></div>
<script type="module" src="src/main.jsx"></script>
</body>
</html>
|
2301_81295389/student-java-web-frontend
|
index.html
|
HTML
|
unknown
| 426
|
import React from 'react';
import {BrowserRouter as Router, Route, Routes, Navigate} from 'react-router-dom';
import Login from './pages/login/Login.jsx';
import NavigationLayout from "./pages/layouts/NavigationLayout.jsx";
import HomePage from "./pages/homePage/HomePage.jsx";
import StudentPage from "./pages/studentPage/StudentPage.jsx"
import RegisteredPage from "./pages/registeredPage/RegisteredPage.jsx";
import AcademyPage from "./pages/academyPage/AcademyPage.jsx";
import SpecialtyPage from "./pages/specialtyPage/SpecialtyPage.jsx";
import ClassPage from "./pages/classPage/ClassPage.jsx";
import TeacherPage from "./pages/teacherPage/TeacherPage.jsx";
function App() {
return (
<Router>
<Routes>
<Route path="/" element={<Login/>}/>
<Route path="/login" element={<Login/>}/>
<Route path="/registered" element={<RegisteredPage/>}/>
<Route path="/pages" element={<NavigationLayout/>}>
<Route index element={<Navigate to="home" replace/>}/>
<Route path="home" element={<HomePage/>}/>
<Route path="student" element={<StudentPage/>}/>
<Route path="academy" element={<AcademyPage/>}/>
<Route path="specialty" element={<SpecialtyPage/>}/>
<Route path="classPage" element={<ClassPage/>}/>
<Route path="teacher" element={<TeacherPage/>}/>
</Route>
</Routes>
</Router>
);
}
export default App;
|
2301_81295389/student-java-web-frontend
|
src/App.jsx
|
JavaScript
|
unknown
| 1,565
|