Release 260111
This commit is contained in:
0
system/loggerd/__init__.py
Normal file
0
system/loggerd/__init__.py
Normal file
BIN
system/loggerd/bootlog
Executable file
BIN
system/loggerd/bootlog
Executable file
Binary file not shown.
29
system/loggerd/config.py
Normal file
29
system/loggerd/config.py
Normal file
@@ -0,0 +1,29 @@
|
||||
import os
|
||||
from openpilot.system.hardware.hw import Paths
|
||||
|
||||
|
||||
CAMERA_FPS = 20
SEGMENT_LENGTH = 60  # seconds of data per log segment directory

# statsd configuration (see STATS_SOCKET consumers)
STATS_DIR_FILE_LIMIT = 10000
STATS_SOCKET = "ipc:///tmp/stats"
STATS_FLUSH_TIME_S = 60
|
||||
|
||||
def get_available_percent(default: float) -> float:
  """Return the percentage of free space on the log filesystem.

  Falls back to *default* if the filesystem cannot be queried.
  """
  try:
    stats = os.statvfs(Paths.log_root())
    return 100.0 * stats.f_bavail / stats.f_blocks
  except OSError:
    return default
|
||||
|
||||
|
||||
def get_available_bytes(default: int) -> int:
  """Return the number of free bytes on the log filesystem.

  Falls back to *default* if the filesystem cannot be queried.
  """
  try:
    stats = os.statvfs(Paths.log_root())
  except OSError:
    return default
  return stats.f_bavail * stats.f_frsize
|
||||
80
system/loggerd/deleter.py
Executable file
80
system/loggerd/deleter.py
Executable file
@@ -0,0 +1,80 @@
|
||||
#!/usr/bin/env python3
|
||||
import os
|
||||
import shutil
|
||||
import threading
|
||||
from openpilot.system.hardware.hw import Paths
|
||||
from openpilot.common.swaglog import cloudlog
|
||||
from openpilot.system.loggerd.config import get_available_bytes, get_available_percent
|
||||
from openpilot.system.loggerd.uploader import listdir_by_creation
|
||||
from openpilot.system.loggerd.xattr_cache import getxattr
|
||||
|
||||
# deletion thresholds: delete old data once free space drops below either limit
MIN_BYTES = 5 * 1024 * 1024 * 1024
MIN_PERCENT = 30

# directories that are only deleted after all regular segments are gone
DELETE_LAST = ['boot', 'crash']

# xattr marking a segment directory as preserved (set elsewhere, e.g. by loggerd)
PRESERVE_ATTR_NAME = 'user.preserve'
PRESERVE_ATTR_VALUE = b'1'
PRESERVE_COUNT = 5  # number of most-recent preserved segments to keep
|
||||
|
||||
|
||||
def has_preserve_xattr(d: str) -> bool:
  """Whether segment directory *d* (relative to the log root) is marked preserved."""
  path = os.path.join(Paths.log_root(), d)
  return getxattr(path, PRESERVE_ATTR_NAME) == PRESERVE_ATTR_VALUE
|
||||
|
||||
|
||||
def get_preserved_segments(dirs_by_creation: list[str]) -> set[str]:
  """Return the set of segment dir names that must be kept.

  Scans from newest to oldest; the most recent PRESERVE_COUNT preserved
  segments are kept, each together with up to two immediately prior segments.
  """
  preserved: set[str] = set()
  found = 0
  for d in reversed(dirs_by_creation):
    if not has_preserve_xattr(d):
      continue
    if found == PRESERVE_COUNT:
      break
    found += 1

    date_str, _, seg_str = d.rpartition("--")

    # skip entries that don't look like "<route>--<segment number>"
    if not date_str:
      continue
    try:
      seg_num = int(seg_str)
    except ValueError:
      continue

    # keep this segment plus the two before it
    for num in range(max(0, seg_num - 2), seg_num + 1):
      preserved.add(f"{date_str}--{num}")

  return preserved
|
||||
|
||||
|
||||
def deleter_thread(exit_event: threading.Event):
  """Free up space in the log root by deleting old data until exit_event is set.

  When free space drops below MIN_BYTES or MIN_PERCENT, deletes at most one
  directory per pass and re-checks after a short wait; otherwise sleeps longer
  between checks.
  """
  while not exit_event.is_set():
    # defaults are above the thresholds, so a statvfs failure reads as "enough space"
    out_of_bytes = get_available_bytes(default=MIN_BYTES + 1) < MIN_BYTES
    out_of_percent = get_available_percent(default=MIN_PERCENT + 1) < MIN_PERCENT

    if out_of_percent or out_of_bytes:
      dirs = listdir_by_creation(Paths.log_root())
      preserved_dirs = get_preserved_segments(dirs)

      # remove the earliest directory we can; sort is stable, so within creation
      # order the DELETE_LAST dirs and preserved segments sort to the end
      for delete_dir in sorted(dirs, key=lambda d: (d in DELETE_LAST, d in preserved_dirs)):
        delete_path = os.path.join(Paths.log_root(), delete_dir)

        # a .lock file means the directory is still in use — skip it
        if any(name.endswith(".lock") for name in os.listdir(delete_path)):
          continue

        try:
          cloudlog.info(f"deleting {delete_path}")
          shutil.rmtree(delete_path)
          break
        except OSError:
          cloudlog.exception(f"issue deleting {delete_path}")
      exit_event.wait(.1)
    else:
      exit_event.wait(30)
|
||||
|
||||
|
||||
def main():
  """Run the deleter loop in the foreground until interrupted."""
  exit_event = threading.Event()
  deleter_thread(exit_event)


if __name__ == "__main__":
  main()
|
||||
42
system/loggerd/encoder/encoder.h
Normal file
42
system/loggerd/encoder/encoder.h
Normal file
@@ -0,0 +1,42 @@
|
||||
#pragma once
|
||||
|
||||
// has to be in this order
|
||||
#ifdef __linux__
|
||||
#include "third_party/linux/include/v4l2-controls.h"
|
||||
#include <linux/videodev2.h>
|
||||
#else
|
||||
#define V4L2_BUF_FLAG_KEYFRAME 8
|
||||
#endif
|
||||
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
#include "cereal/messaging/messaging.h"
|
||||
#include "msgq/visionipc/visionipc.h"
|
||||
#include "common/queue.h"
|
||||
#include "system/loggerd/loggerd.h"
|
||||
|
||||
// Abstract base class for one video encoder instance. Concrete backends
// (V4L2, FFmpeg) implement the open/close/encode hooks; the base provides
// publishing of encoded packets over cereal messaging.
class VideoEncoder {
public:
  VideoEncoder(const EncoderInfo &encoder_info, int in_width, int in_height);
  virtual ~VideoEncoder() {}
  // Encode one frame. Return-value semantics are backend-defined.
  virtual int encode_frame(VisionBuf* buf, VisionIpcBufExtra *extra) = 0;
  virtual void encoder_open() = 0;
  virtual void encoder_close() = 0;

  // Publish one encoded packet (optional header + payload) with its segment/index metadata.
  void publisher_publish(int segment_num, uint32_t idx, VisionIpcBufExtra &extra, unsigned int flags, kj::ArrayPtr<capnp::byte> header, kj::ArrayPtr<capnp::byte> dat);

protected:
  int in_width, in_height;    // input frame dimensions
  int out_width, out_height;  // output (possibly downscaled) dimensions
  const EncoderInfo encoder_info;

private:
  // total frames encoded
  int cnt = 0;
  std::unique_ptr<PubMaster> pm;
  std::vector<capnp::byte> msg_cache;  // reusable serialization buffer
};
|
||||
34
system/loggerd/encoder/ffmpeg_encoder.h
Normal file
34
system/loggerd/encoder/ffmpeg_encoder.h
Normal file
@@ -0,0 +1,34 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
extern "C" {
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
}
|
||||
|
||||
#include "system/loggerd/encoder/encoder.h"
|
||||
#include "system/loggerd/loggerd.h"
|
||||
|
||||
// VideoEncoder implementation backed by FFmpeg's libavcodec.
class FfmpegEncoder : public VideoEncoder {
public:
  FfmpegEncoder(const EncoderInfo &encoder_info, int in_width, int in_height);
  ~FfmpegEncoder();
  int encode_frame(VisionBuf* buf, VisionIpcBufExtra *extra);
  void encoder_open();
  void encoder_close();

private:
  int segment_num = -1;  // current logging segment
  int counter = 0;       // frames encoded since the encoder was opened
  bool is_open = false;

  AVCodecContext *codec_ctx;
  AVFrame *frame = NULL;
  std::vector<uint8_t> convert_buf;    // pixel-format conversion scratch buffer
  std::vector<uint8_t> downscale_buf;  // scratch buffer when output is smaller than input
};
|
||||
32
system/loggerd/encoder/jpeg_encoder.h
Normal file
32
system/loggerd/encoder/jpeg_encoder.h
Normal file
@@ -0,0 +1,32 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <jpeglib.h>
|
||||
#include <vector>
|
||||
#include <memory>
|
||||
#include "cereal/messaging/messaging.h"
|
||||
#include "msgq/visionipc/visionbuf.h"
|
||||
|
||||
class JpegEncoder {
|
||||
public:
|
||||
JpegEncoder(const std::string &pusblish_name, int width, int height);
|
||||
~JpegEncoder();
|
||||
void pushThumbnail(VisionBuf *buf, const VisionIpcBufExtra &extra);
|
||||
|
||||
private:
|
||||
void generateThumbnail(const uint8_t *y, const uint8_t *uv, int width, int height, int stride);
|
||||
void compressToJpeg(uint8_t *y_plane, uint8_t *u_plane, uint8_t *v_plane);
|
||||
|
||||
int thumbnail_width;
|
||||
int thumbnail_height;
|
||||
std::string publish_name;
|
||||
std::vector<uint8_t> yuv_buffer;
|
||||
std::unique_ptr<PubMaster> pm;
|
||||
|
||||
// JPEG output buffer
|
||||
unsigned char* out_buffer = nullptr;
|
||||
unsigned long out_size = 0;
|
||||
};
|
||||
31
system/loggerd/encoder/v4l_encoder.h
Normal file
31
system/loggerd/encoder/v4l_encoder.h
Normal file
@@ -0,0 +1,31 @@
|
||||
#pragma once
|
||||
|
||||
#include "common/queue.h"
|
||||
#include "system/loggerd/encoder/encoder.h"
|
||||
|
||||
#define BUF_IN_COUNT 7
|
||||
#define BUF_OUT_COUNT 6
|
||||
|
||||
// Hardware video encoder driven through the Linux V4L2 interface.
class V4LEncoder : public VideoEncoder {
public:
  V4LEncoder(const EncoderInfo &encoder_info, int in_width, int in_height);
  ~V4LEncoder();
  int encode_frame(VisionBuf* buf, VisionIpcBufExtra *extra);
  void encoder_open();
  void encoder_close();

private:
  int fd;  // V4L2 device file descriptor

  bool is_open = false;
  int segment_num = -1;  // current logging segment
  int counter = 0;       // frames encoded since open

  // per-frame metadata queued alongside frames handed to the driver
  SafeQueue<VisionIpcBufExtra> extras;

  // drains encoded output buffers from the driver on a dedicated thread
  static void dequeue_handler(V4LEncoder *e);
  std::thread dequeue_handler_thread;

  VisionBuf buf_out[BUF_OUT_COUNT];
  SafeQueue<unsigned int> free_buf_in;  // indices of input buffers free for reuse
};
|
||||
BIN
system/loggerd/encoderd
Executable file
BIN
system/loggerd/encoderd
Executable file
Binary file not shown.
37
system/loggerd/logger.h
Normal file
37
system/loggerd/logger.h
Normal file
@@ -0,0 +1,37 @@
|
||||
#pragma once
|
||||
|
||||
#include <cassert>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
#include "cereal/messaging/messaging.h"
|
||||
#include "common/util.h"
|
||||
#include "system/hardware/hw.h"
|
||||
#include "system/loggerd/zstd_writer.h"
|
||||
|
||||
// zstd compression level for the rlog/qlog writers
constexpr int LOG_COMPRESSION_LEVEL = 10;

typedef cereal::Sentinel::SentinelType SentinelType;

// Writes a route's log segments to disk as zstd-compressed rlog/qlog
// streams, rotating to a new segment directory on each call to next().
class LoggerState {
public:
  // NOTE(review): other code in this tree uses Paths::log_root(); confirm
  // "Path" here resolves and isn't a typo for "Paths".
  LoggerState(const std::string& log_root = Path::log_root());
  ~LoggerState();
  // Rotate to the next segment.
  bool next();
  void write(uint8_t* data, size_t size, bool in_qlog);
  inline int segment() const { return part; }
  inline const std::string& segmentPath() const { return segment_path; }
  inline const std::string& routeName() const { return route_name; }
  inline void write(kj::ArrayPtr<kj::byte> bytes, bool in_qlog) { write(bytes.begin(), bytes.size(), in_qlog); }
  inline void setExitSignal(int signal) { exit_signal = signal; }

protected:
  int part = -1, exit_signal = 0;  // current segment index; signal recorded at shutdown
  std::string route_path, route_name, segment_path, lock_file;
  kj::Array<capnp::word> init_data;  // serialized init data (see logger_build_init_data)
  std::unique_ptr<ZstdFileWriter> rlog, qlog;
};

kj::Array<capnp::word> logger_build_init_data();
std::string logger_get_identifier(std::string key);
std::string zstd_decompress(const std::string &in);
|
||||
BIN
system/loggerd/loggerd
Executable file
BIN
system/loggerd/loggerd
Executable file
Binary file not shown.
153
system/loggerd/loggerd.h
Normal file
153
system/loggerd/loggerd.h
Normal file
@@ -0,0 +1,153 @@
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "cereal/messaging/messaging.h"
|
||||
#include "cereal/services.h"
|
||||
#include "msgq/visionipc/visionipc_client.h"
|
||||
#include "system/hardware/hw.h"
|
||||
#include "common/params.h"
|
||||
#include "common/swaglog.h"
|
||||
#include "common/util.h"
|
||||
|
||||
#include "system/loggerd/logger.h"
|
||||
|
||||
constexpr int MAIN_FPS = 20;
const int MAIN_BITRATE = 1e7;        // full-resolution recordings
const int LIVESTREAM_BITRATE = 1e6;  // live-view streams
const int QCAM_BITRATE = 256000;     // low-res qcamera

#define NO_CAMERA_PATIENCE 500 // fall back to time-based rotation if all cameras are dead

// Fills in an EncoderInfo's cereal accessor function pointers for a given
// encode service, e.g. RoadEncode -> get/set/init RoadEncode{Data,Idx}.
#define INIT_ENCODE_FUNCTIONS(encode_type) \
  .get_encode_data_func = &cereal::Event::Reader::get##encode_type##Data, \
  .set_encode_idx_func = &cereal::Event::Builder::set##encode_type##Idx, \
  .init_encode_data_func = &cereal::Event::Builder::init##encode_type##Data

// test hook: LOGGERD_TEST allows overriding the segment length via env
const bool LOGGERD_TEST = getenv("LOGGERD_TEST");
const int SEGMENT_LENGTH = LOGGERD_TEST ? atoi(getenv("LOGGERD_SEGMENT_LENGTH")) : 60;

// xattr marking segment directories that the deleter should keep
constexpr char PRESERVE_ATTR_NAME[] = "user.preserve";
constexpr char PRESERVE_ATTR_VALUE = '1';
|
||||
// Static configuration for one encoder output: what to publish, what file
// to write, resolution/bitrate, and the cereal accessors for its service.
class EncoderInfo {
public:
  const char *publish_name;
  const char *thumbnail_name = NULL;  // when set, JPEG thumbnails are also published
  const char *filename = NULL;        // NULL for outputs that are never written to disk
  bool record = true;                 // write the encoded stream to disk
  bool include_audio = false;
  int frame_width = -1;   // -1: presumably use the input frame size — confirm in encoder
  int frame_height = -1;
  int fps = MAIN_FPS;
  int bitrate = MAIN_BITRATE;
  cereal::EncodeIndex::Type encode_type = Hardware::PC() ? cereal::EncodeIndex::Type::BIG_BOX_LOSSLESS
                                                         : cereal::EncodeIndex::Type::FULL_H_E_V_C;
  // member-function pointers into the cereal schema for this encode service
  // (filled in via INIT_ENCODE_FUNCTIONS)
  ::cereal::EncodeData::Reader (cereal::Event::Reader::*get_encode_data_func)() const;
  void (cereal::Event::Builder::*set_encode_idx_func)(::cereal::EncodeIndex::Reader);
  cereal::EncodeData::Builder (cereal::Event::Builder::*init_encode_data_func)();
};
|
||||
|
||||
// One camera stream to encode: the VisionIPC stream plus its encoder outputs.
class LogCameraInfo {
public:
  const char *thread_name;
  int fps = MAIN_FPS;
  VisionStreamType stream_type;
  std::vector<EncoderInfo> encoder_infos;  // one entry per output (e.g. full-res + qcam)
};
|
||||
|
||||
// ---- recording encoder configurations --------------------------------------

const EncoderInfo main_road_encoder_info = {
  .publish_name = "roadEncodeData",
  .thumbnail_name = "thumbnail",
  .filename = "fcamera.hevc",
  .record = Params().getInt("RecordRoadCam") > 0,
  INIT_ENCODE_FUNCTIONS(RoadEncode),
};

const EncoderInfo main_wide_road_encoder_info = {
  .publish_name = "wideRoadEncodeData",
  .filename = "ecamera.hevc",
  .record = Params().getInt("RecordRoadCam") > 1,
  INIT_ENCODE_FUNCTIONS(WideRoadEncode),
};

const EncoderInfo main_driver_encoder_info = {
  .publish_name = "driverEncodeData",
  .filename = "dcamera.hevc",
  .record = Params().getBool("RecordFront"),
  INIT_ENCODE_FUNCTIONS(DriverEncode),
};

// ---- livestream encoder configurations (never written to disk) -------------

const EncoderInfo stream_road_encoder_info = {
  .publish_name = "livestreamRoadEncodeData",
  //.thumbnail_name = "thumbnail",
  .encode_type = cereal::EncodeIndex::Type::QCAMERA_H264,
  .record = false,
  .bitrate = LIVESTREAM_BITRATE,
  INIT_ENCODE_FUNCTIONS(LivestreamRoadEncode),
};

const EncoderInfo stream_wide_road_encoder_info = {
  .publish_name = "livestreamWideRoadEncodeData",
  .encode_type = cereal::EncodeIndex::Type::QCAMERA_H264,
  .record = false,
  .bitrate = LIVESTREAM_BITRATE,
  INIT_ENCODE_FUNCTIONS(LivestreamWideRoadEncode),
};

const EncoderInfo stream_driver_encoder_info = {
  .publish_name = "livestreamDriverEncodeData",
  .encode_type = cereal::EncodeIndex::Type::QCAMERA_H264,
  .record = false,
  .bitrate = LIVESTREAM_BITRATE,
  INIT_ENCODE_FUNCTIONS(LivestreamDriverEncode),
};

// low-res qcamera, derived from the road camera stream
const EncoderInfo qcam_encoder_info = {
  .publish_name = "qRoadEncodeData",
  .filename = "qcamera.ts",
  .bitrate = QCAM_BITRATE,
  .encode_type = cereal::EncodeIndex::Type::QCAMERA_H264,
  .frame_width = 526,
  .frame_height = 330,
  .include_audio = Params().getBool("RecordAudio"),
  INIT_ENCODE_FUNCTIONS(QRoadEncode),
};

// ---- camera -> encoder mappings --------------------------------------------

const LogCameraInfo road_camera_info{
  .thread_name = "road_cam_encoder",
  .stream_type = VISION_STREAM_ROAD,
  .encoder_infos = {main_road_encoder_info, qcam_encoder_info}
};

const LogCameraInfo wide_road_camera_info{
  .thread_name = "wide_road_cam_encoder",
  .stream_type = VISION_STREAM_WIDE_ROAD,
  .encoder_infos = {main_wide_road_encoder_info}
};

const LogCameraInfo driver_camera_info{
  .thread_name = "driver_cam_encoder",
  .stream_type = VISION_STREAM_DRIVER,
  .encoder_infos = {main_driver_encoder_info}
};

const LogCameraInfo stream_road_camera_info{
  .thread_name = "road_cam_encoder",
  .stream_type = VISION_STREAM_ROAD,
  .encoder_infos = {stream_road_encoder_info}
};

const LogCameraInfo stream_wide_road_camera_info{
  .thread_name = "wide_road_cam_encoder",
  .stream_type = VISION_STREAM_WIDE_ROAD,
  .encoder_infos = {stream_wide_road_encoder_info}
};

const LogCameraInfo stream_driver_camera_info{
  .thread_name = "driver_cam_encoder",
  .stream_type = VISION_STREAM_DRIVER,
  .encoder_infos = {stream_driver_encoder_info}
};

// driver-cam entries intentionally commented out
const LogCameraInfo cameras_logged[] = {road_camera_info, wide_road_camera_info};//, driver_camera_info};
const LogCameraInfo stream_cameras_logged[] = {stream_road_camera_info, stream_wide_road_camera_info};//, stream_driver_camera_info};
||||
0
system/loggerd/tests/__init__.py
Normal file
0
system/loggerd/tests/__init__.py
Normal file
90
system/loggerd/tests/loggerd_tests_common.py
Normal file
90
system/loggerd/tests/loggerd_tests_common.py
Normal file
@@ -0,0 +1,90 @@
|
||||
import os
|
||||
import random
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
import openpilot.system.loggerd.deleter as deleter
|
||||
import openpilot.system.loggerd.uploader as uploader
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.system.hardware.hw import Paths
|
||||
from openpilot.system.loggerd.xattr_cache import setxattr
|
||||
|
||||
|
||||
def create_random_file(file_path: Path, size_mb: float, lock: bool = False, upload_xattr: bytes = None) -> None:
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if lock:
|
||||
lock_path = str(file_path) + ".lock"
|
||||
os.close(os.open(lock_path, os.O_CREAT | os.O_EXCL))
|
||||
|
||||
chunks = 128
|
||||
chunk_bytes = int(size_mb * 1024 * 1024 / chunks)
|
||||
data = os.urandom(chunk_bytes)
|
||||
|
||||
with open(file_path, "wb") as f:
|
||||
for _ in range(chunks):
|
||||
f.write(data)
|
||||
|
||||
if upload_xattr is not None:
|
||||
setxattr(str(file_path), uploader.UPLOAD_ATTR_NAME, upload_xattr)
|
||||
|
||||
class MockResponse:
  """Minimal stand-in for an HTTP response: just text and status_code."""

  def __init__(self, text, status_code):
    self.text = text
    self.status_code = status_code
|
||||
|
||||
class MockApi:
  """Fake API client whose GET always succeeds with a dummy upload URL."""

  def __init__(self, dongle_id):
    pass

  def get(self, *args, **kwargs):
    body = '{"url": "http://localhost/does/not/exist", "headers": {}}'
    return MockResponse(body, 200)

  def get_token(self):
    return "fake-token"
|
||||
|
||||
class MockApiIgnore:
  """Fake API client whose GET always answers 412 (file not wanted)."""

  def __init__(self, dongle_id):
    pass

  def get(self, *args, **kwargs):
    return MockResponse('', 412)

  def get_token(self):
    return "fake-token"
|
||||
|
||||
class UploaderTestCase:
  """Shared test base: patches the uploader module with mocks and provides
  helpers for creating fake segment files under the log root."""

  # log file name used by subclasses when creating fake files
  f_type = "UNKNOWN"

  root: Path
  seg_num: int
  seg_format: str
  seg_format2: str
  seg_dir: str

  def set_ignore(self):
    # swap in the API mock that answers 412 for every upload request
    uploader.Api = MockApiIgnore

  def setup_method(self):
    uploader.Api = MockApi
    uploader.fake_upload = True
    uploader.force_wifi = True
    uploader.allow_sleep = False
    # two fake route names ("<route count>--<route id>--{segment}") plus a random segment
    self.seg_num = random.randint(1, 300)
    self.seg_format = "00000004--0ac3964c96--{}"
    self.seg_format2 = "00000005--4c4e99b08b--{}"
    self.seg_dir = self.seg_format.format(self.seg_num)

    self.params = Params()
    self.params.put("IsOffroad", "1")
    self.params.put("DongleId", "0000000000000000")

  def make_file_with_data(self, f_dir: str, fn: str, size_mb: float = .1, lock: bool = False,
                          upload_xattr: bytes | None = None, preserve_xattr: bytes | None = None) -> Path:
    """Create a random file under the log root; returns its path.

    preserve_xattr, when given, is set on the *parent directory* — that is
    where the deleter looks for the preserve attribute.
    """
    file_path = Path(Paths.log_root()) / f_dir / fn
    create_random_file(file_path, size_mb, lock, upload_xattr)

    if preserve_xattr is not None:
      setxattr(str(file_path.parent), deleter.PRESERVE_ATTR_NAME, preserve_xattr)

    return file_path
|
||||
117
system/loggerd/tests/test_deleter.py
Normal file
117
system/loggerd/tests/test_deleter.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import time
|
||||
import threading
|
||||
from collections import namedtuple
|
||||
from pathlib import Path
|
||||
from collections.abc import Sequence
|
||||
|
||||
import openpilot.system.loggerd.deleter as deleter
|
||||
from openpilot.common.timeout import Timeout, TimeoutException
|
||||
from openpilot.system.loggerd.tests.loggerd_tests_common import UploaderTestCase
|
||||
|
||||
Stats = namedtuple("Stats", ['f_bavail', 'f_blocks', 'f_frsize'])
|
||||
|
||||
|
||||
class TestDeleter(UploaderTestCase):
  """Tests for deleter.py: old segments are removed when disk space is low."""

  def fake_statvfs(self, d):
    # patched over os.statvfs in the deleter module so tests control free space
    return self.fake_stats

  def setup_method(self):
    self.f_type = "fcamera.hevc"
    super().setup_method()
    # report zero free blocks by default so the deleter always triggers
    self.fake_stats = Stats(f_bavail=0, f_blocks=10, f_frsize=4096)
    deleter.os.statvfs = self.fake_statvfs

  def start_thread(self):
    """Run deleter_thread in a daemon thread until join_thread() is called."""
    self.end_event = threading.Event()
    self.del_thread = threading.Thread(target=deleter.deleter_thread, args=[self.end_event])
    self.del_thread.daemon = True
    self.del_thread.start()

  def join_thread(self):
    self.end_event.set()
    self.del_thread.join()

  def test_delete(self):
    f_path = self.make_file_with_data(self.seg_dir, self.f_type, 1)

    self.start_thread()

    try:
      with Timeout(2, "Timeout waiting for file to be deleted"):
        while f_path.exists():
          time.sleep(0.01)
    finally:
      self.join_thread()

  def assertDeleteOrder(self, f_paths: Sequence[Path], timeout: int = 5) -> None:
    """Assert the deleter removes f_paths in exactly the given order."""
    deleted_order = []

    self.start_thread()
    try:
      with Timeout(timeout, "Timeout waiting for files to be deleted"):
        while True:
          for f in f_paths:
            if not f.exists() and f not in deleted_order:
              deleted_order.append(f)
          if len(deleted_order) == len(f_paths):
            break
          time.sleep(0.01)
    except TimeoutException:
      print("Not deleted:", [f for f in f_paths if f not in deleted_order])
      raise
    finally:
      self.join_thread()

    assert deleted_order == f_paths, "Files not deleted in expected order"

  def test_delete_order(self):
    # files are created in list order, which should also be the deletion order
    self.assertDeleteOrder([
      self.make_file_with_data(self.seg_format.format(0), self.f_type),
      self.make_file_with_data(self.seg_format.format(1), self.f_type),
      self.make_file_with_data(self.seg_format2.format(0), self.f_type),
    ])

  def test_delete_many_preserved(self):
    # with more preserved segments than PRESERVE_COUNT, the oldest preserved
    # segment becomes deletable again
    self.assertDeleteOrder([
      self.make_file_with_data(self.seg_format.format(0), self.f_type),
      self.make_file_with_data(self.seg_format.format(1), self.f_type, preserve_xattr=deleter.PRESERVE_ATTR_VALUE),
      self.make_file_with_data(self.seg_format.format(2), self.f_type),
    ] + [
      self.make_file_with_data(self.seg_format2.format(i), self.f_type, preserve_xattr=deleter.PRESERVE_ATTR_VALUE)
      for i in range(5)
    ])

  def test_delete_last(self):
    # preserved segments and boot/crash directories are deleted last
    self.assertDeleteOrder([
      self.make_file_with_data(self.seg_format.format(1), self.f_type),
      self.make_file_with_data(self.seg_format2.format(0), self.f_type),
      self.make_file_with_data(self.seg_format.format(0), self.f_type, preserve_xattr=deleter.PRESERVE_ATTR_VALUE),
      self.make_file_with_data("boot", self.seg_format[:-4]),
      self.make_file_with_data("crash", self.seg_format2[:-4]),
    ])

  def test_no_delete_when_available_space(self):
    f_path = self.make_file_with_data(self.seg_dir, self.f_type)

    # report 10GB free so neither deletion threshold triggers
    block_size = 4096
    available = (10 * 1024 * 1024 * 1024) / block_size  # 10GB free
    self.fake_stats = Stats(f_bavail=available, f_blocks=10, f_frsize=block_size)

    self.start_thread()
    start_time = time.monotonic()
    while f_path.exists() and time.monotonic() - start_time < 2:
      time.sleep(0.01)
    self.join_thread()

    assert f_path.exists(), "File deleted with available space"

  def test_no_delete_with_lock_file(self):
    f_path = self.make_file_with_data(self.seg_dir, self.f_type, lock=True)

    self.start_thread()
    start_time = time.monotonic()
    while f_path.exists() and time.monotonic() - start_time < 2:
      time.sleep(0.01)
    self.join_thread()

    assert f_path.exists(), "File deleted when locked"
|
||||
150
system/loggerd/tests/test_encoder.py
Normal file
150
system/loggerd/tests/test_encoder.py
Normal file
@@ -0,0 +1,150 @@
|
||||
import math
|
||||
import os
|
||||
import pytest
|
||||
import random
|
||||
import shutil
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
from parameterized import parameterized
|
||||
from tqdm import trange
|
||||
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.common.timeout import Timeout
|
||||
from openpilot.system.hardware import TICI
|
||||
from openpilot.system.manager.process_config import managed_processes
|
||||
from openpilot.tools.lib.logreader import LogReader
|
||||
from openpilot.system.hardware.hw import Paths
|
||||
|
||||
SEGMENT_LENGTH = 2  # seconds; loggerd is configured (via env) to use short segments
FULL_SIZE = 2507572  # expected full-resolution camera file size in bytes
# (filename, fps, expected size, encodeIdx service — None for qcamera)
CAMERAS = [
  ("fcamera.hevc", 20, FULL_SIZE, "roadEncodeIdx"),
  ("dcamera.hevc", 20, FULL_SIZE, "driverEncodeIdx"),
  ("ecamera.hevc", 20, FULL_SIZE, "wideRoadEncodeIdx"),
  ("qcamera.ts", 20, 130000, None),
]

# we check frame count, so we don't have to be too strict on size
FILE_SIZE_TOLERANCE = 0.7
|
||||
|
||||
|
||||
@pytest.mark.tici  # TODO: all of loggerd should work on PC
class TestEncoder:
  """On-device integration test: runs camerad/encoderd/loggerd and validates
  the recorded video files and encodeIdx packets for each segment."""

  def setup_method(self):
    self._clear_logs()
    # tell loggerd to use short test segments
    os.environ["LOGGERD_TEST"] = "1"
    os.environ["LOGGERD_SEGMENT_LENGTH"] = str(SEGMENT_LENGTH)

  def teardown_method(self):
    self._clear_logs()

  def _clear_logs(self):
    if os.path.exists(Paths.log_root()):
      shutil.rmtree(Paths.log_root())

  def _get_latest_segment_path(self):
    # lexicographically last dir in the log root; raises if the root is empty
    last_route = sorted(Path(Paths.log_root()).iterdir())[-1]
    return os.path.join(Paths.log_root(), last_route)

  # TODO: this should run faster than real time
  @parameterized.expand([(True, ), (False, )])
  def test_log_rotation(self, record_front):
    Params().put_bool("RecordFront", record_front)

    managed_processes['sensord'].start()
    managed_processes['loggerd'].start()
    managed_processes['encoderd'].start()

    # let loggerd/encoderd come up before frames start flowing
    time.sleep(1.0)
    managed_processes['camerad'].start()

    num_segments = int(os.getenv("SEGMENTS", random.randint(2, 8)))

    # wait for loggerd to make the dir for first segment
    route_prefix_path = None
    with Timeout(int(SEGMENT_LENGTH*3)):
      while route_prefix_path is None:
        try:
          route_prefix_path = self._get_latest_segment_path().rsplit("--", 1)[0]
        except Exception:
          time.sleep(0.1)

    def check_seg(i):
      # check each camera file size
      counts = []
      first_frames = []
      for camera, fps, size, encode_idx_name in CAMERAS:
        if not record_front and "dcamera" in camera:
          continue

        file_path = f"{route_prefix_path}--{i}/{camera}"

        # check file exists
        assert os.path.exists(file_path), f"segment #{i}: '{file_path}' missing"

        # TODO: this ffprobe call is really slow
        # check frame count
        cmd = f"ffprobe -v error -select_streams v:0 -count_packets -show_entries stream=nb_read_packets -of csv=p=0 {file_path}"
        if TICI:
          cmd = "LD_LIBRARY_PATH=/usr/local/lib " + cmd

        expected_frames = fps * SEGMENT_LENGTH
        probe = subprocess.check_output(cmd, shell=True, encoding='utf8')
        frame_count = int(probe.split('\n')[0].strip())
        counts.append(frame_count)

        assert frame_count == expected_frames, \
          f"segment #{i}: {camera} failed frame count check: expected {expected_frames}, got {frame_count}"

        # sanity check file size
        file_size = os.path.getsize(file_path)
        assert math.isclose(file_size, size, rel_tol=FILE_SIZE_TOLERANCE), \
          f"{file_path} size {file_size} isn't close to target size {size}"

        # Check encodeIdx
        if encode_idx_name is not None:
          rlog_path = f"{route_prefix_path}--{i}/rlog.zst"
          msgs = [m for m in LogReader(rlog_path) if m.which() == encode_idx_name]
          encode_msgs = [getattr(m, encode_idx_name) for m in msgs]

          valid = [m.valid for m in msgs]
          segment_idxs = [m.segmentId for m in encode_msgs]
          encode_idxs = [m.encodeId for m in encode_msgs]
          frame_idxs = [m.frameId for m in encode_msgs]

          # Check frame count
          assert frame_count == len(segment_idxs)
          assert frame_count == len(encode_idxs)

          # Check for duplicates or skips
          assert 0 == segment_idxs[0]
          assert len(set(segment_idxs)) == len(segment_idxs)

          assert all(valid)

          # encode ids continue across segment boundaries
          assert expected_frames * i == encode_idxs[0]
          first_frames.append(frame_idxs[0])
          assert len(set(encode_idxs)) == len(encode_idxs)

      # all logged cameras must start a segment on the same frame id
      assert 1 == len(set(first_frames))

      if TICI:
        expected_frames = fps * SEGMENT_LENGTH
        assert min(counts) == expected_frames
      # delete checked segments as we go to keep disk usage bounded
      shutil.rmtree(f"{route_prefix_path}--{i}")

    try:
      for i in trange(num_segments):
        # poll for next segment
        with Timeout(int(SEGMENT_LENGTH*10), error_msg=f"timed out waiting for segment {i}"):
          while Path(f"{route_prefix_path}--{i+1}") not in Path(Paths.log_root()).iterdir():
            time.sleep(0.1)
        check_seg(i)
    finally:
      managed_processes['loggerd'].stop()
      managed_processes['encoderd'].stop()
      managed_processes['camerad'].stop()
      managed_processes['sensord'].stop()
|
||||
283
system/loggerd/tests/test_loggerd.py
Normal file
283
system/loggerd/tests/test_loggerd.py
Normal file
@@ -0,0 +1,283 @@
|
||||
import numpy as np
|
||||
import os
|
||||
import re
|
||||
import random
|
||||
import string
|
||||
import subprocess
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from pathlib import Path
|
||||
import pytest
|
||||
|
||||
import cereal.messaging as messaging
|
||||
from cereal import log
|
||||
from cereal.services import SERVICE_LIST
|
||||
from openpilot.common.basedir import BASEDIR
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.common.timeout import Timeout
|
||||
from openpilot.system.hardware.hw import Paths
|
||||
from openpilot.system.loggerd.xattr_cache import getxattr
|
||||
from openpilot.system.loggerd.deleter import PRESERVE_ATTR_NAME, PRESERVE_ATTR_VALUE
|
||||
from openpilot.system.manager.process_config import managed_processes
|
||||
from openpilot.system.version import get_version
|
||||
from openpilot.tools.lib.helpers import RE
|
||||
from openpilot.tools.lib.logreader import LogReader
|
||||
from msgq.visionipc import VisionIpcServer, VisionStreamType
|
||||
from openpilot.common.transformations.camera import DEVICE_CAMERAS
|
||||
|
||||
SentinelType = log.Sentinel.SentinelType
|
||||
|
||||
CEREAL_SERVICES = [f for f in log.Event.schema.union_fields if f in SERVICE_LIST
|
||||
and SERVICE_LIST[f].should_log and "encode" not in f.lower()]
|
||||
|
||||
|
||||
class TestLoggerd:
  """Integration tests for loggerd: initData contents, segment rotation,
  bootlogs, qlog decimation, rlog completeness, and preserve-flagging."""

  def _get_latest_log_dir(self):
    # Newest segment directory under the log root, by mtime.
    log_dirs = sorted(Path(Paths.log_root()).iterdir(), key=lambda f: f.stat().st_mtime)
    return log_dirs[-1]

  def _get_log_dir(self, x):
    # Scan a process's stdout for a whitespace-separated token that is an
    # existing directory (loggerd prints the segment path).
    for l in x.splitlines():
      for p in l.split(' '):
        path = Path(p.strip())
        if path.is_dir():
          return path
    return None

  def _get_log_fn(self, x):
    # Same as _get_log_dir, but for an existing file (bootlog prints its path).
    for l in x.splitlines():
      for p in l.split(' '):
        path = Path(p.strip())
        if path.is_file():
          return path
    return None

  def _gen_bootlog(self):
    # Run the bootlog binary and return the Path of the log it wrote.
    with Timeout(5):
      out = subprocess.check_output("./bootlog", cwd=os.path.join(BASEDIR, "system/loggerd"), encoding='utf-8')

    log_fn = self._get_log_fn(out)

    # check existence
    assert log_fn is not None

    return log_fn

  def _check_init_data(self, msgs):
    # First message of every log must be initData.
    msg = msgs[0]
    assert msg.which() == 'initData'

  def _check_sentinel(self, msgs, route):
    # Second message is the start sentinel, last is the end sentinel;
    # route=True expects route-level markers, else segment-level.
    start_type = SentinelType.startOfRoute if route else SentinelType.startOfSegment
    assert msgs[1].sentinel.type == start_type

    end_type = SentinelType.endOfRoute if route else SentinelType.endOfSegment
    assert msgs[-1].sentinel.type == end_type

  def _publish_random_messages(self, services: list[str]) -> dict[str, list]:
    """Start loggerd, publish a random burst on each service, stop loggerd.

    Returns the messages sent, keyed by service, for later comparison
    against what was logged.
    """
    pm = messaging.PubMaster(services)

    managed_processes["loggerd"].start()
    for s in services:
      # wait until loggerd has subscribed so no message is dropped
      assert pm.wait_for_readers_to_update(s, timeout=5)

    sent_msgs = defaultdict(list)
    for _ in range(random.randint(2, 10) * 100):
      for s in services:
        try:
          m = messaging.new_message(s)
        except Exception:
          # list-typed events need an explicit size argument
          m = messaging.new_message(s, random.randint(2, 10))
        pm.send(s, m)
        sent_msgs[s].append(m)

    for s in services:
      assert pm.wait_for_readers_to_update(s, timeout=5)
    managed_processes["loggerd"].stop()

    return sent_msgs

  def test_init_data_values(self):
    os.environ["CLEAN"] = random.choice(["0", "1"])

    dongle = ''.join(random.choice(string.printable) for n in range(random.randint(1, 100)))
    fake_params = [
      # param, initData field, value
      ("DongleId", "dongleId", dongle),
      ("GitCommit", "gitCommit", "commit"),
      ("GitCommitDate", "gitCommitDate", "date"),
      ("GitBranch", "gitBranch", "branch"),
      ("GitRemote", "gitRemote", "remote"),
    ]
    params = Params()
    for k, _, v in fake_params:
      params.put(k, v)
    params.put("AccessToken", "abc")

    lr = list(LogReader(str(self._gen_bootlog())))
    initData = lr[0].initData

    # NOTE(review): bool() of any non-empty string is True, so this asserts
    # `dirty != True` for both "0" and "1"; likely intended
    # bool(int(os.environ["CLEAN"])) — confirm against loggerd's CLEAN handling.
    assert initData.dirty != bool(os.environ["CLEAN"])
    assert initData.version == get_version()

    if os.path.isfile("/proc/cmdline"):
      with open("/proc/cmdline") as f:
        assert list(initData.kernelArgs) == f.read().strip().split(" ")

      with open("/proc/version") as f:
        assert initData.kernelVersion == f.read()

    # check params
    logged_params = {entry.key: entry.value for entry in initData.params.entries}
    expected_params = {k for k, _, __ in fake_params} | {'AccessToken', 'BootCount'}
    assert set(logged_params.keys()) == expected_params, set(logged_params.keys()) ^ expected_params
    # DONT_LOG params must be logged with an empty value
    assert logged_params['AccessToken'] == b'', f"DONT_LOG param value was logged: {repr(logged_params['AccessToken'])}"
    for param_key, initData_key, v in fake_params:
      assert getattr(initData, initData_key) == v
      assert logged_params[param_key].decode() == v

  @pytest.mark.skip("FIXME: encoderd sometimes crashes in CI when running with pytest-xdist")
  def test_rotation(self):
    os.environ["LOGGERD_TEST"] = "1"
    Params().put("RecordFront", "1")

    d = DEVICE_CAMERAS[("tici", "ar0231")]
    expected_files = {"rlog.zst", "qlog.zst", "qcamera.ts", "fcamera.hevc", "dcamera.hevc", "ecamera.hevc"}
    # (stream type, (width, height, buffer size, stride, uv offset), camera state service)
    streams = [(VisionStreamType.VISION_STREAM_ROAD, (d.fcam.width, d.fcam.height, 2048*2346, 2048, 2048*1216), "roadCameraState"),
               (VisionStreamType.VISION_STREAM_DRIVER, (d.dcam.width, d.dcam.height, 2048*2346, 2048, 2048*1216), "driverCameraState"),
               (VisionStreamType.VISION_STREAM_WIDE_ROAD, (d.ecam.width, d.ecam.height, 2048*2346, 2048, 2048*1216), "wideRoadCameraState")]

    pm = messaging.PubMaster(["roadCameraState", "driverCameraState", "wideRoadCameraState"])
    vipc_server = VisionIpcServer("camerad")
    for stream_type, frame_spec, _ in streams:
      vipc_server.create_buffers_with_sizes(stream_type, 40, *(frame_spec))
    vipc_server.start_listener()

    num_segs = random.randint(2, 5)
    length = random.randint(1, 3)
    os.environ["LOGGERD_SEGMENT_LENGTH"] = str(length)
    managed_processes["loggerd"].start()
    managed_processes["encoderd"].start()
    assert pm.wait_for_readers_to_update("roadCameraState", timeout=5)

    fps = 20.0
    # feed exactly num_segs * length seconds of frames on all three cameras
    for n in range(1, int(num_segs*length*fps)+1):
      for stream_type, frame_spec, state in streams:
        dat = np.empty(frame_spec[2], dtype=np.uint8)
        vipc_server.send(stream_type, dat[:].flatten().tobytes(), n, n/fps, n/fps)

        camera_state = messaging.new_message(state)
        frame = getattr(camera_state, state)
        frame.frameId = n
        pm.send(state, camera_state)

      for _, _, state in streams:
        assert pm.wait_for_readers_to_update(state, timeout=5, dt=0.001)

    managed_processes["loggerd"].stop()
    managed_processes["encoderd"].stop()

    # every segment of the route must contain the full set of files
    route_path = str(self._get_latest_log_dir()).rsplit("--", 1)[0]
    for n in range(num_segs):
      p = Path(f"{route_path}--{n}")
      logged = {f.name for f in p.iterdir() if f.is_file()}
      diff = logged ^ expected_files
      assert len(diff) == 0, f"didn't get all expected files. run={_} seg={n} {route_path=}, {diff=}\n{logged=} {expected_files=}"

  def test_bootlog(self):
    # generate bootlog with fake launch log
    launch_log = ''.join(str(random.choice(string.printable)) for _ in range(100))
    with open("/tmp/launch_log", "w") as f:
      f.write(launch_log)

    bootlog_path = self._gen_bootlog()
    lr = list(LogReader(str(bootlog_path)))

    # check length
    assert len(lr) == 2  # boot + initData

    self._check_init_data(lr)

    # check msgs
    bootlog_msgs = [m for m in lr if m.which() == 'boot']
    assert len(bootlog_msgs) == 1

    # sanity check values
    boot = bootlog_msgs.pop().boot
    assert abs(boot.wallTimeNanos - time.time_ns()) < 5*1e9  # within 5s
    assert boot.launchLog == launch_log

    # pstore contents must be captured verbatim when present on the host
    for fn in ["console-ramoops", "pmsg-ramoops-0"]:
      path = Path(os.path.join("/sys/fs/pstore/", fn))
      if path.is_file():
        with open(path, "rb") as f:
          expected_val = f.read()
        bootlog_val = [e.value for e in boot.pstore.entries if e.key == fn][0]
        assert expected_val == bootlog_val

    # next one should increment by one
    bl1 = re.match(RE.LOG_ID_V2, bootlog_path.name)
    bl2 = re.match(RE.LOG_ID_V2, self._gen_bootlog().name)
    assert bl1.group('uid') != bl2.group('uid')
    assert int(bl1.group('count')) == 0 and int(bl2.group('count')) == 1

  def test_qlog(self):
    qlog_services = [s for s in CEREAL_SERVICES if SERVICE_LIST[s].decimation is not None]
    no_qlog_services = [s for s in CEREAL_SERVICES if SERVICE_LIST[s].decimation is None]

    services = random.sample(qlog_services, random.randint(2, min(10, len(qlog_services)))) + \
               random.sample(no_qlog_services, random.randint(2, min(10, len(no_qlog_services))))
    sent_msgs = self._publish_random_messages(services)

    qlog_path = os.path.join(self._get_latest_log_dir(), "qlog.zst")
    lr = list(LogReader(qlog_path))

    # check initData and sentinel
    self._check_init_data(lr)
    self._check_sentinel(lr, True)

    recv_msgs = defaultdict(list)
    for m in lr:
      recv_msgs[m.which()].append(m)

    for s, msgs in sent_msgs.items():
      recv_cnt = len(recv_msgs[s])

      if s in no_qlog_services:
        # check services with no specific decimation aren't in qlog
        assert recv_cnt == 0, f"got {recv_cnt} {s} msgs in qlog"
      else:
        # check logged message count matches decimation
        expected_cnt = (len(msgs) - 1) // SERVICE_LIST[s].decimation + 1
        assert recv_cnt == expected_cnt, f"expected {expected_cnt} msgs for {s}, got {recv_cnt}"

  def test_rlog(self):
    services = random.sample(CEREAL_SERVICES, random.randint(5, 10))
    sent_msgs = self._publish_random_messages(services)

    lr = list(LogReader(os.path.join(self._get_latest_log_dir(), "rlog.zst")))

    # check initData and sentinel
    self._check_init_data(lr)
    self._check_sentinel(lr, True)

    # check all messages were logged and in order
    lr = lr[2:-1]  # slice off initData and both sentinels
    for m in lr:
      sent = sent_msgs[m.which()].pop(0)
      sent.clear_write_flag()
      assert sent.to_bytes() == m.as_builder().to_bytes()

  def test_preserving_flagged_segments(self):
    # a userFlag event must cause loggerd to mark the segment preserved
    services = set(random.sample(CEREAL_SERVICES, random.randint(5, 10))) | {"userFlag"}
    self._publish_random_messages(services)

    segment_dir = self._get_latest_log_dir()
    assert getxattr(segment_dir, PRESERVE_ATTR_NAME) == PRESERVE_ATTR_VALUE

  def test_not_preserving_unflagged_segments(self):
    # without userFlag the preserve xattr must not be set
    services = set(random.sample(CEREAL_SERVICES, random.randint(5, 10))) - {"userFlag"}
    self._publish_random_messages(services)

    segment_dir = self._get_latest_log_dir()
    assert getxattr(segment_dir, PRESERVE_ATTR_NAME) is None
|
||||
184
system/loggerd/tests/test_uploader.py
Normal file
184
system/loggerd/tests/test_uploader.py
Normal file
@@ -0,0 +1,184 @@
|
||||
import os
|
||||
import time
|
||||
import threading
|
||||
import logging
|
||||
import json
|
||||
from pathlib import Path
|
||||
from openpilot.system.hardware.hw import Paths
|
||||
|
||||
from openpilot.common.swaglog import cloudlog
|
||||
from openpilot.system.loggerd.uploader import main, UPLOAD_ATTR_NAME, UPLOAD_ATTR_VALUE
|
||||
|
||||
from openpilot.system.loggerd.tests.loggerd_tests_common import UploaderTestCase
|
||||
|
||||
|
||||
class FakeLogHandler(logging.Handler):
  """Log handler that captures uploader cloudlog events.

  Records the keys of "upload_success" and "upload_ignored" events in the
  order they were emitted; anything that is not a well-formed uploader
  event is silently dropped.
  """

  def __init__(self):
    super().__init__()
    self.reset()

  def reset(self):
    # keys seen so far, in emission order
    self.upload_order = []
    self.upload_ignored = []

  def emit(self, record):
    try:
      payload = json.loads(record.getMessage())
      event, key = payload["event"], payload["key"]
    except Exception:
      # non-JSON message or missing fields: not an uploader event
      return
    if event == "upload_success":
      self.upload_order.append(key)
    elif event == "upload_ignored":
      self.upload_ignored.append(key)
|
||||
|
||||
# Single module-level capture point shared by all tests; each test clears it
# in setup_method() rather than re-attaching a handler.
log_handler = FakeLogHandler()
cloudlog.addHandler(log_handler)
|
||||
|
||||
|
||||
class TestUploader(UploaderTestCase):
  """End-to-end tests of uploader.main() against a fake log root.

  Fixture helpers (make_file_with_data, seg_dir, seg_format, set_ignore, ...)
  come from UploaderTestCase. Upload success/ignore events are observed via
  the module-level FakeLogHandler.
  """

  def setup_method(self):
    super().setup_method()
    log_handler.reset()

  def start_thread(self):
    # Run uploader.main in a daemon thread; end_event tells it to exit.
    self.end_event = threading.Event()
    self.up_thread = threading.Thread(target=main, args=[self.end_event])
    self.up_thread.daemon = True
    self.up_thread.start()

  def join_thread(self):
    self.end_event.set()
    self.up_thread.join()

  def gen_files(self, lock=False, xattr: bytes | None = None, boot=True) -> list[Path]:
    """Create one segment's worth of fake log files (and optionally a bootlog).

    lock also creates .lock siblings; xattr pre-sets the upload xattr value.
    """
    f_paths = []
    for t in ["qlog", "rlog", "dcamera.hevc", "fcamera.hevc"]:
      f_paths.append(self.make_file_with_data(self.seg_dir, t, 1, lock=lock, upload_xattr=xattr))

    if boot:
      f_paths.append(self.make_file_with_data("boot", f"{self.seg_dir}", 1, lock=lock, upload_xattr=xattr))
    return f_paths

  def gen_order(self, seg1: list[int], seg2: list[int], boot=True) -> list[str]:
    """Expected upload order: all bootlogs first, then qlogs, old-format
    route (seg_format) before new-format route (seg_format2)."""
    keys = []
    if boot:
      keys += [f"boot/{self.seg_format.format(i)}.zst" for i in seg1]
      keys += [f"boot/{self.seg_format2.format(i)}.zst" for i in seg2]
    keys += [f"{self.seg_format.format(i)}/qlog.zst" for i in seg1]
    keys += [f"{self.seg_format2.format(i)}/qlog.zst" for i in seg2]
    return keys

  def test_upload(self):
    self.gen_files(lock=False)

    self.start_thread()
    # allow enough time that files could upload twice if there is a bug in the logic
    time.sleep(1)
    self.join_thread()

    exp_order = self.gen_order([self.seg_num], [])

    assert len(log_handler.upload_ignored) == 0, "Some files were ignored"
    assert not len(log_handler.upload_order) < len(exp_order), "Some files failed to upload"
    assert not len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice"
    for f_path in exp_order:
      # on-disk file has no .zst suffix; the xattr marks it uploaded
      assert os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE, "All files not uploaded"

    assert log_handler.upload_order == exp_order, "Files uploaded in wrong order"

  def test_upload_with_wrong_xattr(self):
    # a bogus xattr value must not be treated as "already uploaded"
    self.gen_files(lock=False, xattr=b'0')

    self.start_thread()
    # allow enough time that files could upload twice if there is a bug in the logic
    time.sleep(1)
    self.join_thread()

    exp_order = self.gen_order([self.seg_num], [])

    assert len(log_handler.upload_ignored) == 0, "Some files were ignored"
    assert not len(log_handler.upload_order) < len(exp_order), "Some files failed to upload"
    assert not len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice"
    for f_path in exp_order:
      assert os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE, "All files not uploaded"

    assert log_handler.upload_order == exp_order, "Files uploaded in wrong order"

  def test_upload_ignored(self):
    # with ignore set (server 412), files are tagged uploaded but reported ignored
    self.set_ignore()
    self.gen_files(lock=False)

    self.start_thread()
    # allow enough time that files could upload twice if there is a bug in the logic
    time.sleep(1)
    self.join_thread()

    exp_order = self.gen_order([self.seg_num], [])

    assert len(log_handler.upload_order) == 0, "Some files were not ignored"
    assert not len(log_handler.upload_ignored) < len(exp_order), "Some files failed to ignore"
    assert not len(log_handler.upload_ignored) > len(exp_order), "Some files were ignored twice"
    for f_path in exp_order:
      assert os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE, "All files not ignored"

    assert log_handler.upload_ignored == exp_order, "Files ignored in wrong order"

  def test_upload_files_in_create_order(self):
    # segments across two routes/formats must upload in directory-sort order
    seg1_nums = [0, 1, 2, 10, 20]
    for i in seg1_nums:
      self.seg_dir = self.seg_format.format(i)
      self.gen_files(boot=False)
    seg2_nums = [5, 50, 51]
    for i in seg2_nums:
      self.seg_dir = self.seg_format2.format(i)
      self.gen_files(boot=False)

    exp_order = self.gen_order(seg1_nums, seg2_nums, boot=False)

    self.start_thread()
    # allow enough time that files could upload twice if there is a bug in the logic
    time.sleep(1)
    self.join_thread()

    assert len(log_handler.upload_ignored) == 0, "Some files were ignored"
    assert not len(log_handler.upload_order) < len(exp_order), "Some files failed to upload"
    assert not len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice"
    for f_path in exp_order:
      assert os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE, "All files not uploaded"

    assert log_handler.upload_order == exp_order, "Files uploaded in wrong order"

  def test_no_upload_with_lock_file(self):
    self.start_thread()

    time.sleep(0.25)
    f_paths = self.gen_files(lock=True, boot=False)

    # allow enough time that files should have been uploaded if they would be uploaded
    time.sleep(1)
    self.join_thread()

    for f_path in f_paths:
      fn = f_path.with_suffix(f_path.suffix.replace(".zst", ""))
      uploaded = UPLOAD_ATTR_NAME in os.listxattr(fn) and os.getxattr(fn, UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE
      assert not uploaded, "File upload when locked"

  def test_no_upload_with_xattr(self):
    # files already tagged with the upload xattr must be skipped
    self.gen_files(lock=False, xattr=UPLOAD_ATTR_VALUE)

    self.start_thread()
    # allow enough time that files could upload twice if there is a bug in the logic
    time.sleep(1)
    self.join_thread()

    assert len(log_handler.upload_order) == 0, "File uploaded again"

  def test_clear_locks_on_startup(self):
    # locks created before the uploader starts (e.g. after a crash) are stale
    # and must be removed at startup
    f_paths = self.gen_files(lock=True, boot=False)
    self.start_thread()
    time.sleep(0.25)
    self.join_thread()

    for f_path in f_paths:
      lock_path = f_path.with_suffix(f_path.suffix + ".lock")
      assert not lock_path.is_file(), "File lock not cleared on startup"
|
||||
275
system/loggerd/uploader.py
Executable file
275
system/loggerd/uploader.py
Executable file
@@ -0,0 +1,275 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
import requests
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
import datetime
|
||||
from collections.abc import Iterator
|
||||
|
||||
from cereal import log
|
||||
import cereal.messaging as messaging
|
||||
from openpilot.common.api import Api
|
||||
from openpilot.common.file_helpers import get_upload_stream
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.common.realtime import set_core_affinity
|
||||
from openpilot.system.hardware.hw import Paths
|
||||
from openpilot.system.loggerd.xattr_cache import getxattr, setxattr
|
||||
from openpilot.common.swaglog import cloudlog
|
||||
|
||||
NetworkType = log.DeviceState.NetworkType

# xattr used to mark a file as already uploaded (shared with the tests/deleter)
UPLOAD_ATTR_NAME = 'user.upload'
UPLOAD_ATTR_VALUE = b'1'

# Files larger than these limits are skipped (but still tagged as uploaded).
MAX_UPLOAD_SIZES = {
  "qlog": 25*1e6,  # can't be too restrictive here since we use qlogs to find
                   # bugs, including ones that can cause massive log sizes
  "qcam": 5*1e6,
}

# Environment toggles, mainly for tests and development.
# NOTE(review): bool() of any non-empty string is True, so UPLOADER_SLEEP=0
# does NOT disable sleeping — only UPLOADER_SLEEP="" would. Confirm intent.
allow_sleep = bool(os.getenv("UPLOADER_SLEEP", "1"))
force_wifi = os.getenv("FORCEWIFI") is not None
fake_upload = os.getenv("FAKEUPLOAD") is not None
|
||||
|
||||
|
||||
class FakeRequest:
  """Minimal stand-in for a requests request object under FAKEUPLOAD."""

  def __init__(self):
    # zero-length body, so the upload-speed math sees 0 bytes transferred
    self.headers = {"Content-Length": "0"}
|
||||
|
||||
|
||||
class FakeResponse:
  """Minimal stand-in for requests.Response under FAKEUPLOAD: always HTTP 200."""

  def __init__(self):
    # looks like a successful upload to Uploader.upload()
    self.status_code = 200
    self.request = FakeRequest()
|
||||
|
||||
|
||||
def get_directory_sort(d: str) -> list[str]:
  """Sort key for log directory names.

  Old-format names (starting with "2024-") sort before new-format ones;
  within a format, the route id and segment number are zero-padded so that
  numeric segment order is preserved by lexicographic comparison.
  """
  bucket = "0" if d.startswith("2024-") else "1"
  route, _, seg = d.rpartition('--')
  parts = [route, seg] if route else [seg]
  return [bucket] + [p.rjust(10, '0') for p in parts]
|
||||
|
||||
def listdir_by_creation(d: str) -> list[str]:
  """Return the subdirectories of d ordered by get_directory_sort.

  Missing or unreadable directories yield an empty list instead of raising.
  """
  if not os.path.isdir(d):
    return []

  try:
    subdirs = (entry for entry in os.listdir(d) if os.path.isdir(os.path.join(d, entry)))
    return sorted(subdirs, key=get_directory_sort)
  except OSError:
    cloudlog.exception("listdir_by_creation failed")
    return []
|
||||
|
||||
def clear_locks(root: str) -> None:
  """Delete stale .lock files inside every log directory under root.

  Called once at startup: locks left behind by a crashed writer would
  otherwise block those directories from ever being uploaded.
  """
  for logdir in os.listdir(root):
    seg_path = os.path.join(root, logdir)
    try:
      stale = [f for f in os.listdir(seg_path) if f.endswith(".lock")]
      for lock_name in stale:
        os.unlink(os.path.join(seg_path, lock_name))
    except OSError:
      cloudlog.exception("clear_locks failed")
|
||||
|
||||
|
||||
class Uploader:
  """Finds log files under root and uploads them via the comma API.

  Files are tagged with an xattr after a successful (or deliberately
  skipped) upload so they are never sent twice.
  """

  def __init__(self, dongle_id: str, root: str):
    self.dongle_id = dongle_id
    self.api = Api(dongle_id)
    self.root = root

    self.params = Params()

    # stats for last successfully uploaded file
    self.last_filename = ""

    # folders whose contents are always uploaded first
    self.immediate_folders = ["crash/", "boot/"]
    # per-filename priority; anything absent gets priority 1000 (i.e. skipped
    # by next_file_to_upload unless it's in an immediate folder)
    self.immediate_priority = {"qlog": 0, "qlog.zst": 0, "qcamera.ts": 1}
    #if (self.params.get_int("EnableConnect") == 2):
    #  self.immediate_priority.update({"rlog": 0, "rlog.zst": 0})

  def list_upload_files(self, metered: bool) -> Iterator[tuple[str, str, str]]:
    """Yield (name, key, fn) for every candidate file, in upload order.

    name: bare filename; key: remote key (logdir/name); fn: local path.
    """
    # routes the user recently viewed in connect: only their qcameras upload
    r = self.params.get("AthenadRecentlyViewedRoutes", encoding="utf8")
    requested_routes = [] if r is None else [route for route in r.split(",") if route]

    for logdir in listdir_by_creation(self.root):
      path = os.path.join(self.root, logdir)
      try:
        names = os.listdir(path)
      except OSError:
        continue

      # a .lock file means a writer is still active in this directory
      if any(name.endswith(".lock") for name in names):
        continue

      for name in sorted(names, key=lambda n: self.immediate_priority.get(n, 1000)):
        key = os.path.join(logdir, name)
        fn = os.path.join(path, name)
        # skip files already uploaded
        try:
          ctime = os.path.getctime(fn)
          is_uploaded = getxattr(fn, UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE
        except OSError:
          cloudlog.event("uploader_getxattr_failed", key=key, fn=fn)
          # deleter could have deleted, so skip
          continue
        if is_uploaded:
          continue

        # limit uploading on metered connections
        if metered:
          dt = datetime.timedelta(hours=12)
          # hold back fresh crash/boot logs for 12h in case wifi shows up
          if logdir in self.immediate_folders and (datetime.datetime.now() - datetime.datetime.fromtimestamp(ctime)) < dt:
            continue

        # qcamera only uploads for routes the user actually requested
        if name == "qcamera.ts" and not any(logdir.startswith(r.split('|')[-1]) for r in requested_routes):
          continue

        yield name, key, fn

  def next_file_to_upload(self, metered: bool) -> tuple[str, str, str] | None:
    """Pick the next file: immediate folders (crash/boot) first, then
    files with an explicit immediate priority. Returns None when idle."""
    upload_files = list(self.list_upload_files(metered))

    for name, key, fn in upload_files:
      if any(f in fn for f in self.immediate_folders):
        return name, key, fn

    for name, key, fn in upload_files:
      if name in self.immediate_priority:
        return name, key, fn

    return None

  def do_upload(self, key: str, fn: str):
    """Request a presigned URL for key and PUT the file there.

    Returns the HTTP response; a 412 from the URL endpoint is returned
    as-is (server declined the file).
    """
    url_resp = self.api.get("v1.4/" + self.dongle_id + "/upload_url/", timeout=10, path=key, access_token=self.api.get_token())
    if url_resp.status_code == 412:
      return url_resp

    url_resp_json = json.loads(url_resp.text)
    url = url_resp_json['url']
    headers = url_resp_json['headers']
    cloudlog.debug("upload_url v1.4 %s %s", url, str(headers))

    if fake_upload:
      return FakeResponse()

    stream = None
    try:
      # compress on the fly when the remote key is .zst but the local file isn't
      compress = key.endswith('.zst') and not fn.endswith('.zst')
      stream, _ = get_upload_stream(fn, compress)
      response = requests.put(url, data=stream, headers=headers, timeout=10)
      return response
    finally:
      if stream:
        stream.close()

  def upload(self, name: str, key: str, fn: str, network_type: int, metered: bool) -> bool:
    """Upload one file and tag it with the upload xattr on success.

    Zero-size and over-limit files are tagged without uploading. Returns
    True when the file should not be retried.
    """
    try:
      sz = os.path.getsize(fn)
    except OSError:
      cloudlog.exception("upload: getsize failed")
      return False

    cloudlog.event("upload_start", key=key, fn=fn, sz=sz, network_type=network_type, metered=metered)

    if sz == 0:
      # tag files of 0 size as uploaded
      success = True
    elif name in MAX_UPLOAD_SIZES and sz > MAX_UPLOAD_SIZES[name]:
      cloudlog.event("uploader_too_large", key=key, fn=fn, sz=sz)
      success = True
    else:
      start_time = time.monotonic()

      stat = None
      last_exc = None
      try:
        stat = self.do_upload(key, fn)
      except Exception as e:
        last_exc = (e, traceback.format_exc())

      # 401/403/412 are treated as terminal: retrying won't help, so tag the file
      if stat is not None and stat.status_code in (200, 201, 401, 403, 412):
        self.last_filename = fn
        dt = time.monotonic() - start_time
        if stat.status_code == 412:
          cloudlog.event("upload_ignored", key=key, fn=fn, sz=sz, network_type=network_type, metered=metered)
        else:
          content_length = int(stat.request.headers.get("Content-Length", 0))
          speed = (content_length / 1e6) / dt  # MB/s
          cloudlog.event("upload_success", key=key, fn=fn, sz=sz, content_length=content_length,
                         network_type=network_type, metered=metered, speed=speed)
        success = True
      else:
        success = False
        cloudlog.event("upload_failed", stat=stat, exc=last_exc, key=key, fn=fn, sz=sz, network_type=network_type, metered=metered)

    if success:
      # tag file as uploaded
      try:
        setxattr(fn, UPLOAD_ATTR_NAME, UPLOAD_ATTR_VALUE)
      except OSError:
        # NOTE(review): last_exc is only bound on the else branch above, so a
        # setxattr failure after the sz==0 / too-large paths raises NameError
        # here — confirm and initialize last_exc before the branch.
        cloudlog.event("uploader_setxattr_failed", exc=last_exc, key=key, fn=fn, sz=sz)

    return success

  def step(self, network_type: int, metered: bool) -> bool | None:
    """Upload at most one file. Returns None when idle, else upload success."""
    d = self.next_file_to_upload(metered)
    if d is None:
      return None

    name, key, fn = d

    # qlogs and bootlogs need to be compressed before uploading
    if key.endswith(('qlog', 'rlog')) or (key.startswith('boot/') and not key.endswith('.zst')):
      key += ".zst"

    return self.upload(name, key, fn, network_type, metered)
|
||||
|
||||
|
||||
def main(exit_event: threading.Event | None = None) -> None:
  """Uploader daemon loop: poll deviceState and upload one file per step.

  exit_event stops the loop (used by the tests); when None, runs until
  the process is killed.
  """
  if exit_event is None:
    exit_event = threading.Event()

  try:
    set_core_affinity([0, 1, 2, 3])
  except Exception:
    cloudlog.exception("failed to set core affinity")

  # remove stale .lock files from a previous run before scanning
  clear_locks(Paths.log_root())

  params = Params()
  dongle_id = params.get("DongleId", encoding='utf8')

  if dongle_id is None:
    cloudlog.info("uploader missing dongle_id")
    raise Exception("uploader can't start without dongle id")

  sm = messaging.SubMaster(['deviceState'])
  uploader = Uploader(dongle_id, Paths.log_root())

  backoff = 0.1
  while not exit_event.is_set():
    sm.update(0)
    offroad = params.get_bool("IsOffroad")
    network_type = sm['deviceState'].networkType if not force_wifi else NetworkType.wifi
    if network_type == NetworkType.none:
      # no connectivity: idle (long sleep offroad, short onroad)
      if allow_sleep:
        time.sleep(60 if offroad else 5)
      continue

    # NOTE(review): step() receives the raw networkType even when force_wifi
    # overrode network_type above — confirm whether FORCEWIFI should also
    # affect the value logged/passed here.
    success = uploader.step(sm['deviceState'].networkType.raw, sm['deviceState'].networkMetered)
    if success is None:
      # nothing to upload: poll slowly
      backoff = 60 if offroad else 5
    elif success:
      backoff = 0.1
    else:
      # failed upload: exponential backoff capped at 120s
      cloudlog.info("upload backoff %r", backoff)
      backoff = min(backoff*2, 120)
    if allow_sleep:
      # jittered sleep to avoid synchronized retries across devices
      time.sleep(backoff + random.uniform(0, backoff))
|
||||
|
||||
|
||||
# standalone entry point (normally launched by the process manager)
if __name__ == "__main__":
  main()
|
||||
41
system/loggerd/video_writer.h
Normal file
41
system/loggerd/video_writer.h
Normal file
@@ -0,0 +1,41 @@
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <deque>
|
||||
|
||||
extern "C" {
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
}
|
||||
|
||||
#include "cereal/messaging/messaging.h"
|
||||
|
||||
// Writes an encoded video (and optionally audio) stream to disk, either as a
// raw bitstream or remuxed into a container via libavformat.
class VideoWriter {
public:
  // path/filename: output location. remuxing: wrap packets in a container
  // (libavformat) instead of writing the raw encoded bitstream.
  VideoWriter(const char *path, const char *filename, bool remuxing, int width, int height, int fps, cereal::EncodeIndex::Type codec);
  // Write one encoded video packet. codecconfig marks codec configuration
  // data (presumably parameter sets — confirm); keyframe marks keyframes.
  void write(uint8_t *data, int len, long long timestamp, bool codecconfig, bool keyframe);
  // Write audio samples; presumably initializes the audio stream on first
  // call via initialize_audio(sample_rate) — confirm in the implementation.
  void write_audio(uint8_t *data, int len, long long timestamp, int sample_rate);

  ~VideoWriter();

private:
  void initialize_audio(int sample_rate);
  void encode_and_write_audio_frame(AVFrame* frame);

  std::string vid_path, lock_path;  // output file and its ".lock" sibling path
  FILE *of = nullptr;               // raw output file handle

  AVCodecContext *codec_ctx;
  AVFormatContext *ofmt_ctx;
  AVStream *out_stream;

  // audio state, populated lazily once audio is written
  bool audio_initialized = false;
  bool header_written = false;
  AVStream *audio_stream = nullptr;
  AVCodecContext *audio_codec_ctx = nullptr;
  AVFrame *audio_frame = nullptr;
  uint64_t audio_pts = 0;           // running audio timestamp counter
  std::deque<float> audio_buffer;   // buffered samples awaiting a full frame

  bool remuxing;
};
|
||||
23
system/loggerd/xattr_cache.py
Normal file
23
system/loggerd/xattr_cache.py
Normal file
@@ -0,0 +1,23 @@
|
||||
import errno
|
||||
|
||||
import xattr
|
||||
|
||||
# Process-lifetime cache of (path, attr_name) -> attribute value (None means
# the attribute is unset). Entries are only invalidated by writes via setxattr.
_cached_attributes: dict[tuple, bytes | None] = {}
|
||||
|
||||
def getxattr(path: str, attr_name: str) -> bytes | None:
  """Read an extended attribute, caching the result for the process lifetime.

  Returns None when the attribute has never been set on the file; other
  OS errors (e.g. missing file) propagate.
  """
  key = (path, attr_name)
  try:
    return _cached_attributes[key]
  except KeyError:
    pass

  try:
    value = xattr.getxattr(path, attr_name)
  except OSError as e:
    # ENODATA (Linux) or ENOATTR (macOS) means attribute hasn't been set
    unset = e.errno == errno.ENODATA or (hasattr(errno, 'ENOATTR') and e.errno == errno.ENOATTR)
    if not unset:
      raise
    value = None

  _cached_attributes[key] = value
  return value
|
||||
|
||||
def setxattr(path: str, attr_name: str, attr_value: bytes) -> None:
  """Write an extended attribute, invalidating any cached value first."""
  key = (path, attr_name)
  if key in _cached_attributes:
    del _cached_attributes[key]
  xattr.setxattr(path, attr_name, attr_value)
|
||||
24
system/loggerd/zstd_writer.h
Normal file
24
system/loggerd/zstd_writer.h
Normal file
@@ -0,0 +1,24 @@
|
||||
#pragma once
|
||||
|
||||
#include <zstd.h>
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <capnp/common.h>
|
||||
|
||||
// Streaming zstd-compressed file writer: buffers input, compresses it with a
// ZSTD_CStream, and writes the compressed output to a FILE.
class ZstdFileWriter {
public:
  ZstdFileWriter(const std::string &filename, int compression_level);
  ~ZstdFileWriter();
  void write(void* data, size_t size);
  // Convenience overload for capnp-serialized buffers.
  inline void write(kj::ArrayPtr<capnp::byte> array) { write(array.begin(), array.size()); }

private:
  // Compress and flush the input cache; last_chunk presumably ends the
  // zstd frame — confirm in the implementation.
  void flushCache(bool last_chunk);

  size_t input_cache_capacity_ = 0;   // max bytes buffered before a flush
  std::vector<char> input_cache_;     // pending uncompressed input
  std::vector<char> output_buffer_;   // scratch buffer for compressed output
  ZSTD_CStream *cstream_;
  FILE* file_ = nullptr;
};
|
||||
Reference in New Issue
Block a user