Release 260111
61  msgq/__init__.py  Normal file
@@ -0,0 +1,61 @@
# must be built with scons
from msgq.ipc_pyx import Context, Poller, SubSocket, PubSocket, SocketEventHandle, toggle_fake_events, \
  set_fake_prefix, get_fake_prefix, delete_fake_prefix, wait_for_one_event
from msgq.ipc_pyx import MultiplePublishersError, IpcError

from typing import Optional, List

assert MultiplePublishersError
assert IpcError
assert toggle_fake_events
assert set_fake_prefix
assert get_fake_prefix
assert delete_fake_prefix
assert wait_for_one_event

NO_TRAVERSAL_LIMIT = 2**64-1

context = Context()


def fake_event_handle(endpoint: str, identifier: Optional[str] = None, override: bool = True, enable: bool = False) -> SocketEventHandle:
  identifier = identifier or get_fake_prefix()
  handle = SocketEventHandle(endpoint, identifier, override)
  if override:
    handle.enabled = enable

  return handle

def pub_sock(endpoint: str) -> PubSocket:
  sock = PubSocket()
  sock.connect(context, endpoint)
  return sock


def sub_sock(endpoint: str, poller: Optional[Poller] = None, addr: str = "127.0.0.1",
             conflate: bool = False, timeout: Optional[int] = None) -> SubSocket:
  sock = SubSocket()
  sock.connect(context, endpoint, addr.encode('utf8'), conflate)

  if timeout is not None:
    sock.setTimeout(timeout)

  if poller is not None:
    poller.registerSocket(sock)
  return sock

def drain_sock_raw(sock: SubSocket, wait_for_one: bool = False) -> List[bytes]:
  """Receive all messages currently available on the queue"""
  ret: List[bytes] = []
  while 1:
    if wait_for_one and len(ret) == 0:
      dat = sock.receive()
    else:
      dat = sock.receive(non_blocking=True)

    if dat is None:
      break

    ret.append(dat)

  return ret
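The module above is the library's Python entry point. A minimal round-trip sketch of that API, assuming the package has been built with scons first; "testEndpoint" is a hypothetical endpoint name:

import msgq

pub = msgq.pub_sock("testEndpoint")
sub = msgq.sub_sock("testEndpoint", conflate=False, timeout=100)

pub.send(b"hello")
msgs = msgq.drain_sock_raw(sub, wait_for_one=True)  # blocks for the first message, then drains
assert msgs == [b"hello"]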
58  msgq/event.h  Normal file
@@ -0,0 +1,58 @@
#pragma once

#include <string>
#include <vector>

#define CEREAL_EVENTS_PREFIX std::string("cereal_events")

void event_state_shm_mmap(std::string endpoint, std::string identifier, char **shm_mem, std::string *shm_path);

enum EventPurpose {
  RECV_CALLED,
  RECV_READY
};

struct EventState {
  int fds[2];
  bool enabled;
};

class Event {
private:
  int event_fd = -1;

  inline void throw_if_invalid() const {
    if (!this->is_valid()) {
      throw std::runtime_error("Event does not have valid file descriptor.");
    }
  }
public:
  Event(int fd = -1);

  void set() const;
  int clear() const;
  void wait(int timeout_sec = -1) const;
  bool peek() const;
  bool is_valid() const;
  int fd() const;

  static int wait_for_one(const std::vector<Event>& events, int timeout_sec = -1);
};

class SocketEventHandle {
private:
  std::string shm_path;
  EventState* state;
public:
  SocketEventHandle(std::string endpoint, std::string identifier = "", bool override = true);
  ~SocketEventHandle();

  bool is_enabled();
  void set_enabled(bool enabled);
  Event recv_called();
  Event recv_ready();

  static void toggle_fake_events(bool enabled);
  static void set_fake_prefix(std::string prefix);
  static std::string fake_prefix();
};
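The Event above wraps a file descriptor that can be set, cleared, peeked, and waited on. A sketch of exercising the same primitive through the Python bindings, following msgq/tests/test_fake.py (the tests skip macOS, where these events are unsupported); the return value of wait_for_one_event is the C++ wait_for_one result, assumed here to identify the event that fired:

import msgq

handle = msgq.fake_event_handle("carState")
event = handle.recv_called_event

event.set()                          # mark the event
assert event.peek()                  # observe without blocking
msgq.wait_for_one_event([event], 5)  # returns once any event in the list is set
event.clear()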
67  msgq/impl_fake.h  Normal file
@@ -0,0 +1,67 @@
#pragma once

#include <cassert>
#include <iostream>
#include <string>
#include <vector>
#include <filesystem>

#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include "msgq/ipc.h"
#include "msgq/event.h"

template<typename TSubSocket>
class FakeSubSocket: public TSubSocket {
private:
  Event *recv_called = nullptr;
  Event *recv_ready = nullptr;
  EventState *state = nullptr;

public:
  FakeSubSocket(): TSubSocket() {}
  ~FakeSubSocket() {
    delete recv_called;
    delete recv_ready;
    if (state != nullptr) {
      munmap(state, sizeof(EventState));
    }
  }

  int connect(Context *context, std::string endpoint, std::string address, bool conflate=false, bool check_endpoint=true) override {
    const char* cereal_prefix = std::getenv("CEREAL_FAKE_PREFIX");

    char* mem;
    std::string identifier = cereal_prefix != nullptr ? std::string(cereal_prefix) : "";
    event_state_shm_mmap(endpoint, identifier, &mem, nullptr);

    this->state = (EventState*)mem;
    this->recv_called = new Event(state->fds[EventPurpose::RECV_CALLED]);
    this->recv_ready = new Event(state->fds[EventPurpose::RECV_READY]);

    return TSubSocket::connect(context, endpoint, address, conflate, check_endpoint);
  }

  Message *receive(bool non_blocking=false) override {
    if (this->state->enabled) {
      this->recv_called->set();
      this->recv_ready->wait();
      this->recv_ready->clear();
    }

    return TSubSocket::receive(non_blocking);
  }
};

class FakePoller: public Poller {
private:
  std::vector<SubSocket*> sockets;

public:
  void registerSocket(SubSocket *socket) override;
  std::vector<SubSocket*> poll(int timeout) override;
  ~FakePoller() {}
};
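FakeSubSocket gates every receive() on the recv_called/recv_ready pair: it sets recv_called, blocks until recv_ready is set, then clears it. A lockstep sketch from the Python side, mirroring test_sockets_enable_disable in test_fake.py:

import msgq

msgq.toggle_fake_events(True)
handle = msgq.fake_event_handle("ubloxGnss", enable=True)

pub = msgq.pub_sock("ubloxGnss")
sub = msgq.sub_sock("ubloxGnss")

handle.recv_ready_event.set()  # allow exactly one receive() to proceed
pub.send(b"test")
sub.receive()                  # sets recv_called, waits on recv_ready, clears it
assert handle.recv_called_event.peek()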
67  msgq/impl_msgq.h  Normal file
@@ -0,0 +1,67 @@
#pragma once

#include <string>
#include <vector>

#include "msgq/ipc.h"
#include "msgq/msgq.h"

#define MAX_POLLERS 128

class MSGQContext : public Context {
private:
  void * context = NULL;
public:
  MSGQContext();
  void * getRawContext() {return context;}
  ~MSGQContext();
};

class MSGQMessage : public Message {
private:
  char * data;
  size_t size;
public:
  void init(size_t size);
  void init(char *data, size_t size);
  void takeOwnership(char *data, size_t size);
  size_t getSize(){return size;}
  char * getData(){return data;}
  void close();
  ~MSGQMessage();
};

class MSGQSubSocket : public SubSocket {
private:
  msgq_queue_t * q = NULL;
  int timeout;
public:
  int connect(Context *context, std::string endpoint, std::string address, bool conflate=false, bool check_endpoint=true);
  void setTimeout(int timeout);
  void * getRawSocket() {return (void*)q;}
  Message *receive(bool non_blocking=false);
  ~MSGQSubSocket();
};

class MSGQPubSocket : public PubSocket {
private:
  msgq_queue_t * q = NULL;
public:
  int connect(Context *context, std::string endpoint, bool check_endpoint=true);
  int sendMessage(Message *message);
  int send(char *data, size_t size);
  bool all_readers_updated();
  ~MSGQPubSocket();
};

class MSGQPoller : public Poller {
private:
  std::vector<SubSocket*> sockets;
  msgq_pollitem_t polls[MAX_POLLERS];
  size_t num_polls = 0;

public:
  void registerSocket(SubSocket *socket);
  std::vector<SubSocket*> poll(int timeout);
  ~MSGQPoller(){}
};
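MSGQSubSocket carries the timeout that the Python bindings expose via sub_sock(timeout=...). A sketch of the resulting behavior; the unit appears to be milliseconds, judging by test_receive_timeout, which passes values under 200 and asserts the call returns within 0.2 s. "someEndpoint" is hypothetical:

import msgq

sub = msgq.sub_sock("someEndpoint", timeout=100)
assert sub.receive() is None  # nothing was published, so receive() gives up after the timeout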
68  msgq/impl_zmq.h  Normal file
@@ -0,0 +1,68 @@
#pragma once

#include <zmq.h>
#include <string>
#include <vector>

#include "msgq/ipc.h"

#define MAX_POLLERS 128

class ZMQContext : public Context {
private:
  void * context = NULL;
public:
  ZMQContext();
  void * getRawContext() {return context;}
  ~ZMQContext();
};

class ZMQMessage : public Message {
private:
  char * data;
  size_t size;
public:
  void init(size_t size);
  void init(char *data, size_t size);
  size_t getSize(){return size;}
  char * getData(){return data;}
  void close();
  ~ZMQMessage();
};

class ZMQSubSocket : public SubSocket {
private:
  void * sock;
  std::string full_endpoint;
public:
  int connect(Context *context, std::string endpoint, std::string address, bool conflate=false, bool check_endpoint=true);
  void setTimeout(int timeout);
  void * getRawSocket() {return sock;}
  Message *receive(bool non_blocking=false);
  ~ZMQSubSocket();
};

class ZMQPubSocket : public PubSocket {
private:
  void * sock;
  std::string full_endpoint;
  int pid = -1;
public:
  int connect(Context *context, std::string endpoint, bool check_endpoint=true);
  int sendMessage(Message *message);
  int send(char *data, size_t size);
  bool all_readers_updated();
  ~ZMQPubSocket();
};

class ZMQPoller : public Poller {
private:
  std::vector<SubSocket*> sockets;
  zmq_pollitem_t polls[MAX_POLLERS];
  size_t num_polls = 0;

public:
  void registerSocket(SubSocket *socket);
  std::vector<SubSocket*> poll(int timeout);
  ~ZMQPoller(){}
};
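The ZMQ classes implement the same abstract interface as the MSGQ ones, and messaging_use_zmq() in ipc.h selects between the backends. The test suite keys off a ZMQ environment variable, so a sketch of opting into the ZMQ transport; the exact variable handling is an assumption based on those tests:

import os
os.environ["ZMQ"] = "1"  # must be set before the backend is chosen

import msgq
pub = msgq.pub_sock("testEndpoint")  # expected to be backed by ZMQPubSocket now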
68  msgq/ipc.h  Normal file
@@ -0,0 +1,68 @@
#pragma once

#include <cstddef>
#include <map>
#include <string>
#include <vector>
#include <utility>
#include <time.h>



#ifdef __APPLE__
#define CLOCK_BOOTTIME CLOCK_MONOTONIC
#endif

#define MSG_MULTIPLE_PUBLISHERS 100

bool messaging_use_zmq();

class Context {
public:
  virtual void * getRawContext() = 0;
  static Context * create();
  virtual ~Context(){}
};

class Message {
public:
  virtual void init(size_t size) = 0;
  virtual void init(char * data, size_t size) = 0;
  virtual void close() = 0;
  virtual size_t getSize() = 0;
  virtual char * getData() = 0;
  virtual ~Message(){}
};


class SubSocket {
public:
  virtual int connect(Context *context, std::string endpoint, std::string address, bool conflate=false, bool check_endpoint=true) = 0;
  virtual void setTimeout(int timeout) = 0;
  virtual Message *receive(bool non_blocking=false) = 0;
  virtual void * getRawSocket() = 0;
  static SubSocket * create();
  static SubSocket * create(Context * context, std::string endpoint, std::string address="127.0.0.1", bool conflate=false, bool check_endpoint=true);
  virtual ~SubSocket(){}
};

class PubSocket {
public:
  virtual int connect(Context *context, std::string endpoint, bool check_endpoint=true) = 0;
  virtual int sendMessage(Message *message) = 0;
  virtual int send(char *data, size_t size) = 0;
  virtual bool all_readers_updated() = 0;
  static PubSocket * create();
  static PubSocket * create(Context * context, std::string endpoint, bool check_endpoint=true);
  static PubSocket * create(Context * context, std::string endpoint, int port, bool check_endpoint=true);
  virtual ~PubSocket(){}
};

class Poller {
public:
  virtual void registerSocket(SubSocket *socket) = 0;
  virtual std::vector<SubSocket*> poll(int timeout) = 0;
  static Poller * create();
  static Poller * create(std::vector<SubSocket*> sockets);
  virtual ~Poller(){}
};
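ipc.h is the backend-agnostic interface the rest of the library programs against; Context, SubSocket, PubSocket, and Poller each have MSGQ and ZMQ implementations behind the static create() factories. A polling-loop sketch over these abstractions through the Python bindings, with "service" as a hypothetical endpoint:

import msgq

poller = msgq.Poller()
sub = msgq.sub_sock("service", poller=poller)  # registers the socket on the poller

for sock in poller.poll(1000):  # timeout as in Poller::poll(int timeout)
  msg = sock.receive(non_blocking=True)
  if msg is not None:
    print(len(msg))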
68  msgq/ipc.pxd  Normal file
@@ -0,0 +1,68 @@
# distutils: language = c++
#cython: language_level=3

from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp cimport bool


cdef extern from "msgq/impl_fake.h":
  cdef cppclass Event:
    @staticmethod
    int wait_for_one(vector[Event], int) except +

    Event()
    Event(int)
    void set()
    int clear()
    void wait(int) except +
    bool peek()
    int fd()

  cdef cppclass SocketEventHandle:
    @staticmethod
    void toggle_fake_events(bool)
    @staticmethod
    void set_fake_prefix(string)
    @staticmethod
    string fake_prefix()

    SocketEventHandle(string, string, bool)
    bool is_enabled()
    void set_enabled(bool)
    Event recv_called()
    Event recv_ready()


cdef extern from "msgq/ipc.h":
  cdef cppclass Context:
    @staticmethod
    Context * create()

  cdef cppclass Message:
    void init(size_t)
    void init(char *, size_t)
    void close()
    size_t getSize()
    char *getData()

  cdef cppclass SubSocket:
    @staticmethod
    SubSocket * create() nogil
    int connect(Context *, string, string, bool) nogil
    Message * receive(bool) nogil
    void setTimeout(int) nogil

  cdef cppclass PubSocket:
    @staticmethod
    PubSocket * create()
    int connect(Context *, string)
    int sendMessage(Message *)
    int send(char *, size_t)
    bool all_readers_updated()

  cdef cppclass Poller:
    @staticmethod
    Poller * create()
    void registerSocket(SubSocket *)
    vector[SubSocket*] poll(int) nogil
251  msgq/ipc_pyx.pyx  Normal file
@@ -0,0 +1,251 @@
# distutils: language = c++
# cython: c_string_encoding=ascii, language_level=3

import sys
from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp cimport bool
from libc cimport errno
from libc.string cimport strerror
from cython.operator import dereference


from .ipc cimport Context as cppContext
from .ipc cimport SubSocket as cppSubSocket
from .ipc cimport PubSocket as cppPubSocket
from .ipc cimport Poller as cppPoller
from .ipc cimport Message as cppMessage
from .ipc cimport Event as cppEvent, SocketEventHandle as cppSocketEventHandle


class IpcError(Exception):
  def __init__(self, endpoint=None):
    suffix = f"with {endpoint.decode('utf-8')}" if endpoint else ""
    message = f"Messaging failure {suffix}: {strerror(errno.errno).decode('utf-8')}"
    super().__init__(message)


class MultiplePublishersError(IpcError):
  pass


def toggle_fake_events(bool enabled):
  cppSocketEventHandle.toggle_fake_events(enabled)


def set_fake_prefix(string prefix):
  cppSocketEventHandle.set_fake_prefix(prefix)


def get_fake_prefix():
  return cppSocketEventHandle.fake_prefix()


def delete_fake_prefix():
  cppSocketEventHandle.set_fake_prefix(b"")


def wait_for_one_event(list events, int timeout=-1):
  cdef vector[cppEvent] items
  for event in events:
    items.push_back(dereference(<cppEvent*><size_t>event.ptr))
  return cppEvent.wait_for_one(items, timeout)


cdef class Event:
  cdef cppEvent event;

  def __cinit__(self):
    pass

  cdef setEvent(self, cppEvent event):
    self.event = event

  def set(self):
    self.event.set()

  def clear(self):
    return self.event.clear()

  def wait(self, int timeout=-1):
    self.event.wait(timeout)

  def peek(self):
    return self.event.peek()

  @property
  def fd(self):
    return self.event.fd()

  @property
  def ptr(self):
    return <size_t><void*>&self.event


cdef class SocketEventHandle:
  cdef cppSocketEventHandle * handle;

  def __cinit__(self, string endpoint, string identifier, bool override):
    self.handle = new cppSocketEventHandle(endpoint, identifier, override)

  def __dealloc__(self):
    del self.handle

  @property
  def enabled(self):
    return self.handle.is_enabled()

  @enabled.setter
  def enabled(self, bool value):
    self.handle.set_enabled(value)

  @property
  def recv_called_event(self):
    e = Event()
    e.setEvent(self.handle.recv_called())

    return e

  @property
  def recv_ready_event(self):
    e = Event()
    e.setEvent(self.handle.recv_ready())

    return e


cdef class Context:
  cdef cppContext * context

  def __cinit__(self):
    self.context = cppContext.create()

  def term(self):
    del self.context
    self.context = NULL

  def __dealloc__(self):
    pass
    # Deleting the context will hang if sockets are still active
    # TODO: Figure out a way to make sure the context is closed last
    # del self.context


cdef class Poller:
  cdef cppPoller * poller
  cdef list sub_sockets

  def __cinit__(self):
    self.sub_sockets = []
    self.poller = cppPoller.create()

  def __dealloc__(self):
    del self.poller

  def registerSocket(self, SubSocket socket):
    self.sub_sockets.append(socket)
    self.poller.registerSocket(socket.socket)

  def poll(self, timeout):
    sockets = []
    cdef int t = timeout

    with nogil:
      result = self.poller.poll(t)

    for s in result:
      socket = SubSocket()
      socket.setPtr(s)
      sockets.append(socket)

    return sockets


cdef class SubSocket:
  cdef cppSubSocket * socket
  cdef bool is_owner

  def __cinit__(self):
    with nogil:
      self.socket = cppSubSocket.create()

    self.is_owner = True
    if self.socket == NULL:
      raise IpcError

  def __dealloc__(self):
    if self.is_owner:
      with nogil:
        del self.socket

  cdef setPtr(self, cppSubSocket * ptr):
    if self.is_owner:
      with nogil:
        del self.socket

    self.is_owner = False
    self.socket = ptr

  def connect(self, Context context, string endpoint, string address=b"127.0.0.1", bool conflate=False):
    cdef int r
    with nogil:
      r = self.socket.connect(context.context, endpoint, address, conflate)

    if r != 0:
      if errno.errno == errno.EADDRINUSE:
        raise MultiplePublishersError(endpoint)
      else:
        raise IpcError(endpoint)

  def setTimeout(self, int timeout):
    with nogil:
      self.socket.setTimeout(timeout)

  def receive(self, bool non_blocking=False):
    cdef cppMessage *msg
    with nogil:
      msg = self.socket.receive(non_blocking)

    if msg == NULL:
      return None
    else:
      sz = msg.getSize()
      m = msg.getData()[:sz]
      with nogil:
        del msg

      return m


cdef class PubSocket:
  cdef cppPubSocket * socket

  def __cinit__(self):
    self.socket = cppPubSocket.create()
    if self.socket == NULL:
      raise IpcError

  def __dealloc__(self):
    del self.socket

  def connect(self, Context context, string endpoint):
    r = self.socket.connect(context.context, endpoint)

    if r != 0:
      if errno.errno == errno.EADDRINUSE:
        raise MultiplePublishersError(endpoint)
      else:
        raise IpcError(endpoint)

  def send(self, bytes data):
    length = len(data)
    r = self.socket.send(<char*>data, length)

    if r != length:
      if errno.errno == errno.EADDRINUSE:
        raise MultiplePublishersError
      else:
        raise IpcError

  def all_readers_updated(self):
    return self.socket.all_readers_updated()
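The bindings above translate EADDRINUSE from connect() and send() into MultiplePublishersError. A sketch of that failure mode; per test_multiple_publishers_exception, the error can surface on the second connect or on a subsequent send, depending on the backend:

import msgq

pub1 = msgq.pub_sock("testEndpoint")
try:
  pub2 = msgq.pub_sock("testEndpoint")  # second publisher on the same endpoint
  pub1.send(b"a")
except msgq.MultiplePublishersError:
  pass  # expected: one publisher per endpoint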
BIN  msgq/ipc_pyx.so  Executable file
Binary file not shown.
21  msgq/logger/logger.h  Normal file
@@ -0,0 +1,21 @@
#pragma once

#ifdef SWAGLOG
// cppcheck-suppress preprocessorErrorDirective
#include SWAGLOG
#else

#define CLOUDLOG_DEBUG 10
#define CLOUDLOG_INFO 20
#define CLOUDLOG_WARNING 30
#define CLOUDLOG_ERROR 40
#define CLOUDLOG_CRITICAL 50

#define cloudlog(lvl, fmt, ...) printf(fmt "\n", ## __VA_ARGS__)

#define LOGD(fmt, ...) cloudlog(CLOUDLOG_DEBUG, fmt, ## __VA_ARGS__)
#define LOG(fmt, ...) cloudlog(CLOUDLOG_INFO, fmt, ## __VA_ARGS__)
#define LOGW(fmt, ...) cloudlog(CLOUDLOG_WARNING, fmt, ## __VA_ARGS__)
#define LOGE(fmt, ...) cloudlog(CLOUDLOG_ERROR, fmt, ## __VA_ARGS__)

#endif
70  msgq/msgq.h  Normal file
@@ -0,0 +1,70 @@
#pragma once

#include <cstdint>
#include <cstring>
#include <string>
#include <atomic>

#define DEFAULT_SEGMENT_SIZE (10 * 1024 * 1024)
#define NUM_READERS 15
#define ALIGN(n) ((n + (8 - 1)) & -8)

#define UNUSED(x) (void)x
#define UNPACK64(higher, lower, input) do {uint64_t tmp = input; higher = tmp >> 32; lower = tmp & 0xFFFFFFFF;} while (0)
#define PACK64(output, higher, lower) output = ((uint64_t)higher << 32) | ((uint64_t)lower & 0xFFFFFFFF)

struct msgq_header_t {
  uint64_t num_readers;
  uint64_t write_pointer;
  uint64_t write_uid;
  uint64_t read_pointers[NUM_READERS];
  uint64_t read_valids[NUM_READERS];
  uint64_t read_uids[NUM_READERS];
};

struct msgq_queue_t {
  std::atomic<uint64_t> *num_readers;
  std::atomic<uint64_t> *write_pointer;
  std::atomic<uint64_t> *write_uid;
  std::atomic<uint64_t> *read_pointers[NUM_READERS];
  std::atomic<uint64_t> *read_valids[NUM_READERS];
  std::atomic<uint64_t> *read_uids[NUM_READERS];
  char * mmap_p;
  char * data;
  size_t size;
  int reader_id;
  uint64_t read_uid_local;
  uint64_t write_uid_local;

  bool read_conflate;
  std::string endpoint;
};

struct msgq_msg_t {
  size_t size;
  char * data;
};

struct msgq_pollitem_t {
  msgq_queue_t *q;
  int revents;
};

void msgq_wait_for_subscriber(msgq_queue_t *q);
void msgq_reset_reader(msgq_queue_t *q);

int msgq_msg_init_size(msgq_msg_t *msg, size_t size);
int msgq_msg_init_data(msgq_msg_t *msg, char * data, size_t size);
int msgq_msg_close(msgq_msg_t *msg);

int msgq_new_queue(msgq_queue_t * q, const char * path, size_t size);
void msgq_close_queue(msgq_queue_t *q);
void msgq_init_publisher(msgq_queue_t * q);
void msgq_init_subscriber(msgq_queue_t * q);

int msgq_msg_send(msgq_msg_t *msg, msgq_queue_t *q);
int msgq_msg_recv(msgq_msg_t *msg, msgq_queue_t *q);
int msgq_msg_ready(msgq_queue_t * q);
int msgq_poll(msgq_pollitem_t * items, size_t nitems, int timeout);

bool msgq_all_readers_updated(msgq_queue_t *q);
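PACK64 and UNPACK64 fold two 32-bit halves into the single 64-bit atomics used for reader bookkeeping. A Python mirror of the two macros, for reference (the extra mask on higher is harmless, since Python integers are unbounded):

def pack64(higher, lower):
  return ((higher & 0xFFFFFFFF) << 32) | (lower & 0xFFFFFFFF)

def unpack64(value):
  return (value >> 32) & 0xFFFFFFFF, value & 0xFFFFFFFF

assert unpack64(pack64(1, 2)) == (1, 2)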
0  msgq/tests/__init__.py  Normal file
188  msgq/tests/test_fake.py  Normal file
@@ -0,0 +1,188 @@
import pytest
import os
import multiprocessing
import platform
import msgq
from parameterized import parameterized_class
from typing import Optional

WAIT_TIMEOUT = 5


@pytest.mark.skipif(condition=platform.system() == "Darwin", reason="Events not supported on macOS")
class TestEvents:

  def test_mutation(self):
    handle = msgq.fake_event_handle("carState")
    event = handle.recv_called_event

    assert not event.peek()
    event.set()
    assert event.peek()
    event.clear()
    assert not event.peek()

    del event

  def test_wait(self):
    handle = msgq.fake_event_handle("carState")
    event = handle.recv_called_event

    event.set()
    try:
      event.wait(WAIT_TIMEOUT)
      assert event.peek()
    except RuntimeError:
      pytest.fail("event.wait() timed out")

  def test_wait_multiprocess(self):
    handle = msgq.fake_event_handle("carState")
    event = handle.recv_called_event

    def set_event_run():
      event.set()

    try:
      p = multiprocessing.Process(target=set_event_run)
      p.start()
      event.wait(WAIT_TIMEOUT)
      assert event.peek()
    except RuntimeError:
      pytest.fail("event.wait() timed out")

    p.kill()

  def test_wait_zero_timeout(self):
    handle = msgq.fake_event_handle("carState")
    event = handle.recv_called_event

    try:
      event.wait(0)
      pytest.fail("event.wait() did not time out")
    except RuntimeError:
      assert not event.peek()


@pytest.mark.skipif(condition=platform.system() == "Darwin", reason="FakeSockets not supported on macOS")
@pytest.mark.skipif(condition="ZMQ" in os.environ, reason="FakeSockets not supported on ZMQ")
@parameterized_class([{"prefix": None}, {"prefix": "test"}])
class TestFakeSockets:
  prefix: Optional[str] = None

  def setup_method(self):
    msgq.toggle_fake_events(True)
    if self.prefix is not None:
      msgq.set_fake_prefix(self.prefix)
    else:
      msgq.delete_fake_prefix()

  def teardown_method(self):
    msgq.toggle_fake_events(False)
    msgq.delete_fake_prefix()

  def test_event_handle_init(self):
    handle = msgq.fake_event_handle("controlsState", override=True)

    assert not handle.enabled
    assert handle.recv_called_event.fd >= 0
    assert handle.recv_ready_event.fd >= 0

  def test_non_managed_socket_state(self):
    # non managed socket should have zero state
    _ = msgq.pub_sock("ubloxGnss")

    handle = msgq.fake_event_handle("ubloxGnss", override=False)

    assert not handle.enabled
    assert handle.recv_called_event.fd == 0
    assert handle.recv_ready_event.fd == 0

  def test_managed_socket_state(self):
    # managed socket should not change anything about the state
    handle = msgq.fake_event_handle("ubloxGnss")
    handle.enabled = True

    expected_enabled = handle.enabled
    expected_recv_called_fd = handle.recv_called_event.fd
    expected_recv_ready_fd = handle.recv_ready_event.fd

    _ = msgq.pub_sock("ubloxGnss")

    assert handle.enabled == expected_enabled
    assert handle.recv_called_event.fd == expected_recv_called_fd
    assert handle.recv_ready_event.fd == expected_recv_ready_fd

  def test_sockets_enable_disable(self):
    carState_handle = msgq.fake_event_handle("ubloxGnss", enable=True)
    recv_called = carState_handle.recv_called_event
    recv_ready = carState_handle.recv_ready_event

    pub_sock = msgq.pub_sock("ubloxGnss")
    sub_sock = msgq.sub_sock("ubloxGnss")

    try:
      carState_handle.enabled = True
      recv_ready.set()
      pub_sock.send(b"test")
      _ = sub_sock.receive()
      assert recv_called.peek()
      recv_called.clear()

      carState_handle.enabled = False
      recv_ready.set()
      pub_sock.send(b"test")
      _ = sub_sock.receive()
      assert not recv_called.peek()
    except RuntimeError:
      pytest.fail("event.wait() timed out")

  def test_synced_pub_sub(self):
    def daemon_repub_process_run():
      pub_sock = msgq.pub_sock("ubloxGnss")
      sub_sock = msgq.sub_sock("carState")

      frame = -1
      while True:
        frame += 1
        msg = sub_sock.receive(non_blocking=True)
        if msg is None:
          print("none received")
          continue

        bts = frame.to_bytes(8, 'little')
        pub_sock.send(bts)

    carState_handle = msgq.fake_event_handle("carState", enable=True)
    recv_called = carState_handle.recv_called_event
    recv_ready = carState_handle.recv_ready_event

    p = multiprocessing.Process(target=daemon_repub_process_run)
    p.start()

    pub_sock = msgq.pub_sock("carState")
    sub_sock = msgq.sub_sock("ubloxGnss")

    try:
      for i in range(10):
        recv_called.wait(WAIT_TIMEOUT)
        recv_called.clear()

        if i == 0:
          sub_sock.receive(non_blocking=True)

        bts = i.to_bytes(8, 'little')
        pub_sock.send(bts)

        recv_ready.set()
        recv_called.wait(WAIT_TIMEOUT)

        msg = sub_sock.receive(non_blocking=True)
        assert msg is not None
        assert len(msg) == 8

        frame = int.from_bytes(msg, 'little')
        assert frame == i
    except RuntimeError:
      pytest.fail("event.wait() timed out")
    finally:
      p.kill()
71  msgq/tests/test_messaging.py  Normal file
@@ -0,0 +1,71 @@
import os
import random
import time
import string
import msgq


def random_sock():
  return ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))

def random_bytes(length=1000):
  return bytes([random.randrange(0xFF) for _ in range(length)])

def zmq_sleep(t=1):
  if "ZMQ" in os.environ:
    time.sleep(t)

class TestPubSubSockets:

  def setup_method(self):
    # ZMQ pub socket takes too long to die
    # sleep to prevent multiple publishers error between tests
    zmq_sleep()

  def test_pub_sub(self):
    sock = random_sock()
    pub_sock = msgq.pub_sock(sock)
    sub_sock = msgq.sub_sock(sock, conflate=False, timeout=None)
    zmq_sleep(3)

    for _ in range(1000):
      msg = random_bytes()
      pub_sock.send(msg)
      recvd = sub_sock.receive()
      assert msg == recvd

  def test_conflate(self):
    sock = random_sock()
    pub_sock = msgq.pub_sock(sock)
    for conflate in [True, False]:
      for _ in range(10):
        num_msgs = random.randint(3, 10)
        sub_sock = msgq.sub_sock(sock, conflate=conflate, timeout=None)
        zmq_sleep()

        sent_msgs = []
        for __ in range(num_msgs):
          msg = random_bytes()
          pub_sock.send(msg)
          sent_msgs.append(msg)
        time.sleep(0.1)
        recvd_msgs = msgq.drain_sock_raw(sub_sock)
        if conflate:
          assert len(recvd_msgs) == 1
          assert recvd_msgs[0] == sent_msgs[-1]
        else:
          assert len(recvd_msgs) == len(sent_msgs)
          for rec_msg, sent_msg in zip(recvd_msgs, sent_msgs):
            assert rec_msg == sent_msg

  def test_receive_timeout(self):
    sock = random_sock()
    for _ in range(10):
      timeout = random.randrange(200)
      sub_sock = msgq.sub_sock(sock, timeout=timeout)
      zmq_sleep()

      start_time = time.monotonic()
      recvd = sub_sock.receive()
      assert (time.monotonic() - start_time) < 0.2
      assert recvd is None
138  msgq/tests/test_poller.py  Normal file
@@ -0,0 +1,138 @@
import pytest
import time
import msgq
import concurrent.futures

SERVICE_NAME = 'myService'

def poller():
  context = msgq.Context()

  p = msgq.Poller()

  sub = msgq.SubSocket()
  sub.connect(context, SERVICE_NAME)
  p.registerSocket(sub)

  socks = p.poll(10000)
  r = [s.receive(non_blocking=True) for s in socks]

  return r


class TestPoller:
  def test_poll_once(self):
    context = msgq.Context()

    pub = msgq.PubSocket()
    pub.connect(context, SERVICE_NAME)

    with concurrent.futures.ThreadPoolExecutor() as e:
      poll = e.submit(poller)

      time.sleep(0.1)  # Slow joiner syndrome

      # Send message
      pub.send(b"a")

      # Wait for poll result
      result = poll.result()

    del pub
    context.term()

    assert result == [b"a"]

  def test_poll_and_create_many_subscribers(self):
    context = msgq.Context()

    pub = msgq.PubSocket()
    pub.connect(context, SERVICE_NAME)

    with concurrent.futures.ThreadPoolExecutor() as e:
      poll = e.submit(poller)

      time.sleep(0.1)  # Slow joiner syndrome
      c = msgq.Context()
      for _ in range(10):
        msgq.SubSocket().connect(c, SERVICE_NAME)

      time.sleep(0.1)

      # Send message
      pub.send(b"a")

      # Wait for poll result
      result = poll.result()

    del pub
    context.term()

    assert result == [b"a"]

  def test_multiple_publishers_exception(self):
    context = msgq.Context()

    with pytest.raises(msgq.MultiplePublishersError):
      pub1 = msgq.PubSocket()
      pub1.connect(context, SERVICE_NAME)

      pub2 = msgq.PubSocket()
      pub2.connect(context, SERVICE_NAME)

      pub1.send(b"a")

    del pub1
    del pub2
    context.term()

  def test_multiple_messages(self):
    context = msgq.Context()

    pub = msgq.PubSocket()
    pub.connect(context, SERVICE_NAME)

    sub = msgq.SubSocket()
    sub.connect(context, SERVICE_NAME)

    time.sleep(0.1)  # Slow joiner

    for i in range(1, 100):
      pub.send(b'a'*i)

    msg_seen = False
    i = 1
    while True:
      r = sub.receive(non_blocking=True)

      if r is not None:
        assert b'a'*i == r

        msg_seen = True
        i += 1

      if r is None and msg_seen:  # ZMQ sometimes receives nothing on the first receive
        break

    del pub
    del sub
    context.term()

  def test_conflate(self):
    context = msgq.Context()

    pub = msgq.PubSocket()
    pub.connect(context, SERVICE_NAME)

    sub = msgq.SubSocket()
    sub.connect(context, SERVICE_NAME, conflate=True)

    time.sleep(0.1)  # Slow joiner
    pub.send(b'a')
    pub.send(b'b')

    assert b'b' == sub.receive()

    del pub
    del sub
    context.term()
6  msgq/visionipc/__init__.py  Normal file
@@ -0,0 +1,6 @@
from msgq.visionipc.visionipc_pyx import VisionBuf, VisionIpcClient, VisionIpcServer, VisionStreamType, get_endpoint_name
assert VisionBuf
assert VisionIpcClient
assert VisionIpcServer
assert VisionStreamType
assert get_endpoint_name
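A minimal VisionIPC server/client round trip with this package, following the patterns in test_visionipc.py below:

import numpy as np
from msgq.visionipc import VisionIpcServer, VisionIpcClient, VisionStreamType

server = VisionIpcServer("camerad")
server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 1, 100, 100)
server.start_listener()

client = VisionIpcClient("camerad", VisionStreamType.VISION_STREAM_ROAD, False)
assert client.connect(True)  # blocking connect

buf = np.zeros(client.buffer_len, dtype=np.uint8)
server.send(VisionStreamType.VISION_STREAM_ROAD, buf, frame_id=42)

recv_buf = client.recv()
assert recv_buf is not None and client.frame_id == 42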
0  msgq/visionipc/tests/__init__.py  Normal file
99  msgq/visionipc/tests/test_visionipc.py  Normal file
@@ -0,0 +1,99 @@
import os
import time
import random
import numpy as np
from msgq.visionipc import VisionIpcServer, VisionIpcClient, VisionStreamType

def zmq_sleep(t=1):
  if "ZMQ" in os.environ:
    time.sleep(t)


class TestVisionIpc:

  def setup_vipc(self, name, *stream_types, num_buffers=1, width=100, height=100, conflate=False):
    self.server = VisionIpcServer(name)
    for stream_type in stream_types:
      self.server.create_buffers(stream_type, num_buffers, width, height)
    self.server.start_listener()

    if len(stream_types):
      self.client = VisionIpcClient(name, stream_types[0], conflate)
      assert self.client.connect(True)
    else:
      self.client = None

    zmq_sleep()
    return self.server, self.client

  def test_connect(self):
    self.setup_vipc("camerad", VisionStreamType.VISION_STREAM_ROAD)
    assert self.client.is_connected
    del self.client
    del self.server

  def test_available_streams(self):
    for k in range(4):
      stream_types = set(random.choices([x.value for x in VisionStreamType], k=k))
      self.setup_vipc("camerad", *stream_types)
      available_streams = VisionIpcClient.available_streams("camerad", True)
      assert available_streams == stream_types
      del self.client
      del self.server

  def test_buffers(self):
    width, height, num_buffers = 100, 200, 5
    self.setup_vipc("camerad", VisionStreamType.VISION_STREAM_ROAD, num_buffers=num_buffers, width=width, height=height)
    assert self.client.width == width
    assert self.client.height == height
    assert self.client.buffer_len > 0
    assert self.client.num_buffers == num_buffers
    del self.client
    del self.server

  def test_send_single_buffer(self):
    self.setup_vipc("camerad", VisionStreamType.VISION_STREAM_ROAD)

    buf = np.zeros(self.client.buffer_len, dtype=np.uint8)
    buf.view('<i4')[0] = 1234
    self.server.send(VisionStreamType.VISION_STREAM_ROAD, buf, frame_id=1337)

    recv_buf = self.client.recv()
    assert recv_buf is not None
    assert recv_buf.data.view('<i4')[0] == 1234
    assert self.client.frame_id == 1337
    del self.client
    del self.server

  def test_no_conflate(self):
    self.setup_vipc("camerad", VisionStreamType.VISION_STREAM_ROAD)

    buf = np.zeros(self.client.buffer_len, dtype=np.uint8)
    self.server.send(VisionStreamType.VISION_STREAM_ROAD, buf, frame_id=1)
    self.server.send(VisionStreamType.VISION_STREAM_ROAD, buf, frame_id=2)

    recv_buf = self.client.recv()
    assert recv_buf is not None
    assert self.client.frame_id == 1

    recv_buf = self.client.recv()
    assert recv_buf is not None
    assert self.client.frame_id == 2
    del self.client
    del self.server

  def test_conflate(self):
    self.setup_vipc("camerad", VisionStreamType.VISION_STREAM_ROAD, conflate=True)

    buf = np.zeros(self.client.buffer_len, dtype=np.uint8)
    self.server.send(VisionStreamType.VISION_STREAM_ROAD, buf, frame_id=1)
    self.server.send(VisionStreamType.VISION_STREAM_ROAD, buf, frame_id=2)

    recv_buf = self.client.recv()
    assert recv_buf is not None
    assert self.client.frame_id == 2

    recv_buf = self.client.recv()
    assert recv_buf is None
    del self.client
    del self.server
62  msgq/visionipc/visionbuf.h  Normal file
@@ -0,0 +1,62 @@
#pragma once

#include "msgq/visionipc/visionipc.h"

#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#ifdef __APPLE__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif

#define VISIONBUF_SYNC_FROM_DEVICE 0
#define VISIONBUF_SYNC_TO_DEVICE 1

enum VisionStreamType {
  VISION_STREAM_ROAD,
  VISION_STREAM_DRIVER,
  VISION_STREAM_WIDE_ROAD,

  VISION_STREAM_MAP,
  VISION_STREAM_MAX,
};

class VisionBuf {
public:
  size_t len = 0;
  size_t mmap_len = 0;
  void * addr = nullptr;
  uint64_t *frame_id;
  int fd = 0;

  size_t width = 0;
  size_t height = 0;
  size_t stride = 0;
  size_t uv_offset = 0;

  // YUV
  uint8_t * y = nullptr;
  uint8_t * uv = nullptr;

  // Visionipc
  uint64_t server_id = 0;
  size_t idx = 0;
  VisionStreamType type;

  // OpenCL
  cl_mem buf_cl = nullptr;
  cl_command_queue copy_q = nullptr;

  // ion
  int handle = 0;

  void allocate(size_t len);
  void import();
  void init_cl(cl_device_id device_id, cl_context ctx);
  void init_yuv(size_t width, size_t height, size_t stride, size_t uv_offset);
  int sync(int dir);
  int free();

  void set_frame_id(uint64_t id);
  uint64_t get_frame_id();
};
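VisionBuf carries the YUV geometry (stride, uv_offset, y, uv) that init_yuv() establishes. A sketch of slicing the planes out of a received buffer on the Python side; the semi-planar NV12-style layout is an assumption, not something this header states:

def split_planes(vision_buf):
  data = vision_buf.data           # flat uint8 view from the Python bindings
  y = data[:vision_buf.uv_offset]  # luma plane, vision_buf.stride bytes per row
  uv = data[vision_buf.uv_offset:] # chroma plane, assumed interleaved (NV12-style)
  return y, uv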
25  msgq/visionipc/visionipc.h  Normal file
@@ -0,0 +1,25 @@
#pragma once

#include <cstdint>
#include <cstddef>


int ipc_connect(const char* socket_path);
int ipc_bind(const char* socket_path);
int ipc_sendrecv_with_fds(bool send, int fd, void *buf, size_t buf_size, int* fds, int num_fds,
                          int *out_num_fds);

constexpr int VISIONIPC_MAX_FDS = 128;

struct VisionIpcBufExtra {
  uint32_t frame_id;
  uint64_t timestamp_sof;
  uint64_t timestamp_eof;
  bool valid;
};

struct VisionIpcPacket {
  uint64_t server_id;
  size_t idx;
  struct VisionIpcBufExtra extra;
};
59  msgq/visionipc/visionipc.pxd  Normal file
@@ -0,0 +1,59 @@
# distutils: language = c++
#cython: language_level=3

from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp.set cimport set
from libc.stdint cimport uint32_t, uint64_t
from libcpp cimport bool, int

cdef extern from "msgq/visionipc/visionbuf.h":
  struct _cl_device_id
  struct _cl_context
  struct _cl_mem

  ctypedef _cl_device_id * cl_device_id
  ctypedef _cl_context * cl_context
  ctypedef _cl_mem * cl_mem

  cdef enum VisionStreamType:
    pass

  cdef cppclass VisionBuf:
    void * addr
    size_t len
    size_t width
    size_t height
    size_t stride
    size_t uv_offset
    cl_mem buf_cl
    void set_frame_id(uint64_t id)

cdef extern from "msgq/visionipc/visionipc.h":
  struct VisionIpcBufExtra:
    uint32_t frame_id
    uint64_t timestamp_sof
    uint64_t timestamp_eof
    bool valid

cdef extern from "msgq/visionipc/visionipc_server.h":
  string get_endpoint_name(string, VisionStreamType)

  cdef cppclass VisionIpcServer:
    VisionIpcServer(string, void*, void*)
    void create_buffers(VisionStreamType, size_t, size_t, size_t)
    void create_buffers_with_sizes(VisionStreamType, size_t, size_t, size_t, size_t, size_t, size_t)
    VisionBuf * get_buffer(VisionStreamType)
    void send(VisionBuf *, VisionIpcBufExtra *, bool)
    void start_listener()

cdef extern from "msgq/visionipc/visionipc_client.h":
  cdef cppclass VisionIpcClient:
    int num_buffers
    VisionBuf buffers[1]
    VisionIpcClient(string, VisionStreamType, bool, void*, void*)
    VisionBuf * recv(VisionIpcBufExtra *, int)
    bool connect(bool)
    bool is_connected()
    @staticmethod
    set[VisionStreamType] getAvailableStreams(string, bool)
31  msgq/visionipc/visionipc_client.h  Normal file
@@ -0,0 +1,31 @@
#pragma once

#include <set>
#include <string>

#include "msgq/ipc.h"
#include "msgq/visionipc/visionbuf.h"


class VisionIpcClient {
private:
  std::string name;
  Context * msg_ctx;
  SubSocket * sock;
  Poller * poller;

  cl_device_id device_id = nullptr;
  cl_context ctx = nullptr;

public:
  bool connected = false;
  VisionStreamType type;
  int num_buffers = 0;
  VisionBuf buffers[VISIONIPC_MAX_FDS];
  VisionIpcClient(std::string name, VisionStreamType type, bool conflate, cl_device_id device_id=nullptr, cl_context ctx=nullptr);
  ~VisionIpcClient();
  VisionBuf * recv(VisionIpcBufExtra * extra=nullptr, const int timeout_ms=100);
  bool connect(bool blocking=true);
  bool is_connected() { return connected; }
  static std::set<VisionStreamType> getAvailableStreams(const std::string &name, bool blocking = true);
};
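getAvailableStreams lets a client discover what a running server publishes before committing to a stream type; the Python bindings expose it as VisionIpcClient.available_streams, as exercised in test_visionipc.py:

from msgq.visionipc import VisionIpcClient

streams = VisionIpcClient.available_streams("camerad", True)  # blocking query
print(streams)  # set of stream-type values the server created buffers for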
15  msgq/visionipc/visionipc_pyx.pxd  Normal file
@@ -0,0 +1,15 @@
# distutils: language = c++
#cython: language_level=3

from .visionipc cimport VisionBuf as cppVisionBuf
from .visionipc cimport cl_device_id, cl_context

cdef class CLContext:
  cdef cl_device_id device_id
  cdef cl_context context

cdef class VisionBuf:
  cdef cppVisionBuf * buf

  @staticmethod
  cdef create(cppVisionBuf*)
160  msgq/visionipc/visionipc_pyx.pyx  Normal file
@@ -0,0 +1,160 @@
# distutils: language = c++
# cython: c_string_encoding=ascii, language_level=3

import sys
import numpy as np
cimport numpy as cnp
from cython.view cimport array
from libc.string cimport memcpy
from libc.stdint cimport uint32_t, uint64_t
from libcpp cimport bool
from libcpp.string cimport string

from .visionipc cimport VisionIpcServer as cppVisionIpcServer
from .visionipc cimport VisionIpcClient as cppVisionIpcClient
from .visionipc cimport VisionBuf as cppVisionBuf
from .visionipc cimport VisionIpcBufExtra
from .visionipc cimport get_endpoint_name as cpp_get_endpoint_name


def get_endpoint_name(string name, VisionStreamType stream):
  return cpp_get_endpoint_name(name, stream).decode('utf-8')


cpdef enum VisionStreamType:
  VISION_STREAM_ROAD
  VISION_STREAM_DRIVER
  VISION_STREAM_WIDE_ROAD
  VISION_STREAM_MAP


cdef class VisionBuf:
  @staticmethod
  cdef create(cppVisionBuf * cbuf):
    buf = VisionBuf()
    buf.buf = cbuf
    return buf

  @property
  def data(self):
    return np.asarray(<cnp.uint8_t[:self.buf.len]> self.buf.addr)

  @property
  def width(self):
    return self.buf.width

  @property
  def height(self):
    return self.buf.height

  @property
  def stride(self):
    return self.buf.stride

  @property
  def uv_offset(self):
    return self.buf.uv_offset


cdef class VisionIpcServer:
  cdef cppVisionIpcServer * server

  def __init__(self, string name):
    self.server = new cppVisionIpcServer(name, NULL, NULL)

  def create_buffers(self, VisionStreamType tp, size_t num_buffers, size_t width, size_t height):
    self.server.create_buffers(tp, num_buffers, width, height)

  def create_buffers_with_sizes(self, VisionStreamType tp, size_t num_buffers, size_t width, size_t height, size_t size, size_t stride, size_t uv_offset):
    self.server.create_buffers_with_sizes(tp, num_buffers, width, height, size, stride, uv_offset)

  def send(self, VisionStreamType tp, const unsigned char[:] data, uint32_t frame_id=0, uint64_t timestamp_sof=0, uint64_t timestamp_eof=0):
    cdef cppVisionBuf * buf = self.server.get_buffer(tp)

    # Populate buffer
    assert buf.len == len(data)
    memcpy(buf.addr, &data[0], len(data))
    buf.set_frame_id(frame_id)

    cdef VisionIpcBufExtra extra
    extra.frame_id = frame_id
    extra.timestamp_sof = timestamp_sof
    extra.timestamp_eof = timestamp_eof

    self.server.send(buf, &extra, False)

  def start_listener(self):
    self.server.start_listener()

  def __dealloc__(self):
    del self.server


cdef class VisionIpcClient:
  cdef cppVisionIpcClient * client
  cdef VisionIpcBufExtra extra

  def __cinit__(self, string name, VisionStreamType stream, bool conflate, CLContext context = None):
    if context:
      self.client = new cppVisionIpcClient(name, stream, conflate, context.device_id, context.context)
    else:
      self.client = new cppVisionIpcClient(name, stream, conflate, NULL, NULL)

  def __dealloc__(self):
    del self.client

  @property
  def width(self):
    return self.client.buffers[0].width if self.client.num_buffers else None

  @property
  def height(self):
    return self.client.buffers[0].height if self.client.num_buffers else None

  @property
  def stride(self):
    return self.client.buffers[0].stride if self.client.num_buffers else None

  @property
  def uv_offset(self):
    return self.client.buffers[0].uv_offset if self.client.num_buffers else None

  @property
  def buffer_len(self):
    return self.client.buffers[0].len if self.client.num_buffers else None

  @property
  def num_buffers(self):
    return self.client.num_buffers

  @property
  def frame_id(self):
    return self.extra.frame_id

  @property
  def timestamp_sof(self):
    return self.extra.timestamp_sof

  @property
  def timestamp_eof(self):
    return self.extra.timestamp_eof

  @property
  def valid(self):
    return self.extra.valid

  def recv(self, int timeout_ms=100):
    buf = self.client.recv(&self.extra, timeout_ms)
    if not buf:
      return None
    return VisionBuf.create(buf)

  def connect(self, bool blocking):
    return self.client.connect(blocking)

  def is_connected(self):
    return self.client.is_connected()

  @staticmethod
  def available_streams(string name, bool block):
    return cppVisionIpcClient.getAvailableStreams(name, block)
BIN  msgq/visionipc/visionipc_pyx.so  Executable file
Binary file not shown.
42  msgq/visionipc/visionipc_server.h  Normal file
@@ -0,0 +1,42 @@
#pragma once
#include <vector>
#include <string>
#include <thread>
#include <atomic>
#include <map>

#include "msgq/ipc.h"
#include "msgq/visionipc/visionbuf.h"

std::string get_endpoint_name(std::string name, VisionStreamType type);
std::string get_ipc_path(const std::string &name);

class VisionIpcServer {
private:
  cl_device_id device_id = nullptr;
  cl_context ctx = nullptr;
  uint64_t server_id;

  std::atomic<bool> should_exit = false;
  std::string name;
  std::thread listener_thread;

  std::map<VisionStreamType, std::atomic<size_t> > cur_idx;
  std::map<VisionStreamType, std::vector<VisionBuf*> > buffers;

  Context * msg_ctx;
  std::map<VisionStreamType, PubSocket*> sockets;

  void listener(void);

public:
  VisionIpcServer(std::string name, cl_device_id device_id=nullptr, cl_context ctx=nullptr);
  ~VisionIpcServer();

  VisionBuf * get_buffer(VisionStreamType type, int idx = -1);

  void create_buffers(VisionStreamType type, size_t num_buffers, size_t width, size_t height);
  void create_buffers_with_sizes(VisionStreamType type, size_t num_buffers, size_t width, size_t height, size_t size, size_t stride, size_t uv_offset);
  void send(VisionBuf * buf, VisionIpcBufExtra * extra, bool sync=true);
  void start_listener();
};