VisionIPC 2.0 (#19641)

* Squashed vipc

* Update release files

* Remove else

* add visionipc to release files

* use poller in vipc receive

* Use the OpenCL framework instead of the lib on macOS

* Fix camera webcam

* Fix opencl on mac in ui

* more webcam fixes

* Fix typo in the UI SCons file

* Use cur_yuv_buf

* visionbuf c++ class

* Camera qcom was still using visionbuf_allocate

* Turn loggerd back on

* fix snapshot

* No build needed

* update test camerad

* no more release callback

* make encoder c++

* Revert "no more release callback"

This reverts commit e5707b07002fee665d0483d90713154efc2d70d4.

* fix exit handlers

* No need to check errno

* move release callback call

* s/VIPCBufExtra/VisionIpcBufExtra/g

* use non blocking connect

* ui use non blocking connect

* Lower condition variable wait time

* Snapshot cleanup

* bump cereal

* bump cereal
pull/19698/head
Willem Melching 2021-01-08 14:54:25 +01:00 committed by GitHub
parent 206d072bb4
commit fb496c692a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
46 changed files with 398 additions and 2180 deletions

2
Jenkinsfile vendored
View File

@ -139,7 +139,7 @@ pipeline {
["test boardd loopback", "nosetests -s selfdrive/boardd/tests/test_boardd_loopback.py"],
["test loggerd", "CI=1 python selfdrive/loggerd/tests/test_loggerd.py"],
["test encoder", "CI=1 python selfdrive/loggerd/tests/test_encoder.py"],
//["test camerad", "CI=1 python selfdrive/camerad/test/test_camerad.py"], // wait for shelf refactor
//["test camerad", "CI=1 SEND_REAR=1 SEND_FRONT=1 python selfdrive/camerad/test/test_camerad.py"], // wait for shelf refactor
//["test updater", "python installer/updater/test_updater.py"],
])
}

View File

@ -171,6 +171,7 @@ env = Environment(
"#selfdrive/ui",
"#cereal",
"#cereal/messaging",
"#cereal/visionipc",
"#opendbc/can",
],
@ -315,19 +316,20 @@ if SHARED:
else:
cereal = [File('#cereal/libcereal.a')]
messaging = [File('#cereal/libmessaging.a')]
visionipc = [File('#cereal/libvisionipc.a')]
Export('cereal', 'messaging')
SConscript(['selfdrive/common/SConscript'])
Import('_common', '_visionipc', '_gpucommon', '_gpu_libs')
Import('_common', '_gpucommon', '_gpu_libs')
if SHARED:
common, visionipc, gpucommon = abspath(common), abspath(visionipc), abspath(gpucommon)
common, gpucommon = abspath(common), abspath(gpucommon)
else:
common = [_common, 'json11']
visionipc = _visionipc
gpucommon = [_gpucommon] + _gpu_libs
Export('common', 'visionipc', 'gpucommon')
Export('common', 'gpucommon', 'visionipc')
# Build openpilot

2
cereal

@ -1 +1 @@
Subproject commit 2220a4f10019d0e192f9cdad3bb388790e59f25b
Subproject commit 266fb9b204cdf4edf5fb8ff4584a4b3eaa35cee2

View File

@ -184,18 +184,13 @@ selfdrive/debug/*.py
selfdrive/common/SConscript
selfdrive/common/version.h
selfdrive/common/buffering.[c,h]
selfdrive/common/framebuffer.h
selfdrive/common/framebuffer.cc
selfdrive/common/glutil.[c,h]
selfdrive/common/touch.[c,h]
selfdrive/common/visionipc.[c,h]
selfdrive/common/visionbuf_cl.c
selfdrive/common/ipc.[c,h]
selfdrive/common/swaglog.h
selfdrive/common/swaglog.cc
selfdrive/common/util.[c,h]
selfdrive/common/efd.[c,h]
selfdrive/common/cqueue.[c,h]
selfdrive/common/clutil.cc
selfdrive/common/clutil.h
@ -208,8 +203,6 @@ selfdrive/common/mat.h
selfdrive/common/timing.h
selfdrive/common/utilpp.h
selfdrive/common/visionbuf.h
selfdrive/common/visionbuf_ion.c
selfdrive/common/visionimg.cc
selfdrive/common/visionimg.h
@ -307,8 +300,8 @@ selfdrive/proclogd/SConscript
selfdrive/proclogd/proclogd.cc
selfdrive/loggerd/SConscript
selfdrive/loggerd/encoder.h
selfdrive/loggerd/encoder.cc
selfdrive/loggerd/encoder.h
selfdrive/loggerd/frame_logger.h
selfdrive/loggerd/logger.cc
selfdrive/loggerd/logger.h
@ -488,6 +481,8 @@ cereal/messaging/messaging_pyx.pyx
cereal/messaging/msgq.cc
cereal/messaging/msgq.hpp
cereal/messaging/socketmaster.cc
cereal/visionipc/*.cc
cereal/visionipc/*.h
panda/.gitignore
panda/__init__.py

View File

@ -28,9 +28,6 @@ else:
env = env.Clone()
env['FRAMEWORKS'] = ['OpenCL', 'OpenGL']
env.SharedLibrary('snapshot/visionipc',
["#selfdrive/common/visionipc.c", "#selfdrive/common/ipc.c"])
env.Program('camerad', [
'main.cc',
'cameras/camera_common.cc',

View File

@ -1,4 +1,5 @@
#include <thread>
#include <chrono>
#include <stdio.h>
#include <assert.h>
#include <unistd.h>
@ -47,17 +48,24 @@ static cl_program build_debayer_program(cl_device_id device_id, cl_context conte
#endif
}
void CameraBuf::init(cl_device_id device_id, cl_context context, CameraState *s, int frame_cnt,
const char *name, release_cb relase_callback) {
void CameraBuf::init(cl_device_id device_id, cl_context context, CameraState *s, VisionIpcServer * v, int frame_cnt, VisionStreamType rgb_type, VisionStreamType yuv_type, release_cb release_callback) {
vipc_server = v;
this->rgb_type = rgb_type;
this->yuv_type = yuv_type;
this->release_callback = release_callback;
const CameraInfo *ci = &s->ci;
camera_state = s;
frame_buf_count = frame_cnt;
frame_size = ci->frame_height * ci->frame_stride;
// RAW frame
frame_size = ci->frame_height * ci->frame_stride;
camera_bufs = std::make_unique<VisionBuf[]>(frame_buf_count);
camera_bufs_metadata = std::make_unique<FrameMetadata[]>(frame_buf_count);
for (int i = 0; i < frame_buf_count; i++) {
camera_bufs[i] = visionbuf_allocate_cl(frame_size, device_id, context);
camera_bufs[i].allocate(frame_size);
camera_bufs[i].init_cl(device_id, context);
}
rgb_width = ci->frame_width;
@ -79,29 +87,10 @@ void CameraBuf::init(cl_device_id device_id, cl_context context, CameraState *s,
}};
yuv_transform = ci->bayer ? transform_scale_buffer(transform, db_s) : transform;
for (int i = 0; i < UI_BUF_COUNT; i++) {
VisionImg img = visionimg_alloc_rgb24(device_id, context, rgb_width, rgb_height, &rgb_bufs[i]);
if (i == 0) {
rgb_stride = img.stride;
}
}
tbuffer_init(&ui_tb, UI_BUF_COUNT, name);
tbuffer_init2(&camera_tb, frame_buf_count, "frame", relase_callback, s);
vipc_server->create_buffers(rgb_type, UI_BUF_COUNT, true, rgb_width, rgb_height);
rgb_stride = vipc_server->get_buffer(rgb_type)->stride;
// yuv back for recording and orbd
pool_init(&yuv_pool, YUV_COUNT);
yuv_tb = pool_get_tbuffer(&yuv_pool);
yuv_width = rgb_width;
yuv_height = rgb_height;
yuv_buf_size = rgb_width * rgb_height * 3 / 2;
for (int i = 0; i < YUV_COUNT; i++) {
yuv_ion[i] = visionbuf_allocate_cl(yuv_buf_size, device_id, context);
yuv_bufs[i].y = (uint8_t *)yuv_ion[i].addr;
yuv_bufs[i].u = yuv_bufs[i].y + (yuv_width * yuv_height);
yuv_bufs[i].v = yuv_bufs[i].u + (yuv_width / 2 * yuv_height / 2);
}
vipc_server->create_buffers(yuv_type, YUV_COUNT, false, rgb_width, rgb_height);
if (ci->bayer) {
cl_program prg_debayer = build_debayer_program(device_id, context, ci, this);
@ -109,7 +98,7 @@ void CameraBuf::init(cl_device_id device_id, cl_context context, CameraState *s,
CL_CHECK(clReleaseProgram(prg_debayer));
}
rgb_to_yuv_init(&rgb_to_yuv_state, context, device_id, yuv_width, yuv_height, rgb_stride);
rgb_to_yuv_init(&rgb_to_yuv_state, context, device_id, rgb_width, rgb_height, rgb_stride);
#ifdef __APPLE__
q = CL_CHECK_ERR(clCreateCommandQueue(context, device_id, 0, &err));
@ -121,14 +110,9 @@ void CameraBuf::init(cl_device_id device_id, cl_context context, CameraState *s,
CameraBuf::~CameraBuf() {
for (int i = 0; i < frame_buf_count; i++) {
visionbuf_free(&camera_bufs[i]);
}
for (auto &buf : rgb_bufs) {
visionbuf_free(&buf);
}
for (auto &buf : yuv_ion) {
visionbuf_free(&buf);
camera_bufs[i].free();
}
rgb_to_yuv_destroy(&rgb_to_yuv_state);
if (krnl_debayer) {
@ -138,24 +122,28 @@ CameraBuf::~CameraBuf() {
}
bool CameraBuf::acquire() {
const int buf_idx = tbuffer_acquire(&camera_tb);
if (buf_idx < 0) {
return false;
}
const FrameMetadata &frame_data = camera_bufs_metadata[buf_idx];
std::unique_lock<std::mutex> lk(frame_queue_mutex);
bool got_frame = frame_queue_cv.wait_for(lk, std::chrono::milliseconds(1), [this]{ return !frame_queue.empty(); });
if (!got_frame) return false;
cur_buf_idx = frame_queue.front();
frame_queue.pop();
lk.unlock();
const FrameMetadata &frame_data = camera_bufs_metadata[cur_buf_idx];
if (frame_data.frame_id == -1) {
LOGE("no frame data? wtf");
tbuffer_release(&camera_tb, buf_idx);
return false;
}
cur_frame_data = frame_data;
cur_rgb_idx = tbuffer_select(&ui_tb);
cur_rgb_buf = &rgb_bufs[cur_rgb_idx];
cur_rgb_buf = vipc_server->get_buffer(rgb_type);
cur_rgb_idx = cur_rgb_buf->idx;
cl_event debayer_event;
cl_mem camrabuf_cl = camera_bufs[buf_idx].buf_cl;
cl_mem camrabuf_cl = camera_bufs[cur_buf_idx].buf_cl;
if (camera_state->ci.bayer) {
CL_CHECK(clSetKernelArg(krnl_debayer, 0, sizeof(cl_mem), &camrabuf_cl));
CL_CHECK(clSetKernelArg(krnl_debayer, 1, sizeof(cl_mem), &cur_rgb_buf->buf_cl));
@ -186,30 +174,37 @@ bool CameraBuf::acquire() {
clWaitForEvents(1, &debayer_event);
CL_CHECK(clReleaseEvent(debayer_event));
tbuffer_release(&camera_tb, buf_idx);
visionbuf_sync(cur_rgb_buf, VISIONBUF_SYNC_FROM_DEVICE);
cur_yuv_idx = pool_select(&yuv_pool);
cur_yuv_buf = vipc_server->get_buffer(yuv_type);
cur_yuv_idx = cur_yuv_buf->idx;
yuv_metas[cur_yuv_idx] = frame_data;
rgb_to_yuv_queue(&rgb_to_yuv_state, q, cur_rgb_buf->buf_cl, yuv_ion[cur_yuv_idx].buf_cl);
visionbuf_sync(&yuv_ion[cur_yuv_idx], VISIONBUF_SYNC_FROM_DEVICE);
rgb_to_yuv_queue(&rgb_to_yuv_state, q, cur_rgb_buf->buf_cl, cur_yuv_buf->buf_cl);
// keep another reference around till were done processing
pool_acquire(&yuv_pool, cur_yuv_idx);
pool_push(&yuv_pool, cur_yuv_idx);
VisionIpcBufExtra extra = {
frame_data.frame_id,
frame_data.timestamp_sof,
frame_data.timestamp_eof,
};
vipc_server->send(cur_rgb_buf, &extra);
vipc_server->send(cur_yuv_buf, &extra);
return true;
}
void CameraBuf::release() {
tbuffer_dispatch(&ui_tb, cur_rgb_idx);
pool_release(&yuv_pool, cur_yuv_idx);
if (release_callback){
release_callback((void*)camera_state, cur_buf_idx);
}
}
void CameraBuf::stop() {
tbuffer_stop(&ui_tb);
tbuffer_stop(&camera_tb);
pool_stop(&yuv_pool);
}
// Hand a filled camera buffer index to the consumer side.
// acquire() blocks on frame_queue_cv with a "queue non-empty" predicate,
// so this pushes under the mutex and then wakes one waiter.
void CameraBuf::queue(size_t buf_idx){
{
// Scoped so the lock is released before notify_one(); the woken
// thread can then take the mutex immediately without contention.
std::lock_guard<std::mutex> lk(frame_queue_mutex);
frame_queue.push(buf_idx);
}
frame_queue_cv.notify_one();
}
// common functions
@ -319,7 +314,7 @@ void set_exposure_target(CameraState *c, const uint8_t *pix_ptr, int x_start, in
uint32_t lum_binning[256] = {0};
for (int y = y_start; y < y_end; y += y_skip) {
for (int x = x_start; x < x_end; x += x_skip) {
uint8_t lum = pix_ptr[(y * b->yuv_width) + x];
uint8_t lum = pix_ptr[(y * b->rgb_width) + x];
lum_binning[lum]++;
}
}
@ -412,7 +407,7 @@ void common_camera_process_front(SubMaster *sm, PubMaster *pm, CameraState *c, i
y_max = 1148;
skip = 4;
#endif
set_exposure_target(c, (const uint8_t *)b->yuv_bufs[b->cur_yuv_idx].y, x_min, x_max, 2, y_min, y_max, skip);
set_exposure_target(c, (const uint8_t *)b->cur_yuv_buf->y, x_min, x_max, 2, y_min, y_max, skip);
}
MessageBuilder msg;

View File

@ -1,20 +1,23 @@
#pragma once
#include <queue>
#include <mutex>
#include <condition_variable>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <memory>
#include <thread>
#include "common/buffering.h"
#include "common/mat.h"
#include "common/swaglog.h"
#include "common/visionbuf.h"
#include "visionbuf.h"
#include "common/visionimg.h"
#include "imgproc/utils.h"
#include "messaging.hpp"
#include "transforms/rgb_to_yuv.h"
#include "common/visionipc.h"
#include "visionipc.h"
#include "visionipc_server.h"
#define CAMERA_ID_IMX298 0
#define CAMERA_ID_IMX179 1
@ -39,6 +42,8 @@ const bool env_send_front = getenv("SEND_FRONT") != NULL;
const bool env_send_rear = getenv("SEND_REAR") != NULL;
const bool env_send_wide = getenv("SEND_WIDE") != NULL;
typedef void (*release_cb)(void *cookie, int buf_idx);
typedef struct CameraInfo {
const char* name;
int frame_width, frame_height;
@ -82,53 +87,52 @@ typedef struct CameraExpInfo {
extern CameraInfo cameras_supported[CAMERA_ID_MAX];
typedef struct {
uint8_t *y, *u, *v;
} YUVBuf;
struct MultiCameraState;
struct CameraState;
typedef void (*release_cb)(void *cookie, int buf_idx);
class CameraBuf {
public:
VisionIpcServer *vipc_server;
CameraState *camera_state;
cl_kernel krnl_debayer;
cl_command_queue q;
Pool yuv_pool;
VisionBuf yuv_ion[YUV_COUNT];
YUVBuf yuv_bufs[YUV_COUNT];
FrameMetadata yuv_metas[YUV_COUNT];
size_t yuv_buf_size;
int yuv_width, yuv_height;
RGBToYUVState rgb_to_yuv_state;
int rgb_width, rgb_height, rgb_stride;
VisionBuf rgb_bufs[UI_BUF_COUNT];
mat3 yuv_transform;
FrameMetadata yuv_metas[YUV_COUNT];
size_t yuv_buf_size;
VisionStreamType rgb_type, yuv_type;
int rgb_width, rgb_height, rgb_stride;
int cur_yuv_idx, cur_rgb_idx;
FrameMetadata cur_frame_data;
VisionBuf *cur_rgb_buf;
VisionBuf *cur_rgb_buf;
VisionBuf *cur_yuv_buf;
int cur_buf_idx;
std::mutex frame_queue_mutex;
std::condition_variable frame_queue_cv;
std::queue<size_t> frame_queue;
std::unique_ptr<VisionBuf[]> camera_bufs;
std::unique_ptr<FrameMetadata[]> camera_bufs_metadata;
TBuffer camera_tb, ui_tb;
TBuffer *yuv_tb; // only for visionserver
int frame_buf_count;
int frame_size;
release_cb release_callback;
CameraBuf() = default;
~CameraBuf();
void init(cl_device_id device_id, cl_context context, CameraState *s, int frame_cnt,
const char *name = "frame", release_cb relase_callback = nullptr);
void init(cl_device_id device_id, cl_context context, CameraState *s, VisionIpcServer * v, int frame_cnt, VisionStreamType rgb_type, VisionStreamType yuv_type, release_cb release_callback=nullptr);
bool acquire();
void release();
void stop();
int frame_buf_count;
int frame_size;
void queue(size_t buf_idx);
};
typedef void (*process_thread_cb)(MultiCameraState *s, CameraState *c, int cnt);

View File

@ -11,7 +11,6 @@
#include "common/utilpp.h"
#include "common/timing.h"
#include "common/swaglog.h"
#include "buffering.h"
extern "C" {
#include <libavcodec/avcodec.h>
@ -30,27 +29,26 @@ void camera_close(CameraState *s) {
s->buf.stop();
}
void camera_init(CameraState *s, int camera_id, unsigned int fps, cl_device_id device_id, cl_context ctx) {
void camera_init(VisionIpcServer * v, CameraState *s, int camera_id, unsigned int fps, cl_device_id device_id, cl_context ctx, VisionStreamType rgb_type, VisionStreamType yuv_type) {
assert(camera_id < ARRAYSIZE(cameras_supported));
s->ci = cameras_supported[camera_id];
assert(s->ci.frame_width != 0);
s->fps = fps;
s->buf.init(device_id, ctx, s, FRAME_BUF_COUNT, "camera");
s->buf.init(device_id, ctx, s, v, FRAME_BUF_COUNT, rgb_type, yuv_type);
}
void run_frame_stream(MultiCameraState *s) {
s->sm = new SubMaster({"frame"});
CameraState *const rear_camera = &s->rear;
auto *tb = &rear_camera->buf.camera_tb;
size_t buf_idx = 0;
while (!do_exit) {
if (s->sm->update(1000) == 0) continue;
auto frame = (*(s->sm))["frame"].getFrame();
const int buf_idx = tbuffer_select(tb);
rear_camera->buf.camera_bufs_metadata[buf_idx] = {
.frame_id = frame.getFrameId(),
.timestamp_eof = frame.getTimestampEof(),
@ -63,8 +61,11 @@ void run_frame_stream(MultiCameraState *s) {
cl_mem yuv_cl = rear_camera->buf.camera_bufs[buf_idx].buf_cl;
clEnqueueWriteBuffer(q, yuv_cl, CL_TRUE, 0, frame.getImage().size(), frame.getImage().begin(), 0, NULL, NULL);
tbuffer_dispatch(tb, buf_idx);
rear_camera->buf.queue(buf_idx);
buf_idx = (buf_idx + 1) % FRAME_BUF_COUNT;
}
delete s->sm;
}
} // namespace
@ -87,9 +88,11 @@ CameraInfo cameras_supported[CAMERA_ID_MAX] = {
},
};
void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
camera_init(&s->rear, CAMERA_ID_IMX298, 20, device_id, ctx);
camera_init(&s->front, CAMERA_ID_OV8865, 10, device_id, ctx);
void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
camera_init(v, &s->rear, CAMERA_ID_IMX298, 20, device_id, ctx,
VISION_STREAM_RGB_BACK, VISION_STREAM_YUV_BACK);
camera_init(v, &s->front, CAMERA_ID_OV8865, 10, device_id, ctx,
VISION_STREAM_RGB_FRONT, VISION_STREAM_YUV_FRONT);
}
void camera_autoexposure(CameraState *s, float grey_frac) {}

View File

@ -34,7 +34,7 @@ typedef struct MultiCameraState {
PubMaster *pm;
} MultiCameraState;
void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx);
void cameras_init(VisionIpcServer * v, MultiCameraState *s, cl_device_id device_id, cl_context ctx);
void cameras_open(MultiCameraState *s);
void cameras_run(MultiCameraState *s);
void cameras_close(MultiCameraState *s);

View File

@ -99,9 +99,10 @@ static void camera_release_buffer(void* cookie, int buf_idx) {
ioctl(s->isp_fd, VIDIOC_MSM_ISP_ENQUEUE_BUF, &s->ss[0].qbuf_info[buf_idx]);
}
static void camera_init(CameraState *s, int camera_id, int camera_num,
static void camera_init(VisionIpcServer *v, CameraState *s, int camera_id, int camera_num,
uint32_t pixel_clock, uint32_t line_length_pclk,
unsigned int max_gain, unsigned int fps, cl_device_id device_id, cl_context ctx) {
unsigned int max_gain, unsigned int fps, cl_device_id device_id, cl_context ctx,
VisionStreamType rgb_type, VisionStreamType yuv_type) {
s->camera_num = camera_num;
s->camera_id = camera_id;
@ -116,7 +117,7 @@ static void camera_init(CameraState *s, int camera_id, int camera_num,
s->self_recover = 0;
s->buf.init(device_id, ctx, s, FRAME_BUF_COUNT, "frame", camera_release_buffer);
s->buf.init(device_id, ctx, s, v, FRAME_BUF_COUNT, rgb_type, yuv_type, camera_release_buffer);
pthread_mutex_init(&s->frame_info_lock, NULL);
}
@ -252,7 +253,7 @@ cl_program build_conv_program(cl_device_id device_id, cl_context context, int im
return cl_program_from_file(context, device_id, "imgproc/conv.cl", args);
}
void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
char project_name[1024] = {0};
property_get("ro.boot.project_name", project_name, "");
@ -288,7 +289,7 @@ void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
// 508 = ISO 12800, 16x digital gain
// 510 = ISO 25600, 32x digital gain
camera_init(&s->rear, CAMERA_ID_IMX298, 0,
camera_init(v, &s->rear, CAMERA_ID_IMX298, 0,
/*pixel_clock=*/600000000, /*line_length_pclk=*/5536,
/*max_gain=*/510, //0 (ISO 100)- 448 (ISO 800, max analog gain) - 511 (super noisy)
#ifdef HIGH_FPS
@ -296,23 +297,27 @@ void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
#else
/*fps*/ 20,
#endif
device_id, ctx);
device_id, ctx,
VISION_STREAM_RGB_BACK, VISION_STREAM_YUV_BACK);
s->rear.apply_exposure = imx298_apply_exposure;
if (s->device == DEVICE_OP3T) {
camera_init(&s->front, CAMERA_ID_S5K3P8SP, 1,
camera_init(v, &s->front, CAMERA_ID_S5K3P8SP, 1,
/*pixel_clock=*/560000000, /*line_length_pclk=*/5120,
/*max_gain=*/510, 10, device_id, ctx);
/*max_gain=*/510, 10, device_id, ctx,
VISION_STREAM_RGB_FRONT, VISION_STREAM_YUV_FRONT);
s->front.apply_exposure = imx179_s5k3p8sp_apply_exposure;
} else if (s->device == DEVICE_LP3) {
camera_init(&s->front, CAMERA_ID_OV8865, 1,
camera_init(v, &s->front, CAMERA_ID_OV8865, 1,
/*pixel_clock=*/72000000, /*line_length_pclk=*/1602,
/*max_gain=*/510, 10, device_id, ctx);
/*max_gain=*/510, 10, device_id, ctx,
VISION_STREAM_RGB_FRONT, VISION_STREAM_YUV_FRONT);
s->front.apply_exposure = ov8865_apply_exposure;
} else {
camera_init(&s->front, CAMERA_ID_IMX179, 1,
camera_init(v, &s->front, CAMERA_ID_IMX179, 1,
/*pixel_clock=*/251200000, /*line_length_pclk=*/3440,
/*max_gain=*/224, 20, device_id, ctx);
/*max_gain=*/224, 20, device_id, ctx,
VISION_STREAM_RGB_FRONT, VISION_STREAM_YUV_FRONT);
s->front.apply_exposure = imx179_s5k3p8sp_apply_exposure;
}
@ -324,8 +329,8 @@ void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
for (int i = 0; i < FRAME_BUF_COUNT; i++) {
// TODO: make lengths correct
s->focus_bufs[i] = visionbuf_allocate(0xb80);
s->stats_bufs[i] = visionbuf_allocate(0xb80);
s->focus_bufs[i].allocate(0xb80);
s->stats_bufs[i].allocate(0xb80);
}
const int width = s->rear.buf.rgb_width/NUM_SEGMENTS_X;
const int height = s->rear.buf.rgb_height/NUM_SEGMENTS_Y;
@ -1766,7 +1771,7 @@ static std::optional<float> get_accel_z(SubMaster *sm) {
if (sm->update(0) > 0) {
for (auto event : (*sm)["sensorEvents"].getSensorEvents()) {
if (event.which() == cereal::SensorEventData::ACCELERATION) {
if (auto v = event.getAcceleration().getV(); v.size() >= 3)
if (auto v = event.getAcceleration().getV(); v.size() >= 3)
return -v[2];
break;
}
@ -2118,7 +2123,7 @@ void camera_process_frame(MultiCameraState *s, CameraState *c, int cnt) {
if (cnt % 3 == 0) {
const int x = 290, y = 322, width = 560, height = 314;
const int skip = 1;
set_exposure_target(c, (const uint8_t *)b->yuv_bufs[b->cur_yuv_idx].y, x, x + width, skip, y, y + height, skip);
set_exposure_target(c, (const uint8_t *)b->cur_yuv_buf->y, x, x + width, skip, y, y + height, skip);
}
}
@ -2190,7 +2195,7 @@ void cameras_run(MultiCameraState *s) {
if (buffer == 0) {
c->buf.camera_bufs_metadata[buf_idx] = get_frame_metadata(c, isp_event_data->frame_id);
tbuffer_dispatch(&c->buf.camera_tb, buf_idx);
c->buf.queue(buf_idx);
} else {
uint8_t *d = (uint8_t*)(c->ss[buffer].bufs[buf_idx].addr);
if (buffer == 1) {
@ -2242,8 +2247,8 @@ void cameras_close(MultiCameraState *s) {
camera_close(&s->rear);
camera_close(&s->front);
for (int i = 0; i < FRAME_BUF_COUNT; i++) {
visionbuf_free(&s->focus_bufs[i]);
visionbuf_free(&s->stats_bufs[i]);
s->focus_bufs[i].free();
s->stats_bufs[i].free();
}
CL_CHECK(clReleaseMemObject(s->rgb_conv_roi_cl));
CL_CHECK(clReleaseMemObject(s->rgb_conv_result_cl));

View File

@ -12,9 +12,9 @@
#include "msmb_camera.h"
#include "msm_cam_sensor.h"
#include "visionbuf.h"
#include "common/mat.h"
#include "common/visionbuf.h"
#include "common/buffering.h"
#include "common/utilpp.h"
#include "camera_common.h"
@ -137,7 +137,7 @@ typedef struct MultiCameraState {
} MultiCameraState;
void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx);
void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_id, cl_context ctx);
void cameras_open(MultiCameraState *s);
void cameras_run(MultiCameraState *s);
void cameras_close(MultiCameraState *s);

View File

@ -501,8 +501,8 @@ void enqueue_buffer(struct CameraState *s, int i, bool dp) {
// LOGD("fence wait: %d %d", ret, sync_wait.sync_obj);
s->buf.camera_bufs_metadata[i].timestamp_eof = (uint64_t)nanos_since_boot(); // set true eof
if (dp) tbuffer_dispatch(&s->buf.camera_tb, i);
if (dp) s->buf.queue(i);
// destroy old output fence
struct cam_sync_info sync_destroy = {0};
strcpy(sync_destroy.name, "NodeOutputPortFence");
@ -535,11 +535,11 @@ void enqueue_buffer(struct CameraState *s, int i, bool dp) {
ret = cam_control(s->video0_fd, CAM_REQ_MGR_MAP_BUF, &mem_mgr_map_cmd, sizeof(mem_mgr_map_cmd));
// LOGD("map buf req: (fd: %d) 0x%x %d", s->bufs[i].fd, mem_mgr_map_cmd.out.buf_handle, ret);
s->buf_handle[i] = mem_mgr_map_cmd.out.buf_handle;
// poke sensor
sensors_poke(s, request_id);
// LOGD("Poked sensor");
// push the buffer
config_isp(s, s->buf_handle[i], s->sync_objs[i], request_id, s->buf0_handle, 65632*(i+1));
}
@ -553,7 +553,7 @@ void enqueue_req_multi(struct CameraState *s, int start, int n, bool dp) {
// ******************* camera *******************
static void camera_init(CameraState *s, int camera_id, int camera_num, unsigned int fps, cl_device_id device_id, cl_context ctx) {
static void camera_init(VisionIpcServer * v, CameraState *s, int camera_id, int camera_num, unsigned int fps, cl_device_id device_id, cl_context ctx, VisionStreamType rgb_type, VisionStreamType yuv_type) {
LOGD("camera init %d", camera_num);
assert(camera_id < ARRAYSIZE(cameras_supported));
@ -572,7 +572,7 @@ static void camera_init(CameraState *s, int camera_id, int camera_num, unsigned
s->skipped = true;
s->ef_filtered = 1.0;
s->buf.init(device_id, ctx, s, FRAME_BUF_COUNT, "frame");
s->buf.init(device_id, ctx, s, v, FRAME_BUF_COUNT, rgb_type, yuv_type);
}
// TODO: refactor this to somewhere nicer, perhaps use in camera_qcom as well
@ -712,7 +712,7 @@ static void camera_open(CameraState *s) {
acquire_dev_cmd.resource_hdl = (uint64_t)&csiphy_acquire_dev_info;
ret = cam_control(s->csiphy_fd, CAM_ACQUIRE_DEV, &acquire_dev_cmd, sizeof(acquire_dev_cmd));
LOGD("acquire csiphy dev: %d", ret);
s->csiphy_dev_handle = acquire_dev_cmd.dev_handle;
@ -795,12 +795,15 @@ static void camera_open(CameraState *s) {
enqueue_req_multi(s, 1, FRAME_BUF_COUNT, 0);
}
void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
camera_init(&s->rear, CAMERA_ID_AR0231, 1, 20, device_id, ctx); // swap left/right
void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
camera_init(v, &s->rear, CAMERA_ID_AR0231, 1, 20, device_id, ctx,
VISION_STREAM_RGB_BACK, VISION_STREAM_YUV_BACK); // swap left/right
printf("rear initted \n");
camera_init(&s->wide, CAMERA_ID_AR0231, 0, 20, device_id, ctx);
camera_init(v, &s->wide, CAMERA_ID_AR0231, 0, 20, device_id, ctx,
VISION_STREAM_RGB_WIDE, VISION_STREAM_YUV_WIDE);
printf("wide initted \n");
camera_init(&s->front, CAMERA_ID_AR0231, 2, 20, device_id, ctx);
camera_init(v, &s->front, CAMERA_ID_AR0231, 2, 20, device_id, ctx,
VISION_STREAM_RGB_FRONT, VISION_STREAM_YUV_FRONT);
printf("front initted \n");
s->sm = new SubMaster({"driverState"});
@ -852,7 +855,7 @@ void cameras_open(MultiCameraState *s) {
sub.id = 2; // should use boot time for sof
ret = ioctl(s->video0_fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
printf("req mgr subscribe: %d\n", ret);
camera_open(&s->rear);
printf("rear opened \n");
camera_open(&s->wide);
@ -1134,7 +1137,7 @@ void camera_process_frame(MultiCameraState *s, CameraState *c, int cnt) {
exposure_height = 524;
}
int skip = 2;
set_exposure_target(c, (const uint8_t *)b->yuv_bufs[b->cur_yuv_idx].y, exposure_x, exposure_x + exposure_width, skip, exposure_y, exposure_y + exposure_height, skip);
set_exposure_target(c, (const uint8_t *)b->cur_yuv_buf->y, exposure_x, exposure_x + exposure_width, skip, exposure_y, exposure_y + exposure_height, skip);
}
}
@ -1201,6 +1204,6 @@ void cameras_run(MultiCameraState *s) {
assert(err == 0);
cameras_close(s);
for (auto &t : threads) t.join();
}

View File

@ -83,7 +83,7 @@ typedef struct MultiCameraState {
PubMaster *pm;
} MultiCameraState;
void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx);
void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_id, cl_context ctx);
void cameras_open(MultiCameraState *s);
void cameras_run(MultiCameraState *s);
void camera_autoexposure(CameraState *s, float grey_frac);

View File

@ -35,21 +35,21 @@ void camera_close(CameraState *s) {
s->buf.stop();
}
void camera_init(CameraState *s, int camera_id, unsigned int fps, cl_device_id device_id, cl_context ctx) {
void camera_init(VisionIpcServer * v, CameraState *s, int camera_id, unsigned int fps, cl_device_id device_id, cl_context ctx, VisionStreamType rgb_type, VisionStreamType yuv_type) {
assert(camera_id < ARRAYSIZE(cameras_supported));
s->ci = cameras_supported[camera_id];
assert(s->ci.frame_width != 0);
s->fps = fps;
s->buf.init(device_id, ctx, s, FRAME_BUF_COUNT, "frame");
s->buf.init(device_id, ctx, s, v, FRAME_BUF_COUNT, rgb_type, yuv_type);
}
static void* rear_thread(void *arg) {
int err;
set_thread_name("webcam_rear_thread");
CameraState* s = (CameraState*)arg;
CameraState *s = (CameraState*)arg;
cv::VideoCapture cap_rear(1); // road
cap_rear.set(cv::CAP_PROP_FRAME_WIDTH, 853);
@ -78,8 +78,7 @@ static void* rear_thread(void *arg) {
}
uint32_t frame_id = 0;
TBuffer* tb = &s->buf.camera_tb;
size_t buf_idx = 0;
while (!do_exit) {
cv::Mat frame_mat;
cv::Mat transformed_mat;
@ -94,13 +93,13 @@ static void* rear_thread(void *arg) {
int transformed_size = transformed_mat.total() * transformed_mat.elemSize();
const int buf_idx = tbuffer_select(tb);
s->buf.camera_bufs_metadata[buf_idx] = {
.frame_id = frame_id,
};
cl_command_queue q = s->buf.camera_bufs[buf_idx].copy_q;
cl_mem yuv_cl = s->buf.camera_bufs[buf_idx].buf_cl;
cl_event map_event;
void *yuv_buf = (void *)CL_CHECK_ERR(clEnqueueMapBuffer(q, yuv_cl, CL_TRUE,
CL_MAP_WRITE, 0, transformed_size,
@ -112,11 +111,15 @@ static void* rear_thread(void *arg) {
CL_CHECK(clEnqueueUnmapMemObject(q, yuv_cl, yuv_buf, 0, NULL, &map_event));
clWaitForEvents(1, &map_event);
clReleaseEvent(map_event);
tbuffer_dispatch(tb, buf_idx);
s->buf.queue(buf_idx);
frame_id += 1;
frame_mat.release();
transformed_mat.release();
buf_idx = (buf_idx + 1) % FRAME_BUF_COUNT;
}
cap_rear.release();
@ -151,7 +154,7 @@ void front_thread(CameraState *s) {
}
uint32_t frame_id = 0;
TBuffer* tb = &s->buf.camera_tb;
size_t buf_idx = 0;
while (!do_exit) {
cv::Mat frame_mat;
@ -167,7 +170,6 @@ void front_thread(CameraState *s) {
int transformed_size = transformed_mat.total() * transformed_mat.elemSize();
const int buf_idx = tbuffer_select(tb);
s->buf.camera_bufs_metadata[buf_idx] = {
.frame_id = frame_id,
};
@ -185,11 +187,14 @@ void front_thread(CameraState *s) {
CL_CHECK(clEnqueueUnmapMemObject(q, yuv_cl, yuv_buf, 0, NULL, &map_event));
clWaitForEvents(1, &map_event);
clReleaseEvent(map_event);
tbuffer_dispatch(tb, buf_idx);
s->buf.queue(buf_idx);
frame_id += 1;
frame_mat.release();
transformed_mat.release();
buf_idx = (buf_idx + 1) % FRAME_BUF_COUNT;
}
cap_front.release();
@ -217,10 +222,11 @@ CameraInfo cameras_supported[CAMERA_ID_MAX] = {
},
};
void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
camera_init(&s->rear, CAMERA_ID_LGC920, 20, device_id, ctx);
camera_init(&s->front, CAMERA_ID_LGC615, 10, device_id, ctx);
void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_id, cl_context ctx) {
camera_init(v, &s->rear, CAMERA_ID_LGC920, 20, device_id, ctx,
VISION_STREAM_RGB_BACK, VISION_STREAM_YUV_BACK);
camera_init(v, &s->front, CAMERA_ID_LGC615, 10, device_id, ctx,
VISION_STREAM_RGB_FRONT, VISION_STREAM_YUV_FRONT);
s->pm = new PubMaster({"frame", "frontFrame"});
}
@ -253,7 +259,7 @@ void camera_process_rear(MultiCameraState *s, CameraState *c, int cnt) {
MessageBuilder msg;
auto framed = msg.initEvent().initFrame();
fill_frame_data(framed, b->cur_frame_data, cnt);
framed.setImage(kj::arrayPtr((const uint8_t *)b->yuv_ion[b->cur_yuv_idx].addr, b->yuv_buf_size));
framed.setImage(kj::arrayPtr((const uint8_t *)b->cur_yuv_buf->addr, b->cur_yuv_buf->len));
framed.setTransform(b->yuv_transform.v);
s->pm->send("frame", msg);
}

View File

@ -28,7 +28,7 @@ typedef struct MultiCameraState {
PubMaster *pm;
} MultiCameraState;
void cameras_init(MultiCameraState *s, cl_device_id device_id, cl_context ctx);
void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_id, cl_context ctx);
void cameras_open(MultiCameraState *s);
void cameras_run(MultiCameraState *s);
void cameras_close(MultiCameraState *s);

View File

@ -18,301 +18,29 @@
#include <libyuv.h>
#include "clutil.h"
#include "common/ipc.h"
#include "common/params.h"
#include "common/swaglog.h"
#include "common/util.h"
#include "common/utilpp.h"
#include "common/visionipc.h"
#define MAX_CLIENTS 6
#include "visionipc_server.h"
ExitHandler do_exit;
struct VisionState;
struct VisionClientState {
VisionState *s;
int fd;
pthread_t thread_handle;
bool running;
};
struct VisionClientStreamState {
bool subscribed;
int bufs_outstanding;
bool tb;
TBuffer* tbuffer;
PoolQueue* queue;
};
struct VisionState {
MultiCameraState cameras;
pthread_mutex_t clients_lock;
VisionClientState clients[MAX_CLIENTS];
};
static CameraBuf *get_camerabuf_by_type(VisionState *s, VisionStreamType type) {
assert(type >= 0 && type < VISION_STREAM_MAX);
if (type == VISION_STREAM_RGB_BACK || type == VISION_STREAM_YUV) {
return &s->cameras.rear.buf;
} else if (type == VISION_STREAM_RGB_FRONT || type == VISION_STREAM_YUV_FRONT) {
return &s->cameras.front.buf;
}
#ifdef QCOM2
return &s->cameras.wide.buf;
#endif
assert(0);
}
// visionserver
// Per-client service loop. Polls the client socket for control packets
// (subscribe / release) and, for each subscribed stream, polls that stream's
// event fd for newly available frames, which are forwarded as ACQUIRE packets.
// Runs until the client disconnects, a poll error occurs, or do_exit is set.
void* visionserver_client_thread(void* arg) {
  int err;
  auto *client = (VisionClientState*)arg;
  VisionState *s = client->s;
  int fd = client->fd;
  set_thread_name("clientthread");
  // Per-stream subscription state for this client only.
  VisionClientStreamState streams[VISION_STREAM_MAX] = {{false}};
  LOGW("client start fd %d", fd);
  while (true) {
    // polls[0] is always the client socket; the rest are stream event fds.
    struct pollfd polls[1+VISION_STREAM_MAX] = {{0}};
    polls[0].fd = fd;
    polls[0].events = POLLIN;
    int poll_to_stream[1+VISION_STREAM_MAX] = {0};
    int num_polls = 1;
    for (int i=0; i<VISION_STREAM_MAX; i++) {
      if (!streams[i].subscribed) continue;
      polls[num_polls].events = POLLIN;
      // Back-pressure: don't hand out more than 2 unreleased buffers per stream.
      if (streams[i].bufs_outstanding >= 2) {
        continue;
      }
      if (streams[i].tb) {
        polls[num_polls].fd = tbuffer_efd(streams[i].tbuffer);
      } else {
        polls[num_polls].fd = poolq_efd(streams[i].queue);
      }
      poll_to_stream[num_polls] = i;
      num_polls++;
    }
    int ret = poll(polls, num_polls, -1);
    if (ret < 0) {
      if (errno == EINTR || errno == EAGAIN) continue;
      LOGE("poll failed (%d - %d)", ret, errno);
      break;
    }
    if (do_exit) break;
    if (polls[0].revents) {
      // Control traffic from the client.
      VisionPacket p;
      err = vipc_recv(fd, &p);
      if (err <= 0) {
        break;  // client hung up or recv error
      } else if (p.type == VIPC_STREAM_SUBSCRIBE) {
        VisionStreamType stream_type = p.d.stream_sub.type;
        VisionPacket rep = {
          .type = VIPC_STREAM_BUFS,
          .d = { .stream_bufs = { .type = stream_type }, },
        };
        VisionClientStreamState *stream = &streams[stream_type];
        stream->tb = p.d.stream_sub.tbuffer;
        VisionStreamBufs *stream_bufs = &rep.d.stream_bufs;
        CameraBuf *b = get_camerabuf_by_type(s, stream_type);
        if (stream_type == VISION_STREAM_RGB_BACK ||
            stream_type == VISION_STREAM_RGB_FRONT ||
            stream_type == VISION_STREAM_RGB_WIDE) {
          // RGB streams share buffer fds with the client via SCM_RIGHTS.
          stream_bufs->width = b->rgb_width;
          stream_bufs->height = b->rgb_height;
          stream_bufs->stride = b->rgb_stride;
          stream_bufs->buf_len = b->rgb_bufs[0].len;
          rep.num_fds = UI_BUF_COUNT;
          for (int i = 0; i < rep.num_fds; i++) {
            rep.fds[i] = b->rgb_bufs[i].fd;
          }
          if (stream->tb) {
            stream->tbuffer = &b->ui_tb;
          } else {
            assert(false);  // RGB streams only support tbuffer mode
          }
        } else {
          stream_bufs->width = b->yuv_width;
          stream_bufs->height = b->yuv_height;
          stream_bufs->stride = b->yuv_width;
          stream_bufs->buf_len = b->yuv_buf_size;
          rep.num_fds = YUV_COUNT;
          for (int i = 0; i < rep.num_fds; i++) {
            rep.fds[i] = b->yuv_ion[i].fd;
          }
          if (stream->tb) {
            stream->tbuffer = b->yuv_tb;
          } else {
            stream->queue = pool_get_queue(&b->yuv_pool);
          }
        }
        vipc_send(fd, &rep);
        streams[stream_type].subscribed = true;
      } else if (p.type == VIPC_STREAM_RELEASE) {
        // Client is done with a previously acquired buffer.
        int si = p.d.stream_rel.type;
        assert(si < VISION_STREAM_MAX);
        if (streams[si].tb) {
          tbuffer_release(streams[si].tbuffer, p.d.stream_rel.idx);
        } else {
          poolq_release(streams[si].queue, p.d.stream_rel.idx);
        }
        streams[p.d.stream_rel.type].bufs_outstanding--;
      } else {
        assert(false);  // unknown packet type
      }
    } else {
      // A stream fd became readable: a new frame is available.
      int stream_i = VISION_STREAM_MAX;
      for (int i=1; i<num_polls; i++) {
        int si = poll_to_stream[i];
        if (!streams[si].subscribed) continue;
        if (polls[i].revents) {
          stream_i = si;
          break;
        }
      }
      if (stream_i < VISION_STREAM_MAX) {
        streams[stream_i].bufs_outstanding++;
        int idx;
        if (streams[stream_i].tb) {
          idx = tbuffer_acquire(streams[stream_i].tbuffer);
        } else {
          idx = poolq_pop(streams[stream_i].queue);
        }
        if (idx < 0) {
          break;  // stream was stopped
        }
        VisionPacket rep = {
          .type = VIPC_STREAM_ACQUIRE,
          .d = {.stream_acq = {
            .type = (VisionStreamType)stream_i,
            .idx = idx,
          }},
        };
        if (stream_i == VISION_STREAM_YUV ||
            stream_i == VISION_STREAM_YUV_FRONT ||
            stream_i == VISION_STREAM_YUV_WIDE) {
          // YUV frames carry per-frame metadata (id and timestamps).
          CameraBuf *b = get_camerabuf_by_type(s, (VisionStreamType)stream_i);
          rep.d.stream_acq.extra.frame_id = b->yuv_metas[idx].frame_id;
          rep.d.stream_acq.extra.timestamp_sof = b->yuv_metas[idx].timestamp_sof;
          rep.d.stream_acq.extra.timestamp_eof = b->yuv_metas[idx].timestamp_eof;
        }
        vipc_send(fd, &rep);
      }
    }
  }
  LOGW("client end fd %d", fd);
  // Return every buffer this client still held so writers aren't starved.
  for (auto &stream : streams) {
    if (!stream.subscribed) continue;
    if (stream.tb) {
      tbuffer_release_all(stream.tbuffer);
    } else {
      pool_release_queue(stream.queue);
    }
  }
  close(fd);
  pthread_mutex_lock(&s->clients_lock);
  client->running = false;
  pthread_mutex_unlock(&s->clients_lock);
  return NULL;
}
// Accept loop: listens on the VIPC unix socket and spawns one
// visionserver_client_thread per connection, up to MAX_CLIENTS.
// Polls with a 1s timeout so do_exit is noticed promptly; on exit,
// joins all still-running client threads before closing the socket.
void* visionserver_thread(void* arg) {
  int err;
  VisionState *s = (VisionState*)arg;
  set_thread_name("visionserver");
  int sock = ipc_bind(VIPC_SOCKET_PATH);
  while (!do_exit) {
    struct pollfd polls[1] = {{0}};
    polls[0].fd = sock;
    polls[0].events = POLLIN;
    int ret = poll(polls, ARRAYSIZE(polls), 1000);
    if (ret < 0) {
      if (errno == EINTR || errno == EAGAIN) continue;
      LOGE("poll failed (%d - %d)", ret, errno);
      break;
    }
    if (do_exit) break;
    if (!polls[0].revents) {
      continue;  // timeout, re-check do_exit
    }
    int fd = accept(sock, NULL, NULL);
    assert(fd >= 0);
    pthread_mutex_lock(&s->clients_lock);
    // Find a free client slot.
    int client_idx = 0;
    for (; client_idx < MAX_CLIENTS; client_idx++) {
      if (!s->clients[client_idx].running) break;
    }
    if (client_idx >= MAX_CLIENTS) {
      LOG("ignoring visionserver connection, max clients connected");
      close(fd);
      pthread_mutex_unlock(&s->clients_lock);
      continue;
    }
    VisionClientState *client = &s->clients[client_idx];
    client->s = s;
    client->fd = fd;
    client->running = true;
    err = pthread_create(&client->thread_handle, NULL,
                         visionserver_client_thread, client);
    assert(err == 0);
    pthread_mutex_unlock(&s->clients_lock);
  }
  // Drain: join any client threads that are still running.
  for (auto &client : s->clients) {
    pthread_mutex_lock(&s->clients_lock);
    bool running = client.running;
    pthread_mutex_unlock(&s->clients_lock);
    if (running) {
      err = pthread_join(client.thread_handle, NULL);
      assert(err == 0);
    }
  }
  close(sock);
  return NULL;
}
void party(cl_device_id device_id, cl_context context) {
VisionState state = {};
VisionState *s = &state;
MultiCameraState cameras = {};
VisionIpcServer vipc_server("camerad", device_id, context);
cameras_init(&s->cameras, device_id, context);
cameras_open(&s->cameras);
cameras_init(&vipc_server, &cameras, device_id, context);
cameras_open(&cameras);
std::thread server_thread(visionserver_thread, s);
vipc_server.start_listener();
// priority for cameras
int err = set_realtime_priority(53);
LOG("setpriority returns %d", err);
cameras_run(&s->cameras);
server_thread.join();
cameras_run(&cameras);
}
#ifdef QCOM

View File

@ -3,10 +3,15 @@ import os
import signal
import subprocess
import time
import numpy as np
from PIL import Image
import cereal.messaging as messaging
from common.basedir import BASEDIR
from common.params import Params
from selfdrive.camerad.snapshot.visionipc import VisionIPC
from common.transformations.camera import eon_f_frame_size, eon_d_frame_size, leon_d_frame_size, tici_f_frame_size
from selfdrive.hardware import TICI
from selfdrive.controls.lib.alertmanager import set_offroad_alert
@ -15,58 +20,79 @@ def jpeg_write(fn, dat):
img.save(fn, "JPEG")
def extract_image(dat, frame_sizes):
img = np.frombuffer(dat, dtype=np.uint8)
w, h = frame_sizes[len(img) // 3]
b = img[::3].reshape(h, w)
g = img[1::3].reshape(h, w)
r = img[2::3].reshape(h, w)
return np.dstack([r, g, b])
def get_snapshots(frame="frame", front_frame="frontFrame"):
frame_sizes = [eon_f_frame_size, eon_d_frame_size, leon_d_frame_size, tici_f_frame_size]
frame_sizes = {w * h: (w, h) for (w, h) in frame_sizes}
sm = messaging.SubMaster([frame, front_frame])
while min(sm.logMonoTime.values()) == 0:
sm.update()
rear = extract_image(sm[frame].image, frame_sizes)
front = extract_image(sm[front_frame].image, frame_sizes)
return rear, front
def snapshot():
params = Params()
front_camera_allowed = int(params.get("RecordFront"))
if params.get("IsOffroad") != b"1" or params.get("IsTakingSnapshot") == b"1":
return None
print("Already taking snapshot")
return None, None
params.put("IsTakingSnapshot", "1")
set_offroad_alert("Offroad_IsTakingSnapshot", True)
time.sleep(2.0) # Give thermald time to read the param, or if just started give camerad time to start
# Check if camerad is already started
ps = subprocess.Popen("ps | grep camerad", shell=True, stdout=subprocess.PIPE)
ret = list(filter(lambda x: 'grep ' not in x, ps.communicate()[0].decode('utf-8').strip().split("\n")))
if len(ret) > 0:
try:
subprocess.check_call(["pgrep", "camerad"])
print("Camerad already running")
params.put("IsTakingSnapshot", "0")
params.delete("Offroad_IsTakingSnapshot")
return None
return None, None
except subprocess.CalledProcessError:
pass
proc = subprocess.Popen(os.path.join(BASEDIR, "selfdrive/camerad/camerad"), cwd=os.path.join(BASEDIR, "selfdrive/camerad"))
env = os.environ.copy()
env["SEND_REAR"] = "1"
env["SEND_WIDE"] = "1"
env["SEND_FRONT"] = "1"
proc = subprocess.Popen(os.path.join(BASEDIR, "selfdrive/camerad/camerad"),
cwd=os.path.join(BASEDIR, "selfdrive/camerad"), env=env)
time.sleep(3.0)
ret = None
start_time = time.time()
while time.time() - start_time < 5.0:
try:
ipc = VisionIPC()
pic = ipc.get()
del ipc
if front_camera_allowed:
ipc_front = VisionIPC(front=True)
fpic = ipc_front.get()
del ipc_front
else:
fpic = None
ret = pic, fpic
break
except Exception:
time.sleep(1)
frame = "wideFrame" if TICI else "frame"
rear, front = get_snapshots(frame)
proc.send_signal(signal.SIGINT)
proc.communicate()
params.put("IsTakingSnapshot", "0")
set_offroad_alert("Offroad_IsTakingSnapshot", False)
return ret
if not front_camera_allowed:
front = None
return rear, front
if __name__ == "__main__":
pic, fpic = snapshot()
print(pic.shape)
jpeg_write("/tmp/back.jpg", pic)
jpeg_write("/tmp/front.jpg", fpic)
if pic is not None:
print(pic.shape)
jpeg_write("/tmp/back.jpg", pic)
jpeg_write("/tmp/front.jpg", fpic)
else:
print("Error taking snapshot")

View File

@ -1,93 +0,0 @@
#!/usr/bin/env python3
import os
from cffi import FFI
import numpy as np
gf_dir = os.path.dirname(os.path.abspath(__file__))
ffi = FFI()
ffi.cdef("""
typedef enum VisionStreamType {
VISION_STREAM_RGB_BACK,
VISION_STREAM_RGB_FRONT,
VISION_STREAM_YUV,
VISION_STREAM_YUV_FRONT,
VISION_STREAM_MAX,
} VisionStreamType;
typedef struct VisionUIInfo {
int big_box_x, big_box_y;
int big_box_width, big_box_height;
int transformed_width, transformed_height;
int front_box_x, front_box_y;
int front_box_width, front_box_height;
} VisionUIInfo;
typedef struct VisionStreamBufs {
VisionStreamType type;
int width, height, stride;
size_t buf_len;
union {
VisionUIInfo ui_info;
} buf_info;
} VisionStreamBufs;
typedef struct VIPCBuf {
int fd;
size_t len;
void* addr;
} VIPCBuf;
typedef struct VIPCBufExtra {
// only for yuv
uint32_t frame_id;
uint64_t timestamp_eof;
} VIPCBufExtra;
typedef struct VisionStream {
int ipc_fd;
int last_idx;
int last_type;
int num_bufs;
VisionStreamBufs bufs_info;
VIPCBuf *bufs;
} VisionStream;
int visionstream_init(VisionStream *s, VisionStreamType type, bool tbuffer, VisionStreamBufs *out_bufs_info);
VIPCBuf* visionstream_get(VisionStream *s, VIPCBufExtra *out_extra);
void visionstream_destroy(VisionStream *s);
"""
)
class VisionIPCError(Exception):
  """Raised when a VisionIPC stream cannot be initialized."""
  pass
class VisionIPC():
  """Thin cffi wrapper around libvisionipc's VisionStream API.

  Connects to the RGB back (or front) camera stream and exposes frames
  as numpy BGR->RGB arrays via get().
  """

  def __init__(self, front=False):
    # Set before any call that can raise, so __del__ is always safe.
    self.s = None
    self.clib = ffi.dlopen(os.path.join(gf_dir, "libvisionipc.so"))
    self.buf_info = ffi.new("VisionStreamBufs*")
    stream_type = self.clib.VISION_STREAM_RGB_FRONT if front else self.clib.VISION_STREAM_RGB_BACK
    s = ffi.new("VisionStream*")
    err = self.clib.visionstream_init(s, stream_type, True, self.buf_info)
    if err != 0:
      # Fix: destroy once here and leave self.s unset; previously __del__
      # would call visionstream_destroy a second time on the failed stream.
      self.clib.visionstream_destroy(s)
      raise VisionIPCError
    self.s = s

  def __del__(self):
    # Only tear down a successfully initialized stream; getattr guards
    # against __init__ having raised before attributes were assigned.
    if getattr(self, "s", None) is not None:
      self.clib.visionstream_destroy(self.s)
      self.s = None

  def get(self):
    """Returns the latest frame as an (height, width, 3) RGB uint8 array."""
    buf = self.clib.visionstream_get(self.s, ffi.NULL)
    pbuf = ffi.buffer(buf.addr, buf.len)
    ret = np.frombuffer(pbuf, dtype=np.uint8).reshape((-1, self.buf_info.stride//3, 3))
    # Crop stride padding and swap BGR -> RGB.
    return ret[:self.buf_info.height, :self.buf_info.width, [2, 1, 0]]

View File

@ -1,5 +1,6 @@
#!/usr/bin/env python3
import os
import random
import time
import unittest
@ -7,7 +8,7 @@ import numpy as np
import cereal.messaging as messaging
from selfdrive.test.helpers import with_processes
from selfdrive.camerad.snapshot.visionipc import VisionIPC
from selfdrive.camerad.snapshot.snapshot import get_snapshots
# only tests for EON and TICI
from selfdrive.hardware import EON, TICI
@ -32,24 +33,8 @@ class TestCamerad(unittest.TestCase):
if not (EON or TICI):
raise unittest.SkipTest
def _get_snapshots(self):
ret = None
start_time = time.time()
while time.time() - start_time < 5.0:
try:
ipc = VisionIPC()
pic = ipc.get()
del ipc
ipc_front = VisionIPC(front=True) # need to add another for tici
fpic = ipc_front.get()
del ipc_front
ret = pic, fpic
break
except Exception:
time.sleep(1)
return ret
assert "SEND_REAR" in os.environ
assert "SEND_FRONT" in os.environ
def _numpy_bgr2gray(self, im):
ret = np.clip(im[:,:,0] * 0.114 + im[:,:,1] * 0.587 + im[:,:,2] * 0.299, 0, 255).astype(np.uint8)
@ -95,14 +80,14 @@ class TestCamerad(unittest.TestCase):
if EON:
# run checks similar to prov
time.sleep(15) # wait for startup and AF
pic, fpic = self._get_snapshots()
pic, fpic = get_snapshots()
self.assertTrue(self._is_really_sharp(pic))
self.assertTrue(self._is_exposure_okay(pic))
self.assertTrue(self._is_exposure_okay(fpic))
time.sleep(30)
# check again for consistency
pic, fpic = self._get_snapshots()
pic, fpic = get_snapshots()
self.assertTrue(self._is_really_sharp(pic))
self.assertTrue(self._is_exposure_okay(pic))
self.assertTrue(self._is_exposure_okay(fpic))

View File

@ -5,15 +5,12 @@ if SHARED:
else:
fxn = env.Library
common_libs = ['params.cc', 'swaglog.cc', 'util.c', 'cqueue.c', 'gpio.cc', 'i2c.cc']
common_libs = ['params.cc', 'swaglog.cc', 'cqueue.c', 'util.c', 'gpio.cc', 'i2c.cc']
_common = fxn('common', common_libs, LIBS="json11")
_visionipc = fxn('visionipc', ['visionipc.c', 'ipc.c'])
files = [
'buffering.c',
'clutil.cc',
'efd.c',
'glutil.c',
'visionimg.cc',
]
@ -23,21 +20,9 @@ if arch == "aarch64":
'framebuffer.cc',
'touch.c',
]
if QCOM_REPLAY:
files += ['visionbuf_cl.c']
else:
files += ['visionbuf_ion.c']
_gpu_libs = ['gui', 'adreno_utils']
elif arch == "larch64":
files += [
'visionbuf_ion.c',
]
_gpu_libs = ['GL']
else:
files += [
'visionbuf_cl.c',
]
_gpu_libs = ["GL"]
_gpucommon = fxn('gpucommon', files, LIBS=_gpu_libs)
Export('_common', '_visionipc', '_gpucommon', '_gpu_libs')
Export('_common', '_gpucommon', '_gpu_libs')

View File

@ -1,438 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include "common/efd.h"
#include "buffering.h"
// Initializes a triple buffer over num_bufs slots (at least 3: one being
// written, one pending, one being read). The event fd becomes readable
// whenever a buffer is pending for the reader.
void tbuffer_init(TBuffer *tb, int num_bufs, const char* name) {
  assert(num_bufs >= 3);
  memset(tb, 0, sizeof(TBuffer));
  tb->reading = (bool*)calloc(num_bufs, sizeof(bool));
  assert(tb->reading);
  tb->pending_idx = -1;  // -1 means no buffer is pending
  tb->num_bufs = num_bufs;
  tb->name = name;       // debug label only; not copied
  pthread_mutex_init(&tb->lock, NULL);
  pthread_cond_init(&tb->cv, NULL);
  tb->efd = efd_init();
  assert(tb->efd >= 0);
}
// Same as tbuffer_init, but also registers a callback invoked whenever a
// buffer is released (or dropped because the reader was too slow).
void tbuffer_init2(TBuffer *tb, int num_bufs, const char* name,
                   void (*release_cb)(void* c, int idx),
                   void* cb_cookie) {
  tbuffer_init(tb, num_bufs, name);
  tb->cb_cookie = cb_cookie;
  tb->release_cb = release_cb;
}
// Returns the event fd that is readable while a buffer is pending;
// useful for poll()ing several tbuffers at once.
int tbuffer_efd(TBuffer *tb) {
  return tb->efd;
}
// Picks a buffer for the writer: the first slot that is neither being read
// nor pending. With num_bufs >= 3 such a slot always exists, hence the assert.
int tbuffer_select(TBuffer *tb) {
  pthread_mutex_lock(&tb->lock);
  int chosen = 0;
  while (chosen < tb->num_bufs &&
         (tb->reading[chosen] || chosen == tb->pending_idx)) {
    chosen++;
  }
  assert(chosen < tb->num_bufs);
  pthread_mutex_unlock(&tb->lock);
  return chosen;
}
// Publishes buffer idx to the reader. If a previous buffer was still pending
// (the reader was too slow), it is dropped: its release callback fires and it
// is overwritten. Wakes the reader via both the condvar and the event fd.
void tbuffer_dispatch(TBuffer *tb, int idx) {
  pthread_mutex_lock(&tb->lock);
  if (tb->pending_idx != -1) {
    //printf("tbuffer (%s) dropped!\n", tb->name ? tb->name : "?");
    if (tb->release_cb) {
      tb->release_cb(tb->cb_cookie, tb->pending_idx);
    }
    tb->pending_idx = -1;
  }
  tb->pending_idx = idx;
  efd_write(tb->efd);
  pthread_cond_signal(&tb->cv);
  pthread_mutex_unlock(&tb->lock);
}
// Blocks until a buffer is pending, marks it as being read, and returns its
// index. Returns -1 immediately (or as soon as woken) once the tbuffer is
// stopped. The event fd is cleared here because the pending slot is consumed.
int tbuffer_acquire(TBuffer *tb) {
  pthread_mutex_lock(&tb->lock);
  if (tb->stopped) {
    pthread_mutex_unlock(&tb->lock);
    return -1;
  }
  while (tb->pending_idx == -1) {
    pthread_cond_wait(&tb->cv, &tb->lock);
    // Re-check after every wakeup: tbuffer_stop signals the condvar too.
    if (tb->stopped) {
      pthread_mutex_unlock(&tb->lock);
      return -1;
    }
  }
  efd_clear(tb->efd);
  int ret = tb->pending_idx;
  assert(ret < tb->num_bufs);
  tb->reading[ret] = true;
  tb->pending_idx = -1;
  pthread_mutex_unlock(&tb->lock);
  return ret;
}
// Core of release; caller must hold tb->lock. Fires the release callback
// (if any) before clearing the reading flag. A release of a non-reading
// buffer is logged but tolerated.
static void tbuffer_release_locked(TBuffer *tb, int idx) {
  assert(idx < tb->num_bufs);
  if (!tb->reading[idx]) {
    printf("!! releasing tbuffer we aren't reading %d\n", idx);
  }
  if (tb->release_cb != NULL) {
    tb->release_cb(tb->cb_cookie, idx);
  }
  tb->reading[idx] = false;
}
// Reader is done with buffer idx; thread-safe wrapper around the
// locked helper.
void tbuffer_release(TBuffer *tb, int idx) {
  pthread_mutex_lock(&tb->lock);
  tbuffer_release_locked(tb, idx);
  pthread_mutex_unlock(&tb->lock);
}
// Releases every buffer the reader currently holds; used on reader teardown
// so the writer is never starved by a departed client.
void tbuffer_release_all(TBuffer *tb) {
  pthread_mutex_lock(&tb->lock);
  for (int idx = 0; idx < tb->num_bufs; idx++) {
    if (!tb->reading[idx]) continue;
    tbuffer_release_locked(tb, idx);
  }
  pthread_mutex_unlock(&tb->lock);
}
// Permanently stops the tbuffer: subsequent/blocked tbuffer_acquire calls
// return -1. Signals both wakeup mechanisms so a blocked reader exits.
void tbuffer_stop(TBuffer *tb) {
  pthread_mutex_lock(&tb->lock);
  tb->stopped = true;
  efd_write(tb->efd);
  pthread_cond_signal(&tb->cv);
  pthread_mutex_unlock(&tb->lock);
}
// Initializes a buffer pool with per-buffer refcounts and LRU timestamps.
// NOTE(review): requires num_bufs > 3 while tbuffer_init accepts >= 3 —
// looks intentional (pools fan out to multiple consumers) but worth confirming.
void pool_init(Pool *s, int num_bufs) {
  assert(num_bufs > 3);
  memset(s, 0, sizeof(*s));
  s->num_bufs = num_bufs;
  s->refcnt = (int*)calloc(num_bufs, sizeof(int));
  s->ts = (int*)calloc(num_bufs, sizeof(int));
  s->counter = 1;  // logical clock for LRU eviction; 0 marks "never used"
  pthread_mutex_init(&s->lock, NULL);
}
// pool_init plus a release callback fired when a buffer's refcount
// drops to zero.
void pool_init2(Pool *s, int num_bufs,
                void (*release_cb)(void* c, int idx), void* cb_cookie) {
  pool_init(s, num_bufs);
  s->release_cb = release_cb;
  s->cb_cookie = cb_cookie;
}
// Takes an additional reference on buffer idx.
void pool_acquire(Pool *s, int idx) {
  pthread_mutex_lock(&s->lock);
  assert(idx >= 0 && idx < s->num_bufs);
  ++s->refcnt[idx];
  pthread_mutex_unlock(&s->lock);
}
// Drops one reference; caller must hold s->lock. When the count reaches
// zero the optional release callback is invoked.
static void pool_release_locked(Pool *s, int idx) {
  assert(idx >= 0 && idx < s->num_bufs);
  assert(s->refcnt[idx] > 0);
  if (--s->refcnt[idx] == 0 && s->release_cb != NULL) {
    s->release_cb(s->cb_cookie, idx);
  }
}
// Thread-safe reference drop; see pool_release_locked for semantics.
void pool_release(Pool *s, int idx) {
  pthread_mutex_lock(&s->lock);
  pool_release_locked(s, idx);
  pthread_mutex_unlock(&s->lock);
}
// Registers a new tbuffer consumer backed by this pool. Buffers dispatched
// via pool_push get one extra refcount per tbuffer, dropped through the
// release callback wired up here.
// NOTE(review): casting pool_release to void(*)(void*,int) relies on
// Pool*/void* calling-convention compatibility — works on these targets but
// is technically UB; a small wrapper function would be cleaner.
TBuffer* pool_get_tbuffer(Pool *s) {
  pthread_mutex_lock(&s->lock);
  assert(s->num_tbufs < POOL_MAX_TBUFS);
  TBuffer* tbuf = &s->tbufs[s->num_tbufs];
  s->num_tbufs++;
  tbuffer_init2(tbuf, s->num_bufs,
                "pool", (void (*)(void *, int))pool_release, s);
  bool stopped = s->stopped;
  pthread_mutex_unlock(&s->lock);
  // Stop the tbuffer so we can return a valid object.
  // We must stop here because the pool_stop may have already been called,
  // in which case tbuffer_stop may never be called again.
  if (stopped) {
    tbuffer_stop(tbuf);
  }
  return tbuf;
}
// Registers a new queue consumer: finds a free PoolQueue slot and initializes
// it as a ring buffer of num_bufs+1 entries (one slack slot distinguishes
// full from empty). Each popped index holds a pool reference until released.
PoolQueue* pool_get_queue(Pool *s) {
  pthread_mutex_lock(&s->lock);
  int i;
  for (i = 0; i < POOL_MAX_QUEUES; i++) {
    if (!s->queues[i].inited) {
      break;
    }
  }
  assert(i < POOL_MAX_QUEUES);
  PoolQueue *c = &s->queues[i];
  memset(c, 0, sizeof(*c));
  c->pool = s;
  c->inited = true;
  c->efd = efd_init();
  assert(c->efd >= 0);
  c->num_bufs = s->num_bufs;
  c->num = c->num_bufs+1;  // +1 slack slot for the ring buffer
  c->idx = (int*)malloc(sizeof(int)*c->num);
  memset(c->idx, -1, sizeof(int)*c->num);  // -1 marks empty slots
  pthread_mutex_init(&c->lock, NULL);
  pthread_cond_init(&c->cv, NULL);
  pthread_mutex_unlock(&s->lock);
  return c;
}
// Tears down a queue consumer: releases every buffer reference still queued,
// closes the event fd, frees the ring, and frees the slot for reuse.
// Takes the pool lock first, then the queue lock (same order as pool_push).
void pool_release_queue(PoolQueue *c) {
  Pool *s = c->pool;
  pthread_mutex_lock(&s->lock);
  pthread_mutex_lock(&c->lock);
  for (int i=0; i<c->num; i++) {
    if (c->idx[i] != -1) {
      pool_release_locked(s, c->idx[i]);
    }
  }
  close(c->efd);
  free(c->idx);
  c->inited = false;
  pthread_mutex_unlock(&c->lock);
  pthread_mutex_destroy(&c->lock);
  pthread_cond_destroy(&c->cv);
  pthread_mutex_unlock(&s->lock);
}
// Picks a buffer for the writer: the first with refcount zero, or — if all
// are referenced — evicts the least-recently-used one, firing its release
// callback even though consumers may still hold it (documented hazard below).
// The chosen buffer is returned with one reference taken and its LRU
// timestamp refreshed.
int pool_select(Pool *s) {
  pthread_mutex_lock(&s->lock);
  int i;
  for (i=0; i<s->num_bufs; i++) {
    if (s->refcnt[i] == 0) {
      break;
    }
  }
  if (i >= s->num_bufs) {
    // overwrite the oldest
    // still being using in a queue or tbuffer :/
    int min_k = 0;
    int min_ts = s->ts[0];
    for (int k=1; k<s->num_bufs; k++) {
      if (s->ts[k] < min_ts) {
        min_ts = s->ts[k];
        min_k = k;
      }
    }
    i = min_k;
    printf("pool is full! evicted %d\n", min_k);
    // might be really bad if the user is doing pointery stuff
    if (s->release_cb) {
      s->release_cb(s->cb_cookie, min_k);
    }
  }
  s->refcnt[i]++;
  s->ts[i] = s->counter;
  s->counter++;
  pthread_mutex_unlock(&s->lock);
  return i;
}
// Publishes buffer idx to all consumers. The writer's reference (taken in
// pool_select) is converted: one reference per tbuffer plus one per queue
// that accepted the index; full queues simply skip this frame. tbuffer
// dispatch happens outside the pool lock since tbuffer_dispatch locks
// its own mutex.
void pool_push(Pool *s, int idx) {
  pthread_mutex_lock(&s->lock);
  // printf("push %d head %d tail %d\n", idx, s->head, s->tail);
  assert(idx >= 0 && idx < s->num_bufs);
  s->ts[idx] = s->counter;
  s->counter++;
  assert(s->refcnt[idx] > 0);
  s->refcnt[idx]--; //push is a implcit release
  int num_tbufs = s->num_tbufs;
  s->refcnt[idx] += num_tbufs;  // one reference per tbuffer consumer
  // dispatch pool queues
  for (int i=0; i<POOL_MAX_QUEUES; i++) {
    PoolQueue *c = &s->queues[i];
    if (!c->inited) continue;
    pthread_mutex_lock(&c->lock);
    if (((c->head+1) % c->num) == c->tail) {
      // queue is full. skip for now
      pthread_mutex_unlock(&c->lock);
      continue;
    }
    s->refcnt[idx]++;  // reference held by the queue entry
    c->idx[c->head] = idx;
    c->head = (c->head+1) % c->num;
    assert(c->head != c->tail);
    pthread_mutex_unlock(&c->lock);
    efd_write(c->efd);
    pthread_cond_signal(&c->cv);
  }
  pthread_mutex_unlock(&s->lock);
  for (int i=0; i<num_tbufs; i++) {
    tbuffer_dispatch(&s->tbufs[i], idx);
  }
}
// Blocks until the queue is non-empty and returns the oldest buffer index,
// or -1 once the queue is stopped. The popped entry keeps its pool reference
// until the caller releases it via poolq_release.
int poolq_pop(PoolQueue *c) {
  pthread_mutex_lock(&c->lock);
  if (c->stopped) {
    pthread_mutex_unlock(&c->lock);
    return -1;
  }
  while (c->head == c->tail) {
    pthread_cond_wait(&c->cv, &c->lock);
    // Re-check after every wakeup: pool_stop signals the condvar too.
    if (c->stopped) {
      pthread_mutex_unlock(&c->lock);
      return -1;
    }
  }
  // printf("pop head %d tail %d\n", s->head, s->tail);
  assert(c->head != c->tail);
  int r = c->idx[c->tail];
  c->idx[c->tail] = -1;
  c->tail = (c->tail+1) % c->num;
  // queue event is level triggered
  if (c->head == c->tail) {
    efd_clear(c->efd);
  }
  // printf("pop %d head %d tail %d\n", r, s->head, s->tail);
  assert(r >= 0 && r < c->num_bufs);
  pthread_mutex_unlock(&c->lock);
  return r;
}
// Returns the queue's level-triggered event fd (readable while non-empty).
int poolq_efd(PoolQueue *c) {
  return c->efd;
}
// Drops the pool reference that was taken when idx was pushed to this queue.
void poolq_release(PoolQueue *c, int idx) {
  pool_release(c->pool, idx);
}
// Stops the pool and every consumer: tbuffers first (their acquire returns
// -1), then each queue is marked stopped and its waiters are woken via both
// condvar and event fd. New consumers registered after this are stopped by
// pool_get_tbuffer itself.
void pool_stop(Pool *s) {
  for (int i=0; i<s->num_tbufs; i++) {
    tbuffer_stop(&s->tbufs[i]);
  }
  pthread_mutex_lock(&s->lock);
  s->stopped = true;
  for (int i=0; i<POOL_MAX_QUEUES; i++) {
    PoolQueue *c = &s->queues[i];
    if (!c->inited) continue;
    pthread_mutex_lock(&c->lock);
    c->stopped = true;
    pthread_mutex_unlock(&c->lock);
    efd_write(c->efd);
    pthread_cond_signal(&c->cv);
  }
  pthread_mutex_unlock(&s->lock);
}

View File

@ -1,123 +0,0 @@
#ifndef BUFFERING_H
#define BUFFERING_H
#include <stdbool.h>
#include <pthread.h>
#ifdef __cplusplus
extern "C" {
#endif
// Tripple buffering helper
// Triple-buffer handoff between one writer and one reader.
typedef struct TBuffer {
  pthread_mutex_t lock;
  pthread_cond_t cv;     // signalled when a buffer is dispatched or on stop
  int efd;               // event fd, readable while a buffer is pending
  bool* reading;         // per-buffer: currently held by the reader
  int pending_idx;       // dispatched-but-unacquired buffer, -1 if none
  int num_bufs;
  const char* name;      // debug label, may be NULL
  void (*release_cb)(void* c, int idx);  // optional, fired on release/drop
  void *cb_cookie;
  bool stopped;          // once set, tbuffer_acquire returns -1
} TBuffer;
// num_bufs must be at least the number of buffers that can be acquired simultaniously plus two
void tbuffer_init(TBuffer *tb, int num_bufs, const char* name);
void tbuffer_init2(TBuffer *tb, int num_bufs, const char* name,
void (*release_cb)(void* c, int idx),
void* cb_cookie);
// returns an eventfd that signals if a buffer is ready and tbuffer_acquire shouldn't to block.
// useful to polling on multiple tbuffers.
int tbuffer_efd(TBuffer *tb);
// Chooses a buffer that's not reading or pending
int tbuffer_select(TBuffer *tb);
// Called when the writer is done with their buffer
// - Wakes up the reader if it's waiting
// - releases the pending buffer if the reader's too slow
void tbuffer_dispatch(TBuffer *tb, int idx);
// Called when the reader wants a new buffer, will return -1 when stopped
int tbuffer_acquire(TBuffer *tb);
// Called when the reader is done with their buffer
void tbuffer_release(TBuffer *tb, int idx);
void tbuffer_release_all(TBuffer *tb);
void tbuffer_stop(TBuffer *tb);
// pool: buffer pool + queue thing...
#define POOL_MAX_TBUFS 8
#define POOL_MAX_QUEUES 8
typedef struct Pool Pool;
// FIFO consumer view of a Pool: a ring buffer of buffer indices.
typedef struct PoolQueue {
  pthread_mutex_t lock;
  pthread_cond_t cv;   // signalled when an index is pushed or on stop
  Pool* pool;          // owning pool
  bool inited;         // slot in Pool::queues is in use
  bool stopped;
  int efd;             // level-triggered: readable while queue is non-empty
  int num_bufs;
  int num;             // ring capacity: num_bufs + 1 (one slack slot)
  int head, tail;      // ring is empty when head == tail
  int* idx;            // ring storage; -1 marks an empty slot
} PoolQueue;
int poolq_pop(PoolQueue *s);
int poolq_efd(PoolQueue *s);
void poolq_release(PoolQueue *c, int idx);
// Refcounted buffer pool fanning out to multiple tbuffer and queue consumers.
typedef struct Pool {
  pthread_mutex_t lock;
  bool stopped;
  int num_bufs;
  int counter;   // logical clock used to stamp buffer usage
  int* ts;       // per-buffer last-use time, used for LRU eviction
  int* refcnt;   // per-buffer reference count; 0 means free for the writer
  void (*release_cb)(void* c, int idx);  // optional, fired when refcnt hits 0
  void *cb_cookie;
  int num_tbufs;                    // how many entries of tbufs are active
  TBuffer tbufs[POOL_MAX_TBUFS];
  PoolQueue queues[POOL_MAX_QUEUES];
} Pool;
void pool_init(Pool *s, int num_bufs);
void pool_init2(Pool *s, int num_bufs,
void (*release_cb)(void* c, int idx), void* cb_cookie);
TBuffer* pool_get_tbuffer(Pool *s);
PoolQueue* pool_get_queue(Pool *s);
void pool_release_queue(PoolQueue *q);
int pool_select(Pool *s);
void pool_push(Pool *s, int idx);
void pool_acquire(Pool *s, int idx);
void pool_release(Pool *s, int idx);
void pool_stop(Pool *s);
#ifdef __cplusplus
} // extern "C"
#endif
#endif

View File

@ -4,6 +4,8 @@
#include "cqueue.h"
// TODO: replace by C++ queue and CV. See camerad
void queue_init(Queue *q) {
memset(q, 0, sizeof(*q));
TAILQ_INIT(&q->q);

View File

@ -1,56 +0,0 @@
#include <stdlib.h>
#include <assert.h>
#ifdef __linux__
#include <sys/eventfd.h>
#else
#include <sys/time.h>
#include <sys/event.h>
#define EVENT_IDENT 42
#endif
#include "efd.h"
// Creates a pollable semaphore: an eventfd on Linux, a kqueue with a single
// EVFILT_USER event elsewhere. Returns the fd, or a negative value on failure
// (Linux path only; the kqueue path asserts).
int efd_init() {
#ifdef __linux__
  return eventfd(0, EFD_CLOEXEC);
#else
  const int kq = kqueue();
  assert(kq >= 0);
  struct kevent ev;
  EV_SET(&ev, EVENT_IDENT, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
  const struct timespec no_wait = {0, 0};
  const int rc = kevent(kq, &ev, 1, NULL, 0, &no_wait);
  assert(rc != -1);
  return kq;
#endif
}
// Signals the event fd: increments the eventfd counter on Linux, triggers
// the user event on the kqueue path. Makes the fd readable to pollers.
void efd_write(int fd) {
#ifdef __linux__
  eventfd_write(fd, 1);
#else
  struct kevent kev;
  EV_SET(&kev, EVENT_IDENT, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
  struct timespec timeout = {0, 0};
  int err = kevent(fd, &kev, 1, NULL, 0, &timeout);
  assert(err != -1);
#endif
}
// Consumes the pending signal so the fd is no longer readable.
// NOTE(review): on Linux the eventfd is created blocking, so calling this
// when the counter is zero would block — callers must only clear after a
// write/poll indicates readiness.
void efd_clear(int fd) {
#ifdef __linux__
  eventfd_t efd_cnt;
  eventfd_read(fd, &efd_cnt);
#else
  struct kevent kev;
  struct timespec timeout = {0, 0};
  int nfds = kevent(fd, NULL, 0, &kev, 1, &timeout);
  assert(nfds != -1);
#endif
}

View File

@ -1,17 +0,0 @@
#ifndef EFD_H
#define EFD_H
#ifdef __cplusplus
extern "C" {
#endif
// event fd: a semaphore that can be poll()'d
int efd_init();
void efd_write(int fd);
void efd_clear(int fd);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,125 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/un.h>
#include "ipc.h"
// Connects to a unix-domain socket (SOCK_STREAM on macOS, SOCK_SEQPACKET
// elsewhere). Returns the connected fd, or -1 on any failure.
int ipc_connect(const char* socket_path) {
#ifdef __APPLE__
  int sock = socket(AF_UNIX, SOCK_STREAM, 0);
#else
  int sock = socket(AF_UNIX, SOCK_SEQPACKET, 0);
#endif
  if (sock < 0) return -1;
  struct sockaddr_un addr = {
    .sun_family = AF_UNIX,
  };
  snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", socket_path);
  const int rc = connect(sock, (struct sockaddr*)&addr, sizeof(addr));
  if (rc != 0) {
    close(sock);
    return -1;
  }
  return sock;
}
// Creates, binds, and listens on a unix-domain server socket at socket_path.
// Any stale socket file is removed first so bind() doesn't fail with
// EADDRINUSE. Asserts on failure (server startup is not expected to fail).
int ipc_bind(const char* socket_path) {
  int err;
  unlink(socket_path);  // remove stale socket file from a previous run
#ifdef __APPLE__
  int sock = socket(AF_UNIX, SOCK_STREAM, 0);
#else
  int sock = socket(AF_UNIX, SOCK_SEQPACKET, 0);
#endif
  // Fix: socket() was previously unchecked (ipc_connect checks it); a -1 fd
  // would otherwise surface as a confusing bind() assert below.
  assert(sock >= 0);
  struct sockaddr_un addr = {
    .sun_family = AF_UNIX,
  };
  snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", socket_path);
  err = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
  assert(err == 0);
  err = listen(sock, 3);
  assert(err == 0);
  return sock;
}
// Sends or receives a datagram over a unix socket, optionally transferring
// file descriptors via SCM_RIGHTS ancillary data.
//  send=true:  sends buf (buf_size bytes) plus num_fds descriptors from fds.
//  send=false: receives into buf; up to num_fds descriptors are written to
//              fds and their count to *out_num_fds. If the kernel flags the
//              message (e.g. truncated ancillary data), any received fds are
//              closed and -1 is returned.
// Returns the sendmsg/recvmsg byte count, or negative on error.
int ipc_sendrecv_with_fds(bool send, int fd, void *buf, size_t buf_size, int* fds, int num_fds,
                          int *out_num_fds) {
  char control_buf[CMSG_SPACE(sizeof(int) * num_fds)];
  memset(control_buf, 0, CMSG_SPACE(sizeof(int) * num_fds));
  struct iovec iov = {
    .iov_base = buf,
    .iov_len = buf_size,
  };
  struct msghdr msg = {
    .msg_iov = &iov,
    .msg_iovlen = 1,
  };
  if (num_fds > 0) {
    assert(fds);
    // Attach the ancillary buffer only when fds are actually in play.
    msg.msg_control = control_buf;
    msg.msg_controllen = CMSG_SPACE(sizeof(int) * num_fds);
  }
  if (send) {
    if (num_fds) {
      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
      assert(cmsg);
      cmsg->cmsg_level = SOL_SOCKET;
      cmsg->cmsg_type = SCM_RIGHTS;
      cmsg->cmsg_len = CMSG_LEN(sizeof(int) * num_fds);
      memcpy(CMSG_DATA(cmsg), fds, sizeof(int) * num_fds);
      // printf("send clen %d -> %d\n", num_fds, cmsg->cmsg_len);
    }
    return sendmsg(fd, &msg, 0);
  } else {
    int r = recvmsg(fd, &msg, 0);
    if (r < 0) return r;
    int recv_fds = 0;
    if (msg.msg_controllen > 0) {
      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
      assert(cmsg);
      assert(cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS);
      // Derive the fd count from the control message length.
      recv_fds = (cmsg->cmsg_len - CMSG_LEN(0));
      assert(recv_fds > 0 && (recv_fds % sizeof(int)) == 0);
      recv_fds /= sizeof(int);
      // printf("recv clen %d -> %d\n", cmsg->cmsg_len, recv_fds);
      // assert(cmsg->cmsg_len == CMSG_LEN(sizeof(int) * num_fds));
      assert(fds && recv_fds <= num_fds);
      memcpy(fds, CMSG_DATA(cmsg), sizeof(int) * recv_fds);
    }
    if (msg.msg_flags) {
      // Message was truncated or otherwise flagged: don't leak received fds.
      for (int i=0; i<recv_fds; i++) {
        close(fds[i]);
      }
      return -1;
    }
    if (fds) {
      assert(out_num_fds);
      *out_num_fds = recv_fds;
    }
    return r;
  }
}

View File

@ -1,19 +0,0 @@
#ifndef IPC_H
#define IPC_H
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
int ipc_connect(const char* socket_path);
int ipc_bind(const char* socket_path);
int ipc_sendrecv_with_fds(bool send, int fd, void *buf, size_t buf_size, int* fds, int num_fds,
int *out_num_fds);
#ifdef __cplusplus
} // extern "C"
#endif
#endif

View File

@ -1,37 +0,0 @@
#pragma once
#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#ifdef __APPLE__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
// A host-visible buffer, optionally shared with OpenCL.
typedef struct VisionBuf {
  size_t len;       // usable length requested by the caller
  size_t mmap_len;  // actual mapped length (may exceed len, e.g. ION padding)
  void* addr;
  int handle;       // allocator handle; nonzero for mmap-backed allocations
  int fd;           // shareable fd backing the mapping
  cl_context ctx;
  cl_device_id device_id;
  cl_mem buf_cl;    // OpenCL view of the buffer; 0 if CPU-only
  cl_command_queue copy_q;  // queue used by visionbuf_sync on non-SVM builds
} VisionBuf;
#define VISIONBUF_SYNC_FROM_DEVICE 0
#define VISIONBUF_SYNC_TO_DEVICE 1
VisionBuf visionbuf_allocate(size_t len);
VisionBuf visionbuf_allocate_cl(size_t len, cl_device_id device_id, cl_context ctx);
void visionbuf_sync(const VisionBuf* buf, int dir);
void visionbuf_free(const VisionBuf* buf);
#ifdef __cplusplus
}
#endif

View File

@ -1,101 +0,0 @@
#include "visionbuf.h"
#include <stdio.h>
#include <fcntl.h>
#include <assert.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include "common/clutil.h"
#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#ifdef __APPLE__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif
int offset = 0;
// Allocates len bytes of fd-backed shared memory: creates an anonymous
// (immediately unlinked) file in /dev/shm (Linux) or /tmp (macOS), sizes it,
// and maps it shared. Returns the mapping; *fd receives the backing fd so
// the memory can be shared with other processes.
void *malloc_with_fd(size_t len, int *fd) {
  char full_path[0x100];
#ifdef __APPLE__
  snprintf(full_path, sizeof(full_path)-1, "/tmp/visionbuf_%d_%d", getpid(), offset++);
#else
  snprintf(full_path, sizeof(full_path)-1, "/dev/shm/visionbuf_%d_%d", getpid(), offset++);
#endif
  *fd = open(full_path, O_RDWR | O_CREAT, 0777);
  assert(*fd >= 0);
  unlink(full_path);  // anonymous: name gone, fd keeps the file alive
  // Fix: ftruncate was previously unchecked — if it fails (e.g. ENOSPC on
  // /dev/shm) the first touch of the mapping would SIGBUS far from the cause.
  int err = ftruncate(*fd, len);
  assert(err == 0);
  void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0);
  assert(addr != MAP_FAILED);
  return addr;
}
// CPU-only allocation backed by fd-based shared memory; handle=1 marks it
// as a plain mmap allocation for visionbuf_free.
VisionBuf visionbuf_allocate(size_t len) {
  // const size_t alignment = 4096;
  // void* addr = aligned_alloc(alignment, alignment * ((len - 1) / alignment + 1));
  //void* addr = calloc(1, len);
  int fd;
  void *addr = malloc_with_fd(len, &fd);
  return (VisionBuf){
    .len = len, .addr = addr, .handle = 1, .fd = fd,
  };
}
// Allocates a buffer shared between host and OpenCL. With OpenCL 2.0+
// fine-grained SVM the host pointer is coherent and no sync queue is needed;
// otherwise an fd-backed host buffer plus a command queue is used and
// visionbuf_sync performs explicit read/write copies.
// NOTE(review): in the SVM (>= 200) branch `fd` is never declared, yet
// `.fd = fd` below is unconditional — that path looks like it cannot
// compile as-is; confirm which builds take it.
VisionBuf visionbuf_allocate_cl(size_t len, cl_device_id device_id, cl_context ctx) {
#if __OPENCL_VERSION__ >= 200
  void* host_ptr =
      clSVMAlloc(ctx, CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER, len, 0);
  assert(host_ptr);
#else
  int fd;
  void* host_ptr = malloc_with_fd(len, &fd);
  cl_command_queue q = CL_CHECK_ERR(clCreateCommandQueue(ctx, device_id, 0, &err));
#endif
  cl_mem mem = CL_CHECK_ERR(clCreateBuffer(ctx, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, len, host_ptr, &err));
  return (VisionBuf){
    .len = len, .addr = host_ptr, .handle = 0, .fd = fd,
    .device_id = device_id, .ctx = ctx, .buf_cl = mem,
#if __OPENCL_VERSION__ < 200
    .copy_q = q,
#endif
  };
}
// Synchronizes host and device views of the buffer. A no-op for buffers with
// no CL object, and for fine-grained SVM builds (coherent by definition);
// otherwise performs a blocking copy in the direction given by dir
// (VISIONBUF_SYNC_FROM_DEVICE or VISIONBUF_SYNC_TO_DEVICE).
void visionbuf_sync(const VisionBuf* buf, int dir) {
  if (!buf->buf_cl) return;
#if __OPENCL_VERSION__ < 200
  if (dir == VISIONBUF_SYNC_FROM_DEVICE) {
    CL_CHECK(clEnqueueReadBuffer(buf->copy_q, buf->buf_cl, CL_FALSE, 0, buf->len, buf->addr, 0, NULL, NULL));
  } else {
    CL_CHECK(clEnqueueWriteBuffer(buf->copy_q, buf->buf_cl, CL_FALSE, 0, buf->len, buf->addr, 0, NULL, NULL));
  }
  clFinish(buf->copy_q);  // non-blocking enqueue + finish = blocking overall
#endif
}
// Releases a buffer created by visionbuf_allocate (handle != 0: plain mmap)
// or visionbuf_allocate_cl (handle == 0: CL object plus SVM or mmap backing,
// depending on the OpenCL version the file was built against).
void visionbuf_free(const VisionBuf* buf) {
  if (buf->handle) {
    munmap(buf->addr, buf->len);
    close(buf->fd);
  } else {
    CL_CHECK(clReleaseMemObject(buf->buf_cl));
#if __OPENCL_VERSION__ >= 200
    clSVMFree(buf->ctx, buf->addr);
#else
    CL_CHECK(clReleaseCommandQueue(buf->copy_q));
    munmap(buf->addr, buf->len);
    close(buf->fd);
#endif
  }
}

View File

@ -1,144 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/ion.h>
#include <CL/cl_ext.h>
#include "common/clutil.h"
#include <msm_ion.h>
#include "visionbuf.h"
// just hard-code these for convenience
// size_t device_page_size = 0;
// clGetDeviceInfo(device_id, CL_DEVICE_PAGE_SIZE_QCOM,
// sizeof(device_page_size), &device_page_size,
// NULL);
// size_t padding_cl = 0;
// clGetDeviceInfo(device_id, CL_DEVICE_EXT_MEM_PADDING_IN_BYTES_QCOM,
// sizeof(padding_cl), &padding_cl,
// NULL);
#define DEVICE_PAGE_SIZE_CL 4096
#define PADDING_CL 0
// Lazily-opened fd for the /dev/ion device, shared by all allocations in
// this file. -1 means "not opened yet".
static int ion_fd = -1;

// Open /dev/ion on first use; subsequent calls are no-ops.
// NOTE(review): open() failure is not checked here — the following ioctl
// asserts would fire instead.
static void ion_init() {
  if (ion_fd == -1) {
    ion_fd = open("/dev/ion", O_RDWR | O_NONBLOCK);
  }
}
// Allocate a zeroed, CPU-mapped ION buffer of at least `len` bytes.
// The allocation is padded by PADDING_CL and 4096-aligned so it can later be
// wrapped in an OpenCL buffer (see visionbuf_allocate_cl). The ION handle and
// shared fd are kept in the returned struct for sync/free.
VisionBuf visionbuf_allocate(size_t len) {
  int err;

  ion_init();

  // Request cached memory from the IOMMU heap.
  struct ion_allocation_data ion_alloc = {0};
  ion_alloc.len = len + PADDING_CL;
  ion_alloc.align = 4096;
  ion_alloc.heap_id_mask = 1 << ION_IOMMU_HEAP_ID;
  ion_alloc.flags = ION_FLAG_CACHED;
  err = ioctl(ion_fd, ION_IOC_ALLOC, &ion_alloc);
  assert(err == 0);

  // Export the allocation as a shareable fd we can mmap.
  struct ion_fd_data ion_fd_data = {0};
  ion_fd_data.handle = ion_alloc.handle;
  err = ioctl(ion_fd, ION_IOC_SHARE, &ion_fd_data);
  assert(err == 0);

  void *addr = mmap(NULL, ion_alloc.len,
                    PROT_READ | PROT_WRITE,
                    MAP_SHARED, ion_fd_data.fd, 0);
  assert(addr != MAP_FAILED);
  memset(addr, 0, ion_alloc.len);

  return (VisionBuf){
    .len = len,
    .mmap_len = ion_alloc.len,  // padded length actually mapped; used by free
    .addr = addr,
    .handle = ion_alloc.handle,
    .fd = ion_fd_data.fd,
  };
}
// Wrap a fresh ION allocation in an OpenCL buffer via Qualcomm's
// CL_MEM_EXT_HOST_PTR_QCOM extension so the GPU uses the same ION memory
// without copies. device_id is unused here; the signature matches the
// non-QCOM implementation of this function.
VisionBuf visionbuf_allocate_cl(size_t len, cl_device_id device_id, cl_context ctx) {
  VisionBuf buf = visionbuf_allocate(len);

  // The extension requires device-page-aligned host memory.
  assert(((uintptr_t)buf.addr % DEVICE_PAGE_SIZE_CL) == 0);

  cl_mem_ion_host_ptr ion_cl = {0};
  ion_cl.ext_host_ptr.allocation_type = CL_MEM_ION_HOST_PTR_QCOM;
  ion_cl.ext_host_ptr.host_cache_policy = CL_MEM_HOST_UNCACHED_QCOM;
  ion_cl.ion_filedesc = buf.fd;
  ion_cl.ion_hostptr = buf.addr;

  buf.buf_cl = CL_CHECK_ERR(clCreateBuffer(ctx,
      CL_MEM_USE_HOST_PTR | CL_MEM_EXT_HOST_PTR_QCOM,
      buf.len, &ion_cl, &err));

  return buf;
}
// Maintain CPU cache coherency for an ION buffer around device access.
// The buffer's fd is temporarily re-imported to obtain a kernel handle, the
// appropriate cache operation is issued through ION_IOC_CUSTOM, and the
// handle reference is dropped again.
void visionbuf_sync(const VisionBuf* buf, int dir) {
  int err;

  // Import our own shared fd to get an ion_handle for the custom ioctl.
  struct ion_fd_data fd_data = {0};
  fd_data.fd = buf->fd;
  err = ioctl(ion_fd, ION_IOC_IMPORT, &fd_data);
  assert(err == 0);

  struct ion_flush_data flush_data = {0};
  flush_data.handle = fd_data.handle;
  flush_data.vaddr = buf->addr;
  flush_data.offset = 0;
  flush_data.length = buf->len;

  // ION_IOC_INV_CACHES ~= DMA_FROM_DEVICE
  // ION_IOC_CLEAN_CACHES ~= DMA_TO_DEVICE
  // ION_IOC_CLEAN_INV_CACHES ~= DMA_BIDIRECTIONAL
  struct ion_custom_data custom_data = {0};
  switch (dir) {
  case VISIONBUF_SYNC_FROM_DEVICE:
    custom_data.cmd = ION_IOC_INV_CACHES;
    break;
  case VISIONBUF_SYNC_TO_DEVICE:
    custom_data.cmd = ION_IOC_CLEAN_CACHES;
    break;
  default:
    assert(0);  // unknown direction is a programming error
  }
  custom_data.arg = (unsigned long)&flush_data;
  err = ioctl(ion_fd, ION_IOC_CUSTOM, &custom_data);
  assert(err == 0);

  // Drop the reference taken by ION_IOC_IMPORT above.
  struct ion_handle_data handle_data = {0};
  handle_data.handle = fd_data.handle;
  err = ioctl(ion_fd, ION_IOC_FREE, &handle_data);
  assert(err == 0);
}
// Release an ION-backed VisionBuf: the OpenCL wrapper (if any), the CPU
// mapping, the shared fd, and finally the ION handle itself.
void visionbuf_free(const VisionBuf* buf) {
  if (buf->buf_cl) {
    CL_CHECK(clReleaseMemObject(buf->buf_cl));
  }

  // mmap_len (not len) covers the padded region that was actually mapped.
  munmap(buf->addr, buf->mmap_len);
  close(buf->fd);

  struct ion_handle_data handle_data = {
    .handle = buf->handle,
  };
  int ret = ioctl(ion_fd, ION_IOC_FREE, &handle_data);
  assert(ret == 0);
}

View File

@ -1,6 +1,7 @@
#include <cassert>
#ifdef QCOM
#include <system/graphics.h>
#include <ui/GraphicBuffer.h>
#include <ui/PixelFormat.h>
@ -14,60 +15,23 @@
#define EGL_EGLEXT_PROTOTYPES
#include <EGL/eglext.h>
#else // ifdef QCOM
#ifdef __APPLE__
#include <OpenGL/gl3.h>
#else
#include <GLES3/gl3.h>
#endif
#include "common/util.h"
#include "common/visionbuf.h"
#endif // ifdef QCOM
#include "common/util.h"
#include "common/visionimg.h"
#ifdef QCOM
using namespace android;
// from libadreno_utils.so
extern "C" void compute_aligned_width_and_height(int width,
int height,
int bpp,
int tile_mode,
int raster_mode,
int padding_threshold,
int *aligned_w,
int *aligned_h);
#endif
// Compute GPU-friendly (aligned) dimensions for an RGB24 image.
// On QCOM hardware (but not QCOM_REPLAY) this defers to the Adreno driver
// helper, with both dimensions first rounded up to multiples of 32; on every
// other platform the dimensions are returned unchanged.
void visionimg_compute_aligned_width_and_height(int width, int height, int *aligned_w, int *aligned_h) {
#if defined(QCOM) && !defined(QCOM_REPLAY)
  // bpp=3 (RGB24); tile/raster mode 0; 512-byte padding threshold.
  compute_aligned_width_and_height(ALIGN(width, 32), ALIGN(height, 32), 3, 0, 0, 512, aligned_w, aligned_h);
#else
  *aligned_w = width; *aligned_h = height;
#endif
}
// Allocate an OpenCL-backed RGB24 image. The backing VisionBuf is returned
// through out_buf (caller owns it); the VisionImg return value describes the
// image geometry. Stride/size use the hardware-aligned dimensions, while
// width/height keep the caller's requested values.
VisionImg visionimg_alloc_rgb24(cl_device_id device_id, cl_context ctx, int width, int height, VisionBuf *out_buf) {
  assert(out_buf != nullptr);

  int aligned_w = 0, aligned_h = 0;
  visionimg_compute_aligned_width_and_height(width, height, &aligned_w, &aligned_h);

  int stride = aligned_w * 3;  // 3 bytes per pixel (RGB24)
  size_t size = (size_t) aligned_w * aligned_h * 3;

  *out_buf = visionbuf_allocate_cl(size, device_id, ctx);

  return (VisionImg){
    .fd = out_buf->fd,
    .format = VISIONIMG_FORMAT_RGB24,
    .width = width,
    .height = height,
    .stride = stride,
    .size = size,
    .bpp = 3,
  };
}
#ifdef QCOM
EGLClientBuffer visionimg_to_egl(const VisionImg *img, void **pph) {
static EGLClientBuffer visionimg_to_egl(const VisionImg *img, void **pph) {
assert((img->size % img->stride) == 0);
assert((img->stride % img->bpp) == 0);
@ -118,4 +82,19 @@ void visionimg_destroy_gl(EGLImageKHR khr, void *ph) {
delete (private_handle_t*)ph;
}
#endif
#else // ifdef QCOM
GLuint visionimg_to_gl(const VisionImg *img, EGLImageKHR *pkhr, void **pph) {
unsigned int texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->width, img->height, 0, GL_RGB, GL_UNSIGNED_BYTE, *pph);
glGenerateMipmap(GL_TEXTURE_2D);
*pkhr = (EGLImageKHR)1; // not NULL
return texture;
}
void visionimg_destroy_gl(EGLImageKHR khr, void *ph) {
// empty
}
#endif // ifdef QCOM

View File

@ -1,7 +1,7 @@
#ifndef VISIONIMG_H
#define VISIONIMG_H
#include "common/visionbuf.h"
#include "visionbuf.h"
#include "common/glutil.h"
#ifdef QCOM
@ -27,10 +27,6 @@ typedef struct VisionImg {
size_t size;
} VisionImg;
void visionimg_compute_aligned_width_and_height(int width, int height, int *aligned_w, int *aligned_h);
VisionImg visionimg_alloc_rgb24(cl_device_id device_id, cl_context ctx, int width, int height, VisionBuf *out_buf);
EGLClientBuffer visionimg_to_egl(const VisionImg *img, void **pph);
GLuint visionimg_to_gl(const VisionImg *img, EGLImageKHR *pkhr, void **pph);
void visionimg_destroy_gl(EGLImageKHR khr, void *ph);

View File

@ -1,196 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/un.h>
#include "ipc.h"
#include "visionipc.h"
// On-the-wire packet body: only type + data travel in the byte stream; the
// file descriptors of a VisionPacket are passed separately as ancillary data.
typedef struct VisionPacketWire {
  int type;
  VisionPacketData d;
} VisionPacketWire;

// Connect to the vision server's unix socket.
// Returns the socket fd, or a negative value on failure (see callers, which
// check for < 0).
int vipc_connect() {
  return ipc_connect(VIPC_SOCKET_PATH);
}
// Receive one VisionPacket (plus any accompanying file descriptors) from fd.
// On success, *out_p is filled in with the packet body and the received fds.
// Returns the result of ipc_sendrecv_with_fds; errors (< 0) are logged with
// errno and leave *out_p untouched.
int vipc_recv(int fd, VisionPacket *out_p) {
  VisionPacketWire p = {0};
  VisionPacket p2 = {0};
  // Wire body goes into p; the fds land directly in p2.fds/p2.num_fds.
  int ret = ipc_sendrecv_with_fds(false, fd, &p, sizeof(p), (int*)p2.fds, VIPC_MAX_FDS, &p2.num_fds);
  if (ret < 0) {
    printf("vipc_recv err: %s\n", strerror(errno));
  } else {
    p2.type = p.type;
    p2.d = p.d;
    *out_p = p2;
  }
  //printf("%d = vipc_recv(%d, %d): %d %d %d %zu\n", ret, fd, p2.num_fds, out_p->d.stream_bufs.type, out_p->d.stream_bufs.width, out_p->d.stream_bufs.height, out_p->d.stream_bufs.buf_len);
  return ret;
}
// Send one VisionPacket over fd, passing its file descriptors (if any) as
// ancillary data. Returns the result of ipc_sendrecv_with_fds.
int vipc_send(int fd, const VisionPacket *p2) {
  assert(p2->num_fds <= VIPC_MAX_FDS);
  // Strip the fd list down to the wire body; fds travel out-of-band.
  VisionPacketWire p = {
    .type = p2->type,
    .d = p2->d,
  };
  int ret = ipc_sendrecv_with_fds(true, fd, (void*)&p, sizeof(p), (int*)p2->fds, p2->num_fds, NULL);
  //printf("%d = vipc_send(%d, %d): %d %d %d %zu\n", ret, fd, p2->num_fds, p2->d.stream_bufs.type, p2->d.stream_bufs.width, p2->d.stream_bufs.height, p2->d.stream_bufs.buf_len);
  return ret;
}
// Map the server-provided buffer fds into this process.
// Any previously-mapped buffer in bufs[i] is unmapped and its fd closed
// first, so the function can be used to (re)load an existing array.
// All buffers share the same length, stream_bufs->buf_len.
void vipc_bufs_load(VIPCBuf *bufs, const VisionStreamBufs *stream_bufs,
                    int num_fds, const int* fds) {
  for (int i=0; i<num_fds; i++) {
    // Tear down any stale mapping from a previous load.
    if (bufs[i].addr) {
      munmap(bufs[i].addr, bufs[i].len);
      bufs[i].addr = NULL;
      close(bufs[i].fd);
    }
    bufs[i].fd = fds[i];
    bufs[i].len = stream_bufs->buf_len;
    bufs[i].addr = mmap(NULL, bufs[i].len,
                        PROT_READ | PROT_WRITE,
                        MAP_SHARED, bufs[i].fd, 0);
    // printf("b %d %zu -> %p\n", bufs[i].fd, bufs[i].len, bufs[i].addr);
    assert(bufs[i].addr != MAP_FAILED);
  }
}
// Open a client stream: connect to the vision server, subscribe to `type`,
// receive the VIPC_STREAM_BUFS reply, and mmap the shared buffers.
// tbuffer is forwarded in the subscribe request (delivery-mode flag —
// semantics defined by the server; confirm there).
// On success returns 0 and, if out_bufs_info is non-NULL, copies the stream
// geometry into it. On any failure returns -1 with s->ipc_fd reset to -1.
int visionstream_init(VisionStream *s, VisionStreamType type, bool tbuffer, VisionStreamBufs *out_bufs_info) {
  int err;

  memset(s, 0, sizeof(*s));
  s->last_idx = -1;  // no buffer currently held

  s->ipc_fd = vipc_connect();
  if (s->ipc_fd < 0) return -1;

  // Subscribe to the requested stream.
  VisionPacket p = {
    .type = VIPC_STREAM_SUBSCRIBE,
    .d = { .stream_sub = {
      .type = type,
      .tbuffer = tbuffer,
    }, },
  };
  err = vipc_send(s->ipc_fd, &p);
  if (err < 0) {
    close(s->ipc_fd);
    s->ipc_fd = -1;
    return -1;
  }

  // The server answers with the buffer geometry plus one fd per buffer.
  VisionPacket rp;
  err = vipc_recv(s->ipc_fd, &rp);
  if (err <= 0) {
    close(s->ipc_fd);
    s->ipc_fd = -1;
    return -1;
  }
  assert(rp.type == VIPC_STREAM_BUFS);
  assert(rp.d.stream_bufs.type == type);

  s->bufs_info = rp.d.stream_bufs;

  s->num_bufs = rp.num_fds;
  s->bufs = calloc(s->num_bufs, sizeof(VIPCBuf));
  assert(s->bufs);

  vipc_bufs_load(s->bufs, &rp.d.stream_bufs, s->num_bufs, rp.fds);

  if (out_bufs_info) {
    *out_bufs_info = s->bufs_info;
  }

  return 0;
}
// Return the currently-held buffer (if any) to the server so it can be
// reused. No-op when no buffer is held (last_idx < 0).
// NOTE(review): the vipc_send result is intentionally ignored — this is a
// best-effort release and the client state is cleared regardless.
void visionstream_release(VisionStream *s) {
  int err;
  if (s->last_idx >= 0) {
    VisionPacket rep = {
      .type = VIPC_STREAM_RELEASE,
      .d = { .stream_rel = {
        .type = s->last_type,
        .idx = s->last_idx,
      }}
    };
    err = vipc_send(s->ipc_fd, &rep);
    s->last_idx = -1;  // we no longer hold a buffer
  }
}
// Block until the server hands over the next frame buffer.
// The previously-held buffer (if any) is released back to the server before
// the new one is recorded, so the client holds at most one buffer at a time.
// Returns a pointer to the acquired buffer, or NULL on IPC failure.
// out_extra (optional) receives the per-frame metadata.
VIPCBuf* visionstream_get(VisionStream *s, VIPCBufExtra *out_extra) {
  int err;

  VisionPacket rp;
  err = vipc_recv(s->ipc_fd, &rp);
  if (err <= 0) {
    return NULL;
  }
  assert(rp.type == VIPC_STREAM_ACQUIRE);

  // Release the previous buffer now that a new one has arrived.
  if (s->last_idx >= 0) {
    VisionPacket rep = {
      .type = VIPC_STREAM_RELEASE,
      .d = { .stream_rel = {
        .type = s->last_type,
        .idx = s->last_idx,
      }}
    };
    err = vipc_send(s->ipc_fd, &rep);
    if (err <= 0) {
      return NULL;
    }
  }

  s->last_type = rp.d.stream_acq.type;
  s->last_idx = rp.d.stream_acq.idx;
  assert(s->last_idx < s->num_bufs);

  if (out_extra) {
    *out_extra = rp.d.stream_acq.extra;
  }

  return &s->bufs[s->last_idx];
}
// Tear down a stream: best-effort release of the held buffer, unmap all
// shared buffers, free the buffer array, and close the server connection.
// Safe to call after a partially-failed init (all teardown steps are guarded).
void visionstream_destroy(VisionStream *s) {
  int err;

  // Same best-effort release as visionstream_release(); send result ignored.
  if (s->last_idx >= 0) {
    VisionPacket rep = {
      .type = VIPC_STREAM_RELEASE,
      .d = { .stream_rel = {
        .type = s->last_type,
        .idx = s->last_idx,
      }}
    };
    err = vipc_send(s->ipc_fd, &rep);
    s->last_idx = -1;
  }

  for (int i=0; i<s->num_bufs; i++) {
    if (s->bufs[i].addr) {
      munmap(s->bufs[i].addr, s->bufs[i].len);
      s->bufs[i].addr = NULL;
      close(s->bufs[i].fd);
    }
  }
  if (s->bufs) free(s->bufs);
  if (s->ipc_fd >= 0) close(s->ipc_fd);
}

View File

@ -1,120 +0,0 @@
#ifndef VISIONIPC_H
#define VISIONIPC_H

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

// Unix-domain socket the vision server listens on, and the maximum number of
// file descriptors (one per shared buffer) carried with a single packet.
#define VIPC_SOCKET_PATH "/tmp/vision_socket"
#define VIPC_MAX_FDS 64

#ifdef __cplusplus
extern "C" {
#endif

// Packet types exchanged between a client and the vision server.
typedef enum VisionIPCPacketType {
  VIPC_INVALID = 0,
  VIPC_STREAM_SUBSCRIBE,  // client -> server: subscribe to one stream
  VIPC_STREAM_BUFS,       // server -> client: stream geometry + buffer fds
  VIPC_STREAM_ACQUIRE,    // server -> client: buffer idx is ready to read
  VIPC_STREAM_RELEASE,    // client -> server: done with buffer idx
} VisionIPCPacketType;

// Identifiers for the available camera streams.
typedef enum VisionStreamType {
  VISION_STREAM_RGB_BACK,
  VISION_STREAM_RGB_FRONT,
  VISION_STREAM_RGB_WIDE,
  VISION_STREAM_YUV,
  VISION_STREAM_YUV_FRONT,
  VISION_STREAM_YUV_WIDE,
  VISION_STREAM_MAX,
} VisionStreamType;

// UI layout geometry, delivered inside VisionStreamBufs.buf_info.
typedef struct VisionUIInfo {
  int big_box_x, big_box_y;
  int big_box_width, big_box_height;
  int transformed_width, transformed_height;

  int front_box_x, front_box_y;
  int front_box_width, front_box_height;

  int wide_box_x, wide_box_y;
  int wide_box_width, wide_box_height;
} VisionUIInfo;

// Geometry of one stream's shared buffers (VIPC_STREAM_BUFS payload).
typedef struct VisionStreamBufs {
  VisionStreamType type;

  int width, height, stride;
  size_t buf_len;  // byte length of each shared buffer

  union {
    VisionUIInfo ui_info;
  } buf_info;
} VisionStreamBufs;

// Per-frame metadata delivered with VIPC_STREAM_ACQUIRE.
typedef struct VIPCBufExtra {
  // only for yuv
  uint32_t frame_id;
  uint64_t timestamp_sof;
  uint64_t timestamp_eof;
} VIPCBufExtra;

// Payload variants, selected by the packet's type field.
typedef union VisionPacketData {
  struct {
    VisionStreamType type;
    bool tbuffer;
  } stream_sub;
  VisionStreamBufs stream_bufs;
  struct {
    VisionStreamType type;
    int idx;
    VIPCBufExtra extra;
  } stream_acq;
  struct {
    VisionStreamType type;
    int idx;
  } stream_rel;
} VisionPacketData;

// A packet together with the fds received/sent as ancillary data.
typedef struct VisionPacket {
  int type;  // a VisionIPCPacketType value
  VisionPacketData d;
  int num_fds;
  int fds[VIPC_MAX_FDS];
} VisionPacket;

// Low-level packet transport (see visionipc.c for semantics).
int vipc_connect(void);
int vipc_recv(int fd, VisionPacket *out_p);
int vipc_send(int fd, const VisionPacket *p);

// One mmap'd frame buffer shared with the server.
typedef struct VIPCBuf {
  int fd;
  size_t len;
  void* addr;
} VIPCBuf;

// (Re)map the fds of a stream's buffers; unmaps existing mappings first.
void vipc_bufs_load(VIPCBuf *bufs, const VisionStreamBufs *stream_bufs,
                    int num_fds, const int* fds);

// Client-side state for one subscribed stream.
typedef struct VisionStream {
  int ipc_fd;     // socket to the vision server, -1 when closed
  int last_idx;   // buffer index currently held by the client, -1 if none
  int last_type;  // stream type of the held buffer
  int num_bufs;
  VisionStreamBufs bufs_info;
  VIPCBuf *bufs;
} VisionStream;

// Higher-level stream API built on the packet transport above.
int visionstream_init(VisionStream *s, VisionStreamType type, bool tbuffer, VisionStreamBufs *out_bufs_info);
void visionstream_release(VisionStream *s);
VIPCBuf* visionstream_get(VisionStream *s, VIPCBufExtra *out_extra);
void visionstream_destroy(VisionStream *s);

#ifdef __cplusplus
}
#endif

#endif

View File

@ -1,13 +1,13 @@
Import('env', 'arch', 'cereal', 'messaging', 'common', 'visionipc')
Import('env', 'arch', 'cereal', 'messaging', 'common', 'visionipc', 'gpucommon')
src = ['loggerd.cc', 'logger.cc']
libs = ['zmq', 'capnp', 'kj', 'z',
'avformat', 'avcodec', 'swscale', 'avutil',
'yuv', 'bz2', common, cereal, messaging, visionipc]
'yuv', 'bz2', 'OpenCL', common, cereal, messaging, visionipc]
if arch in ["aarch64", "larch64"]:
src += ['encoder.cc']
libs += ['OmxVenc', 'OmxCore']
libs += ['OmxVenc', 'OmxCore', 'gsl', 'CB'] + gpucommon
if arch == "aarch64":
libs += ['cutils']
else:
@ -15,4 +15,9 @@ if arch in ["aarch64", "larch64"]:
else:
libs += ['pthread']
if arch == "Darwin":
# fix OpenCL
del libs[libs.index('OpenCL')]
env['FRAMEWORKS'] = ['OpenCL']
env.Program(src, LIBS=libs)

View File

@ -457,7 +457,7 @@ static void handle_out_buf(EncoderState *s, OMX_BUFFERHEADERTYPE *out_buf) {
int encoder_encode_frame(EncoderState *s,
const uint8_t *y_ptr, const uint8_t *u_ptr, const uint8_t *v_ptr,
int in_width, int in_height,
int *frame_segment, VIPCBufExtra *extra) {
int *frame_segment, VisionIpcBufExtra *extra) {
int err;
pthread_mutex_lock(&s->lock);

View File

@ -68,7 +68,7 @@ void encoder_init(EncoderState *s, const char* filename, int width, int height,
int encoder_encode_frame(EncoderState *s,
const uint8_t *y_ptr, const uint8_t *u_ptr, const uint8_t *v_ptr,
int in_width, int in_height,
int *frame_segment, VIPCBufExtra *extra);
int *frame_segment, VisionIpcBufExtra *extra);
void encoder_open(EncoderState *s, const char* path);
void encoder_rotate(EncoderState *s, const char* new_path, int new_segment);
void encoder_close(EncoderState *s);

View File

@ -28,7 +28,6 @@
#include "common/timing.h"
#include "common/params.h"
#include "common/swaglog.h"
#include "common/visionipc.h"
#include "common/utilpp.h"
#include "common/util.h"
#include "camerad/cameras/camera_common.h"
@ -36,6 +35,9 @@
#include "messaging.hpp"
#include "services.h"
#include "visionipc.h"
#include "visionipc_client.h"
#if !(defined(QCOM) || defined(QCOM2))
#define DISABLE_ENCODER // TODO: loggerd for PC
#endif
@ -58,7 +60,7 @@ constexpr int DCAM_BITRATE = MAIN_BITRATE;
LogCameraInfo cameras_logged[LOG_CAMERA_ID_MAX] = {
[LOG_CAMERA_ID_FCAMERA] = {
.stream_type = VISION_STREAM_YUV,
.stream_type = VISION_STREAM_YUV_BACK,
.filename = "fcamera.hevc",
.frame_packet_name = "frame",
.fps = MAIN_FPS,
@ -200,7 +202,6 @@ void encoder_thread(int cam_idx) {
LogCameraInfo &cam_info = cameras_logged[cam_idx];
set_thread_name(cam_info.filename);
VisionStream stream;
RotateState &rotate_state = s.rotate_state[cam_idx];
std::vector<EncoderState*> encoders;
@ -213,15 +214,15 @@ void encoder_thread(int cam_idx) {
int cnt = 0;
LoggerHandle *lh = NULL;
VisionIpcClient vipc_client = VisionIpcClient("camerad", cam_info.stream_type, false);
while (!do_exit) {
VisionStreamBufs buf_info;
int err = visionstream_init(&stream, cam_info.stream_type, false, &buf_info);
if (err != 0) {
LOGD("visionstream connect fail");
util::sleep_for(100);
if (!vipc_client.connect(false)){
std::this_thread::sleep_for(std::chrono::milliseconds(100));
continue;
}
VisionBuf buf_info = vipc_client.buffers[0];
// init encoders
if (encoders.empty()) {
LOGD("encoder init %dx%d", buf_info.width, buf_info.height);
@ -241,11 +242,10 @@ void encoder_thread(int cam_idx) {
}
while (!do_exit) {
VIPCBufExtra extra;
VIPCBuf* buf = visionstream_get(&stream, &extra);
if (buf == NULL) {
LOG("visionstream get failed");
break;
VisionIpcBufExtra extra;
VisionBuf* buf = vipc_client.recv(&extra);
if (buf == nullptr){
continue;
}
//uint64_t current_time = nanos_since_boot();
@ -313,19 +313,18 @@ void encoder_thread(int cam_idx) {
rotate_state.setStreamFrameId(extra.frame_id);
uint8_t *y = (uint8_t*)buf->addr;
uint8_t *u = y + (buf_info.width*buf_info.height);
uint8_t *v = u + (buf_info.width/2)*(buf_info.height/2);
{
// encode hevc
int out_segment = -1;
int out_id = encoder_encode_frame(encoders[0], y, u, v,
buf_info.width, buf_info.height,
int out_id = encoder_encode_frame(encoders[0],
buf->y, buf->u, buf->v,
buf->width, buf->height,
&out_segment, &extra);
if (encoders.size() > 1) {
int out_segment_alt = -1;
encoder_encode_frame(encoders[1], y, u, v,
buf_info.width, buf_info.height,
encoder_encode_frame(encoders[1],
buf->y, buf->u, buf->v,
buf->width, buf->height,
&out_segment_alt, &extra);
}
@ -361,7 +360,6 @@ void encoder_thread(int cam_idx) {
lh = NULL;
}
visionstream_destroy(&stream);
}
LOG("encoder destroy");

View File

@ -28,7 +28,7 @@ int main() {
vidlogger.Open("o1");
for (int cnt=0; cnt<200; cnt++) {
VIPCBufExtra extra;
VisionIpcBufExtra extra;
VIPSBuf* buf = visionstream_get(&stream, &extra);
if (buf == NULL) {
printf("visionstream get failed\n");

View File

@ -4,10 +4,10 @@
#include <cassert>
#include <sys/resource.h>
#include "common/utilpp.h"
#include "common/visionbuf.h"
#include "common/visionipc.h"
#include "visionbuf.h"
#include "visionipc_client.h"
#include "common/swaglog.h"
#include "common/utilpp.h"
#include "models/dmonitoring.h"
@ -19,7 +19,6 @@
ExitHandler do_exit;
int main(int argc, char **argv) {
int err;
setpriority(PRIO_PROCESS, 0, -15);
PubMaster pm({"driverState"});
@ -28,30 +27,28 @@ int main(int argc, char **argv) {
DMonitoringModelState dmonitoringmodel;
dmonitoring_init(&dmonitoringmodel);
// loop
VisionStream stream;
while (!do_exit) {
VisionStreamBufs buf_info;
err = visionstream_init(&stream, VISION_STREAM_YUV_FRONT, true, &buf_info);
if (err) {
printf("visionstream connect fail\n");
util::sleep_for(100);
VisionIpcClient vipc_client = VisionIpcClient("camerad", VISION_STREAM_YUV_FRONT, true);
while (!do_exit){
if (!vipc_client.connect(false)){
std::this_thread::sleep_for(std::chrono::milliseconds(100));
continue;
}
LOGW("connected with buffer size: %d", buf_info.buf_len);
break;
}
while (!do_exit) {
LOGW("connected with buffer size: %d", vipc_client.buffers[0].len);
double last = 0;
while (!do_exit) {
VIPCBuf *buf;
VIPCBufExtra extra;
buf = visionstream_get(&stream, &extra);
if (buf == NULL) {
printf("visionstream get failed\n");
break;
VisionIpcBufExtra extra = {0};
VisionBuf *buf = vipc_client.recv(&extra);
if (buf == nullptr){
continue;
}
double t1 = millis_since_boot();
DMonitoringResult res = dmonitoring_eval_frame(&dmonitoringmodel, buf->addr, buf_info.width, buf_info.height);
DMonitoringResult res = dmonitoring_eval_frame(&dmonitoringmodel, buf->addr, buf->width, buf->height);
double t2 = millis_since_boot();
// send dm packet
@ -61,7 +58,6 @@ int main(int argc, char **argv) {
LOGD("dmonitoring process: %.2fms, from last %.2fms", t2-t1, t1-last);
last = t1;
}
visionstream_destroy(&stream);
}
dmonitoring_free(&dmonitoringmodel);

View File

@ -3,8 +3,8 @@
#include <unistd.h>
#include <eigen3/Eigen/Dense>
#include "common/visionbuf.h"
#include "common/visionipc.h"
#include "visionbuf.h"
#include "visionipc_client.h"
#include "common/swaglog.h"
#include "common/clutil.h"
#include "common/utilpp.h"
@ -118,17 +118,20 @@ int main(int argc, char **argv) {
model_init(&model, device_id, context);
LOGW("models loaded, modeld starting");
// loop
VisionStream stream;
while (!do_exit) {
VisionStreamBufs buf_info;
err = visionstream_init(&stream, VISION_STREAM_YUV, true, &buf_info);
if (err) {
LOGW("visionstream connect failed");
util::sleep_for(100);
VisionIpcClient vipc_client = VisionIpcClient("camerad", VISION_STREAM_YUV_BACK, true, device_id, context);
while (!do_exit){
if (!vipc_client.connect(false)){
std::this_thread::sleep_for(std::chrono::milliseconds(100));
continue;
}
LOGW("connected with buffer size: %d", buf_info.buf_len);
break;
}
// loop
while (!do_exit) {
VisionBuf *b = &vipc_client.buffers[0];
LOGW("connected with buffer size: %d (%d x %d)", b->len, b->width, b->height);
// setup filter to track dropped frames
const float dt = 1. / MODEL_FREQ;
@ -136,20 +139,16 @@ int main(int argc, char **argv) {
const float frame_filter_k = (dt / ts) / (1. + dt / ts);
float frames_dropped = 0;
// one frame in memory
VisionBuf yuv_ion = visionbuf_allocate_cl(buf_info.buf_len, device_id, context);
uint32_t frame_id = 0, last_vipc_frame_id = 0;
double last = 0;
int desire = -1;
uint32_t run_count = 0;
while (!do_exit) {
VIPCBuf *buf;
VIPCBufExtra extra;
buf = visionstream_get(&stream, &extra);
if (buf == NULL) {
LOGW("visionstream get failed");
break;
VisionIpcBufExtra extra;
VisionBuf *buf = vipc_client.recv(&extra);
if (buf == nullptr){
continue;
}
pthread_mutex_lock(&transform_lock);
@ -174,12 +173,8 @@ int main(int argc, char **argv) {
mt1 = millis_since_boot();
// TODO: don't make copies!
memcpy(yuv_ion.addr, buf->addr, buf_info.buf_len);
visionbuf_sync(&yuv_ion, VISIONBUF_SYNC_TO_DEVICE);
ModelDataRaw model_buf =
model_eval_frame(&model, yuv_ion.buf_cl, buf_info.width, buf_info.height,
model_eval_frame(&model, buf->buf_cl, buf->width, buf->height,
model_transform, vec_desire);
mt2 = millis_since_boot();
float model_execution_time = (mt2 - mt1) / 1000.0;
@ -194,14 +189,12 @@ int main(int argc, char **argv) {
model_publish(pm, extra.frame_id, frame_id, frame_drop_ratio, model_buf, raw_pred_ptr, extra.timestamp_eof, model_execution_time);
posenet_publish(pm, extra.frame_id, vipc_dropped_frames, model_buf, extra.timestamp_eof);
LOGD("model process: %.2fms, from last %.2fms, vipc_frame_id %zu, frame_id, %zu, frame_drop %.3f", mt2-mt1, mt1-last, extra.frame_id, frame_id, frame_drop_ratio);
LOGD("model process: %.2fms, from last %.2fms, vipc_frame_id %u, frame_id, %u, frame_drop %.3f", mt2-mt1, mt1-last, extra.frame_id, frame_id, frame_drop_ratio);
last = mt1;
last_vipc_frame_id = extra.frame_id;
}
}
visionbuf_free(&yuv_ion);
visionstream_destroy(&stream);
}
model_free(&model);

View File

@ -3,7 +3,7 @@ Import('env', 'qt_env', 'arch', 'common', 'messaging', 'gpucommon', 'visionipc',
src = ['ui.cc', 'paint.cc', 'sidebar.cc', '#phonelibs/nanovg/nanovg.c']
libs = [common, 'zmq', 'capnp', 'kj', 'm', cereal, messaging, gpucommon, visionipc]
libs = [common, 'zmq', 'capnp', 'kj', 'm', 'OpenCL', cereal, messaging, gpucommon, visionipc]
if qt_env is None:
@ -24,6 +24,11 @@ else:
LIBS=qt_libs)
qt_libs.append(widgets)
if arch == "Darwin":
# fix OpenCL
del qt_libs[qt_libs.index('OpenCL')]
qt_env['FRAMEWORKS'] += ['OpenCL']
qt_src = ["qt/ui.cc", "qt/window.cc", "qt/home.cc", "qt/offroad/settings.cc", "qt/offroad/onboarding.cc"] + src
qt_env.Program("_ui", qt_src, LIBS=qt_libs)

View File

@ -156,12 +156,12 @@ static void draw_frame(UIState *s) {
}
glActiveTexture(GL_TEXTURE0);
if (s->stream.last_idx >= 0) {
glBindTexture(GL_TEXTURE_2D, s->frame_texs[s->stream.last_idx]);
if (s->last_frame) {
glBindTexture(GL_TEXTURE_2D, s->frame_texs[s->last_frame->idx]);
#ifndef QCOM
// this is handled in ion on QCOM
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, s->stream.bufs_info.width, s->stream.bufs_info.height,
0, GL_RGB, GL_UNSIGNED_BYTE, s->priv_hnds[s->stream.last_idx]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, s->last_frame->width, s->last_frame->height,
0, GL_RGB, GL_UNSIGNED_BYTE, s->priv_hnds[s->last_frame->idx]);
#endif
}
@ -444,11 +444,11 @@ void ui_draw(UIState *s) {
}
const bool draw_vision = s->started && s->status != STATUS_OFFROAD &&
s->active_app == cereal::UiLayoutState::App::NONE;
s->active_app == cereal::UiLayoutState::App::NONE && s->vipc_client->connected;
// GL drawing functions
ui_draw_background(s);
if (draw_vision && s->vision_connected) {
if (draw_vision) {
ui_draw_vision_frame(s);
}
glEnable(GL_BLEND);
@ -458,7 +458,7 @@ void ui_draw(UIState *s) {
// NVG drawing functions - should be no GL inside NVG frame
nvgBeginFrame(s->vg, s->fb_w, s->fb_h, 1.0f);
ui_draw_sidebar(s);
if (draw_vision && s->vision_connected) {
if (draw_vision) {
ui_draw_vision(s);
}

View File

@ -270,19 +270,6 @@ void GLWindow::wake() {
}
}
GLuint visionimg_to_gl(const VisionImg *img, EGLImageKHR *pkhr, void **pph) {
unsigned int texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->width, img->height, 0, GL_RGB, GL_UNSIGNED_BYTE, *pph);
glGenerateMipmap(GL_TEXTURE_2D);
*pkhr = (EGLImageKHR)1; // not NULL
return texture;
}
void visionimg_destroy_gl(EGLImageKHR khr, void *ph) {
// empty
}
FramebufferState* framebuffer_init(const char* name, int32_t layer, int alpha,
int *out_w, int *out_h) {

View File

@ -23,41 +23,29 @@ int write_param_float(float param, const char* param_name, bool persistent_param
return Params(persistent_param).write_db_value(param_name, s, size < sizeof(s) ? size : sizeof(s));
}
void ui_init(UIState *s) {
s->sm = new SubMaster({"modelV2", "controlsState", "uiLayoutState", "liveCalibration", "radarState", "thermal", "frame",
"health", "carParams", "ubloxGnss", "driverState", "dMonitoringState", "sensorEvents"});
s->started = false;
s->status = STATUS_OFFROAD;
s->scene.satelliteCount = -1;
s->fb = framebuffer_init("ui", 0, true, &s->fb_w, &s->fb_h);
assert(s->fb);
ui_nvg_init(s);
}
static void ui_init_vision(UIState *s) {
// Invisible until we receive a calibration message.
s->scene.world_objects_visible = false;
for (int i = 0; i < UI_BUF_COUNT; i++) {
for (int i = 0; i < s->vipc_client->num_buffers; i++) {
if (s->khr[i] != 0) {
visionimg_destroy_gl(s->khr[i], s->priv_hnds[i]);
glDeleteTextures(1, &s->frame_texs[i]);
}
VisionBuf * buf = &s->vipc_client->buffers[i];
VisionImg img = {
.fd = s->stream.bufs[i].fd,
.fd = buf->fd,
.format = VISIONIMG_FORMAT_RGB24,
.width = s->stream.bufs_info.width,
.height = s->stream.bufs_info.height,
.stride = s->stream.bufs_info.stride,
.width = (int)buf->width,
.height = (int)buf->height,
.stride = (int)buf->stride,
.bpp = 3,
.size = s->stream.bufs_info.buf_len,
.size = buf->len,
};
#ifndef QCOM
s->priv_hnds[i] = s->stream.bufs[i].addr;
s->priv_hnds[i] = buf->addr;
#endif
s->frame_texs[i] = visionimg_to_gl(&img, &s->khr[i], &s->priv_hnds[i]);
@ -73,36 +61,24 @@ static void ui_init_vision(UIState *s) {
assert(glGetError() == GL_NO_ERROR);
}
void ui_update_vision(UIState *s) {
if (!s->vision_connected && s->started) {
const VisionStreamType type = s->scene.frontview ? VISION_STREAM_RGB_FRONT : VISION_STREAM_RGB_BACK;
int err = visionstream_init(&s->stream, type, true, nullptr);
if (err == 0) {
ui_init_vision(s);
s->vision_connected = true;
}
}
void ui_init(UIState *s) {
s->sm = new SubMaster({"modelV2", "controlsState", "uiLayoutState", "liveCalibration", "radarState", "thermal", "frame",
"health", "carParams", "ubloxGnss", "driverState", "dMonitoringState", "sensorEvents"});
if (s->vision_connected) {
if (!s->started) goto destroy;
s->started = false;
s->status = STATUS_OFFROAD;
s->scene.satelliteCount = -1;
read_param(&s->is_metric, "IsMetric");
// poll for a new frame
struct pollfd fds[1] = {{
.fd = s->stream.ipc_fd,
.events = POLLOUT,
}};
int ret = poll(fds, 1, 100);
if (ret > 0) {
if (!visionstream_get(&s->stream, nullptr)) goto destroy;
}
}
s->fb = framebuffer_init("ui", 0, true, &s->fb_w, &s->fb_h);
assert(s->fb);
return;
ui_nvg_init(s);
s->last_frame = nullptr;
s->vipc_client = new VisionIpcClient("camerad", VISION_STREAM_RGB_BACK, true);
destroy:
visionstream_destroy(&s->stream);
s->vision_connected = false;
}
template <class T>
@ -280,6 +256,21 @@ static void ui_read_params(UIState *s) {
}
}
void ui_update_vision(UIState *s) {
if (!s->vipc_client->connected && s->started) {
if (s->vipc_client->connect(false)){
ui_init_vision(s);
}
}
if (s->vipc_client->connected){
VisionBuf * buf = s->vipc_client->recv();
if (buf != nullptr){
s->last_frame = buf;
}
}
}
void ui_update(UIState *s) {
ui_read_params(s);
update_sockets(s);
@ -291,6 +282,7 @@ void ui_update(UIState *s) {
s->active_app = cereal::UiLayoutState::App::HOME;
s->scene.sidebar_collapsed = false;
s->sound->stop();
s->vipc_client->connected = false;
} else if (s->started && s->status == STATUS_OFFROAD) {
s->status = STATUS_DISENGAGED;
s->started_frame = s->sm->frame;

View File

@ -19,12 +19,13 @@
#include "nanovg.h"
#include "common/mat.h"
#include "common/visionipc.h"
#include "common/visionimg.h"
#include "common/framebuffer.h"
#include "common/modeldata.h"
#include "common/params.h"
#include "sound.hpp"
#include "visionipc.h"
#include "visionipc_client.h"
#define COLOR_BLACK nvgRGBA(0, 0, 0, 255)
#define COLOR_BLACK_ALPHA(x) nvgRGBA(0, 0, 0, x)
@ -134,6 +135,9 @@ typedef struct UIScene {
} UIScene;
typedef struct UIState {
VisionIpcClient * vipc_client;
VisionBuf * last_frame;
// framebuffer
FramebufferState *fb;
int fb_w, fb_h;
@ -161,10 +165,6 @@ typedef struct UIState {
UIScene scene;
cereal::UiLayoutState::App active_app;
// vision state
bool vision_connected;
VisionStream stream;
// graphics
GLuint frame_program;
GLuint frame_texs[UI_BUF_COUNT];