

想要实现 yolov5 的物体识别快速推理,并通过 RTSP 推送处理后的画面。于是我在第二个 demo 的 main.cc 中加入了第一个 demo 的 RTSP 推流逻辑,并在 CMakeLists.txt 中补充了额外的 RTSP 库依赖,已成功编译生成可执行文件。但放到板子上运行后没有任何推理结果打印,板子还输出了下面的提示。想请教应该如何解决,才能让这个 demo 正常推流。
板子输出如下所示

我的 main.cc 代码修改后如下:
Code: Select all
// Copyright (c) 2021 by Rockchip Electronics Co., Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*-------------------------------------------
Includes
-------------------------------------------*/
#include "rknn_api.h"
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <vector>
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
//#include <fcntl.h>
#include <getopt.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
//#include <stdio.h>
//#include <stdlib.h>
//#include <string.h>
#include <sys/poll.h>
//#include <time.h>
//#include <unistd.h>
//#include <vector>
#include "rtsp_demo.h"
#include "sample_comm.h"
#include "retinaface.h"
#include "postprocess.h"
// #define STB_IMAGE_IMPLEMENTATION
// #include "stb/stb_image.h"
// #define STB_IMAGE_RESIZE_IMPLEMENTATION
// #include <stb/stb_image_resize.h>
//Opencv
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <unistd.h> // sleep()
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#define PERF_WITH_POST 1
// Frame geometry shared by the whole pipeline: main() passes these same
// values to vi_chn_init(), test_vpss_init() and test_venc_init(), and they
// also size the YOLOv5 model input staging buffer (640x640, 3 channels).
int width = 640;
int height = 640;
int channels = 3;
/*-------------------------------------------
Functions
-------------------------------------------*/
// Print a one-line summary of a tensor attribute as reported by rknn_query():
// index, name, shape, element count, size, layout, type and quantization info.
static void dump_tensor_attr(rknn_tensor_attr *attr)
{
    // Render the dims array as a comma-separated list, e.g. "1, 640, 640, 3".
    char dims[128] = {0};
    size_t off = 0;
    for (uint32_t i = 0; i < attr->n_dims; ++i)
    {
        // Prefix a separator before every element except the first.
        off += sprintf(&dims[off], "%s%d", (i == 0) ? "" : ", ", attr->dims[i]);
    }
    printf(" index=%d, name=%s, n_dims=%d, dims=[%s], n_elems=%d, size=%d, fmt=%s, type=%s, qnt_type=%s, "
           "zp=%d, scale=%f\n",
           attr->index, attr->name, attr->n_dims, dims, attr->n_elems, attr->size, get_format_string(attr->fmt),
           get_type_string(attr->type), get_qnt_type_string(attr->qnt_type), attr->zp, attr->scale);
}
// RTSP functions
// Create VENC channel `chnId` encoding `width`x`height` frames with codec
// `enType` (H.264 CBR here) and start receiving frames from VPSS.
// Returns RK_SUCCESS (0) on success, the failing RKMPI error code otherwise.
static RK_S32 test_venc_init(int chnId, int width, int height, RK_CODEC_ID_E enType) {
    printf("%s\n", __func__);
    RK_S32 s32Ret;
    VENC_RECV_PIC_PARAM_S stRecvParam;
    VENC_CHN_ATTR_S stAttr;
    memset(&stAttr, 0, sizeof(VENC_CHN_ATTR_S));

    // RTSP H264
    stAttr.stVencAttr.enType = enType;
    // Input frames come from VPSS as packed RGB888 (see test_vpss_init).
    stAttr.stVencAttr.enPixelFormat = RK_FMT_RGB888;
    stAttr.stVencAttr.u32Profile = H264E_PROFILE_MAIN;
    stAttr.stVencAttr.u32PicWidth = width;
    stAttr.stVencAttr.u32PicHeight = height;
    stAttr.stVencAttr.u32VirWidth = width;
    stAttr.stVencAttr.u32VirHeight = height;
    stAttr.stVencAttr.u32StreamBufCnt = 2;
    // NOTE(review): w*h*3/2 is the YUV420SP raw-frame size; with RK_FMT_RGB888
    // input the raw frame is w*h*3 — confirm the required stream buffer size
    // against the RKMPI VENC documentation for this SDK version.
    stAttr.stVencAttr.u32BufSize = width * height * 3 / 2;
    stAttr.stVencAttr.enMirror = MIRROR_NONE;
    stAttr.stRcAttr.enRcMode = VENC_RC_MODE_H264CBR;
    stAttr.stRcAttr.stH264Cbr.u32BitRate = 10 * 1024;
    stAttr.stRcAttr.stH264Cbr.u32Gop = 1; // GOP of 1: every frame is an I-frame

    // BUGFIX: the original discarded both return codes, so a failed channel
    // creation went unnoticed until the stream silently produced nothing.
    s32Ret = RK_MPI_VENC_CreateChn(chnId, &stAttr);
    if (s32Ret != RK_SUCCESS) {
        printf("RK_MPI_VENC_CreateChn %x\n", s32Ret);
        return s32Ret;
    }

    memset(&stRecvParam, 0, sizeof(VENC_RECV_PIC_PARAM_S));
    stRecvParam.s32RecvPicNum = -1; // receive frames until explicitly stopped
    s32Ret = RK_MPI_VENC_StartRecvFrame(chnId, &stRecvParam);
    if (s32Ret != RK_SUCCESS) {
        printf("RK_MPI_VENC_StartRecvFrame %x\n", s32Ret);
        return s32Ret;
    }
    return 0;
}
// demo板dev默认都是0,根据不同的channel 来选择不同的vi节点
// Initialise VI device 0: configure its attributes (if not yet configured),
// enable it, and bind it to pipe 0.
// Returns 0 on success, -1 on any RKMPI failure.
int vi_dev_init() {
    printf("%s\n", __func__);
    int ret = 0;
    int devId = 0;
    int pipeId = devId;

    VI_DEV_ATTR_S stDevAttr;
    VI_DEV_BIND_PIPE_S stBindPipe;
    memset(&stDevAttr, 0, sizeof(stDevAttr));
    memset(&stBindPipe, 0, sizeof(stBindPipe));

    // 0. get dev config status
    ret = RK_MPI_VI_GetDevAttr(devId, &stDevAttr);
    if (ret == RK_ERR_VI_NOT_CONFIG) {
        // 0-1. config dev
        ret = RK_MPI_VI_SetDevAttr(devId, &stDevAttr);
        if (ret != RK_SUCCESS) {
            printf("RK_MPI_VI_SetDevAttr %x\n", ret);
            return -1;
        }
    } else {
        printf("RK_MPI_VI_SetDevAttr already\n");
    }

    // 1. get dev enable status
    ret = RK_MPI_VI_GetDevIsEnable(devId);
    if (ret != RK_SUCCESS) {
        // 1-2. enable dev
        ret = RK_MPI_VI_EnableDev(devId);
        if (ret != RK_SUCCESS) {
            printf("RK_MPI_VI_EnableDev %x\n", ret);
            return -1;
        }
        // 1-3. bind dev/pipe
        // BUGFIX: u32Num is the NUMBER of pipes being bound, not a pipe id.
        // The original assigned pipeId (0) here, which requests binding of
        // zero pipes; bind exactly one pipe, whose id goes in PipeId[0].
        stBindPipe.u32Num = 1;
        stBindPipe.PipeId[0] = pipeId;
        ret = RK_MPI_VI_SetDevBindPipe(devId, &stBindPipe);
        if (ret != RK_SUCCESS) {
            printf("RK_MPI_VI_SetDevBindPipe %x\n", ret);
            return -1;
        }
    } else {
        printf("RK_MPI_VI_EnableDev already\n");
    }
    return 0;
}
// Configure and enable VI channel `channelId` on dev/pipe 0 to deliver
// `width`x`height` YUV420SP frames through DMABUF-backed buffers.
// Returns 0 on success, a non-zero combined RKMPI error code otherwise.
int vi_chn_init(int channelId, int width, int height) {
    VI_CHN_ATTR_S vi_chn_attr;
    memset(&vi_chn_attr, 0, sizeof(vi_chn_attr));

    vi_chn_attr.stIspOpt.u32BufCount = 2;
    vi_chn_attr.stIspOpt.enMemoryType =
        VI_V4L2_MEMORY_TYPE_DMABUF; // VI_V4L2_MEMORY_TYPE_MMAP;
    vi_chn_attr.stSize.u32Width = width;
    vi_chn_attr.stSize.u32Height = height;
    vi_chn_attr.enPixelFormat = RK_FMT_YUV420SP;
    vi_chn_attr.enCompressMode = COMPRESS_MODE_NONE; // COMPRESS_AFBC_16x16;
    vi_chn_attr.u32Depth = 2;

    int ret = RK_MPI_VI_SetChnAttr(0, channelId, &vi_chn_attr);
    ret |= RK_MPI_VI_EnableChn(0, channelId);
    if (ret) {
        printf("ERROR: create VI error! ret=%d\n", ret);
    }
    return ret;
}
// Create VPSS group 0, configure channel `VpssChn` to output `width`x`height`
// packed RGB888 frames in user mode, enable the channel and start the group.
// Returns RK_SUCCESS on success, the first failing RKMPI error code otherwise.
int test_vpss_init(int VpssChn, int width, int height) {
    printf("%s\n", __func__);
    int s32Ret;
    VPSS_CHN_ATTR_S stVpssChnAttr;
    VPSS_GRP_ATTR_S stGrpVpssAttr;
    int s32Grp = 0;

    // BUGFIX: both attribute structs were used without zero-initialisation,
    // so every field not explicitly set below was handed to the driver as
    // uninitialised stack garbage.
    memset(&stVpssChnAttr, 0, sizeof(stVpssChnAttr));
    memset(&stGrpVpssAttr, 0, sizeof(stGrpVpssAttr));

    // Group: accepts YUV420SP input up to 4096x4096, no rate conversion.
    stGrpVpssAttr.u32MaxW = 4096;
    stGrpVpssAttr.u32MaxH = 4096;
    stGrpVpssAttr.enPixelFormat = RK_FMT_YUV420SP;
    stGrpVpssAttr.stFrameRate.s32SrcFrameRate = -1;
    stGrpVpssAttr.stFrameRate.s32DstFrameRate = -1;
    stGrpVpssAttr.enCompressMode = COMPRESS_MODE_NONE;

    // Channel: converts to packed RGB888 at the requested size so frames can
    // be consumed directly by OpenCV / the model input path.
    stVpssChnAttr.enChnMode = VPSS_CHN_MODE_USER;
    stVpssChnAttr.enDynamicRange = DYNAMIC_RANGE_SDR8;
    stVpssChnAttr.enPixelFormat = RK_FMT_RGB888;
    stVpssChnAttr.stFrameRate.s32SrcFrameRate = -1;
    stVpssChnAttr.stFrameRate.s32DstFrameRate = -1;
    stVpssChnAttr.u32Width = width;
    stVpssChnAttr.u32Height = height;
    stVpssChnAttr.enCompressMode = COMPRESS_MODE_NONE;

    s32Ret = RK_MPI_VPSS_CreateGrp(s32Grp, &stGrpVpssAttr);
    if (s32Ret != RK_SUCCESS) {
        printf("RK_MPI_VPSS_CreateGrp %x\n", s32Ret);
        return s32Ret;
    }
    s32Ret = RK_MPI_VPSS_SetChnAttr(s32Grp, VpssChn, &stVpssChnAttr);
    if (s32Ret != RK_SUCCESS) {
        printf("RK_MPI_VPSS_SetChnAttr %x\n", s32Ret);
        return s32Ret;
    }
    s32Ret = RK_MPI_VPSS_EnableChn(s32Grp, VpssChn);
    if (s32Ret != RK_SUCCESS) {
        printf("RK_MPI_VPSS_EnableChn %x\n", s32Ret);
        return s32Ret;
    }
    s32Ret = RK_MPI_VPSS_StartGrp(s32Grp);
    if (s32Ret != RK_SUCCESS) {
        printf("RK_MPI_VPSS_StartGrp %x\n", s32Ret);
        return s32Ret;
    }
    return s32Ret;
}
/*-------------------------------------------
Main Functions
-------------------------------------------*/
int main(int argc, char *argv[])
{
char *model_path = "./model/RV1106/yolov5s-640-640.rknn";
int loop_count = 1;
const float nms_threshold = NMS_THRESH;
const float box_conf_threshold = BOX_THRESH;
int img_width = 640;
int img_height = 640;
rknn_context ctx = 0;
// Load RKNN Model Init rknn from model path
int ret = rknn_init(&ctx, model_path, 0, 0, NULL);
if (ret < 0)
{
printf("rknn_init fail! ret=%d\n", ret);
return -1;
}
// Get sdk and driver version
rknn_sdk_version sdk_ver;
ret = rknn_query(ctx, RKNN_QUERY_SDK_VERSION, &sdk_ver, sizeof(sdk_ver));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
printf("rknn_api/rknnrt version: %s, driver version: %s\n", sdk_ver.api_version, sdk_ver.drv_version);
// Get Model Input Output Info
rknn_input_output_num io_num;
ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num)); //get num of in/out tensor
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
printf("model input num: %d, output num: %d\n", io_num.n_input, io_num.n_output);
//Get Model Input/Output attribute
printf("input tensors:\n");
rknn_tensor_attr input_attrs[io_num.n_input];
memset(input_attrs, 0, io_num.n_input * sizeof(rknn_tensor_attr));
for (uint32_t i = 0; i < io_num.n_input; i++)
{
input_attrs[i].index = i;
// query info
ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(input_attrs[i]), sizeof(rknn_tensor_attr));
if (ret < 0)
{
printf("rknn_init error! ret=%d\n", ret);
return -1;
}
dump_tensor_attr(&input_attrs[i]);
}
printf("output tensors:\n");
rknn_tensor_attr output_attrs[io_num.n_output];
memset(output_attrs, 0, io_num.n_output * sizeof(rknn_tensor_attr));
for (uint32_t i = 0; i < io_num.n_output; i++)
{
output_attrs[i].index = i;
// query info
ret = rknn_query(ctx, RKNN_QUERY_NATIVE_NHWC_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
dump_tensor_attr(&output_attrs[i]);
}
//different from here........................................
// Get custom string
rknn_custom_string custom_string;
ret = rknn_query(ctx, RKNN_QUERY_CUSTOM_STRING, &custom_string, sizeof(custom_string));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
printf("custom string: %s\n", custom_string.string);
unsigned char *input_data = new unsigned char[width * height * channels] ;
rknn_tensor_type input_type = RKNN_TENSOR_UINT8;
rknn_tensor_format input_layout = RKNN_TENSOR_NHWC;
//Init Opencv
cv::VideoCapture cap;
//cv::Mat bgr;
cap.set(cv::CAP_PROP_FRAME_WIDTH, 240);
cap.set(cv::CAP_PROP_FRAME_HEIGHT, 240);
cap.open(0);
//Init RTSP
RK_S32 s32Ret = 0;
//h264_frame
VENC_STREAM_S stFrame;
stFrame.pstPack = (VENC_PACK_S *)malloc(sizeof(VENC_PACK_S));
VIDEO_FRAME_INFO_S h264_frame;
VIDEO_FRAME_INFO_S stVpssFrame;
// rkaiq init
RK_BOOL multi_sensor = RK_FALSE;
const char *iq_dir = "/etc/iqfiles";
rk_aiq_working_mode_t hdr_mode = RK_AIQ_WORKING_MODE_NORMAL;
//hdr_mode = RK_AIQ_WORKING_MODE_ISP_HDR2;
SAMPLE_COMM_ISP_Init(0, hdr_mode, multi_sensor, iq_dir);
SAMPLE_COMM_ISP_Run(0);
// rkmpi init
if (RK_MPI_SYS_Init() != RK_SUCCESS) {
RK_LOGE("rk mpi sys init fail!");
return -1;
}
// rtsp init
rtsp_demo_handle g_rtsplive = NULL;
rtsp_session_handle g_rtsp_session;
g_rtsplive = create_rtsp_demo(554);
g_rtsp_session = rtsp_new_session(g_rtsplive, "/live/0");
rtsp_set_video(g_rtsp_session, RTSP_CODEC_ID_VIDEO_H264, NULL, 0);
rtsp_sync_video_ts(g_rtsp_session, rtsp_get_reltime(), rtsp_get_ntptime());
//new
//rtsp_set_server_address(g_rtsplive, "172.32.0.93"); // set RTSP server IP 172.32.0.93
// vi init
vi_dev_init();
vi_chn_init(0, width, height);
// vpss init
test_vpss_init(0, width, height);
// bind vi to vpss
MPP_CHN_S stSrcChn, stvpssChn;
stSrcChn.enModId = RK_ID_VI;
stSrcChn.s32DevId = 0;
stSrcChn.s32ChnId = 0;
stvpssChn.enModId = RK_ID_VPSS;
stvpssChn.s32DevId = 0;
stvpssChn.s32ChnId = 0;
printf("====RK_MPI_SYS_Bind vi0 to vpss0====\n");
s32Ret = RK_MPI_SYS_Bind(&stSrcChn, &stvpssChn);
if (s32Ret != RK_SUCCESS) {
RK_LOGE("bind 0 ch venc failed");
return -1;
}
// venc init
RK_CODEC_ID_E enCodecType = RK_VIDEO_ID_AVC;
test_venc_init(0, width, height, enCodecType);
//RTSP Init down
// Create input tensor memory
rknn_tensor_mem *input_mems[1];
// default input type is int8 (normalize and quantize need compute in outside)
// if set uint8, will fuse normalize and quantize to npu
input_attrs[0].type = input_type;
// default fmt is NHWC, npu only support NHWC in zero copy mode
input_attrs[0].fmt = input_layout;
input_mems[0] = rknn_create_mem(ctx, input_attrs[0].size_with_stride);
// Copy input data to input tensor memory
int width = input_attrs[0].dims[2];
int stride = input_attrs[0].w_stride;
// Create output tensor memory
rknn_tensor_mem *output_mems[io_num.n_output];
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
output_mems[i] = rknn_create_mem(ctx, output_attrs[i].size_with_stride);
}
// Set input tensor memory
ret = rknn_set_io_mem(ctx, input_mems[0], &input_attrs[0]);
if (ret < 0)
{
printf("rknn_set_io_mem fail! ret=%d\n", ret);
return -1;
}
// Set output tensor memory
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
// set output memory and attribute
ret = rknn_set_io_mem(ctx, output_mems[i], &output_attrs[i]);
if (ret < 0)
{
printf("rknn_set_io_mem fail! ret=%d\n", ret);
return -1;
}
}
//get model size
int model_width = 0;
int model_height = 0;
printf("model is NHWC input fmt\n");
model_width = input_attrs[0].dims[1];
model_height = input_attrs[0].dims[2];
// For post_process
float scale_w = (float)model_width / img_width;
float scale_h = (float)model_height / img_height;
detect_result_group_t detect_result_group;
std::vector<float> out_scales;
std::vector<int32_t> out_zps;
for (int i = 0; i < io_num.n_output; ++i)
{
out_scales.push_back(output_attrs[i].scale);
out_zps.push_back(output_attrs[i].zp);
}
while(1)
{// get vpss frame
s32Ret = RK_MPI_VPSS_GetChnFrame(0,0, &stVpssFrame,-1);
if(s32Ret == RK_SUCCESS)
{
void *data = RK_MPI_MB_Handle2VirAddr(stVpssFrame.stVFrame.pMbBlk);
//test
cv::Mat bgr(height,width,CV_8UC3, data);
cap >> bgr;
cv::resize(bgr, bgr, cv::Size(640,640), 0, 0, cv::INTER_LINEAR);
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
cv::Vec3b pixel = bgr.at<cv::Vec3b>(y, x); //GET different 3 colour in each pixel
input_data[(y * width + x) * channels + 0] = pixel[2]; // Red
input_data[(y * width + x) * channels + 1] = pixel[1]; // Green
input_data[(y * width + x) * channels + 2] = pixel[0]; // Blue
}
}
//printf("copy input data\n");
memcpy(input_mems[0]->virt_addr, input_data, width * input_attrs[0].dims[1] * input_attrs[0].dims[3]);
// Run
//printf("Begin perf ...\n");
ret = rknn_run(ctx, NULL);
if (ret < 0)
{
printf("rknn run error %d\n", ret);
return -1;
}
// post process
post_process((int8_t *)output_mems[0]->virt_addr, (int8_t *)output_mems[1]->virt_addr, (int8_t *)output_mems[2]->virt_addr, 640, 640,
box_conf_threshold, nms_threshold, scale_w, scale_h, out_zps, out_scales, &detect_result_group);
//char text[256];
for (int i = 0; i < detect_result_group.count; i++)
{
detect_result_t *det_result = &(detect_result_group.results[i]);
//sprintf(text, "%s %.1f%%", det_result->name, det_result->prop * 100);
printf("%s @ (%d %d %d %d) %f\n",
det_result->name,
det_result->box.left, det_result->box.top, det_result->box.right, det_result->box.bottom,
det_result->prop);
cv::rectangle(bgr,cv::Point(det_result->box.left ,det_result->box.top),
cv::Point(det_result->box.right,det_result->box.bottom),cv::Scalar(0,255,0),2);
cv::putText(bgr, det_result->name, cv::Point(det_result->box.left, det_result->box.top - 8),
cv::FONT_HERSHEY_SIMPLEX,1,
cv::Scalar(0,255,0),2);
}
// copy RTSP image
memcpy(data, bgr.data, 720 * 480 * 3);
}
// send stream
// encode H264
RK_MPI_VENC_SendFrame(0, &stVpssFrame,-1);
// rtsp
s32Ret = RK_MPI_VENC_GetStream(0, &stFrame, -1);
if(s32Ret == RK_SUCCESS)
{
if(g_rtsplive && g_rtsp_session)
{
//printf("len = %d PTS = %d \n",stFrame.pstPack->u32Len, stFrame.pstPack->u64PTS);
void *pData = RK_MPI_MB_Handle2VirAddr(stFrame.pstPack->pMbBlk);
rtsp_tx_video(g_rtsp_session, (uint8_t *)pData, stFrame.pstPack->u32Len,
stFrame.pstPack->u64PTS);
rtsp_do_event(g_rtsplive);
}
}
// release frame
s32Ret = RK_MPI_VPSS_ReleaseChnFrame(0, 0, &stVpssFrame);
if (s32Ret != RK_SUCCESS) {
RK_LOGE("RK_MPI_VI_ReleaseChnFrame fail %x", s32Ret);
}
s32Ret = RK_MPI_VENC_ReleaseStream(0, &stFrame);
if (s32Ret != RK_SUCCESS) {
RK_LOGE("RK_MPI_VENC_ReleaseStream fail %x", s32Ret);
}
}
RK_MPI_SYS_UnBind(&stSrcChn, &stvpssChn);
RK_MPI_VI_DisableChn(0, 0);
RK_MPI_VI_DisableDev(0);
RK_MPI_VPSS_StopGrp(0);
RK_MPI_VPSS_DestroyGrp(0);
RK_MPI_VENC_StopRecvFrame(0);
RK_MPI_VENC_DestroyChn(0);
free(stFrame.pstPack);
if (g_rtsplive)
rtsp_del_demo(g_rtsplive);
SAMPLE_COMM_ISP_Stop(0);
RK_MPI_SYS_Exit();
// Destroy rknn memory
rknn_destroy_mem(ctx, input_mems[0]);
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
rknn_destroy_mem(ctx, output_mems[i]);
}
// destroy
rknn_destroy(ctx);
delete[] input_data;
return 0;
}