> 文档中心 > Allwinner V3s RFBNet

Allwinner V3s RFBNet


在Allwinner V3s 上面运行 RFBNet 检测

RFBNet具备同非常深的主干网络检测器的精度,但是保持了实时性。

论文:Receptive Field Block Net for Accurate and Fast Object Detection (ECCV 2018)

链接:https://arxiv.org/abs/1711.07767

Github:https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB
优点:

基于SSD修改。速度超级快:在MNN上测试,MTCNN需要20ms,该模型只要4ms。精度上不如MTCNN。

支持NCNN,MNN。包含2个版本的模型,slim版本(速度快)和RFB版本(精度高)。

缺点:

不支持人脸关键点检测。

交叉编译MNN

MNN是阿里的开源机器学习推理框架,有着详细的官方文档。

从github上下载 https://github.com/alibaba/MNN

交叉编译工具链可使用Linaro
使用 cmake 命令,我使用的是:arm-linux-gnueabihf-g++ 6.3
cmake .. \
-DCMAKE_SYSTEM_NAME=Linux \
-DCMAKE_SYSTEM_VERSION=1 \
-DCMAKE_SYSTEM_PROCESSOR=arm \
-DCMAKE_C_COMPILER=arm-linux-gnueabihf-gcc \
-DCMAKE_CXX_COMPILER=arm-linux-gnueabihf-g++

编译MNN
mkdir build
cd build
cmake ..   # 带上面的交叉编译参数
make -j4

编译得到:libMNN.so,可以使用命令:file libMNN.so 查看
file libMNN.so
libMNN.so: ELF 32-bit LSB shared object, ARM, EABI5 version 1 (GNU/Linux), dynamically linked, BuildID[sha1]=8da2a34050d48644ca34e7bf6a622381475e6777, not stripped

交叉编译RFB-MNN

将 libMNN.so 拷贝到 /home/t/Zero/Ultra-Light-Fast-Generic-Face-Detector-1MB-master/MNN/mnn/lib
把 opencv 相关屏蔽

main.cpp 修改如下:

//  Created by Linzaer on 2019/11/15.//  Copyright © 2019 Linzaer. All rights reserved.#include "UltraFace.hpp"#include //#include #include #include #include #include #include #include using namespace std;static unique_ptr<char[]> file_to_buffer(char *filename, int *sizeptr) {    ifstream fin(filename, ios::in | ios::binary);    if (!fin.is_open()) { cout << "Could not open file: " << filename << endl; exit(-1);    }    fin.seekg(0, std::ios::end);    *sizeptr = fin.tellg();    fin.seekg(0, std::ios::beg);    unique_ptr<char[]> buffer(new char[*sizeptr]);    fin.read((char *)buffer.get(), *sizeptr);    fin.close();    return move(buffer);}int main(int argc, char **argv) {    if (argc <= 2) { fprintf(stderr, "Usage: %s  [image files...]\n", argv[0]); return 1;    }    string mnn_path = argv[1];    UltraFace ultraface(mnn_path, 320, 240, 4, 0.65); // config model input    string image_file = argv[2];    cout << "Processing " << image_file << endl; int datasize = 0;    unique_ptr<char[]> datafile = file_to_buffer(argv[2], &datasize); printf("datasize = %d\n", datasize);    auto start = chrono::steady_clock::now();    vector<FaceInfo> face_info;  ultraface.detect((uint8_t*)datafile.get(), face_info);    for (auto face : face_info) {printf("x1 %f y1 %f, x2 %f y2 %f \n", face.x1, face.y1, face.x2, face.y2);    }    auto end = chrono::steady_clock::now();    chrono::duration<double> elapsed = end - start;    cout << "all time: " << elapsed.count() << " s" << endl;/*    for (int i = 2; i < argc; i++) { string image_file = argv[i]; cout << "Processing " << image_file << endl; cv::Mat frame = cv::imread(image_file); auto start = chrono::steady_clock::now(); vector face_info; ultraface.detect(frame, face_info); for (auto face : face_info) {     cv::Point pt1(face.x1, face.y1);     cv::Point pt2(face.x2, face.y2);     cv::rectangle(frame, pt1, pt2, cv::Scalar(0, 255, 0), 2); } auto end = chrono::steady_clock::now(); chrono::duration elapsed = end - start; cout << "all time: " 
<< elapsed.count() << " s" << endl; cv::imshow("UltraFace", frame); cv::waitKey(); string result_name = "result" + to_string(i) + ".jpg"; cv::imwrite(result_name, frame);    }*/    return 0;}

UltraFace.cpp 修改如下:

int UltraFace::detect(/*cv::Mat &raw_image*/uint8_t* source, std::vector<FaceInfo> &face_list) {/*    if (raw_image.empty()) { std::cout << "image is empty ,please check!" << std::endl; return -1;    }    image_h = raw_image.rows;    image_w = raw_image.cols;    cv::Mat image;    cv::resize(raw_image, image, cv::Size(in_w, in_h));*/    image_h = 1024;//  原图的大小    image_w = 768;    ultraface_interpreter->resizeTensor(input_tensor, {1, 3, in_h, in_w});    ultraface_interpreter->resizeSession(ultraface_session);    std::shared_ptr<MNN::CV::ImageProcess> pretreat(     MNN::CV::ImageProcess::create(MNN::CV::BGR, MNN::CV::RGB, mean_vals, 3,norm_vals, 3));    pretreat->convert(/*image.data*/source, in_w, in_h, /*image.step[0]*/960, input_tensor);   // 这个960 是 因为图片 resize 320 * 3    auto start = chrono::steady_clock::now();    // run network    ultraface_interpreter->runSession(ultraface_session);    // get output data    string scores = "scores";    string boxes = "boxes";    MNN::Tensor *tensor_scores = ultraface_interpreter->getSessionOutput(ultraface_session, scores.c_str());    MNN::Tensor *tensor_boxes = ultraface_interpreter->getSessionOutput(ultraface_session, boxes.c_str());    MNN::Tensor tensor_scores_host(tensor_scores, tensor_scores->getDimensionType());    tensor_scores->copyToHostTensor(&tensor_scores_host);    MNN::Tensor tensor_boxes_host(tensor_boxes, tensor_boxes->getDimensionType());    tensor_boxes->copyToHostTensor(&tensor_boxes_host);    std::vector<FaceInfo> bbox_collection;    auto end = chrono::steady_clock::now();    chrono::duration<double> elapsed = end - start;    cout << "inference time:" << elapsed.count() << " s" << endl;    generateBBox(bbox_collection, tensor_scores, tensor_boxes);    nms(bbox_collection, face_list);    return 0;}

UltraFace.hpp 修改如下:

//#include int detect(/*cv::Mat &img*/ uint8_t* source, std::vector<FaceInfo> &face_list);

编译测试

使用命令编译:arm-linux-gnueabihf-g++ -o test main.cpp UltraFace.cpp -L../mnn/lib/ -I../mnn/include -lMNN

将 test、libMNN.so 拷贝到Allwinner V3s 测试运行。
注:opencv 的作用就是图片的rgb数据。

Allwinner V3s 使用V4l2 获取数据

参考代码:

#include "v4l2_device.h"

/* One frame buffer mmap'ed from the kernel driver. */
typedef struct {
    void *start;
    int length;
} BUFTYPE;

BUFTYPE *usr_buf;
static unsigned int n_buffer = 0;
static int tmp = 0; /* frame counter used to periodically dump a test frame */

/* Set the video capture method (mmap): request kernel buffers and map them
 * into this process. Exits (after sync+reboot — this runs on an embedded
 * appliance where a reboot is the recovery strategy) on any failure. */
int init_mmap(int fd) {
    /* request frame caches; contains the requested count */
    struct v4l2_requestbuffers reqbufs;
    memset(&reqbufs, 0, sizeof(reqbufs));
    reqbufs.count = 4; /* the number of buffers */
    reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    reqbufs.memory = V4L2_MEMORY_MMAP;
    if (-1 == ioctl(fd, VIDIOC_REQBUFS, &reqbufs)) {
        perror("Fail to ioctl 'VIDIOC_REQBUFS'");
        system("sync");
        system("reboot");
        exit(EXIT_FAILURE);
    }
    n_buffer = reqbufs.count;
    printf("n_buffer = %d\n", n_buffer);
    usr_buf = (BUFTYPE *)calloc(reqbufs.count, sizeof(BUFTYPE));
    if (usr_buf == NULL) {
        printf("Out of memory\n");
        system("sync");
        system("reboot");
        exit(-1);
    }
    /* map each kernel cache into the user process */
    for (n_buffer = 0; n_buffer < reqbufs.count; ++n_buffer) {
        /* stands for one frame */
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = n_buffer;
        /* query the kernel cache the driver allocated */
        if (-1 == ioctl(fd, VIDIOC_QUERYBUF, &buf)) {
            perror("Fail to ioctl : VIDIOC_QUERYBUF");
            system("sync");
            system("reboot");
            exit(EXIT_FAILURE);
        }
        usr_buf[n_buffer].length = buf.length;
        usr_buf[n_buffer].start = (char *)mmap(NULL, buf.length,
                                               PROT_READ | PROT_WRITE,
                                               MAP_SHARED, fd, buf.m.offset);
        if (MAP_FAILED == usr_buf[n_buffer].start) {
            perror("Fail to mmap");
            system("sync");
            system("reboot");
            exit(EXIT_FAILURE);
        }
    }
    return 0;
}

/* Open FILE_VIDEO non-blocking and select input 0. Returns the open fd. */
int open_camera(void) {
    int fd;
    struct v4l2_input inp;

    fd = open(FILE_VIDEO, O_RDWR | O_NONBLOCK, 0);
    if (fd < 0) {
        fprintf(stderr, "%s open err \n", FILE_VIDEO);
        exit(EXIT_FAILURE);
    }
    memset(&inp, 0, sizeof(inp)); /* zero-init: only .index is set below */
    inp.index = 0;
    if (-1 == ioctl(fd, VIDIOC_S_INPUT, &inp)) {
        /* BUG FIX: print the diagnostic BEFORE triggering the reboot,
         * otherwise the message may never be seen. */
        fprintf(stderr, "VIDIOC_S_INPUT \n");
        system("sync");
        system("reboot");
    }
    return fd;
}

/* Query device capabilities, print supported formats, and configure
 * width x height YVU420 capture. */
int init_camera(int fd, int width, int height) {
    struct v4l2_capability cap; /* device functions, such as video input */
    struct v4l2_format tv_fmt;  /* frame format */
    struct v4l2_fmtdesc fmtdesc; /* detail control value */
    int ret;

    /* enumerate all supported formats */
    memset(&fmtdesc, 0, sizeof(fmtdesc));
    fmtdesc.index = 0; /* the number to check */
    fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

    /* check video device driver capability */
    ret = ioctl(fd, VIDIOC_QUERYCAP, &cap);
    if (ret < 0) {
        fprintf(stderr, "fail to ioctl VIDEO_QUERYCAP \n");
        exit(EXIT_FAILURE);
    }
    /* BUG FIX: the original tested V4L2_BUF_TYPE_VIDEO_CAPTURE (a buffer-type
     * enum value) against the capability mask; it only worked because both
     * constants happen to equal 1. Use the real capability flag. */
    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
        fprintf(stderr, "The Current device is not a video capture device \n");
        exit(EXIT_FAILURE);
    }
    /* judge whether the device supports streaming i/o */
    if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
        printf("The Current device does not support streaming i/o\n");
        exit(EXIT_FAILURE);
    }
    printf("\ncamera driver name is : %s\n", cap.driver);
    printf("camera device name is : %s\n", cap.card);
    printf("camera bus information: %s\n", cap.bus_info);

    /* display the formats the device supports */
    printf("\n");
    while (ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc) != -1) {
        printf("support device %d.%s\n", fmtdesc.index + 1, fmtdesc.description);
        fmtdesc.index++;
    }
    printf("\n");

    /* set the format of the captured data */
    memset(&tv_fmt, 0, sizeof(tv_fmt)); /* zero-init before filling fields */
    tv_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* cameras must use V4L2_BUF_TYPE_VIDEO_CAPTURE */
    tv_fmt.fmt.pix.width = width;
    tv_fmt.fmt.pix.height = height;
    //tv_fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;   /*V4L2_PIX_FMT_YYUV*/
    tv_fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YVU420; /*V4L2_PIX_FMT_YYUV*/
    tv_fmt.fmt.pix.field = V4L2_FIELD_ANY; /*V4L2_FIELD_NONE*/
    if (ioctl(fd, VIDIOC_S_FMT, &tv_fmt) < 0) {
        fprintf(stderr, "VIDIOC_S_FMT set err\n");
        /* BUG FIX: original called exit(-1) BEFORE close(fd), making the
         * close unreachable. */
        close(fd);
        exit(-1);
    }
    init_mmap(fd);
    return 0;
}

/* Queue every mmap'ed buffer and start streaming. */
int start_capture(int fd) {
    unsigned int i;
    enum v4l2_buf_type type;

    /* place the kernel caches into the incoming queue */
    for (i = 0; i < n_buffer; i++) {
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        if (-1 == ioctl(fd, VIDIOC_QBUF, &buf)) {
            perror("Fail to ioctl 'VIDIOC_QBUF'");
            exit(EXIT_FAILURE);
        }
    }
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == ioctl(fd, VIDIOC_STREAMON, &type)) {
        printf("i=%d.\n", i);
        perror("VIDIOC_STREAMON");
        system("sync");
        system("reboot");
        close(fd);
        exit(EXIT_FAILURE);
    }
    return 0;
}

/* Dequeue one frame, copy it into `outbuf` (length returned via *len),
 * then requeue the buffer. Returns 1 on success, 0 if select() was
 * interrupted by a signal. Every 30th frame is also dumped to test.yuyv. */
int read_frame(int fd, unsigned char *outbuf, int *len) {
    struct v4l2_buffer buf;
    fd_set fds;
    struct timeval tv;
    int r;

    FD_ZERO(&fds);
    FD_SET(fd, &fds);
    /* timeout */
    tv.tv_sec = 2;
    tv.tv_usec = 0;
    r = select(fd + 1, &fds, NULL, NULL, &tv);
    if (-1 == r) {
        if (EINTR == errno) {
            printf("select received SIGINT \n");
            return 0;
        }
        /* BUG FIX: a select() error other than EINTR previously fell
         * through and attempted VIDIOC_DQBUF on an errored fd. */
        perror("Fail to select");
        exit(EXIT_FAILURE);
    }
    if (0 == r) {
        fprintf(stderr, "select Timeout\n");
        exit(-1);
    }

    memset(&buf, 0, sizeof(buf));
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    if (-1 == ioctl(fd, VIDIOC_DQBUF, &buf)) {
        perror("Fail to ioctl 'VIDIOC_DQBUF'");
        exit(EXIT_FAILURE);
    }
    assert(buf.index < n_buffer);

    memcpy(outbuf, usr_buf[buf.index].start, usr_buf[buf.index].length);
    *len = usr_buf[buf.index].length;

    tmp++;
    printf("index = %d \n", tmp);
    if (tmp == 30) {
        /* dump one raw frame every 30 frames for offline inspection */
        tmp = 0;
        FILE *fp = fopen("test.yuyv", "wb"); /* binary mode for raw YUV data */
        fwrite(usr_buf[buf.index].start, 1, usr_buf[buf.index].length, fp);
        fclose(fp);
    }

    if (-1 == ioctl(fd, VIDIOC_QBUF, &buf)) {
        perror("Fail to ioctl 'VIDIOC_QBUF'");
        exit(EXIT_FAILURE);
    }
    return 1;
}

/* Stop the capture stream. */
void stop_capture(int fd) {
    enum v4l2_buf_type type;
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == ioctl(fd, VIDIOC_STREAMOFF, &type)) {
        perror("Fail to ioctl 'VIDIOC_STREAMOFF'");
        exit(EXIT_FAILURE);
    }
}

/* Unmap all buffers, free bookkeeping, and close the device fd. */
void close_camera_device(int fd) {
    unsigned int i;
    for (i = 0; i < n_buffer; i++) {
        if (-1 == munmap(usr_buf[i].start, usr_buf[i].length)) {
            exit(-1);
        }
    }
    free(usr_buf);
    if (-1 == close(fd)) {
        perror("Fail to close fd");
        exit(EXIT_FAILURE);
    }
}

main.cpp

int main() {pthread_t t1,t2;states = SysStatesRead();if(states == 0) {system("aplay idla.wav");}int err = pthread_create(&t1, NULL, GpioProcess, NULL);if (err != 0) {printf("GpioProcess thread_create Failed :%s\n", strerror(err));}TP recTime_s = getTime();pModel = GtiCreateModel(recog_modelFile);TP recTime_e = getTime();printf("create model time diff %lld\n", getTimeDiff(recTime_s, recTime_e));err = pthread_create(&t2, NULL, VideoPorcess, NULL);if (err != 0) {printf("VideoPorcess thread_create Failed :%s\n", strerror(err));}int height = 600;int width = 800;int fd;int len = 0;unsigned char *cam_buf;int index = 0;int fps = 30;unsigned int tick_gap = 1000 / fps;uint32_t now = 0;uint32_t last_update = 0;cam_buf = (unsigned char*) malloc(1024 * 1024 * 3);memset(cam_buf, 0, 1024 * 1024 * 3);if (signal(SIGINT, sig_user) == SIG_ERR) {perror("catch SIGINT err");}fd = open_camera();if (fd > 0) {printf("Open Camera succ\n");}if (0 == init_camera(fd, width, height))printf("Init camera succ\n");usleep(100);start_capture(fd);printf("inited \n");runflag = 1;while (runflag) {last_update = GetTime();//printf("------------%ld \n", last_update);read_frame(fd, cam_buf, &len);now = GetTime();printf("++++++++++++%ld \n", now - last_update);index ++;if (states == 1) {memcpy(Cam_buf, cam_buf, len);continue;}if (index == 14) {index = 0;//printf(">>>> d\n");memcpy(Cam_buf, cam_buf, len);//printf(">>>> sd\n");}}free(cam_buf);stop_capture(fd);close_camera_device(fd);GtiDestroyModel(pModel);return 0;}

这样就集成实现了在荔枝派上面运行人脸检测的功能。