发帖
3 0 0

【安信可小安派BW21-CBV-Kit】久坐检测器

1055173307
论坛元老

14

主题

612

回帖

4209

积分

论坛元老

积分
4209
小安派·BW21-CBV-Kit 59 3 3 天前

零、性能测评

之前测试了BW21的人脸检测例程,感觉对人脸的识别还是十分的准确的。这次在例程里找到了另一个人脸检测的例程进行测试,相比于之前的例程多实现了不同人脸的识别功能,并且可以标记存储人脸记录。

实际运行效果:(真的把自己命名成USER1)

image.png

image.png

人脸识别功能整体使用体验还是很棒的,识别的帧率按串口输出看大概是十帧左右,但RTSP的视频流看起来挺流畅的,没有卡顿的感觉,只是稍微有点小延迟,但是并不影响实际的使用。

而且识别结果准确率和容错都很棒啊,我是在正面录的人脸,实际测试摄像头倾斜到三四十度斜视的时候还能准确识别。

一、功能需求

因为之前也用过安信可的雷达产品,寻思可以试着做个久坐检测的功能(打工牛马持续久坐对下肢的影响还是挺大的,建议大伙坐半小时到一小时尽量起来活动几分钟),但是使用雷达的话有可能对附近的环境会误识别,所以考虑可以用BW21人脸识别实现精准的人脸识别久坐检测并提醒。

计划实现功能:

  • 标记存储要检测的人脸
  • 实现对检测人脸的久坐检测
  • 避免因为转头/短暂站立(丢失检测人脸)导致的久坐时长误识别
  • 久坐后提醒(LED)
  • 久坐信息上传实现设备联动(还没实现)

二、功能实现

以下就是本次小diy的源码。

  • 通过 NTPClient获取实时的时间计算久坐时长

    • NTPClient timeClient(ntpUDP, "europe.pool.ntp.org", 28800, 60000);
  • 使用LED提醒久坐

  • 通过几个全局变量记录久坐或检测丢失的开始时间

  • 宏定义配置久坐时间和活动时间(用于刷新久坐开始时间)

    当前实现的功能就是当被检测到持续半小时时就会亮起蓝色LED提醒站立活动一会,活动时间超过一分钟后就会刷新久坐检测开始时间,当人脸重新被检测到时开始新一轮的久坐检测

/*

 Example guide:
 https://www.amebaiot.com/en/amebapro2-arduino-neuralnework-face-recognition/

 Face registration commands
 --------------------------
 Point the camera at a target face and enter the following commands into the serial monitor,
 Register face:                       "REG={Name}"  Ensure that there is only one face detected in frame
 Remove face:                         "DEL={Name}"  Remove a registered face
 Reset registered faces:              "RESET"       Forget all previously registered faces
 Backup registered faces to flash:    "BACKUP"      Save registered faces to flash
 Restore registered faces from flash: "RESTORE"     Load registered faces from flash

 NN Model Selection
 -------------------
 Select Neural Network(NN) task and models using modelSelect(nntask, objdetmodel, facedetmodel, facerecogmodel).
 Replace with NA_MODEL if they are not necessary for your selected NN Task.

 NN task
 =======
 OBJECT_DETECTION/ FACE_DETECTION/ FACE_RECOGNITION

 Models
 =======
 YOLOv3 model         DEFAULT_YOLOV3TINY   / CUSTOMIZED_YOLOV3TINY
 YOLOv4 model         DEFAULT_YOLOV4TINY   / CUSTOMIZED_YOLOV4TINY
 YOLOv7 model         DEFAULT_YOLOV7TINY   / CUSTOMIZED_YOLOV7TINY
 SCRFD model          DEFAULT_SCRFD        / CUSTOMIZED_SCRFD
 MobileFaceNet model  DEFAULT_MOBILEFACENET/ CUSTOMIZED_MOBILEFACENET
 No model             NA_MODEL

*/

#include <NTPClient.h>
#include "WiFi.h"
#include <WiFiUdp.h>
#include "StreamIO.h"
#include "VideoStream.h"
#include "RTSP.h"
#include "NNFaceDetectionRecognition.h"
#include "VideoStreamOverlay.h"

#define CHANNEL   0
#define CHANNELNN 3

// Customised resolution for NN
#define NNWIDTH  576
#define NNHEIGHT 320

// Sedentary-detection thresholds, in seconds.
#define RECOGNITION_MAX 1800    // 30 min of continuous detection -> sedentary alert (blue LED)
#define LOSS_MAX        60      // face lost/unknown for > 60 s -> counts as a real break

VideoSetting config(VIDEO_FHD, 30, VIDEO_H264, 0);
VideoSetting configNN(NNWIDTH, NNHEIGHT, 10, VIDEO_RGB, 0);
NNFaceDetectionRecognition facerecog;
RTSP rtsp;
StreamIO videoStreamer(1, 1);
StreamIO videoStreamerFDFR(1, 1);
StreamIO videoStreamerRGBFD(1, 1);

// WARNING: credentials are hard-coded in source; strip them before publishing this sketch.
char ssid[] = "XM-15959385520";    // your network SSID (name)
char pass[] = "15959385520a";      // your network password
int status = WL_IDLE_STATUS;

// FIX: was `char* lastRecoName[100]` — an array of 100 char POINTERS, which does not
// compile against its uses (`strcmp(name, lastRecoName)`, `strcpy(lastRecoName, ...)`,
// `lastRecoName = ""`). It must be a plain character buffer holding the last
// recognised face name (empty string = no face currently tracked).
char lastRecoName[100] = {0};
unsigned long recognitionTime = 0;    // epoch (s) when continuous detection began; 0 = not tracking
unsigned long lossTime = 0;           // epoch (s) when the face was first lost; 0 = not lost
bool isSedentary = false;             // true while the sedentary alert LED is lit

IPAddress ip;
int rtsp_portnum;

WiFiUDP ntpUDP;
// UTC+8 offset (28800 s), refresh every 60 s. NOTE(review): a nearer pool such as
// "cn.pool.ntp.org" would likely respond faster than the Europe pool — confirm before changing.
NTPClient timeClient(ntpUDP, "europe.pool.ntp.org", 28800, 60000);

void setup()
{
    // Serial console: command input (REG/DEL/RESET/BACKUP/RESTORE) and status logging.
    Serial.begin(115200);
    // LED_B is the sedentary-alert indicator; LED_G is configured but not driven elsewhere.
    pinMode(LED_B, OUTPUT);
    pinMode(LED_G, OUTPUT);

    digitalWrite(LED_B, LOW);
    digitalWrite(LED_G, LOW);

    // Attempt to connect to Wifi network:
    // Blocks until WiFi.begin() reports WL_CONNECTED, retrying every 2 s.
    while (status != WL_CONNECTED) {
        Serial.print("Attempting to connect to WPA SSID: ");
        Serial.println(ssid);
        status = WiFi.begin(ssid, pass);

        // wait 2 seconds for connection:
        delay(2000);
    }

    ip = WiFi.localIP();

    // Start the NTP client used to timestamp sitting/loss periods in FRPostProcess.
    timeClient.begin();

    // Configure camera video channels with video format information
    // Adjust the bitrate based on your WiFi network quality
    config.setBitrate(2 * 1024 * 1024);    // Recommend to use 2Mbps for RTSP streaming to prevent network congestion
    Camera.configVideoChannel(CHANNEL, config);
    Camera.configVideoChannel(CHANNELNN, configNN);
    Camera.videoInit();

    // Configure RTSP with corresponding video format information
    rtsp.configVideo(config);
    rtsp.begin();
    rtsp_portnum = rtsp.getPort();

    // Configure Face Recognition model
    // Select Neural Network(NN) task and models
    facerecog.configVideo(configNN);
    facerecog.modelSelect(FACE_RECOGNITION, NA_MODEL, DEFAULT_SCRFD, DEFAULT_MOBILEFACENET);
    facerecog.begin();
    // FRPostProcess is invoked for every batch of recognition results; it holds
    // the whole sedentary-detection state machine.
    facerecog.setResultCallback(FRPostProcess);

    // Configure StreamIO object to stream data from video channel to RTSP
    videoStreamer.registerInput(Camera.getStream(CHANNEL));
    videoStreamer.registerOutput(rtsp);
    if (videoStreamer.begin() != 0) {
        Serial.println("StreamIO link start failed");
    }
    // Start data stream from video channel
    Camera.channelBegin(CHANNEL);

    // Configure StreamIO object to stream data from RGB video channel to face detection
    videoStreamerRGBFD.registerInput(Camera.getStream(CHANNELNN));
    // No-argument calls use the library defaults — TODO confirm against StreamIO docs.
    videoStreamerRGBFD.setStackSize();
    videoStreamerRGBFD.setTaskPriority();
    videoStreamerRGBFD.registerOutput(facerecog);
    if (videoStreamerRGBFD.begin() != 0) {
        Serial.println("StreamIO link start failed");
    }

    // Start video channel for NN
    Camera.channelBegin(CHANNELNN);

    // Start OSD drawing on RTSP video channel
    OSD.configVideo(CHANNEL, config);
    OSD.begin();

    // Reload any faces previously saved to flash with the "BACKUP" command.
    facerecog.restoreRegisteredFace();
}

void loop()
{
    if (Serial.available() > 0) {
        String input = Serial.readString();
        input.trim();

        if (input.startsWith(String("REG="))) {
            String name = input.substring(4);
            facerecog.registerFace(name);
        } else if (input.startsWith(String("DEL="))) {
            String name = input.substring(4);
            facerecog.removeFace(name);
        } else if (input.startsWith(String("RESET"))) {
            facerecog.resetRegisteredFace();
        } else if (input.startsWith(String("BACKUP"))) {
            facerecog.backupRegisteredFace();
        } else if (input.startsWith(String("RESTORE"))) {
            facerecog.restoreRegisteredFace();
        }
    }

    delay(2000);
    OSD.createBitmap(CHANNEL);
    OSD.update(CHANNEL);
}

// User callback function for post processing of face recognition results.
//
// Sedentary-detection state machine, driven by each batch of results:
//  - the same registered face seen continuously for RECOGNITION_MAX seconds
//    -> sedentary: blue LED on;
//  - face unknown / changed / absent for longer than LOSS_MAX seconds
//    -> counted as a genuine break: LED off, sitting timer restarts.
// Also draws a bounding box and name label on the RTSP stream for every face.
//
// NOTE: relies on the global declaration being `char lastRecoName[100]`
// (a character buffer, not an array of pointers).

// Mark the tracked face as lost. Starts the loss timer on the first call; once
// the face has been gone for more than LOSS_MAX seconds the break is considered
// genuine: clear the sedentary state and restart the sitting timer.
static void handleFaceLoss()
{
    timeClient.update();
    if (lossTime == 0) {
        lossTime = timeClient.getEpochTime();
    } else if ((timeClient.getEpochTime() - lossTime) > LOSS_MAX) {
        isSedentary = false;
        digitalWrite(LED_B, LOW);
        recognitionTime = 0;    // next detection begins a fresh sitting period
    }
}

void FRPostProcess(std::vector<FaceRecognitionResult> results)
{
    uint16_t im_h = config.height();
    uint16_t im_w = config.width();

    OSD.createBitmap(CHANNEL);

    if (facerecog.getResultCount() > 0) {
        for (int i = 0; i < facerecog.getResultCount(); i++) {
            FaceRecognitionResult item = results[i];
            // Result coordinates are floats ranging from 0.00 to 1.00;
            // multiply with the RTSP resolution to get coordinates in pixels.
            int xmin = (int)(item.xMin() * im_w);
            int xmax = (int)(item.xMax() * im_w);
            int ymin = (int)(item.yMin() * im_h);
            int ymax = (int)(item.yMax() * im_h);

            uint32_t osd_color;
            if (String(item.name()) == String("unknown")) {
                // Unregistered face: treat the tracked person as absent.
                osd_color = OSD_COLOR_RED;
                handleFaceLoss();
            } else {
                osd_color = OSD_COLOR_GREEN;
                if (strcmp(item.name(), lastRecoName) == 0) {
                    // Same registered face still present.
                    timeClient.update();
                    // FIX: a brief loss (< LOSS_MAX) previously left lossTime set,
                    // so the NEXT glance away reset the timers immediately.
                    // Re-detection must cancel any pending loss.
                    lossTime = 0;
                    if (recognitionTime == 0) {
                        recognitionTime = timeClient.getEpochTime();
                    } else if ((timeClient.getEpochTime() - recognitionTime) > RECOGNITION_MAX) {
                        isSedentary = true;
                        digitalWrite(LED_B, HIGH);    // sedentary alert: time to stand up
                    }
                } else {
                    // A different registered face: the previous target is gone.
                    // FIX: bounded copy replaces strcpy() to protect the 100-byte buffer.
                    strncpy(lastRecoName, item.name(), sizeof(lastRecoName) - 1);
                    lastRecoName[sizeof(lastRecoName) - 1] = '\0';
                    handleFaceLoss();
                }
            }

            // Draw boundary box
            OSD.drawRect(CHANNEL, xmin, ymin, xmax, ymax, 3, osd_color);

            // Print identification text above boundary box
            char text_str[40];
            snprintf(text_str, sizeof(text_str), "Face:%s", item.name());
            OSD.drawText(CHANNEL, xmin, ymin - OSD.getTextHeight(CHANNEL), text_str, osd_color);
        }
    } else {
        // No face detected at all = tracked person absent.
        // FIX: `lastRecoName = ""` does not compile for an array; clear in place.
        lastRecoName[0] = '\0';
        handleFaceLoss();
    }
    OSD.update(CHANNEL);
}

提醒亮起时效果:

image.png

image.png

──── 0人觉得很赞 ────

使用道具 举报

建议附上效果视频~

是不是准备安装到马桶旁边

昨天 09:05
HaydenHu 发表于 2025-3-24 16:28
是不是准备安装到马桶旁边

也是个很棒的思路了
您需要登录后才可以回帖 立即登录
高级模式
返回
统计信息
  • 会员数: 28167 个
  • 话题数: 39946 篇