Image Capture with ESP32-CAM and OpenCV
I think this is a cheap and fairly reliable way to capture images. It cannot output a video stream yet, because I have not figured out how to speed up the transfer…
Getting to the point: an ESP32-CAM module costs roughly 25 RMB on Taobao (not the M5STACK version). My transfer scheme is to capture an image on the ESP32, encode it as a hexadecimal string, and publish it to an MQTT broker; a client on the PC then downloads the hex characters, converts them back to binary, decodes them into an image with OpenCV, and displays it.
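On the PC side the whole idea boils down to a few lines. Here is a minimal sketch of just the decode step, assuming the hex text of one frame has already been saved to a file called frame.hex (the file name is only for illustration; the real program later in this post receives the hex over MQTT):

import binascii

import cv2
import numpy as np

# Hex text of one JPEG frame, here loaded from a hypothetical dump file.
hex_payload = open("frame.hex").read().strip()

jpeg_bytes = binascii.a2b_hex(hex_payload)        # hex characters -> raw JPEG bytes
buf = np.frombuffer(jpeg_bytes, dtype=np.uint8)   # wrap the bytes for OpenCV
img = cv2.imdecode(buf, cv2.IMREAD_COLOR)         # decode the JPEG into a BGR image

if img is not None:
    cv2.imshow("frame", img)
    cv2.waitKey(0)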
The ESP32 is programmed with the Arduino IDE; on the PC a small Python program reads the images.
Python needs OpenCV installed. In a command prompt, bash, or PowerShell, run pip install opencv-contrib-python. I also recommend switching pip to the Tsinghua mirror inside China so the download is faster.
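A quick way to confirm the install worked (a generic check, nothing project-specific):

import cv2

# If this prints a version string such as 4.x.x, OpenCV is installed correctly.
print(cv2.__version__)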
#include <Arduino.h>
#include <esp_camera.h>
#include <WiFi.h>
#include <WiFiClient.h>
#include "PubSubClient.h"
#include "CamConfig.h"
WiFiClient Wclient;
PubSubClient client(Wclient);

const char *hostA = "esp32";
const char *ssid = "***********";
const char *password = "***********";
const char *mqtt_server = "************";

boolean shotflag = false;       // set by the MQTT callback when a shot is requested
boolean WiFiDisconnect = false; // set by the WiFi event handler when the connection drops
String msg;                     // accumulates the hex characters of the current chunk
int timeCount = 0;              // number of chunks published for the current frame
void callback(char *topic, byte *message, unsigned int length);
void getimg();
void reconnect();
void setupCamera();
void WiFiEvent(WiFiEvent_t event);
void setup()
{
    Serial.begin(115200);

    // Connect to WiFi before anything else.
    WiFi.begin(ssid, password);
    while (WiFi.status() != WL_CONNECTED)
    {
        delay(500);
    }
    Serial.println("WiFi connected");
    Serial.println(WiFi.localIP());
    WiFi.onEvent(WiFiEvent);

    // Point the MQTT client at the broker and register the control-message callback.
    client.setServer(mqtt_server, 1883);
    client.setCallback(callback);

    setupCamera();
    Serial.println("Ready");
}

void loop()
{
    reconnect();
    client.loop();
    if (shotflag == true)
    {
        // The callback raised the flag: grab one frame and publish it.
        getimg();
        shotflag = false;
    }
}
void getimg()
{
    camera_fb_t *fb = esp_camera_fb_get();
    if (fb)
    {
        Serial.printf("width: %d, height: %d, buf: %p, len: %u\n", fb->width, fb->height, (void *)fb->buf, fb->len);
        char data[4104];
        for (int i = 0; i < fb->len; i++)
        {
            // Append the current byte as two hex characters.
            sprintf(data, "%02X", *(fb->buf + i));
            msg += data;
            // Publish the frame in 4096-character chunks so a single message never gets too large.
            if (msg.length() == 4096)
            {
                timeCount += 1;
                client.beginPublish("img", msg.length(), 0);
                client.print(msg);
                client.endPublish();
                msg = "";
            }
        }
        // Send whatever is left after the last full chunk.
        if (msg.length() > 0)
        {
            client.beginPublish("img", msg.length(), 0);
            client.print(msg);
            client.endPublish();
            msg = "";
        }
        // A lone "1" tells the receiver the frame is complete.
        client.publish("img", "1");
        timeCount = 0;
        esp_camera_fb_return(fb);
    }
}
void callback(char *topic, byte *payload, unsigned int length)
{
    // A "1" on the CAMcontrol topic requests a shot, a "0" cancels it.
    if ((char)payload[0] == '1')
    {
        shotflag = true;
    }
    if ((char)payload[0] == '0')
    {
        shotflag = false;
    }
}
void reconnect()
{
    if (WiFiDisconnect)
    {
        WiFi.reconnect();
    }
    // Block until the MQTT connection is back, then re-subscribe to the control topic.
    while (!client.connected())
    {
        client.connect("EspClient");
        client.subscribe("CAMcontrol");
    }
}
void setupCamera()
{
    // Pin assignments come from CamConfig.h (the standard ESP32-CAM pin map).
    const camera_config_t config = {
        .pin_pwdn = PWDN_GPIO_NUM,
        .pin_reset = RESET_GPIO_NUM,
        .pin_xclk = XCLK_GPIO_NUM,
        .pin_sscb_sda = SIOD_GPIO_NUM,
        .pin_sscb_scl = SIOC_GPIO_NUM,
        .pin_d7 = Y9_GPIO_NUM,
        .pin_d6 = Y8_GPIO_NUM,
        .pin_d5 = Y7_GPIO_NUM,
        .pin_d4 = Y6_GPIO_NUM,
        .pin_d3 = Y5_GPIO_NUM,
        .pin_d2 = Y4_GPIO_NUM,
        .pin_d1 = Y3_GPIO_NUM,
        .pin_d0 = Y2_GPIO_NUM,
        .pin_vsync = VSYNC_GPIO_NUM,
        .pin_href = HREF_GPIO_NUM,
        .pin_pclk = PCLK_GPIO_NUM,
        .xclk_freq_hz = 20000000,
        .ledc_timer = LEDC_TIMER_0,
        .ledc_channel = LEDC_CHANNEL_0,
        .pixel_format = PIXFORMAT_JPEG,
        .frame_size = FRAMESIZE_SVGA,
        .jpeg_quality = 10,
        .fb_count = 2,
    };
    esp_err_t err = esp_camera_init(&config);
    Serial.printf("esp_camera_init: 0x%x\n", err);
    if (err != ESP_OK)
    {
        return;
    }
    // Bump the frame size after init; the sensor handle is only valid once init succeeds.
    sensor_t *s = esp_camera_sensor_get();
    s->set_framesize(s, FRAMESIZE_XGA);
}
void WiFiEvent(WiFiEvent_t event)
{
    switch (event)
    {
    case SYSTEM_EVENT_STA_DISCONNECTED:
        WiFiDisconnect = true;
        break;
    case SYSTEM_EVENT_STA_CONNECTED:
        WiFiDisconnect = false;
        break;
    default:
        break;
    }
}
That is the ESP32 side of the program. I moved the camera settings into a separate header (CamConfig.h); it only contains the pin macro definitions, and if you are also using an ESP32-CAM you can find them in the Arduino IDE example code. Before that you just need to install the ESP32 board package, because the Arduino IDE does not ship with the ESP32 board definitions.
void getimg()
{
    camera_fb_t *fb = esp_camera_fb_get();
    if (fb)
    {
        Serial.printf("width: %d, height: %d, buf: %p, len: %u\n", fb->width, fb->height, (void *)fb->buf, fb->len);
        char data[4104];
        for (int i = 0; i < fb->len; i++)
        {
            // Append the current byte as two hex characters.
            sprintf(data, "%02X", *(fb->buf + i));
            msg += data;
            // Publish the frame in 4096-character chunks so a single message never gets too large.
            if (msg.length() == 4096)
            {
                timeCount += 1;
                client.beginPublish("img", msg.length(), 0);
                client.print(msg);
                client.endPublish();
                msg = "";
            }
        }
        // Send whatever is left after the last full chunk.
        if (msg.length() > 0)
        {
            client.beginPublish("img", msg.length(), 0);
            client.print(msg);
            client.endPublish();
            msg = "";
        }
        // A lone "1" tells the receiver the frame is complete.
        client.publish("img", "1");
        timeCount = 0;
        esp_camera_fb_return(fb);
    }
}
The getimg routine is the part that sends the image data. The image has to be sent in chunks, otherwise the ESP32 crashes; at the same time the data[4104] buffer is already close to the limit of what the ESP32 can spare here, and making it any larger risks overflowing the stack. The chunked sending took me a long time to figure out and a fair amount of reading, so I was quite happy when it finally worked.
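For a rough sense of scale (my own back-of-the-envelope numbers, not measurements from this project): every byte becomes two hex characters and each MQTT message carries 4096 characters, so a JPEG of a few tens of kilobytes turns into a few dozen messages per frame:

def chunks_per_frame(jpeg_bytes, chunk_chars=4096):
    # Every byte is sent as two hex characters; the final "1" marker is one extra message.
    hex_chars = jpeg_bytes * 2
    return -(-hex_chars // chunk_chars) + 1   # ceiling division

print(chunks_per_frame(40000))   # a ~40 KB JPEG needs about 21 messages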
Next is the Python receiving side. To receive the messages we first need an MQTT broker; I use mosquitto here because it is convenient, and the choice of broker software makes no difference to the client code.
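Before wiring the camera in, it is worth checking that the broker is reachable at all. Here is a minimal loopback test, assuming the paho-mqtt 1.x callback API used below and with the broker address filled in by hand (the "ping" topic is just a throwaway name):

import paho.mqtt.client as mqtt

BROKER = "192.168.1.10"   # replace with your broker's address

def on_connect(client, userdata, flags, rc):
    print("connect result:", rc)   # 0 means the broker accepted the connection
    client.subscribe("ping")
    client.publish("ping", "hello")

def on_message(client, userdata, message):
    # Seeing our own message come back means publish and subscribe both work.
    print("echo:", message.payload.decode())

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(BROKER, 1883, 60)
client.loop_forever()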
import paho.mqtt.client as mqtt
import time
import cv2.cv2 as cv2
import numpy as np
import binascii

mqttclient = mqtt.Client()
HOST = "*******"
PORT = 1883


class GlobalValue:
    # Hex chunks of the frame currently being received, in arrival order.
    data = [""]


def on_client_connect(client, userdata, flags, rc):
    if rc == 0:
        print('connected!!!')
    elif rc == 3:
        print("Server can't be used")
    else:
        print('Other errors')


def connect():
    mqttclient.connect(HOST, PORT, 60)


def publish(topic, payload, qos):
    mqttclient.publish(topic, payload, qos)


def on_rec(client, userdata, message):
    # Every message on "img" is either a hex chunk or the "1" end-of-frame marker.
    if message.topic == "img":
        GlobalValue.data.append(str(message.payload.decode()))


def subscribe(topic, qos):
    mqttclient.subscribe(topic, qos)
def main():
    mqttclient.on_message = on_rec
    mqttclient.on_connect = on_client_connect
    connect()
    subscribe('img', 0)
    publish('CAMcontrol', "1", 0)       # ask the ESP32 for the first frame
    imgcount = 0
    mqttclient.loop_start()             # handle the network traffic on a background thread
    while True:
        # The single "1" published by the ESP32 marks the end of a frame.
        if GlobalValue.data[len(GlobalValue.data) - 1] == "1":
            # Join the hex chunks (everything except the marker) and decode the JPEG.
            data1 = "".join(GlobalValue.data[0:(len(GlobalValue.data) - 1)])
            GlobalValue.data = [""]
            data1 = binascii.a2b_hex(data1)
            data1 = np.frombuffer(data1, dtype=np.uint8)
            img = cv2.imdecode(data1, 1)
            if img is not None:         # a dropped chunk makes the JPEG undecodable
                cv2.imshow("Door", img)
                cv2.waitKey(1)
                imgcount += 1
            publish('CAMcontrol', "1", 0)   # request the next frame
        time.sleep(0.01)                    # don't spin at full speed between messages


if __name__ == '__main__':
    main()
The interesting parts here are the callback functions and the global list. Python is quite comfortable to write, at least for a beginner like me, but in my view it also has two problems that are harder to deal with. The first is global variables: the scoping rules can be confusing. My workaround is to define a class at the top and put the globals inside it as class attributes, which I find much more convenient and easier to manage, for example:
class GlobalValue:
    data = [""]   # like this
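The practical difference: rebinding a plain module-level variable from inside a function needs a global statement, while the class attribute can be updated from anywhere just by qualifying the name (a small illustration, not code from this project):

counter = 0

class State:
    counter = 0

def bump_plain():
    global counter       # without this, the assignment below would create a local variable
    counter += 1

def bump_class():
    State.counter += 1   # no global statement needed; the class name does the scoping

bump_plain()
bump_class()
print(counter, State.counter)   # 1 1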
The second problem is the way Python is interpreted, top to bottom, which made callbacks hard for me to write at first: it is very easy to end up with an "unresolved reference". The only cure is to pay attention and fix it when it happens.
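The typical trap is referring to a callback at module level before the interpreter has executed its def; a toy example of the safe ordering (separate from the MQTT code above):

import paho.mqtt.client as mqtt

client = mqtt.Client()

# Writing `client.on_message = on_rec` up here would raise
# "NameError: name 'on_rec' is not defined", because the def below
# has not run yet when the interpreter reaches this line.

def on_rec(client, userdata, message):
    print(message.payload)

# By this point the name exists, so the assignment is safe.
client.on_message = on_rec

Doing the assignment inside main(), as the program above does, sidesteps the problem for the same reason: by the time main() runs, every top-level def has already been executed.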
The import cv2.cv2 as cv2 at the top is written that way because with the normal import, pylint cannot show code completion; it seems to be caused by OpenCV's package layout, where an extra cv2 folder keeps pylint from finding the module.
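That trick depends on how the particular OpenCV wheel is packaged; on builds without the extra cv2 sub-module the import fails, so a tolerant variant (my own habit, not part of the original code) is:

try:
    import cv2.cv2 as cv2   # keeps pylint's completion working on some OpenCV builds
except ImportError:
    import cv2               # newer wheels expose everything at the top level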
That is all for now. The remaining tasks are object detection and tracking, with a deadline of next Monday.