Requirement: display an RTSP video stream in an OpenHarmony UI.
Option 1: integrate FFmpeg into the OpenHarmony app (to be covered in a later update).
Option 2: integrate OpenCV into the OpenHarmony app (essentially the same as option 1, since OpenCV bundles FFmpeg; to be covered in a later update).
Option 3: pull the stream on the server side, convert each frame to Base64, push it to the front end over WebSocket, and display it in the OpenHarmony app through an embedded Web component that loads an HTML page.
For setting up the Crow environment, see the earlier post "Crow, a C++ back-end development library similar to Spring Boot, Flask, etc.".
I could not find ready-made code for this online, so I wrote my own and am sharing it here.
//frame_generator.h
#include <iostream>
#include <string>
#include <vector>
#include <thread>
#include <chrono>
#include <opencv2/opencv.hpp>

// Generator that produces video frames from the RTSP stream
class FrameGenerator {
public:
    bool isOpen = false;
    std::string _url;

    FrameGenerator(const std::string& rtsp_url) : _url(rtsp_url), cap(rtsp_url) {
        if (!cap.isOpened()) {
            reconnect();
        }
        isOpen = true;
    }

    // Keep retrying until the RTSP stream can be opened
    void reconnect() {
        while (!cap.isOpened()) {
            std::cout << "reconnect rtsp ." << std::endl;
            isOpen = false;
            cap.open(_url);
            std::this_thread::sleep_for(std::chrono::seconds(10));
        }
        isOpen = true; // mark the stream as usable again once it reopens
    }
    const std::string base64_chars =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        "abcdefghijklmnopqrstuvwxyz"
        "0123456789+/";

    // Base64-encode a binary buffer held in a std::string
    std::string base64_encode(const std::string &input) {
        std::string encoded;
        size_t i = 0;
        uint8_t byte3[3] = {0};
        uint8_t byte4[4] = {0};
        // Process the input three bytes at a time
        for (char byte : input) {
            byte3[i++] = static_cast<uint8_t>(byte); // treat each char as a raw byte
            if (i == 3) {
                byte4[0] = (byte3[0] & 0xfc) >> 2;
                byte4[1] = ((byte3[0] & 0x03) << 4) | ((byte3[1] & 0xf0) >> 4);
                byte4[2] = ((byte3[1] & 0x0f) << 2) | ((byte3[2] & 0xc0) >> 6);
                byte4[3] = byte3[2] & 0x3f;
                // Append the four encoded characters to the result
                for (int k = 0; k < 4; k++) {
                    encoded += base64_chars[byte4[k]];
                }
                i = 0;
            }
        }
        // Handle the remaining one or two bytes, if any
        if (i != 0) {
            for (size_t k = i; k < 3; k++) {
                byte3[k] = 0; // zero-pad the missing bytes
            }
            // Encode the padded group the same way as above
            byte4[0] = (byte3[0] & 0xfc) >> 2;
            byte4[1] = ((byte3[0] & 0x03) << 4) | ((byte3[1] & 0xf0) >> 4);
            byte4[2] = ((byte3[1] & 0x0f) << 2) | ((byte3[2] & 0xc0) >> 6);
            // Append only the characters that carry data
            for (size_t k = 0; k < i + 1; k++) {
                encoded += base64_chars[byte4[k]];
            }
            // Pad with '=' up to a multiple of 4
            while (i++ < 3) {
                encoded += '=';
            }
        }
        std::cout << "base64 size:" << encoded.size() << std::endl;
        return encoded;
    }
    // Grab one frame, JPEG-encode it, and return it as a Base64 string
    std::string getFrame() {
        cv::Mat frame;
        if (!cap.isOpened()) {
            cap.open(_url);
            std::this_thread::sleep_for(std::chrono::seconds(10));
            // throw std::runtime_error("Error opening video stream or file");
        }
        cap >> frame;
        if (frame.empty()) {
            std::cerr << "Error capturing frame" << std::endl;
            return ""; // imencode would throw on an empty Mat, so bail out early
        }
        std::vector<uchar> buffer;
        cv::imencode(".jpg", frame, buffer);
        std::string _f = std::string(buffer.begin(), buffer.end());
        return base64_encode(_f);
    }

private:
    cv::VideoCapture cap;
};
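As a quick sanity check, the header above can be exercised from a small standalone program. The sketch below is illustrative only and not part of the original project: the RTSP URL is a placeholder (if no server is reachable there, the constructor will keep retrying), and "Man" is a known Base64 test vector that should encode to "TWFu".
//test_frame_generator.cpp (illustrative sketch)
#include "frame_generator.h"

int main() {
    FrameGenerator gen("rtsp://127.0.0.1:8554/main.264"); // placeholder URL, point it at a reachable stream
    std::cout << "base64(\"Man\") = " << gen.base64_encode("Man") << std::endl; // expect "TWFu"
    std::string frame = gen.getFrame();
    std::cout << "first frame base64 length: " << frame.size() << std::endl;
    return 0;
}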
//crow websocket
std::mutex mtx2;
std::unordered_set<crow::websocket::connection *> users2;
// OpenCV pulls the RTSP stream; frames are pushed to clients over the websocket
CROW_WEBSOCKET_ROUTE(app, "/video")
    .onopen([&](crow::websocket::connection &conn)
    {
        std::cout << "New websocket connection from " << conn.get_remote_ip() << std::endl;
        std::lock_guard<std::mutex> lock(mtx2);
        users2.insert(&conn); // add the new client to the set
    })
    .onclose([&](crow::websocket::connection &conn, const std::string &reason)
    {
        std::cout << "Websocket connection closed: " << reason << std::endl;
        std::lock_guard<std::mutex> lock(mtx2);
        users2.erase(&conn); // remove the client from the set
    })
    .onmessage([&](crow::websocket::connection &conn, const std::string &data, bool is_binary)
    {
        // no client-to-server messages are needed for this demo
    });
std::thread videoMessageThread([&]()
{
    FrameGenerator generator("rtsp://XXX:8554/main.264");
    while (true)
    {
        if (!generator.isOpen)
        {
            generator.reconnect();
            continue;
        }
        // Grab one frame per iteration and broadcast it to every connected client
        std::string frame = generator.getFrame();
        if (frame.empty())
        {
            continue;
        }
        std::lock_guard<std::mutex> lock(mtx2);
        for (auto user : users2)
        {
            user->send_text(frame);
        }
    }
});
videoMessageThread.detach();
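For context, here is a minimal sketch of how the route and the broadcast thread could sit inside a complete Crow program. The file name and port are assumptions (8080 is taken from the ws:// URL used by the page below); the route and thread bodies are exactly the ones shown above.
//main.cpp (minimal sketch)
#include "crow.h"
#include "frame_generator.h"
#include <mutex>
#include <unordered_set>
#include <thread>

int main() {
    crow::SimpleApp app;

    std::mutex mtx2;
    std::unordered_set<crow::websocket::connection *> users2;

    // ... CROW_WEBSOCKET_ROUTE(app, "/video") and videoMessageThread as shown above ...

    app.port(8080).multithreaded().run(); // 8080 matches ws://XXX:8080/video used by the front end
    return 0;
}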
//front-end receiver (the HTML page loaded by the Web component embedded in the OpenHarmony app)
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>WebSocket Video Stream</title>
<style>
html, body {
margin: 0;
padding: 0;
height: 100%;
overflow: hidden; /* hide anything that spills outside the viewport */
}
canvas {
display: block;
background-color: #f0f0f0; /* optional: a background colour so the canvas is visible before any frame arrives */
width: 100vw; /* use the viewport width */
height: 100vh; /* use the viewport height */
object-fit: cover; /* optionally preserve the image's aspect ratio */
}
</style>
</head>
<body>
<canvas id="videoCanvas" width="1280" height="720"></canvas>
<script>
// Get the canvas element and its 2D rendering context
let canvas = document.getElementById('videoCanvas');
let ctx = canvas.getContext('2d');
// Create the WebSocket connection
let ws = new WebSocket('ws://XXX:8080/video');
// Handle the WebSocket connection opening
ws.onopen = function (event) {
console.log('WebSocket is open now.');
// Optionally tell the server to start streaming (the server above ignores incoming messages)
ws.send('START_STREAMING');
};
// Handle messages from the server
ws.onmessage = function (event) {
console.log('Received frame, base64 length:', event.data.length);
// The server sends a Base64-encoded JPEG image
let imageData = 'data:image/jpeg;base64,' + event.data;
// Create a new Image object to load the picture
let img = new Image();
// Draw the image onto the canvas once it has loaded
img.onload = function () {
ctx.clearRect(0, 0, canvas.width, canvas.height);
ctx.drawImage(img, 0, 0, img.width, img.height, 0, 0, canvas.width, canvas.height);
};
// Set the image src to the Base64 data URL
img.src = imageData;
};
// Handle WebSocket errors
ws.onerror = function (error) {
console.error('WebSocket Error: ', error);
};
// Handle the WebSocket connection closing
ws.onclose = function (event) {
if (event.wasClean) {
console.log('WebSocket connection closed cleanly, code=' + event.code + ' reason=' + event.reason);
} else {
console.error('WebSocket connection died');
}
};
</script>
</body>
</html>