Image processing here has to run under Python 3 on Ubuntu 18.04, but the cv_bridge shipped with ROS on 18.04 is built against Python 2. To sidestep cv_bridge, this article encodes each frame into a string on the drone, publishes it over a ROS topic, and decodes and displays it on the host. The host additionally shows the feeds from multiple UAVs side by side in one synchronized window.
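As a minimal, ROS-independent sketch of the encode/decode round trip used below (the synthetic frame is only a stand-in for a camera image):

import cv2
import base64
import numpy as np

# Encode: JPEG-compress the frame, then base64 it into a plain string
frame = np.zeros((240, 320, 3), dtype=np.uint8)  # stand-in for a camera frame
ok, buf = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 50])
img_str = base64.b64encode(buf).decode('utf-8')  # payload for a std_msgs/String

# Decode: reverse the steps on the host
img_bytes = base64.b64decode(img_str)
img = cv2.imdecode(np.frombuffer(img_bytes, dtype=np.uint8), cv2.IMREAD_COLOR)
assert img is not None and img.shape == frame.shape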
Drone side
#!/usr/bin/env python3
# coding:utf-8
import rospy
import cv2
import numpy as np
from std_msgs.msg import String
import base64

def encode_image(img, quality=50):
    # JPEG-compress the frame, then base64 it so it fits in a std_msgs/String
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
    _, buffer = cv2.imencode('.jpg', img, encode_param)
    img_str = base64.b64encode(buffer).decode('utf-8')
    return img_str

if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    # Check whether the camera opened successfully
    if not cap.isOpened():
        print("Cannot open camera 0, trying camera 1")
        # Try the other camera (index 1)
        cap = cv2.VideoCapture(1)
    rospy.init_node('yolo_detector_node', anonymous=True)
    image_pub = rospy.Publisher('/image_topic_7', String, queue_size=10)
    # Placeholder position values; in practice these would come from the UAV's localization
    drone_x, drone_y, x_offset, y_offset = 0.0, 0.0, 0.0, 0.0
    while not rospy.is_shutdown():
        ret, cv_image = cap.read()
        if not ret:
            continue
        x_position_denied = drone_x + x_offset
        y_position_denied = drone_y + y_offset
        pos_text = f"uav:7 pos: {x_position_denied:.2f},{y_position_denied:.2f}"
        cv2.putText(cv_image, pos_text, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 4)
        # Publish the base64-encoded JPEG as a plain string (no cv_bridge needed)
        image_pub.publish(encode_image(cv_image))
        cv2.imshow("cv_image", cv_image)
        if cv2.waitKey(10) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()
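The script above hardcodes UAV 7. One way the same node could be reused on every drone is to read the id from the parameter server; the `~uav_id` private parameter below is an assumption for illustration, not part of the original code:

import rospy
from std_msgs.msg import String

rospy.init_node('image_sender', anonymous=True)
# Hypothetical private parameter; set per drone, e.g. rosrun <pkg> sender.py _uav_id:=3
uav_id = rospy.get_param('~uav_id', 7)
image_pub = rospy.Publisher(f'/image_topic_{uav_id}', String, queue_size=10)
pos_text_prefix = f"uav:{uav_id} pos:"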
Host side
#!/usr/bin/env python3
# coding:utf-8
import cv2
import base64
import numpy as np
import rospy
import time
from std_msgs.msg import String

# One slot per UAV image
images = [None] * 8
# Last update time of each image
last_update_time = [0] * 8
# Black 320x240 placeholder shown when a feed is missing
placeholder_image = np.zeros((240, 320, 3), dtype=np.uint8)
# Flag indicating that a new image has arrived
new_image_received = False
# Timeout (seconds): a feed with no update within this window is blanked out
timeout_duration = 2.0

def decode_image(img_str):
    img_bytes = base64.b64decode(img_str)
    img_np = np.frombuffer(img_bytes, dtype=np.uint8)
    img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
    img_resized = cv2.resize(img, (320, 240))
    return img_resized

def image_callback(msg, index):
    global images, last_update_time, new_image_received
    try:
        # Decode and store the image
        images[index] = decode_image(msg.data)
        last_update_time[index] = time.time()  # record when this feed was last updated
        new_image_received = True  # mark that a new image has arrived
    except Exception as e:
        rospy.logwarn(f"Failed to decode image at index {index}: {e}")
        images[index] = None  # use None to mark a failed decode

def update_display():
    # Blank out any feed that has timed out
    current_time = time.time()
    for i in range(len(images)):
        if current_time - last_update_time[i] > timeout_duration:
            images[i] = placeholder_image
    # Tile the images; feeds that were never received use the black placeholder
    resized_images = [img if img is not None else placeholder_image for img in images]
    row1 = np.hstack(resized_images[:4])
    row2 = np.hstack(resized_images[4:])
    combined_image = np.vstack((row1, row2))
    # Show the combined mosaic
    cv2.imshow("Combined Image (with placeholders)", combined_image)
    cv2.waitKey(1)

def image_receiver():
    global new_image_received
    rospy.init_node('image_receiver', anonymous=True)
    # Create 8 subscribers, one per image topic
    for i in range(8):
        rospy.Subscriber(f'/image_topic_{i}', String, image_callback, i)
    # Create the display window while the receiver node runs
    cv2.namedWindow("Combined Image (with placeholders)", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("Combined Image (with placeholders)", 1280, 480)
    # Refresh loop
    rate = rospy.Rate(10)  # display refresh rate
    while not rospy.is_shutdown():
        # Only redraw when a new image has been received
        if new_image_received:
            update_display()
            new_image_received = False  # reset the flag
        rate.sleep()
    # Close the window on exit
    cv2.destroyAllWindows()

if __name__ == '__main__':
    try:
        image_receiver()
    except rospy.ROSInterruptException:
        pass
With this in place, the feeds from all UAVs are displayed in real time at the ground terminal.
Open issue
Pushing this many image messages through the ROS network congests the channel, likely because the radio link saturates or the onboard computer lacks processing headroom, so latency grows with the number of feeds. Possible remedies are bypassing ROS with point-to-point transmission, or upgrading the communication hardware and testing it thoroughly.
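A minimal sketch of the point-to-point idea, sending the same base64/JPEG payload over a plain UDP socket instead of a ROS topic. The address, port, and one-frame-per-datagram framing are assumptions for illustration; a real link would need fragmentation for frames larger than a single datagram:

import socket
import base64
import cv2
import numpy as np

HOST_IP, HOST_PORT = "192.168.1.100", 9999  # assumed ground-station address

# Sender (on the drone): JPEG + base64, pushed straight over UDP
def send_frame(sock, frame):
    ok, buf = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 50])
    if ok:
        sock.sendto(base64.b64encode(buf), (HOST_IP, HOST_PORT))

# Receiver (on the host): decode each datagram back into an image
def receive_loop():
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("0.0.0.0", HOST_PORT))
    while True:
        data, _ = sock.recvfrom(65535)  # one JPEG per datagram (must fit in ~64 KB)
        img = cv2.imdecode(np.frombuffer(base64.b64decode(data), dtype=np.uint8), cv2.IMREAD_COLOR)
        if img is not None:
            cv2.imshow("p2p", img)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break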