Hello,
I would like to understand how to perform two tasks at the same time: display two camera streams and save stereo image pairs for calibrating my cameras. This would allow me to better frame my recordings during acquisition.
On the one hand, I use the visu_multistream.py script below to display the streams with CVB stream handlers (cvb.SingleStreamHandler) and PySide2. However, I am not able to save images with this script. I tried to launch two dedicated threads for that, but I find it difficult to run them in parallel with the visualization stream handlers A and B.
visu_multistream.py
```python
import os, sys
import cvb
import cvb.ui
import datetime
import threading

from PySide2.QtCore import QObject, QUrl
from PySide2.QtQml import qmlRegisterType
from PySide2.QtWidgets import QApplication, QWidget
from PySide2.QtQuick import QQuickView, QQuickPaintedItem
from PySide2.QtGui import QIcon


class MyStreamHandler(cvb.SingleStreamHandler):

    def __init__(self, port, stream):
        super().__init__(stream)
        self.rate_counter = cvb.RateCounter()
        self.count = 0
        self.port = port
        self.stream = stream

    def camera_acquire(self):
        print("Starting device: {}".format(self.port))
        # stream.ring_buffer.change_count(250, 0)
        # stream = device.stream()
        self.stream.start()
        start_time = datetime.datetime.now()
        ok_count = 0
        for i in range(100):
            image, status = self.stream.wait()
            if status == cvb.WaitStatus.Ok:
                print(str(self.port) + " image: " + image.__class__.__name__ + " " + str(image) +
                      " | Status: " + str(status) + " | Buffer Index: " + str(image.buffer_index))
                ok_count += 1
        total_time = (datetime.datetime.now() - start_time).total_seconds()
        print("DEVICE: {}, IMAGES: {}, SECONDS: {}".format(self.port, ok_count, total_time))
        self.stream.stop()

    def run(self):
        self.camera_acquire()


if __name__ == "__main__":
    app_A = QApplication([])
    app_A.setApplicationName('Display camera')
    # tell Windows the correct AppUserModelID for this process (shows icon in the taskbar)

    device_A = cvb.DeviceFactory.open(os.path.join(cvb.install_path(), "drivers", "GenICam.vin"), port=0)
    device_B = cvb.DeviceFactory.open(os.path.join(cvb.install_path(), "drivers", "GenICam.vin"), port=1)

    # use a single stream handler to setup an acquisition thread and acquire images
    stream_A = device_A.stream()
    stream_B = device_B.stream()
    handler_A = cvb.SingleStreamHandler(stream_A)
    handler_B = cvb.SingleStreamHandler(stream_B)
    # handler_A = MyStreamHandler(0, stream_A)
    # handler_B = MyStreamHandler(1, stream_B)

    # create an image controller to interact with the UI
    image_controller_A = cvb.ui.ImageController()
    image_controller_B = cvb.ui.ImageController()

    # register the display component with QML
    cvb.ui.ImageViewItem.register()

    # setup the QML UI
    view_A = QQuickView()
    view_A.setResizeMode(QQuickView.SizeRootObjectToView)
    context_A = view_A.rootContext()
    context_A.setContextProperty("mainImage", image_controller_A)
    filepath = os.path.dirname(os.path.realpath(__file__))
    view_A.setSource(QUrl.fromLocalFile(os.path.join(filepath, "main_A.qml")))
    view_A.resize(640, 480)
    view_A.show()

    view_B = QQuickView()
    view_B.setResizeMode(QQuickView.SizeRootObjectToView)
    context_B = view_B.rootContext()
    context_B.setContextProperty("mainImage", image_controller_B)
    filepath = os.path.dirname(os.path.realpath(__file__))
    view_B.setSource(QUrl.fromLocalFile(os.path.join(filepath, "main_B.qml")))
    view_B.resize(640, 480)
    view_B.show()

    # register the device image with UI controller to trigger automatic refreshes
    image_controller_A.refresh(device_A.device_image, cvb.ui.AutoRefresh.On)
    image_controller_B.refresh(device_B.device_image, cvb.ui.AutoRefresh.On)

    # start the acquisition thread
    handler_A.run()
    handler_B.run()

    # start the UI event handler
    app_A.exec_()

    # stop the acquisition after UI exits
    handler_A.try_finish()
    handler_B.try_finish()
```
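This is roughly the direction I tried: replace the SingleStreamHandlers with one plain thread per stream that waits on the stream, saves every Nth frame, and relies on the AutoRefresh registration on device_image to keep the views updating. It is only a minimal, untested sketch; `acquire_and_save`, `save_every`, `n_frames` and the file names are placeholder names of mine, not CVB API.

```python
# Untested sketch: acquisition + saving in background threads while the QML
# views stay live through AutoRefresh on device_image (as in the script above).
import threading
import cv2

def acquire_and_save(name, stream, save_every=10, n_frames=100):
    # Own the acquisition loop instead of a SingleStreamHandler, so saving and
    # display do not compete for the same wait() results.
    stream.start()
    for i in range(n_frames):
        image, status = stream.wait()
        if status == cvb.WaitStatus.Ok and i % save_every == 0:
            # copy=True copies the pixel data out of the ring buffer before it is reused
            cv2.imwrite("calib_{}_{:04d}.png".format(name, i), cvb.as_array(image, copy=True))
    stream.stop()

thread_A = threading.Thread(target=acquire_and_save, args=("A", stream_A))
thread_B = threading.Thread(target=acquire_and_save, args=("B", stream_B))
thread_A.start()
thread_B.start()

# image_controller_A/B keep refreshing from device_A/B.device_image
# (AutoRefresh.On), so the views update while the threads drain the streams
app_A.exec_()

thread_A.join()
thread_B.join()
```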
On the other hand, I use the test_camera_teledyne.py script below to synchronize the acquisition of the two stereo images and then save them for my calibration, but I cannot display the streams at the same time, and I run into memory limits because of the ring buffer size (x = 400).
test_camera_teledyne.py
```python
import datetime
import time
import cvb
import os
import cvb.foundation
import sys
import threading
import numpy
import cv2

x = 400

print("set up start")
start_time = datetime.datetime.now()

device_A = cvb.DeviceFactory.open(os.path.join(cvb.install_path(), "drivers", "GenICam.vin"), port=0)
deviceNodeMap_A = device_A.node_maps["Device"]
deviceStream_A = device_A.stream()
ringBufferCount_A = deviceStream_A.ring_buffer.count
deviceStream_A.ring_buffer.change_count(x, 0)
print("Current RingBuffer amount: " + str(ringBufferCount_A))

device_B = cvb.DeviceFactory.open(os.path.join(cvb.install_path(), "drivers", "GenICam.vin"), port=1)
deviceNodeMap_B = device_B.node_maps["Device"]
deviceStream_B = device_B.stream()
ringBufferCount_B = deviceStream_B.ring_buffer.count
deviceStream_B.ring_buffer.change_count(x, 0)
print("Current RingBuffer amount: " + str(ringBufferCount_B))

ringBufferCount_A = deviceStream_A.ring_buffer.count
print("Changed RingBuffer amount to: " + str(ringBufferCount_A))
ringBufferCount_B = deviceStream_B.ring_buffer.count
print("Changed RingBuffer amount to: " + str(ringBufferCount_B))

autoBrightnessMode_A = deviceNodeMap_A["autoBrightnessMode"]
balanceWhiteAuto_A = deviceNodeMap_A["BalanceWhiteAuto"]
triggerMode_A = deviceNodeMap_A["TriggerMode"]
oldTriggerMode_A = deviceNodeMap_A["TriggerMode"]
triggerSource_A = deviceNodeMap_A["TriggerSource"]
oldTriggerSource_A = deviceNodeMap_A["TriggerSource"]
triggerSoftware_A = deviceNodeMap_A["TriggerSoftware"]
autoBrightnessMode_A.value = "Active"
balanceWhiteAuto_A.value = "Periodic"
triggerMode_A.value = "On"
triggerSource_A.value = "Software"

autoBrightnessMode_B = deviceNodeMap_B["autoBrightnessMode"]
balanceWhiteAuto_B = deviceNodeMap_B["BalanceWhiteAuto"]
triggerMode_B = deviceNodeMap_B["TriggerMode"]
oldTriggerMode_B = deviceNodeMap_B["TriggerMode"]
triggerSource_B = deviceNodeMap_B["TriggerSource"]
oldTriggerSource_B = deviceNodeMap_B["TriggerSource"]
triggerSoftware_B = deviceNodeMap_B["TriggerSoftware"]
autoBrightnessMode_B.value = "Active"
balanceWhiteAuto_B.value = "Periodic"
triggerMode_B.value = "On"
triggerSource_B.value = "Software"

print("Starting stream and trigger camera given amount of times")
print("Set up done: {}".format(datetime.datetime.now() - start_time))

a_images = []
b_images = []
a_times = []
b_times = []
a_video = []
b_video = []


def Stream_A():
    global a_times
    global a_images
    global a_video
    ctr = 0

    # Start Stream
    frameSize = (1936, 1216)
    a_video = cv2.VideoWriter('output_A_video.avi', cv2.VideoWriter_fourcc(*'XVID'), 30, frameSize)
    device_A.stream().start()

    print("Triggering A")
    start_time = datetime.datetime.now()

    # Collect images to fill buffer
    while (deviceStream_A.statistics[cvb.StreamInfo.NumBuffersPending] < x):
        triggerSoftware_A.execute()
        # print("Currently locked buffers: " + str(deviceStream_A.statistics[cvb.StreamInfo.NumBuffersLocked]))

    a_count = 0
    print("Triggering A done: {}".format(datetime.datetime.now() - start_time))
    start_time = datetime.datetime.now()

    while (deviceStream_A.statistics[cvb.StreamInfo.NumBuffersPending] > 0):
        image_A = device_A.stream().wait()
        np_array = cvb.as_array(image_A[0], copy=True)
        a_video.write(np_array)
        # if (a_count % 10) == 0:
        #     #dst_image = image_A[0].map(np_array.bounds, cvb.Size2D(np_array.size.width * 2, np_array.size.height * 3))
        #     #dst_image.save('chessboard_A_'+str(a_count)+'.png')
        #     cv2.imwrite('chessboard_A_'+str(a_count)+'.png', np_array)
        #     #cv2.waitKey(0)
        a_image_time = datetime.datetime.now()
        a_images.append(np_array)
        a_times.append(a_image_time)
        a_count += 1

    a_time = (datetime.datetime.now() - start_time).total_seconds()

    # Stop Stream
    device_A.stream().stop()
    a_video.release()
    print("A: ", a_count, "images", a_time, "seconds")
    print("A: last image time: ", a_image_time)

    # Reset changed settings of camera
    deviceStream_A.ring_buffer.change_count(3, 0)
    triggerMode_A = oldTriggerMode_A
    triggerSource_A = oldTriggerSource_A


def Stream_B():
    global b_times
    global b_images
    global b_video
    ctr = 0

    frameSize = (1936, 1216)
    b_video = cv2.VideoWriter('output_B_video.avi', cv2.VideoWriter_fourcc(*'XVID'), 30, frameSize)
    device_B.stream().start()

    print("Triggering B")
    start_time = datetime.datetime.now()

    while (deviceStream_B.statistics[cvb.StreamInfo.NumBuffersPending] < x):
        triggerSoftware_B.execute()

    # Process through images in buffer
    b_count = 0
    print("Triggering B done: {}".format(datetime.datetime.now() - start_time))
    start_time = datetime.datetime.now()

    while (deviceStream_B.statistics[cvb.StreamInfo.NumBuffersPending] > 0):
        image_B = device_B.stream().wait()
        np_array = cvb.as_array(image_B[0], copy=True)
        b_video.write(np_array)
        # if (b_count % 10) == 0:
        #     #dst_image = image_A[0].map(np_array.bounds, cvb.Size2D(np_array.size.width * 2, np_array.size.height * 3))
        #     #dst_image.save('chessboard_A_'+str(a_count)+'.png')
        #     cv2.imwrite('chessboard_B_'+str(b_count)+'.png', np_array)
        #     #cv2.waitKey(0)
        b_image_time = datetime.datetime.now()
        b_images.append(np_array)
        b_times.append(b_image_time)
        b_count += 1

    b_time = (datetime.datetime.now() - start_time).total_seconds()

    # Stop Stream
    device_B.stream().stop()
    b_video.release()
    print("B:", b_count, "images", b_time, "seconds")
    print("B: last image time: ", b_image_time)

    # Reset changed settings of camera
    deviceStream_B.ring_buffer.change_count(3, 0)
    triggerMode_B = oldTriggerMode_B
    triggerSource_B = oldTriggerSource_B


thread1 = threading.Thread(target=Stream_A)
thread1.start()
thread2 = threading.Thread(target=Stream_B)
thread2.start()

thread1.join()
thread2.join()

print(len(a_images), len(b_images))
```
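To avoid the memory problem, I was also considering draining the two streams pair by pair instead of filling 400 buffers first, roughly as below. This is an untested sketch built only from the calls already used above; `n_pairs` and the file names are placeholders, and I am aware that back-to-back software triggers only give approximate synchronization (a hardware trigger would be tighter).

```python
# Untested sketch: trigger and save pair by pair so memory use stays bounded
# by a handful of ring buffer entries instead of x = 400 frames per camera.
n_pairs = 50

deviceStream_A.ring_buffer.change_count(3, 0)  # a few buffers suffice when draining immediately
deviceStream_B.ring_buffer.change_count(3, 0)
deviceStream_A.start()
deviceStream_B.start()

for i in range(n_pairs):
    # fire both software triggers back to back so the exposures are (nearly) simultaneous
    triggerSoftware_A.execute()
    triggerSoftware_B.execute()

    image_A, status_A = deviceStream_A.wait()
    image_B, status_B = deviceStream_B.wait()
    if status_A == cvb.WaitStatus.Ok and status_B == cvb.WaitStatus.Ok:
        cv2.imwrite('chessboard_A_{:03d}.png'.format(i), cvb.as_array(image_A, copy=True))
        cv2.imwrite('chessboard_B_{:03d}.png'.format(i), cvb.as_array(image_B, copy=True))

deviceStream_A.stop()
deviceStream_B.stop()
```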
Does anyone know how to combine these two tasks in CVBpy so that they run at the same time?
Thank you in advance for your help.