diff --git a/.gitignore b/.gitignore index f34c2100f302..bf0c693b577f 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,21 @@ build node_modules CMakeSettings.json xcuserdata/ + +# Compiled executables and test files +test/detect_backend +test/test_libcamera +test/test_libcamera2 +test/test_libcamera_plugin +test/*.jpg +test/*.png +test/frame/ +test/test_results/ +test/libcamera_test_results/ +test/build_plugin/ + +# General compiled files +*.exe +*.out +*.app +compile_commands.json \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 93a7ad27998d..8147a8745732 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -268,6 +268,9 @@ OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" (NOT ANDROID) OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON VISIBLE_IF NOT ANDROID AND NOT IOS AND NOT XROS AND NOT WINRT VERIFY HAVE_GSTREAMER AND GSTREAMER_VERSION VERSION_GREATER "0.99") +OCV_OPTION(WITH_LIBCAMERA "Include Libcamera support" ON + VISIBLE_IF UNIX AND NOT APPLE AND NOT ANDROID + VERIFY HAVE_LIBCAMERA OR HAVE_VIDEOIO) OCV_OPTION(WITH_GTK "Include GTK support" ON VISIBLE_IF UNIX AND NOT APPLE AND NOT ANDROID VERIFY HAVE_GTK) @@ -1720,6 +1723,13 @@ if(ANDROID) status(" NDK Camera:" HAVE_ANDROID_NATIVE_CAMERA THEN "YES" ELSE NO) endif() +if(WITH_LIBCAMERA OR HAVE_LIBCAMERA) + ocv_build_features_string(libcamera_status + IF HAVE_LIBCAMERA THEN "libcamera/libcamera/libcamera.h" + ELSE "NO") + status(" libcamera:" HAVE_LIBCAMERA THEN YES ELSE NO) +endif() + # Order is similar to CV_PARALLEL_FRAMEWORK in core/src/parallel.cpp ocv_build_features_string(parallel_status EXCLUSIVE IF HAVE_TBB THEN "TBB (ver ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR} interface ${TBB_INTERFACE_VERSION})" diff --git a/modules/videoio/CMakeLists.txt b/modules/videoio/CMakeLists.txt index c8a03e72ae54..7e2da52e20c7 100644 --- a/modules/videoio/CMakeLists.txt +++ b/modules/videoio/CMakeLists.txt @@ -108,6 +108,16 @@ if(TARGET ocv.3rdparty.dshow) list(APPEND tgts ocv.3rdparty.dshow) endif() +if(TARGET ocv.3rdparty.libcamera) + if("libcamera" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all") + ocv_create_builtin_videoio_plugin("opencv_videoio_libcamera" ocv.3rdparty.libcamera "cap_libcamera.cpp" "cap_libcamera_plugin.cpp") + else() + list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_libcamera.cpp) + list(APPEND videoio_hdrs ${CMAKE_CURRENT_LIST_DIR}/src/cap_libcamera.hpp) + list(APPEND tgts ocv.3rdparty.libcamera) + endif() +endif() + if(TARGET ocv.3rdparty.msmf) if("msmf" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all") ocv_create_builtin_videoio_plugin("opencv_videoio_msmf" ocv.3rdparty.msmf "cap_msmf.cpp") diff --git a/modules/videoio/cmake/detect_libcamera.cmake b/modules/videoio/cmake/detect_libcamera.cmake new file mode 100644 index 000000000000..a8e1ae25301d --- /dev/null +++ b/modules/videoio/cmake/detect_libcamera.cmake @@ -0,0 +1,19 @@ +# --- Libcamera --- + +if(NOT HAVE_LIBCAMERA AND PKG_CONFIG_FOUND) + ocv_check_modules(LIBCAMERA libcamera) + if(LIBCAMERA_FOUND) + set(HAVE_LIBCAMERA TRUE) + endif() +endif() + +if(HAVE_LIBCAMERA) + if((CMAKE_CXX_STANDARD EQUAL 98) OR (CMAKE_CXX_STANDARD LESS 17)) + message(STATUS "CMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} is too old to support libcamera. Use C++17 or later. 
Turning HAVE_LIBCAMERA off") + set(HAVE_LIBCAMERA FALSE) + endif() +endif() + +if(HAVE_LIBCAMERA) + ocv_add_external_target(libcamera "${LIBCAMERA_INCLUDE_DIRS}" "${LIBCAMERA_LINK_LIBRARIES}" "HAVE_LIBCAMERA") +endif() diff --git a/modules/videoio/cmake/init.cmake b/modules/videoio/cmake/init.cmake index 2237b97a09ae..5bb9e069598c 100644 --- a/modules/videoio/cmake/init.cmake +++ b/modules/videoio/cmake/init.cmake @@ -11,6 +11,7 @@ endmacro() add_backend("ffmpeg" WITH_FFMPEG) add_backend("gstreamer" WITH_GSTREAMER) add_backend("v4l" WITH_V4L) +add_backend("libcamera" WITH_LIBCAMERA) add_backend("aravis" WITH_ARAVIS) add_backend("dc1394" WITH_1394) diff --git a/modules/videoio/cmake/plugin.cmake b/modules/videoio/cmake/plugin.cmake index 7eaee3e5a2c3..8c70d22df8e1 100644 --- a/modules/videoio/cmake/plugin.cmake +++ b/modules/videoio/cmake/plugin.cmake @@ -38,7 +38,9 @@ function(ocv_create_builtin_videoio_plugin name target) endif() set_target_properties(${name} PROPERTIES - CXX_STANDARD 11 + # CXX_STANDARD 11 + CXX_STANDARD 17 + CXX_STANDARD_REQUIRED ON CXX_VISIBILITY_PRESET hidden DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}" OUTPUT_NAME "${name}${OPENCV_PLUGIN_VERSION}${OPENCV_PLUGIN_ARCH}" diff --git a/modules/videoio/include/opencv2/videoio.hpp b/modules/videoio/include/opencv2/videoio.hpp index 6b5c1b6cc41e..a562416d5814 100644 --- a/modules/videoio/include/opencv2/videoio.hpp +++ b/modules/videoio/include/opencv2/videoio.hpp @@ -128,7 +128,9 @@ enum VideoCaptureAPIs { CAP_INTEL_MFX = 2300, //!< Intel MediaSDK CAP_XINE = 2400, //!< XINE engine (Linux) CAP_UEYE = 2500, //!< uEye Camera API - CAP_OBSENSOR = 2600, //!< For Orbbec 3D-Sensor device/module (Astra+, Femto, Astra2, Gemini2, Gemini2L, Gemini2XL, Gemini330, Femto Mega) attention: Astra2 cameras currently only support Windows and Linux kernel versions no higher than 4.15, and higher versions of Linux kernel may have exceptions. + CAP_OBSENSOR = 2600, //!< For Orbbec 3D-Sensor device/module (Astra+, Femto, Astra2, Gemini2, Gemini2L, Gemini2XL, Femto Mega) attention: Astra2 cameras currently only support Windows and Linux kernel versions no higher than 4.15, and higher versions of Linux kernel may have exceptions. 
+ CAP_LIBCAMERA = 2700, //!< Libcamera API + }; diff --git a/modules/videoio/src/cap_interface.hpp b/modules/videoio/src/cap_interface.hpp index a1924f568255..be34a595b4d6 100644 --- a/modules/videoio/src/cap_interface.hpp +++ b/modules/videoio/src/cap_interface.hpp @@ -7,6 +7,7 @@ #include "opencv2/core.hpp" #include "opencv2/core/core_c.h" +#include "opencv2/core/utils/logger.hpp" #include "opencv2/videoio.hpp" #include "opencv2/videoio/videoio_c.h" #include "opencv2/videoio/utils.private.hpp" @@ -188,6 +189,8 @@ class VideoParameters found = true; CV_LOG_INFO(NULL, "VIDEOIO: unused parameter: [" << param.key << "]=" << cv::format("%lld / 0x%016llx", (long long)param.value, (long long)param.value)); + CV_LOG_INFO(NULL, cv::format("VIDEOIO: unused parameter: [%d]=%lld / 0x%016llx", + param.key, (long long)param.value, (long long)param.value).c_str()); } } return found; @@ -402,6 +405,10 @@ Ptr createAndroidVideoWriter(const std::string& filename, int four Ptr create_obsensor_capture(int index); +Ptr createLibcameraCapture_cam(int index); +Ptr createLibcameraCapture_file(const std::string &filename); + + bool VideoCapture_V4L_waitAny( const std::vector& streams, CV_OUT std::vector& ready, diff --git a/modules/videoio/src/cap_libcamera.cpp b/modules/videoio/src/cap_libcamera.cpp new file mode 100644 index 000000000000..da1900fb4bd5 --- /dev/null +++ b/modules/videoio/src/cap_libcamera.cpp @@ -0,0 +1,1116 @@ +#include "precomp.hpp" +#include +#include +#include +#include "cap_interface.hpp" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cap_libcamera.hpp" +#include + +/** + * @brief implementation of the LibcameraApp class and LibcameraCapture + * The LibcameraApp implements is from LCCV + * Source: https://github.com/kbarni/LCCV + + + +*/ + +namespace cv +{ + + LibcameraApp::LibcameraApp(std::unique_ptr opts) + : options_(std::move(opts)), controls_(controls::controls) + + { + if (!options_) + options_ = std::make_unique(); + controls_.clear(); + } + + LibcameraApp::~LibcameraApp() + { + + StopCamera(); + Teardown(); + CloseCamera(); + std::cerr << "End of ~LibcameraApp() call" << std::endl; + } + + std::string const &LibcameraApp::CameraId() const + { + return camera_->id(); + } + + void LibcameraApp::OpenCamera() + { + + if (options_->verbose) + std::cerr << "Opening camera..." 
<< std::endl; + + if (getCameraManager()->cameras().size() == 0) + throw std::runtime_error("no cameras available"); + if (options_->camera >= getCameraManager()->cameras().size()) + throw std::runtime_error("selected camera is not available"); + + std::string const &cam_id = getCameraManager()->cameras()[options_->camera]->id(); + camera_ = getCameraManager()->get(cam_id); + if (!camera_) + throw std::runtime_error("failed to find camera " + cam_id); + + if (!camera_acquired_ && camera_->acquire()) + throw std::runtime_error("failed to acquire camera " + cam_id); + camera_acquired_ = true; + + if (options_->verbose) + std::cerr << "Acquired camera " << cam_id << std::endl; + } + + void LibcameraApp::CloseCamera() + { + if (camera_acquired_) + camera_->release(); + camera_acquired_ = false; + + camera_.reset(); + + if (options_->verbose && !options_->help) + std::cerr << "Camera closed" << std::endl; + } + + // void LibcameraApp::CloseCamera() { + // std::lock_guard lock(camera_stop_mutex_); + // if (camera_acquired_) { + // try { + // camera_->release(); + // camera_acquired_ = false; + // camera_.reset(); + // } catch (const std::exception& e) { + // std::cerr << "Error releasing camera: " << e.what() << std::endl; + // } + // } + // } + + void LibcameraApp::ConfigureViewfinder() + { + if (options_->verbose) + std::cerr << "Configuring viewfinder..." << std::endl; + + StreamRoles stream_roles = {StreamRole::Viewfinder}; + configuration_ = camera_->generateConfiguration(stream_roles); + if (!configuration_) + throw std::runtime_error("failed to generate viewfinder configuration"); + + // Now we get to override any of the default settings from the options_-> + configuration_->at(0).pixelFormat = libcamera::formats::RGB888; + configuration_->at(0).size.width = options_->video_width; + configuration_->at(0).size.height = options_->video_height; + configuration_->at(0).bufferCount = 4; + + // configuration_->transform = options_->transform; + + configureDenoise(options_->denoise == "auto" ? "cdn_off" : options_->denoise); + setupCapture(); + + streams_["viewfinder"] = configuration_->at(0).stream(); + + if (options_->verbose) + std::cerr << "Viewfinder setup complete" << std::endl; + } + + void LibcameraApp::Teardown() + { + if (options_->verbose && !options_->help) + std::cerr << "Tearing down requests, buffers and configuration" << std::endl; + + for (auto &iter : mapped_buffers_) + { + // assert(iter.first->planes().size() == iter.second.size()); + // for (unsigned i = 0; i < iter.first->planes().size(); i++) + for (auto &span : iter.second) + munmap(span.data(), span.size()); + } + mapped_buffers_.clear(); + + delete allocator_; + allocator_ = nullptr; + + configuration_.reset(); + + frame_buffers_.clear(); + + streams_.clear(); + } + + void LibcameraApp::StartCamera() + { + // This makes all the Request objects that we shall need. + makeRequests(); + + // Build a list of initial controls that we must set in the camera before starting it. + // We don't overwrite anything the application may have set before calling us. 
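+ // The roi_* options are normalized fractions (0..1); they are scaled by the sensor's
+ // ScalerCropMaximum rectangle below to build an absolute ScalerCrop control.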
+ if (!controls_.get(controls::ScalerCrop) && options_->roi_width != 0 && options_->roi_height != 0) + { + Rectangle sensor_area = *camera_->properties().get(properties::ScalerCropMaximum); + int x = options_->roi_x * sensor_area.width; + int y = options_->roi_y * sensor_area.height; + int w = options_->roi_width * sensor_area.width; + int h = options_->roi_height * sensor_area.height; + Rectangle crop(x, y, w, h); + crop.translateBy(sensor_area.topLeft()); + if (options_->verbose) + std::cerr << "Using crop " << crop.toString() << std::endl; + controls_.set(controls::ScalerCrop, crop); + } + + // Framerate is a bit weird. If it was set programmatically, we go with that, but + // otherwise it applies only to preview/video modes. For stills capture we set it + // as long as possible so that we get whatever the exposure profile wants. + if (!controls_.get(controls::FrameDurationLimits)) + { + if (StillStream()) + controls_.set(controls::FrameDurationLimits, libcamera::Span({INT64_C(100), INT64_C(1000000000)})); + else if (options_->framerate > 0) + { + int64_t frame_time = 1000000 / options_->framerate; // in us + controls_.set(controls::FrameDurationLimits, libcamera::Span({frame_time, frame_time})); + } + } + + if (!controls_.get(controls::ExposureTime) && options_->shutter) + controls_.set(controls::ExposureTime, options_->shutter); + if (!controls_.get(controls::AnalogueGain) && options_->gain) + controls_.set(controls::AnalogueGain, options_->gain); + if (!controls_.get(controls::AeMeteringMode)) + controls_.set(controls::AeMeteringMode, options_->getMeteringMode()); + if (!controls_.get(controls::AeExposureMode)) + controls_.set(controls::AeExposureMode, options_->getExposureMode()); + if (!controls_.get(controls::ExposureValue)) + controls_.set(controls::ExposureValue, options_->ev); + if (!controls_.get(controls::AwbMode)) + controls_.set(controls::AwbMode, options_->getWhiteBalance()); + if (!controls_.get(controls::ColourGains) && options_->awb_gain_r && options_->awb_gain_b) + controls_.set(controls::ColourGains, libcamera::Span({options_->awb_gain_r, options_->awb_gain_b})); + if (!controls_.get(controls::Brightness)) + controls_.set(controls::Brightness, options_->brightness); + if (!controls_.get(controls::Contrast)) + controls_.set(controls::Contrast, options_->contrast); + if (!controls_.get(controls::Saturation)) + controls_.set(controls::Saturation, options_->saturation); + if (!controls_.get(controls::Sharpness)) + controls_.set(controls::Sharpness, options_->sharpness); + + if (camera_->start(&controls_)) + throw std::runtime_error("failed to start camera"); + controls_.clear(); + camera_started_ = true; + last_timestamp_ = 0; + + camera_->requestCompleted.connect(this, &LibcameraApp::requestComplete); + + for (std::unique_ptr &request : requests_) + { + if (camera_->queueRequest(request.get()) < 0) + throw std::runtime_error("Failed to queue request"); + } + + if (options_->verbose) + std::cerr << "Camera started!" << std::endl; + } + + void LibcameraApp::StopCamera() + { + { + // We don't want QueueRequest to run asynchronously while we stop the camera. + std::lock_guard lock(camera_stop_mutex_); + if (camera_started_) + { + std::cerr << "Camera tries to stop!!" 
<< std::endl; + if (camera_->stop()) + throw std::runtime_error("failed to stop camera"); + + camera_started_ = false; + } + // camera_->requestCompleted.disconnect(this, &LibcameraApp::requestComplete); + // if (!camera_->requestCompleted.disconnect(this, &LibcameraApp::requestComplete)) { + // throw std::runtime_error("failed to disconnect camera callbacks"); + // } + } + + if (camera_) + camera_->requestCompleted.disconnect(this, &LibcameraApp::requestComplete); + + // An application might be holding a CompletedRequest, so queueRequest will get + // called to delete it later, but we need to know not to try and re-queue it. + completed_requests_.clear(); + + msg_queue_.Clear(); + + while (!free_requests_.empty()) + free_requests_.pop(); + + requests_.clear(); + + controls_.clear(); // no need for mutex here + + if (options_->verbose && !options_->help) + std::cerr << "Camera stopped!" << std::endl; + } + + void LibcameraApp::ApplyRoiSettings() + { + if (!controls_.get(controls::ScalerCrop) && options_->roi_width != 0 && options_->roi_height != 0) + { + Rectangle sensor_area = *camera_->properties().get(properties::ScalerCropMaximum); + int x = options_->roi_x * sensor_area.width; + int y = options_->roi_y * sensor_area.height; + int w = options_->roi_width * sensor_area.width; + int h = options_->roi_height * sensor_area.height; + Rectangle crop(x, y, w, h); + crop.translateBy(sensor_area.topLeft()); + if (options_->verbose) + std::cerr << "Using crop " << crop.toString() << std::endl; + controls_.set(controls::ScalerCrop, crop); + } + } + + LibcameraApp::Msg LibcameraApp::Wait() + { + return msg_queue_.Wait(); + } + + void LibcameraApp::queueRequest(CompletedRequest *completed_request) + { + BufferMap buffers(std::move(completed_request->buffers)); + + Request *request = completed_request->request; + assert(request); + + // This function may run asynchronously so needs protection from the + // camera stopping at the same time. + std::lock_guard stop_lock(camera_stop_mutex_); + if (!camera_started_) + return; + + // An application could be holding a CompletedRequest while it stops and re-starts + // the camera, after which we don't want to queue another request now. 
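+ // Only requests still tracked in completed_requests_ are re-queued; if StopCamera()
+ // already cleared the set, the CompletedRequest is simply deleted below.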
+ { + std::lock_guard lock(completed_requests_mutex_); + auto it = completed_requests_.find(completed_request); + delete completed_request; + if (it == completed_requests_.end()) + return; + completed_requests_.erase(it); + } + + for (auto const &p : buffers) + { + if (request->addBuffer(p.first, p.second) < 0) + throw std::runtime_error("failed to add buffer to request in QueueRequest"); + } + + { + std::lock_guard lock(control_mutex_); + request->controls() = std::move(controls_); + } + + if (camera_->queueRequest(request) < 0) + throw std::runtime_error("failed to queue request"); + } + + void LibcameraApp::PostMessage(MsgType &t, MsgPayload &p) + { + Msg msg(t); + msg.payload = p; + msg_queue_.Post(std::move(msg)); + } + + libcamera::Stream *LibcameraApp::GetStream(std::string const &name, unsigned int *w, unsigned int *h, + unsigned int *stride) const + { + auto it = streams_.find(name); + if (it == streams_.end()) + return nullptr; + StreamDimensions(it->second, w, h, stride); + return it->second; + } + + libcamera::Stream *LibcameraApp::ViewfinderStream(unsigned int *w, unsigned int *h, unsigned int *stride) const + { + return GetStream("viewfinder", w, h, stride); + } + + libcamera::Stream *LibcameraApp::StillStream(unsigned int *w, unsigned int *h, unsigned int *stride) const + { + return GetStream("still", w, h, stride); + } + + libcamera::Stream *LibcameraApp::RawStream(unsigned int *w, unsigned int *h, unsigned int *stride) const + { + return GetStream("raw", w, h, stride); + } + + libcamera::Stream *LibcameraApp::VideoStream(unsigned int *w, unsigned int *h, unsigned int *stride) const + { + return GetStream("video", w, h, stride); + } + + libcamera::Stream *LibcameraApp::LoresStream(unsigned int *w, unsigned int *h, unsigned int *stride) const + { + return GetStream("lores", w, h, stride); + } + + libcamera::Stream *LibcameraApp::GetMainStream() const + { + for (auto &p : streams_) + { + if (p.first == "viewfinder" || p.first == "still" || p.first == "video") + return p.second; + } + + return nullptr; + } + + std::vector> LibcameraApp::Mmap(FrameBuffer *buffer) const + { + auto item = mapped_buffers_.find(buffer); + if (item == mapped_buffers_.end()) + return {}; + return item->second; + } + + void LibcameraApp::SetControls(ControlList &controls) + { + std::lock_guard lock(control_mutex_); + controls_ = std::move(controls); + } + + void LibcameraApp::StreamDimensions(Stream const *stream, unsigned int *w, unsigned int *h, unsigned int *stride) const + { + StreamConfiguration const &cfg = stream->configuration(); + if (w) + *w = cfg.size.width; + if (h) + *h = cfg.size.height; + if (stride) + *stride = cfg.stride; + } + + void LibcameraApp::setupCapture() + { + // First finish setting up the configuration. + + CameraConfiguration::Status validation = configuration_->validate(); + if (validation == CameraConfiguration::Invalid) + throw std::runtime_error("failed to valid stream configurations"); + else if (validation == CameraConfiguration::Adjusted) + std::cerr << "Stream configuration adjusted" << std::endl; + + if (camera_->configure(configuration_.get()) < 0) + throw std::runtime_error("failed to configure streams"); + + if (options_->verbose) + std::cerr << "Camera streams configured" << std::endl; + + // Next allocate all the buffers we need, mmap them and store them on a free list. 
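+ // The spans stored in mapped_buffers_ remain valid until Teardown() munmap()s them;
+ // frame_buffers_ is the free list that makeRequests() later draws buffers from.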
+ + allocator_ = new FrameBufferAllocator(camera_); + for (StreamConfiguration &config : *configuration_) + { + Stream *stream = config.stream(); + + if (allocator_->allocate(stream) < 0) + throw std::runtime_error("failed to allocate capture buffers"); + + for (const std::unique_ptr &buffer : allocator_->buffers(stream)) + { + // "Single plane" buffers appear as multi-plane here, but we can spot them because then + // planes all share the same fd. We accumulate them so as to mmap the buffer only once. + size_t buffer_size = 0; + for (unsigned i = 0; i < buffer->planes().size(); i++) + { + const FrameBuffer::Plane &plane = buffer->planes()[i]; + buffer_size += plane.length; + if (i == buffer->planes().size() - 1 || plane.fd.get() != buffer->planes()[i + 1].fd.get()) + { + void *memory = mmap(NULL, buffer_size, PROT_READ | PROT_WRITE, MAP_SHARED, plane.fd.get(), 0); + mapped_buffers_[buffer.get()].push_back( + libcamera::Span(static_cast(memory), buffer_size)); + buffer_size = 0; + } + } + frame_buffers_[stream].push(buffer.get()); + } + } + if (options_->verbose) + std::cerr << "Buffers allocated and mapped" << std::endl; + + // The requests will be made when StartCamera() is called. + } + + void LibcameraApp::makeRequests() + { + auto free_buffers(frame_buffers_); + while (true) + { + for (StreamConfiguration &config : *configuration_) + { + Stream *stream = config.stream(); + if (stream == configuration_->at(0).stream()) + { + if (free_buffers[stream].empty()) + { + if (options_->verbose) + std::cerr << "Requests created" << std::endl; + return; + } + std::unique_ptr request = camera_->createRequest(); + if (!request) + throw std::runtime_error("failed to make request"); + requests_.push_back(std::move(request)); + } + else if (free_buffers[stream].empty()) + throw std::runtime_error("concurrent streams need matching numbers of buffers"); + + FrameBuffer *buffer = free_buffers[stream].front(); + free_buffers[stream].pop(); + if (requests_.back()->addBuffer(stream, buffer) < 0) + throw std::runtime_error("failed to add buffer to request"); + } + } + } + + void LibcameraApp::requestComplete(Request *request) + { + if (request->status() == Request::RequestCancelled) + return; + + CompletedRequest *r = new CompletedRequest(sequence_++, request); + CompletedRequestPtr payload(r, [this](CompletedRequest *cr) + { this->queueRequest(cr); }); + { + std::lock_guard lock(completed_requests_mutex_); + completed_requests_.insert(r); + } + + // We calculate the instantaneous framerate in case anyone wants it. 
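+ // Buffer metadata timestamps are in nanoseconds (hence the 1e9 factor); the first frame,
+ // or a repeated timestamp, reports a framerate of 0.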
+ uint64_t timestamp = payload->buffers.begin()->second->metadata().timestamp; + if (last_timestamp_ == 0 || last_timestamp_ == timestamp) + payload->framerate = 0; + else + payload->framerate = 1e9 / (timestamp - last_timestamp_); + last_timestamp_ = timestamp; + + msg_queue_.Post(Msg(MsgType::RequestComplete, std::move(payload))); + } + + void LibcameraApp::configureDenoise(const std::string &denoise_mode) + { + using namespace libcamera::controls::draft; + + static const std::map denoise_table = { + {"off", NoiseReductionModeOff}, + {"cdn_off", NoiseReductionModeMinimal}, + {"cdn_fast", NoiseReductionModeFast}, + {"cdn_hq", NoiseReductionModeHighQuality}}; + NoiseReductionModeEnum denoise; + + auto const mode = denoise_table.find(denoise_mode); + if (mode == denoise_table.end()) + throw std::runtime_error("Invalid denoise mode " + denoise_mode); + denoise = mode->second; + + controls_.set(NoiseReductionMode, denoise); + } + + /* ******************************************************************* */ + + LibcameraCapture::LibcameraCapture() + { + app = new LibcameraApp(std::unique_ptr(new Options())); + options = static_cast(app->GetOptions()); + still_flags = LibcameraApp::FLAG_STILL_NONE; + options->photo_width = 4056; + options->photo_height = 3040; + options->video_width = 640; + options->video_height = 480; + options->framerate = 30; + options->denoise = "auto"; + options->timeout = 1000; + options->setMetering(Metering_Modes::METERING_MATRIX); + options->setExposureMode(Exposure_Modes::EXPOSURE_NORMAL); + options->setWhiteBalance(WhiteBalance_Modes::WB_AUTO); + options->contrast = 1.0f; + options->saturation = 1.0f; + // still_flags |= LibcameraApp::FLAG_STILL_RGB; + still_flags |= LibcameraApp::FLAG_STILL_BGR; + needsReconfigure.store(false, std::memory_order_release); + camerastarted = false; + current_request_ = nullptr; + } + + LibcameraCapture::LibcameraCapture(int camera_index) + { + app = new LibcameraApp(std::unique_ptr(new Options())); + options = static_cast(app->GetOptions()); + still_flags = LibcameraApp::FLAG_STILL_NONE; + options->photo_width = 4056; + options->photo_height = 3040; + options->video_width = 640; + options->video_height = 480; + options->framerate = 30; + options->denoise = "auto"; + options->timeout = 1000; + options->camera = camera_index; + options->setMetering(Metering_Modes::METERING_MATRIX); + options->setExposureMode(Exposure_Modes::EXPOSURE_NORMAL); + options->setWhiteBalance(WhiteBalance_Modes::WB_AUTO); + options->contrast = 1.0f; + options->saturation = 1.0f; + // still_flags |= LibcameraApp::FLAG_STILL_RGB; + still_flags |= LibcameraApp::FLAG_STILL_BGR; + needsReconfigure.store(false, std::memory_order_release); + camerastarted = false; + current_request_ = nullptr; + + // Automatically open the camera + open(camera_index); + } + + LibcameraCapture::~LibcameraCapture() + { + stopVideo(); + // delete app; + std::cerr << "End of ~LibcameraCapture() call" << std::endl; + } + + bool LibcameraCapture::startVideo() // not resolved + { + if (camerastarted) + { + std::cerr << "Camera already started"; + return false; + } + + LibcameraCapture::app->OpenCamera(); + LibcameraCapture::app->ConfigureViewfinder(); + LibcameraCapture::app->StartCamera(); + + // Get stream dimensions for later use + libcamera::Stream *stream = app->ViewfinderStream(&vw, &vh, &vstr); + if (!stream) + { + std::cerr << "Error getting viewfinder stream" << std::endl; + return false; + } + + camerastarted = true; + return true; + } + + void LibcameraCapture::stopVideo() // not 
resolved + { + if (!camerastarted) + return; + + LibcameraCapture::app->StopCamera(); + LibcameraCapture::app->Teardown(); + LibcameraCapture::app->CloseCamera(); + + // Clear any pending request + { + std::lock_guard lock(request_mutex_); + current_request_ = nullptr; + } + + camerastarted = false; + } + + /** + * @brief Check if a frame is available and ready for retrieval. + * + * This function checks if libcamera has a completed request ready. + * If the camera is not started, it will start the camera first. + * It uses libcamera's asynchronous message system to check for available frames. + * + * @return `true` if a frame is ready for retrieval. + * `false` if no frame is available or camera failed to start. + */ + bool LibcameraCapture::grabFrame() + { + if (!camerastarted) + { + if (!startVideo()) + { + std::cerr << "Failed to start camera" << std::endl; + return false; + } + } + + // Check if we need to reconfigure + // if (needsReconfigure.load(std::memory_order_acquire)) + // { + // stopVideo(); + // if (!startVideo()) + // { + // std::cerr << "Failed to restart camera after reconfiguration" << std::endl; + // return false; + // } + // needsReconfigure.store(false, std::memory_order_release); + // } + + // Try to get a message from libcamera (non-blocking check) + try + { + // Use Wait() with short timeout to check for available messages + LibcameraApp::Msg msg = app->Wait(); + + if (msg.type == LibcameraApp::MsgType::RequestComplete) + { + CompletedRequestPtr payload = msg.getCompletedRequest(); + { + std::lock_guard lock(request_mutex_); + current_request_ = payload; + } + return true; + } + else if (msg.type == LibcameraApp::MsgType::Quit) + { + std::cerr << "Quit message received" << std::endl; + return false; + } + } + catch (const std::exception& e) + { + // No message available or error occurred + return false; + } + + return false; + } + + /** + * @brief Retrieve the frame data from the current completed request. + * + * This function extracts frame data directly from libcamera's memory-mapped buffers + * without additional copying. It uses the completed request obtained in grabFrame(). + * + * @param int Unused parameter. + * @param dst An OpenCV `OutputArray` where the retrieved frame will be stored. + * The frame is stored in RGB format (8-bit, 3 channels, CV_8UC3). + * + * @return `true` if a frame is successfully retrieved and copied to `dst`. + * `false` if no frame is ready or an error occurred. 
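+ * @note The copy is done row by row so that any stride padding reported by libcamera
+ *       is stripped from the output image.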
+ */ + bool LibcameraCapture::retrieveFrame(int, OutputArray dst) + { + CompletedRequestPtr request; + { + std::lock_guard lock(request_mutex_); + if (!current_request_) + { + return false; + } + request = current_request_; + current_request_ = nullptr; // Clear after use + } + + // Get the viewfinder stream + libcamera::Stream *stream = app->ViewfinderStream(&vw, &vh, &vstr); + if (!stream) + { + std::cerr << "Error getting viewfinder stream" << std::endl; + return false; + } + + // Get memory mapped buffer directly from libcamera + std::vector> mem = app->Mmap(request->buffers[stream]); + if (mem.empty()) + { + std::cerr << "Error getting memory mapped buffer" << std::endl; + return false; + } + + // Create OpenCV Mat directly from libcamera buffer + Mat frame(vh, vw, CV_8UC3); + uint8_t *src_ptr = mem[0].data(); + uint line_size = vw * 3; + + // Copy line by line to handle stride correctly + for (unsigned int i = 0; i < vh; i++, src_ptr += vstr) + { + memcpy(frame.ptr(i), src_ptr, line_size); + } + + frame.copyTo(dst); + return true; + } + + bool LibcameraCapture::retrieve(cv::Mat& frame, int stream_idx) + { + cv::OutputArray dst(frame); + return retrieveFrame(stream_idx, dst); + } + + double LibcameraCapture::getProperty(int propId) const + { + switch (propId) + { + case cv::CAP_PROP_BRIGHTNESS: + return options->brightness; + + case cv::CAP_PROP_CONTRAST: + return options->contrast; + + case cv::CAP_PROP_SATURATION: + return options->saturation; + + case cv::CAP_PROP_SHARPNESS: + return options->sharpness; + + case cv::CAP_PROP_AUTO_EXPOSURE: + return options->getExposureMode() == Exposure_Modes::EXPOSURE_NORMAL; + + case cv::CAP_PROP_EXPOSURE: + return options->shutter; + + case cv::CAP_PROP_AUTO_WB: + return options->getWhiteBalance() == WhiteBalance_Modes::WB_AUTO; + + case cv::CAP_PROP_WB_TEMPERATURE: + // Since we don't have a direct WB temperature, return an approximation based on the current setting + switch (options->getWhiteBalance()) + { + case WhiteBalance_Modes::WB_TUNGSTEN: + return 3000.0; // Approximate value for tungsten + case WhiteBalance_Modes::WB_INDOOR: + return 4500.0; // Approximate value for indoor + case WhiteBalance_Modes::WB_DAYLIGHT: + return 5500.0; // Approximate value for daylight + case WhiteBalance_Modes::WB_CLOUDY: + return 7000.0; // Approximate value for cloudy + default: + return 5000.0; // Default approximation if none of the above + } + + case cv::CAP_PROP_XI_AEAG_ROI_OFFSET_X: + return options->roi_x; + + case cv::CAP_PROP_XI_AEAG_ROI_OFFSET_Y: + return options->roi_y; + + case cv::CAP_PROP_XI_AEAG_ROI_WIDTH: + return options->roi_width; + + case cv::CAP_PROP_XI_AEAG_ROI_HEIGHT: + return options->roi_height; + + case cv::CAP_PROP_FOURCC: + { + // Return the FOURCC code of the current video format. + // This is a placeholder. You should replace it with the actual FOURCC code. 
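+ // Note: the capture configures the viewfinder stream as RGB888, so a FOURCC such as
+ // 'RGB3' would be the natural value to report once this is implemented.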
+ // return cv::VideoWriter::fourcc('M', 'J', 'P', 'G'); + // return options->getFourCC(); + std::cerr << "Warning: Not implemented yet" << std::endl; + return 0; + } + + case cv::CAP_PROP_FRAME_WIDTH: + if (options->video_width != 0) + { + return options->video_width; + } + else + { + return options->photo_width; + } + + case cv::CAP_PROP_FRAME_HEIGHT: + if (options->video_height != 0) + { + return options->video_height; + } + else + { + return options->photo_height; + } + + case cv::CAP_PROP_FPS: + return options->framerate; + + case cv::CAP_PROP_AUTOFOCUS: + case cv::CAP_PROP_BUFFERSIZE: + case cv::CAP_PROP_PAN: + case cv::CAP_PROP_TILT: + case cv::CAP_PROP_ROLL: + case cv::CAP_PROP_IRIS: + // Not implemented, return a default value or an error code + std::cerr << "Warning: Property " << propId << " is not supported." << std::endl; + return 0; // Or some other value indicating an error or not supported + + default: + std::cerr << "Warning: Unsupported property: " << propId << std::endl; + return 0; + } + } + + bool LibcameraCapture::setProperty(int propId, double value) + { + switch (propId) + { + case cv::CAP_PROP_BRIGHTNESS: + options->brightness = value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_CONTRAST: + options->contrast = value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_SATURATION: + options->saturation = value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_SHARPNESS: + options->sharpness = value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_AUTO_EXPOSURE: + if (value) + { + options->setExposureMode(Exposure_Modes::EXPOSURE_NORMAL); + } + else + { + options->setExposureMode(Exposure_Modes::EXPOSURE_SHORT); + } + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_EXPOSURE: + options->shutter = value; // Assumes value is in milliseconds, libcamera uses seconds + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_AUTO_WB: + options->setWhiteBalance(value ? WhiteBalance_Modes::WB_AUTO : WhiteBalance_Modes::WB_INDOOR); + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_WB_TEMPERATURE: + // Libcamera does not have a direct WB temperature setting, + // you might need to convert this to r/b gains for manual control. + // For now, let's assume a simplified approach. 
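+ // Rough colour-temperature bands: <4000K tungsten, <5000K indoor, <6500K daylight, else cloudy.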
+ if (value < 4000) + { + options->setWhiteBalance(WhiteBalance_Modes::WB_TUNGSTEN); + } + else if (value < 5000) + { + options->setWhiteBalance(WhiteBalance_Modes::WB_INDOOR); + } + else if (value < 6500) + { + options->setWhiteBalance(WhiteBalance_Modes::WB_DAYLIGHT); + } + else + { + options->setWhiteBalance(WhiteBalance_Modes::WB_CLOUDY); + } + needsReconfigure.store(true, std::memory_order_release); + break; + + // case cv::CAP_PROP_ZOOM: // This is a custom property for ROI + // options->roi_x = options->roi_y = (1.0 - value) / 2.0; // Assuming value is normalized zoom level (0.0 - 1.0) + // options->roi_width = options->roi_height = value; + // break; + + case cv::CAP_PROP_XI_AEAG_ROI_OFFSET_X: + options->roi_x = value; + app->ApplyRoiSettings(); + break; + + case cv::CAP_PROP_XI_AEAG_ROI_OFFSET_Y: + options->roi_y = value; + app->ApplyRoiSettings(); + break; + + case cv::CAP_PROP_XI_AEAG_ROI_WIDTH: + options->roi_width = value; + app->ApplyRoiSettings(); + break; + + case cv::CAP_PROP_XI_AEAG_ROI_HEIGHT: + options->roi_height = value; + app->ApplyRoiSettings(); + break; + + case cv::CAP_PROP_FOURCC: + { + // Not implemented yet + + // char fourcc[4]; + // fourcc[0] = (char)((int)value & 0XFF); + // fourcc[1] = (char)(((int)value >> 8) & 0XFF); + // fourcc[2] = (char)(((int)value >> 16) & 0XFF); + // fourcc[3] = (char)(((int)value >> 24) & 0XFF); + // if(fourcc[0]=='M'&&fourcc[1]=='J'&&fourcc[2]=='P'&&fourcc[3]=='G'){ + + // } + // else if(fourcc[0]=='Y'&&fourcc[1]=='U'&&fourcc[2]=='Y'&&fourcc[3]=='V'){ + + // } + // else if(fourcc[0]=='R'&&fourcc[1]=='G'&&fourcc[2]=='B'&&fourcc[3]=='3'){ + // still_flags = LibcameraApp::FLAG_STILL_RGB; + // } + // else{ + // std::cerr << "Warning: FourCC code " << fourcc << " not supported." << std::endl; + // return false; + // } + // // needsReconfigure.store(true, std::memory_order_release); + break; + } + + case cv::CAP_PROP_FRAME_WIDTH: + options->video_width = options->photo_width = (int)value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_FRAME_HEIGHT: + options->video_height = options->photo_height = (int)value; + needsReconfigure.store(true, std::memory_order_release); + break; + + case cv::CAP_PROP_FPS: + options->framerate = (float)value; + needsReconfigure.store(true, std::memory_order_release); + break; + case cv::CAP_PROP_AUTOFOCUS: // Not implemented + case cv::CAP_PROP_BUFFERSIZE: // Not implemented + case cv::CAP_PROP_PAN: // Not implemented + case cv::CAP_PROP_TILT: // Not implemented + case cv::CAP_PROP_ROLL: // Not implemen ted + case cv::CAP_PROP_IRIS: // Not implemented + // These properties might need to trigger a re-configuration of the camera. + // You can handle them here if you want to support changing resolution or framerate on-the-fly. + // For now, we'll return false to indicate that these properties are not supported for dynamic changes. + std::cerr << "Warning: Property " << propId << " is not supported for dynamic changes." 
<< std::endl; + return false; + + default: + std::cerr << "Warning: Unsupported property: " << propId << std::endl; + return false; + } + + // if (needsReconfigure) + // { + // if (isFramePending) + // { + // stopVideo(); + // startVideo(); + // } + // } + return true; + } + + bool LibcameraCapture::open(int _index) + { + cv::String name; + /* Select camera, or rather, V4L video source */ + if (_index < 0) // Asking for the first device available + { + for (int autoindex = 0; autoindex < 8; ++autoindex) // 8=MAX_CAMERAS + { + name = cv::format("/dev/video%d", autoindex); + /* Test using an open to see if this new device name really does exists. */ + int h = ::open(name.c_str(), O_RDONLY); + if (h != -1) + { + ::close(h); + _index = autoindex; + break; + } + } + if (_index < 0) + { + CV_LOG_WARNING(NULL, "VIDEOIO(Libcamera): can't find camera device"); + name.clear(); + return false; + } + } + else + { + name = cv::format("/dev/video%d", _index); + } + + bool res = open(name); + if (!res) + { + CV_LOG_WARNING(NULL, "VIDEOIO(Libcamera:" << name << "): can't open camera by index"); + } + return res; + } + + bool LibcameraCapture::open(const std::string &_deviceName) + { + CV_LOG_DEBUG(NULL, "VIDEOIO(Libcamera:" << _deviceName << "): opening..."); + // Some parameters initialization here, maybe more needed. + options->video_width = 1280; + options->video_height = 720; + options->framerate = 30; + options->verbose = true; + return startVideo(); + } + + bool LibcameraCapture::isOpened() const + { + return camerastarted; + } + + Ptr createLibcameraCapture_file(const std::string &filename) + { + auto ret = makePtr(); + if (ret->open(filename)) + return ret; + return NULL; + } + + Ptr createLibcameraCapture_cam(int index) + { + Ptr cap = makePtr(); + if (cap && cap->open(index)) + return cap; + return Ptr(); + } + +} // namespace \ No newline at end of file diff --git a/modules/videoio/src/cap_libcamera.hpp b/modules/videoio/src/cap_libcamera.hpp new file mode 100644 index 000000000000..faab9883f8b9 --- /dev/null +++ b/modules/videoio/src/cap_libcamera.hpp @@ -0,0 +1,532 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +// Forward declaration or include for IVideoCapture +namespace cv { + class IVideoCapture; +} + +#include "cap_interface.hpp" + +namespace cv +{ + + enum Exposure_Modes + { + EXPOSURE_NORMAL = libcamera::controls::ExposureNormal, + EXPOSURE_SHORT = libcamera::controls::ExposureShort, + EXPOSURE_CUSTOM = libcamera::controls::ExposureCustom + }; + + enum Metering_Modes + { + METERING_CENTRE = libcamera::controls::MeteringCentreWeighted, + METERING_SPOT = libcamera::controls::MeteringSpot, + METERING_MATRIX = libcamera::controls::MeteringMatrix, + METERING_CUSTOM = libcamera::controls::MeteringCustom + }; + + enum WhiteBalance_Modes + { + WB_AUTO = libcamera::controls::AwbAuto, + WB_NORMAL = libcamera::controls::AwbAuto, + WB_INCANDESCENT = libcamera::controls::AwbIncandescent, + WB_TUNGSTEN = libcamera::controls::AwbTungsten, + WB_FLUORESCENT = libcamera::controls::AwbFluorescent, + WB_INDOOR = libcamera::controls::AwbIndoor, + WB_DAYLIGHT = libcamera::controls::AwbDaylight, + WB_CLOUDY = libcamera::controls::AwbCloudy, + WB_CUSTOM = libcamera::controls::AwbAuto + }; + + class Options + { + public: + Options() + { + timeout = 1000; + metering_index = Metering_Modes::METERING_CENTRE; + exposure_index = Exposure_Modes::EXPOSURE_NORMAL; + awb_index = 
WhiteBalance_Modes::WB_AUTO; + saturation = 1.0f; + contrast = 1.0f; + sharpness = 1.0f; + brightness = 0.0f; + shutter = 0.0f; + gain = 0.0f; + ev = 0.0f; + roi_x = roi_y = roi_width = roi_height = 0; + awb_gain_r = awb_gain_b = 0; + denoise = "auto"; + verbose = false; + transform = libcamera::Transform::Identity; + camera = 0; + } + + ~Options() {} + + void setMetering(Metering_Modes meteringmode) { metering_index = meteringmode; } + void setWhiteBalance(WhiteBalance_Modes wb) { awb_index = wb; } + void setExposureMode(Exposure_Modes exp) { exposure_index = exp; } + + int getExposureMode() { return exposure_index; } + int getMeteringMode() { return metering_index; } + int getWhiteBalance() { return awb_index; } + + bool help; + bool version; + bool list_cameras; + bool verbose; + uint64_t timeout; // in ms + unsigned int photo_width, photo_height; + unsigned int video_width, video_height; + bool rawfull; + libcamera::Transform transform; + float roi_x, roi_y, roi_width, roi_height; + float shutter; + float gain; + float ev; + float awb_gain_r; + float awb_gain_b; + float brightness; + float contrast; + float saturation; + float sharpness; + float framerate; + std::string denoise; + std::string info_text; + unsigned int camera; + + protected: + int metering_index; + int exposure_index; + int awb_index; + + private: + }; + struct CompletedRequest; + using CompletedRequestPtr = std::shared_ptr; + + namespace controls = libcamera::controls; + namespace properties = libcamera::properties; + + class LibcameraApp + { + public: + using Stream = libcamera::Stream; + using FrameBuffer = libcamera::FrameBuffer; + using ControlList = libcamera::ControlList; + using Request = libcamera::Request; + using CameraManager = libcamera::CameraManager; + using Camera = libcamera::Camera; + using CameraConfiguration = libcamera::CameraConfiguration; + using FrameBufferAllocator = libcamera::FrameBufferAllocator; + using StreamRole = libcamera::StreamRole; + using StreamRoles = std::vector; + using PixelFormat = libcamera::PixelFormat; + using StreamConfiguration = libcamera::StreamConfiguration; + using BufferMap = Request::BufferMap; + using Size = libcamera::Size; + using Rectangle = libcamera::Rectangle; + enum class MsgType + { + RequestComplete, + Quit + }; + typedef void* MsgPayload; + struct Msg + { + Msg(MsgType const &t) : type(t), payload(nullptr) {} + + // Specialized constructor for CompletedRequestPtr + Msg(MsgType const &t, CompletedRequestPtr p) : type(t) + { + payload = new CompletedRequestPtr(std::move(p)); + } + + // Destructor to clean up allocated memory + ~Msg() + { + if (payload && type == MsgType::RequestComplete) { + delete static_cast(payload); + } + } + + // Copy constructor + Msg(const Msg& other) : type(other.type), payload(nullptr) + { + if (other.payload && other.type == MsgType::RequestComplete) { + CompletedRequestPtr* ptr = static_cast(other.payload); + payload = new CompletedRequestPtr(*ptr); + } + } + + // Move constructor + Msg(Msg&& other) noexcept : type(other.type), payload(other.payload) + { + other.payload = nullptr; + } + + // Copy assignment + Msg& operator=(const Msg& other) + { + if (this != &other) { + // Clean up current payload + if (payload && type == MsgType::RequestComplete) { + delete static_cast(payload); + } + + type = other.type; + payload = nullptr; + if (other.payload && other.type == MsgType::RequestComplete) { + CompletedRequestPtr* ptr = static_cast(other.payload); + payload = new CompletedRequestPtr(*ptr); + } + } + return *this; + } + + // Move 
assignment + Msg& operator=(Msg&& other) noexcept + { + if (this != &other) { + // Clean up current payload + if (payload && type == MsgType::RequestComplete) { + delete static_cast(payload); + } + + type = other.type; + payload = other.payload; + other.payload = nullptr; + } + return *this; + } + + MsgType type; + MsgPayload payload; + + // Helper to get CompletedRequestPtr back + CompletedRequestPtr getCompletedRequest() const + { + if (payload && type == MsgType::RequestComplete) { + CompletedRequestPtr* ptr = static_cast(payload); + return *ptr; + } + return nullptr; + } + }; + + // Some flags that can be used to give hints to the camera configuration. + static constexpr unsigned int FLAG_STILL_NONE = 0; + static constexpr unsigned int FLAG_STILL_BGR = 1; // supply BGR images, not YUV + static constexpr unsigned int FLAG_STILL_RGB = 2; // supply RGB images, not YUV + static constexpr unsigned int FLAG_STILL_RAW = 4; // request raw image stream + static constexpr unsigned int FLAG_STILL_DOUBLE_BUFFER = 8; // double-buffer stream + static constexpr unsigned int FLAG_STILL_TRIPLE_BUFFER = 16; // triple-buffer stream + static constexpr unsigned int FLAG_STILL_BUFFER_MASK = 24; // mask for buffer flags + + static constexpr unsigned int FLAG_VIDEO_NONE = 0; + static constexpr unsigned int FLAG_VIDEO_RAW = 1; // request raw image stream + static constexpr unsigned int FLAG_VIDEO_JPEG_COLOURSPACE = 2; // force JPEG colour space + + LibcameraApp(std::unique_ptr const opts = nullptr); + virtual ~LibcameraApp(); + + Options *GetOptions() const { return options_.get(); } + + std::string const &CameraId() const; + void OpenCamera(); + void CloseCamera(); + + void ConfigureStill(unsigned int flags = FLAG_STILL_NONE); + void ConfigureViewfinder(); + + void Teardown(); + void StartCamera(); + void StopCamera(); + + void ApplyRoiSettings(); + + Msg Wait(); + void PostMessage(MsgType &t, MsgPayload &p); + + Stream *GetStream(std::string const &name, unsigned int *w = nullptr, unsigned int *h = nullptr, + unsigned int *stride = nullptr) const; + Stream *ViewfinderStream(unsigned int *w = nullptr, unsigned int *h = nullptr, + unsigned int *stride = nullptr) const; + Stream *StillStream(unsigned int *w = nullptr, unsigned int *h = nullptr, unsigned int *stride = nullptr) const; + Stream *RawStream(unsigned int *w = nullptr, unsigned int *h = nullptr, unsigned int *stride = nullptr) const; + Stream *VideoStream(unsigned int *w = nullptr, unsigned int *h = nullptr, unsigned int *stride = nullptr) const; + Stream *LoresStream(unsigned int *w = nullptr, unsigned int *h = nullptr, unsigned int *stride = nullptr) const; + Stream *GetMainStream() const; + + std::vector> Mmap(FrameBuffer *buffer) const; + + void SetControls(ControlList &controls); + void StreamDimensions(Stream const *stream, unsigned int *w, unsigned int *h, unsigned int *stride) const; + + protected: + std::unique_ptr options_; + + private: + static std::shared_ptr getCameraManager() + { + static std::shared_ptr camera_manager_; + if (!camera_manager_) + { + std::cerr << "creating manager" << std::endl; + camera_manager_ = std::make_shared(); + int ret = camera_manager_->start(); + if (ret) + throw std::runtime_error("camera manager failed to start," + "code " + + std::to_string(-ret)); + } + + return camera_manager_; + } + + template + class MessageQueue + { + public: + template + void Post(U &&msg) + { + std::unique_lock lock(mutex_); + queue_.push(std::forward(msg)); + cond_.notify_one(); + } + T Wait() + { + std::unique_lock lock(mutex_); + 
cond_.wait(lock, [this] + { return !queue_.empty(); }); + T msg = std::move(queue_.front()); + queue_.pop(); + return msg; + } + void Clear() + { + std::unique_lock lock(mutex_); + queue_ = {}; + } + + private: + std::queue queue_; + std::mutex mutex_; + std::condition_variable cond_; + }; + + void setupCapture(); + void makeRequests(); + void queueRequest(CompletedRequest *completed_request); + void requestComplete(Request *request); + void configureDenoise(const std::string &denoise_mode); + + // std::unique_ptr camera_manager_; + std::shared_ptr camera_; + bool camera_acquired_ = false; + std::unique_ptr configuration_; + std::map>> mapped_buffers_; + std::map streams_; + FrameBufferAllocator *allocator_ = nullptr; + std::map> frame_buffers_; + std::queue free_requests_; + std::vector> requests_; + std::mutex completed_requests_mutex_; + std::set completed_requests_; + bool camera_started_ = false; + std::mutex camera_stop_mutex_; + MessageQueue msg_queue_; + // For setting camera controls. + std::mutex control_mutex_; + ControlList controls_; + // Other: + uint64_t last_timestamp_; + uint64_t sequence_ = 0; + }; + + class Metadata + { + public: + Metadata() = default; + + Metadata(Metadata const &other) + { + std::lock_guard other_lock(other.mutex_); + data_ = other.data_; + } + + Metadata(Metadata &&other) + { + std::lock_guard other_lock(other.mutex_); + data_ = std::move(other.data_); + other.data_.clear(); + } + + template + void Set(std::string const &tag, T &&value) + { + std::lock_guard lock(mutex_); + T* stored_value = new T(std::forward(value)); + data_[tag] = static_cast(stored_value); + } + + template + int Get(std::string const &tag, T &value) const + { + std::lock_guard lock(mutex_); + auto it = data_.find(tag); + if (it == data_.end()) + return -1; + T* stored_value = static_cast(it->second); + if (stored_value) { + value = *stored_value; + return 0; + } + return -1; + } + + void Clear() + { + std::lock_guard lock(mutex_); + data_.clear(); + } + + Metadata &operator=(Metadata const &other) + { + if (this != &other) { + std::lock(mutex_, other.mutex_); + std::lock_guard lock1(mutex_, std::adopt_lock); + std::lock_guard lock2(other.mutex_, std::adopt_lock); + data_ = other.data_; + } + return *this; + } + + Metadata &operator=(Metadata &&other) + { + if (this != &other) { + std::lock(mutex_, other.mutex_); + std::lock_guard lock1(mutex_, std::adopt_lock); + std::lock_guard lock2(other.mutex_, std::adopt_lock); + data_ = std::move(other.data_); + other.data_.clear(); + } + return *this; + } + + void Merge(Metadata &other) + { + std::lock(mutex_, other.mutex_); + std::lock_guard lock1(mutex_, std::adopt_lock); + std::lock_guard lock2(other.mutex_, std::adopt_lock); + // For C++14 compatibility, manually merge + for (auto& item : other.data_) { + data_[item.first] = std::move(item.second); + } + other.data_.clear(); + } + + template + T *GetLocked(std::string const &tag) + { + // This allows in-place access to the Metadata contents, + // for which you should be holding the lock. + auto it = data_.find(tag); + if (it == data_.end()) + return nullptr; + return static_cast(it->second); + } + + template + void SetLocked(std::string const &tag, T &&value) + { + // Use this only if you're holding the lock yourself. + data_.insert_or_assign(tag, std::forward(value)); + } + + // Note: use of (lowercase) lock and unlock means you can create scoped + // locks with the standard lock classes. + // e.g. 
std::lock_guard lock(metadata) + void lock() { mutex_.lock(); } + void unlock() { mutex_.unlock(); } + + private: + mutable std::mutex mutex_; + std::map data_; + }; + + struct CompletedRequest + { + using BufferMap = libcamera::Request::BufferMap; + using ControlList = libcamera::ControlList; + using Request = libcamera::Request; + + CompletedRequest(unsigned int seq, Request *r) + : sequence(seq), buffers(r->buffers()), metadata(r->metadata()), request(r) + { + r->reuse(); + } + unsigned int sequence; + BufferMap buffers; + ControlList metadata; + Request *request; + float framerate; + Metadata post_process_metadata; + }; + + class LibcameraCapture : public IVideoCapture + { + public: + LibcameraCapture(); + LibcameraCapture(int camera_index); + virtual ~LibcameraCapture(); + + bool startVideo(); + void stopVideo(); + + bool open(int _index); + bool open(const std::string &filename); + + virtual bool grabFrame() CV_OVERRIDE; + virtual bool retrieveFrame(int stream_idx, OutputArray dst) CV_OVERRIDE; + virtual double getProperty(int propId) const CV_OVERRIDE; + virtual bool setProperty(int propId, double value) CV_OVERRIDE; + virtual bool isOpened() const CV_OVERRIDE; + virtual int getCaptureDomain() CV_OVERRIDE { return cv::CAP_LIBCAMERA; } + + // Additional convenience methods for plugin + bool grab() { return grabFrame(); } + bool retrieve(cv::Mat& frame, int stream_idx = 0); + + protected: + LibcameraApp *app; + Options *options; + unsigned int still_flags; + unsigned int vw, vh, vstr; + std::atomic needsReconfigure; + bool camerastarted; + + // Store the current completed request for retrieve + CompletedRequestPtr current_request_; + std::mutex request_mutex_; + }; + +}; \ No newline at end of file diff --git a/modules/videoio/src/cap_libcamera_plugin.cpp b/modules/videoio/src/cap_libcamera_plugin.cpp new file mode 100644 index 000000000000..5310f5c1837f --- /dev/null +++ b/modules/videoio/src/cap_libcamera_plugin.cpp @@ -0,0 +1,292 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#define CAPTURE_ABI_VERSION 1 +#define CAPTURE_API_VERSION 2 + +#include "plugin_capture_api.hpp" +#include "cap_libcamera.hpp" + +#include +#include +#include + +using namespace cv; + +namespace { + +struct LibcameraPluginCapture +{ + Ptr capture; + + LibcameraPluginCapture() = default; + ~LibcameraPluginCapture() = default; +}; + +} + +static CvResult CV_API_CALL libcamera_capture_open(const char* /*filename*/, int camera_index, CV_OUT CvPluginCapture* handle) +{ + if (!handle) + return CV_ERROR_FAIL; + + *handle = nullptr; + + try + { + LibcameraPluginCapture* instance = new LibcameraPluginCapture(); + instance->capture = makePtr(camera_index); + + if (!instance->capture || !instance->capture->isOpened()) + { + delete instance; + return CV_ERROR_FAIL; + } + + *handle = reinterpret_cast(instance); + return CV_ERROR_OK; + } + catch (const std::exception& e) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to open capture: " << e.what()); + return CV_ERROR_FAIL; + } + catch (...) 
+ { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to open capture: unknown exception"); + return CV_ERROR_FAIL; + } +} + +static CvResult CV_API_CALL libcamera_capture_open_with_params( + const char* /*filename*/, int camera_index, + int* params, unsigned n_params, + CV_OUT CvPluginCapture* handle) +{ + if (!handle) + return CV_ERROR_FAIL; + + *handle = nullptr; + + try + { + LibcameraPluginCapture* instance = new LibcameraPluginCapture(); + instance->capture = makePtr(camera_index); + + if (!instance->capture || !instance->capture->isOpened()) + { + delete instance; + return CV_ERROR_FAIL; + } + + // Apply parameters + for (unsigned i = 0; i < n_params; ++i) + { + int prop = params[2*i]; + double val = static_cast(params[2*i + 1]); + instance->capture->setProperty(prop, val); + } + + *handle = reinterpret_cast(instance); + return CV_ERROR_OK; + } + catch (const std::exception& e) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to open capture with params: " << e.what()); + return CV_ERROR_FAIL; + } + catch (...) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to open capture with params: unknown exception"); + return CV_ERROR_FAIL; + } +} + +static CvResult CV_API_CALL libcamera_capture_release(CvPluginCapture handle) +{ + if (!handle) + return CV_ERROR_FAIL; + + try + { + LibcameraPluginCapture* instance = reinterpret_cast(handle); + delete instance; + return CV_ERROR_OK; + } + catch (const std::exception& e) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to release capture: " << e.what()); + return CV_ERROR_FAIL; + } + catch (...) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to release capture: unknown exception"); + return CV_ERROR_FAIL; + } +} + +static CvResult CV_API_CALL libcamera_capture_get_property(CvPluginCapture handle, int prop, CV_OUT double* val) +{ + if (!handle || !val) + return CV_ERROR_FAIL; + + try + { + LibcameraPluginCapture* instance = reinterpret_cast(handle); + if (!instance->capture) + return CV_ERROR_FAIL; + + *val = instance->capture->getProperty(prop); + return CV_ERROR_OK; + } + catch (const std::exception& e) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to get property: " << e.what()); + return CV_ERROR_FAIL; + } + catch (...) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to get property: unknown exception"); + return CV_ERROR_FAIL; + } +} + +static CvResult CV_API_CALL libcamera_capture_set_property(CvPluginCapture handle, int prop, double val) +{ + if (!handle) + return CV_ERROR_FAIL; + + try + { + LibcameraPluginCapture* instance = reinterpret_cast(handle); + if (!instance->capture) + return CV_ERROR_FAIL; + + bool result = instance->capture->setProperty(prop, val); + return result ? CV_ERROR_OK : CV_ERROR_FAIL; + } + catch (const std::exception& e) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to set property: " << e.what()); + return CV_ERROR_FAIL; + } + catch (...) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to set property: unknown exception"); + return CV_ERROR_FAIL; + } +} + +static CvResult CV_API_CALL libcamera_capture_grab(CvPluginCapture handle) +{ + if (!handle) + return CV_ERROR_FAIL; + + try + { + LibcameraPluginCapture* instance = reinterpret_cast(handle); + if (!instance->capture) + return CV_ERROR_FAIL; + + bool result = instance->capture->grab(); + return result ? CV_ERROR_OK : CV_ERROR_FAIL; + } + catch (const std::exception& e) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to grab frame: " << e.what()); + return CV_ERROR_FAIL; + } + catch (...) 
+ { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to grab frame: unknown exception"); + return CV_ERROR_FAIL; + } +} + +static CvResult CV_API_CALL libcamera_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_capture_retrieve_cb_t callback, void* userdata) +{ + if (!handle || !callback) + return CV_ERROR_FAIL; + + try + { + LibcameraPluginCapture* instance = reinterpret_cast<LibcameraPluginCapture*>(handle); + if (!instance->capture) + return CV_ERROR_FAIL; + + cv::Mat frame; + bool result = instance->capture->retrieve(frame, stream_idx); + if (!result || frame.empty()) + return CV_ERROR_FAIL; + + // Call the callback with frame data + int type = frame.type(); + CvResult res = callback(stream_idx, frame.ptr(), static_cast<int>(frame.step[0]), + frame.cols, frame.rows, type, userdata); + + return res; + } + catch (const std::exception& e) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to retrieve frame: " << e.what()); + return CV_ERROR_FAIL; + } + catch (...) + { + CV_LOG_ERROR(NULL, "Libcamera plugin: Failed to retrieve frame: unknown exception"); + return CV_ERROR_FAIL; + } +} + +static CvResult CV_API_CALL libcamera_capture_open_stream( + void* /*opaque*/, + long long(* /*read*/)(void* opaque, char* buffer, long long size), + long long(* /*seek*/)(void* opaque, long long offset, int way), + int* /*params*/, unsigned /*n_params*/, + CV_OUT CvPluginCapture* /*handle*/) +{ + // Libcamera doesn't support stream-based input + return CV_ERROR_FAIL; +} + +static const OpenCV_VideoIO_Capture_Plugin_API plugin_api = +{ + { + sizeof(OpenCV_VideoIO_Capture_Plugin_API), // size + CAPTURE_ABI_VERSION, // api_abi_version + CAPTURE_API_VERSION, // api_api_version + CV_VERSION_MAJOR, // opencv_version_major + CV_VERSION_MINOR, // opencv_version_minor + CV_VERSION_REVISION, // opencv_version_patch + CV_VERSION_STATUS, // opencv_version_status + "libcamera OpenCV plugin v1.2" // api_description + }, + { + CAP_LIBCAMERA, // id + libcamera_capture_open, + libcamera_capture_release, + libcamera_capture_get_property, + libcamera_capture_set_property, + libcamera_capture_grab, + libcamera_capture_retrieve, + }, + { + libcamera_capture_open_with_params, + }, + { + libcamera_capture_open_stream, + } +}; + +extern "C" { + +CV_PLUGIN_EXPORTS +const OpenCV_VideoIO_Capture_Plugin_API* CV_API_CALL opencv_videoio_capture_plugin_init_v1(int requested_abi_version, int requested_api_version, void* /*reserved*/) CV_NOEXCEPT +{ + if (requested_abi_version == CAPTURE_ABI_VERSION && requested_api_version <= CAPTURE_API_VERSION) + return reinterpret_cast<const OpenCV_VideoIO_Capture_Plugin_API*>(&plugin_api); + return NULL; +} + +} // extern "C" \ No newline at end of file diff --git a/modules/videoio/src/videoio_registry.cpp b/modules/videoio/src/videoio_registry.cpp index a84258ad9059..af0ae4375d07 100644 --- a/modules/videoio/src/videoio_registry.cpp +++ b/modules/videoio/src/videoio_registry.cpp @@ -105,6 +105,12 @@ static const struct VideoBackendInfo builtin_backends[] = DECLARE_STATIC_BACKEND(CAP_DSHOW, "DSHOW", MODE_CAPTURE_BY_INDEX, 0, create_DShow_capture, 0) #endif +#ifdef HAVE_LIBCAMERA + DECLARE_STATIC_BACKEND(CAP_LIBCAMERA, "LIBCAMERA", MODE_CAPTURE_ALL, createLibcameraCapture_file, createLibcameraCapture_cam, 0) +#elif defined(ENABLE_PLUGINS) + DECLARE_DYNAMIC_BACKEND(CAP_LIBCAMERA, "LIBCAMERA", MODE_CAPTURE_ALL) +#endif + // Linux, some Unix #if defined HAVE_CAMV4L2 DECLARE_STATIC_BACKEND(CAP_V4L2, "V4L2", MODE_CAPTURE_ALL, create_V4L_capture_file, create_V4L_capture_cam, 0) diff --git a/test/detect_backend.cpp b/test/detect_backend.cpp new file mode 100644
index 000000000000..be6e78c68629 --- /dev/null +++ b/test/detect_backend.cpp @@ -0,0 +1,60 @@ +// Running method: +// g++ -std=c++17 -o detect_backend detect_backend.cpp $(pkg-config --cflags --libs opencv4) && ./detect_backend + +#include <opencv2/opencv.hpp> +#include <opencv2/videoio.hpp> +#include <iostream> +#include <string> +#include <thread> +#include <chrono> + +int main() { + std::cout << "=== Direct Test of OpenCV LIBCAMERA Backend ===" << std::endl; + + // --- Step 1: Open --- + std::cout << "\nAttempting to open camera with cv::CAP_LIBCAMERA..." << std::endl; + cv::VideoCapture cap(0, cv::CAP_LIBCAMERA); + + // --- Step 2: Check if the camera was opened successfully --- + if (!cap.isOpened()) { + std::cout << "❌ FAILED: cap.isOpened() returned false." << std::endl; + std::cout << " This means OpenCV could not open the camera using the LIBCAMERA backend." << std::endl; + std::cout << " Possible reasons: Plugin not found, camera not connected, permissions error, etc." << std::endl; + return -1; + } + + // --- Step 3: Check the actual backend used --- + std::string backend_name = cap.getBackendName(); + std::cout << "✅ SUCCESS: cap.isOpened() returned true." << std::endl; + std::cout << " Actual backend in use: " << backend_name << std::endl; + + if (backend_name == "LIBCAMERA") { + std::cout << " ✅ VERIFIED: The LIBCAMERA backend is confirmed to be working!" << std::endl; + } else { + std::cout << " ⚠️ WARNING: A camera was opened, but it used the '" << backend_name << "' backend, not LIBCAMERA." << std::endl; + std::cout << " This means the LIBCAMERA plugin is likely not working correctly or has a lower priority." << std::endl; + cap.release(); + return -1; + } + + std::cout << "\nAttempting to capture a single frame..." << std::endl; + cv::Mat frame; + + // Warming up to allow auto-exposure to stabilize + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + + if (cap.read(frame) && !frame.empty()) { + std::cout << "✅ SUCCESS: A frame was captured." << std::endl; + std::cout << " Frame size: " << frame.cols << "x" << frame.rows << std::endl; + cv::imwrite("libcamera_test_frame.jpg", frame); + std::cout << " Frame saved as 'libcamera_test_frame.jpg'." << std::endl; + } else { + std::cout << "❌ FAILED: Could not read a valid frame from the camera." << std::endl; + cap.release(); + return -1; + } + + cap.release(); + return 0; +} \ No newline at end of file diff --git a/test/test_libcamera2.cpp b/test/test_libcamera2.cpp new file mode 100644 index 000000000000..542ef9b271a5 --- /dev/null +++ b/test/test_libcamera2.cpp @@ -0,0 +1,446 @@ +#include <opencv2/opencv.hpp> +#include <iostream> +#include <iomanip> +#include <fstream> +#include <string> +#include <vector> +#include <map> +#include <tuple> +#include <chrono> +#include <thread> +#include <cstdlib> + +// Test result structure +struct TestResult { + std::string test_name; + bool passed; + std::string details; + double measured_value = -1; +}; + +// Image quality analysis +struct ImageQuality { + double mean_brightness; + double std_brightness; + double mean_contrast; + cv::Size resolution; +}; + +class LibcameraDetailedTester { +private: + cv::VideoCapture cap; + std::vector<TestResult> results; + int test_counter = 0; + std::string result_dir = "test_results"; // default directory + + void logTest(const std::string& name, bool passed, const std::string& details = "", double value = -1) { + TestResult result; + result.test_name = name; + result.passed = passed; + result.details = details; + result.measured_value = value; + results.push_back(result); + + std::cout << "[" << std::setw(2) << ++test_counter << "] " + << (passed ? 
"✅" : "❌") << " " + << std::setw(30) << std::left << name + << " | " << details; + if (value >= 0) std::cout << " (value: " << value << ")"; + std::cout << std::endl; + } + + ImageQuality analyzeImage(const cv::Mat& frame) { + ImageQuality quality; + quality.resolution = frame.size(); + + // Convert to grayscale to analyze brightness + cv::Mat gray; + cv::cvtColor(frame, gray, cv::COLOR_RGB2GRAY); + + cv::Scalar mean, stddev; + cv::meanStdDev(gray, mean, stddev); + quality.mean_brightness = mean[0]; + quality.std_brightness = stddev[0]; + + // Compute contrast + cv::Mat laplacian; + cv::Laplacian(gray, laplacian, CV_64F); + cv::Scalar contrast_scalar = cv::mean(cv::abs(laplacian)); + quality.mean_contrast = contrast_scalar[0]; + + return quality; + } + + bool captureTestFrames(const std::string& test_name, int num_frames = 5) { + std::vector<cv::Mat> frames; + auto start_time = std::chrono::high_resolution_clock::now(); + + for (int i = 0; i < num_frames; i++) { + cv::Mat frame; + cap >> frame; + + if (frame.empty()) { + logTest(test_name + " - Frame Capture", false, "Frame " + std::to_string(i) + " is empty"); + return false; + } + + frames.push_back(frame.clone()); + + // Save sample frames + if (i == 0 || i == num_frames-1) { + std::string filename = result_dir + "/test_" + test_name + "_frame_" + std::to_string(i) + ".jpg"; + cv::imwrite(filename, frame); + } + + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + + auto end_time = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time); + double actual_fps = (num_frames * 1000.0) / duration.count(); + + // Analyze the quality of the last frame + ImageQuality quality = analyzeImage(frames.back()); + + logTest(test_name + " - Frame Capture", true, + "Resolution: " + std::to_string(quality.resolution.width) + "x" + std::to_string(quality.resolution.height) + + ", Brightness: " + std::to_string((int)quality.mean_brightness) + + ", Contrast: " + std::to_string((int)quality.mean_contrast), actual_fps); + + return true; + } + +public: + // Constructor: set the results directory + LibcameraDetailedTester(const std::string& output_dir = "test_results") : result_dir(output_dir) {} + + bool initialize() { + cap.open(0); + if (!cap.isOpened()) { + logTest("Camera Initialization", false, "Failed to open camera"); + return false; + } + + std::string backend = cap.getBackendName(); + bool is_libcamera = (backend == "LIBCAMERA"); + logTest("Backend Detection", is_libcamera, "Using: " + backend); + + return is_libcamera; + } + + void testResolutions() { + std::cout << "\n🔧 Testing Resolution Settings...\n"; + + std::vector<std::pair<int, int>> resolutions = { + {640, 480}, + {1280, 720}, + {1920, 1080} + }; + + for (auto res : resolutions) { + bool width_set = cap.set(cv::CAP_PROP_FRAME_WIDTH, res.first); + bool height_set = cap.set(cv::CAP_PROP_FRAME_HEIGHT, res.second); + + std::this_thread::sleep_for(std::chrono::milliseconds(500)); // wait for the configuration to take effect + + double actual_width = cap.get(cv::CAP_PROP_FRAME_WIDTH); + double actual_height = cap.get(cv::CAP_PROP_FRAME_HEIGHT); + + std::string test_name = "Resolution_" + std::to_string(res.first) + "x" + std::to_string(res.second); + std::string details = "Set: " + std::to_string(res.first) + "x" + std::to_string(res.second) + + " | Got: " + std::to_string((int)actual_width) + "x" + std::to_string((int)actual_height); + + logTest(test_name + " - Setting", width_set && height_set, details); + + if (width_set && height_set) { + captureTestFrames(test_name); + } + } + } + + void testFramerates() { + std::cout << "\n🎬 Testing Framerate Settings...\n"; + + // First set a standard resolution + 
cap.set(cv::CAP_PROP_FRAME_WIDTH, 640); + cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480); + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + + std::vector<int> framerates = {15, 30, 60}; + + for (int fps : framerates) { + bool fps_set = cap.set(cv::CAP_PROP_FPS, fps); + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + + double actual_fps = cap.get(cv::CAP_PROP_FPS); + + std::string test_name = "Framerate_" + std::to_string(fps) + "fps"; + std::string details = "Set: " + std::to_string(fps) + " | Got: " + std::to_string((int)actual_fps); + + logTest(test_name + " - Setting", fps_set, details); + + if (fps_set) { + captureTestFrames(test_name); + } + } + } + + void testImageQualitySettings() { + std::cout << "\n🔆 Testing Image Quality Settings...\n"; + + // Set a standard configuration + cap.set(cv::CAP_PROP_FRAME_WIDTH, 640); + cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480); + cap.set(cv::CAP_PROP_FPS, 30); + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + + std::map<std::string, std::vector<double>> quality_settings = { + {"Brightness", {0.0, 0.5, 1.0}}, + {"Contrast", {0.5, 1.0, 1.5}}, + {"Saturation", {0.5, 1.0, 1.5}}, + {"Sharpness", {0.0, 0.5, 1.0}} + }; + + std::map<std::string, int> property_map = { + {"Brightness", cv::CAP_PROP_BRIGHTNESS}, + {"Contrast", cv::CAP_PROP_CONTRAST}, + {"Saturation", cv::CAP_PROP_SATURATION}, + {"Sharpness", cv::CAP_PROP_SHARPNESS} + }; + + for (auto& setting : quality_settings) { + std::string property_name = setting.first; + int property_id = property_map[property_name]; + + for (double value : setting.second) { + bool prop_set = cap.set(property_id, value); + std::this_thread::sleep_for(std::chrono::milliseconds(300)); + + double actual_value = cap.get(property_id); + + std::string test_name = property_name + "_" + std::to_string(value); + std::string details = "Set: " + std::to_string(value) + " | Got: " + std::to_string(actual_value); + + logTest(test_name + " - Setting", prop_set, details); + + if (prop_set) { + captureTestFrames(test_name, 3); + } + } + } + } + + void testExposureControl() { + std::cout << "\n🌟 Testing Exposure Control...\n"; + + // Test auto exposure + bool auto_exp_on = cap.set(cv::CAP_PROP_AUTO_EXPOSURE, 1); + logTest("Auto Exposure ON", auto_exp_on, "Auto exposure enabled"); + if (auto_exp_on) captureTestFrames("AutoExposure_ON", 3); + + bool auto_exp_off = cap.set(cv::CAP_PROP_AUTO_EXPOSURE, 0); + logTest("Auto Exposure OFF", auto_exp_off, "Auto exposure disabled"); + + // Test manual exposure + if (auto_exp_off) { + std::vector<double> exposure_values = {0.2, 0.5, 0.8}; + for (double exp : exposure_values) { + bool exp_set = cap.set(cv::CAP_PROP_EXPOSURE, exp); + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + + double actual_exp = cap.get(cv::CAP_PROP_EXPOSURE); + + std::string test_name = "Manual_Exposure_" + std::to_string(exp); + std::string details = "Set: " + std::to_string(exp) + " | Got: " + std::to_string(actual_exp); + + logTest(test_name, exp_set, details); + if (exp_set) captureTestFrames(test_name, 3); + } + } + } + + void testWhiteBalance() { + std::cout << "\n🌡️ Testing White Balance...\n"; + + // Test auto white balance + bool auto_wb_on = cap.set(cv::CAP_PROP_AUTO_WB, 1); + logTest("Auto White Balance ON", auto_wb_on, "Auto WB enabled"); + if (auto_wb_on) captureTestFrames("AutoWB_ON", 3); + + bool auto_wb_off = cap.set(cv::CAP_PROP_AUTO_WB, 0); + logTest("Auto White Balance OFF", auto_wb_off, "Auto WB disabled"); + + // Test color-temperature settings + if (auto_wb_off) { + std::vector<int> wb_temps = {3000, 5000, 7000}; + for (int temp : wb_temps) { + bool temp_set = cap.set(cv::CAP_PROP_WB_TEMPERATURE, temp); + 
std::this_thread::sleep_for(std::chrono::milliseconds(500)); + + double actual_temp = cap.get(cv::CAP_PROP_WB_TEMPERATURE); + + std::string test_name = "WB_Temperature_" + std::to_string(temp) + "K"; + std::string details = "Set: " + std::to_string(temp) + "K | Got: " + std::to_string((int)actual_temp) + "K"; + + logTest(test_name, temp_set, details); + if (temp_set) captureTestFrames(test_name, 3); + } + } + } + + void testROI() { + std::cout << "\n📐 Testing ROI (Region of Interest)...\n"; + + // Set a standard configuration + cap.set(cv::CAP_PROP_FRAME_WIDTH, 1280); + cap.set(cv::CAP_PROP_FRAME_HEIGHT, 720); + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + + std::vector<std::tuple<double, double, double, double>> roi_settings = { + {0.0, 0.0, 1.0, 1.0}, // full frame + {0.25, 0.25, 0.5, 0.5}, // central quarter + {0.0, 0.0, 0.5, 0.5} // top-left quarter + }; + + for (auto roi : roi_settings) { + double x = std::get<0>(roi); + double y = std::get<1>(roi); + double w = std::get<2>(roi); + double h = std::get<3>(roi); + + bool roi_x_set = cap.set(cv::CAP_PROP_XI_AEAG_ROI_OFFSET_X, x); + bool roi_y_set = cap.set(cv::CAP_PROP_XI_AEAG_ROI_OFFSET_Y, y); + bool roi_w_set = cap.set(cv::CAP_PROP_XI_AEAG_ROI_WIDTH, w); + bool roi_h_set = cap.set(cv::CAP_PROP_XI_AEAG_ROI_HEIGHT, h); + + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + + std::string test_name = "ROI_" + std::to_string((int)(x*100)) + "_" + std::to_string((int)(y*100)) + + "_" + std::to_string((int)(w*100)) + "_" + std::to_string((int)(h*100)); + std::string details = "X:" + std::to_string(x) + " Y:" + std::to_string(y) + + " W:" + std::to_string(w) + " H:" + std::to_string(h); + + bool roi_set = roi_x_set && roi_y_set && roi_w_set && roi_h_set; + logTest(test_name, roi_set, details); + + if (roi_set) { + captureTestFrames(test_name, 3); + } + } + } + + void performanceTest() { + std::cout << "\n⚡ Performance & Stability Test...\n"; + + // Set high resolution and high framerate + cap.set(cv::CAP_PROP_FRAME_WIDTH, 1280); + cap.set(cv::CAP_PROP_FRAME_HEIGHT, 720); + cap.set(cv::CAP_PROP_FPS, 30); + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + + int total_frames = 100; + int captured_frames = 0; + int empty_frames = 0; + + auto start_time = std::chrono::high_resolution_clock::now(); + + for (int i = 0; i < total_frames; i++) { + cv::Mat frame; + cap >> frame; + + if (frame.empty()) { + empty_frames++; + } else { + captured_frames++; + + // Save some sample frames + if (i % 20 == 0) { + std::string filename = result_dir + "/performance_frame_" + std::to_string(i) + ".jpg"; + cv::imwrite(filename, frame); + } + } + } + + auto end_time = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time); + double actual_fps = (captured_frames * 1000.0) / duration.count(); + double success_rate = (double)captured_frames / total_frames * 100.0; + + std::string details = "Captured: " + std::to_string(captured_frames) + "/" + std::to_string(total_frames) + + " (Success: " + std::to_string((int)success_rate) + "%) | " + + "Empty: " + std::to_string(empty_frames); + + logTest("Performance Test", success_rate >= 95.0, details, actual_fps); + } + + void generateReport() { + std::cout << "\n📊 Test Summary Report\n"; + std::cout << "========================\n"; + + int passed = 0, total = 0; + for (const auto& result : results) { + total++; + if (result.passed) passed++; + } + + std::cout << "Overall Result: " << passed << "/" << total << " tests passed (" + << std::fixed << std::setprecision(1) << (100.0 * passed / total) << "%)\n\n"; + + // Save a detailed report to file + std::ofstream report(result_dir + 
"/libcamera_test_report.txt"); + report << "LibCamera Detailed Test Report\n"; + report << "Generated: " << std::chrono::duration_cast<std::chrono::seconds>( + std::chrono::system_clock::now().time_since_epoch()).count() << "\n\n"; + + for (const auto& result : results) { + report << (result.passed ? "[PASS]" : "[FAIL]") << " " + << result.test_name << ": " << result.details; + if (result.measured_value >= 0) { + report << " (Measured: " << result.measured_value << ")"; + } + report << "\n"; + } + + report.close(); + std::cout << "📄 Detailed report saved to: " << result_dir << "/libcamera_test_report.txt\n"; + } + + void runAllTests() { + std::cout << "🚀 Starting LibCamera Detailed Testing...\n"; + std::cout << "==========================================\n"; + + // Create the results directory + std::string mkdir_cmd = "mkdir -p " + result_dir; + system(mkdir_cmd.c_str()); + std::cout << "📁 Results will be saved to: " << result_dir << "/\n\n"; + + if (!initialize()) { + std::cout << "❌ Initialization failed. Exiting.\n"; + return; + } + + testResolutions(); + testFramerates(); + testImageQualitySettings(); + testExposureControl(); + testWhiteBalance(); + testROI(); + performanceTest(); + + cap.release(); + generateReport(); + + std::cout << "\n🎉 Testing completed!\n"; + } +}; + +int main() { + // Set the test results directory + std::string test_result_dir = "libcamera_test_results"; + + std::cout << "📋 LibCamera Testing Suite\n"; + std::cout << "==========================\n"; + std::cout << "Test results will be saved to: " << test_result_dir << "/\n\n"; + + LibcameraDetailedTester tester(test_result_dir); + tester.runAllTests(); + return 0; +} \ No newline at end of file

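Usage note (not part of the patch; a minimal sketch): with the backend built as above, passing initial properties to cv::VideoCapture exercises the plugin's libcamera_capture_open_with_params() entry point. The property IDs below are only illustrative; which of them take effect depends on what LibcameraCapture::setProperty() implements.

#include <opencv2/videoio.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Property/value pairs are forwarded to the backend's open-with-params path,
    // which applies each pair via setProperty() before the first frame is grabbed.
    std::vector<int> params = {
        cv::CAP_PROP_FRAME_WIDTH, 1280,
        cv::CAP_PROP_FRAME_HEIGHT, 720,
        cv::CAP_PROP_FPS, 30
    };
    cv::VideoCapture cap(0, cv::CAP_LIBCAMERA, params);
    if (!cap.isOpened())
    {
        std::cerr << "LIBCAMERA backend is not available" << std::endl;
        return 1;
    }
    cv::Mat frame;
    if (cap.read(frame) && !frame.empty())
        std::cout << "Captured " << frame.cols << "x" << frame.rows
                  << " frame via " << cap.getBackendName() << std::endl;
    return 0;
}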