#include "pipeline.hpp"
|
|
#include "logger.hpp"
|
|
#include <chrono>
|
|
#include <iomanip>
|
|
#include <filesystem>
|
|
|
|
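// Pipeline implementation: parses the configuration file, wires up the input
// sources, the TensorRT inference engine and the renderer, and drives the
// capture -> infer -> render -> output loop on a dedicated thread.
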
namespace pipeline {

namespace {

// Convert the pipeline-level RenderConfig into the renderer's RendererConfig.
renderer::RendererConfig convertToRendererConfig(const RenderConfig& config) {
    renderer::RendererConfig renderer_config;

    // Whether rendering is enabled at all.
    renderer_config.enable = config.enable;

    // Window configuration.
    renderer_config.window_name = config.window.name;
    renderer_config.window_width = config.window.width;
    renderer_config.window_height = config.window.height;
    renderer_config.fullscreen = config.window.fullscreen;

    // Helper that copies a single class style.
    auto convertClassStyle = [](const RenderConfig::ClassStyle& src) {
        renderer::RendererConfig::ClassStyle dst;
        dst.box_color = src.box_color;
        dst.text_color = src.text_color;
        dst.transparency = src.transparency;
        dst.box_thickness = src.box_thickness;
        dst.font_scale = src.font_scale;
        dst.font_thickness = src.font_thickness;
        return dst;
    };

    // Default style.
    renderer_config.default_style = convertClassStyle(config.default_style);

    // Per-class style map.
    for (const auto& [class_name, style] : config.class_styles) {
        renderer_config.class_styles[class_name] = convertClassStyle(style);
    }

    // Performance-metrics overlay configuration.
    renderer_config.metrics.show_fps = config.metrics.show_fps;
    renderer_config.metrics.show_inference_time = config.metrics.show_inference_time;
    renderer_config.metrics.show_gpu_usage = config.metrics.show_gpu_usage;
    renderer_config.metrics.update_interval_ms = config.metrics.update_interval_ms;

    return renderer_config;
}

// Convert the pipeline-level ModelConfig into an InferenceConfig.
InferenceConfig convertToInferenceConfig(const ModelConfig& config) {
    InferenceConfig inference_config;

    // Model settings.
    inference_config.model.engine_path = config.engine_path;
    inference_config.model.input_shape = config.input_shape;
    inference_config.model.precision = config.precision;

    // Detection thresholds.
    inference_config.threshold.conf = config.threshold.conf;
    inference_config.threshold.nms = config.threshold.nms;

    // Remaining settings.
    inference_config.gpu_id = config.gpu_id;

    return inference_config;
}

// Convert inference results into the renderer's format. All boxes from the
// given per-frame results are flattened into a single list; boxes and labels
// are assumed to be index-aligned within each DetectionResult.
std::vector<renderer::DetectionResult> convertToRendererResults(const std::vector<DetectionResult>& results) {
    std::vector<renderer::DetectionResult> renderer_results;
    renderer_results.reserve(results.size());

    for (const auto& result : results) {
        for (size_t i = 0; i < result.boxes.size(); ++i) {
            renderer::DetectionResult renderer_result;
            const auto& box = result.boxes[i];

            // Bounding box: convert (x1, y1, x2, y2) corners to a cv::Rect.
            renderer_result.bbox = cv::Rect(
                static_cast<int>(box.x1),
                static_cast<int>(box.y1),
                static_cast<int>(box.x2 - box.x1),
                static_cast<int>(box.y2 - box.y1)
            );

            // Remaining attributes.
            renderer_result.confidence = box.score;
            renderer_result.class_id = box.class_id;
            renderer_result.label = result.labels[i];

            renderer_results.push_back(renderer_result);
        }
    }

    return renderer_results;
}

}  // namespace

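// The anonymous-namespace helpers above are the only place where the pipeline's
// own config/result types are translated into those expected by the renderer
// and the inference engine.
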
Pipeline::Pipeline(const std::string& config_file, bool test_mode)
    : config_file_(config_file)
    , test_mode_(test_mode) {
    config_parser_ = createConfigParser();
}

Pipeline::~Pipeline() {
    stop();
    wait();
}

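// Typical lifecycle, as a minimal sketch (the actual entry point lives outside
// this file, and error handling is application-specific):
//
//   pipeline::Pipeline p("config.yaml");   // "config.yaml" is a placeholder path
//   if (p.init() && p.start()) {
//       p.wait();                          // blocks until stop() is called elsewhere
//   }
//
// The destructor also calls stop() and wait(), so early returns are safe.
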
bool Pipeline::init() {
    if (initialized_) {
        Logger::warning("Pipeline already initialized");
        return true;
    }

    // Make sure the config file exists.
    if (!std::filesystem::exists(config_file_)) {
        Logger::error("Config file not found: " + config_file_);
        return false;
    }

    // Parse the config file.
    if (!config_parser_->parse(config_file_)) {
        Logger::error("Failed to parse config file: " + config_file_);
        return false;
    }
    config_ = config_parser_->getConfig();

    // Create the input and output managers.
    input_manager_ = std::make_unique<InputManager>(config_.input.max_batch_size);
    output_manager_ = std::make_unique<OutputManager>();

    // Register the output targets.
    for (const auto& target : config_.output.targets) {
        if (!output_manager_->addTarget(target)) {
            Logger::error("Failed to add output target: " + target.name);
            return false;
        }
    }

    // Register the input sources.
    for (const auto& source : config_.input.sources) {
        if (source.type == "rtsp") {
            // RTSP stream source.
            RtspReader::Config rtsp_config;
            rtsp_config.buffer_size = source.buffer_size;
            if (!input_manager_->addSource(source.name, rtsp_config, source.url)) {
                Logger::error("Failed to add input source: " + source.name);
                return false;
            }
        } else if (source.type == "video") {
            // Video file source.
            VideoReader::Config video_config;
            video_config.buffer_size = source.buffer_size;
            video_config.loop_playback = true;  // Loop the file to keep the stream alive.
            if (!input_manager_->addVideoSource(source.name, video_config, source.url)) {
                Logger::error("Failed to add input source: " + source.name);
                return false;
            }
        } else {
            Logger::error("Unsupported input source type: " + source.type);
            return false;
        }

        // Map this input source to its output targets.
        if (!source.outputs.empty()) {
            if (!output_manager_->addSourceTargetMapping(source.name, source.outputs)) {
                Logger::error("Failed to set output mapping for source: " + source.name);
                return false;
            }
        }
    }

    // In test mode, skip inference engine initialization.
    if (!test_mode_) {
        // Initialize the inference engine with the converted config.
        auto inference_config = convertToInferenceConfig(config_.inference);
        inference_engine_ = std::make_unique<TrtInference>(inference_config);
        if (!inference_engine_->loadEngine()) {
            Logger::error("Failed to load inference engine");
            return false;
        }
    }

    // Initialize the renderer with the converted config.
    renderer_ = std::make_unique<Renderer>();
    auto renderer_config = convertToRendererConfig(config_.render);
    renderer_config.test_mode = test_mode_;  // Propagate test mode to the renderer.
    if (!renderer_->init(renderer_config)) {
        Logger::error("Failed to initialize renderer");
        return false;
    }

    initialized_ = true;
    Logger::info("Pipeline initialized successfully");
    return true;
}

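// For reference, the fields read in init() imply a configuration roughly shaped
// like the sketch below. This is illustrative only: the concrete file format and
// exact key names are defined by the ConfigParser implementation, and every value
// shown here is a made-up example.
//
//   input:
//     max_batch_size: 4
//     sources:
//       - name: cam0
//         type: rtsp            # or "video"
//         url: rtsp://<host>/stream
//         buffer_size: 8
//         outputs: [display]
//   output:
//     targets:
//       - name: display
//   inference:
//     engine_path: model.engine
//     input_shape: [1, 3, 640, 640]
//     precision: fp16
//     gpu_id: 0
//     threshold: { conf: 0.25, nms: 0.45 }
//   render:
//     enable: true
//     window: { name: preview, width: 1280, height: 720, fullscreen: false }
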
bool Pipeline::start() {
    if (!initialized_) {
        Logger::error("Pipeline not initialized");
        return false;
    }

    if (running_) {
        Logger::warning("Pipeline already running");
        return true;
    }

    running_ = true;
    pipeline_thread_ = std::make_unique<std::thread>(&Pipeline::mainLoop, this);
    Logger::info("Pipeline started");
    return true;
}

void Pipeline::stop() {
    if (running_) {
        running_ = false;
        Logger::info("Pipeline stopping...");
    }
}

void Pipeline::wait() {
    if (pipeline_thread_ && pipeline_thread_->joinable()) {
        pipeline_thread_->join();
        Logger::info("Pipeline stopped");
    }
}

bool Pipeline::getMetrics(PerformanceMetrics& metrics) const {
    if (!running_) {
        Logger::error("Cannot get metrics: Pipeline is not running");
        return false;
    }

    std::lock_guard<std::mutex> lock(metrics_mutex_);
    metrics = current_metrics_;
    return true;
}

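// Minimal polling sketch for a caller (illustrative; the reporting interval and
// output channel are up to the application):
//
//   pipeline::PerformanceMetrics m;
//   if (p.getMetrics(m)) {
//       Logger::info("fps=" + std::to_string(m.fps) +
//                    " infer_ms=" + std::to_string(m.inference_time_ms));
//   }
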
void Pipeline::mainLoop() {
    std::vector<cv::Mat> batch_frames;
    std::vector<DetectionResult> batch_results;
    auto start_time = std::chrono::steady_clock::now();

    while (running_) {
        // Grab the next batch of frames; back off briefly if none is available yet.
        if (!input_manager_->getNextBatch(batch_frames)) {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            continue;
        }

        if (!test_mode_) {
            // Run inference on the batch.
            if (!inference_engine_->infer(batch_frames, batch_results)) {
                Logger::warning("Inference failed");
                continue;
            }
        } else {
            // In test mode, skip inference and produce one empty result per frame.
            batch_results.clear();
            for (size_t i = 0; i < batch_frames.size(); ++i) {
                batch_results.emplace_back();
            }
        }

        // Elapsed time since the previous iteration finished. Note that this
        // includes frame acquisition, so it is an upper bound on the pure
        // inference time.
        auto end_time = std::chrono::steady_clock::now();
        float inference_time = std::chrono::duration<float, std::milli>(end_time - start_time).count();
        start_time = end_time;

        // Update the performance metrics.
        updateMetrics(inference_time);

        // Render and emit each frame in the batch.
        for (size_t i = 0; i < batch_frames.size(); ++i) {
            // Convert this frame's detections to the renderer format.
            std::vector<renderer::DetectionResult> renderer_results;
            if (i < batch_results.size()) {
                renderer_results = convertToRendererResults({batch_results[i]});
            }

            // Draw detections and metrics onto the frame.
            if (!renderer_->render(batch_frames[i], renderer_results, current_metrics_)) {
                Logger::warning("Rendering failed");
                continue;
            }

            // Forward the rendered frame to the output targets.
            if (!output_manager_->writeFrames(batch_frames[i])) {
                Logger::warning("Failed to write frame");
            }
        }
    }
}

void Pipeline::updateMetrics(float inference_time_ms) {
    std::lock_guard<std::mutex> lock(metrics_mutex_);

    // Update the FPS estimate roughly once per second (counted once per processed batch).
    frame_count_++;
    auto now = std::chrono::steady_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::seconds>(now - last_fps_update_).count();
    if (duration >= 1) {
        current_metrics_.fps = static_cast<float>(frame_count_) / duration;
        frame_count_ = 0;
        last_fps_update_ = now;
    }

    // Update the remaining metrics.
    current_metrics_.inference_time_ms = inference_time_ms;
    // TODO: add GPU utilization monitoring.
    current_metrics_.gpu_usage_percent = 0.0f;
}

}  // namespace pipeline