项目补充开发
This commit is contained in:
parent
2b3faa2706
commit
edb8f48f8b
@ -50,6 +50,8 @@ struct NodeSnapshot {
|
||||
uint64_t drop_total = 0;
|
||||
uint64_t error_total = 0;
|
||||
double avg_process_time_ms = 0.0;
|
||||
|
||||
SimpleJson custom_metrics;
|
||||
};
|
||||
|
||||
struct GraphSnapshot {
|
||||
@ -57,6 +59,10 @@ struct GraphSnapshot {
|
||||
bool running = false;
|
||||
uint64_t timestamp_ms = 0;
|
||||
double total_fps = 0.0;
|
||||
|
||||
uint64_t alarm_total = 0;
|
||||
uint64_t publish_clients = 0;
|
||||
|
||||
std::vector<NodeSnapshot> nodes;
|
||||
std::vector<EdgeSnapshot> edges;
|
||||
};
|
||||
@ -77,6 +83,8 @@ public:
|
||||
GraphSnapshot Snapshot() const;
|
||||
bool FindNodeSnapshotById(const std::string& node_id, NodeSnapshot& out) const;
|
||||
|
||||
bool UpdateNodeConfig(const std::string& node_id, const SimpleJson& new_node_cfg, std::string& err);
|
||||
|
||||
// Attempt in-place update via INode::UpdateConfig for nodes whose config changed.
|
||||
// Returns true if fully updated without rebuild.
|
||||
// Returns false with empty err if rebuild is required.
|
||||
@ -137,6 +145,7 @@ public:
|
||||
static bool LoadConfigFile(const std::string& path, SimpleJson& out, std::string& err);
|
||||
|
||||
bool Build(const SimpleJson& root_cfg, std::string& err);
|
||||
bool BuildFromFile(const std::string& path, std::string& err);
|
||||
bool StartAll();
|
||||
void StopAll();
|
||||
void RequestStop();
|
||||
@ -144,6 +153,13 @@ public:
|
||||
|
||||
bool ReloadFromFile(const std::string& path, std::string& err);
|
||||
|
||||
const std::string& ConfigPath() const { return config_path_; }
|
||||
const std::string& LastGoodPath() const { return last_good_path_; }
|
||||
bool RollbackFromLastGood(std::string& err);
|
||||
|
||||
bool UpdateNodeConfig(const std::string& node_id, const std::optional<std::string>& graph,
|
||||
const SimpleJson& new_node_cfg, std::string& err);
|
||||
|
||||
std::vector<GraphSnapshot> ListGraphSnapshots();
|
||||
bool GetGraphSnapshot(const std::string& name, GraphSnapshot& out, std::string& err);
|
||||
// If graph is not set: auto-match when unique, otherwise return false with err describing ambiguity.
|
||||
@ -155,6 +171,8 @@ private:
|
||||
PluginLoader loader_;
|
||||
std::vector<std::unique_ptr<Graph>> graphs_;
|
||||
SimpleJson last_good_root_;
|
||||
std::string config_path_;
|
||||
std::string last_good_path_;
|
||||
size_t default_queue_size_ = 8;
|
||||
QueueDropStrategy default_strategy_ = QueueDropStrategy::DropOldest;
|
||||
std::mutex graphs_mu_;
|
||||
|
||||
@ -44,11 +44,15 @@ public:
|
||||
// Dynamic config update without restart. Returns true if update succeeded.
|
||||
virtual bool UpdateConfig(const SimpleJson& /*new_config*/) { return false; }
|
||||
|
||||
// Optional custom metrics for graph-level aggregation/observability.
|
||||
// Return true if out is filled.
|
||||
virtual bool GetCustomMetrics(SimpleJson& /*out*/) const { return false; }
|
||||
|
||||
// Called before Stop() to flush internal buffers (e.g., finish writing files).
|
||||
virtual void Drain() {}
|
||||
};
|
||||
|
||||
constexpr int kNodeAbiVersion = 1;
|
||||
constexpr int kNodeAbiVersion = 2;
|
||||
|
||||
using CreateNodeFn = INode* (*)();
|
||||
using DestroyNodeFn = void (*)(INode*);
|
||||
|
||||
206
include/utils/config_schema.h
Normal file
206
include/utils/config_schema.h
Normal file
@ -0,0 +1,206 @@
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "utils/simple_json.h"
|
||||
|
||||
namespace rk3588 {
|
||||
|
||||
inline bool IsStringNonEmpty(const SimpleJson* v) {
|
||||
return v && v->IsString() && !v->AsString("").empty();
|
||||
}
|
||||
|
||||
inline bool IsDropStrategy(const std::string& s) {
|
||||
return s == "drop_oldest" || s == "drop_newest" || s == "block";
|
||||
}
|
||||
|
||||
inline bool ValidateQueueCfg(const SimpleJson& q, std::string& err) {
|
||||
if (!q.IsObject()) {
|
||||
err = "queue must be object";
|
||||
return false;
|
||||
}
|
||||
if (const SimpleJson* s = q.Find("size")) {
|
||||
if (!s->IsNumber() || s->AsInt(0) <= 0) {
|
||||
err = "queue.size must be positive number";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
std::string policy;
|
||||
if (const SimpleJson* p = q.Find("policy")) {
|
||||
if (!p->IsString()) {
|
||||
err = "queue.policy must be string";
|
||||
return false;
|
||||
}
|
||||
policy = p->AsString("");
|
||||
}
|
||||
if (policy.empty()) {
|
||||
if (const SimpleJson* p = q.Find("strategy")) {
|
||||
if (!p->IsString()) {
|
||||
err = "queue.strategy must be string";
|
||||
return false;
|
||||
}
|
||||
policy = p->AsString("");
|
||||
}
|
||||
}
|
||||
if (!policy.empty() && !IsDropStrategy(policy)) {
|
||||
err = "queue policy/strategy must be one of: drop_oldest, drop_newest, block";
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
inline bool ValidateNodeCfg(const SimpleJson& node, std::string& err) {
|
||||
if (!node.IsObject()) {
|
||||
err = "node entry must be object";
|
||||
return false;
|
||||
}
|
||||
if (!IsStringNonEmpty(node.Find("id"))) {
|
||||
err = "node.id must be non-empty string";
|
||||
return false;
|
||||
}
|
||||
if (!IsStringNonEmpty(node.Find("type"))) {
|
||||
err = "node.type must be non-empty string";
|
||||
return false;
|
||||
}
|
||||
if (const SimpleJson* en = node.Find("enable")) {
|
||||
if (!en->IsBool()) {
|
||||
err = "node.enable must be bool";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (const SimpleJson* role = node.Find("role")) {
|
||||
if (!role->IsString()) {
|
||||
err = "node.role must be string";
|
||||
return false;
|
||||
}
|
||||
const std::string r = role->AsString("");
|
||||
if (!r.empty() && r != "source" && r != "filter" && r != "sink") {
|
||||
err = "node.role must be one of: source, filter, sink";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (const SimpleJson* q = node.Find("queue")) {
|
||||
std::string qerr;
|
||||
if (!ValidateQueueCfg(*q, qerr)) {
|
||||
err = "node.queue invalid: " + qerr;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (const SimpleJson* aff = node.Find("cpu_affinity")) {
|
||||
if (!aff->IsArray()) {
|
||||
err = "node.cpu_affinity must be array";
|
||||
return false;
|
||||
}
|
||||
for (const auto& c : aff->AsArray()) {
|
||||
if (!c.IsNumber() || c.AsInt(-1) < 0) {
|
||||
err = "node.cpu_affinity entries must be non-negative numbers";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
inline bool ValidateEdgeCfg(const SimpleJson& edge, std::string& err) {
|
||||
if (edge.IsArray()) {
|
||||
const auto& a = edge.AsArray();
|
||||
if (a.size() < 2) {
|
||||
err = "edge array must be [from, to]";
|
||||
return false;
|
||||
}
|
||||
if (!a[0].IsString() || !a[1].IsString()) {
|
||||
err = "edge array entries must be strings";
|
||||
return false;
|
||||
}
|
||||
if (a.size() >= 3) {
|
||||
std::string qerr;
|
||||
if (!ValidateQueueCfg(a[2], qerr)) {
|
||||
err = "edge queue invalid: " + qerr;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
if (edge.IsObject()) {
|
||||
if (!IsStringNonEmpty(edge.Find("from")) || !IsStringNonEmpty(edge.Find("to"))) {
|
||||
err = "edge object must have non-empty string 'from' and 'to'";
|
||||
return false;
|
||||
}
|
||||
if (const SimpleJson* q = edge.Find("queue")) {
|
||||
std::string qerr;
|
||||
if (!ValidateQueueCfg(*q, qerr)) {
|
||||
err = "edge.queue invalid: " + qerr;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
err = "edge entry must be array or object";
|
||||
return false;
|
||||
}
|
||||
|
||||
inline bool ValidateExpandedRootConfig(const SimpleJson& root, std::string& err) {
|
||||
if (!root.IsObject()) {
|
||||
err = "root config must be object";
|
||||
return false;
|
||||
}
|
||||
|
||||
if (const SimpleJson* q = root.Find("queue")) {
|
||||
std::string qerr;
|
||||
if (!ValidateQueueCfg(*q, qerr)) {
|
||||
err = "root.queue invalid: " + qerr;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (const SimpleJson* g = root.Find("global")) {
|
||||
if (!g->IsObject()) {
|
||||
err = "root.global must be object";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
const SimpleJson* graphs = root.Find("graphs");
|
||||
if (!graphs || !graphs->IsArray()) {
|
||||
err = "root missing 'graphs' array";
|
||||
return false;
|
||||
}
|
||||
|
||||
for (const auto& gv : graphs->AsArray()) {
|
||||
if (!gv.IsObject()) {
|
||||
err = "graph entry must be object";
|
||||
return false;
|
||||
}
|
||||
if (!IsStringNonEmpty(gv.Find("name"))) {
|
||||
err = "graph.name must be non-empty string";
|
||||
return false;
|
||||
}
|
||||
const SimpleJson* nodes = gv.Find("nodes");
|
||||
const SimpleJson* edges = gv.Find("edges");
|
||||
if (!nodes || !nodes->IsArray()) {
|
||||
err = "graph.nodes must be array";
|
||||
return false;
|
||||
}
|
||||
if (!edges || !edges->IsArray()) {
|
||||
err = "graph.edges must be array";
|
||||
return false;
|
||||
}
|
||||
for (const auto& nv : nodes->AsArray()) {
|
||||
std::string nerr;
|
||||
if (!ValidateNodeCfg(nv, nerr)) {
|
||||
err = "graph.nodes invalid: " + nerr;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
for (const auto& ev : edges->AsArray()) {
|
||||
std::string eerr;
|
||||
if (!ValidateEdgeCfg(ev, eerr)) {
|
||||
err = "graph.edges invalid: " + eerr;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace rk3588
|
||||
99
include/utils/logger.h
Normal file
99
include/utils/logger.h
Normal file
@ -0,0 +1,99 @@
|
||||
#pragma once
|
||||
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <deque>
|
||||
#include <iomanip>
|
||||
#include <iostream>
|
||||
#include <mutex>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace rk3588 {
|
||||
|
||||
enum class LogLevel { Debug, Info, Warn, Error };
|
||||
|
||||
class Logger {
|
||||
public:
|
||||
static Logger& Instance() {
|
||||
static Logger inst;
|
||||
return inst;
|
||||
}
|
||||
|
||||
void SetMaxLines(size_t n) {
|
||||
std::lock_guard<std::mutex> lock(mu_);
|
||||
max_lines_ = (n == 0) ? 1 : n;
|
||||
TrimLocked();
|
||||
}
|
||||
|
||||
void Log(LogLevel lvl, const std::string& msg) {
|
||||
const std::string line = FormatLine(lvl, msg);
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(mu_);
|
||||
lines_.push_back(line);
|
||||
TrimLocked();
|
||||
}
|
||||
|
||||
// Keep existing stdout/stderr logging style for easy board-side debugging.
|
||||
if (lvl == LogLevel::Warn || lvl == LogLevel::Error) {
|
||||
std::cerr << line << "\n";
|
||||
} else {
|
||||
std::cout << line << "\n";
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> RecentLines(size_t limit) const {
|
||||
std::lock_guard<std::mutex> lock(mu_);
|
||||
if (limit == 0) return {};
|
||||
if (limit > lines_.size()) limit = lines_.size();
|
||||
std::vector<std::string> out;
|
||||
out.reserve(limit);
|
||||
const size_t start = lines_.size() - limit;
|
||||
for (size_t i = start; i < lines_.size(); ++i) {
|
||||
out.push_back(lines_[i]);
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
private:
|
||||
Logger() = default;
|
||||
|
||||
static const char* LevelText(LogLevel lvl) {
|
||||
switch (lvl) {
|
||||
case LogLevel::Debug: return "D";
|
||||
case LogLevel::Info: return "I";
|
||||
case LogLevel::Warn: return "W";
|
||||
case LogLevel::Error: return "E";
|
||||
}
|
||||
return "I";
|
||||
}
|
||||
|
||||
static std::string FormatLine(LogLevel lvl, const std::string& msg) {
|
||||
using namespace std::chrono;
|
||||
const auto now = system_clock::now();
|
||||
const auto ms = duration_cast<milliseconds>(now.time_since_epoch()).count();
|
||||
std::ostringstream oss;
|
||||
oss << "[" << ms << "]";
|
||||
oss << "[" << LevelText(lvl) << "] ";
|
||||
oss << msg;
|
||||
return oss.str();
|
||||
}
|
||||
|
||||
void TrimLocked() {
|
||||
while (lines_.size() > max_lines_) {
|
||||
lines_.pop_front();
|
||||
}
|
||||
}
|
||||
|
||||
mutable std::mutex mu_;
|
||||
std::deque<std::string> lines_;
|
||||
size_t max_lines_ = 2000;
|
||||
};
|
||||
|
||||
inline void LogDebug(const std::string& msg) { Logger::Instance().Log(LogLevel::Debug, msg); }
|
||||
inline void LogInfo(const std::string& msg) { Logger::Instance().Log(LogLevel::Info, msg); }
|
||||
inline void LogWarn(const std::string& msg) { Logger::Instance().Log(LogLevel::Warn, msg); }
|
||||
inline void LogError(const std::string& msg) { Logger::Instance().Log(LogLevel::Error, msg); }
|
||||
|
||||
} // namespace rk3588
|
||||
80
include/utils/simple_json_writer.h
Normal file
80
include/utils/simple_json_writer.h
Normal file
@ -0,0 +1,80 @@
|
||||
#pragma once
|
||||
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
|
||||
#include "utils/simple_json.h"
|
||||
|
||||
namespace rk3588 {
|
||||
|
||||
inline std::string JsonEscapeString(std::string_view s) {
|
||||
std::string out;
|
||||
out.reserve(s.size() + 8);
|
||||
for (char c : s) {
|
||||
switch (c) {
|
||||
case '\\': out += "\\\\"; break;
|
||||
case '"': out += "\\\""; break;
|
||||
case '\n': out += "\\n"; break;
|
||||
case '\r': out += "\\r"; break;
|
||||
case '\t': out += "\\t"; break;
|
||||
default:
|
||||
if (static_cast<unsigned char>(c) < 0x20) {
|
||||
out += "?";
|
||||
} else {
|
||||
out.push_back(c);
|
||||
}
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
inline void StringifySimpleJsonTo(const SimpleJson& v, std::ostringstream& oss);
|
||||
|
||||
inline void StringifyArrayTo(const SimpleJson::Array& arr, std::ostringstream& oss) {
|
||||
oss << '[';
|
||||
for (size_t i = 0; i < arr.size(); ++i) {
|
||||
if (i) oss << ',';
|
||||
StringifySimpleJsonTo(arr[i], oss);
|
||||
}
|
||||
oss << ']';
|
||||
}
|
||||
|
||||
inline void StringifyObjectTo(const SimpleJson::Object& obj, std::ostringstream& oss) {
|
||||
oss << '{';
|
||||
bool first = true;
|
||||
for (const auto& kv : obj) {
|
||||
if (!first) oss << ',';
|
||||
first = false;
|
||||
oss << '"' << JsonEscapeString(kv.first) << '"' << ':';
|
||||
StringifySimpleJsonTo(kv.second, oss);
|
||||
}
|
||||
oss << '}';
|
||||
}
|
||||
|
||||
inline void StringifySimpleJsonTo(const SimpleJson& v, std::ostringstream& oss) {
|
||||
if (v.IsNull()) {
|
||||
oss << "null";
|
||||
} else if (v.IsBool()) {
|
||||
oss << (v.AsBool(false) ? "true" : "false");
|
||||
} else if (v.IsNumber()) {
|
||||
// Keep simple.
|
||||
oss << v.AsNumber(0.0);
|
||||
} else if (v.IsString()) {
|
||||
oss << '"' << JsonEscapeString(v.AsString("")) << '"';
|
||||
} else if (v.IsArray()) {
|
||||
StringifyArrayTo(v.AsArray(), oss);
|
||||
} else if (v.IsObject()) {
|
||||
StringifyObjectTo(v.AsObject(), oss);
|
||||
} else {
|
||||
oss << "null";
|
||||
}
|
||||
}
|
||||
|
||||
inline std::string StringifySimpleJson(const SimpleJson& v) {
|
||||
std::ostringstream oss;
|
||||
StringifySimpleJsonTo(v, oss);
|
||||
return oss.str();
|
||||
}
|
||||
|
||||
} // namespace rk3588
|
||||
71
include/utils/thread_affinity.h
Normal file
71
include/utils/thread_affinity.h
Normal file
@ -0,0 +1,71 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "utils/simple_json.h"
|
||||
|
||||
#if defined(__linux__)
|
||||
#include <pthread.h>
|
||||
#include <sched.h>
|
||||
#elif defined(_WIN32)
|
||||
#ifndef NOMINMAX
|
||||
#define NOMINMAX
|
||||
#endif
|
||||
#include <windows.h>
|
||||
#endif
|
||||
|
||||
namespace rk3588 {
|
||||
|
||||
inline std::vector<int> ParseCpuAffinity(const SimpleJson& node_cfg) {
|
||||
std::vector<int> out;
|
||||
const SimpleJson* a = node_cfg.Find("cpu_affinity");
|
||||
if (!a || !a->IsArray()) return out;
|
||||
for (const auto& v : a->AsArray()) {
|
||||
if (!v.IsNumber()) continue;
|
||||
const int cpu = v.AsInt(-1);
|
||||
if (cpu >= 0) out.push_back(cpu);
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
inline bool SetCurrentThreadAffinity(const std::vector<int>& cpus, std::string& err) {
|
||||
err.clear();
|
||||
if (cpus.empty()) return true;
|
||||
|
||||
#if defined(__linux__)
|
||||
cpu_set_t set;
|
||||
CPU_ZERO(&set);
|
||||
for (int cpu : cpus) {
|
||||
if (cpu < 0 || cpu >= CPU_SETSIZE) continue;
|
||||
CPU_SET(cpu, &set);
|
||||
}
|
||||
const int rc = pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
|
||||
if (rc != 0) {
|
||||
err = "pthread_setaffinity_np failed";
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
#elif defined(_WIN32)
|
||||
// Windows uses a bitmask; best-effort for up to 64 CPUs in the current group.
|
||||
DWORD_PTR mask = 0;
|
||||
for (int cpu : cpus) {
|
||||
if (cpu < 0 || cpu >= static_cast<int>(8 * sizeof(DWORD_PTR))) continue;
|
||||
mask |= (static_cast<DWORD_PTR>(1) << cpu);
|
||||
}
|
||||
if (mask == 0) return true;
|
||||
const DWORD_PTR prev = SetThreadAffinityMask(GetCurrentThread(), mask);
|
||||
if (prev == 0) {
|
||||
err = "SetThreadAffinityMask failed";
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
#else
|
||||
(void)cpus;
|
||||
err = "thread affinity not supported on this platform";
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace rk3588
|
||||
@ -234,6 +234,15 @@ public:
|
||||
return true;
|
||||
}
|
||||
|
||||
bool GetCustomMetrics(SimpleJson& out) const override {
|
||||
std::lock_guard<std::mutex> lock(mu_);
|
||||
SimpleJson::Object o;
|
||||
o["alarm_total"] = SimpleJson(static_cast<double>(alarm_count_));
|
||||
o["processed"] = SimpleJson(static_cast<double>(processed_frames_));
|
||||
out = SimpleJson(std::move(o));
|
||||
return true;
|
||||
}
|
||||
|
||||
NodeStatus Process(FramePtr frame) override {
|
||||
if (!frame) return NodeStatus::DROP;
|
||||
|
||||
@ -275,7 +284,7 @@ private:
|
||||
std::vector<std::unique_ptr<IAlarmAction>> actions_;
|
||||
ClipAction* clip_action_ = nullptr;
|
||||
|
||||
std::mutex mu_;
|
||||
mutable std::mutex mu_;
|
||||
|
||||
std::shared_ptr<SpscQueue<FramePtr>> input_queue_;
|
||||
uint64_t processed_frames_ = 0;
|
||||
|
||||
@ -6,6 +6,7 @@
|
||||
#include <vector>
|
||||
|
||||
#include "node.h"
|
||||
#include "utils/thread_affinity.h"
|
||||
|
||||
#if defined(RK3588_ENABLE_MPP)
|
||||
extern "C" {
|
||||
@ -42,6 +43,10 @@ public:
|
||||
use_ffmpeg_ = config.ValueOr<bool>("use_ffmpeg", false);
|
||||
use_mpp_ = config.ValueOr<bool>("use_mpp", true);
|
||||
ffmpeg_force_tcp_ = config.ValueOr<bool>("force_tcp", true);
|
||||
reconnect_sec_ = config.ValueOr<int>("reconnect_sec", 5);
|
||||
reconnect_backoff_max_sec_ = config.ValueOr<int>("reconnect_backoff_max_sec", 30);
|
||||
fallback_to_stub_on_fail_ = config.ValueOr<bool>("fallback_to_stub_on_fail", false);
|
||||
cpu_affinity_ = ParseCpuAffinity(config);
|
||||
if (ctx.output_queues.empty()) {
|
||||
std::cerr << "[input_rtsp] no downstream queue configured for node " << id_ << "\n";
|
||||
return false;
|
||||
@ -98,6 +103,14 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
void ApplyAffinity() {
|
||||
if (cpu_affinity_.empty()) return;
|
||||
std::string aerr;
|
||||
if (!SetCurrentThreadAffinity(cpu_affinity_, aerr)) {
|
||||
std::cerr << "[input_rtsp] SetCurrentThreadAffinity failed: " << aerr << "\n";
|
||||
}
|
||||
}
|
||||
|
||||
void PushToDownstream(FramePtr frame) {
|
||||
for (auto& q : out_queues_) {
|
||||
q->Push(frame);
|
||||
@ -105,6 +118,7 @@ private:
|
||||
}
|
||||
|
||||
void LoopStub() {
|
||||
ApplyAffinity();
|
||||
using namespace std::chrono;
|
||||
auto frame_interval = fps_ > 0 ? milliseconds(1000 / fps_) : milliseconds(40);
|
||||
while (running_.load()) {
|
||||
@ -130,79 +144,103 @@ private:
|
||||
}
|
||||
#if defined(RK3588_ENABLE_FFMPEG)
|
||||
void LoopFfmpegCpu() {
|
||||
ApplyAffinity();
|
||||
using namespace std::chrono;
|
||||
AVFormatContext* fmt_ctx = nullptr;
|
||||
AVCodecContext* codec_ctx = nullptr;
|
||||
AVPacket* pkt = av_packet_alloc();
|
||||
AVFrame* frm = av_frame_alloc();
|
||||
int video_stream = -1;
|
||||
AVRational time_base{1, 1000};
|
||||
|
||||
AVDictionary* opts = nullptr;
|
||||
if (ffmpeg_force_tcp_) av_dict_set(&opts, "rtsp_transport", "tcp", 0);
|
||||
if (avformat_open_input(&fmt_ctx, url_.c_str(), nullptr, &opts) < 0) {
|
||||
std::cerr << "[input_rtsp] avformat_open_input failed: " << url_ << "\n";
|
||||
av_dict_free(&opts);
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
av_dict_free(&opts);
|
||||
if (avformat_find_stream_info(fmt_ctx, nullptr) < 0) {
|
||||
std::cerr << "[input_rtsp] avformat_find_stream_info failed\n";
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
for (unsigned i = 0; i < fmt_ctx->nb_streams; ++i) {
|
||||
if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
video_stream = static_cast<int>(i);
|
||||
time_base = fmt_ctx->streams[i]->time_base;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (video_stream < 0) {
|
||||
std::cerr << "[input_rtsp] no video stream\n";
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
|
||||
const AVCodec* codec = avcodec_find_decoder(fmt_ctx->streams[video_stream]->codecpar->codec_id);
|
||||
if (!codec) {
|
||||
std::cerr << "[input_rtsp] decoder not found\n";
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
codec_ctx = avcodec_alloc_context3(codec);
|
||||
avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[video_stream]->codecpar);
|
||||
if (avcodec_open2(codec_ctx, codec, nullptr) < 0) {
|
||||
std::cerr << "[input_rtsp] avcodec_open2 failed\n";
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
int backoff = std::max(1, reconnect_sec_);
|
||||
const int backoff_max = std::max(backoff, reconnect_backoff_max_sec_);
|
||||
|
||||
while (running_.load()) {
|
||||
if (av_read_frame(fmt_ctx, pkt) < 0) {
|
||||
std::this_thread::sleep_for(milliseconds(10));
|
||||
AVFormatContext* fmt_ctx = nullptr;
|
||||
AVCodecContext* codec_ctx = nullptr;
|
||||
AVPacket* pkt = av_packet_alloc();
|
||||
AVFrame* frm = av_frame_alloc();
|
||||
int video_stream = -1;
|
||||
AVRational time_base{1, 1000};
|
||||
|
||||
AVDictionary* opts = nullptr;
|
||||
if (ffmpeg_force_tcp_) av_dict_set(&opts, "rtsp_transport", "tcp", 0);
|
||||
if (avformat_open_input(&fmt_ctx, url_.c_str(), nullptr, &opts) < 0) {
|
||||
std::cerr << "[input_rtsp] avformat_open_input failed: " << url_ << "\n";
|
||||
av_dict_free(&opts);
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
if (fallback_to_stub_on_fail_) {
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
std::this_thread::sleep_for(seconds(backoff));
|
||||
backoff = std::min(backoff_max, backoff * 2);
|
||||
continue;
|
||||
}
|
||||
if (pkt->stream_index != video_stream) {
|
||||
av_packet_unref(pkt);
|
||||
av_dict_free(&opts);
|
||||
if (avformat_find_stream_info(fmt_ctx, nullptr) < 0) {
|
||||
std::cerr << "[input_rtsp] avformat_find_stream_info failed\n";
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
std::this_thread::sleep_for(seconds(backoff));
|
||||
backoff = std::min(backoff_max, backoff * 2);
|
||||
continue;
|
||||
}
|
||||
if (avcodec_send_packet(codec_ctx, pkt) == 0) {
|
||||
while (avcodec_receive_frame(codec_ctx, frm) == 0) {
|
||||
PushFrameFromAVFrame(*frm, time_base);
|
||||
av_frame_unref(frm);
|
||||
for (unsigned i = 0; i < fmt_ctx->nb_streams; ++i) {
|
||||
if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
video_stream = static_cast<int>(i);
|
||||
time_base = fmt_ctx->streams[i]->time_base;
|
||||
break;
|
||||
}
|
||||
}
|
||||
av_packet_unref(pkt);
|
||||
}
|
||||
if (video_stream < 0) {
|
||||
std::cerr << "[input_rtsp] no video stream\n";
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
std::this_thread::sleep_for(seconds(backoff));
|
||||
backoff = std::min(backoff_max, backoff * 2);
|
||||
continue;
|
||||
}
|
||||
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
const AVCodec* codec = avcodec_find_decoder(fmt_ctx->streams[video_stream]->codecpar->codec_id);
|
||||
if (!codec) {
|
||||
std::cerr << "[input_rtsp] decoder not found\n";
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
std::this_thread::sleep_for(seconds(backoff));
|
||||
backoff = std::min(backoff_max, backoff * 2);
|
||||
continue;
|
||||
}
|
||||
codec_ctx = avcodec_alloc_context3(codec);
|
||||
avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[video_stream]->codecpar);
|
||||
if (avcodec_open2(codec_ctx, codec, nullptr) < 0) {
|
||||
std::cerr << "[input_rtsp] avcodec_open2 failed\n";
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
std::this_thread::sleep_for(seconds(backoff));
|
||||
backoff = std::min(backoff_max, backoff * 2);
|
||||
continue;
|
||||
}
|
||||
|
||||
backoff = std::max(1, reconnect_sec_); // reset after successful open
|
||||
int read_fail = 0;
|
||||
while (running_.load()) {
|
||||
if (av_read_frame(fmt_ctx, pkt) < 0) {
|
||||
if (++read_fail >= 50) {
|
||||
break;
|
||||
}
|
||||
std::this_thread::sleep_for(milliseconds(10));
|
||||
continue;
|
||||
}
|
||||
read_fail = 0;
|
||||
if (pkt->stream_index != video_stream) {
|
||||
av_packet_unref(pkt);
|
||||
continue;
|
||||
}
|
||||
if (avcodec_send_packet(codec_ctx, pkt) == 0) {
|
||||
while (avcodec_receive_frame(codec_ctx, frm) == 0) {
|
||||
PushFrameFromAVFrame(*frm, time_base);
|
||||
av_frame_unref(frm);
|
||||
}
|
||||
}
|
||||
av_packet_unref(pkt);
|
||||
}
|
||||
|
||||
Cleanup(fmt_ctx, codec_ctx, pkt, frm);
|
||||
if (!running_.load()) break;
|
||||
std::this_thread::sleep_for(seconds(std::max(1, reconnect_sec_)));
|
||||
}
|
||||
}
|
||||
|
||||
void PushFrameFromAVFrame(const AVFrame& f, AVRational time_base) {
|
||||
@ -383,78 +421,100 @@ private:
|
||||
};
|
||||
|
||||
void LoopFfmpegMpp() {
|
||||
ApplyAffinity();
|
||||
using namespace std::chrono;
|
||||
AVFormatContext* fmt_ctx = nullptr;
|
||||
AVPacket* pkt = av_packet_alloc();
|
||||
int video_stream = -1;
|
||||
AVRational time_base{1, 1000};
|
||||
|
||||
AVDictionary* opts = nullptr;
|
||||
if (ffmpeg_force_tcp_) av_dict_set(&opts, "rtsp_transport", "tcp", 0);
|
||||
if (avformat_open_input(&fmt_ctx, url_.c_str(), nullptr, &opts) < 0) {
|
||||
std::cerr << "[input_rtsp] avformat_open_input failed: " << url_ << "\n";
|
||||
av_dict_free(&opts);
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
av_dict_free(&opts);
|
||||
if (avformat_find_stream_info(fmt_ctx, nullptr) < 0) {
|
||||
std::cerr << "[input_rtsp] avformat_find_stream_info failed\n";
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
for (unsigned i = 0; i < fmt_ctx->nb_streams; ++i) {
|
||||
if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
video_stream = static_cast<int>(i);
|
||||
time_base = fmt_ctx->streams[i]->time_base;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (video_stream < 0) {
|
||||
std::cerr << "[input_rtsp] no video stream\n";
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
|
||||
MppCodingType coding = MPP_VIDEO_CodingAVC;
|
||||
auto codec_id = fmt_ctx->streams[video_stream]->codecpar->codec_id;
|
||||
if (codec_id == AV_CODEC_ID_H264) coding = MPP_VIDEO_CodingAVC;
|
||||
else if (codec_id == AV_CODEC_ID_HEVC) coding = MPP_VIDEO_CodingHEVC;
|
||||
else {
|
||||
std::cerr << "[input_rtsp] unsupported codec for mpp\n";
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
|
||||
MppDecoderWrapper dec;
|
||||
if (!dec.Init(coding)) {
|
||||
std::cerr << "[input_rtsp] mpp init failed\n";
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
int backoff = std::max(1, reconnect_sec_);
|
||||
const int backoff_max = std::max(backoff, reconnect_backoff_max_sec_);
|
||||
|
||||
while (running_.load()) {
|
||||
if (av_read_frame(fmt_ctx, pkt) < 0) {
|
||||
std::this_thread::sleep_for(milliseconds(10));
|
||||
continue;
|
||||
}
|
||||
if (pkt->stream_index != video_stream) {
|
||||
av_packet_unref(pkt);
|
||||
continue;
|
||||
}
|
||||
int64_t pts_ms = pkt->pts == AV_NOPTS_VALUE ? 0
|
||||
: av_rescale_q(pkt->pts, time_base, {1, 1000});
|
||||
dec.Decode(pkt->data, pkt->size, false, pts_ms,
|
||||
[&](MppFrame frm) { PushFrameFromMpp(frm); });
|
||||
av_packet_unref(pkt);
|
||||
}
|
||||
AVFormatContext* fmt_ctx = nullptr;
|
||||
AVPacket* pkt = av_packet_alloc();
|
||||
int video_stream = -1;
|
||||
AVRational time_base{1, 1000};
|
||||
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
AVDictionary* opts = nullptr;
|
||||
if (ffmpeg_force_tcp_) av_dict_set(&opts, "rtsp_transport", "tcp", 0);
|
||||
if (avformat_open_input(&fmt_ctx, url_.c_str(), nullptr, &opts) < 0) {
|
||||
std::cerr << "[input_rtsp] avformat_open_input failed: " << url_ << "\n";
|
||||
av_dict_free(&opts);
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
if (fallback_to_stub_on_fail_) {
|
||||
LoopStub();
|
||||
return;
|
||||
}
|
||||
std::this_thread::sleep_for(seconds(backoff));
|
||||
backoff = std::min(backoff_max, backoff * 2);
|
||||
continue;
|
||||
}
|
||||
av_dict_free(&opts);
|
||||
if (avformat_find_stream_info(fmt_ctx, nullptr) < 0) {
|
||||
std::cerr << "[input_rtsp] avformat_find_stream_info failed\n";
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
std::this_thread::sleep_for(seconds(backoff));
|
||||
backoff = std::min(backoff_max, backoff * 2);
|
||||
continue;
|
||||
}
|
||||
for (unsigned i = 0; i < fmt_ctx->nb_streams; ++i) {
|
||||
if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
video_stream = static_cast<int>(i);
|
||||
time_base = fmt_ctx->streams[i]->time_base;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (video_stream < 0) {
|
||||
std::cerr << "[input_rtsp] no video stream\n";
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
std::this_thread::sleep_for(seconds(backoff));
|
||||
backoff = std::min(backoff_max, backoff * 2);
|
||||
continue;
|
||||
}
|
||||
|
||||
MppCodingType coding = MPP_VIDEO_CodingAVC;
|
||||
auto codec_id = fmt_ctx->streams[video_stream]->codecpar->codec_id;
|
||||
if (codec_id == AV_CODEC_ID_H264) coding = MPP_VIDEO_CodingAVC;
|
||||
else if (codec_id == AV_CODEC_ID_HEVC) coding = MPP_VIDEO_CodingHEVC;
|
||||
else {
|
||||
std::cerr << "[input_rtsp] unsupported codec for mpp\n";
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
std::this_thread::sleep_for(seconds(backoff));
|
||||
backoff = std::min(backoff_max, backoff * 2);
|
||||
continue;
|
||||
}
|
||||
|
||||
MppDecoderWrapper dec;
|
||||
if (!dec.Init(coding)) {
|
||||
std::cerr << "[input_rtsp] mpp init failed\n";
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
std::this_thread::sleep_for(seconds(backoff));
|
||||
backoff = std::min(backoff_max, backoff * 2);
|
||||
continue;
|
||||
}
|
||||
|
||||
backoff = std::max(1, reconnect_sec_);
|
||||
int read_fail = 0;
|
||||
while (running_.load()) {
|
||||
if (av_read_frame(fmt_ctx, pkt) < 0) {
|
||||
if (++read_fail >= 50) break;
|
||||
std::this_thread::sleep_for(milliseconds(10));
|
||||
continue;
|
||||
}
|
||||
read_fail = 0;
|
||||
if (pkt->stream_index != video_stream) {
|
||||
av_packet_unref(pkt);
|
||||
continue;
|
||||
}
|
||||
int64_t pts_ms = pkt->pts == AV_NOPTS_VALUE ? 0
|
||||
: av_rescale_q(pkt->pts, time_base, {1, 1000});
|
||||
dec.Decode(pkt->data, pkt->size, false, pts_ms,
|
||||
[&](MppFrame frm) { PushFrameFromMpp(frm); });
|
||||
av_packet_unref(pkt);
|
||||
}
|
||||
|
||||
Cleanup(fmt_ctx, nullptr, pkt, nullptr);
|
||||
if (!running_.load()) break;
|
||||
std::this_thread::sleep_for(seconds(std::max(1, reconnect_sec_)));
|
||||
}
|
||||
}
|
||||
|
||||
void PushFrameFromMpp(MppFrame frm) {
|
||||
@ -541,6 +601,11 @@ private:
|
||||
bool use_ffmpeg_ = false;
|
||||
bool use_mpp_ = true;
|
||||
bool ffmpeg_force_tcp_ = true;
|
||||
|
||||
int reconnect_sec_ = 5;
|
||||
int reconnect_backoff_max_sec_ = 30;
|
||||
bool fallback_to_stub_on_fail_ = false;
|
||||
std::vector<int> cpu_affinity_;
|
||||
};
|
||||
|
||||
REGISTER_NODE(InputRtspNode, "input_rtsp");
|
||||
|
||||
@ -726,6 +726,22 @@ public:
|
||||
return true;
|
||||
}
|
||||
|
||||
// Report encoder-specific runtime metrics for graph snapshots.
// Fills `out` with an object {"clients": N, "encoded_frames": M} where
// `clients` is the total reader count summed over all ZLMediaKit publishers
// (always 0 when RK3588_ENABLE_ZLMEDIAKIT is not defined) and
// `encoded_frames` is the running encode counter. Always returns true.
// Values are emitted as doubles because SimpleJson numbers are doubles.
bool GetCustomMetrics(SimpleJson& out) const override {
  // Guards zlm_pubs_ / encoded_frames_; this is a const method, so mu_
  // must be declared `mutable` for this lock to compile.
  std::lock_guard<std::mutex> lock(mu_);
  uint64_t clients = 0;
#if defined(RK3588_ENABLE_ZLMEDIAKIT)
  // Sum active readers across every publisher, skipping null entries.
  for (const auto& p : zlm_pubs_) {
    if (!p) continue;
    clients += static_cast<uint64_t>(p->TotalReaderCount());
  }
#endif
  SimpleJson::Object o;
  o["clients"] = SimpleJson(static_cast<double>(clients));
  o["encoded_frames"] = SimpleJson(static_cast<double>(encoded_frames_));
  out = SimpleJson(std::move(o));
  return true;
}
|
||||
|
||||
void Stop() override {
|
||||
if (input_queue_) input_queue_->Stop();
|
||||
|
||||
@ -849,6 +865,10 @@ private:
|
||||
}
|
||||
}
|
||||
|
||||
int TotalReaderCount() const {
|
||||
return media_ ? mk_media_total_reader_count(media_) : 0;
|
||||
}
|
||||
|
||||
void Write(const EncodedPacket& pkt, const std::vector<uint8_t>& header, bool is_h265) {
|
||||
if (!media_ || pkt.data.empty()) return;
|
||||
|
||||
@ -1216,7 +1236,7 @@ private:
|
||||
std::shared_ptr<SpscQueue<FramePtr>> input_queue_;
|
||||
uint64_t encoded_frames_ = 0;
|
||||
|
||||
std::mutex mu_;
|
||||
mutable std::mutex mu_;
|
||||
|
||||
#if defined(RK3588_ENABLE_MPP)
|
||||
std::unique_ptr<MppVencEncoder> mpp_encoder_;
|
||||
|
||||
BIN
scripts/stress/__pycache__/collect_metrics.cpython-311.pyc
Normal file
BIN
scripts/stress/__pycache__/collect_metrics.cpython-311.pyc
Normal file
Binary file not shown.
BIN
scripts/stress/__pycache__/gen_multi_instances.cpython-311.pyc
Normal file
BIN
scripts/stress/__pycache__/gen_multi_instances.cpython-311.pyc
Normal file
Binary file not shown.
71
scripts/stress/collect_metrics.py
Normal file
71
scripts/stress/collect_metrics.py
Normal file
@ -0,0 +1,71 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import time
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
|
||||
|
||||
def http_json(method: str, url: str, body=None, timeout: float = 5.0):
    """Issue an HTTP request and return the decoded JSON response.

    Args:
        method: HTTP verb, e.g. "GET" or "POST".
        url: Full request URL.
        body: Optional JSON-serializable payload; sent as a JSON body.
        timeout: Socket timeout in seconds.

    Raises:
        urllib.error.URLError/HTTPError on transport/status failures,
        json.JSONDecodeError if the response body is not valid JSON.
    """
    payload = None
    headers = {
        "Accept": "application/json",
    }
    if body is not None:
        payload = json.dumps(body).encode("utf-8")
        headers["Content-Type"] = "application/json"

    request = urllib.request.Request(url, data=payload, headers=headers, method=method)
    with urllib.request.urlopen(request, timeout=timeout) as response:
        text = response.read().decode("utf-8", errors="replace")
    return json.loads(text)
|
||||
|
||||
|
||||
def main():
    """Poll the REST API periodically and append one JSON record per poll.

    Each line of the JSONL output is an object with:
      ts_ms         -- poll timestamp in epoch milliseconds
      graphs        -- raw /api/graphs response (when reachable)
      graph_details -- per-graph /api/graphs/{name} responses
                       (or {"error": ...} for a graph that failed)
      error         -- top-level failure message when the API call failed
    """
    ap = argparse.ArgumentParser(description="Poll /api/graphs and /api/graphs/{name} and write JSONL.")
    ap.add_argument("--api", default="http://127.0.0.1:9000", help="Base API URL")
    ap.add_argument("--interval", type=float, default=2.0, help="Polling interval seconds")
    ap.add_argument("--duration", type=float, default=60.0, help="Total duration seconds")
    ap.add_argument("--out", default="metrics.jsonl", help="Output jsonl path")
    ap.add_argument("--reload", action="store_true", help="POST /api/config/reload before polling")
    args = ap.parse_args()

    base = args.api.rstrip("/")

    if args.reload:
        # Best-effort: a failed reload should not abort metric collection.
        try:
            http_json("POST", f"{base}/api/config/reload")
        except Exception as e:
            print(f"reload failed: {e}")

    end_ts = time.time() + max(0.0, args.duration)
    with open(args.out, "w", encoding="utf-8") as f:
        while time.time() <= end_ts:
            ts = int(time.time() * 1000)
            record: dict = {"ts_ms": ts}
            try:
                graphs = http_json("GET", f"{base}/api/graphs")
                record["graphs"] = graphs
                details = {}
                # Robustness fix: the server may answer with an error object
                # (or other non-list JSON); iterating it as a list of dicts
                # would raise. Only walk well-formed graph entries.
                entries = graphs if isinstance(graphs, list) else []
                for g in entries:
                    if not isinstance(g, dict):
                        continue
                    name = g.get("name")
                    if not name:
                        continue
                    try:
                        details[name] = http_json("GET", f"{base}/api/graphs/{name}")
                    except Exception as e:
                        details[name] = {"error": str(e)}
                record["graph_details"] = details
            except urllib.error.HTTPError as e:
                record["error"] = f"http {e.code}: {e.read().decode('utf-8', errors='replace')}"
            except Exception as e:
                record["error"] = str(e)

            f.write(json.dumps(record, ensure_ascii=False) + "\n")
            f.flush()
            # Fix: don't sleep past the requested duration window; the
            # original always slept once more after the final record.
            if time.time() > end_ts:
                break
            time.sleep(max(0.1, args.interval))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
58
scripts/stress/gen_multi_instances.py
Normal file
58
scripts/stress/gen_multi_instances.py
Normal file
@ -0,0 +1,58 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import json
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
def main():
    """Clone one named instance N times to build a multi-channel config.

    Reads the input config, locates the instance whose "name" equals
    --instance, and writes a copy of the config whose "instances" array
    contains N clones named "<prefix>_01", "<prefix>_02", ... with an
    optionally rewritten stream URL per clone.
    """
    ap = argparse.ArgumentParser(description="Duplicate an instance N times to create multi-channel configs.")
    ap.add_argument("--in", dest="inp", required=True, help="Input config.json")
    ap.add_argument("--out", dest="out", required=True, help="Output config.json")
    ap.add_argument("--instance", required=True, help="Base instance name to duplicate")
    ap.add_argument("--count", type=int, required=True, help="Number of instances to generate")
    ap.add_argument("--name-prefix", default=None, help="New instance name prefix (default: base name)")
    ap.add_argument(
        "--url-pattern",
        default=None,
        help="Optional url pattern with {i} (0-based) and {n} (1-based), e.g. rtsp://ip/stream{n}",
    )
    args = ap.parse_args()

    with open(args.inp, "r", encoding="utf-8") as fin:
        cfg = json.load(fin)

    instances = cfg.get("instances")
    if not isinstance(instances, list):
        raise SystemExit("root.instances must be an array")

    # Find the template instance to duplicate.
    template = next(
        (it for it in instances if isinstance(it, dict) and it.get("name") == args.instance),
        None,
    )
    if template is None:
        raise SystemExit(f"instance not found: {args.instance}")

    prefix = args.name_prefix or args.instance
    clones = []
    for idx in range(max(0, args.count)):
        clone = deepcopy(template)
        clone["name"] = f"{prefix}_{idx+1:02d}"
        params = clone.get("params")
        if not isinstance(params, dict):
            params = {}
        clone["params"] = params
        if args.url_pattern is not None:
            clone["params"]["url"] = args.url_pattern.format(i=idx, n=idx + 1)
        clones.append(clone)

    cfg["instances"] = clones

    with open(args.out, "w", encoding="utf-8") as fout:
        json.dump(cfg, fout, ensure_ascii=False, indent=2)
        fout.write("\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -8,12 +8,56 @@
|
||||
#include <thread>
|
||||
#include <chrono>
|
||||
|
||||
#if __has_include(<filesystem>)
|
||||
#include <filesystem>
|
||||
namespace fs = std::filesystem;
|
||||
#endif
|
||||
|
||||
#include "utils/config_expand.h"
|
||||
#include "utils/config_schema.h"
|
||||
#include "utils/logger.h"
|
||||
#include "utils/simple_json_writer.h"
|
||||
#include "utils/thread_affinity.h"
|
||||
|
||||
namespace rk3588 {
|
||||
|
||||
namespace {
|
||||
|
||||
// Write `content` to `path`, truncating any existing file.
// Returns false and sets `err` to a human-readable message on failure.
bool WriteTextFile(const std::string& path, const std::string& content, std::string& err) {
  std::ofstream out(path, std::ios::binary | std::ios::trunc);
  if (!out.is_open()) {
    err = "Failed to open file for write: " + path;
    return false;
  }
  out.write(content.data(), static_cast<std::streamsize>(content.size()));
  if (out.good()) return true;
  err = "Failed to write file: " + path;
  return false;
}
|
||||
|
||||
bool WriteTextFileAtomic(const std::string& path, const std::string& content, std::string& err) {
|
||||
err.clear();
|
||||
#if __has_include(<filesystem>)
|
||||
const std::string tmp = path + ".tmp";
|
||||
if (!WriteTextFile(tmp, content, err)) return false;
|
||||
std::error_code ec;
|
||||
fs::remove(path, ec); // best-effort (Windows rename requires target missing)
|
||||
ec.clear();
|
||||
fs::rename(tmp, path, ec);
|
||||
if (ec) {
|
||||
err = "rename failed: " + ec.message();
|
||||
std::error_code ec2;
|
||||
fs::remove(tmp, ec2);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
#else
|
||||
return WriteTextFile(path, content, err);
|
||||
#endif
|
||||
}
|
||||
|
||||
QueueDropStrategy ParseDropStrategy(const std::string& s, QueueDropStrategy def) {
|
||||
if (s == "drop_oldest") return QueueDropStrategy::DropOldest;
|
||||
if (s == "drop_newest") return QueueDropStrategy::DropNewest;
|
||||
@ -411,6 +455,49 @@ bool Graph::TryUpdateInPlace(const SimpleJson& new_graph_cfg, size_t default_que
|
||||
return true;
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
// Return a copy of `graph_cfg` whose "nodes" array has the node with
// id == `node_id` replaced by `new_node_cfg`. If `graph_cfg` is not an
// object, or has no "nodes" array, the input is returned unchanged.
// Nodes whose id does not match (or that are not objects) are copied as-is;
// a missing node_id therefore yields an unchanged (but re-built) array.
SimpleJson ReplaceNodeInGraphCfg(const SimpleJson& graph_cfg, const std::string& node_id,
                                 const SimpleJson& new_node_cfg) {
  if (!graph_cfg.IsObject()) return graph_cfg;
  const SimpleJson* nodes = graph_cfg.Find("nodes");
  if (!nodes || !nodes->IsArray()) return graph_cfg;

  SimpleJson::Array out_nodes;
  out_nodes.reserve(nodes->AsArray().size());
  for (const auto& n : nodes->AsArray()) {
    if (n.IsObject() && n.ValueOr<std::string>("id", "") == node_id) {
      out_nodes.push_back(new_node_cfg);
    } else {
      out_nodes.push_back(n);
    }
  }
  // Perf fix: copy the top-level object only after the early-return checks,
  // so the bail-out paths no longer pay for a full deep copy.
  SimpleJson::Object obj = graph_cfg.AsObject();
  obj["nodes"] = SimpleJson(std::move(out_nodes));
  return SimpleJson(std::move(obj));
}
|
||||
|
||||
} // namespace
|
||||
|
||||
bool Graph::UpdateNodeConfig(const std::string& node_id, const SimpleJson& new_node_cfg, std::string& err) {
|
||||
err.clear();
|
||||
for (auto& entry : nodes_) {
|
||||
if (entry.id != node_id) continue;
|
||||
if (!entry.enabled || !entry.node) {
|
||||
err = "node not running or disabled: " + node_id;
|
||||
return false;
|
||||
}
|
||||
if (!entry.node->UpdateConfig(new_node_cfg)) {
|
||||
err = "UpdateConfig returned false";
|
||||
return false;
|
||||
}
|
||||
entry.config = new_node_cfg;
|
||||
graph_cfg_ = ReplaceNodeInGraphCfg(graph_cfg_, node_id, new_node_cfg);
|
||||
return true;
|
||||
}
|
||||
err = "node not found: " + node_id;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool Graph::Start() {
|
||||
bool expected = false;
|
||||
if (!running_.compare_exchange_strong(expected, true)) {
|
||||
@ -431,6 +518,13 @@ bool Graph::Start() {
|
||||
if (entry.role != "source") {
|
||||
if (entry.context.input_queue) {
|
||||
entry.worker = std::thread([this, &entry]() {
|
||||
{
|
||||
const auto cpus = ParseCpuAffinity(entry.config);
|
||||
std::string aerr;
|
||||
if (!cpus.empty() && !SetCurrentThreadAffinity(cpus, aerr)) {
|
||||
LogWarn("[Graph] SetCurrentThreadAffinity failed for node " + entry.id + ": " + aerr);
|
||||
}
|
||||
}
|
||||
FramePtr frame;
|
||||
while (true) {
|
||||
if (entry.context.input_queue->Pop(frame, std::chrono::milliseconds(100))) {
|
||||
@ -599,6 +693,21 @@ GraphSnapshot Graph::Snapshot() const {
|
||||
ns.avg_process_time_ms = (static_cast<double>(ns_total) / 1e6) / static_cast<double>(proc_cnt);
|
||||
}
|
||||
|
||||
{
|
||||
SimpleJson cm;
|
||||
if (n.node && n.node->GetCustomMetrics(cm)) {
|
||||
ns.custom_metrics = cm;
|
||||
if (cm.IsObject()) {
|
||||
if (const SimpleJson* a = cm.Find("alarm_total"); a && a->IsNumber()) {
|
||||
snap.alarm_total += static_cast<uint64_t>(a->AsNumber(0.0));
|
||||
}
|
||||
if (const SimpleJson* c = cm.Find("clients"); c && c->IsNumber()) {
|
||||
snap.publish_clients += static_cast<uint64_t>(c->AsNumber(0.0));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const bool source_like = (n.role == "source") || !n.context.input_queue;
|
||||
if (source_like) {
|
||||
total_fps += ns.output_fps;
|
||||
@ -671,6 +780,10 @@ bool GraphManager::Build(const SimpleJson& root_cfg, std::string& err) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!ValidateExpandedRootConfig(expanded, err)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
auto graphs_it = expanded.AsObject().find("graphs");
|
||||
if (graphs_it == expanded.AsObject().end() || !graphs_it->second.IsArray()) {
|
||||
err = "Root config missing 'graphs' array";
|
||||
@ -714,9 +827,26 @@ bool GraphManager::Build(const SimpleJson& root_cfg, std::string& err) {
|
||||
last_good_root_ = expanded;
|
||||
default_queue_size_ = default_queue_size;
|
||||
default_strategy_ = default_strategy;
|
||||
|
||||
if (!last_good_path_.empty()) {
|
||||
std::string werr;
|
||||
if (!WriteTextFileAtomic(last_good_path_, StringifySimpleJson(last_good_root_), werr)) {
|
||||
LogWarn("[GraphManager] persist last_good failed: " + werr);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool GraphManager::BuildFromFile(const std::string& path, std::string& err) {
|
||||
config_path_ = path;
|
||||
last_good_path_ = path + ".last_good.json";
|
||||
SimpleJson root_cfg;
|
||||
if (!LoadConfigFile(path, root_cfg, err)) {
|
||||
return false;
|
||||
}
|
||||
return Build(root_cfg, err);
|
||||
}
|
||||
|
||||
bool GraphManager::StartAll() {
|
||||
std::lock_guard<std::mutex> lock(graphs_mu_);
|
||||
for (auto& g : graphs_) {
|
||||
@ -756,6 +886,11 @@ void GraphManager::BlockUntilStop() {
|
||||
}
|
||||
|
||||
bool GraphManager::ReloadFromFile(const std::string& path, std::string& err) {
|
||||
if (config_path_.empty()) {
|
||||
config_path_ = path;
|
||||
last_good_path_ = path + ".last_good.json";
|
||||
}
|
||||
|
||||
SimpleJson root_cfg;
|
||||
if (!LoadConfigFile(path, root_cfg, err)) {
|
||||
return false;
|
||||
@ -766,6 +901,10 @@ bool GraphManager::ReloadFromFile(const std::string& path, std::string& err) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!ValidateExpandedRootConfig(expanded, err)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
auto graphs_it = expanded.AsObject().find("graphs");
|
||||
if (graphs_it == expanded.AsObject().end() || !graphs_it->second.IsArray()) {
|
||||
err = "Root config missing 'graphs' array";
|
||||
@ -1009,9 +1148,75 @@ bool GraphManager::ReloadFromFile(const std::string& path, std::string& err) {
|
||||
last_good_root_ = expanded;
|
||||
default_queue_size_ = new_default_queue_size;
|
||||
default_strategy_ = new_default_strategy;
|
||||
|
||||
if (!last_good_path_.empty()) {
|
||||
std::string werr;
|
||||
if (!WriteTextFileAtomic(last_good_path_, StringifySimpleJson(last_good_root_), werr)) {
|
||||
LogWarn("[GraphManager] persist last_good failed: " + werr);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool GraphManager::RollbackFromLastGood(std::string& err) {
|
||||
err.clear();
|
||||
if (config_path_.empty()) {
|
||||
err = "config_path not set";
|
||||
return false;
|
||||
}
|
||||
if (last_good_path_.empty()) {
|
||||
err = "last_good_path not set";
|
||||
return false;
|
||||
}
|
||||
|
||||
SimpleJson last_good;
|
||||
if (!LoadConfigFile(last_good_path_, last_good, err)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string werr;
|
||||
if (!WriteTextFileAtomic(config_path_, StringifySimpleJson(last_good), werr)) {
|
||||
err = "failed to write rollback config: " + werr;
|
||||
return false;
|
||||
}
|
||||
|
||||
return ReloadFromFile(config_path_, err);
|
||||
}
|
||||
|
||||
bool GraphManager::UpdateNodeConfig(const std::string& node_id, const std::optional<std::string>& graph,
|
||||
const SimpleJson& new_node_cfg, std::string& err) {
|
||||
std::lock_guard<std::mutex> lock(graphs_mu_);
|
||||
if (graph && !graph->empty()) {
|
||||
for (const auto& g : graphs_) {
|
||||
if (!g || g->Name() != *graph) continue;
|
||||
return g->UpdateNodeConfig(node_id, new_node_cfg, err);
|
||||
}
|
||||
err = "graph not found: " + *graph;
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t hits = 0;
|
||||
Graph* found = nullptr;
|
||||
for (const auto& g : graphs_) {
|
||||
if (!g) continue;
|
||||
NodeSnapshot tmp;
|
||||
if (g->FindNodeSnapshotById(node_id, tmp)) {
|
||||
found = g.get();
|
||||
++hits;
|
||||
if (hits > 1) break;
|
||||
}
|
||||
}
|
||||
if (hits == 0 || !found) {
|
||||
err = "node not found: " + node_id;
|
||||
return false;
|
||||
}
|
||||
if (hits > 1) {
|
||||
err = "node id not unique, specify ?graph=<name>";
|
||||
return false;
|
||||
}
|
||||
return found->UpdateNodeConfig(node_id, new_node_cfg, err);
|
||||
}
|
||||
|
||||
std::vector<GraphSnapshot> GraphManager::ListGraphSnapshots() {
|
||||
std::vector<GraphSnapshot> out;
|
||||
std::lock_guard<std::mutex> lock(graphs_mu_);
|
||||
|
||||
@ -9,6 +9,10 @@
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "utils/logger.h"
|
||||
#include "utils/simple_json.h"
|
||||
#include "utils/simple_json_writer.h"
|
||||
|
||||
#if defined(_WIN32)
|
||||
#ifndef NOMINMAX
|
||||
#define NOMINMAX
|
||||
@ -168,6 +172,8 @@ static std::string GraphSnapshotJson(const GraphSnapshot& g) {
|
||||
oss << "\"running\":" << (g.running ? "true" : "false") << ',';
|
||||
oss << "\"timestamp_ms\":" << g.timestamp_ms << ',';
|
||||
oss << "\"total_fps\":" << g.total_fps << ',';
|
||||
oss << "\"alarm_total\":" << g.alarm_total << ',';
|
||||
oss << "\"publish_clients\":" << g.publish_clients << ',';
|
||||
|
||||
oss << "\"nodes\":[";
|
||||
for (size_t i = 0; i < g.nodes.size(); ++i) {
|
||||
@ -185,7 +191,8 @@ static std::string GraphSnapshotJson(const GraphSnapshot& g) {
|
||||
oss << "\"drop_total\":" << n.drop_total << ',';
|
||||
oss << "\"error_total\":" << n.error_total << ',';
|
||||
oss << "\"avg_process_time_ms\":" << n.avg_process_time_ms << ',';
|
||||
oss << "\"input_queue\":" << QueueJson(n.input_queue);
|
||||
oss << "\"input_queue\":" << QueueJson(n.input_queue) << ',';
|
||||
oss << "\"custom_metrics\":" << StringifySimpleJson(n.custom_metrics);
|
||||
oss << "}";
|
||||
}
|
||||
oss << "],";
|
||||
@ -220,7 +227,8 @@ static std::string NodeSnapshotJson(const NodeSnapshot& n) {
|
||||
oss << "\"drop_total\":" << n.drop_total << ',';
|
||||
oss << "\"error_total\":" << n.error_total << ',';
|
||||
oss << "\"avg_process_time_ms\":" << n.avg_process_time_ms << ',';
|
||||
oss << "\"input_queue\":" << QueueJson(n.input_queue);
|
||||
oss << "\"input_queue\":" << QueueJson(n.input_queue) << ',';
|
||||
oss << "\"custom_metrics\":" << StringifySimpleJson(n.custom_metrics);
|
||||
oss << "}";
|
||||
return oss.str();
|
||||
}
|
||||
@ -470,60 +478,149 @@ void HttpServer::ServerLoop() {
|
||||
|
||||
// Dispatch
|
||||
if (req.path.rfind("/api/", 0) == 0) {
|
||||
if (req.method != "GET") {
|
||||
resp.status = 405;
|
||||
resp.body = ErrorJson("method not allowed");
|
||||
} else if (req.path == "/api/graphs") {
|
||||
auto snaps = gm_.ListGraphSnapshots();
|
||||
std::ostringstream oss;
|
||||
oss << "[";
|
||||
for (size_t i = 0; i < snaps.size(); ++i) {
|
||||
if (i) oss << ',';
|
||||
auto OkJson = [] { return std::string("{\"ok\":true}"); };
|
||||
|
||||
if (req.method == "GET") {
|
||||
if (req.path == "/api/graphs") {
|
||||
auto snaps = gm_.ListGraphSnapshots();
|
||||
std::ostringstream oss;
|
||||
oss << "[";
|
||||
for (size_t i = 0; i < snaps.size(); ++i) {
|
||||
if (i) oss << ',';
|
||||
oss << "{";
|
||||
oss << "\"name\":\"" << JsonEscape(snaps[i].name) << "\",";
|
||||
oss << "\"running\":" << (snaps[i].running ? "true" : "false") << ',';
|
||||
oss << "\"total_fps\":" << snaps[i].total_fps << ',';
|
||||
oss << "\"alarm_total\":" << snaps[i].alarm_total << ',';
|
||||
oss << "\"publish_clients\":" << snaps[i].publish_clients;
|
||||
oss << "}";
|
||||
}
|
||||
oss << "]";
|
||||
resp.body = oss.str();
|
||||
} else if (req.path.rfind("/api/graphs/", 0) == 0) {
|
||||
std::string name = req.path.substr(std::string("/api/graphs/").size());
|
||||
GraphSnapshot gs;
|
||||
std::string gerr;
|
||||
if (!gm_.GetGraphSnapshot(name, gs, gerr)) {
|
||||
resp.status = 404;
|
||||
resp.body = ErrorJson(gerr);
|
||||
} else {
|
||||
resp.body = GraphSnapshotJson(gs);
|
||||
}
|
||||
} else if (req.path == "/api/logs/recent") {
|
||||
auto q = ParseQuery(req.query);
|
||||
size_t limit = 200;
|
||||
if (auto it = q.find("limit"); it != q.end()) {
|
||||
try {
|
||||
limit = static_cast<size_t>(std::stoul(it->second));
|
||||
} catch (...) {
|
||||
limit = 200;
|
||||
}
|
||||
}
|
||||
auto lines = Logger::Instance().RecentLines(limit);
|
||||
std::ostringstream oss;
|
||||
oss << "{";
|
||||
oss << "\"name\":\"" << JsonEscape(snaps[i].name) << "\",";
|
||||
oss << "\"running\":" << (snaps[i].running ? "true" : "false") << ',';
|
||||
oss << "\"total_fps\":" << snaps[i].total_fps;
|
||||
oss << "}";
|
||||
}
|
||||
oss << "]";
|
||||
resp.body = oss.str();
|
||||
} else if (req.path.rfind("/api/graphs/", 0) == 0) {
|
||||
std::string name = req.path.substr(std::string("/api/graphs/").size());
|
||||
GraphSnapshot gs;
|
||||
std::string gerr;
|
||||
if (!gm_.GetGraphSnapshot(name, gs, gerr)) {
|
||||
resp.status = 404;
|
||||
resp.body = ErrorJson(gerr);
|
||||
oss << "\"lines\":[";
|
||||
for (size_t i = 0; i < lines.size(); ++i) {
|
||||
if (i) oss << ',';
|
||||
oss << "\"" << JsonEscape(lines[i]) << "\"";
|
||||
}
|
||||
oss << "]}";
|
||||
resp.body = oss.str();
|
||||
} else if (req.path.rfind("/api/nodes/", 0) == 0) {
|
||||
// /api/nodes/{id}/metrics
|
||||
const std::string prefix = "/api/nodes/";
|
||||
const std::string suffix = "/metrics";
|
||||
auto pos = req.path.rfind(suffix);
|
||||
if (pos == std::string::npos || pos + suffix.size() != req.path.size() || pos <= prefix.size()) {
|
||||
resp.status = 404;
|
||||
resp.body = ErrorJson("not found");
|
||||
} else {
|
||||
std::string node_id = req.path.substr(prefix.size(), pos - prefix.size());
|
||||
auto q = ParseQuery(req.query);
|
||||
std::optional<std::string> graph;
|
||||
if (auto it = q.find("graph"); it != q.end() && !it->second.empty()) {
|
||||
graph = it->second;
|
||||
}
|
||||
NodeSnapshot ns;
|
||||
std::string nerr;
|
||||
if (!gm_.GetNodeSnapshot(node_id, graph, ns, nerr)) {
|
||||
resp.status = (nerr.find("not unique") != std::string::npos) ? 409 : 404;
|
||||
resp.body = ErrorJson(nerr);
|
||||
} else {
|
||||
resp.body = NodeSnapshotJson(ns);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
resp.body = GraphSnapshotJson(gs);
|
||||
}
|
||||
} else if (req.path.rfind("/api/nodes/", 0) == 0) {
|
||||
// /api/nodes/{id}/metrics
|
||||
const std::string prefix = "/api/nodes/";
|
||||
const std::string suffix = "/metrics";
|
||||
auto pos = req.path.rfind(suffix);
|
||||
if (pos == std::string::npos || pos + suffix.size() != req.path.size() || pos <= prefix.size()) {
|
||||
resp.status = 404;
|
||||
resp.body = ErrorJson("not found");
|
||||
} else {
|
||||
std::string node_id = req.path.substr(prefix.size(), pos - prefix.size());
|
||||
auto q = ParseQuery(req.query);
|
||||
std::optional<std::string> graph;
|
||||
if (auto it = q.find("graph"); it != q.end() && !it->second.empty()) {
|
||||
graph = it->second;
|
||||
}
|
||||
NodeSnapshot ns;
|
||||
std::string nerr;
|
||||
if (!gm_.GetNodeSnapshot(node_id, graph, ns, nerr)) {
|
||||
resp.status = (nerr.find("not unique") != std::string::npos) ? 409 : 404;
|
||||
resp.body = ErrorJson(nerr);
|
||||
} else if (req.method == "POST") {
|
||||
if (req.path == "/api/config/reload") {
|
||||
if (gm_.ConfigPath().empty()) {
|
||||
resp.status = 500;
|
||||
resp.body = ErrorJson("config_path not set");
|
||||
} else {
|
||||
std::string rerr;
|
||||
if (!gm_.ReloadFromFile(gm_.ConfigPath(), rerr)) {
|
||||
resp.status = 500;
|
||||
resp.body = ErrorJson(rerr);
|
||||
} else {
|
||||
resp.body = OkJson();
|
||||
}
|
||||
}
|
||||
} else if (req.path == "/api/config/rollback") {
|
||||
std::string rerr;
|
||||
if (!gm_.RollbackFromLastGood(rerr)) {
|
||||
resp.status = 500;
|
||||
resp.body = ErrorJson(rerr);
|
||||
} else {
|
||||
resp.body = OkJson();
|
||||
}
|
||||
} else if (req.path.rfind("/api/nodes/", 0) == 0) {
|
||||
// /api/nodes/{id}/config
|
||||
const std::string prefix = "/api/nodes/";
|
||||
const std::string suffix = "/config";
|
||||
auto pos = req.path.rfind(suffix);
|
||||
if (pos == std::string::npos || pos + suffix.size() != req.path.size() || pos <= prefix.size()) {
|
||||
resp.status = 404;
|
||||
resp.body = ErrorJson("not found");
|
||||
} else {
|
||||
std::string node_id = req.path.substr(prefix.size(), pos - prefix.size());
|
||||
auto q = ParseQuery(req.query);
|
||||
std::optional<std::string> graph;
|
||||
if (auto it = q.find("graph"); it != q.end() && !it->second.empty()) {
|
||||
graph = it->second;
|
||||
}
|
||||
if (req.body.empty()) {
|
||||
resp.status = 400;
|
||||
resp.body = ErrorJson("empty body");
|
||||
} else {
|
||||
SimpleJson body;
|
||||
std::string jerr;
|
||||
if (!ParseSimpleJson(req.body, body, jerr)) {
|
||||
resp.status = 400;
|
||||
resp.body = ErrorJson(jerr);
|
||||
} else {
|
||||
std::string uerr;
|
||||
if (!gm_.UpdateNodeConfig(node_id, graph, body, uerr)) {
|
||||
if (uerr.find("not unique") != std::string::npos) resp.status = 409;
|
||||
else if (uerr.find("not found") != std::string::npos) resp.status = 404;
|
||||
else resp.status = 400;
|
||||
resp.body = ErrorJson(uerr);
|
||||
} else {
|
||||
resp.body = OkJson();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
resp.body = NodeSnapshotJson(ns);
|
||||
}
|
||||
resp.status = 404;
|
||||
resp.body = ErrorJson("not found");
|
||||
}
|
||||
} else {
|
||||
resp.status = 404;
|
||||
resp.body = ErrorJson("not found");
|
||||
resp.status = 405;
|
||||
resp.body = ErrorJson("method not allowed");
|
||||
}
|
||||
} else {
|
||||
std::string path = req.path;
|
||||
|
||||
@ -189,7 +189,7 @@ int MediaServerApp::Start() {
|
||||
web_root = g->ValueOr<std::string>("web_root", web_root);
|
||||
}
|
||||
|
||||
if (!graph_manager_.Build(root_cfg, err)) {
|
||||
if (!graph_manager_.BuildFromFile(config_path_, err)) {
|
||||
std::cerr << "[MediaServerApp] Failed to build graphs: " << err << "\n";
|
||||
return 1;
|
||||
}
|
||||
|
||||
Loading…
Reference in New Issue
Block a user