Add config template rendering workflow

This commit is contained in:
tian 2026-04-18 11:56:48 +08:00
parent ecc6937746
commit a46c319d83
12 changed files with 1027 additions and 4 deletions

2
configs/generated/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
*
!.gitignore

View File

@ -0,0 +1,24 @@
{
"description": "Enable verbose face recognition and unknown-candidate diagnostics for test runs.",
"instance_overrides": {
"*": {
"override": {
"nodes": {
"face_recog": {
"debug": {
"enabled": true,
"log_matches": true,
"min_log_interval_ms": 0
}
},
"alarm": {
"face_debug": {
"log_unknown_candidates": true,
"unknown_candidate_interval_ms": 0
}
}
}
}
}
}
}

View File

@ -0,0 +1,39 @@
{
"description": "Disable verbose recognition, unknown-candidate, and shoe debug logs for normal operation.",
"instance_overrides": {
"*": {
"override": {
"nodes": {
"face_recog": {
"debug": {
"enabled": false,
"log_matches": false
}
},
"alarm": {
"face_debug": {
"log_unknown_candidates": false
}
},
"person_det": {
"debug": {
"stats": false,
"detections": false
}
},
"face_det": {
"debug": {
"stats": false
}
},
"shoe_assoc": {
"debug": false
},
"shoe_color": {
"debug": false
}
}
}
}
}
}

View File

@ -0,0 +1,17 @@
{
"description": "Enable shoe association and color-check debug logs for test runs.",
"instance_overrides": {
"*": {
"override": {
"nodes": {
"shoe_assoc": {
"debug": true
},
"shoe_color": {
"debug": true
}
}
}
}
}
}

View File

@ -0,0 +1,26 @@
{
"name": "local_3588_test",
"description": "Local RK3588 test profile used during workshop face and shoe alarm validation.",
"queue": {
"size": 8,
"strategy": "drop_oldest"
},
"instances": [
{
"name": "cam1",
"template": "workshop_face_shoe_alarm",
"params": {
"rtsp_url": "rtsp://10.0.0.49:8554/cam",
"rga_gate": "full_pipeline_1080p",
"face_gallery_path": "./models/face_gallery.db",
"minio_endpoint": "http://10.0.0.49:9000",
"minio_bucket": "myminio",
"minio_access_key": "admin",
"minio_secret_key": "password",
"external_get_token_url": "http://10.0.0.49:8080/api/getToken",
"external_put_message_url": "http://10.0.0.49:8080/api/putMessage",
"tenant_code": "32"
}
}
]
}

View File

@ -0,0 +1,539 @@
{
"name": "workshop_face_shoe_alarm",
"description": "Full 1080p workshop pipeline with face detection/recognition, person tracking, shoe detection, OSD, publishing, alarms, snapshots, clips, and external API upload.",
"source": "configs/full_pipeline_1080p_test_alarm.json",
"template": {
"executor": {
"batch_size": 2,
"run_budget": 8
},
"nodes": [
{
"id": "in",
"type": "input_rtsp",
"role": "source",
"enable": true,
"url": "${rtsp_url}",
"fps": 30,
"width": 1920,
"height": 1080,
"use_ffmpeg": true,
"use_mpp": false,
"force_tcp": true,
"reconnect_sec": 5,
"reconnect_backoff_max_sec": 30
},
{
"id": "pre_rgb",
"type": "preprocess",
"role": "filter",
"enable": true,
"cpu_affinity": [
2
],
"dst_w": 1920,
"dst_h": 1080,
"dst_format": "rgb",
"dst_packed": true,
"resize_mode": "stretch",
"keep_ratio": false,
"rga_gate": "${rga_gate}",
"use_rga": true
},
{
"id": "face_det",
"type": "ai_scrfd_sliding",
"role": "filter",
"enable": true,
"cpu_affinity": [
4
],
"infer_fps": 3,
"model_path": "./models/face_det_scrfd_500m_640_rk3588.rknn",
"model_w": 640,
"model_h": 640,
"windows": [
{
"x": 0,
"y": 0,
"w": 960,
"h": 1080
},
{
"x": 960,
"y": 0,
"w": 960,
"h": 1080
}
],
"conf_thresh": 0.5,
"nms_thresh": 0.4,
"max_faces": 50,
"debug": {
"stats": true,
"stats_interval": 30
}
},
{
"id": "face_recog",
"type": "ai_face_recog",
"role": "filter",
"enable": true,
"cpu_affinity": [
4
],
"infer_fps": 2,
"infer_phase_ms": 120,
"model_path": "./models/face_recog_mobilefacenet_arcface_112_rk3588.rknn",
"align": true,
"emit_embedding": false,
"max_faces": 50,
"person_class_id": 0,
"track_state_max_age_ms": 1000,
"input_format": "rgb",
"input_dtype": "uint8",
"threshold": {
"accept": 0.45,
"margin": 0.05
},
"gallery": {
"backend": "sqlite",
"path": "${face_gallery_path}",
"load_on_start": true,
"dtype": "auto"
},
"debug": {
"enabled": true,
"log_matches": true,
"min_log_interval_ms": 0
}
},
{
"id": "person_det",
"type": "ai_yolo",
"role": "filter",
"enable": true,
"cpu_affinity": [
5
],
"use_rga": true,
"rga_gate": "${rga_gate}",
"rga_max_inflight": 4,
"dst_packed": true,
"use_dma_input": true,
"infer_fps": 2,
"infer_phase_ms": 0,
"model_path": "./models/object_det_yolov8n_coco_640_rk3588.rknn",
"model_version": "v8",
"model_w": 640,
"model_h": 640,
"num_classes": 80,
"conf": 0.35,
"nms": 0.45,
"class_filter": [
0
],
"bbox_expand": {
"enable": true,
"class_id": 0,
"left": 0.06,
"right": 0.06,
"top": 0.04,
"bottom": 0.16
},
"debug": {
"stats": true,
"stats_interval": 30,
"detections": true
}
},
{
"id": "person_trk",
"type": "tracker",
"role": "filter",
"enable": true,
"cpu_affinity": [
5
],
"mode": "bytetrack_lite",
"per_class": true,
"track_classes": [
0
],
"ignore_classes": [],
"high_th": 0.55,
"low_th": 0.1,
"iou_th": 0.3,
"max_age_ms": 900,
"min_hits": 1,
"max_tracks": 128
},
{
"id": "shoe_det",
"type": "ai_shoe_det",
"role": "filter",
"enable": true,
"cpu_affinity": [
6
],
"use_rga": true,
"rga_gate": "${rga_gate}",
"rga_max_inflight": 4,
"dst_packed": true,
"use_dma_input": false,
"infer_fps": 2,
"infer_phase_ms": 150,
"model_path": "./models/shoe_det_yolov8s_workshoe_640_rk3588.rknn",
"model_w": 640,
"model_h": 640,
"conf": 0.22,
"nms": 0.45,
"v8_box_format": "cxcywh",
"append_detections": true,
"dynamic_roi": {
"enable": true,
"person_class_id": 0,
"shoe_class_id": 1,
"debug_roi_class_id": -1,
"max_rois": 3,
"min_person_height": 60,
"max_box_area_ratio": 0.6,
"y_offset": 0.7,
"width_scale": 1.6,
"height_scale": 0.4
}
},
{
"id": "shoe_assoc",
"type": "logic_gate",
"role": "filter",
"enable": true,
"cpu_affinity": [
6
],
"mode": "person_shoe_check",
"debug": false,
"person_shoe_check": {
"person_class": 0,
"shoe_class": 1,
"violation_class": 2,
"min_person_score": 0.3,
"min_shoe_score": 0.22,
"foot_region": {
"y_offset": 0.7,
"width_scale": 1.6,
"height_scale": 0.4
},
"min_shoe_height_ratio": 0.08,
"min_shoe_area_ratio": 0.012,
"max_shoe_height_ratio": 0.14,
"max_shoe_width_ratio": 0.38,
"max_shoe_area_ratio": 0.05,
"max_shoe_roi_width_ratio": 0.45,
"max_shoe_roi_height_ratio": 0.35,
"max_shoe_roi_area_ratio": 0.1
}
},
{
"id": "shoe_color",
"type": "logic_gate",
"role": "filter",
"enable": true,
"cpu_affinity": [
6
],
"mode": "ppe_boots_check",
"anchor_class": 0,
"boots_class": 1,
"violation_class": 2,
"debug": false,
"color_check": {
"enable": true
}
},
{
"id": "pre_osd",
"type": "preprocess",
"role": "filter",
"enable": true,
"cpu_affinity": [
7
],
"dst_w": 1920,
"dst_h": 1080,
"dst_format": "nv12",
"resize_mode": "stretch",
"rga_gate": "${rga_gate}",
"use_rga": true
},
{
"id": "osd",
"type": "osd",
"role": "filter",
"enable": true,
"cpu_affinity": [
7
],
"draw_bbox": true,
"draw_text": true,
"draw_face_det": true,
"draw_face_recog": true,
"draw_face_bbox": true,
"line_width": 2.0,
"font_scale": 1.0,
"use_rga_bbox": false,
"labels": [
"person",
"shoe",
"non_black_shoe"
]
},
{
"id": "publish",
"type": "publish",
"role": "filter",
"enable": true,
"cpu_affinity": [
3
],
"queue": {
"size": 2,
"policy": "drop_oldest"
},
"codec": "h264",
"fps": 30,
"gop": 60,
"bitrate_kbps": 4000,
"mpp_output_timeout_ms": 50,
"mpp_packet_wait_ms": 10,
"use_mpp": true,
"use_ffmpeg_mux": true,
"outputs": [
{
"proto": "hls",
"path": "./web/hls/${name}/index.m3u8",
"segment_sec": 2
},
{
"proto": "rtsp_server",
"port": 8555,
"path": "/live/${name}"
}
]
},
{
"id": "alarm",
"type": "alarm",
"role": "sink",
"enable": true,
"eval_fps": 2,
"labels": [
"person",
"shoe",
"non_black_shoe"
],
"rules": [
{
"name": "non_compliant_workshoe",
"class_ids": [
2
],
"roi": {
"x": 0.0,
"y": 0.0,
"w": 1.0,
"h": 1.0
},
"min_score": 0.1,
"min_box_area_ratio": 0.0,
"require_track_id": false,
"min_duration_ms": 0,
"min_hits": 1,
"hit_window_ms": 3000,
"cooldown_ms": 1000,
"per_track_cooldown_ms": 0
}
],
"face_track_aggregation": {
"known": {
"min_hits": 1,
"hit_window_ms": 3000,
"reentry_cooldown_ms": 8000
},
"unknown": {
"min_track_age_ms": 1500,
"min_quality_hits": 4
}
},
"face_debug": {
"log_unknown_candidates": true,
"unknown_candidate_interval_ms": 0
},
"face_rules": [
{
"name": "unknown_face",
"type": "unknown",
"cooldown_ms": 7000,
"max_known_sim": 0.35,
"min_hits": 1,
"hit_window_ms": 1500,
"min_face_area_ratio": 0.001,
"min_face_aspect": 0.6,
"max_face_aspect": 1.6
},
{
"name": "known_person",
"type": "person",
"cooldown_ms": 7000,
"min_sim": 0.45,
"min_hits": 1,
"hit_window_ms": 1500,
"min_face_area_ratio": 0.0002,
"min_face_aspect": 0.55,
"max_face_aspect": 1.6
}
],
"actions": {
"log": {
"enable": true,
"level": "info",
"include_detections": true,
"min_interval_ms": 2000
},
"snapshot": {
"enable": true,
"format": "jpg",
"quality": 85,
"upload": {
"type": "minio",
"endpoint": "${minio_endpoint}",
"bucket": "${minio_bucket}",
"region": "us-east-1",
"access_key": "${minio_access_key}",
"secret_key": "${minio_secret_key}"
}
},
"clip": {
"enable": true,
"pre_sec": 5,
"post_sec": 10,
"format": "mp4",
"fps": 30,
"upload": {
"type": "minio",
"endpoint": "${minio_endpoint}",
"bucket": "${minio_bucket}",
"region": "us-east-1",
"access_key": "${minio_access_key}",
"secret_key": "${minio_secret_key}"
}
},
"external_api": {
"enable": true,
"getTokenUrl": "${external_get_token_url}",
"putMessageUrl": "${external_put_message_url}",
"tenantCode": "${tenant_code}",
"channelNo": "${name}",
"timeout_ms": 3000,
"include_media_url": true,
"token_header": "X-Access-Token",
"token_json_path": "responseBody.token",
"token_cache_sec": 1200
}
}
}
],
"edges": [
[
"in",
"pre_rgb"
],
[
"pre_rgb",
"face_det"
],
[
"face_det",
"person_det"
],
[
"person_det",
"person_trk"
],
[
"person_trk",
"face_recog"
],
[
"face_recog",
"shoe_det",
{
"queue": {
"size": 16,
"strategy": "drop_oldest"
}
}
],
[
"shoe_det",
"shoe_assoc",
{
"queue": {
"size": 16,
"strategy": "drop_oldest"
}
}
],
[
"shoe_assoc",
"shoe_color",
{
"queue": {
"size": 16,
"strategy": "drop_oldest"
}
}
],
[
"shoe_color",
"osd",
{
"queue": {
"size": 16,
"strategy": "drop_oldest"
}
}
],
[
"osd",
"pre_osd",
{
"queue": {
"size": 32,
"strategy": "drop_oldest"
}
}
],
[
"pre_osd",
"publish",
{
"queue": {
"size": 64,
"strategy": "drop_oldest"
}
}
],
[
"publish",
"alarm",
{
"queue": {
"size": 64,
"strategy": "drop_oldest"
}
}
]
]
}
}

View File

@ -657,7 +657,38 @@ python tools/analyze_face_recog_log.py .\logs\media-server_latest.log
---
## 6. 调参顺序
## 6. 模板化配置
推荐以模板作为运维核心资产,避免为每台 RK3588 或每次调参复制完整配置。
当前标准模板来自 `configs/full_pipeline_1080p_test_alarm.json`,包含人脸、人体、鞋子、OSD、发布、告警和上传链路。
```bash
python tools/render_config.py \
--template configs/templates/workshop_face_shoe_alarm.json \
--profile configs/profiles/local_3588_test.json \
--overlay configs/overlays/face_debug.json \
--out configs/generated/local_3588_face_debug.json
```
生成后的配置运行方式:
```bash
./build/media-server -c configs/generated/local_3588_face_debug.json
```
目录约定:
| 目录 | 用途 |
|------|------|
| `configs/templates/` | 长期维护的 pipeline 模板 |
| `configs/profiles/` | 设备、现场、摄像头差异参数 |
| `configs/overlays/` | 测试或运行场景覆盖,例如 debug、阈值、频率 |
| `configs/generated/` | 渲染产物,不手工维护,不提交生成的 JSON |
---
## 7. 调参顺序
建议按下面顺序调,不要同时乱改:
@ -670,7 +701,7 @@ python tools/analyze_face_recog_log.py .\logs\media-server_latest.log
4. `alarm.min_hits / min_duration_ms / cooldown_ms`
目标:把“会报警”收敛成“稳一点再报警”
### 6.1 实施人员重点参数
### 7.1 实施人员重点参数
实施时优先看下面这些参数,不建议一开始改其它项。
@ -696,7 +727,7 @@ python tools/analyze_face_recog_log.py .\logs\media-server_latest.log
| `alarm` | `min_duration_ms` | 控制要稳定多久才报警 | 更稳,但慢一点 | 更灵敏,但更容易闪报 |
| `alarm` | `cooldown_ms` | 控制两次告警间隔 | 减少重复告警 | 同一事件会更频繁重复报 |
### 6.2 推荐调参动作
### 7.2 推荐调参动作
现场遇到问题时,优先按下面方式处理:
@ -744,7 +775,7 @@ python tools/analyze_face_recog_log.py .\logs\media-server_latest.log
---
## 7. 常见问题
## 8. 常见问题
### Q1: 为什么鞋子检测阈值只有 0.22

View File

@ -0,0 +1,73 @@
# 配置模板化与渲染设计
## 目标
以模板为核心管理 RK3588 设备配置,避免为了设备差异、测试参数或临时调参复制完整配置文件。
第一阶段采用离线渲染方式:模板、设备 profile、测试 overlay 合成为一个 root config,再由现有 `media-server -c` 加载。这样不改变运行入口,也能复用已有 `templates / instances` 展开逻辑。
## 目录约定
```text
configs/
templates/ # 长期维护的 pipeline 模板
profiles/ # 设备、现场、摄像头差异参数
overlays/ # 测试或运行场景覆盖
generated/ # 渲染产物,不手工维护
```
## 当前标准模板
`configs/templates/workshop_face_shoe_alarm.json` 由 `configs/full_pipeline_1080p_test_alarm.json` 提炼而来,包含当前最完整链路:
- RTSP 输入
- RGB 预处理
- SCRFD 滑窗人脸检测
- MobileFaceNet 人脸识别
- YOLOv8n 人体检测
- 人体跟踪
- 动态 ROI 鞋检测
- 鞋人关联和颜色判断
- OSD
- HLS/RTSP 发布
- 告警、截图、录像、MinIO 上传、External API 上传
模板中只保留 DAG 和插件结构,设备差异通过占位符表达,例如 `${rtsp_url}`、`${face_gallery_path}`、`${minio_endpoint}`、`${external_get_token_url}`。
## Profile
`configs/profiles/local_3588_test.json` 描述具体设备或测试盒子的参数。多台设备或多路相机应新增 profile 或在同一 profile 中新增 instances,而不是复制完整 pipeline。
## Overlay
Overlay 用于测试或运行场景覆盖:
- `configs/overlays/face_debug.json`:打开人脸识别和陌生人候选诊断日志。
- `configs/overlays/shoe_debug.json`:打开鞋子关联和颜色判断 debug。
- `configs/overlays/production_quiet.json`:关闭高频 debug 日志,适合正式运行。
Overlay 支持 `instance_overrides."*"`,可以一次覆盖所有 instance,也可以使用具体 instance 名只覆盖单路相机。
## 渲染命令
```bash
python tools/render_config.py \
--template configs/templates/workshop_face_shoe_alarm.json \
--profile configs/profiles/local_3588_test.json \
--overlay configs/overlays/face_debug.json \
--out configs/generated/local_3588_face_debug.json
```
设备运行:
```bash
./build/media-server -c configs/generated/local_3588_face_debug.json
```
`configs/generated/*.json` 是生成物,不纳入 git,不应手工修改。
## 运行时展开
渲染工具输出的是包含 `templates / instances` 的 root config。`media-server` 加载时继续使用现有 `ExpandRootConfig` 展开成 `graphs`。
本次补充了 template `executor` 保留逻辑,确保从完整配置提炼模板时不会丢失 graph executor 参数。

View File

@ -219,6 +219,9 @@ bool ExpandInstances(const SimpleJson& in_root, SimpleJson::Array& out_graphs, s
SimpleJson::Object graph;
graph.emplace("name", SimpleJson(inst_name));
if (const SimpleJson* executor = tpl.Find("executor")) {
graph.emplace("executor", ReplacePlaceholders(*executor, vars));
}
graph.emplace("nodes", SimpleJson(std::move(inst_nodes)));
graph.emplace("edges", SimpleJson(std::move(inst_edges)));
out_graphs.emplace_back(SimpleJson(std::move(graph)));

View File

@ -227,6 +227,41 @@ TEST(ConfigExpandTest, QueueConfigPreserved) {
EXPECT_EQ(queue->ValueOr<std::string>("strategy", ""), "drop_newest");
}
// Regression test: a template-level "executor" object must be carried into
// each expanded graph, and ${...} placeholders inside it must be substituted
// from the instance params.
TEST(ConfigExpandTest, TemplateExecutorPreserved) {
const char* json = R"({
"templates": {
"t": {
"executor": {"batch_size": 2, "run_budget": "${budget}"},
"nodes": [{"id": "n", "type": "test", "role": "source"}],
"edges": []
}
},
"instances": [
{
"name": "cam1",
"template": "t",
"params": {"budget": 8}
}
]
})";
SimpleJson root;
std::string err;
EXPECT_TRUE(ParseSimpleJson(json, root, err));
SimpleJson expanded;
EXPECT_TRUE(ExpandRootConfig(root, expanded, err));
// Expansion produces a top-level "graphs" array with one entry per instance.
const SimpleJson* graphs = expanded.Find("graphs");
ASSERT_NE(graphs, nullptr);
ASSERT_EQ(graphs->AsArray().size(), 1u);
const SimpleJson* executor = graphs->AsArray()[0].Find("executor");
ASSERT_NE(executor, nullptr);
EXPECT_EQ(executor->ValueOr<int>("batch_size", 0), 2);
// Placeholder substitution renders the numeric param 8 as the string "8".
EXPECT_EQ(executor->ValueOr<std::string>("run_budget", ""), "8");
}
TEST(ConfigExpandTest, GlobalConfigPreserved) {
const char* json = R"({
"global": {"metrics_port": 8080, "log_level": "debug"},

View File

@ -0,0 +1,70 @@
import importlib.util
import pathlib
import sys
import unittest
# Resolve paths relative to this test file so the suite works from any CWD.
REPO_ROOT = pathlib.Path(__file__).resolve().parents[1]
SCRIPT_PATH = REPO_ROOT / "tools" / "render_config.py"
def load_module():
    """Import tools/render_config.py as a standalone module (no package needed)."""
    mod_spec = importlib.util.spec_from_file_location("render_config", SCRIPT_PATH)
    rendered_mod = importlib.util.module_from_spec(mod_spec)
    # Register before executing so imports performed during exec can resolve it.
    sys.modules[mod_spec.name] = rendered_mod
    mod_spec.loader.exec_module(rendered_mod)
    return rendered_mod
class RenderConfigTest(unittest.TestCase):
    """End-to-end check of the template + profile + overlay rendering helpers."""

    def test_renders_profile_and_overlay(self):
        mod = load_module()
        template = {
            "name": "pipeline",
            "template": {
                "executor": {"batch_size": 2},
                "nodes": [
                    {"id": "in", "type": "input_rtsp", "url": "${rtsp_url}"},
                    {"id": "face_recog", "type": "ai_face_recog", "debug": {"enabled": False}},
                ],
                "edges": [["in", "face_recog"]],
            },
        }
        profile = {
            "queue": {"size": 8},
            "instances": [
                {"name": "cam1", "params": {"rtsp_url": "rtsp://example/cam1"}}
            ],
        }
        overlay = {
            "instance_overrides": {
                "*": {
                    "override": {
                        "nodes": {
                            "face_recog": {"debug": {"enabled": True, "log_matches": True}}
                        }
                    }
                }
            }
        }
        tpl_key = mod.template_name(template, pathlib.Path("x.json"))
        root = {
            "templates": {tpl_key: mod.template_body(template)},
            "instances": mod.profile_instances(profile, "pipeline"),
            "queue": profile["queue"],
        }
        rendered = mod.apply_overlay(root, overlay)
        # Profile-level queue and instance params must survive rendering.
        self.assertEqual(rendered["queue"]["size"], 8)
        cam = rendered["instances"][0]
        self.assertEqual(cam["template"], "pipeline")
        self.assertEqual(cam["params"]["rtsp_url"], "rtsp://example/cam1")
        # The wildcard overlay must land in the instance's override section.
        face_debug = cam["override"]["nodes"]["face_recog"]["debug"]
        self.assertTrue(face_debug["enabled"])
        self.assertTrue(face_debug["log_matches"])
if __name__ == "__main__":
    # Allow running this test file directly with the default unittest runner.
    unittest.main()

164
tools/render_config.py Normal file
View File

@ -0,0 +1,164 @@
#!/usr/bin/env python3
"""Render media-server template/profile/overlay config into one root config."""
from __future__ import annotations
import argparse
import copy
import json
from pathlib import Path
from typing import Any
JsonObject = dict[str, Any]
def deep_merge(base: Any, override: Any) -> Any:
    """Recursively merge ``override`` into ``base`` and return a new value.

    Dicts are merged key by key; any non-dict (or type mismatch) is replaced
    by a deep copy of ``override``. Neither input is mutated.
    """
    if not (isinstance(base, dict) and isinstance(override, dict)):
        return copy.deepcopy(override)
    result = copy.deepcopy(base)
    for key, value in override.items():
        result[key] = deep_merge(result[key], value) if key in result else copy.deepcopy(value)
    return result
def load_json(path: Path) -> JsonObject:
    """Read a JSON document (tolerating a UTF-8 BOM) and require an object root."""
    with path.open("r", encoding="utf-8-sig") as handle:
        document = json.load(handle)
    if isinstance(document, dict):
        return document
    raise ValueError(f"{path}: root must be a JSON object")
def template_name(template_doc: JsonObject, template_path: Path) -> str:
    """Pick the template's name: explicit "name" field, else the file stem."""
    candidate = template_doc.get("name") or template_path.stem
    cleaned = str(candidate).strip()
    if not cleaned:
        raise ValueError(f"{template_path}: template name is empty")
    return cleaned
def template_body(template_doc: JsonObject) -> JsonObject:
    """Extract the graph body (executor/nodes/edges) from a template document.

    The document may wrap the body in a "template" key or be the body itself.
    nodes[] and edges[] are mandatory; all other keys are dropped.
    """
    body = template_doc.get("template", template_doc)
    if not isinstance(body, dict):
        raise ValueError("template body must be a JSON object")
    if not isinstance(body.get("nodes"), list) or not isinstance(body.get("edges"), list):
        raise ValueError("template body must contain nodes[] and edges[]")
    keep = {"executor", "nodes", "edges"}
    result: JsonObject = {}
    for key, value in body.items():
        if key in keep:
            result[key] = copy.deepcopy(value)
    return result
def profile_instances(profile: JsonObject, tpl_name: str) -> list[JsonObject]:
    """Build the root-config instances[] array from a profile document.

    A profile either lists instances[] explicitly (each entry defaults its
    "template" to ``tpl_name``) or describes a single device via
    name/params/override shorthand.
    """
    if "instances" in profile:
        explicit = profile["instances"]
        if not isinstance(explicit, list):
            raise ValueError("profile.instances must be an array")
        rendered: list[JsonObject] = []
        for entry in explicit:
            if not isinstance(entry, dict):
                raise ValueError("profile.instances entries must be objects")
            clone = copy.deepcopy(entry)
            clone.setdefault("template", tpl_name)
            rendered.append(clone)
        return rendered
    # Shorthand form: the profile itself describes one instance.
    inst_name = str(profile.get("name", "")).strip()
    if not inst_name:
        raise ValueError("profile must contain name or instances[]")
    single: JsonObject = {
        "name": inst_name,
        "template": tpl_name,
        "params": copy.deepcopy(profile.get("params", {})),
    }
    if "override" in profile:
        single["override"] = copy.deepcopy(profile["override"])
    return [single]
def merge_instance_patch(instance: JsonObject, patch: JsonObject) -> JsonObject:
    """Apply one overlay patch onto an instance entry, returning a new dict.

    params and override sections are deep-merged; "name" and "template" are
    never patched here; any other key in the patch is deep-merged in as well.
    """
    result = copy.deepcopy(instance)
    for section in ("params", "override"):
        if section in patch:
            result[section] = deep_merge(result.get(section, {}), patch[section])
    protected = {"name", "template", "params", "override"}
    for key, value in patch.items():
        if key in protected:
            continue
        result[key] = deep_merge(result.get(key), value)
    return result
def apply_overlay(root: JsonObject, overlay: JsonObject) -> JsonObject:
    """Return a copy of ``root`` with one overlay document applied.

    Top-level global/queue/templates sections are deep-merged first. Then
    overlay.instance_overrides patches every instance ("*" wildcard before the
    per-name entry), and overlay.instances[] patches specific named instances,
    raising if a name is not present in the profile.
    """
    result = copy.deepcopy(root)

    # Shared top-level sections merge straight in.
    for section in ("global", "queue", "templates"):
        if section in overlay:
            result[section] = deep_merge(result.get(section, {}), overlay[section])

    overrides = overlay.get("instance_overrides", {})
    if overrides:
        if not isinstance(overrides, dict):
            raise ValueError("overlay.instance_overrides must be an object")
        patched = []
        for entry in result.get("instances", []):
            current = copy.deepcopy(entry)
            # The wildcard applies to every instance, before any named patch.
            if "*" in overrides:
                current = merge_instance_patch(current, overrides["*"])
            inst_name = current.get("name")
            if inst_name in overrides:
                current = merge_instance_patch(current, overrides[inst_name])
            patched.append(current)
        result["instances"] = patched

    if "instances" in overlay:
        explicit = overlay["instances"]
        if not isinstance(explicit, list):
            raise ValueError("overlay.instances must be an array")
        index_of = {inst.get("name"): pos for pos, inst in enumerate(result.get("instances", []))}
        for patch in explicit:
            if not isinstance(patch, dict) or not patch.get("name"):
                raise ValueError("overlay.instances entries must be objects with name")
            target = patch["name"]
            if target not in index_of:
                raise ValueError(f"overlay instance not found in profile: {target}")
            slot = index_of[target]
            result["instances"][slot] = merge_instance_patch(result["instances"][slot], patch)
    return result
def render(template_path: Path, profile_path: Path, overlay_paths: list[Path]) -> JsonObject:
    """Compose template + profile (+ ordered overlays) into one root config."""
    template_doc = load_json(template_path)
    profile_doc = load_json(profile_path)
    tpl = template_name(template_doc, template_path)
    merged: JsonObject = {
        "templates": {tpl: template_body(template_doc)},
        "instances": profile_instances(profile_doc, tpl),
    }
    # A profile may also pin the shared global/queue sections.
    for section in ("global", "queue"):
        if section in profile_doc:
            merged[section] = copy.deepcopy(profile_doc[section])
    # Overlays apply in command-line order; later overlays win on conflicts.
    for overlay_path in overlay_paths:
        merged = apply_overlay(merged, load_json(overlay_path))
    return merged
def main() -> int:
    """CLI entry point: render the config and write it to --out as UTF-8 JSON."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--template", required=True, type=Path)
    parser.add_argument("--profile", required=True, type=Path)
    parser.add_argument("--overlay", action="append", default=[], type=Path)
    parser.add_argument("--out", required=True, type=Path)
    opts = parser.parse_args()

    rendered = render(opts.template, opts.profile, opts.overlay)
    opts.out.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(rendered, ensure_ascii=False, indent=2) + "\n"
    opts.out.write_text(serialized, encoding="utf-8")
    return 0
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())