# CommonAutoRearsh/scripts/run_task.py
#
# (File-viewer metadata — "77 lines", "2.1 KiB", "Python" — was pasted into
# the source; preserved here as a comment so the module parses.)
from __future__ import annotations

import argparse
import json
import sys
from pathlib import Path

# Make the repository root importable so the `engine` package resolves when
# this script is executed directly (e.g. `python scripts/run_task.py`)
# rather than as an installed module.
ROOT_DIR = Path(__file__).resolve().parents[1]
if str(ROOT_DIR) not in sys.path:
    sys.path.insert(0, str(ROOT_DIR))

from engine.artifact_manager import ArtifactManager
from engine.decision_engine import decide_candidate
from engine.runner import run_command
from engine.scorer import parse_score_output
from engine.task_loader import load_task
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the task runner.

    Returns:
        Namespace with a single required ``--task`` option: the path to the
        task definition file, interpreted relative to the current working
        directory by the caller.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", required=True)
    return parser.parse_args()
def main() -> int:
    """Run one task end to end: execute, score, decide, and log the result.

    Loads the task definition named by ``--task``, snapshots the managed
    artifacts, runs the task's runner and scorer commands, parses the score
    from the scorer's stdout, applies the decision policy (with no baseline
    to compare against), and appends one JSON record per run to the task's
    results file (JSON Lines format).

    Returns:
        Process exit code — always 0; failures surface as exceptions from
        the helpers this calls.
    """
    args = parse_args()
    root_dir = Path.cwd()
    task_path = (root_dir / args.task).resolve()
    task = load_task(task_path)

    artifact_manager = ArtifactManager(task)
    # Snapshot before running so diff_summary() can report what the run changed.
    snapshot = artifact_manager.snapshot()

    run_result = run_command(
        task.runner.command,
        (root_dir / task.runner.cwd).resolve(),
        task.runner.timeout_seconds,
    )
    # NOTE(review): the scorer reuses the *runner's* timeout and executes from
    # the repo root rather than task.runner.cwd — confirm both are intentional
    # (a task.scorer.timeout_seconds, if it exists, may be the intended value).
    scorer_result = run_command(
        task.scorer.command,
        root_dir.resolve(),
        task.runner.timeout_seconds,
    )
    score_result = parse_score_output(
        scorer_result.stdout,
        score_field=task.scorer.parse.score_field,
        metrics_field=task.scorer.parse.metrics_field,
    )

    # baseline=None: this entry point evaluates a single candidate with no
    # prior score to beat; the policy decides from the candidate alone.
    decision = decide_candidate(
        baseline=None,
        candidate=score_result,
        objective=task.objective,
        constraints=task.constraints,
        tie_breakers=task.policy.tie_breakers,
        run_result=run_result,
    )

    record = {
        "task_id": task.id,
        "status": decision.status,
        "reason": decision.reason,
        "candidate_score": decision.candidate_score,
        "diff_summary": artifact_manager.diff_summary(snapshot),
    }

    # Append as JSON Lines; ensure_ascii=False keeps non-ASCII text readable,
    # newline="" avoids platform newline translation in the appended record.
    results_path = (root_dir / task.logging.results_file).resolve()
    results_path.parent.mkdir(parents=True, exist_ok=True)
    with results_path.open("a", encoding="utf-8", newline="") as handle:
        handle.write(json.dumps(record, ensure_ascii=False) + "\n")

    print(json.dumps(record, ensure_ascii=False))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())