添加测试paddleonnx模型脚本

This commit is contained in:
haotian 2025-08-15 11:52:26 +08:00
parent 23afbdd1cf
commit 8288b37d65
4 changed files with 449 additions and 1 deletion

View File

@ -52,7 +52,7 @@ print('done')
print('--> Running model')
outputs = rknn.inference(inputs=[img], data_format=['nhwc'])
np.save('./tflite_mobilenet_v1_0.npy', outputs[0])
show_outputs(outputs)
# show_outputs(outputs)
print('done')
rknn.release()

View File

@ -0,0 +1,27 @@
import onnxruntime as ort
import cv2
import numpy as np
# Load the ONNX models (detection / recognition) on CPU.
# NOTE(review): rec_session is created here but never used below — the
# recognition step of this test script appears unfinished.
det_session = ort.InferenceSession("/home/admin-root/haotian/康达瑞贝斯机器狗/det_shape.onnx", providers=['CPUExecutionProvider'])
rec_session = ort.InferenceSession("/home/admin-root/haotian/康达瑞贝斯机器狗/rec_shape.onnx", providers=['CPUExecutionProvider'])
# 示例预处理函数(根据你的模型需要调整)
def preprocess(img_path, target_size=(640, 640)):
    """Load an image and convert it to an NCHW float32 blob for ONNX inference.

    Args:
        img_path: Path to the image file on disk.
        target_size: (width, height) the image is resized to; adjust to the
            model's expected input shape.

    Returns:
        numpy.ndarray of shape (1, 3, H, W), float32, scaled to [0, 1].

    Raises:
        FileNotFoundError: If the image cannot be read. (cv2.imread returns
            None instead of raising, which previously caused an opaque
            AttributeError inside cv2.resize.)
    """
    img = cv2.imread(img_path)
    if img is None:
        raise FileNotFoundError(f"Could not read image: {img_path}")
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # NOTE(review): left disabled
    # in the original — confirm whether the model expects RGB or BGR input.
    img = cv2.resize(img, target_size)
    img = img.astype('float32') / 255.0
    img = img.transpose(2, 0, 1)  # HWC -> CHW
    img = np.expand_dims(img, axis=0)
    return img
# Test image for the detection model.
img_path = "/home/admin-root/haotian/康达瑞贝斯机器狗/data_image/001读表图片/3aee64cc1f90d93a5a45979f7b17cb4b_frame_001460.jpg"
input_blob = preprocess(img_path)
# Run the detection model and dump its raw outputs.
det_out = det_session.run(None, {det_session.get_inputs()[0].name: input_blob})
print("Detection ONNX outputs:", det_out)
print("Detection ONNX outputs shape:", det_out[0].shape)
# Correspondingly, predict with the recognition model.
# NOTE(review): this step was never implemented — rec_session is unused.

View File

@ -0,0 +1,363 @@
import cv2
import numpy as np
import onnxruntime as ort
from PIL import Image, ImageDraw, ImageFont
import math
class PaddleOCRONNX:
    """Two-stage OCR pipeline (DB-style text detection + CTC text recognition)
    running PaddleOCR-exported models through ONNX Runtime.

    Typical use: ``PaddleOCRONNX(det_path, rec_path).ocr(image_path)``.
    """

    def __init__(self, det_model_path, rec_model_path):
        """
        Initialize the ONNX inference sessions.
        Args:
            det_model_path: path to the detection model (det.onnx)
            rec_model_path: path to the recognition model (rec.onnx)
        """
        # Detection model session
        self.det_session = ort.InferenceSession(det_model_path)
        self.det_input_name = self.det_session.get_inputs()[0].name
        # Recognition model session
        self.rec_session = ort.InferenceSession(rec_model_path)
        self.rec_input_name = self.rec_session.get_inputs()[0].name
        # Character dictionary: index 0 is the CTC blank, then ASCII
        # printables, then the CJK Unified Ideographs block.
        # NOTE(review): this must match the dictionary the recognition model
        # was trained with — a mismatch silently decodes to wrong text.
        self.character = ['blank', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+',
                          ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8',
                          '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E',
                          'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
                          'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
                          '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
                          'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y',
                          'z', '{', '|', '}', '~'] + [chr(i) for i in range(19968, 40870)]  # CJK ideographs

    def resize_norm_img_det(self, img, max_side_len=960):
        """
        Preprocess an image for the detection model.

        Scales so the longest side does not exceed ``max_side_len``, rounds
        both sides up to a multiple of 32, applies ImageNet mean/std
        normalization, and returns an NCHW batch of one.

        Returns:
            (blob, ratio): the (1, 3, H, W) float32 tensor and the
            pre-rounding scale ratio.
        """
        h, w, _ = img.shape
        # Scale factor so the longest side fits within max_side_len
        if max(h, w) > max_side_len:
            if h > w:
                ratio = max_side_len / h
            else:
                ratio = max_side_len / w
        else:
            ratio = 1.0
        resize_h = int(h * ratio)
        resize_w = int(w * ratio)
        # Round both sides up to a multiple of 32 (network stride requirement)
        resize_h = resize_h if resize_h % 32 == 0 else (resize_h // 32 + 1) * 32
        resize_w = resize_w if resize_w % 32 == 0 else (resize_w // 32 + 1) * 32
        img = cv2.resize(img, (resize_w, resize_h))
        # ImageNet mean/std normalization
        img = img.astype(np.float32)
        img = (img / 255.0 - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
        img = img.transpose(2, 0, 1).astype(np.float32)
        img = np.expand_dims(img, axis=0)
        # NOTE(review): `ratio` does not include the round-up-to-32 step, so
        # it is only approximate for mapping coordinates back.
        return img, ratio

    def post_process_det(self, dt_boxes, ratio_h, ratio_w):
        """
        Map detected boxes back to original-image coordinates by the given
        per-axis ratios.
        NOTE(review): not called anywhere in this file — detect_text scales
        boxes inside boxes_from_bitmap instead.
        """
        if dt_boxes is None:
            return None
        dt_boxes[:, :, 0] = dt_boxes[:, :, 0] / ratio_w
        dt_boxes[:, :, 1] = dt_boxes[:, :, 1] / ratio_h
        return dt_boxes

    def boxes_from_bitmap(self, pred, bitmap, dest_width, dest_height, max_candidates=1000, box_thresh=0.6):
        """
        Extract text boxes from the binarized probability map.

        Args:
            pred: float probability map, used to score each candidate box.
            bitmap: binarized map (any nonzero pixel is treated as text).
            dest_width, dest_height: original image size; boxes are mapped
                back into this coordinate frame.
            max_candidates: cap on the number of contours examined.
            box_thresh: minimum mean probability inside a box to keep it.

        Returns:
            (boxes, scores): int16 array of 4-point boxes and a list of
            per-box scores.
        """
        bitmap = bitmap.astype(np.uint8)
        height, width = bitmap.shape
        # Candidate regions = connected contours of the binary map
        contours, _ = cv2.findContours(bitmap, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        num_contours = min(len(contours), max_candidates)
        boxes = []
        scores = []
        for i in range(num_contours):
            contour = contours[i]
            points, sside = self.get_mini_boxes(contour)
            if sside < 5:
                # Drop degenerate / tiny regions
                continue
            points = np.array(points)
            score = self.box_score_fast(pred, points.reshape(-1, 2))
            if box_thresh > score:
                continue
            # Expand the box outward (DB "unclip" step) to recover full glyphs
            box = self.unclip(points, 1.5).reshape(-1, 1, 2)
            box, sside = self.get_mini_boxes(box)
            if sside < 5 + 2:
                continue
            box = np.array(box)
            # Map from probability-map coordinates to original-image coordinates
            box[:, 0] = np.clip(box[:, 0] / width * dest_width, 0, dest_width)
            box[:, 1] = np.clip(box[:, 1] / height * dest_height, 0, dest_height)
            boxes.append(box.astype(np.int16))
            scores.append(score)
        return np.array(boxes), scores

    def get_mini_boxes(self, contour):
        """Return the minimum-area rectangle of a contour as four corner
        points (ordered top-left, top-right, bottom-right, bottom-left) plus
        the rectangle's shorter side length."""
        bounding_box = cv2.minAreaRect(contour)
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])
        # After sorting by x, pick top vs. bottom of each pair by y
        index_1, index_2, index_3, index_4 = 0, 1, 2, 3
        if points[1][1] > points[0][1]:
            index_1 = 0
            index_4 = 1
        else:
            index_1 = 1
            index_4 = 0
        if points[3][1] > points[2][1]:
            index_2 = 2
            index_3 = 3
        else:
            index_2 = 3
            index_3 = 2
        box = [points[index_1], points[index_2], points[index_3], points[index_4]]
        return box, min(bounding_box[1])

    def box_score_fast(self, bitmap, _box):
        """Mean of the probability map inside polygon `_box`, computed only
        over the polygon's bounding rectangle (hence "fast")."""
        h, w = bitmap.shape[:2]
        box = _box.copy()
        xmin = np.clip(np.floor(box[:, 0].min()).astype(int), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype(int), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype(int), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype(int), 0, h - 1)
        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
        # Shift the polygon into the local crop's coordinate frame
        box[:, 0] = box[:, 0] - xmin
        box[:, 1] = box[:, 1] - ymin
        cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def unclip(self, box, unclip_ratio):
        """Expand the polygon outward by distance = area / perimeter *
        unclip_ratio (the DB post-processing "unclip" step)."""
        from shapely.geometry import Polygon
        import pyclipper
        poly = Polygon(box)
        distance = poly.area * unclip_ratio / poly.length
        offset = pyclipper.PyclipperOffset()
        offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        expanded = offset.Execute(distance)
        if len(expanded) == 0:
            # Offsetting can fail on degenerate polygons; fall back to input
            return box
        else:
            return np.array(expanded[0])

    def resize_norm_img_rec(self, img, image_shape=[3, 48, 320]):
        """
        Preprocess a cropped text line for the recognition model: resize to
        height imgH keeping aspect ratio (capped at width imgW), normalize
        with ImageNet mean/std, transpose to CHW, and zero-pad on the right
        up to imgW.
        NOTE(review): mutable default list is shared across calls; it is
        never mutated here so this is harmless, but worth confirming.
        """
        imgC, imgH, imgW = image_shape
        h = img.shape[0]
        w = img.shape[1]
        ratio = w / float(h)
        if math.ceil(imgH * ratio) > imgW:
            resized_w = imgW
        else:
            resized_w = int(math.ceil(imgH * ratio))
        resized_image = cv2.resize(img, (resized_w, imgH))
        resized_image = resized_image.astype('float32')
        # ImageNet mean/std normalization
        resized_image = (resized_image / 255.0 - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
        resized_image = resized_image.transpose((2, 0, 1))
        # Right-pad with zeros to the fixed model input width
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
        padding_im[:, :, 0:resized_w] = resized_image
        return np.expand_dims(padding_im, axis=0)

    def decode_rec_result(self, preds_prob):
        """
        Greedy CTC decoding of the recognition head output.

        Assumes preds_prob has shape (time_steps, num_classes) — TODO
        confirm against the exported model's output layout.

        Returns:
            (text, mean_confidence)
        """
        preds_idx = np.argmax(preds_prob, axis=1)
        preds_prob = np.max(preds_prob, axis=1)
        # CTC: drop blanks (index 0) and collapse consecutive repeats
        last_idx = 0
        preds_text = []
        preds_conf = []
        for i, idx in enumerate(preds_idx):
            if idx != last_idx and idx != 0:  # 0 is the blank token
                if idx < len(self.character):
                    preds_text.append(self.character[idx])
                    preds_conf.append(preds_prob[i])
            last_idx = idx
        text = ''.join(preds_text)
        conf = np.mean(preds_conf) if preds_conf else 0.0
        return text, conf

    def detect_text(self, image):
        """
        Run text detection on a BGR image.

        Returns:
            (boxes, scores) in original-image coordinates.
        """
        ori_h, ori_w = image.shape[:2]
        # Preprocess
        det_img, ratio = self.resize_norm_img_det(image)
        # Inference
        det_output = self.det_session.run(None, {self.det_input_name: det_img})[0]
        # Post-process: binarize the probability map and extract boxes
        mask = det_output[0, 0, :, :]
        threshold = 0.3
        bitmap = (mask > threshold).astype(np.uint8) * 255
        boxes, scores = self.boxes_from_bitmap(mask, bitmap, ori_w, ori_h)
        return boxes, scores

    def recognize_text(self, image):
        """
        Run text recognition on a single cropped text-line image.

        Returns:
            (text, confidence)
        """
        # Preprocess
        rec_img = self.resize_norm_img_rec(image)
        # Inference
        rec_output = self.rec_session.run(None, {self.rec_input_name: rec_img})[0]
        # Decode the first (only) batch element
        text, conf = self.decode_rec_result(rec_output[0])
        return text, conf

    def get_rotate_crop_image(self, img, points):
        """
        Perspective-crop the quadrilateral `points` (4x2 float32, ordered
        top-left, top-right, bottom-right, bottom-left) out of `img`, and
        rotate tall crops (h/w >= 1.5) 90 degrees so the recognizer sees a
        horizontal line.
        """
        img_crop_width = int(
            max(
                np.linalg.norm(points[0] - points[1]),
                np.linalg.norm(points[2] - points[3])))
        img_crop_height = int(
            max(
                np.linalg.norm(points[0] - points[3]),
                np.linalg.norm(points[1] - points[2])))
        pts_std = np.float32([[0, 0], [img_crop_width, 0],
                              [img_crop_width, img_crop_height],
                              [0, img_crop_height]])
        M = cv2.getPerspectiveTransform(points, pts_std)
        dst_img = cv2.warpPerspective(
            img,
            M, (img_crop_width, img_crop_height),
            borderMode=cv2.BORDER_REPLICATE,
            flags=cv2.INTER_CUBIC)
        dst_img_height, dst_img_width = dst_img.shape[0:2]
        # Likely a vertical text line — rotate to horizontal for recognition
        if dst_img_height * 1.0 / dst_img_width >= 1.5:
            dst_img = np.rot90(dst_img)
        return dst_img

    def ocr(self, image_path):
        """
        Full OCR pipeline: read image -> detect boxes -> crop and recognize
        each box.

        Returns:
            list of dicts with keys 'text', 'confidence', 'box', 'score';
            empty list when the image cannot be read or nothing is detected.
        """
        # Read the image (cv2.imread returns None on failure)
        image = cv2.imread(image_path)
        if image is None:
            return []
        # 1. Text detection
        dt_boxes, scores = self.detect_text(image)
        if dt_boxes is None or len(dt_boxes) == 0:
            return []
        # 2. Text recognition per detected box
        ocr_results = []
        for i, box in enumerate(dt_boxes):
            # Crop the text region (perspective-corrected)
            box_points = box.astype(np.float32)
            crop_img = self.get_rotate_crop_image(image, box_points)
            # Recognize the cropped line
            text, conf = self.recognize_text(crop_img)
            if conf > 0.5:  # confidence filter
                ocr_results.append({
                    'text': text,
                    'confidence': conf,
                    'box': box.tolist(),
                    'score': scores[i] if i < len(scores) else 0.0
                })
        return ocr_results
# Usage example
def main(
    det_model_path='/home/admin-root/haotian/康达瑞贝斯机器狗/det.onnx',
    rec_model_path='/home/admin-root/haotian/康达瑞贝斯机器狗/rec.onnx',
    image_path='/home/admin-root/haotian/康达瑞贝斯机器狗/data_image/001读表图片/3aee64cc1f90d93a5a45979f7b17cb4b_frame_001460.jpg',
):
    """Run the full OCR pipeline on one image, print the results, and write
    a visualization to result.jpg.

    The previously hard-coded model/image paths are now keyword parameters
    whose defaults are the original values, so a bare ``main()`` behaves
    exactly as before while other inputs become possible.
    """
    # Initialize OCR with the detection and recognition models
    ocr = PaddleOCRONNX(det_model_path, rec_model_path)
    # Execute OCR
    results = ocr.ocr(image_path)
    # Print each recognized line
    for result in results:
        print(f"文本: {result['text']}")
        print(f"置信度: {result['confidence']:.3f}")
        print(f"检测得分: {result['score']:.3f}")
        print(f"坐标: {result['box']}")
        print("-" * 50)
    # Visualize the results
    visualize_results(image_path, results)
def visualize_results(image_path, results):
    """
    Draw the OCR boxes and recognized text onto the image and save it as
    ``result.jpg``.

    Args:
        image_path: path of the original image.
        results: list of dicts as produced by ``PaddleOCRONNX.ocr``.

    Raises:
        FileNotFoundError: if the image cannot be read. (cv2.imread returns
            None on failure, which previously crashed later inside
            cv2.polylines with an opaque error.)
    """
    image = cv2.imread(image_path)
    if image is None:
        raise FileNotFoundError(f"Could not read image: {image_path}")
    for result in results:
        box = np.array(result['box'], dtype=np.int32)
        cv2.polylines(image, [box], True, (0, 255, 0), 2)
        # Draw the recognized text just above the box.
        # NOTE(review): cv2.putText only supports Hershey fonts and cannot
        # render CJK glyphs (they come out as '?'); PIL's ImageDraw/ImageFont
        # — already imported at the top of this file but unused — would be
        # needed for Chinese labels. Confirm and switch if that matters.
        cv2.putText(image, result['text'],
                    (box[0][0], box[0][1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
    cv2.imwrite('result.jpg', image)
    # cv2.imshow('OCR Results', image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
# Script entry point
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,58 @@
from rknn.api import RKNN
import cv2
import numpy as np

# Convert a YOLOv8 ONNX model to RKNN, then run one test inference.

# Initialize RKNN
rknn = RKNN()

# Conversion configuration (critical!)
rknn.config(
    target_platform="rk3588",  # change to match the actual target chip
    # NOTE(review): mean_values look like 0-1 scale ImageNet means while
    # std_values are 255, and the original comments contradicted each other
    # ("input is 0-255, no normalization" vs. "divide input by 255") —
    # confirm the normalization the ONNX model actually expects.
    mean_values=[[0.485, 0.456, 0.406]],
    std_values=[[255, 255, 255]],
    quant_img_RGB2BGR=True,
    optimization_level=3,  # highest optimization level
)

# Load the ONNX model
ret = rknn.load_onnx(model="/home/admin-root/haotian/rk3588/pytorch模型转rknn/models/yolov8m.onnx")
assert ret == 0, "加载 ONNX 失败!"

# Build (convert) the model
ret = rknn.build(
    do_quantization=False,  # set True to enable quantization
    # dataset="dataset.txt",  # calibration data path (required when quantizing)
)
assert ret == 0, "转换 RKNN 失败!"

# Export the RKNN model
ret = rknn.export_rknn("/home/admin-root/haotian/rk3588/pytorch模型转rknn/models/yolov8m.rknn")
assert ret == 0, "导出 RKNN 失败!"

# Set inputs
img = cv2.imread('/home/admin-root/haotian/rk3588/pytorch模型转rknn/images/bus.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# BUG FIX: the original `img.resize((3, 640, 640))` called
# numpy.ndarray.resize, which only reshapes/truncates the raw HWC buffer in
# place — it neither rescales the image nor moves the channel axis, so the
# model received scrambled pixels. Do a real image resize followed by an
# HWC -> CHW transpose to match data_format='nchw'.
img = cv2.resize(img, (640, 640))
img = img.transpose(2, 0, 1)
img = np.expand_dims(img, 0)

# Init runtime environment
print('--> Init runtime environment')
ret = rknn.init_runtime()
if ret != 0:
    print('Init runtime environment failed!')
    exit(ret)
print('done')

# Inference
print('--> Running model')
outputs = rknn.inference(inputs=[img], data_format=['nchw'])
# NOTE(review): the output filename says tflite_mobilenet but this script
# converts yolov8m — presumably copied from another example; confirm.
np.save('./tflite_mobilenet_v1_0.npy', outputs[0])
print(len(outputs))
print('done')
rknn.release()