"""OCR evaluation: normalized edit distance (NED) between predictions and ground truth."""

import json

try:
    # Optional third-party C-accelerated implementation.
    import Levenshtein
    _distance = Levenshtein.distance
except ImportError:
    # Pure-Python fallback so the script still runs without the package.
    def _distance(a: str, b: str) -> int:
        """Levenshtein distance via a two-row dynamic-programming table."""
        if len(a) < len(b):
            a, b = b, a  # keep the inner loop over the shorter string
        prev = list(range(len(b) + 1))
        for i, ca in enumerate(a, 1):
            cur = [i]
            for j, cb in enumerate(b, 1):
                cur.append(min(
                    prev[j] + 1,            # deletion
                    cur[j - 1] + 1,         # insertion
                    prev[j - 1] + (ca != cb),  # substitution (0 if equal)
                ))
            prev = cur
        return prev[-1]


def compute_edit_distance(pred: str, gt: str) -> float:
    """Compute the normalized edit distance (NED) between two strings.

    Newlines and spaces are stripped from both strings before comparison,
    so the metric ignores whitespace/layout differences.

    Args:
        pred: predicted (OCR) text.
        gt: ground-truth text.

    Returns:
        NED in [0, 1] rounded to 4 decimals; smaller means more similar.
        Returns 0.0 when both strings are empty after stripping.
    """
    pred = pred.replace("\n", "").replace(" ", "")
    gt = gt.replace("\n", "").replace(" ", "")
    if not pred and not gt:
        return 0.0
    dist = _distance(pred, gt)
    # max(...) > 0 here because at least one string is non-empty.
    return round(dist / max(len(pred), len(gt)), 4)


def compute_accuracy(json_path: str):
    """Compute the average NED and per-sample NED for a JSON result file.

    The ground-truth field is selected from substrings of *json_path*:
    "normal" -> "content", "tiny_shuffled" -> "tiny_shuffled_content",
    "shuffled" -> "shuffled_content".  "tiny_shuffled" is tested before
    "shuffled" because the latter is a substring of the former.

    Args:
        json_path: path to a JSON file containing a list of sample dicts.

    Returns:
        (overall_ned, sample_accuracies); (0.0, []) for an empty file.

    Raises:
        ValueError: if the path matches none of the known dataset kinds.
    """
    with open(json_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    total_ned = 0.0
    sample_accuracies = []
    for sample in data:
        # Ground-truth field selection based on the dataset path.
        if "normal" in json_path:
            gt = sample.get("content", "")
        elif "tiny_shuffled" in json_path:
            gt = sample.get("tiny_shuffled_content", "")
        elif "shuffled" in json_path:
            gt = sample.get("shuffled_content", "")
        else:
            raise ValueError("无法确定使用哪个字段作为 ground truth")

        pred = sample.get("ocr", "")
        ned = compute_edit_distance(pred, gt)
        total_ned += ned
        sample_accuracies.append({
            # .get() keeps one malformed sample from aborting the whole run.
            "id": sample.get("id"),
            "image_path": sample.get("image_path"),
            "content": gt,
            "ocr": pred,
            "NED": ned,
        })

    if not data:
        # Guard against ZeroDivisionError on an empty sample list.
        print("共 0 条样本, 平均 NED = 0.0")
        return 0.0, sample_accuracies

    overall_ned = round(total_ned / len(data), 4)
    print(f"共 {len(data)} 条样本, 平均 NED = {overall_ned}")
    return overall_ned, sample_accuracies


if __name__ == "__main__":
    origin = "四月飞雪《四月飞雪》是四月菲雪创作的网络小说,发表于起点网。作品简介大千世界,两亿年前究竟发生了什么,在我们身边,好似缺少着什么,,一个生活在这个世界的小孩,究竟经历了什么,一步步走上了永无止境的通天大道。。接下来就让我们一起见证。。。。。"
    mod = "四月飞雪《四月飞雪雪》是四月菲雪作创作的网络小说,发表于起点网。作作品简大简介世大千世界,两两亿年前究竟发竟发什生了什么,在我们身身边,好似缺什少着什么,,一个生活活在这个世世小界的小孩,经究竟经历了什么,一步步步上走上了永止无止天境的通天大道。。接下来就让我们一起见见证。。。。。"
    ned = compute_edit_distance(origin, mod)
    print(ned)