import json
import os

import Levenshtein
import matplotlib.pyplot as plt
import numpy as np

from fit_2d import plot_density_ned
from fit_3d import fit_and_plot_3d


def read_jsonl(file_path):
    """Read a JSONL file and return the parsed records.

    :param file_path: path to the JSONL file
    :return: list of JSON objects, one per line
    """
    data = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            data.append(json.loads(line.strip()))
    return data


def save_jsonl(data, file_path):
    """Write *data* to *file_path* in JSONL format (one object per line).

    :param data: list of JSON-serializable Python objects
    :param file_path: destination path
    """
    with open(file_path, 'w', encoding='utf-8') as f:
        for item in data:
            f.write(json.dumps(item, ensure_ascii=False) + '\n')


def compute_edit_distance(pred: str, gt: str) -> float:
    """Normalized edit distance (NED) between two strings.

    Newlines and spaces are stripped before comparison. The result lies
    in [0, 1]; smaller means more similar. Note the metric is symmetric
    in its two arguments.

    :param pred: predicted (OCR) text
    :param gt: ground-truth text
    :return: NED rounded to 4 decimal places
    """
    pred = pred.replace("\n", "").replace(" ", "")
    gt = gt.replace("\n", "").replace(" ", "")
    if not pred and not gt:
        # Both empty after normalization: identical by definition.
        return 0.0
    dist = Levenshtein.distance(pred, gt)
    return round(dist / max(len(pred), len(gt)), 4)


def compute_accuracy(json_path: str):
    """Compute the mean NED and per-sample NED records from a JSON file.

    The ground-truth field is chosen from the file name:
    "normal" -> content, "tiny_shuffled" -> tiny_shuffled_content,
    "shuffled" -> shuffled_content.

    :param json_path: path to a JSON file holding a list of samples
    :return: (overall mean NED, list of per-sample result dicts)
    :raises ValueError: if no known tag is found in the file name
    """
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    total_ned = 0
    sample_accuracies = []
    for sample in data:
        # NOTE: "tiny_shuffled" must be tested before "shuffled" because
        # it contains "shuffled" as a substring.
        if "normal" in json_path:
            gt = sample.get("content", "")
        elif "tiny_shuffled" in json_path:
            gt = sample.get("tiny_shuffled_content", "")
        elif "shuffled" in json_path:
            gt = sample.get("shuffled_content", "")
        else:
            raise ValueError("无法确定使用哪个字段作为 ground truth")

        pred = sample.get("ocr", "")
        ned = compute_edit_distance(pred, gt)
        total_ned += ned
        sample_accuracies.append({
            "id": sample["id"],
            "image_path": sample["image_path"],
            "content": gt,
            "ocr": sample["ocr"],
            "NED": ned
        })

    # Guard against an empty sample list (the original raised
    # ZeroDivisionError here).
    overall_ned = round(total_ned / len(data), 4) if data else 0.0
    print(f"共 {len(data)} 条样本, 平均 NED = {overall_ned}")
    return overall_ned, sample_accuracies


def plot_length_ned_bar(data, output_path="length_ned_bar.png", length_bin_width=500, ned_bin_width=0.05):
    """Plot frequency histograms of text length and of NED.

    Two images are written, derived from *output_path* by inserting the
    suffixes "_length" and "_ned" before ".png".

    :param data: raw record list; lengths/NEDs are collected from
        item["rewrite"][dim]["extents"]
    :param output_path: base image path
    :param length_bin_width: bin width for the text-length histogram
    :param ned_bin_width: bin width for the NED histogram
    """
    # Collect one (length, NED) observation per extent.
    length_vals = []
    ned_vals = []
    for item in data:
        for dim, v in item["rewrite"].items():
            for extent in v["extents"]:
                length_vals.append(len(extent["edited"]))
                ned_vals.append(extent["NED"])

    length_vals = np.array(length_vals)
    ned_vals = np.array(ned_vals)
    if len(length_vals) == 0:
        print("⚠️ 数据为空,未生成图像。")
        return

    # Only create a directory when the path actually has one:
    # os.makedirs("") raises FileNotFoundError for a bare file name.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    # --- text-length histogram ---
    length_bins = np.arange(min(length_vals), max(length_vals) + length_bin_width, length_bin_width)
    plt.figure(figsize=(8, 4))
    plt.hist(length_vals, bins=length_bins, color='skyblue', edgecolor='black')
    plt.xlabel("Text Length")
    plt.ylabel("Frequency")
    plt.title("Frequency of Text Length")
    plt.xticks(length_bins)
    plt.tight_layout()
    plt.savefig(output_path.replace(".png", "_length.png"), dpi=300)
    plt.close()

    # --- NED histogram ---
    ned_bins = np.arange(0, 1 + ned_bin_width, ned_bin_width)
    plt.figure(figsize=(8, 4))
    plt.hist(ned_vals, bins=ned_bins, color='salmon', edgecolor='black')
    plt.xlabel("NED")
    plt.ylabel("Frequency")
    plt.title("Frequency of NED")
    plt.xticks(ned_bins)
    plt.tight_layout()
    plt.savefig(output_path.replace(".png", "_ned.png"), dpi=300)
    plt.close()

    print(f"✅ 柱状图已保存:{output_path.replace('.png', '_length.png')} 和 {output_path.replace('.png', '_ned.png')}")


if __name__ == "__main__":
    meta_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_100-5k_sample100_interval500_per10_last-mode/output_processed_ocr_Gundam.jsonl"
    data = read_jsonl(meta_path)

    # 1. Compute NED between each edited text and its OCR prediction.
    #    (Argument order now matches the (pred, gt) signature; NED is
    #    symmetric, so the original swapped order gave the same value.)
    for item in data:
        for dim, v in item["rewrite"].items():
            for extent in v["extents"]:
                gt = extent["edited"]
                pred = extent["ocr"]
                extent["NED_ocr"] = compute_edit_distance(pred, gt)
    save_jsonl(data, "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_100-5k_sample100_interval500_per10_last-mode/output_processed_ocr_Gundam_result.jsonl")

    # 2. Model performance per rewrite dimension, at the maximum level only.
    metric_dim = {
        "形近字替换": 0,
        "音近/同音字替换": 0,
        "语序颠倒": 0,
        "字符增衍": 0,
        "字符缺失": 0
    }
    dim_counts = {k: 0 for k in metric_dim}
    for item in data:
        for dim, v in item["rewrite"].items():
            for extent in v["extents"]:
                # TODO: the NED relative to the original string differs
                # across levels, so levels must be separated — the dataset
                # itself is biased and the other independent variables are
                # not controlled.
                if extent["level"] == 1.0:
                    metric_dim[dim] += extent["NED_ocr"]
                    dim_counts[dim] += 1
    for k, v in metric_dim.items():
        # Average over the actual number of level-1.0 extents per dimension
        # (the original divided by len(data), which is only correct when
        # every item contributes exactly one level-1.0 extent per dimension).
        metric_dim[k] = v / dim_counts[k] if dim_counts[k] else 0.0
    print(metric_dim)

    # 3. Model performance vs. text density, at the maximum level.
    #    Collected per dimension: either 5 curves, or take the mean?
    metric_p = {
        "形近字替换": [],
        "音近/同音字替换": [],
        "语序颠倒": [],
        "字符增衍": [],
        "字符缺失": []
    }
    for item in data:
        for dim, v in item["rewrite"].items():
            for extent in v["extents"]:
                if extent["level"] == 1.0:
                    metric_p[dim].append([item["length"], extent["NED_ocr"]])
    plot_density_ned(metric_p["形近字替换"], save_path="/vol/fit_p_ned.png")

    # 4. Effect of rewrite level, ignoring text density for now — note the
    #    density is itself an uncontrolled independent variable here.
    # TODO
    metric_3d = []
    for item in data:
        for dim, v in item["rewrite"].items():
            for extent in v["extents"]:
                metric_3d.append([item["length"], extent["NED"], extent["NED_ocr"]])
    fit_and_plot_3d(metric_3d, "/vol/fit_3d.png")

    # Distribution of the independent variables.
    plot_length_ned_bar(data, output_path="/vol/length_ned_bar.png", length_bin_width=500, ned_bin_width=0.05)