Upload folder using huggingface_hub
Browse files- scripts/0_process_CCI3.py +87 -0
- scripts/0_process_CCI3_avg.py +142 -0
- scripts/0_process_stories.py +97 -0
- scripts/1_render_images.py +95 -0
- scripts/1_render_images_v2.py +55 -0
- scripts/1_render_images_v3.py +201 -0
- scripts/1_render_images_v3_concurrent.py +235 -0
- scripts/3_calu_metric_v1.py +68 -0
- scripts/3_calu_metric_v2.py +197 -0
- scripts/3_calu_order.py +18 -0
- scripts/Levenshtein1.py +9 -0
- scripts/__pycache__/Levenshtein.cpython-312.pyc +0 -0
- scripts/__pycache__/fit.cpython-312.pyc +0 -0
- scripts/__pycache__/fit_2d.cpython-312.pyc +0 -0
- scripts/__pycache__/fit_3d.cpython-312.pyc +0 -0
- scripts/__pycache__/tiny_shuffle.cpython-312.pyc +0 -0
- scripts/fit_2d.py +75 -0
- scripts/fit_3d.py +69 -0
- scripts/gen_random_char.py +52 -0
- scripts/resize_image.py +15 -0
- scripts/tiny_shuffle.py +132 -0
- scripts/vis/vis.py +129 -0
- scripts/vis/vis_v2.py +151 -0
- scripts/vis/vis_v3.py +171 -0
scripts/0_process_CCI3.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
import os
import random
from tiny_shuffle import random_swap_contiguous  # type: ignore


file_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/data/part-00001-6f0afd98-d375-4d7f-8299-ac5e070bf4fc-c000.jsonl"
save_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/random_sample100/input.json"

# 1. Read the JSONL corpus, skipping blank and malformed lines.
data = []
with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
    for line in f:
        line = line.strip()
        if not line:
            continue
        try:
            data.append(json.loads(line))
        except json.JSONDecodeError:
            continue

print(f"✅ 已读取 {len(data)} 条样本")

# 2. Keep samples whose reported word count falls in [100, 10000].
filtered = [
    item for item in data
    if 100 <= item.get("meta_info", {}).get("words_count", 0) <= 10000
]
print(f"✅ 满足条件的样本数: {len(filtered)}")

# 3. Randomly draw up to 200 samples.
filtered = random.sample(filtered, min(200, len(filtered)))

# 4. Normalize into the output record format.
processed = []
os.makedirs("images", exist_ok=True)

for i, item in enumerate(filtered, 1):
    sample_id = f"RS{i:03d}"
    image_path = f"images/{sample_id}.png"
    content = item.get("text", "") or item.get("content", "")
    # Strip newlines/spaces up front so downstream rendering sees clean text.
    content = content.replace("\n", "").replace(" ", "")

    # tiny_shuffled_content, spans = random_swap_contiguous(content, n_swaps=1)
    # shuffled_content = ''.join(random.sample(content, len(content)))

    processed.append({
        "id": sample_id,
        "data_source": "CCI3",
        "language": "zh",
        "image_path": image_path,
        "content": content,
        "length": len(content),
        # "tiny_shuffled_content": tiny_shuffled_content,
        # "spans": spans,
        # "shuffled_content": shuffled_content
    })

# 5. Save as a JSON file.
# BUGFIX: create the target directory first — open() fails if it is missing.
os.makedirs(os.path.dirname(save_path), exist_ok=True)
with open(save_path, "w", encoding="utf-8") as f:
    json.dump(processed, f, ensure_ascii=False, indent=2)

print(f"✅ 已生成 {len(processed)} 条样本,保存至:{save_path}")


# 6. Plot the length distribution as a bar chart.
import matplotlib.pyplot as plt
import numpy as np

lengths = [p["length"] for p in processed]
if lengths:  # guard: np.histogram raises on a single-edge bin list
    bin_width = 100  # one bar per 100 characters
    max_len = max(lengths)
    bins = list(range(0, max_len + bin_width, bin_width))
    hist, edges = np.histogram(lengths, bins=bins)

    plt.figure(figsize=(10, 5))
    plt.bar(edges[:-1], hist, width=bin_width, align="edge", color="skyblue", edgecolor="black")
    plt.xticks(edges[::2], rotation=45)
    plt.xlabel("Length (characters)")
    plt.ylabel("Count")
    plt.title("Random-Sample Length Distribution")
    plt.tight_layout()
    plt.savefig("/vol/zhaoy/ds-ocr/data/CCI3-Data/random_sample100/length_dist.png", dpi=300)
    print("✅ 柱状图已保存为 length_dist.png")
|
scripts/0_process_CCI3_avg.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
import os
import random
from tiny_shuffle import random_swap_contiguous  # type: ignore
from collections import defaultdict  # used to bucket samples by length
import matplotlib.pyplot as plt
import numpy as np

# --- Paths ---
base_dir = "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_5k-10k_sample100_interval500_per10"
file_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/data/part-00001-6f0afd98-d375-4d7f-8299-ac5e070bf4fc-c000.jsonl"
save_path = f"{base_dir}/meta.json"
plot_save_path = f"{base_dir}/length_dist.png"
# BUGFIX: was f"/images" (filesystem root). All other outputs live under
# base_dir, so the rendered images must too.
image_save_dir = f"{base_dir}/images"

# 1. Read the JSONL corpus, skipping blank and malformed lines.
data = []
with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
    for line in f:
        line = line.strip()
        if not line:
            continue
        try:
            data.append(json.loads(line))
        except json.JSONDecodeError:
            continue

print(f"✅ 已读取 {len(data)} 条样本")

# 2. Pre-filter by the *actual* character length of the cleaned content
#    (meta_info.words_count is not trusted here).
print("🚀 开始按 'content' 真实字符长度进行预筛选...")
filtered_by_char_length = []
for item in data:
    # Identical cleaning to what step 4 applies when records are emitted.
    content = item.get("text", "") or item.get("content", "")
    content = content.replace("\n", "").replace(" ", "")
    length = len(content)

    if 5000 <= length <= 10000:  # target true-character-length range
        # Cache the length so the later steps don't recompute it.
        item['_true_length'] = length
        filtered_by_char_length.append(item)

print(f"✅ 满足 '真实字符长度' 条件的样本数: {len(filtered_by_char_length)}")


# 3. Balanced sampling over character-length buckets.
print("🚀 开始按 'content' 字符长度进行平衡采样...")
bin_size = 500        # bucket width (characters)
samples_per_bin = 10  # samples drawn per bucket

# 3.1. Bucket the pre-filtered items by length // bin_size.
binned_data = defaultdict(list)
for item in filtered_by_char_length:
    length = item['_true_length']
    bin_key = length // bin_size
    binned_data[bin_key].append(item)

# 3.2. Draw up to samples_per_bin items from each bucket.
balanced_samples = []
print(f" -> 采样间隔: {bin_size} 字符, 每个区间最多: {samples_per_bin} 条")
for bin_key in sorted(binned_data.keys()):
    items_in_bin = binned_data[bin_key]
    n_to_sample = min(samples_per_bin, len(items_in_bin))
    sampled_items = random.sample(items_in_bin, n_to_sample)
    balanced_samples.extend(sampled_items)

    min_len = bin_key * bin_size
    max_len = (bin_key + 1) * bin_size - 1
    print(f" -> 区间 [{min_len:>5d} - {max_len:>5d}]: 找到 {len(items_in_bin):>4} 条, 抽取 {n_to_sample} 条")

# 3.3. Replaces the 'filtered' variable of the non-balanced script.
filtered = balanced_samples
print(f"✅ 平衡采样后总样本数: {len(filtered)}")


# 4. Build the output records.
processed = []
os.makedirs(image_save_dir, exist_ok=True)

for i, item in enumerate(filtered, 1):
    sample_id = f"RS{i:03d}"
    image_path = f"{image_save_dir}/{sample_id}.png"

    # Clean again (same rules as step 2; cheap, keeps this step standalone).
    content = item.get("text", "") or item.get("content", "")
    content = content.replace("\n", "").replace(" ", "")

    processed.append({
        "id": sample_id,
        "data_source": "CCI3",
        "language": "zh",
        "image_path": image_path,
        "content": content,
        "length": item['_true_length'],  # cached in step 2
    })


# 5. Save as a JSON file (ensure the directory exists first).
os.makedirs(os.path.dirname(save_path), exist_ok=True)
with open(save_path, "w", encoding="utf-8") as f:
    json.dump(processed, f, ensure_ascii=False, indent=2)

print(f"✅ 已生成 {len(processed)} 条样本,保存至:{save_path}")


# 6. Plot the length distribution.
lengths = [p["length"] for p in processed]

if not lengths:
    print("⚠️ 采样结果为空,无法绘制柱状图。")
else:
    # Bar width for plotting; may differ from the sampling bin_size.
    bin_width = bin_size
    max_len = max(lengths)
    bins = list(range(0, max_len + bin_width, bin_width))
    hist, edges = np.histogram(lengths, bins=bins)

    plt.figure(figsize=(10, 5))
    plt.bar(edges[:-1], hist, width=bin_width, align="edge", color="skyblue", edgecolor="black")

    # Thin out x ticks so they stay readable (at most ~20 ticks).
    tick_step = max(1, len(edges) // 20)
    plt.xticks(edges[::tick_step], rotation=45)

    plt.xlabel("Length (characters)")
    plt.ylabel("Count")
    plt.title(f"Balanced-Sample Length Distribution (Total: {len(lengths)})")
    plt.tight_layout()
    plt.savefig(plot_save_path, dpi=300)
    print(f"✅ 柱状图已保存为 {plot_save_path}")
|
scripts/0_process_stories.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import random
|
| 4 |
+
import re
|
| 5 |
+
from tiny_shuffle import random_swap_contiguous # type: ignore
|
| 6 |
+
|
| 7 |
+
def shuffle_words_and_punct(text: str) -> str:
    """
    Randomly shuffle the order of words and punctuation in English text.

    Punctuation marks are treated as independent tokens, so they move
    around just like words. Returns *text* unchanged when no tokens are
    found. Uses the module-level `random` state (not seeded here).
    """
    # Extract words (letters/digits runs) and single punctuation marks.
    tokens = re.findall(r"[A-Za-z0-9]+|[^\w\s]", text)
    if not tokens:
        return text

    shuffled = tokens[:]
    random.shuffle(shuffled)

    # Re-join into a sentence.
    # Rule: no space before a punctuation token; a single space otherwise.
    result = ""
    for i, tok in enumerate(shuffled):
        if i > 0 and not re.match(r"[^\w\s]", tok):  # add a space only before non-punctuation
            result += " "
        result += tok

    return result
|
| 29 |
+
|
| 30 |
+
file_path = "/vol/zhaoy/ds-ocr/data/Stories_en/data/Children-Stories-0-Final.json"
save_path = "/vol/zhaoy/ds-ocr/data/Stories_en/sample200_len0.8-1.2k/input.json"

# 1. Load the source corpus (a single JSON array of {"text": ...} records).
with open(file_path, "r", encoding="utf-8") as f:
    data_o = json.load(f)

# Merge adjacent story pairs to roughly double each sample's length.
new_data = []
i = 0
while i < len(data_o):
    # Odd count: keep the final story on its own.
    if i == len(data_o) - 1:
        item = {
            "text": data_o[i]["text"],
            # BUGFIX: this key was "text_token_length", which made the
            # length filter below raise KeyError whenever the corpus had
            # an odd number of stories.
            "text_length": len(data_o[i]["text"].split())
        }
        new_data.append(item)
        break

    # Merge the two neighbouring stories.
    text1 = data_o[i]["text"].strip()
    text2 = data_o[i + 1]["text"].strip()
    merged_text = text1 + " " + text2

    item = {
        "text": merged_text,
        "text_length": len(merged_text.split())
    }
    new_data.append(item)
    i += 2  # consumed two records

# 2. Keep samples in the target word-count range (0.8k–1.2k words).
filtered = [
    item for item in new_data
    if 800 <= item["text_length"] <= 1200
]
print(f"✅ 满足条件的样本数: {len(filtered)}")

# 3. Randomly draw up to 10 samples.
sampled = random.sample(filtered, min(10, len(filtered)))

# 4. Build the output records, including both shuffled variants.
processed = []
os.makedirs("images", exist_ok=True)

for i, item in enumerate(sampled, 1):
    sample_id = f"RS{i:03d}"
    image_path = f"images/{sample_id}.png"
    content = item.get("text", "") or item.get("content", "")
    content = content.replace("\n", "")

    tiny_shuffled_content, spans = random_swap_contiguous(content, n_swaps=1)
    shuffled_content = shuffle_words_and_punct(content)

    processed.append({
        "id": sample_id,
        "image_path": image_path,
        "content": content,
        "tiny_shuffled_content": tiny_shuffled_content,
        "spans": spans,
        "shuffled_content": shuffled_content
    })

# 5. Save as a JSON file (ensure the target directory exists first).
os.makedirs(os.path.dirname(save_path), exist_ok=True)
with open(save_path, "w", encoding="utf-8") as f:
    json.dump(processed, f, ensure_ascii=False, indent=2)

print(f"✅ 已生成 {len(processed)} 条样本,保存至:{save_path}")
|
scripts/1_render_images.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 做pilot study的时候用的渲染代码
|
| 2 |
+
|
| 3 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 4 |
+
import textwrap
|
| 5 |
+
import json, os
|
| 6 |
+
|
| 7 |
+
def read_jsonl(file_path):
    """
    Read a JSONL file and return the parsed records.

    :param file_path: path to the JSONL file
    :return: list with one parsed JSON object per line
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        return [json.loads(raw.strip()) for raw in f]
|
| 19 |
+
|
| 20 |
+
# ========================
# Configuration
# ========================

# Input metadata (JSONL, one record per sample)
json_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_100-5k_sample100_interval500_per10/test.jsonl"
data = read_jsonl(json_path)

# Output root; each variant gets its own sub-folder
output_root = "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_100-5k_sample100_interval500_per10"

# Three variants and the JSON field each one renders
versions = {
    "normal": "content",
    "shuffled": "shuffled_content",
    "tiny_shuffled": "tiny_shuffled_content",
}

# Font path and size (WenQuanYi Zen Hei covers CJK)
font_path = "/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc"
font_size = 64
font = ImageFont.truetype(font_path, font_size)

# Maximum characters per wrapped line
max_chars_per_line = 40  # Chinese
# max_chars_per_line = 100  # English

# ========================
# Main flow
# ========================


for version_name, field in versions.items():
    # Create the output folder for this variant
    image_root = os.path.join(output_root, version_name, "images")
    os.makedirs(image_root, exist_ok=True)

    print(f"\n🖋 正在生成 {version_name} 版本的图片到 {image_root} ...")

    for item in data:
        text = item.get(field, "")
        if not text:
            print(f"⚠️ 跳过 {item['id']}:字段 {field} 为空。")
            continue

        # Wrap by character count (not pixel width — see v3 for that)
        wrapped_text = textwrap.fill(text, width=max_chars_per_line)

        # Measure the wrapped text on a throwaway canvas
        dummy_img = Image.new("RGB", (10, 10))
        draw = ImageDraw.Draw(dummy_img)
        bbox = draw.multiline_textbbox((0, 0), wrapped_text, font=font, spacing=10)
        text_w, text_h = bbox[2] - bbox[0], bbox[3] - bbox[1]

        # Add uniform padding around the text
        padding = 50
        img_w, img_h = text_w + 2 * padding, text_h + 2 * padding

        # Create the final white canvas and draw the text in black
        img = Image.new("RGB", (img_w, img_h), (255, 255, 255))
        draw = ImageDraw.Draw(img)
        draw.multiline_text(
            (padding, padding),
            wrapped_text,
            font=font,
            fill=(0, 0, 0),
            spacing=10,
        )

        # Save one PNG per sample, named by its id
        image_path = os.path.join(image_root, f"{item['id']}.png")
        img.save(image_path)
        print(f"✅ {version_name} - 已生成 {item['id']} ({img_w}x{img_h})")

print("\n🎨 所有版本图片生成完毕!")
|
scripts/1_render_images_v2.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 2 |
+
import textwrap
|
| 3 |
+
|
| 4 |
+
def text_to_image(text, output_path="output.png",
                  font_path=None, font_size=24,
                  max_width=800, padding=40,
                  bg_color="white", text_color="black",
                  line_spacing=1.0):
    """
    Render *text* as an image (white background, black text by default).

    Line height is derived from the font metrics, so spacing adapts to
    the chosen font size. Intended for paper figures / demo renders.
    """
    # Resolve the font (on Windows use e.g. "C:/Windows/Fonts/simhei.ttf")
    resolved = font_path if font_path is not None else "/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc"
    font = ImageFont.truetype(resolved, font_size)

    # Greedy wrapping: rough estimate of how many characters fit per line
    per_line = int(max_width / font_size * 1.8)
    lines = []
    for paragraph in text.split("\n"):
        lines.extend(textwrap.wrap(paragraph, width=per_line) or [" "])

    # Precise per-line height from the font metrics
    ascent, descent = font.getmetrics()
    line_height = int((ascent + descent) * line_spacing)

    # Canvas dimensions
    img_width = max_width + 2 * padding
    img_height = line_height * len(lines) + 2 * padding

    # Draw line by line
    img = Image.new("RGB", (img_width, img_height), color=bg_color)
    draw = ImageDraw.Draw(img)
    for row, line in enumerate(lines):
        draw.text((padding, padding + row * line_height), line, fill=text_color, font=font)

    img.save(output_path)
    print(f"✅ 图片已保存到: {output_path}")
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# Example usage: render a mixed Chinese/English sample to a fixed path
if __name__ == "__main__":
    sample_text = """这是一段示例文本。这是一段示例文本。这是一段示例文本。这是一段示例文本。这是一段示例文本。这是一段示例文本。这是一段示例文本。这是一段示例文本。v
可以包含多行内容,用于渲染成白底黑字的论文插图风格。
支持中文和英文混排 English example line."""
    text_to_image(sample_text, "/vol/text_image.png", font_size=28)
|
scripts/1_render_images_v3.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 2 |
+
import re, os, json
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
|
| 5 |
+
def read_jsonl(file_path):
    """
    Read a JSONL file and return the parsed records.

    :param file_path: path to the JSONL file
    :return: list with one parsed JSON object per line
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        return [json.loads(raw.strip()) for raw in f]
|
| 17 |
+
|
| 18 |
+
def save_jsonl(data, file_path):
    """
    Write *data* out as a JSONL file (one JSON document per line).

    :param data: iterable of JSON-serializable Python objects
    :param file_path: destination path of the JSONL file
    """
    with open(file_path, 'w', encoding='utf-8') as f:
        f.writelines(json.dumps(record, ensure_ascii=False) + '\n' for record in data)
|
| 28 |
+
|
| 29 |
+
def split_tokens_for_wrapping(text):
    """
    Split *text* into tokens suitable for line wrapping.

    Whitespace runs and Latin words/digits/punctuation stay whole, while
    any segment containing CJK characters is exploded into single
    characters (a line break is safe anywhere in CJK text). Token order
    matches the original text.
    """
    out = []
    for segment in re.split(r'(\s+)', text):
        if not segment:
            continue
        # Whitespace is kept as its own token so spacing is preserved;
        # non-CJK segments keep whole words intact (over-long ones are
        # split later by the caller).
        if segment.isspace() or not re.search(r'[\u4e00-\u9fff]', segment):
            out.append(segment)
        else:
            # CJK-containing segment: one token per character
            out.extend(segment)
    return out
|
| 53 |
+
|
| 54 |
+
def wrap_text_pixel(draw, text, font, max_width):
    """
    Wrap *text* into lines no wider than *max_width* pixels.

    :param draw: an ImageDraw.Draw instance, used only for measurement
    :param text: the text to wrap (a single paragraph, no newlines)
    :param font: an ImageFont instance
    :param max_width: maximum line width in pixels
    :return: list of wrapped lines

    Refactor of the original: the per-candidate width was measured with
    two identical draw.textbbox calls, and the character-splitting loop
    was duplicated verbatim in two branches — both are now shared helpers.
    Behavior is unchanged.
    """
    def width_of(s):
        # Pixel width via textbbox (more accurate than legacy textsize).
        bbox = draw.textbbox((0, 0), s, font=font)
        return bbox[2] - bbox[0]

    def split_long(token, full_lines):
        # Greedily split an over-wide token character by character.
        # Completed chunks go into full_lines; the trailing partial
        # chunk is returned to become the new current line.
        sub = ""
        for ch in token:
            if width_of(sub + ch) <= max_width:
                sub += ch
            else:
                if sub:
                    full_lines.append(sub)
                sub = ch
        return sub

    lines = []
    cur_line = ""
    for token in split_tokens_for_wrapping(text):
        candidate = cur_line + token
        if width_of(candidate) <= max_width:
            cur_line = candidate
            continue

        if cur_line:
            # Flush the current line; the token starts the next one.
            lines.append(cur_line.rstrip())  # drop trailing spaces
            if width_of(token) > max_width and len(token) > 1:
                # Token alone is too wide (long word / no-space run).
                cur_line = split_long(token, lines)
            else:
                cur_line = token.lstrip()  # drop a leading space token
        else:
            # Current line empty but the token still exceeds max_width.
            cur_line = split_long(token, lines)

    if cur_line:
        lines.append(cur_line.rstrip())
    return lines
|
| 113 |
+
|
| 114 |
+
def text_to_image_precise(text, output_path="output.png",
                          font_path=None, font_size=24,
                          max_width=1600, padding=40,
                          bg_color="white", text_color="black",
                          line_spacing=1.0, min_width=200):
    """
    Render *text* to an image using pixel-accurate wrapping, so lines
    never overflow the margins.

    :param text: text to render; "\\n" separates paragraphs (blank lines kept)
    :param output_path: PNG destination (parent dirs created on demand)
    :param max_width: maximum line width in pixels (before padding)
    :param min_width: minimum content width so tiny texts still get a canvas
    :return: the PIL.Image object (handy for debugging)
    """
    # Font
    if font_path is None:
        font_path = "/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc"
    font = ImageFont.truetype(font_path, font_size)

    # Temporary canvas used only for text measurement
    tmp_img = Image.new("RGB", (10, 10), color=bg_color)
    draw = ImageDraw.Draw(tmp_img)

    # Wrap each paragraph separately, then flatten into one line list
    final_lines = []
    for paragraph in text.split("\n"):
        paragraph = paragraph.rstrip("\n")
        if paragraph == "":
            final_lines.append("")  # preserve blank lines
            continue
        wrapped = wrap_text_pixel(draw, paragraph, font, max_width)
        final_lines.extend(wrapped)

    # Content width = widest line (in pixels)
    max_line_w = 0
    for line in final_lines:
        bbox = draw.textbbox((0,0), line, font=font)
        w = bbox[2] - bbox[0]
        if w > max_line_w:
            max_line_w = w

    content_width = max(min_width, max_line_w)
    img_width = int(content_width + 2 * padding)

    # Line height from font metrics
    ascent, descent = font.getmetrics()
    line_height = int((ascent + descent) * line_spacing)

    img_height = line_height * len(final_lines) + 2 * padding

    # Final canvas
    img = Image.new("RGB", (img_width, img_height), color=bg_color)
    draw_final = ImageDraw.Draw(img)

    y = padding
    for line in final_lines:
        draw_final.text((padding, y), line, fill=text_color, font=font)
        y += line_height

    # BUGFIX: os.makedirs("") raises FileNotFoundError when output_path has
    # no directory component (e.g. the default "output.png") — guard it.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    img.save(output_path)
    print(f"✅ 图片已保存到: {output_path} (size: {img_width}x{img_height})")
    return img
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
# Example usage: render every rewritten variant listed in the metadata file
if __name__ == "__main__":

    # Chinese rewrite-dimension name -> output sub-folder name
    mapping = {
        "形近字替换": "similar_char",
        "音近/同音字替换": "phonetic_char",
        "语序颠倒": "word_order",
        "字符增衍": "char_insertion",
        "字符缺失": "char_deletion"
    }
    image_root = "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_5k-10k_sample100_interval500_per10_last_mode/"
    meta_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_5k-10k_sample100_interval500_per10_last_mode/output_processed_last-mode.jsonl"
    data = read_jsonl(meta_path)
    for item in tqdm(data):
        # item["rewrite"] maps dimension -> {"extents": [...]}; each extent
        # carries the edited text and its NED score — TODO confirm schema
        for dim, v in item["rewrite"].items():
            for extent in v["extents"]:
                text = extent["edited"]
                ned = extent["NED"]
                # Absolute path for rendering; relative path stored back
                # into the record (same file name, NED suffix appended).
                image_path = os.path.join(image_root, "images", mapping[dim], os.path.basename(item["image_path"]).replace(".png", f"_ned-{ned}.png"))
                extent["image_path"] = os.path.join("images", mapping[dim], os.path.basename(item["image_path"]).replace(".png", f"_ned-{ned}.png"))
                text_to_image_precise(text, image_path, font_size=28)

    save_jsonl(data, meta_path)  # overwrite in place; only adds ["image_path"]
    # sample_text = """这是一段示例文本。这是一段示例文本。这是一段示例文本。这是一段示例文本。这是一段示例文本。这是一段示例文本。这是一段示例文本。这是一段示例文本。v
    # 可以包含多行内容,用于渲染成白底黑字的论文插图风格。
    # 支持中文和英文混排 English example line."""
    # text_to_image_precise(sample_text, "/vol/text_image.png", font_size=28)
|
scripts/1_render_images_v3_concurrent.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 2 |
+
import re, os, json
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
import concurrent
|
| 5 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 6 |
+
import copy
|
| 7 |
+
|
| 8 |
+
def read_jsonl(file_path):
    """
    Read a JSONL file and return the parsed records.

    :param file_path: path to the JSONL file
    :return: list with one parsed JSON object per line
    """
    data = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            data.append(json.loads(line.strip()))
    return data
|
| 20 |
+
|
| 21 |
+
def save_jsonl(data, file_path):
    """Write records to disk in JSONL format (one JSON object per line).

    :param data: iterable of JSON-serializable Python objects
    :param file_path: destination path for the JSONL file
    """
    with open(file_path, 'w', encoding='utf-8') as fh:
        fh.writelines(json.dumps(entry, ensure_ascii=False) + '\n' for entry in data)
|
| 31 |
+
|
| 32 |
+
def split_tokens_for_wrapping(text):
    """Split *text* into tokens suitable for pixel-based line wrapping.

    Strategy: split on whitespace first (keeping the whitespace runs as
    their own tokens so spacing is preserved); any non-space segment that
    contains CJK characters is broken into single characters, everything
    else stays a whole token.

    :param text: input string
    :return: list of tokens in original order
    """
    result = []
    for segment in re.split(r'(\s+)', text):
        if not segment:
            continue
        if segment.isspace():
            # Whitespace runs become independent tokens.
            result.append(segment)
        elif re.search(r'[\u4e00-\u9fff]', segment):
            # CJK-containing segments are safest split per character.
            result.extend(segment)
        else:
            # Non-CJK words/punctuation stay whole (over-long ones are
            # split later by the wrapper itself).
            result.append(segment)
    return result
|
| 56 |
+
|
| 57 |
+
def wrap_text_pixel(draw, text, font, max_width):
    """Greedily wrap *text* into lines no wider than *max_width* pixels.

    :param draw: ImageDraw.Draw instance used only for measurement
    :param text: text to wrap (a single paragraph)
    :param font: PIL font used for measuring
    :param max_width: maximum line width in pixels
    :return: list of wrapped lines
    """
    def _width(s):
        # Pixel width via textbbox (more accurate than textsize).
        box = draw.textbbox((0, 0), s, font=font)
        return box[2] - box[0]

    def _split_chars(token):
        # Greedy per-character split for tokens wider than the limit.
        # Returns (completed lines, leftover partial line — may be "").
        finished = []
        piece = ""
        for ch in token:
            if _width(piece + ch) <= max_width:
                piece += ch
            else:
                if piece:
                    finished.append(piece)
                piece = ch
        return finished, piece

    lines = []
    current = ""
    for token in split_tokens_for_wrapping(text):
        if _width(current + token) <= max_width:
            # Token still fits on the current line.
            current += token
            continue
        if current:
            # Flush the current line; the token starts the next one.
            lines.append(current.rstrip())
            if _width(token) > max_width and len(token) > 1:
                # A single token wider than the line: char-split it.
                done, current = _split_chars(token)
                lines.extend(done)
            else:
                current = token.lstrip()  # drop a leading space
        else:
            # Empty line but the token alone exceeds max_width.
            done, current = _split_chars(token)
            lines.extend(done)
    if current:
        lines.append(current.rstrip())
    return lines
|
| 116 |
+
|
| 117 |
+
def text_to_image_precise(text, output_path="output.png",
                          font_path=None, font_size=24,
                          max_width=1600, padding=40,
                          bg_color="white", text_color="black",
                          line_spacing=1.0, min_width=200):
    """Render *text* to an image with pixel-accurate wrapping.

    Lines are measured with the actual font so they never overflow the
    margins.  The image width adapts to the widest line (at least
    ``min_width``), the height to the number of wrapped lines.

    :param text: text to render; ``\\n`` separates paragraphs
    :param output_path: where to save the PNG
    :param font_path: TTF/TTC font; defaults to a CJK-capable system font
    :param font_size: font size in points
    :param max_width: maximum content width in pixels
    :param padding: margin around the text in pixels
    :param bg_color: background color
    :param text_color: text color
    :param line_spacing: multiplier on the font's natural line height
    :param min_width: minimum content width in pixels
    :return: the rendered PIL.Image (handy for debugging)
    """
    if font_path is None:
        font_path = "/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc"
    font = ImageFont.truetype(font_path, font_size)

    # Throw-away canvas used purely for text measurement.
    tmp_img = Image.new("RGB", (10, 10), color=bg_color)
    draw = ImageDraw.Draw(tmp_img)

    # Wrap each paragraph independently; empty paragraphs stay empty lines.
    final_lines = []
    for paragraph in text.split("\n"):
        if paragraph == "":
            final_lines.append("")
            continue
        final_lines.extend(wrap_text_pixel(draw, paragraph, font, max_width))

    # Content width = widest rendered line.
    max_line_w = 0
    for line in final_lines:
        bbox = draw.textbbox((0, 0), line, font=font)
        max_line_w = max(max_line_w, bbox[2] - bbox[0])

    content_width = max(min_width, max_line_w)
    img_width = int(content_width + 2 * padding)

    # Line height from font metrics scaled by the spacing factor.
    ascent, descent = font.getmetrics()
    line_height = int((ascent + descent) * line_spacing)
    img_height = line_height * len(final_lines) + 2 * padding

    img = Image.new("RGB", (img_width, img_height), color=bg_color)
    draw_final = ImageDraw.Draw(img)

    y = padding
    for line in final_lines:
        draw_final.text((padding, y), line, fill=text_color, font=font)
        y += line_height

    # Bugfix: os.makedirs("") raises when output_path is a bare filename
    # (which is exactly what the default "output.png" is).
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    img.save(output_path)
    print(f"✅ 图片已保存到: {output_path} (size: {img_width}x{img_height})")
    return img
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def process_render_item(item, mapping, image_root):
    """Render every extent of *item* to an image file.

    Mutates *item* in place by storing each extent's relative image path
    under ``extent["image_path"]``.

    :param item: one JSONL record with a "rewrite" dict of dimensions
    :param mapping: Chinese dimension name -> English folder name
    :param image_root: root directory the relative paths resolve against
    :return: the updated item
    """
    for dim, payload in item["rewrite"].items():
        for extent in payload["extents"]:
            edited_text = extent["edited"]
            ned_value = extent["NED"]

            # Derive the output filename from the item's base image name,
            # tagging it with the extent's NED.
            out_name = os.path.basename(item["image_path"]).replace(
                ".png", f"_ned-{ned_value}.png"
            )
            rel_path = os.path.join("images", mapping[dim], out_name)
            abs_path = os.path.join(image_root, rel_path)

            os.makedirs(os.path.dirname(abs_path), exist_ok=True)

            # Record the relative path before rendering.
            extent["image_path"] = rel_path
            text_to_image_precise(edited_text, abs_path, font_size=28)

    return item
|
| 203 |
+
|
| 204 |
+
# 示例用法
|
| 205 |
+
if __name__ == "__main__":
    # NOTE(review): rendering is CPU/PIL-bound, so the thread pool gives
    # little speedup; kept for parity with the original run setup.
    max_workers = 50

    # Chinese corruption-dimension name -> English folder name.
    mapping = {
        "形近字替换": "similar_char",
        "音近/同音字替换": "phonetic_char",
        "语序颠倒": "word_order",
        "字符增衍": "char_insertion",
        "字符缺失": "char_deletion"
    }

    image_root = "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_100-5k_sample100_interval500_per10_last-mode/"
    meta_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_100-5k_sample100_interval500_per10_last-mode/output_processed_last-mode.jsonl"

    data = read_jsonl(meta_path)

    # Fan out rendering across threads and collect results as they finish.
    updated_items = []
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        pending = [
            pool.submit(process_render_item, record, mapping, image_root)
            for record in data
        ]
        for done in tqdm(concurrent.futures.as_completed(pending), total=len(pending)):
            updated_items.append(copy.deepcopy(done.result()))

    # Write the updated metadata back in one shot.
    save_jsonl(updated_items, meta_path)
    print("🚀 全部渲染完成并写入 JSON!")
|
scripts/3_calu_metric_v1.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import Levenshtein
|
| 3 |
+
|
| 4 |
+
def _levenshtein(a: str, b: str) -> int:
    """Two-row dynamic-programming Levenshtein distance (stdlib-only fallback)."""
    if len(a) < len(b):
        a, b = b, a
    previous = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        current = [i]
        for j, cb in enumerate(b, 1):
            current.append(min(previous[j] + 1,        # deletion
                               current[j - 1] + 1,     # insertion
                               previous[j - 1] + (ca != cb)))  # substitution
        previous = current
    return previous[-1]


def compute_edit_distance(pred: str, gt: str) -> float:
    """Compute the Normalized Edit Distance (NED) between two strings.

    Newlines and spaces are stripped before comparison.  The result is in
    [0, 1]; smaller means more similar.

    :param pred: predicted string (e.g. OCR output)
    :param gt: ground-truth string
    :return: NED rounded to 4 decimals
    """
    pred = pred.replace("\n", "").replace(" ", "")
    gt = gt.replace("\n", "").replace(" ", "")
    if not pred and not gt:
        return 0.0
    try:
        # Fast C implementation when the optional package is importable.
        dist = Levenshtein.distance(pred, gt)
    except NameError:
        # Robustness fix: don't hard-crash when python-Levenshtein is absent.
        dist = _levenshtein(pred, gt)
    return round(dist / max(len(pred), len(gt)), 4)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def compute_accuracy(json_path: str):
    """Batch-compute per-sample NED and the corpus average from a JSON file.

    The ground-truth field is inferred from the file path ("normal",
    "tiny_shuffled" or "shuffled" — checked in that order since
    "tiny_shuffled" also contains "shuffled").

    :param json_path: path to the OCR result JSON (a list of samples)
    :return: (average NED rounded to 4 decimals, list of per-sample records)
    :raises ValueError: when the path names none of the known variants
    """
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    total_ned = 0
    sample_accuracies = []

    for sample in data:
        # Pick the ground-truth field from the file-name convention.
        if "normal" in json_path:
            gt = sample.get("content", "")
        elif "tiny_shuffled" in json_path:
            gt = sample.get("tiny_shuffled_content", "")
        elif "shuffled" in json_path:
            gt = sample.get("shuffled_content", "")
        else:
            raise ValueError("无法确定使用哪个字段作为 ground truth")

        pred = sample.get("ocr", "")
        ned = compute_edit_distance(pred, gt)
        total_ned += ned

        sample_accuracies.append({
            "id": sample["id"],
            "image_path": sample["image_path"],
            "content": gt,
            "ocr": sample["ocr"],
            "NED": ned
        })

    # Bugfix: guard against an empty result file (ZeroDivisionError).
    overall_ned = round(total_ned / len(data), 4) if data else 0.0
    print(f"共 {len(data)} 条样本, 平均 NED = {overall_ned}")

    return overall_ned, sample_accuracies
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
if __name__ == "__main__":
    # Sanity check: NED between an original passage and a corrupted rewrite.
    origin = "四月飞雪《四月飞雪》是四月菲雪创作的网络小说,发表于起点网。作品简介大千世界,两亿年前究竟发生了什么,在我们身边,好似缺少着什么,,一个生活在这个世界的小孩,究竟经历了什么,一步步走上了永无止境的通天大道。。接下来就让我们一起见证。。。。。"
    mod = "四月飞雪《四月飞雪雪》是四月菲雪作创作的网络小说,发表于起点网。作作品简大简介世大千世界,两两亿年前究竟发竟发什生了什么,在我们身身边,好似缺什少着什么,,一个生活活在这个世世小界的小孩,经究竟经历了什么,一步步步上走上了永止无止天境的通天大道。。接下来就让我们一起见见证。。。。。"
    score = compute_edit_distance(origin, mod)
    print(score)
|
scripts/3_calu_metric_v2.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import Levenshtein
|
| 3 |
+
from fit_2d import plot_density_ned
|
| 4 |
+
from fit_3d import fit_and_plot_3d
|
| 5 |
+
import numpy as np
|
| 6 |
+
import matplotlib.pyplot as plt
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
def read_jsonl(file_path):
    """Read a JSONL file and return the parsed records.

    :param file_path: path to the JSONL file
    :return: list of JSON objects, one per line
    """
    with open(file_path, 'r', encoding='utf-8') as fh:
        return [json.loads(raw.strip()) for raw in fh]
|
| 21 |
+
|
| 22 |
+
def save_jsonl(data, file_path):
    """Write records to disk in JSONL format (one JSON object per line).

    :param data: iterable of JSON-serializable Python objects
    :param file_path: destination path for the JSONL file
    """
    with open(file_path, 'w', encoding='utf-8') as fh:
        fh.writelines(json.dumps(entry, ensure_ascii=False) + '\n' for entry in data)
|
| 32 |
+
|
| 33 |
+
def _levenshtein(a: str, b: str) -> int:
    """Two-row dynamic-programming Levenshtein distance (stdlib-only fallback)."""
    if len(a) < len(b):
        a, b = b, a
    previous = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        current = [i]
        for j, cb in enumerate(b, 1):
            current.append(min(previous[j] + 1,        # deletion
                               current[j - 1] + 1,     # insertion
                               previous[j - 1] + (ca != cb)))  # substitution
        previous = current
    return previous[-1]


def compute_edit_distance(pred: str, gt: str) -> float:
    """Compute the Normalized Edit Distance (NED) between two strings.

    Newlines and spaces are stripped before comparison.  The result is in
    [0, 1]; smaller means more similar.

    :param pred: predicted string (e.g. OCR output)
    :param gt: ground-truth string
    :return: NED rounded to 4 decimals
    """
    pred = pred.replace("\n", "").replace(" ", "")
    gt = gt.replace("\n", "").replace(" ", "")
    if not pred and not gt:
        return 0.0
    try:
        # Fast C implementation when the optional package is importable.
        dist = Levenshtein.distance(pred, gt)
    except NameError:
        # Robustness fix: don't hard-crash when python-Levenshtein is absent.
        dist = _levenshtein(pred, gt)
    return round(dist / max(len(pred), len(gt)), 4)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def compute_accuracy(json_path: str):
    """Batch-compute per-sample NED and the corpus average from a JSON file.

    The ground-truth field is inferred from the file path ("normal",
    "tiny_shuffled" or "shuffled" — checked in that order since
    "tiny_shuffled" also contains "shuffled").

    :param json_path: path to the OCR result JSON (a list of samples)
    :return: (average NED rounded to 4 decimals, list of per-sample records)
    :raises ValueError: when the path names none of the known variants
    """
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    total_ned = 0
    sample_accuracies = []

    for sample in data:
        # Pick the ground-truth field from the file-name convention.
        if "normal" in json_path:
            gt = sample.get("content", "")
        elif "tiny_shuffled" in json_path:
            gt = sample.get("tiny_shuffled_content", "")
        elif "shuffled" in json_path:
            gt = sample.get("shuffled_content", "")
        else:
            raise ValueError("无法确定使用哪个字段作为 ground truth")

        pred = sample.get("ocr", "")
        ned = compute_edit_distance(pred, gt)
        total_ned += ned

        sample_accuracies.append({
            "id": sample["id"],
            "image_path": sample["image_path"],
            "content": gt,
            "ocr": sample["ocr"],
            "NED": ned
        })

    # Bugfix: guard against an empty result file (ZeroDivisionError).
    overall_ned = round(total_ned / len(data), 4) if data else 0.0
    print(f"共 {len(data)} 条样本, 平均 NED = {overall_ned}")

    return overall_ned, sample_accuracies
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def plot_length_ned_bar(data, output_path="length_ned_bar.png",
                        length_bin_width=500, ned_bin_width=0.05):
    """Plot frequency histograms of text length and extent NED.

    Two figures are written, derived from *output_path* by suffixing
    ``_length`` and ``_ned`` before the extension.

    :param data: list of records with a "rewrite" dict of dimensions
    :param output_path: base path for the saved figures
    :param length_bin_width: histogram bin width for text lengths
    :param ned_bin_width: histogram bin width for NED values
    """
    # Collect the (length, NED) observations across all extents.
    length_vals = []
    ned_vals = []
    for record in data:
        for dim, payload in record["rewrite"].items():
            for extent in payload["extents"]:
                length_vals.append(len(extent["edited"]))
                ned_vals.append(extent["NED"])

    length_vals = np.array(length_vals)
    ned_vals = np.array(ned_vals)

    if len(length_vals) == 0:
        print("⚠️ 数据为空,未生成图像。")
        return

    # --- histogram of text lengths ---
    length_bins = np.arange(min(length_vals), max(length_vals) + length_bin_width, length_bin_width)
    plt.figure(figsize=(8, 4))
    plt.hist(length_vals, bins=length_bins, color='skyblue', edgecolor='black')
    plt.xlabel("Text Length")
    plt.ylabel("Frequency")
    plt.title("Frequency of Text Length")
    plt.xticks(length_bins)
    plt.tight_layout()
    # Bugfix: os.makedirs("") raises when output_path is a bare filename
    # (which the default "length_ned_bar.png" is).
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    plt.savefig(output_path.replace(".png", "_length.png"), dpi=300)
    plt.close()

    # --- histogram of NED values ---
    ned_bins = np.arange(0, 1 + ned_bin_width, ned_bin_width)
    plt.figure(figsize=(8, 4))
    plt.hist(ned_vals, bins=ned_bins, color='salmon', edgecolor='black')
    plt.xlabel("NED")
    plt.ylabel("Frequency")
    plt.title("Frequency of NED")
    plt.xticks(ned_bins)
    plt.tight_layout()
    plt.savefig(output_path.replace(".png", "_ned.png"), dpi=300)
    plt.close()

    print(f"✅ 柱状图已保存:{output_path.replace('.png', '_length.png')} 和 {output_path.replace('.png', '_ned.png')}")
|
| 136 |
+
|
| 137 |
+
if __name__ == "__main__":

    meta_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_100-5k_sample100_interval500_per10_last-mode/output_processed_ocr_Gundam.jsonl"
    data = read_jsonl(meta_path)

    # Step 1: NED between each edited text and its OCR output.
    for record in data:
        for dim, payload in record["rewrite"].items():
            for extent in payload["extents"]:
                extent["NED_ocr"] = compute_edit_distance(extent["edited"], extent["ocr"])

    save_jsonl(data, "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_100-5k_sample100_interval500_per10_last-mode/output_processed_ocr_Gundam_result.jsonl")

    # Step 2: per-dimension model performance at the maximum corruption level.
    metric_dim = {
        "形近字替换": 0,
        "音近/同音字替换": 0,
        "语序颠倒": 0,
        "字符增衍": 0,
        "字符缺失": 0
    }
    for record in data:
        for dim, payload in record["rewrite"].items():
            for extent in payload["extents"]:
                # TODO: different levels imply different NED vs the original
                # string, so the dataset itself carries a bias — the other
                # independent variables are not controlled here.
                if extent["level"] == 1.0:
                    metric_dim[dim] += extent["NED_ocr"]
    for key in metric_dim:
        metric_dim[key] = metric_dim[key] / len(data)
    print(metric_dim)

    # Step 3: performance vs text density at max level, per dimension.
    metric_p = {
        "形近字替换": [],
        "音近/同音字替换": [],
        "语序颠倒": [],
        "字符增衍": [],
        "字符缺失": []
    }
    for record in data:
        for dim, payload in record["rewrite"].items():
            for extent in payload["extents"]:
                if extent["level"] == 1.0:
                    metric_p[dim].append([record["length"], extent["NED_ocr"]])
    plot_density_ned(metric_p["形近字替换"], save_path="/vol/fit_p_ned.png")

    # Step 4: 3-D fit over (length, corruption NED, OCR NED) — density is
    # still an uncontrolled variable here.
    metric_3d = []
    for record in data:
        for dim, payload in record["rewrite"].items():
            for extent in payload["extents"]:
                metric_3d.append([record["length"], extent["NED"], extent["NED_ocr"]])
    fit_and_plot_3d(metric_3d, "/vol/fit_3d.png")

    # Distribution of the independent variables.
    plot_length_ned_bar(data, output_path="/vol/length_ned_bar.png",
                        length_bin_width=500, ned_bin_width=0.05)
|
scripts/3_calu_order.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
json_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/sample200_len1.0-1.2k/merged_ocr.json"
with open(json_path, 'r', encoding='utf-8') as f:
    data = json.load(f)

# Count items where the original span is recognized but the shuffled one is
# not (cnt1), and the inverse case (cnt2).
num = cnt1 = cnt2 = 0
for item in data:
    num += 1
    normal_hit = item["spans"][0]["before"] in item["content_ocr"]
    shuffled_hit = item["spans"][0]["after"] in item["tiny_shuffled_content_ocr"]
    if normal_hit and not shuffled_hit:
        cnt1 += 1
    elif not normal_hit and shuffled_hit:
        cnt2 += 1
print(f"normal正确但是替换后错误:{cnt1}, normal错误但是替换后正确:{cnt2}")
|
| 15 |
+
|
| 16 |
+
# # 如果想保存每条样本的准确率:
|
| 17 |
+
# with open(json_path.replace(".json", "_acc.json"), "w", encoding="utf-8") as f:
|
| 18 |
+
# json.dump(sample_accuracies, f, ensure_ascii=False, indent=2)
|
scripts/Levenshtein1.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Demo: compute a normalized edit distance with the Levenshtein library.

import Levenshtein

pred = "你好"
gt = "你好11"
max_len = max(len(pred), len(gt))  # normalize by the longer string
edit_dist = Levenshtein.distance(pred, gt) / max_len
print(edit_dist)
print(edit_dist)
|
scripts/__pycache__/Levenshtein.cpython-312.pyc
ADDED
|
Binary file (421 Bytes). View file
|
|
|
scripts/__pycache__/fit.cpython-312.pyc
ADDED
|
Binary file (3.54 kB). View file
|
|
|
scripts/__pycache__/fit_2d.cpython-312.pyc
ADDED
|
Binary file (3.54 kB). View file
|
|
|
scripts/__pycache__/fit_3d.cpython-312.pyc
ADDED
|
Binary file (3.67 kB). View file
|
|
|
scripts/__pycache__/tiny_shuffle.cpython-312.pyc
ADDED
|
Binary file (5.28 kB). View file
|
|
|
scripts/fit_2d.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import matplotlib.pyplot as plt
|
| 3 |
+
from sklearn.linear_model import LinearRegression
|
| 4 |
+
from sklearn.preprocessing import PolynomialFeatures
|
| 5 |
+
from sklearn.metrics import r2_score
|
| 6 |
+
|
| 7 |
+
def plot_density_ned(data, degree=2, save_path="density_ned_fit.png"):
    """Fit a polynomial to (density, NED) pairs and save a scatter+curve plot.

    :param data: 2-D array-like of [density, ned] rows
    :param degree: polynomial degree of the fit (default quadratic)
    :param save_path: where to write the figure (default 'density_ned_fit.png')
    :return: (fitted coefficients, R² score)
    """
    points = np.array(data)
    X = points[:, 0].reshape(-1, 1)   # text density
    y = points[:, 1]                  # NED metric

    # Expand to polynomial features and fit a linear model on them.
    poly = PolynomialFeatures(degree=degree)
    X_poly = poly.fit_transform(X)
    model = LinearRegression()
    model.fit(X_poly, y)

    # Goodness of fit on the training points.
    y_pred = model.predict(X_poly)
    r2 = r2_score(y, y_pred)

    # Dense grid for a smooth fitted curve.
    grid = np.linspace(X.min(), X.max(), 200).reshape(-1, 1)
    curve = model.predict(poly.transform(grid))

    plt.figure(figsize=(8, 6))
    plt.scatter(X, y, color='royalblue', alpha=0.7, label='Data points')
    plt.plot(grid, curve, color='crimson', linewidth=2.5, label=f'Polynomial Fit (deg={degree})')
    plt.xlabel("Text Density", fontsize=14)
    plt.ylabel("NED", fontsize=14)
    plt.title("Relationship between Text Density and NED", fontsize=16, pad=15)
    plt.grid(alpha=0.3)
    plt.legend(fontsize=12)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300)  # high-resolution output
    plt.close()

    print(f"✅ 拟合完成,图片已保存至:{save_path}")
    print(f"📈 拟合公式系数: {model.coef_}")
    print(f"📊 R² = {r2:.4f}")

    return model.coef_, r2
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# 示例使用
|
| 65 |
+
if __name__ == "__main__":
    # Demo: rows of [density, NED].
    demo_points = [
        [0.1, 0.12],
        [0.2, 0.18],
        [0.3, 0.25],
        [0.4, 0.32],
        [0.5, 0.45],
        [0.6, 0.51],
        [0.7, 0.62],
    ]
    plot_density_ned(demo_points, degree=2, save_path="/vol/density_ned_fit.png")
|
scripts/fit_3d.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import matplotlib.pyplot as plt
|
| 3 |
+
from sklearn.linear_model import LinearRegression
|
| 4 |
+
from sklearn.metrics import r2_score
|
| 5 |
+
from mpl_toolkits.mplot3d import Axes3D
|
| 6 |
+
|
| 7 |
+
def fit_and_plot_3d(data, output_path="fit_surface.png"):
    """Fit NED against two predictors with a linear model and plot in 3-D.

    :param data: 2-D array-like; each row is [text density, degree, NED]
    :param output_path: where to save the figure
    """
    pts = np.array(data)
    X = pts[:, :2]   # predictors (text density, degree)
    y = pts[:, 2]    # response (NED)

    # Multivariate linear regression.
    model = LinearRegression()
    model.fit(X, y)
    y_pred = model.predict(X)

    r2 = r2_score(y, y_pred)
    print(f"✅ 拟合完成,R² = {r2:.4f}")
    print(f"回归系数: {model.coef_}, 截距: {model.intercept_:.4f}")

    # Grid over the observed predictor ranges for the fitted plane.
    g1 = np.linspace(X[:, 0].min(), X[:, 0].max(), 50)
    g2 = np.linspace(X[:, 1].min(), X[:, 1].max(), 50)
    G1, G2 = np.meshgrid(g1, g2)
    Z = model.predict(np.c_[G1.ravel(), G2.ravel()]).reshape(G1.shape)

    # 3-D scatter plus the fitted surface.
    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], y, color='blue', label='Data points')
    ax.plot_surface(G1, G2, Z, color='lightcoral', alpha=0.6, label='Fitted surface')
    ax.set_xlabel('Text Density')
    ax.set_ylabel('Program')
    ax.set_zlabel('NED')
    ax.set_title(f'3D Regression Fit (R²={r2:.3f})')

    plt.tight_layout()
    plt.savefig(output_path, dpi=300)
    plt.close(fig)
    print(f"📊 图像已保存到: {output_path}")
|
| 53 |
+
|
| 54 |
+
# 示例
|
| 55 |
+
if __name__ == "__main__":
    # Demo data: rows of [text density, degree, NED].
    demo_rows = [
        [0.1, 1, 0.05],
        [0.2, 1, 0.10],
        [0.3, 2, 0.18],
        [0.4, 2, 0.23],
        [0.5, 3, 0.28],
        [0.6, 3, 0.35],
        [0.7, 4, 0.38],
        [0.8, 4, 0.45],
        [0.9, 5, 0.50],
    ]
    fit_and_plot_3d(demo_rows, "ned_fit.png")
|
scripts/gen_random_char.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def load_vocab_zh():
    """Load the level-1 common Chinese character vocabulary.

    Reads the tab-separated vocab file (index<TAB>character per line) and
    keeps only entries whose second field is a CJK character.

    :return: string of all vocabulary characters, in file order
    """
    chars = []
    with open("/vol/zhaoy/ds-ocr/scripts/zh_level1_3500.txt", "r", encoding="utf-8") as fh:
        for raw in fh:
            fields = raw.strip().split('\t')
            if len(fields) < 2:  # need at least index + character
                continue
            candidate = fields[1]
            # Filter out blank lines / garbage: keep real CJK only.
            if '\u4e00' <= candidate <= '\u9fff':
                chars.append(candidate)
    return ''.join(chars)
|
| 20 |
+
|
| 21 |
+
def random_common_chinese(COMMON_CHARS, length):
    """Return *length* characters drawn uniformly (with replacement) from COMMON_CHARS."""
    picks = [random.choice(COMMON_CHARS) for _ in range(length)]
    return ''.join(picks)
|
| 23 |
+
|
| 24 |
+
def generate_dataset(num_samples, save_path):
    """Generate *num_samples* random-character samples and save them as JSON.

    Each sample gets an id ("RS001", ...), a prospective image path and
    2000 random common Chinese characters as content.

    :param num_samples: number of samples to generate
    :param save_path: destination JSON file
    """
    data = []
    os.makedirs("images", exist_ok=True)

    # Perf fix: load the 3500-character vocabulary once instead of
    # re-reading the file on every loop iteration.
    COMMON_CHARS = load_vocab_zh()

    for i in range(1, num_samples + 1):
        sample_id = f"RS{i:03d}"
        length = 2000  # characters per sample

        content = random_common_chinese(COMMON_CHARS, length)
        image_path = f"images/{sample_id}.png"  # filled in by later rendering

        data.append({
            "id": sample_id,
            "image_path": image_path,
            "content": content
        })

    with open(save_path, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)

    print(f"✅ 已生成 {num_samples} 条样本,保存至:{save_path}")
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
if __name__ == "__main__":
    meta_path = "/vol/zhaoy/ds-ocr/data/rand_zh_2k/meta.json"
    generate_dataset(10, meta_path)  # number of samples
|
scripts/resize_image.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
|
| 3 |
+
# 读取图片
|
| 4 |
+
img_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/CCI3_100-5k_sample100_interval500_per10/images/char_deletion/RS061_ned-0.0907.png" # 原始图片路径
|
| 5 |
+
output_path = "/vol/RS061_ned-0.0907.png" # 保存路径
|
| 6 |
+
target_size = (1280, 1280) # 指定尺寸 (宽, 高)
|
| 7 |
+
|
| 8 |
+
# 打开并 resize
|
| 9 |
+
img = Image.open(img_path)
|
| 10 |
+
img_resized = img.resize(target_size, Image.Resampling.LANCZOS) # 高质量缩放
|
| 11 |
+
|
| 12 |
+
# 保存
|
| 13 |
+
img_resized.save(output_path)
|
| 14 |
+
|
| 15 |
+
print(f"✅ 图片已保存到 {output_path},尺寸为 {img_resized.size}")
|
scripts/tiny_shuffle.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import re
|
| 3 |
+
|
| 4 |
+
def random_swap_contiguous_v1(text: str, n_swaps: int = 1, max_retry: int = 10):
    """Shuffle characters inside randomly-chosen contiguous Chinese runs.

    Picks a run of Chinese characters (no punctuation/digits), shuffles a
    random 2–5 character slice of it, and records where the change was
    made.  A shuffle that leaves the slice unchanged is retried up to
    *max_retry* times.

    :param text: input string
    :param n_swaps: number of slices to perturb
    :param max_retry: retries per swap before giving up
    :return: (perturbed text, list of {"start","end","before","after"} spans)
    """
    han_runs = [m.span() for m in re.finditer(r'[\u4e00-\u9fa5]+', text)]
    if not han_runs:
        return text, []

    chars = list(text)
    spans_changed = []

    for _ in range(n_swaps):
        for _attempt in range(max_retry):
            run_start, run_end = random.choice(han_runs)
            run_len = run_end - run_start
            if run_len < 2:
                continue

            piece_len = random.randint(2, min(5, run_len))  # slice of 2..5 chars
            lo = random.randint(run_start, run_end - piece_len)
            hi = lo + piece_len

            original_piece = ''.join(chars[lo:hi])
            permuted = chars[lo:hi]
            random.shuffle(permuted)
            new_piece = ''.join(permuted)

            if new_piece != original_piece:
                chars[lo:hi] = permuted
                spans_changed.append({
                    "start": lo,
                    "end": hi,
                    "before": original_piece,
                    "after": new_piece
                })
                break  # this swap succeeded; stop retrying
        else:
            print("⚠️ 无法有效打乱片段(内容重复),已跳过。")

    return ''.join(chars), spans_changed
|
| 49 |
+
|
| 50 |
+
def random_swap_contiguous(text: str, n_swaps: int = 1, max_retry: int = 10):
    """Randomly perturb contiguous fragments of *text*.

    - Chinese runs: shuffle 2-5 consecutive characters inside the run.
    - English runs: shuffle 2-4 consecutive words inside the run.

    Each successful perturbation is recorded as a dict carrying the span
    indices, a language tag and the before/after segment content.  A
    round is retried up to ``max_retry`` times when the chosen fragment
    is too short or the shuffle happens to be a no-op.

    Returns ``(new_text, modified_spans)``.
    """
    span_pattern = re.compile(r'[\u4e00-\u9fa5]+|([A-Za-z]+(?:\s+[A-Za-z]+)*)')

    modified_spans = []

    for _ in range(n_swaps):
        # Re-scan every round: an earlier replacement can change offsets
        # (the English branch re-joins words with single spaces, which can
        # shrink the text), so spans cached from the original text would
        # point at stale positions.
        spans = [(m.start(), m.end(), 'en' if m.group(1) else 'zh')
                 for m in span_pattern.finditer(text)]
        if not spans:
            break  # nothing perturbable left

        for _attempt in range(max_retry):
            start, end, lang = random.choice(spans)
            segment = text[start:end]

            if lang == 'zh':
                # Chinese: shuffle a random contiguous slice of 2-5 characters.
                if len(segment) < 2:
                    continue
                sub_len = random.randint(2, min(5, len(segment)))
                sub_start = random.randint(0, len(segment) - sub_len)
                sub_end = sub_start + sub_len
                before = segment[sub_start:sub_end]
                shuffled = list(before)
                random.shuffle(shuffled)
                after = ''.join(shuffled)
                if after == before:
                    continue  # no-op shuffle; retry
                new_segment = segment[:sub_start] + after + segment[sub_end:]
            else:
                # English: shuffle a random contiguous run of 2-4 words.
                words = segment.split()
                if len(words) < 2:
                    continue
                sub_len = random.randint(2, min(4, len(words)))
                sub_start = random.randint(0, len(words) - sub_len)
                sub_end = sub_start + sub_len
                before = ' '.join(words[sub_start:sub_end])
                sub_words = words[sub_start:sub_end]
                random.shuffle(sub_words)
                after = ' '.join(sub_words)
                if after == before:
                    continue  # no-op shuffle; retry
                new_segment = ' '.join(words[:sub_start] + sub_words + words[sub_end:])

            # Splice the perturbed segment back into the full text.
            text = text[:start] + new_segment + text[end:]
            modified_spans.append({
                "start": start,
                "end": end,
                "lang": lang,
                "before": segment,
                "after": new_segment,
            })
            break
        else:
            print("⚠️ 无法有效打乱片段(内容重复),已跳过。")

    return text, modified_spans
| 122 |
+
|
| 123 |
+
if __name__ == "__main__":
    # Demo: perturb one contiguous fragment of a sample sentence and show
    # the original text, the perturbed text and the recorded spans.
    s = "需要在俄罗斯莫斯科和中国北京各转一次机,然后才能抵达西安,期间至少需要25个小时。"
    new_s, spans = random_swap_contiguous(s, n_swaps=1)

    print("原句: ", s)
    print("打乱后:", new_s)
    print("修改记录:")
    for span in spans:
        print(span)
scripts/vis/vis.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import difflib
|
| 3 |
+
|
| 4 |
+
def highlight_diff(a, b):
    """Return ``(html_a, html_b)`` with character-level diffs highlighted.

    Spans unique to *a* are wrapped in ``<span class="del">`` (rendered
    red); spans unique to *b* in ``<span class="ins">`` (rendered green).
    On a ``replace`` opcode BOTH sides are marked — the original dropped
    b's replaced segment entirely, so the rendered OCR string silently
    lost those characters.
    """
    matcher = difflib.SequenceMatcher(None, a, b)
    html_a, html_b = [], []
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if tag == 'equal':
            html_a.append(a[i1:i2])
            html_b.append(b[j1:j2])
        else:
            if tag in ('replace', 'delete'):
                html_a.append(f'<span class="del">{a[i1:i2]}</span>')
            if tag in ('replace', 'insert'):
                html_b.append(f'<span class="ins">{b[j1:j2]}</span>')
    return ''.join(html_a), ''.join(html_b)
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def generate_html(data, output_path="ocr_diff_viewer.html"):
    """Render a paginated HTML diff viewer and write it to *output_path*.

    Each item of *data* contributes one "page" comparing the source text
    against its OCR result; differing spans are colour-highlighted by
    highlight_diff().  Navigation between pages is done client-side in JS.
    """
    html_items = []

    for i, item in enumerate(data):
        # Change the keys here to visualise a different field pair.
        text = item.get("tiny_shuffled_content", "")
        # OCR output carries its own line breaks / spaces that the source
        # text does not have; strip them so the character diff lines up.
        ocr = item.get("tiny_shuffled_content_ocr", "").replace("\n", "").replace(" ", "")
        html_text, html_ocr = highlight_diff(text, ocr)

        # One hidden-by-default page per sample; JS toggles visibility.
        html_items.append(f"""
        <div class="page" id="page-{i}" style="display: {'block' if i==0 else 'none'};">
            <h2>样本 {i+1}/{len(data)}</h2>
            <div class="block">
                <h3>原文(text)</h3>
                <div class="box">{html_text}</div>
            </div>
            <div class="block">
                <h3>OCR 结果(ocr)</h3>
                <div class="box">{html_ocr}</div>
            </div>
        </div>
        """)

    # Full page template; doubled braces escape literal {} for the f-string.
    html_content = f"""
    <html>
    <head>
        <meta charset="utf-8">
        <title>OCR Diff Viewer</title>
        <style>
            body {{
                font-family: "Courier New", monospace;
                background-color: #f6f6f6;
                padding: 20px;
            }}
            .block {{
                margin-bottom: 30px;
            }}
            .box {{
                background: #fff;
                border: 1px solid #ccc;
                padding: 15px;
                white-space: pre-wrap;
                font-size: 16px;
                line-height: 1.5;
            }}
            .del {{ background-color: #ffcccc; }}
            .ins {{ background-color: #ccffcc; }}
            .nav {{
                text-align: center;
                margin-top: 30px;
            }}
            button {{
                font-size: 16px;
                padding: 6px 12px;
                margin: 0 8px;
            }}
        </style>
    </head>
    <body>
        <h1>📘 OCR 文本差异可视化</h1>
        {"".join(html_items)}
        <div class="nav">
            <button onclick="prevPage()">上一条</button>
            <button onclick="nextPage()">下一条</button>
            <p id="page-indicator"></p>
        </div>

        <script>
            let currentPage = 0;
            const total = {len(data)};
            const indicator = document.getElementById("page-indicator");

            function showPage(i) {{
                document.querySelectorAll('.page').forEach((el, idx) => {{
                    el.style.display = idx === i ? 'block' : 'none';
                }});
                indicator.innerText = `第 ${{i+1}} / ${{total}} 条`;
            }}

            function nextPage() {{
                if (currentPage < total - 1) {{
                    currentPage++;
                    showPage(currentPage);
                }}
            }}
            function prevPage() {{
                if (currentPage > 0) {{
                    currentPage--;
                    showPage(currentPage);
                }}
            }}

            showPage(currentPage);
        </script>
    </body>
    </html>
    """

    with open(output_path, "w", encoding="utf-8") as f:
        f.write(html_content)
    print(f"✅ 可视化结果已保存到 {output_path}")
| 121 |
+
|
| 122 |
+
|
| 123 |
+
if __name__ == "__main__":
    # Example: load merged OCR results and render the diff viewer.
    input_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/sample10_len1.0-1.2k/merged_ocr.json"  # adjust to your own path
    with open(input_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    generate_html(data, "ocr_diff_viewer.html")
|
scripts/vis/vis_v2.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import difflib
|
| 3 |
+
|
| 4 |
+
def highlight_diff(a, b):
    """Return ``(html_a, html_b)`` with character-level diffs highlighted.

    Red (``.del``) marks spans unique to *a*, green (``.ins``) spans
    unique to *b*.  On ``replace`` opcodes both sides are emitted — the
    original dropped b's replaced segment from ``html_b``, so the
    rendered OCR text was missing characters.
    """
    matcher = difflib.SequenceMatcher(None, a, b)
    html_a, html_b = [], []
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if tag == 'equal':
            html_a.append(a[i1:i2])
            html_b.append(b[j1:j2])
        else:
            if tag in ('replace', 'delete'):
                html_a.append(f'<span class="del">{a[i1:i2]}</span>')
            if tag in ('replace', 'insert'):
                html_b.append(f'<span class="ins">{b[j1:j2]}</span>')
    return ''.join(html_a), ''.join(html_b)
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def generate_html(data, output_path="ocr_diff_viewer.html"):
    """Render a paginated HTML diff viewer and write it to *output_path*.

    v2 additions over vis.py: the perturbed span of each sample is
    wrapped in a yellow highlight BEFORE diffing, and an extra (empty)
    "Before" column is emitted on every page.
    """
    html_items = []

    for i, item in enumerate(data):
        text = item.get("tiny_shuffled_content", "")
        # OCR output carries its own line breaks / spaces; strip them so
        # the character diff lines up with the source text.
        ocr = item.get("tiny_shuffled_content_ocr", "").replace("\n", "").replace(" ", "")
        # NOTE(review): assumes every item carries a non-empty "spans"
        # list — a missing or empty one raises KeyError/IndexError here.
        start = item["spans"][0]["start"]
        end = item["spans"][0]["end"]

        # 1) Wrap the annotated span in a yellow highlight (index-guarded).
        if start is not None and end is not None and 0 <= start < len(text) and start < end <= len(text):
            text_highlighted = (
                text[:start]
                + f'<span class="highlight">{text[start:end]}</span>'
                + text[end:]
            )
        else:
            text_highlighted = text

        # 2) difflib comparison.  NOTE(review): this diffs the
        #    HTML-wrapped text, so the injected <span> tags themselves
        #    participate in the diff — confirm this is intended.
        html_text, html_ocr = highlight_diff(text_highlighted, ocr)

        # 3) Emit the page, including the extra "Before" column.
        html_items.append(f"""
        <div class="page" id="page-{i}" style="display: {'block' if i==0 else 'none'};">
            <h2>样本 {i+1}/{len(data)}</h2>

            <div class="block">
                <h3>Before(空)</h3>
                <div class="box"></div>
            </div>

            <div class="block">
                <h3>原文(text)</h3>
                <div class="box">{html_text}</div>
            </div>

            <div class="block">
                <h3>OCR 结果(ocr)</h3>
                <div class="box">{html_ocr}</div>
            </div>
        </div>
        """)

    # --- HTML template (doubled braces escape literal {} in the f-string) ---
    html_content = f"""
    <html>
    <head>
        <meta charset="utf-8">
        <title>OCR Diff Viewer</title>
        <style>
            body {{
                font-family: "Courier New", monospace;
                background-color: #f6f6f6;
                padding: 20px;
            }}
            .block {{
                margin-bottom: 30px;
            }}
            .box {{
                background: #fff;
                border: 1px solid #ccc;
                padding: 15px;
                white-space: pre-wrap;
                font-size: 16px;
                line-height: 1.5;
            }}
            .del {{ background-color: #ffcccc; }}
            .ins {{ background-color: #ccffcc; }}
            .highlight {{ background-color: #fff59d; }} /* yellow annotation */
            .nav {{
                text-align: center;
                margin-top: 30px;
            }}
            button {{
                font-size: 16px;
                padding: 6px 12px;
                margin: 0 8px;
            }}
        </style>
    </head>
    <body>
        <h1>📘 OCR 文本差异可视化</h1>
        {"".join(html_items)}
        <div class="nav">
            <button onclick="prevPage()">上一条</button>
            <button onclick="nextPage()">下一条</button>
            <p id="page-indicator"></p>
        </div>

        <script>
            let currentPage = 0;
            const total = {len(data)};
            const indicator = document.getElementById("page-indicator");

            function showPage(i) {{
                document.querySelectorAll('.page').forEach((el, idx) => {{
                    el.style.display = idx === i ? 'block' : 'none';
                }});
                indicator.innerText = `第 ${{i+1}} / ${{total}} 条`;
            }}

            function nextPage() {{
                if (currentPage < total - 1) {{
                    currentPage++;
                    showPage(currentPage);
                }}
            }}
            function prevPage() {{
                if (currentPage > 0) {{
                    currentPage--;
                    showPage(currentPage);
                }}
            }}

            showPage(currentPage);
        </script>
    </body>
    </html>
    """

    with open(output_path, "w", encoding="utf-8") as f:
        f.write(html_content)
    print(f"✅ 可视化结果已保存到 {output_path}")
| 144 |
+
|
| 145 |
+
|
| 146 |
+
if __name__ == "__main__":
    # Load merged OCR results and render the diff viewer.
    input_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/sample10_len1.0-1.2k/merged_ocr.json"
    with open(input_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    generate_html(data, "ocr_diff_viewer.html")
|
scripts/vis/vis_v3.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import difflib
|
| 3 |
+
|
| 4 |
+
def highlight_diff(a, b):
    """Return ``(html_a, html_b)`` with red/green diff highlighting.

    ``.del`` (red) marks spans unique to *a*, ``.ins`` (green) spans
    unique to *b*.  ``replace`` opcodes now mark BOTH sides — the
    original emitted only the *a* side, so the rendered *b* string
    silently lost its replaced characters.
    """
    matcher = difflib.SequenceMatcher(None, a, b)
    html_a, html_b = [], []
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if tag == 'equal':
            html_a.append(a[i1:i2])
            html_b.append(b[j1:j2])
        else:
            if tag in ('replace', 'delete'):
                html_a.append(f'<span class="del">{a[i1:i2]}</span>')
            if tag in ('replace', 'insert'):
                html_b.append(f'<span class="ins">{b[j1:j2]}</span>')
    return ''.join(html_a), ''.join(html_b)
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def insert_highlight(html_text, start, end):
    """Wrap the plain-text range ``[start, end)`` of *html_text* in a
    yellow-highlight ``<span>``, skipping over existing HTML tags.

    difflib markup (``<span class="del">…</span>`` etc.) means indices
    into the original plain text no longer match indices into
    *html_text*, so a plain-index -> html-index mapping is built first.
    Returns *html_text* unchanged when the range is invalid.
    """
    # mapping[i] = index in html_text of the i-th non-tag character.
    mapping = []
    inside_tag = False
    for idx, ch in enumerate(html_text):
        if ch == '<':
            inside_tag = True
        elif ch == '>':
            inside_tag = False
        elif not inside_tag:
            mapping.append(idx)

    # Range guard: the original only rejected end > len(mapping); also
    # reject negative, empty, or inverted ranges, which would otherwise
    # index wrongly or emit garbled markup.
    if not (0 <= start < end <= len(mapping)):
        return html_text

    # Translate plain-text indices into html_text indices.
    html_start = mapping[start]
    html_end = mapping[end - 1] + 1

    return (
        html_text[:html_start]
        + f'<span class="highlight" title="标注区域: {start}-{end}">'
        + html_text[html_start:html_end]
        + '</span>'
        + html_text[html_end:]
    )
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def generate_html(data, output_path="ocr_diff_viewer.html"):
    """Render a paginated HTML diff viewer and write it to *output_path*.

    v3 change: the yellow span annotation is inserted AFTER diffing, via
    a tag-aware index mapping (insert_highlight), so difflib never sees
    the annotation markup and the diff colours stay accurate.
    """
    html_items = []

    for i, item in enumerate(data):
        text = item.get("tiny_shuffled_content", "")
        # NOTE(review): original comment said spaces should be kept for
        # English, but this strips ALL spaces — confirm intended.
        ocr = item.get("tiny_shuffled_content_ocr", "").replace("\n", "").replace(" ", "")
        span = item.get("spans", [{}])[0]
        start, end = span.get("start"), span.get("end")

        # Alternative field pairs (toggle as needed):
        # text = item.get("content", "")
        # ocr = item.get("content_ocr", "").replace("\n", "").replace(" ", "")
        # start = end = None

        # text = item.get("shuffled_content", "")
        # ocr = item.get("shuffled_content_ocr", "").replace("\n", "").replace(" ", "")
        # start = end = None

        html_text, html_ocr = highlight_diff(text, ocr)

        # Insert the yellow underline only after difflib has run.
        if start is not None and end is not None:
            html_text = insert_highlight(html_text, start, end)

        html_items.append(f"""
        <div class="page" id="page-{i}" style="display: {'block' if i==0 else 'none'};">
            <h2>样本 {i+1}/{len(data)}</h2>

            <div class="block">
                <h3>原文(text)</h3>
                <div class="box">{html_text}</div>
            </div>

            <div class="block">
                <h3>OCR 结果(ocr)</h3>
                <div class="box">{html_ocr}</div>
            </div>
        </div>
        """)

    # Full page template; doubled braces escape literal {} in the f-string.
    html_template = f"""
    <html>
    <head>
        <meta charset="utf-8">
        <title>OCR Diff Viewer</title>
        <style>
            body {{
                font-family: "Courier New", monospace;
                background-color: #f6f6f6;
                padding: 20px;
            }}
            .box {{
                background: #fff;
                border: 1px solid #ccc;
                padding: 15px;
                white-space: pre-wrap;
                font-size: 16px;
                line-height: 1.5;
            }}
            .del {{ background-color: #ffcccc; }}
            .ins {{ background-color: #ccffcc; }}
            .highlight {{
                text-decoration: underline solid #FFD700 3px;
                text-underline-offset: 4px;
                cursor: help;
                background-color: rgba(255, 215, 0, 0.15);
            }}
            .highlight:hover {{
                background-color: rgba(255, 215, 0, 0.35);
            }}
        </style>
    </head>
    <body>
        <h1>📘 OCR 文本差异可视化</h1>
        {"".join(html_items)}
        <div class="nav" style="text-align:center; margin-top:30px;">
            <button onclick="prevPage()">上一条</button>
            <button onclick="nextPage()">下一条</button>
            <p id="page-indicator"></p>
        </div>
        <script>
            let currentPage = 0;
            const total = {len(data)};
            const indicator = document.getElementById("page-indicator");

            function showPage(i) {{
                document.querySelectorAll('.page').forEach((el, idx) => {{
                    el.style.display = idx === i ? 'block' : 'none';
                }});
                indicator.innerText = `第 ${{i+1}} / ${{total}} 条`;
            }}

            function nextPage() {{
                if (currentPage < total - 1) {{
                    currentPage++;
                    showPage(currentPage);
                }}
            }}
            function prevPage() {{
                if (currentPage > 0) {{
                    currentPage--;
                    showPage(currentPage);
                }}
            }}

            showPage(currentPage);
        </script>
    </body>
    </html>
    """

    with open(output_path, "w", encoding="utf-8") as f:
        f.write(html_template)
    print(f"✅ 已生成: {output_path}")
| 164 |
+
|
| 165 |
+
|
| 166 |
+
if __name__ == "__main__":
    # Load merged OCR results and render the diff viewer with defaults.
    input_path = "/vol/zhaoy/ds-ocr/data/CCI3-Data/sample200_len1.0-1.2k/merged_ocr.json"
    with open(input_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    generate_html(data)
|