mirror of
https://github.com/opendatalab/MinerU.git
synced 2026-04-12 07:06:44 +07:00
Compare commits
37 Commits
magic_pdf-
...
magic_pdf-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
016cde3ece | ||
|
|
4b8dbd7cfb | ||
|
|
d6a5724b26 | ||
|
|
50a543ce0e | ||
|
|
575ca00e01 | ||
|
|
7f0c734ff6 | ||
|
|
872cd73f4a | ||
|
|
7fcbae01fe | ||
|
|
752d620a0c | ||
|
|
fc10772503 | ||
|
|
fd616c5778 | ||
|
|
acb9cbd6d2 | ||
|
|
433684c646 | ||
|
|
fffee0ae97 | ||
|
|
e73606250e | ||
|
|
7162debc38 | ||
|
|
a343175d66 | ||
|
|
671ce1d97c | ||
|
|
6f80beaa31 | ||
|
|
cb1b02e716 | ||
|
|
8ebb79a43a | ||
|
|
154eed1ade | ||
|
|
b7652171ea | ||
|
|
f0c463ed6d | ||
|
|
3d2fcc9dce | ||
|
|
d3c9cb84f8 | ||
|
|
8c089976ed | ||
|
|
473a0a7de0 | ||
|
|
61c970f7da | ||
|
|
d3ee9abbab | ||
|
|
07e4f115e6 | ||
|
|
bf8d8e217d | ||
|
|
744b3f75eb | ||
|
|
2e772467ee | ||
|
|
efed5faa53 | ||
|
|
05161c6e62 | ||
|
|
15c8830416 |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -34,3 +34,4 @@ tmp
|
||||
ocr_demo
|
||||
|
||||
/app/common/__init__.py
|
||||
/magic_pdf/config/__init__.py
|
||||
|
||||
@@ -28,7 +28,9 @@ pip install -r requirements.txt
|
||||
3.Run the main script
|
||||
|
||||
```sh
|
||||
use demo/demo_test.py
|
||||
use demo/text_demo.py
|
||||
or
|
||||
use demo/ocr_demo.py
|
||||
```
|
||||
|
||||
### 版权说明
|
||||
|
||||
32
demo/demo_commons.py
Normal file
32
demo/demo_commons.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import json
|
||||
|
||||
from magic_pdf.spark.s3 import get_s3_config
|
||||
from magic_pdf.libs.commons import join_path, read_file, json_dump_path
|
||||
|
||||
|
||||
local_json_path = "Z:/format.json"
|
||||
local_jsonl_path = "Z:/format.jsonl"
|
||||
|
||||
def get_json_from_local_or_s3(book_name=None):
|
||||
if book_name is None:
|
||||
with open(local_json_path, "r", encoding="utf-8") as json_file:
|
||||
json_line = json_file.read()
|
||||
json_object = json.loads(json_line)
|
||||
else:
|
||||
# error_log_path & json_dump_path
|
||||
# 可配置从上述两个地址获取源json
|
||||
json_path = join_path(json_dump_path, book_name + ".json")
|
||||
s3_config = get_s3_config(json_path)
|
||||
file_content = read_file(json_path, s3_config)
|
||||
json_str = file_content.decode("utf-8")
|
||||
# logger.info(json_str)
|
||||
json_object = json.loads(json_str)
|
||||
return json_object
|
||||
|
||||
|
||||
def write_json_to_local(jso, book_name=None):
|
||||
if book_name is None:
|
||||
with open(local_json_path, "w", encoding="utf-8") as file:
|
||||
file.write(json.dumps(jso, ensure_ascii=False))
|
||||
else:
|
||||
pass
|
||||
@@ -4,17 +4,14 @@ import os
|
||||
from loguru import logger
|
||||
from pathlib import Path
|
||||
|
||||
from app.common.s3 import get_s3_config
|
||||
from demo.demo_test import get_json_from_local_or_s3
|
||||
from magic_pdf.pipeline_ocr import ocr_parse_pdf_core
|
||||
from magic_pdf.spark.s3 import get_s3_config
|
||||
from demo.demo_commons import get_json_from_local_or_s3
|
||||
from magic_pdf.dict2md.ocr_mkcontent import (
|
||||
ocr_mk_mm_markdown_with_para,
|
||||
ocr_mk_nlp_markdown,
|
||||
ocr_mk_mm_markdown,
|
||||
ocr_mk_mm_standard_format,
|
||||
ocr_mk_mm_markdown_with_para_and_pagination
|
||||
make_standard_format_with_para
|
||||
)
|
||||
from magic_pdf.libs.commons import join_path
|
||||
from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
|
||||
from magic_pdf.libs.commons import join_path, read_file
|
||||
|
||||
|
||||
def save_markdown(markdown_text, input_filepath):
|
||||
@@ -42,7 +39,8 @@ def ocr_local_parse(ocr_pdf_path, ocr_json_file_path):
|
||||
ocr_pdf_model_info = read_json_file(ocr_json_file_path)
|
||||
pth = Path(ocr_json_file_path)
|
||||
book_name = pth.name
|
||||
ocr_parse_core(book_name, ocr_pdf_path, ocr_pdf_model_info)
|
||||
pdf_bytes = read_file(ocr_pdf_path, None)
|
||||
ocr_parse_core(book_name, pdf_bytes, ocr_pdf_model_info)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
|
||||
@@ -53,24 +51,19 @@ def ocr_online_parse(book_name, start_page_id=0, debug_mode=True):
|
||||
# logger.info(json_object)
|
||||
s3_pdf_path = json_object["file_location"]
|
||||
s3_config = get_s3_config(s3_pdf_path)
|
||||
pdf_bytes = read_file(s3_pdf_path, s3_config)
|
||||
ocr_pdf_model_info = json_object.get("doc_layout_result")
|
||||
ocr_parse_core(book_name, s3_pdf_path, ocr_pdf_model_info, s3_config=s3_config)
|
||||
ocr_parse_core(book_name, pdf_bytes, ocr_pdf_model_info)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
|
||||
|
||||
def ocr_parse_core(book_name, ocr_pdf_path, ocr_pdf_model_info, start_page_id=0, s3_config=None):
|
||||
def ocr_parse_core(book_name, pdf_bytes, ocr_pdf_model_info, start_page_id=0):
|
||||
save_tmp_path = os.path.join(os.path.dirname(__file__), "../..", "tmp", "unittest")
|
||||
save_path = join_path(save_tmp_path, "md")
|
||||
save_path_with_bookname = os.path.join(save_path, book_name)
|
||||
text_content_save_path = f"{save_path_with_bookname}/book.md"
|
||||
pdf_info_dict = parse_pdf_by_ocr(
|
||||
ocr_pdf_path,
|
||||
s3_config,
|
||||
ocr_pdf_model_info,
|
||||
save_path,
|
||||
book_name,
|
||||
debug_mode=True)
|
||||
pdf_info_dict, parse_time = ocr_parse_pdf_core(pdf_bytes, ocr_pdf_model_info, book_name, start_page_id=start_page_id, debug_mode=True)
|
||||
|
||||
parent_dir = os.path.dirname(text_content_save_path)
|
||||
if not os.path.exists(parent_dir):
|
||||
@@ -83,16 +76,18 @@ def ocr_parse_core(book_name, ocr_pdf_path, ocr_pdf_model_info, start_page_id=0,
|
||||
with open(text_content_save_path, "w", encoding="utf-8") as f:
|
||||
f.write(markdown_content)
|
||||
|
||||
standard_format = ocr_mk_mm_standard_format(pdf_info_dict)
|
||||
standard_format = make_standard_format_with_para(pdf_info_dict)
|
||||
standard_format_save_path = f"{save_path_with_bookname}/standard_format.txt"
|
||||
with open(standard_format_save_path, "w", encoding="utf-8") as f:
|
||||
f.write(str(standard_format))
|
||||
# 将standard_format dump成json文本并保存
|
||||
f.write(json.dumps(standard_format, ensure_ascii=False))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# pdf_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.pdf"
|
||||
# json_file_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.json"
|
||||
pdf_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.pdf"
|
||||
json_file_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.json"
|
||||
# ocr_local_parse(pdf_path, json_file_path)
|
||||
# book_name = "数学新星网/edu_00001236"
|
||||
# ocr_online_parse(book_name)
|
||||
book_name = "科数网/edu_00011318"
|
||||
ocr_online_parse(book_name)
|
||||
|
||||
pass
|
||||
|
||||
@@ -5,7 +5,7 @@ from pathlib import Path
|
||||
import click
|
||||
from loguru import logger
|
||||
|
||||
from magic_pdf.libs.commons import join_path
|
||||
from magic_pdf.libs.commons import join_path, read_file
|
||||
from magic_pdf.dict2md.mkcontent import mk_mm_markdown
|
||||
from magic_pdf.pipeline import parse_pdf_by_model
|
||||
|
||||
@@ -21,9 +21,11 @@ def main(s3_pdf_path: str, s3_pdf_profile: str, pdf_model_path: str, pdf_model_p
|
||||
text_content_save_path = f"{save_path}/{book_name}/book.md"
|
||||
# metadata_save_path = f"{save_path}/{book_name}/metadata.json"
|
||||
|
||||
pdf_bytes = read_file(s3_pdf_path, s3_pdf_profile)
|
||||
|
||||
try:
|
||||
paras_dict = parse_pdf_by_model(
|
||||
s3_pdf_path, s3_pdf_profile, pdf_model_path, save_path, book_name, pdf_model_profile, start_page_num, debug_mode=debug_mode
|
||||
pdf_bytes, pdf_model_path, save_path, book_name, pdf_model_profile, start_page_num, debug_mode=debug_mode
|
||||
)
|
||||
parent_dir = os.path.dirname(text_content_save_path)
|
||||
if not os.path.exists(parent_dir):
|
||||
|
||||
@@ -5,6 +5,7 @@ from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
from demo.demo_commons import get_json_from_local_or_s3, write_json_to_local, local_jsonl_path, local_json_path
|
||||
from magic_pdf.dict2md.mkcontent import mk_mm_markdown
|
||||
from magic_pdf.pipeline import (
|
||||
meta_scan,
|
||||
@@ -13,38 +14,10 @@ from magic_pdf.pipeline import (
|
||||
pdf_intermediate_dict_to_markdown,
|
||||
save_tables_to_s3,
|
||||
)
|
||||
from magic_pdf.libs.commons import join_path, read_file, json_dump_path
|
||||
from app.common.s3 import get_s3_config
|
||||
from magic_pdf.libs.commons import join_path
|
||||
from loguru import logger
|
||||
|
||||
|
||||
local_json_path = "Z:/format.json"
|
||||
local_jsonl_path = "Z:/format.jsonl"
|
||||
|
||||
|
||||
def get_json_from_local_or_s3(book_name=None):
|
||||
if book_name is None:
|
||||
with open(local_json_path, "r", encoding="utf-8") as json_file:
|
||||
json_line = json_file.read()
|
||||
json_object = json.loads(json_line)
|
||||
else:
|
||||
# error_log_path & json_dump_path
|
||||
# 可配置从上述两个地址获取源json
|
||||
json_path = join_path(json_dump_path, book_name + ".json")
|
||||
s3_config = get_s3_config(json_path)
|
||||
file_content = read_file(json_path, s3_config)
|
||||
json_str = file_content.decode("utf-8")
|
||||
# logger.info(json_str)
|
||||
json_object = json.loads(json_str)
|
||||
return json_object
|
||||
|
||||
|
||||
def write_json_to_local(jso, book_name=None):
|
||||
if book_name is None:
|
||||
with open(local_json_path, "w", encoding="utf-8") as file:
|
||||
file.write(json.dumps(jso, ensure_ascii=False))
|
||||
else:
|
||||
pass
|
||||
|
||||
|
||||
def demo_parse_pdf(book_name=None, start_page_id=0, debug_mode=True):
|
||||
@@ -166,45 +166,65 @@ def mk_mm_markdown_1(para_dict: dict):
|
||||
return content_text
|
||||
|
||||
|
||||
def __insert_after_para(text, image_path, content_list):
|
||||
def __insert_after_para(text, type, element, content_list):
|
||||
"""
|
||||
在content_list中找到text,将image_path作为一个新的node插入到text后面
|
||||
"""
|
||||
for i, c in enumerate(content_list):
|
||||
content_type = c.get("type")
|
||||
if content_type in UNI_FORMAT_TEXT_TYPE and text in c.get("text", ''):
|
||||
img_node = {
|
||||
"type": "image",
|
||||
"img_path": image_path,
|
||||
"img_alt":"",
|
||||
"img_title":"",
|
||||
"img_caption":""
|
||||
}
|
||||
content_list.insert(i+1, img_node)
|
||||
if type == "image":
|
||||
content_node = {
|
||||
"type": "image",
|
||||
"img_path": element.get("image_path"),
|
||||
"img_alt": "",
|
||||
"img_title": "",
|
||||
"img_caption": "",
|
||||
}
|
||||
elif type == "table":
|
||||
content_node = {
|
||||
"type": "table",
|
||||
"img_path": element.get("image_path"),
|
||||
"table_latex": element.get("text"),
|
||||
"table_title": "",
|
||||
"table_caption": "",
|
||||
"table_quality": element.get("quality"),
|
||||
}
|
||||
content_list.insert(i+1, content_node)
|
||||
break
|
||||
else:
|
||||
logger.error(f"Can't find the location of image {image_path} in the markdown file, search target is {text}")
|
||||
logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file, search target is {text}")
|
||||
|
||||
|
||||
|
||||
def __insert_before_para(text, image_path, content_list):
|
||||
def __insert_before_para(text, type, element, content_list):
|
||||
"""
|
||||
在content_list中找到text,将image_path作为一个新的node插入到text前面
|
||||
"""
|
||||
for i, c in enumerate(content_list):
|
||||
content_type = c.get("type")
|
||||
if content_type in UNI_FORMAT_TEXT_TYPE and text in c.get("text", ''):
|
||||
img_node = {
|
||||
"type": "image",
|
||||
"img_path": image_path,
|
||||
"img_alt":"",
|
||||
"img_title":"",
|
||||
"img_caption":""
|
||||
}
|
||||
content_list.insert(i, img_node)
|
||||
if type == "image":
|
||||
content_node = {
|
||||
"type": "image",
|
||||
"img_path": element.get("image_path"),
|
||||
"img_alt": "",
|
||||
"img_title": "",
|
||||
"img_caption": "",
|
||||
}
|
||||
elif type == "table":
|
||||
content_node = {
|
||||
"type": "table",
|
||||
"img_path": element.get("image_path"),
|
||||
"table_latex": element.get("text"),
|
||||
"table_title": "",
|
||||
"table_caption": "",
|
||||
"table_quality": element.get("quality"),
|
||||
}
|
||||
content_list.insert(i, content_node)
|
||||
break
|
||||
else:
|
||||
logger.error(f"Can't find the location of image {image_path} in the markdown file, search target is {text}")
|
||||
logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file, search target is {text}")
|
||||
|
||||
|
||||
def mk_universal_format(para_dict: dict):
|
||||
@@ -220,9 +240,11 @@ def mk_universal_format(para_dict: dict):
|
||||
all_page_images = []
|
||||
all_page_images.extend(page_info.get("images",[]))
|
||||
all_page_images.extend(page_info.get("image_backup", []) )
|
||||
all_page_images.extend(page_info.get("tables",[]))
|
||||
all_page_images.extend(page_info.get("table_backup",[]) )
|
||||
|
||||
# all_page_images.extend(page_info.get("tables",[]))
|
||||
# all_page_images.extend(page_info.get("table_backup",[]) )
|
||||
all_page_tables = []
|
||||
all_page_tables.extend(page_info.get("tables", []))
|
||||
|
||||
if not para_blocks or not pymu_raw_blocks: # 只有图片的拼接的场景
|
||||
for img in all_page_images:
|
||||
content_node = {
|
||||
@@ -233,6 +255,16 @@ def mk_universal_format(para_dict: dict):
|
||||
"img_caption":""
|
||||
}
|
||||
page_lst.append(content_node) # TODO 图片顺序
|
||||
for table in all_page_tables:
|
||||
content_node = {
|
||||
"type": "table",
|
||||
"img_path": table['image_path'],
|
||||
"table_latex": table.get("text"),
|
||||
"table_title": "",
|
||||
"table_caption": "",
|
||||
"table_quality": table.get("quality"),
|
||||
}
|
||||
page_lst.append(content_node) # TODO 图片顺序
|
||||
else:
|
||||
for block in para_blocks:
|
||||
item = block["paras"]
|
||||
@@ -266,56 +298,65 @@ def mk_universal_format(para_dict: dict):
|
||||
|
||||
"""插入图片"""
|
||||
for img in all_page_images:
|
||||
imgbox = img['bbox']
|
||||
img_content = f"{img['image_path']}"
|
||||
# 先看在哪个block内
|
||||
for block in pymu_raw_blocks:
|
||||
bbox = block['bbox']
|
||||
if bbox[0]-1 <= imgbox[0] < bbox[2]+1 and bbox[1]-1 <= imgbox[1] < bbox[3]+1:# 确定在这个大的block内,然后进入逐行比较距离
|
||||
for l in block['lines']:
|
||||
line_box = l['bbox']
|
||||
if line_box[0]-1 <= imgbox[0] < line_box[2]+1 and line_box[1]-1 <= imgbox[1] < line_box[3]+1: # 在line内的,插入line前面
|
||||
line_txt = "".join([s['text'] for s in l['spans']])
|
||||
__insert_before_para(line_txt, img_content, content_lst)
|
||||
break
|
||||
break
|
||||
else:# 在行与行之间
|
||||
# 找到图片x0,y0与line的x0,y0最近的line
|
||||
min_distance = 100000
|
||||
min_line = None
|
||||
for l in block['lines']:
|
||||
line_box = l['bbox']
|
||||
distance = math.sqrt((line_box[0] - imgbox[0])**2 + (line_box[1] - imgbox[1])**2)
|
||||
if distance < min_distance:
|
||||
min_distance = distance
|
||||
min_line = l
|
||||
if min_line:
|
||||
line_txt = "".join([s['text'] for s in min_line['spans']])
|
||||
img_h = imgbox[3] - imgbox[1]
|
||||
if min_distance<img_h: # 文字在图片前面
|
||||
__insert_after_para(line_txt, img_content, content_lst)
|
||||
else:
|
||||
__insert_before_para(line_txt, img_content, content_lst)
|
||||
break
|
||||
else:
|
||||
logger.error(f"Can't find the location of image {img['image_path']} in the markdown file #1")
|
||||
else:# 应当在两个block之间
|
||||
# 找到上方最近的block,如果上方没有就找大下方最近的block
|
||||
top_txt_block = find_top_nearest_text_bbox(pymu_raw_blocks, imgbox)
|
||||
if top_txt_block:
|
||||
line_txt = "".join([s['text'] for s in top_txt_block['lines'][-1]['spans']])
|
||||
__insert_after_para(line_txt, img_content, content_lst)
|
||||
else:
|
||||
bottom_txt_block = find_bottom_nearest_text_bbox(pymu_raw_blocks, imgbox)
|
||||
if bottom_txt_block:
|
||||
line_txt = "".join([s['text'] for s in bottom_txt_block['lines'][0]['spans']])
|
||||
__insert_before_para(line_txt, img_content, content_lst)
|
||||
else: # TODO ,图片可能独占一列,这种情况上下是没有图片的
|
||||
logger.error(f"Can't find the location of image {img['image_path']} in the markdown file #2")
|
||||
insert_img_or_table("image", img, pymu_raw_blocks, content_lst)
|
||||
|
||||
"""插入表格"""
|
||||
for table in all_page_tables:
|
||||
insert_img_or_table("table", table, pymu_raw_blocks, content_lst)
|
||||
# end for
|
||||
return content_lst
|
||||
|
||||
|
||||
def insert_img_or_table(type, element, pymu_raw_blocks, content_lst):
|
||||
element_bbox = element['bbox']
|
||||
# 先看在哪个block内
|
||||
for block in pymu_raw_blocks:
|
||||
bbox = block['bbox']
|
||||
if bbox[0] - 1 <= element_bbox[0] < bbox[2] + 1 and bbox[1] - 1 <= element_bbox[1] < bbox[
|
||||
3] + 1: # 确定在这个大的block内,然后进入逐行比较距离
|
||||
for l in block['lines']:
|
||||
line_box = l['bbox']
|
||||
if line_box[0] - 1 <= element_bbox[0] < line_box[2] + 1 and line_box[1] - 1 <= element_bbox[1] < line_box[
|
||||
3] + 1: # 在line内的,插入line前面
|
||||
line_txt = "".join([s['text'] for s in l['spans']])
|
||||
__insert_before_para(line_txt, type, element, content_lst)
|
||||
break
|
||||
break
|
||||
else: # 在行与行之间
|
||||
# 找到图片x0,y0与line的x0,y0最近的line
|
||||
min_distance = 100000
|
||||
min_line = None
|
||||
for l in block['lines']:
|
||||
line_box = l['bbox']
|
||||
distance = math.sqrt((line_box[0] - element_bbox[0]) ** 2 + (line_box[1] - element_bbox[1]) ** 2)
|
||||
if distance < min_distance:
|
||||
min_distance = distance
|
||||
min_line = l
|
||||
if min_line:
|
||||
line_txt = "".join([s['text'] for s in min_line['spans']])
|
||||
img_h = element_bbox[3] - element_bbox[1]
|
||||
if min_distance < img_h: # 文字在图片前面
|
||||
__insert_after_para(line_txt, type, element, content_lst)
|
||||
else:
|
||||
__insert_before_para(line_txt, type, element, content_lst)
|
||||
break
|
||||
else:
|
||||
logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file #1")
|
||||
else: # 应当在两个block之间
|
||||
# 找到上方最近的block,如果上方没有就找大下方最近的block
|
||||
top_txt_block = find_top_nearest_text_bbox(pymu_raw_blocks, element_bbox)
|
||||
if top_txt_block:
|
||||
line_txt = "".join([s['text'] for s in top_txt_block['lines'][-1]['spans']])
|
||||
__insert_after_para(line_txt, type, element, content_lst)
|
||||
else:
|
||||
bottom_txt_block = find_bottom_nearest_text_bbox(pymu_raw_blocks, element_bbox)
|
||||
if bottom_txt_block:
|
||||
line_txt = "".join([s['text'] for s in bottom_txt_block['lines'][0]['spans']])
|
||||
__insert_before_para(line_txt, type, element, content_lst)
|
||||
else: # TODO ,图片可能独占一列,这种情况上下是没有图片的
|
||||
logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file #2")
|
||||
|
||||
|
||||
def mk_mm_markdown(content_list):
|
||||
"""
|
||||
基于同一格式的内容列表,构造markdown,含图片
|
||||
@@ -348,6 +389,8 @@ def mk_nlp_markdown(content_list):
|
||||
content_md.append(c.get("text"))
|
||||
elif content_type == "equation":
|
||||
content_md.append(f"$$\n{c.get('latex')}\n$$")
|
||||
elif content_type == "table":
|
||||
content_md.append(f"$$\n{c.get('table_latex')}\n$$")
|
||||
elif content_type in UNI_FORMAT_TEXT_TYPE:
|
||||
content_md.append(f"{'#'*int(content_type[1])} {c.get('text')}")
|
||||
return "\n\n".join(content_md)
|
||||
@@ -72,18 +72,26 @@ def ocr_mk_mm_markdown_with_para(pdf_info_dict: dict):
|
||||
markdown = []
|
||||
for _, page_info in pdf_info_dict.items():
|
||||
paras_of_layout = page_info.get("para_blocks")
|
||||
page_markdown = ocr_mk_mm_markdown_with_para_core(paras_of_layout)
|
||||
page_markdown = ocr_mk_mm_markdown_with_para_core(paras_of_layout, "mm")
|
||||
markdown.extend(page_markdown)
|
||||
return '\n\n'.join(markdown)
|
||||
|
||||
|
||||
def ocr_mk_nlp_markdown_with_para(pdf_info_dict: dict):
|
||||
markdown = []
|
||||
for _, page_info in pdf_info_dict.items():
|
||||
paras_of_layout = page_info.get("para_blocks")
|
||||
page_markdown = ocr_mk_mm_markdown_with_para_core(paras_of_layout, "nlp")
|
||||
markdown.extend(page_markdown)
|
||||
return '\n\n'.join(markdown)
|
||||
|
||||
def ocr_mk_mm_markdown_with_para_and_pagination(pdf_info_dict: dict):
|
||||
markdown_with_para_and_pagination = []
|
||||
for page_no, page_info in pdf_info_dict.items():
|
||||
paras_of_layout = page_info.get("para_blocks")
|
||||
if not paras_of_layout:
|
||||
continue
|
||||
page_markdown = ocr_mk_mm_markdown_with_para_core(paras_of_layout)
|
||||
page_markdown = ocr_mk_mm_markdown_with_para_core(paras_of_layout, "mm")
|
||||
markdown_with_para_and_pagination.append({
|
||||
'page_no': page_no,
|
||||
'md_content': '\n\n'.join(page_markdown)
|
||||
@@ -91,7 +99,7 @@ def ocr_mk_mm_markdown_with_para_and_pagination(pdf_info_dict: dict):
|
||||
return markdown_with_para_and_pagination
|
||||
|
||||
|
||||
def ocr_mk_mm_markdown_with_para_core(paras_of_layout):
|
||||
def ocr_mk_mm_markdown_with_para_core(paras_of_layout, mode):
|
||||
page_markdown = []
|
||||
for paras in paras_of_layout:
|
||||
for para in paras:
|
||||
@@ -99,30 +107,60 @@ def ocr_mk_mm_markdown_with_para_core(paras_of_layout):
|
||||
for line in para:
|
||||
for span in line['spans']:
|
||||
span_type = span.get('type')
|
||||
content = ''
|
||||
if span_type == ContentType.Text:
|
||||
content = split_long_words(span['content'])
|
||||
# content = span['content']
|
||||
content = ocr_escape_special_markdown_char(split_long_words(span['content']))
|
||||
elif span_type == ContentType.InlineEquation:
|
||||
content = f"${span['content']}$"
|
||||
content = f"${ocr_escape_special_markdown_char(span['content'])}$"
|
||||
elif span_type == ContentType.InterlineEquation:
|
||||
content = f"\n$$\n{span['content']}\n$$\n"
|
||||
content = f"\n$$\n{ocr_escape_special_markdown_char(span['content'])}\n$$\n"
|
||||
elif span_type in [ContentType.Image, ContentType.Table]:
|
||||
content = f"\n})\n"
|
||||
para_text += content + ' '
|
||||
page_markdown.append(para_text.strip() + ' ')
|
||||
if mode == 'mm':
|
||||
content = f"\n})\n"
|
||||
elif mode == 'nlp':
|
||||
pass
|
||||
if content != '':
|
||||
para_text += content + ' '
|
||||
if para_text.strip() == '':
|
||||
continue
|
||||
else:
|
||||
page_markdown.append(para_text.strip() + ' ')
|
||||
return page_markdown
|
||||
|
||||
|
||||
def para_to_standard_format(para):
|
||||
para_content = {}
|
||||
if len(para) == 1:
|
||||
para_content = line_to_standard_format(para[0])
|
||||
elif len(para) > 1:
|
||||
para_text = ''
|
||||
inline_equation_num = 0
|
||||
for line in para:
|
||||
for span in line['spans']:
|
||||
span_type = span.get('type')
|
||||
if span_type == ContentType.Text:
|
||||
content = ocr_escape_special_markdown_char(split_long_words(span['content']))
|
||||
elif span_type == ContentType.InlineEquation:
|
||||
content = f"${ocr_escape_special_markdown_char(span['content'])}$"
|
||||
inline_equation_num += 1
|
||||
para_text += content + ' '
|
||||
para_content = {
|
||||
'type': 'text',
|
||||
'text': para_text,
|
||||
'inline_equation_num': inline_equation_num
|
||||
}
|
||||
return para_content
|
||||
|
||||
def make_standard_format_with_para(pdf_info_dict: dict):
|
||||
content_list = []
|
||||
for _, page_info in pdf_info_dict.items():
|
||||
paras = page_info.get("para_blocks")
|
||||
if not paras:
|
||||
paras_of_layout = page_info.get("para_blocks")
|
||||
if not paras_of_layout:
|
||||
continue
|
||||
for para in paras:
|
||||
for line in para:
|
||||
content = line_to_standard_format(line)
|
||||
content_list.append(content)
|
||||
for paras in paras_of_layout:
|
||||
for para in paras:
|
||||
para_content = para_to_standard_format(para)
|
||||
content_list.append(para_content)
|
||||
return content_list
|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ from magic_pdf.libs.ocr_content_type import ContentType
|
||||
LINE_STOP_FLAG = ['.', '!', '?', '。', '!', '?',":", ":", ")", ")", ";"]
|
||||
INLINE_EQUATION = ContentType.InlineEquation
|
||||
INTERLINE_EQUATION = ContentType.InterlineEquation
|
||||
TEXT = "text"
|
||||
TEXT = ContentType.Text
|
||||
|
||||
|
||||
def __get_span_text(span):
|
||||
@@ -20,7 +20,7 @@ def __get_span_text(span):
|
||||
return c
|
||||
|
||||
|
||||
def __detect_list_lines(lines, new_layout_bboxes, lang='en'):
|
||||
def __detect_list_lines(lines, new_layout_bboxes, lang):
|
||||
"""
|
||||
探测是否包含了列表,并且把列表的行分开.
|
||||
这样的段落特点是,顶格字母大写/数字,紧跟着几行缩进的。缩进的行首字母含小写的。
|
||||
@@ -315,11 +315,14 @@ def __split_para_in_layoutbox(lines_group, new_layout_bbox, lang="en", char_avg_
|
||||
|
||||
return layout_paras, list_info
|
||||
|
||||
def __connect_list_inter_layout(layout_paras, new_layout_bbox, layout_list_info, page_num, lang="en"):
|
||||
def __connect_list_inter_layout(layout_paras, new_layout_bbox, layout_list_info, page_num, lang):
|
||||
"""
|
||||
如果上个layout的最后一个段落是列表,下一个layout的第一个段落也是列表,那么将他们连接起来。
|
||||
如果上个layout的最后一个段落是列表,下一个layout的第一个段落也是列表,那么将他们连接起来。 TODO 因为没有区分列表和段落,所以这个方法暂时不实现。
|
||||
根据layout_list_info判断是不是列表。,下个layout的第一个段如果不是列表,那么看他们是否有几行都有相同的缩进。
|
||||
"""
|
||||
if len(layout_paras)==0 or len(layout_list_info)==0: # 0的时候最后的return 会出错
|
||||
return layout_paras, [False, False]
|
||||
|
||||
for i in range(1, len(layout_paras)):
|
||||
pre_layout_list_info = layout_list_info[i-1]
|
||||
next_layout_list_info = layout_list_info[i]
|
||||
@@ -345,7 +348,37 @@ def __connect_list_inter_layout(layout_paras, new_layout_bbox, layout_list_info,
|
||||
pre_last_para.extend(may_list_lines)
|
||||
layout_paras[i] = layout_paras[i][len(may_list_lines):]
|
||||
|
||||
return layout_paras
|
||||
return layout_paras, [layout_list_info[0][0], layout_list_info[-1][1]] # 同时还返回了这个页面级别的开头、结尾是不是列表的信息
|
||||
|
||||
|
||||
def __connect_list_inter_page(pre_page_paras, next_page_paras, pre_page_layout_bbox, next_page_layout_bbox, pre_page_list_info, next_page_list_info, page_num, lang):
|
||||
"""
|
||||
如果上个layout的最后一个段落是列表,下一个layout的第一个段落也是列表,那么将他们连接起来。 TODO 因为没有区分列表和段落,所以这个方法暂时不实现。
|
||||
根据layout_list_info判断是不是列表。,下个layout的第一个段如果不是列表,那么看他们是否有几行都有相同的缩进。
|
||||
"""
|
||||
if len(pre_page_paras)==0 or len(next_page_paras)==0: # 0的时候最后的return 会出错
|
||||
return False
|
||||
|
||||
if pre_page_list_info[1] and not next_page_list_info[0]: # 前一个是列表结尾,后一个是非列表开头,此时检测是否有相同的缩进
|
||||
logger.info(f"连接page {page_num} 内的list")
|
||||
# 向layout_paras[i] 寻找开头具有相同缩进的连续的行
|
||||
may_list_lines = []
|
||||
for j in range(len(next_page_paras[0])):
|
||||
line = next_page_paras[0][j]
|
||||
if len(line)==1: # 只可能是一行,多行情况再需要分析了
|
||||
if line[0]['bbox'][0] > __find_layout_bbox_by_line(line[0]['bbox'], next_page_layout_bbox)[0]:
|
||||
may_list_lines.append(line[0])
|
||||
else:
|
||||
break
|
||||
else:
|
||||
break
|
||||
# 如果这些行的缩进是相等的,那么连到上一个layout的最后一个段落上。
|
||||
if len(may_list_lines)>0 and len(set([x['bbox'][0] for x in may_list_lines]))==1:
|
||||
pre_page_paras[-1].append(may_list_lines)
|
||||
next_page_paras[0] = next_page_paras[0][len(may_list_lines):]
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def __find_layout_bbox_by_line(line_bbox, layout_bboxes):
|
||||
@@ -358,7 +391,7 @@ def __find_layout_bbox_by_line(line_bbox, layout_bboxes):
|
||||
return None
|
||||
|
||||
|
||||
def __connect_para_inter_layoutbox(layout_paras, new_layout_bbox, lang="en"):
|
||||
def __connect_para_inter_layoutbox(layout_paras, new_layout_bbox, lang):
|
||||
"""
|
||||
layout之间进行分段。
|
||||
主要是计算前一个layOut的最后一行和后一个layout的第一行是否可以连接。
|
||||
@@ -368,10 +401,19 @@ def __connect_para_inter_layoutbox(layout_paras, new_layout_bbox, lang="en"):
|
||||
|
||||
"""
|
||||
connected_layout_paras = []
|
||||
if len(layout_paras)==0:
|
||||
return connected_layout_paras
|
||||
|
||||
connected_layout_paras.append(layout_paras[0])
|
||||
for i in range(1, len(layout_paras)):
|
||||
pre_last_line = layout_paras[i-1][-1][-1]
|
||||
next_first_line = layout_paras[i][0][0]
|
||||
try:
|
||||
if len(layout_paras[i])==0 or len(layout_paras[i-1])==0: # TODO 考虑连接问题,
|
||||
continue
|
||||
pre_last_line = layout_paras[i-1][-1][-1]
|
||||
next_first_line = layout_paras[i][0][0]
|
||||
except Exception as e:
|
||||
logger.error(f"page layout {i} has no line")
|
||||
continue
|
||||
pre_last_line_text = ''.join([__get_span_text(span) for span in pre_last_line['spans']])
|
||||
pre_last_line_type = pre_last_line['spans'][-1]['type']
|
||||
next_first_line_text = ''.join([__get_span_text(span) for span in next_first_line['spans']])
|
||||
@@ -400,7 +442,7 @@ def __connect_para_inter_layoutbox(layout_paras, new_layout_bbox, lang="en"):
|
||||
return connected_layout_paras
|
||||
|
||||
|
||||
def __connect_para_inter_page(pre_page_paras, next_page_paras, pre_page_layout_bbox, next_page_layout_bbox, lang):
|
||||
def __connect_para_inter_page(pre_page_paras, next_page_paras, pre_page_layout_bbox, next_page_layout_bbox, page_num, lang):
|
||||
"""
|
||||
连接起来相邻两个页面的段落——前一个页面最后一个段落和后一个页面的第一个段落。
|
||||
是否可以连接的条件:
|
||||
@@ -408,7 +450,7 @@ def __connect_para_inter_page(pre_page_paras, next_page_paras, pre_page_layout_b
|
||||
2. 后一个页面的第一个段落第一行没有空白开头。
|
||||
"""
|
||||
# 有的页面可能压根没有文字
|
||||
if len(pre_page_paras)==0 or len(next_page_paras)==0:
|
||||
if len(pre_page_paras)==0 or len(next_page_paras)==0 or len(pre_page_paras[0])==0 or len(next_page_paras[0])==0: # TODO [[]]为什么出现在pre_page_paras里?
|
||||
return False
|
||||
pre_last_para = pre_page_paras[-1][-1]
|
||||
next_first_para = next_page_paras[0][0]
|
||||
@@ -436,8 +478,85 @@ def __connect_para_inter_page(pre_page_paras, next_page_paras, pre_page_layout_b
|
||||
else:
|
||||
return False
|
||||
|
||||
def find_consecutive_true_regions(input_array):
|
||||
start_index = None # 连续True区域的起始索引
|
||||
regions = [] # 用于保存所有连续True区域的起始和结束索引
|
||||
|
||||
def __do_split(blocks, layout_bboxes, new_layout_bbox, page_num, lang="en"):
|
||||
for i in range(len(input_array)):
|
||||
# 如果我们找到了一个True值,并且当前并没有在连续True区域中
|
||||
if input_array[i] and start_index is None:
|
||||
start_index = i # 记录连续True区域的起始索引
|
||||
|
||||
# 如果我们找到了一个False值,并且当前在连续True区域中
|
||||
elif not input_array[i] and start_index is not None:
|
||||
# 如果连续True区域长度大于1,那么将其添加到结果列表中
|
||||
if i - start_index > 1:
|
||||
regions.append((start_index, i-1))
|
||||
start_index = None # 重置起始索引
|
||||
|
||||
# 如果最后一个元素是True,那么需要将最后一个连续True区域加入到结果列表中
|
||||
if start_index is not None and len(input_array) - start_index > 1:
|
||||
regions.append((start_index, len(input_array)-1))
|
||||
|
||||
return regions
|
||||
|
||||
|
||||
def __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang, debug_mode):
|
||||
"""
|
||||
找出来中间对齐的连续单行文本,如果连续行高度相同,那么合并为一个段落。
|
||||
一个line居中的条件是:
|
||||
1. 水平中心点跨越layout的中心点。
|
||||
2. 左右两侧都有空白
|
||||
"""
|
||||
|
||||
for layout_i, layout_para in enumerate(page_paras):
|
||||
layout_box = new_layout_bbox[layout_i]
|
||||
single_line_paras_tag = []
|
||||
for i in range(len(layout_para)):
|
||||
single_line_paras_tag.append(len(layout_para[i])==1 and layout_para[i][0]['spans'][0]['type']==TEXT)
|
||||
|
||||
"""找出来连续的单行文本,如果连续行高度相同,那么合并为一个段落。"""
|
||||
consecutive_single_line_indices = find_consecutive_true_regions(single_line_paras_tag)
|
||||
if len(consecutive_single_line_indices)>0:
|
||||
index_offset = 0
|
||||
"""检查这些行是否是高度相同的,居中的"""
|
||||
for start, end in consecutive_single_line_indices:
|
||||
start += index_offset
|
||||
end += index_offset
|
||||
line_hi = np.array([line[0]['bbox'][3]-line[0]['bbox'][1] for line in layout_para[start:end+1]])
|
||||
first_line_text = ''.join([__get_span_text(span) for span in layout_para[start][0]['spans']])
|
||||
if "Table" in first_line_text or "Figure" in first_line_text:
|
||||
pass
|
||||
if debug_mode:
|
||||
logger.info(line_hi.std())
|
||||
|
||||
if line_hi.std()<2:
|
||||
"""行高度相同,那么判断是否居中"""
|
||||
all_left_x0 = [line[0]['bbox'][0] for line in layout_para[start:end+1]]
|
||||
all_right_x1 = [line[0]['bbox'][2] for line in layout_para[start:end+1]]
|
||||
layout_center = (layout_box[0] + layout_box[2]) / 2
|
||||
if all([x0 < layout_center < x1 for x0, x1 in zip(all_left_x0, all_right_x1)]) \
|
||||
and not all([x0==layout_box[0] for x0 in all_left_x0]) \
|
||||
and not all([x1==layout_box[2] for x1 in all_right_x1]):
|
||||
merge_para = [l[0] for l in layout_para[start:end+1]]
|
||||
para_text = ''.join([__get_span_text(span) for line in merge_para for span in line['spans']])
|
||||
if debug_mode:
|
||||
logger.info(para_text)
|
||||
layout_para[start:end+1] = [merge_para]
|
||||
index_offset -= end-start
|
||||
|
||||
return
|
||||
|
||||
|
||||
def __merge_signle_list_text(page_paras, new_layout_bbox, page_num, lang):
|
||||
"""
|
||||
找出来连续的单行文本,如果首行顶格,接下来的几个单行段落缩进对齐,那么合并为一个段落。
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def __do_split_page(blocks, layout_bboxes, new_layout_bbox, page_num, lang):
|
||||
"""
|
||||
根据line和layout情况进行分段
|
||||
先实现一个根据行末尾特征分段的简单方法。
|
||||
@@ -451,35 +570,54 @@ def __do_split(blocks, layout_bboxes, new_layout_bbox, page_num, lang="en"):
|
||||
"""
|
||||
lines_group = __group_line_by_layout(blocks, layout_bboxes, lang) # block内分段
|
||||
layout_paras, layout_list_info = __split_para_in_layoutbox(lines_group, new_layout_bbox, lang) # layout内分段
|
||||
layout_paras2 = __connect_list_inter_layout(layout_paras, new_layout_bbox, layout_list_info, page_num, lang) # layout之间连接列表段落
|
||||
layout_paras2, page_list_info = __connect_list_inter_layout(layout_paras, new_layout_bbox, layout_list_info, page_num, lang) # layout之间连接列表段落
|
||||
connected_layout_paras = __connect_para_inter_layoutbox(layout_paras2, new_layout_bbox, lang) # layout间链接段落
|
||||
|
||||
return connected_layout_paras
|
||||
|
||||
|
||||
def para_split(pdf_info_dict, lang="en"):
|
||||
return connected_layout_paras, page_list_info
|
||||
|
||||
|
||||
def para_split(pdf_info_dict, debug_mode, lang="en"):
|
||||
"""
|
||||
根据line和layout情况进行分段
|
||||
"""
|
||||
new_layout_of_pages = [] # 数组的数组,每个元素是一个页面的layoutS
|
||||
all_page_list_info = [] # 保存每个页面开头和结尾是否是列表
|
||||
for page_num, page in pdf_info_dict.items():
|
||||
blocks = page['preproc_blocks']
|
||||
layout_bboxes = page['layout_bboxes']
|
||||
new_layout_bbox = __common_pre_proc(blocks, layout_bboxes)
|
||||
new_layout_of_pages.append(new_layout_bbox)
|
||||
splited_blocks = __do_split(blocks, layout_bboxes, new_layout_bbox, page_num, lang)
|
||||
splited_blocks, page_list_info = __do_split_page(blocks, layout_bboxes, new_layout_bbox, page_num, lang)
|
||||
all_page_list_info.append(page_list_info)
|
||||
page['para_blocks'] = splited_blocks
|
||||
|
||||
"""连接页面与页面之间的可能合并的段落"""
|
||||
pdf_infos = list(pdf_info_dict.values())
|
||||
for i, page in enumerate(pdf_info_dict.values()):
|
||||
if i==0:
|
||||
for page_num, page in enumerate(pdf_info_dict.values()):
|
||||
if page_num==0:
|
||||
continue
|
||||
pre_page_paras = pdf_infos[i-1]['para_blocks']
|
||||
next_page_paras = pdf_infos[i]['para_blocks']
|
||||
pre_page_layout_bbox = new_layout_of_pages[i-1]
|
||||
next_page_layout_bbox = new_layout_of_pages[i]
|
||||
pre_page_paras = pdf_infos[page_num-1]['para_blocks']
|
||||
next_page_paras = pdf_infos[page_num]['para_blocks']
|
||||
pre_page_layout_bbox = new_layout_of_pages[page_num-1]
|
||||
next_page_layout_bbox = new_layout_of_pages[page_num]
|
||||
|
||||
is_conn= __connect_para_inter_page(pre_page_paras, next_page_paras, pre_page_layout_bbox, next_page_layout_bbox, lang)
|
||||
if is_conn:
|
||||
logger.info(f"连接了第{i-1}页和第{i}页的段落")
|
||||
is_conn = __connect_para_inter_page(pre_page_paras, next_page_paras, pre_page_layout_bbox, next_page_layout_bbox, page_num, lang)
|
||||
if debug_mode:
|
||||
if is_conn:
|
||||
logger.info(f"连接了第{page_num-1}页和第{page_num}页的段落")
|
||||
|
||||
is_list_conn = __connect_list_inter_page(pre_page_paras, next_page_paras, pre_page_layout_bbox, next_page_layout_bbox, all_page_list_info[page_num-1], all_page_list_info[page_num], page_num, lang)
|
||||
if debug_mode:
|
||||
if is_list_conn:
|
||||
logger.info(f"连接了第{page_num-1}页和第{page_num}页的列表段落")
|
||||
|
||||
"""接下来可能会漏掉一些特别的一些可以合并的内容,对他们进行段落连接
|
||||
1. 正文中有时出现一个行顶格,接下来几行缩进的情况。
|
||||
2. 居中的一些连续单行,如果高度相同,那么可能是一个段落。
|
||||
"""
|
||||
for page_num, page in enumerate(pdf_info_dict.values()):
|
||||
page_paras = page['para_blocks']
|
||||
new_layout_bbox = new_layout_of_pages[page_num]
|
||||
__connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang, debug_mode=debug_mode)
|
||||
__merge_signle_list_text(page_paras, new_layout_bbox, page_num, lang)
|
||||
|
||||
@@ -53,7 +53,7 @@ from magic_pdf.pre_proc.citationmarker_remove import remove_citation_marker
|
||||
from magic_pdf.pre_proc.equations_replace import combine_chars_to_pymudict, remove_chars_in_text_blocks, replace_equations_in_textblock
|
||||
from magic_pdf.pre_proc.pdf_pre_filter import pdf_filter
|
||||
from magic_pdf.pre_proc.detect_footer_header_by_statistics import drop_footer_header
|
||||
from magic_pdf.pre_proc.construct_paras import construct_page_component
|
||||
from magic_pdf.pre_proc.construct_page_dict import construct_page_component
|
||||
from magic_pdf.pre_proc.fix_image import combine_images, fix_image_vertical, fix_seperated_image, include_img_title
|
||||
from magic_pdf.post_proc.pdf_post_filter import pdf_post_filter
|
||||
from magic_pdf.pre_proc.remove_rotate_bbox import get_side_boundry, remove_rotate_side_textblock, remove_side_blank_block
|
||||
@@ -71,8 +71,7 @@ paraMergeException_msg = ParaMergeException().message
|
||||
|
||||
|
||||
def parse_pdf_by_model(
|
||||
s3_pdf_path,
|
||||
s3_pdf_profile,
|
||||
pdf_bytes,
|
||||
pdf_model_output,
|
||||
save_path,
|
||||
book_name,
|
||||
@@ -83,7 +82,7 @@ def parse_pdf_by_model(
|
||||
junk_img_bojids=[],
|
||||
debug_mode=False,
|
||||
):
|
||||
pdf_bytes = read_file(s3_pdf_path, s3_pdf_profile)
|
||||
|
||||
save_tmp_path = os.path.join(os.path.dirname(__file__), "../..", "tmp", "unittest")
|
||||
md_bookname_save_path = ""
|
||||
book_name = sanitize_filename(book_name)
|
||||
|
||||
@@ -18,6 +18,7 @@ from magic_pdf.libs.drop_tag import DropTag
|
||||
from magic_pdf.libs.ocr_content_type import ContentType
|
||||
from magic_pdf.libs.safe_filename import sanitize_filename
|
||||
from magic_pdf.para.para_split import para_split
|
||||
from magic_pdf.pre_proc.construct_page_dict import ocr_construct_page_component
|
||||
from magic_pdf.pre_proc.detect_footer_by_model import parse_footers
|
||||
from magic_pdf.pre_proc.detect_footnote import parse_footnotes_by_model
|
||||
from magic_pdf.pre_proc.detect_header import parse_headers
|
||||
@@ -33,42 +34,19 @@ from magic_pdf.pre_proc.ocr_span_list_modify import remove_spans_by_bboxes, remo
|
||||
from magic_pdf.pre_proc.remove_bbox_overlap import remove_overlap_between_bbox
|
||||
|
||||
|
||||
def construct_page_component(blocks, layout_bboxes, page_id, page_w, page_h, layout_tree,
|
||||
images, tables, interline_equations, inline_equations,
|
||||
dropped_text_block, dropped_image_block, dropped_table_block, dropped_equation_block,
|
||||
need_remove_spans_bboxes_dict):
|
||||
return_dict = {
|
||||
'preproc_blocks': blocks,
|
||||
'layout_bboxes': layout_bboxes,
|
||||
'page_idx': page_id,
|
||||
'page_size': [page_w, page_h],
|
||||
'_layout_tree': layout_tree,
|
||||
'images': images,
|
||||
'tables': tables,
|
||||
'interline_equations': interline_equations,
|
||||
'inline_equations': inline_equations,
|
||||
'droped_text_block': dropped_text_block,
|
||||
'droped_image_block': dropped_image_block,
|
||||
'droped_table_block': dropped_table_block,
|
||||
'dropped_equation_block': dropped_equation_block,
|
||||
'droped_bboxes': need_remove_spans_bboxes_dict,
|
||||
}
|
||||
return return_dict
|
||||
|
||||
|
||||
def parse_pdf_by_ocr(
|
||||
pdf_path,
|
||||
s3_pdf_profile,
|
||||
pdf_model_output,
|
||||
save_path,
|
||||
book_name,
|
||||
pdf_model_profile=None,
|
||||
image_s3_config=None,
|
||||
start_page_id=0,
|
||||
end_page_id=None,
|
||||
debug_mode=False,
|
||||
pdf_bytes,
|
||||
pdf_model_output,
|
||||
save_path,
|
||||
book_name,
|
||||
pdf_model_profile=None,
|
||||
image_s3_config=None,
|
||||
start_page_id=0,
|
||||
end_page_id=None,
|
||||
debug_mode=False,
|
||||
):
|
||||
pdf_bytes = read_file(pdf_path, s3_pdf_profile)
|
||||
|
||||
save_tmp_path = os.path.join(os.path.dirname(__file__), "../..", "tmp", "unittest")
|
||||
book_name = sanitize_filename(book_name)
|
||||
md_bookname_save_path = ""
|
||||
@@ -95,7 +73,6 @@ def parse_pdf_by_ocr(
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
|
||||
end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
|
||||
for page_id in range(start_page_id, end_page_id + 1):
|
||||
|
||||
@@ -125,13 +102,6 @@ def parse_pdf_by_ocr(
|
||||
page_id, page, ocr_page_info, md_bookname_save_path, debug_mode=debug_mode
|
||||
)
|
||||
|
||||
# 构建需要remove的bbox列表
|
||||
# need_remove_spans_bboxes = []
|
||||
# need_remove_spans_bboxes.extend(page_no_bboxes)
|
||||
# need_remove_spans_bboxes.extend(header_bboxes)
|
||||
# need_remove_spans_bboxes.extend(footer_bboxes)
|
||||
# need_remove_spans_bboxes.extend(footnote_bboxes)
|
||||
|
||||
# 构建需要remove的bbox字典
|
||||
need_remove_spans_bboxes_dict = {
|
||||
DropTag.PAGE_NUMBER: page_no_bboxes,
|
||||
@@ -199,50 +169,48 @@ def parse_pdf_by_ocr(
|
||||
else:
|
||||
continue
|
||||
|
||||
|
||||
|
||||
|
||||
# 删除重叠spans中较小的那些
|
||||
'''删除重叠spans中较小的那些'''
|
||||
spans, dropped_spans_by_span_overlap = remove_overlaps_min_spans(spans)
|
||||
|
||||
# 删除remove_span_block_bboxes中的bbox
|
||||
# spans = remove_spans_by_bboxes(spans, need_remove_spans_bboxes)
|
||||
# 按qa要求,增加drop相关数据
|
||||
'''
|
||||
删除remove_span_block_bboxes中的bbox
|
||||
并增加drop相关数据
|
||||
'''
|
||||
spans, dropped_spans_by_removed_bboxes = remove_spans_by_bboxes_dict(spans, need_remove_spans_bboxes_dict)
|
||||
|
||||
# 对image和table截图
|
||||
'''对image和table截图'''
|
||||
spans = cut_image_and_table(spans, page, page_id, book_name, save_path, img_s3_client)
|
||||
|
||||
# 行内公式调整, 高度调整至与同行文字高度一致(优先左侧, 其次右侧)
|
||||
'''行内公式调整, 高度调整至与同行文字高度一致(优先左侧, 其次右侧)'''
|
||||
displayed_list = []
|
||||
text_inline_lines = []
|
||||
modify_y_axis(spans, displayed_list, text_inline_lines)
|
||||
# 模型识别错误的行间公式, type类型转换成行内公式
|
||||
|
||||
'''模型识别错误的行间公式, type类型转换成行内公式'''
|
||||
spans = modify_inline_equation(spans, displayed_list, text_inline_lines)
|
||||
|
||||
# bbox去除粘连
|
||||
'''bbox去除粘连'''
|
||||
spans = remove_overlap_between_bbox(spans)
|
||||
|
||||
# 对tpye=["interline_equation", "image", "table"]进行额外处理,如果左边有字的话,将该span的bbox中y0调整至不高于文字的y0
|
||||
'''
|
||||
对tpye=["interline_equation", "image", "table"]进行额外处理,
|
||||
如果左边有字的话,将该span的bbox中y0调整至不高于文字的y0
|
||||
'''
|
||||
spans = adjust_bbox_for_standalone_block(spans)
|
||||
|
||||
|
||||
# 从ocr_page_info中解析layout信息(按自然阅读方向排序,并修复重叠和交错的bad case)
|
||||
'''从ocr_page_info中解析layout信息(按自然阅读方向排序,并修复重叠和交错的bad case)'''
|
||||
layout_bboxes, layout_tree = layout_detect(ocr_page_info['subfield_dets'], page, ocr_page_info)
|
||||
|
||||
# 将spans合并成line(在layout内,从上到下,从左到右)
|
||||
'''将spans合并成line(在layout内,从上到下,从左到右)'''
|
||||
lines, dropped_spans_by_layout = merge_spans_to_line_by_layout(spans, layout_bboxes)
|
||||
|
||||
# 将lines合并成block
|
||||
'''将lines合并成block'''
|
||||
blocks = merge_lines_to_block(lines)
|
||||
|
||||
# 根据block合并段落
|
||||
#para_blocks = para_split(blocks, layout_bboxes)
|
||||
|
||||
# 获取QA需要外置的list
|
||||
'''获取QA需要外置的list'''
|
||||
images, tables, interline_equations, inline_equations = get_qa_need_list(blocks)
|
||||
|
||||
# drop的span_list合并
|
||||
'''drop的span_list合并'''
|
||||
dropped_spans = []
|
||||
dropped_spans.extend(dropped_spans_by_span_overlap)
|
||||
dropped_spans.extend(dropped_spans_by_removed_bboxes)
|
||||
@@ -263,19 +231,18 @@ def parse_pdf_by_ocr(
|
||||
elif span['type'] in [ContentType.InlineEquation, ContentType.InterlineEquation]:
|
||||
dropped_equation_block.append(span)
|
||||
|
||||
|
||||
|
||||
# 构造pdf_info_dict
|
||||
page_info = construct_page_component(blocks, layout_bboxes, page_id, page_w, page_h, layout_tree,
|
||||
'''构造pdf_info_dict'''
|
||||
page_info = ocr_construct_page_component(blocks, layout_bboxes, page_id, page_w, page_h, layout_tree,
|
||||
images, tables, interline_equations, inline_equations,
|
||||
dropped_text_block, dropped_image_block, dropped_table_block, dropped_equation_block,
|
||||
dropped_text_block, dropped_image_block, dropped_table_block,
|
||||
dropped_equation_block,
|
||||
need_remove_spans_bboxes_dict)
|
||||
pdf_info_dict[f"page_{page_id}"] = page_info
|
||||
|
||||
"""分段"""
|
||||
para_split(pdf_info_dict)
|
||||
|
||||
# 在测试时,保存调试信息
|
||||
para_split(pdf_info_dict, debug_mode=debug_mode)
|
||||
|
||||
'''在测试时,保存调试信息'''
|
||||
if debug_mode:
|
||||
params_file_save_path = join_path(
|
||||
save_tmp_path, "md", book_name, "preproc_out.json"
|
||||
|
||||
@@ -75,7 +75,7 @@ from magic_pdf.pre_proc.equations_replace import (
|
||||
)
|
||||
from magic_pdf.pre_proc.pdf_pre_filter import pdf_filter
|
||||
from magic_pdf.pre_proc.detect_footer_header_by_statistics import drop_footer_header
|
||||
from magic_pdf.pre_proc.construct_paras import construct_page_component
|
||||
from magic_pdf.pre_proc.construct_page_dict import construct_page_component
|
||||
from magic_pdf.pre_proc.fix_image import (
|
||||
combine_images,
|
||||
fix_image_vertical,
|
||||
@@ -220,7 +220,7 @@ def parse_pdf_for_train(
|
||||
# 解析表格并对table_bboxes进行位置的微调,防止表格周围的文字被截断
|
||||
table_bboxes = parse_tables(page_id, page, model_output_json)
|
||||
table_bboxes = fix_tables(
|
||||
page, table_bboxes, include_table_title=True, scan_line_num=2
|
||||
page, table_bboxes, include_table_title=False, scan_line_num=2
|
||||
) # 修正
|
||||
table_bboxes = fix_table_text_block(
|
||||
text_raw_blocks, table_bboxes
|
||||
@@ -253,7 +253,7 @@ def parse_pdf_for_train(
|
||||
# isSimpleLayout_flag, fullColumn_cnt, subColumn_cnt, curPage_loss = evaluate_pdf_layout(page_id, page, model_output_json)
|
||||
接下来开始进行预处理过程
|
||||
"""
|
||||
title_bboxs = parse_titles(page_id, page, model_output_json)
|
||||
# title_bboxs = parse_titles(page_id, page, model_output_json)
|
||||
|
||||
"""去掉每页的页码、页眉、页脚"""
|
||||
page_no_bboxs = parse_pageNos(page_id, page, model_output_json)
|
||||
@@ -531,6 +531,7 @@ def parse_pdf_for_train(
|
||||
page_info["bak_page_no_bboxes"] = page_no_bboxs
|
||||
page_info["bak_header_bboxes"] = header_bboxs
|
||||
page_info["bak_footer_bboxes"] = footer_bboxs
|
||||
page_info["bak_footer_note_bboxes"] = footnote_bboxes_tmp
|
||||
|
||||
pdf_info_dict[f"page_{page_id}"] = page_info
|
||||
|
||||
|
||||
@@ -3,12 +3,6 @@ import sys
|
||||
import time
|
||||
from urllib.parse import quote
|
||||
|
||||
from magic_pdf.dict2md.ocr_mkcontent import (
|
||||
ocr_mk_nlp_markdown,
|
||||
ocr_mk_mm_markdown,
|
||||
ocr_mk_mm_standard_format,
|
||||
ocr_mk_mm_markdown_with_para, ocr_mk_mm_markdown_with_para_and_pagination,
|
||||
)
|
||||
from magic_pdf.libs.commons import (
|
||||
read_file,
|
||||
join_path,
|
||||
@@ -18,45 +12,18 @@ from magic_pdf.libs.commons import (
|
||||
)
|
||||
from magic_pdf.libs.drop_reason import DropReason
|
||||
from magic_pdf.libs.json_compressor import JsonCompressor
|
||||
from magic_pdf.dict2md.mkcontent import mk_nlp_markdown, mk_universal_format
|
||||
from magic_pdf.dict2md.mkcontent import mk_universal_format
|
||||
from magic_pdf.pdf_parse_by_model import parse_pdf_by_model
|
||||
from magic_pdf.filter.pdf_classify_by_type import classify
|
||||
from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
|
||||
from loguru import logger
|
||||
|
||||
from magic_pdf.pdf_parse_for_train import parse_pdf_for_train
|
||||
from magic_pdf.spark import exception_handler, get_data_source
|
||||
from magic_pdf.train_utils.convert_to_train_format import convert_to_train_format
|
||||
from app.common.s3 import get_s3_config, get_s3_client
|
||||
from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
|
||||
from magic_pdf.spark import get_s3_config, get_s3_client
|
||||
|
||||
|
||||
def exception_handler(jso: dict, e):
|
||||
logger.exception(e)
|
||||
jso["need_drop"] = True
|
||||
jso["drop_reason"] = DropReason.Exception
|
||||
jso["exception"] = f"ERROR: {e}"
|
||||
return jso
|
||||
|
||||
|
||||
def get_data_type(jso: dict):
|
||||
data_type = jso.get("data_type")
|
||||
if data_type is None:
|
||||
data_type = jso.get("file_type")
|
||||
return data_type
|
||||
|
||||
|
||||
def get_bookid(jso: dict):
|
||||
book_id = jso.get("bookid")
|
||||
if book_id is None:
|
||||
book_id = jso.get("original_file_id")
|
||||
return book_id
|
||||
|
||||
|
||||
def get_data_source(jso: dict):
|
||||
data_source = jso.get("data_source")
|
||||
if data_source is None:
|
||||
data_source = jso.get("file_source")
|
||||
return data_source
|
||||
|
||||
|
||||
def meta_scan(jso: dict, doc_layout_check=True) -> dict:
|
||||
s3_pdf_path = jso.get("file_location")
|
||||
@@ -319,22 +286,13 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
# 开始正式逻辑
|
||||
s3_pdf_path = jso.get("file_location")
|
||||
s3_config = get_s3_config(s3_pdf_path)
|
||||
pdf_bytes = read_file(s3_pdf_path, s3_config)
|
||||
model_output_json_list = jso.get("doc_layout_result")
|
||||
data_source = get_data_source(jso)
|
||||
file_id = jso.get("file_id")
|
||||
book_name = f"{data_source}/{file_id}"
|
||||
|
||||
# 1.23.22已修复
|
||||
# if debug_mode:
|
||||
# pass
|
||||
# else:
|
||||
# if book_name == "zlib/zlib_21929367":
|
||||
# jso['need_drop'] = True
|
||||
# jso['drop_reason'] = DropReason.SPECIAL_PDF
|
||||
# return jso
|
||||
|
||||
junk_img_bojids = jso["pdf_meta"]["junk_img_bojids"]
|
||||
# total_page = jso['pdf_meta']['total_page']
|
||||
|
||||
# 增加检测 max_svgs 数量的检测逻辑,如果 max_svgs 超过3000则drop
|
||||
svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
|
||||
@@ -342,9 +300,6 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
if max_svgs > 3000:
|
||||
jso["need_drop"] = True
|
||||
jso["drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
|
||||
# elif total_page > 1000:
|
||||
# jso['need_drop'] = True
|
||||
# jso['drop_reason'] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES
|
||||
else:
|
||||
try:
|
||||
save_path = s3_image_save_path
|
||||
@@ -356,8 +311,7 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
file=sys.stderr,
|
||||
)
|
||||
pdf_info_dict = parse_pdf_by_model(
|
||||
s3_pdf_path,
|
||||
s3_config,
|
||||
pdf_bytes,
|
||||
model_output_json_list,
|
||||
save_path,
|
||||
book_name,
|
||||
@@ -388,233 +342,6 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
return jso
|
||||
|
||||
|
||||
"""
|
||||
统一处理逻辑
|
||||
1.先调用parse_pdf对文本类pdf进行处理
|
||||
2.再调用ocr_dropped_parse_pdf,对之前drop的pdf进行处理
|
||||
"""
|
||||
|
||||
|
||||
def uni_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
jso = parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
|
||||
jso = ocr_dropped_parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
|
||||
return jso
|
||||
|
||||
|
||||
# 专门用来跑被drop的pdf,跑完之后需要把need_drop字段置为false
|
||||
def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
if not jso.get("need_drop", False):
|
||||
return jso
|
||||
else:
|
||||
jso = ocr_parse_pdf_core(
|
||||
jso, start_page_id=start_page_id, debug_mode=debug_mode
|
||||
)
|
||||
jso["need_drop"] = False
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
# 检测debug开关
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
return jso
|
||||
|
||||
jso = ocr_parse_pdf_core(jso, start_page_id=start_page_id, debug_mode=debug_mode)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_parse_pdf_core(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
s3_pdf_path = jso.get("file_location")
|
||||
s3_config = get_s3_config(s3_pdf_path)
|
||||
model_output_json_list = jso.get("doc_layout_result")
|
||||
data_source = get_data_source(jso)
|
||||
file_id = jso.get("file_id")
|
||||
book_name = f"{data_source}/{file_id}"
|
||||
try:
|
||||
save_path = s3_image_save_path
|
||||
image_s3_config = get_s3_config(save_path)
|
||||
start_time = time.time() # 记录开始时间
|
||||
# 先打印一下book_name和解析开始的时间
|
||||
logger.info(
|
||||
f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
pdf_info_dict = parse_pdf_by_ocr(
|
||||
s3_pdf_path,
|
||||
s3_config,
|
||||
model_output_json_list,
|
||||
save_path,
|
||||
book_name,
|
||||
pdf_model_profile=None,
|
||||
image_s3_config=image_s3_config,
|
||||
start_page_id=start_page_id,
|
||||
debug_mode=debug_mode,
|
||||
)
|
||||
pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
|
||||
jso["pdf_intermediate_dict"] = pdf_info_dict
|
||||
end_time = time.time() # 记录完成时间
|
||||
parse_time = int(end_time - start_time) # 计算执行时间
|
||||
# 解析完成后打印一下book_name和耗时
|
||||
logger.info(
|
||||
f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
jso["parse_time"] = parse_time
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
|
||||
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
markdown_content = ocr_mk_mm_markdown(pdf_intermediate_dict)
|
||||
jso["content"] = markdown_content
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_markdown_with_para(jso: dict, debug_mode=False) -> dict:
|
||||
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
|
||||
jso["content"] = markdown_content
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_markdown_with_para_and_pagination(jso: dict, debug_mode=False) -> dict:
|
||||
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
markdown_content = ocr_mk_mm_markdown_with_para_and_pagination(pdf_intermediate_dict)
|
||||
jso["content"] = markdown_content
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
# jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
# jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_markdown_with_para_for_qa(
|
||||
jso: dict, debug_mode=False
|
||||
) -> dict:
|
||||
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
|
||||
jso["content_ocr"] = markdown_content
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["mid_json_ocr"] = pdf_intermediate_dict
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_standard_format(jso: dict, debug_mode=False) -> dict:
|
||||
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
standard_format = ocr_mk_mm_standard_format(pdf_intermediate_dict)
|
||||
jso["content_list"] = standard_format
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
# 检测debug开关
|
||||
if debug_mode:
|
||||
@@ -695,5 +422,17 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> d
|
||||
return jso
|
||||
|
||||
|
||||
"""
|
||||
统一处理逻辑
|
||||
1.先调用parse_pdf对文本类pdf进行处理
|
||||
2.再调用ocr_dropped_parse_pdf,对之前drop的pdf进行处理
|
||||
"""
|
||||
|
||||
# def uni_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
# jso = parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
|
||||
# jso = ocr_dropped_parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
|
||||
# return jso
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pass
|
||||
|
||||
259
magic_pdf/pipeline_ocr.py
Normal file
259
magic_pdf/pipeline_ocr.py
Normal file
@@ -0,0 +1,259 @@
|
||||
import sys
|
||||
import time
|
||||
|
||||
from loguru import logger
|
||||
|
||||
from magic_pdf.dict2md.ocr_mkcontent import ocr_mk_mm_markdown, ocr_mk_nlp_markdown_with_para, \
|
||||
ocr_mk_mm_markdown_with_para_and_pagination, ocr_mk_mm_markdown_with_para, ocr_mk_mm_standard_format, \
|
||||
make_standard_format_with_para
|
||||
from magic_pdf.libs.commons import join_path, s3_image_save_path, formatted_time
|
||||
from magic_pdf.libs.json_compressor import JsonCompressor
|
||||
from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
|
||||
from magic_pdf.spark.base import get_data_source, exception_handler, get_pdf_bytes, get_bookname
|
||||
from magic_pdf.spark.s3 import get_s3_config
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
markdown_content = ocr_mk_mm_markdown(pdf_intermediate_dict)
|
||||
jso["content"] = markdown_content
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_markdown_with_para(jso: dict, mode, debug_mode=False) -> dict:
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
|
||||
if mode == "mm":
|
||||
markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
|
||||
elif mode == "nlp":
|
||||
markdown_content = ocr_mk_nlp_markdown_with_para(pdf_intermediate_dict)
|
||||
|
||||
jso["content"] = markdown_content
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_markdown_with_para_and_pagination(jso: dict, debug_mode=False) -> dict:
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
markdown_content = ocr_mk_mm_markdown_with_para_and_pagination(pdf_intermediate_dict)
|
||||
jso["content"] = markdown_content
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
# jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
# jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_markdown_with_para_for_qa(
|
||||
jso: dict, debug_mode=False
|
||||
) -> dict:
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
|
||||
jso["content_ocr"] = markdown_content
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["mid_json_ocr"] = pdf_intermediate_dict
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_standard_format(jso: dict, debug_mode=False) -> dict:
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
standard_format = ocr_mk_mm_standard_format(pdf_intermediate_dict)
|
||||
jso["content_list"] = standard_format
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_pdf_intermediate_dict_to_standard_format_with_para(jso: dict, debug_mode=False) -> dict:
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
standard_format = make_standard_format_with_para(pdf_intermediate_dict)
|
||||
jso["content_list"] = standard_format
|
||||
logger.info(
|
||||
f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_parse_pdf_core(pdf_bytes, model_output_json_list, book_name, start_page_id=0, debug_mode=False):
|
||||
save_path = s3_image_save_path
|
||||
image_s3_config = get_s3_config(save_path)
|
||||
start_time = time.time() # 记录开始时间
|
||||
# 先打印一下book_name和解析开始的时间
|
||||
logger.info(
|
||||
f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
pdf_info_dict = parse_pdf_by_ocr(
|
||||
pdf_bytes,
|
||||
model_output_json_list,
|
||||
save_path,
|
||||
book_name,
|
||||
pdf_model_profile=None,
|
||||
image_s3_config=image_s3_config,
|
||||
start_page_id=start_page_id,
|
||||
debug_mode=debug_mode,
|
||||
)
|
||||
end_time = time.time() # 记录完成时间
|
||||
parse_time = int(end_time - start_time) # 计算执行时间
|
||||
# 解析完成后打印一下book_name和耗时
|
||||
logger.info(
|
||||
f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
|
||||
return pdf_info_dict, parse_time
|
||||
|
||||
|
||||
# 专门用来跑被drop的pdf,跑完之后需要把need_drop字段置为false
|
||||
def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
if not jso.get("need_drop", False):
|
||||
return jso
|
||||
else:
|
||||
try:
|
||||
pdf_bytes = get_pdf_bytes(jso)
|
||||
model_output_json_list = jso.get("doc_layout_result")
|
||||
book_name = get_bookname(jso)
|
||||
pdf_info_dict, parse_time = ocr_parse_pdf_core(
|
||||
pdf_bytes, model_output_json_list, book_name, start_page_id=start_page_id, debug_mode=debug_mode
|
||||
)
|
||||
jso["pdf_intermediate_dict"] = JsonCompressor.compress_json(pdf_info_dict)
|
||||
jso["parse_time"] = parse_time
|
||||
jso["need_drop"] = False
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def ocr_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
|
||||
# 检测debug开关
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
return jso
|
||||
try:
|
||||
pdf_bytes = get_pdf_bytes(jso)
|
||||
model_output_json_list = jso.get("doc_layout_result")
|
||||
book_name = get_bookname(jso)
|
||||
pdf_info_dict, parse_time = ocr_parse_pdf_core(pdf_bytes, model_output_json_list, book_name,
|
||||
start_page_id=start_page_id, debug_mode=debug_mode)
|
||||
jso["pdf_intermediate_dict"] = JsonCompressor.compress_json(pdf_info_dict)
|
||||
jso["parse_time"] = parse_time
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
68
magic_pdf/pipeline_txt.py
Normal file
68
magic_pdf/pipeline_txt.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""
|
||||
文本型pdf转化为统一清洗格式
|
||||
"""
|
||||
|
||||
# TODO 移动到spark/目录下
|
||||
|
||||
from loguru import logger
|
||||
from magic_pdf.dict2md.mkcontent import mk_mm_markdown, mk_universal_format
|
||||
from magic_pdf.libs.commons import join_path
|
||||
from magic_pdf.libs.json_compressor import JsonCompressor
|
||||
from magic_pdf.spark import exception_handler, get_data_source
|
||||
|
||||
|
||||
def txt_pdf_to_standard_format(jso: dict, debug_mode=False) -> dict:
|
||||
"""
|
||||
变成统一的标准格式
|
||||
"""
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop")
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
standard_format = mk_universal_format(pdf_intermediate_dict)
|
||||
jso["content_list"] = standard_format
|
||||
logger.info(f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
|
||||
|
||||
def txt_pdf_to_mm_markdown_format(jso: dict, debug_mode=False) -> dict:
|
||||
"""
|
||||
变成多模态的markdown格式
|
||||
"""
|
||||
if debug_mode:
|
||||
pass
|
||||
else: # 如果debug没开,则检测是否有needdrop字段
|
||||
if jso.get("need_drop", False):
|
||||
book_name = join_path(get_data_source(jso), jso["file_id"])
|
||||
logger.info(f"book_name is:{book_name} need drop")
|
||||
jso["dropped"] = True
|
||||
return jso
|
||||
try:
|
||||
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
|
||||
# 将 pdf_intermediate_dict 解压
|
||||
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
|
||||
standard_format = mk_universal_format(pdf_intermediate_dict)
|
||||
mm_content = mk_mm_markdown(standard_format)
|
||||
jso["content_list"] = mm_content
|
||||
logger.info(f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",)
|
||||
# 把无用的信息清空
|
||||
jso["doc_layout_result"] = ""
|
||||
jso["pdf_intermediate_dict"] = ""
|
||||
jso["pdf_meta"] = ""
|
||||
except Exception as e:
|
||||
jso = exception_handler(jso, e)
|
||||
return jso
|
||||
@@ -28,3 +28,26 @@ def construct_page_component(page_id, image_info, table_info, text_blocks_prepr
|
||||
return_dict['footnote_bboxes_tmp'] = footnote_bboxes_tmp
|
||||
|
||||
return return_dict
|
||||
|
||||
|
||||
def ocr_construct_page_component(blocks, layout_bboxes, page_id, page_w, page_h, layout_tree,
|
||||
images, tables, interline_equations, inline_equations,
|
||||
dropped_text_block, dropped_image_block, dropped_table_block, dropped_equation_block,
|
||||
need_remove_spans_bboxes_dict):
|
||||
return_dict = {
|
||||
'preproc_blocks': blocks,
|
||||
'layout_bboxes': layout_bboxes,
|
||||
'page_idx': page_id,
|
||||
'page_size': [page_w, page_h],
|
||||
'_layout_tree': layout_tree,
|
||||
'images': images,
|
||||
'tables': tables,
|
||||
'interline_equations': interline_equations,
|
||||
'inline_equations': inline_equations,
|
||||
'droped_text_block': dropped_text_block,
|
||||
'droped_image_block': dropped_image_block,
|
||||
'droped_table_block': dropped_table_block,
|
||||
'dropped_equation_block': dropped_equation_block,
|
||||
'droped_bboxes': need_remove_spans_bboxes_dict,
|
||||
}
|
||||
return return_dict
|
||||
@@ -44,10 +44,15 @@ def remove_spans_by_bboxes_dict(spans, need_remove_spans_bboxes_dict):
|
||||
# logger.info(f"remove spans by bbox dict, drop_tag: {drop_tag}, removed_bboxes: {removed_bboxes}")
|
||||
need_remove_spans = []
|
||||
for span in spans:
|
||||
# 通过判断span的bbox是否在removed_bboxes中, 判断是否需要删除该span
|
||||
for removed_bbox in removed_bboxes:
|
||||
if calculate_overlap_area_in_bbox1_area_ratio(span['bbox'], removed_bbox) > 0.5:
|
||||
need_remove_spans.append(span)
|
||||
break
|
||||
# 当drop_tag为DropTag.FOOTNOTE时, 判断span是否在removed_bboxes中任意一个的下方,如果是,则删除该span
|
||||
elif drop_tag == DropTag.FOOTNOTE and (span['bbox'][1]+span['bbox'][3])/2 > removed_bbox[3] and removed_bbox[0] < (span['bbox'][0]+span['bbox'][2])/2 < removed_bbox[2]:
|
||||
need_remove_spans.append(span)
|
||||
break
|
||||
|
||||
for span in need_remove_spans:
|
||||
spans.remove(span)
|
||||
|
||||
0
magic_pdf/spark/__init__.py
Normal file
0
magic_pdf/spark/__init__.py
Normal file
50
magic_pdf/spark/base.py
Normal file
50
magic_pdf/spark/base.py
Normal file
@@ -0,0 +1,50 @@
|
||||
|
||||
from loguru import logger
|
||||
|
||||
from magic_pdf.libs.commons import read_file
|
||||
from magic_pdf.libs.drop_reason import DropReason
|
||||
|
||||
from magic_pdf.spark.s3 import get_s3_config
|
||||
|
||||
|
||||
def get_data_source(jso: dict):
|
||||
data_source = jso.get("data_source")
|
||||
if data_source is None:
|
||||
data_source = jso.get("file_source")
|
||||
return data_source
|
||||
|
||||
|
||||
def get_data_type(jso: dict):
|
||||
data_type = jso.get("data_type")
|
||||
if data_type is None:
|
||||
data_type = jso.get("file_type")
|
||||
return data_type
|
||||
|
||||
|
||||
def get_bookid(jso: dict):
|
||||
book_id = jso.get("bookid")
|
||||
if book_id is None:
|
||||
book_id = jso.get("original_file_id")
|
||||
return book_id
|
||||
|
||||
|
||||
def exception_handler(jso: dict, e):
|
||||
logger.exception(e)
|
||||
jso["need_drop"] = True
|
||||
jso["drop_reason"] = DropReason.Exception
|
||||
jso["exception"] = f"ERROR: {e}"
|
||||
return jso
|
||||
|
||||
|
||||
def get_bookname(jso: dict):
|
||||
data_source = get_data_source(jso)
|
||||
file_id = jso.get("file_id")
|
||||
book_name = f"{data_source}/{file_id}"
|
||||
return book_name
|
||||
|
||||
|
||||
def get_pdf_bytes(jso: dict):
|
||||
pdf_s3_path = jso.get("file_location")
|
||||
s3_config = get_s3_config(pdf_s3_path)
|
||||
pdf_bytes = read_file(pdf_s3_path, s3_config)
|
||||
return pdf_bytes
|
||||
@@ -2,10 +2,13 @@
|
||||
import boto3
|
||||
from botocore.client import Config
|
||||
|
||||
from app.common import s3_buckets, s3_clusters, get_cluster_name, s3_users
|
||||
import re
|
||||
import random
|
||||
from typing import List, Union
|
||||
try:
|
||||
from app.config import s3_buckets, s3_clusters, get_cluster_name, s3_users
|
||||
except ImportError:
|
||||
from magic_pdf.config import s3_buckets, s3_clusters, get_cluster_name, s3_users
|
||||
|
||||
__re_s3_path = re.compile("^s3a?://([^/]+)(?:/(.*))?$")
|
||||
def get_s3_config(path: Union[str, List[str]], outside=False):
|
||||
@@ -54,8 +54,8 @@ def convert_to_train_format(jso: dict) -> []:
|
||||
n_bbox = {"category_id": 10, "bbox": inter_equation["bbox"]}
|
||||
bboxes.append(n_bbox)
|
||||
|
||||
for footnote in v['footnote_bboxes_tmp']:
|
||||
n_bbox = {"category_id": 5, "bbox": footnote["bbox"]}
|
||||
for footnote_bbox in v["bak_footer_note_bboxes"]:
|
||||
n_bbox = {"category_id": 5, "bbox": list(footnote_bbox)}
|
||||
bboxes.append(n_bbox)
|
||||
|
||||
info["bboxes"] = bboxes
|
||||
|
||||
@@ -46,6 +46,8 @@ def indicator_cal(json_standard,json_test):
|
||||
'''批量读取中间生成的json文件'''
|
||||
test_inline_equations=[]
|
||||
test_interline_equations=[]
|
||||
test_inline_euqations_bboxs=[]
|
||||
test_interline_equations_bboxs=[]
|
||||
test_dropped_text_bboxes=[]
|
||||
test_dropped_text_tag=[]
|
||||
test_dropped_image_bboxes=[]
|
||||
@@ -58,15 +60,20 @@ def indicator_cal(json_standard,json_test):
|
||||
mid_json=pd.DataFrame(i)
|
||||
mid_json=mid_json.iloc[:,:-1]
|
||||
for j1 in mid_json.loc['inline_equations',:]:
|
||||
page_in=[]
|
||||
page_in_text=[]
|
||||
page_in_bbox=[]
|
||||
for k1 in j1:
|
||||
page_in.append(k1['latex_text'])
|
||||
test_inline_equations.append(page_in)
|
||||
page_in_text.append(k1['latex_text'])
|
||||
page_in_bbox.append(k1['bbox'])
|
||||
test_inline_equations.append(page_in_text)
|
||||
test_inline_euqations_bboxs.append(page_in_bbox)
|
||||
for j2 in mid_json.loc['interline_equations',:]:
|
||||
page_in=[]
|
||||
page_in_text=[]
|
||||
page_in_bbox=[]
|
||||
for k2 in j2:
|
||||
page_in.append(k2['latex_text'])
|
||||
test_interline_equations.append(page_in)
|
||||
page_in_text.append(k2['latex_text'])
|
||||
test_interline_equations.append(page_in_text)
|
||||
test_interline_equations_bboxs.append(page_in_bbox)
|
||||
|
||||
for j3 in mid_json.loc['droped_text_block',:]:
|
||||
page_in_bbox=[]
|
||||
@@ -101,6 +108,8 @@ def indicator_cal(json_standard,json_test):
|
||||
|
||||
standard_inline_equations=[]
|
||||
standard_interline_equations=[]
|
||||
standard_inline_euqations_bboxs=[]
|
||||
standard_interline_equations_bboxs=[]
|
||||
standard_dropped_text_bboxes=[]
|
||||
standard_dropped_text_tag=[]
|
||||
standard_dropped_image_bboxes=[]
|
||||
@@ -113,15 +122,21 @@ def indicator_cal(json_standard,json_test):
|
||||
mid_json=pd.DataFrame(i)
|
||||
mid_json=mid_json.iloc[:,:-1]
|
||||
for j1 in mid_json.loc['inline_equations',:]:
|
||||
page_in=[]
|
||||
page_in_text=[]
|
||||
page_in_bbox=[]
|
||||
for k1 in j1:
|
||||
page_in.append(k1['latex_text'])
|
||||
standard_inline_equations.append(page_in)
|
||||
page_in_text.append(k1['latex_text'])
|
||||
page_in_bbox.append(k1['bbox'])
|
||||
standard_inline_equations.append(page_in_text)
|
||||
standard_inline_euqations_bboxs.append(page_in_bbox)
|
||||
for j2 in mid_json.loc['interline_equations',:]:
|
||||
page_in=[]
|
||||
page_in_text=[]
|
||||
page_in_bbox=[]
|
||||
for k2 in j2:
|
||||
page_in.append(k2['latex_text'])
|
||||
standard_interline_equations.append(page_in)
|
||||
page_in_text.append(k2['latex_text'])
|
||||
page_in_bbox.append(k2['bbox'])
|
||||
standard_interline_equations.append(page_in_text)
|
||||
standard_interline_equations_bboxs.append(page_in_bbox)
|
||||
for j3 in mid_json.loc['droped_text_block',:]:
|
||||
page_in_bbox=[]
|
||||
page_in_tag=[]
|
||||
@@ -195,6 +210,9 @@ def indicator_cal(json_standard,json_test):
|
||||
inline_equations_edit=np.mean(dis1)
|
||||
inline_equations_bleu=np.mean(bleu1)
|
||||
|
||||
'''行内公式bbox匹配相关指标'''
|
||||
inline_equations_bbox_report=bbox_match_indicator(test_inline_euqations_bboxs,standard_inline_euqations_bboxs)
|
||||
|
||||
|
||||
'''行间公式编辑距离和bleu'''
|
||||
dis2=[]
|
||||
@@ -217,6 +235,10 @@ def indicator_cal(json_standard,json_test):
|
||||
interline_equations_bleu=np.mean(bleu2)
|
||||
|
||||
|
||||
'''行间公式bbox匹配相关指标'''
|
||||
interline_equations_bbox_report=bbox_match_indicator(test_interline_equations_bboxs,standard_interline_equations_bboxs)
|
||||
|
||||
|
||||
|
||||
|
||||
'''可以先检查page和bbox数量是否一致'''
|
||||
@@ -289,87 +311,11 @@ def indicator_cal(json_standard,json_test):
|
||||
|
||||
'''dropped_image_block的bbox匹配相关指标'''
|
||||
'''有数据格式不一致的问题'''
|
||||
image_block_report=bbox_match_indicator(test_dropped_image_bboxes,standard_dropped_image_bboxes)
|
||||
|
||||
test_image_bbox=[]
|
||||
standard_image_bbox=[]
|
||||
for a,b in zip(test_dropped_image_bboxes,standard_dropped_image_bboxes):
|
||||
|
||||
test_page_bbox=[]
|
||||
standard_page_bbox=[]
|
||||
if len(a)==0 and len(b)==0:
|
||||
pass
|
||||
else:
|
||||
for i in b:
|
||||
if len(i)!=4:
|
||||
continue
|
||||
else:
|
||||
judge=0
|
||||
standard_page_bbox.append(1)
|
||||
for j in a:
|
||||
if bbox_offset(i,j):
|
||||
judge=1
|
||||
test_page_bbox.append(1)
|
||||
break
|
||||
if judge==0:
|
||||
test_page_bbox.append(0)
|
||||
|
||||
diff_num=len(a)+test_page_bbox.count(0)-len(b)
|
||||
if diff_num>0:#有多删的情况出现
|
||||
test_page_bbox.extend([1]*diff_num)
|
||||
standard_page_bbox.extend([0]*diff_num)
|
||||
|
||||
|
||||
test_image_bbox.extend(test_page_bbox)
|
||||
standard_image_bbox.extend(standard_page_bbox)
|
||||
|
||||
|
||||
image_block_report = {}
|
||||
image_block_report['accuracy']=metrics.accuracy_score(standard_image_bbox,test_image_bbox)
|
||||
image_block_report['precision']=metrics.precision_score(standard_image_bbox,test_image_bbox)
|
||||
image_block_report['recall']=metrics.recall_score(standard_image_bbox,test_image_bbox)
|
||||
image_block_report['f1_score']=metrics.f1_score(standard_image_bbox,test_image_bbox)
|
||||
|
||||
|
||||
|
||||
|
||||
'''dropped_table_block的bbox匹配相关指标'''
|
||||
test_table_bbox=[]
|
||||
standard_table_bbox=[]
|
||||
for a,b in zip(test_dropped_table_bboxes,standard_dropped_table_bboxes):
|
||||
|
||||
test_page_bbox=[]
|
||||
standard_page_bbox=[]
|
||||
if len(a)==0 and len(b)==0:
|
||||
pass
|
||||
else:
|
||||
for i in b:
|
||||
if len(i)!=4:
|
||||
continue
|
||||
else:
|
||||
judge=0
|
||||
standard_page_bbox.append(1)
|
||||
for j in a:
|
||||
if bbox_offset(i,j):
|
||||
judge=1
|
||||
test_page_bbox.append(1)
|
||||
break
|
||||
if judge==0:
|
||||
test_page_bbox.append(0)
|
||||
|
||||
diff_num=len(a)+test_page_bbox.count(0)-len(b)
|
||||
if diff_num>0:#有多删的情况出现
|
||||
test_page_bbox.extend([1]*diff_num)
|
||||
standard_page_bbox.extend([0]*diff_num)
|
||||
|
||||
|
||||
test_table_bbox.extend(test_page_bbox)
|
||||
standard_table_bbox.extend(standard_page_bbox)
|
||||
|
||||
table_block_report = {}
|
||||
table_block_report['accuracy']=metrics.accuracy_score(standard_table_bbox,test_table_bbox)
|
||||
table_block_report['precision']=metrics.precision_score(standard_table_bbox,test_table_bbox)
|
||||
table_block_report['recall']=metrics.recall_score(standard_table_bbox,test_table_bbox)
|
||||
table_block_report['f1_score']=metrics.f1_score(standard_table_bbox,test_table_bbox)
|
||||
table_block_report=bbox_match_indicator(test_dropped_table_bboxes,standard_dropped_table_bboxes)
|
||||
|
||||
|
||||
'''阅读顺序编辑距离的均值'''
|
||||
@@ -392,6 +338,8 @@ def indicator_cal(json_standard,json_test):
|
||||
output['行间公式平均编辑距离']=[interline_equations_edit]
|
||||
output['行内公式平均bleu']=[inline_equations_bleu]
|
||||
output['行间公式平均bleu']=[interline_equations_bleu]
|
||||
output['行内公式识别相关指标']=[inline_equations_bbox_report]
|
||||
output['行间公式识别相关指标']=[interline_equations_bbox_report]
|
||||
output['阅读顺序平均编辑距离']=[preproc_num_edit]
|
||||
output['分段准确率']=[acc_para]
|
||||
output['删除的text block的相关指标']=[text_block_report]
|
||||
@@ -434,6 +382,52 @@ def bbox_offset(b_t,b_s):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
'''bbox匹配和对齐函数,输出相关指标'''
|
||||
'''输入的是以page为单位的bbox列表'''
|
||||
def bbox_match_indicator(test_bbox_list,standard_bbox_list):
|
||||
|
||||
test_bbox=[]
|
||||
standard_bbox=[]
|
||||
for a,b in zip(test_bbox_list,standard_bbox_list):
|
||||
|
||||
test_page_bbox=[]
|
||||
standard_page_bbox=[]
|
||||
if len(a)==0 and len(b)==0:
|
||||
pass
|
||||
else:
|
||||
for i in b:
|
||||
if len(i)!=4:
|
||||
continue
|
||||
else:
|
||||
judge=0
|
||||
standard_page_bbox.append(1)
|
||||
for j in a:
|
||||
if bbox_offset(i,j):
|
||||
judge=1
|
||||
test_page_bbox.append(1)
|
||||
break
|
||||
if judge==0:
|
||||
test_page_bbox.append(0)
|
||||
|
||||
diff_num=len(a)+test_page_bbox.count(0)-len(b)
|
||||
if diff_num>0:#有多删的情况出现
|
||||
test_page_bbox.extend([1]*diff_num)
|
||||
standard_page_bbox.extend([0]*diff_num)
|
||||
|
||||
|
||||
test_bbox.extend(test_page_bbox)
|
||||
standard_bbox.extend(standard_page_bbox)
|
||||
|
||||
|
||||
block_report = {}
|
||||
block_report['accuracy']=metrics.accuracy_score(standard_bbox,test_bbox)
|
||||
block_report['precision']=metrics.precision_score(standard_bbox,test_bbox)
|
||||
block_report['recall']=metrics.recall_score(standard_bbox,test_bbox)
|
||||
block_report['f1_score']=metrics.f1_score(standard_bbox,test_bbox)
|
||||
|
||||
return block_report
|
||||
|
||||
|
||||
|
||||
|
||||
456
tests/pdf_indicator/overall_indicator.py
Normal file
456
tests/pdf_indicator/overall_indicator.py
Normal file
@@ -0,0 +1,456 @@
|
||||
import json
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from nltk.translate.bleu_score import sentence_bleu
|
||||
import argparse
|
||||
from sklearn.metrics import classification_report
|
||||
from collections import Counter
|
||||
from sklearn import metrics
|
||||
from pandas import isnull
|
||||
|
||||
|
||||
|
||||
def indicator_cal(json_standard,json_test):
|
||||
|
||||
json_standard = pd.DataFrame(json_standard)
|
||||
json_test = pd.DataFrame(json_test)
|
||||
|
||||
|
||||
|
||||
'''数据集总体指标'''
|
||||
|
||||
a=json_test[['id','mid_json']]
|
||||
b=json_standard[['id','mid_json','pass_label']]
|
||||
|
||||
a=a.drop_duplicates(subset='id',keep='first')
|
||||
a.index=range(len(a))
|
||||
b=b.drop_duplicates(subset='id',keep='first')
|
||||
b.index=range(len(b))
|
||||
|
||||
outer_merge=pd.merge(a,b,on='id',how='outer')
|
||||
outer_merge.columns=['id','standard_mid_json','test_mid_json','pass_label']
|
||||
standard_exist=outer_merge.standard_mid_json.apply(lambda x: not isnull(x))
|
||||
test_exist=outer_merge.test_mid_json.apply(lambda x: not isnull(x))
|
||||
|
||||
overall_report = {}
|
||||
overall_report['accuracy']=metrics.accuracy_score(standard_exist,test_exist)
|
||||
overall_report['precision']=metrics.precision_score(standard_exist,test_exist)
|
||||
overall_report['recall']=metrics.recall_score(standard_exist,test_exist)
|
||||
overall_report['f1_score']=metrics.f1_score(standard_exist,test_exist)
|
||||
|
||||
|
||||
inner_merge=pd.merge(a,b,on='id',how='inner')
|
||||
inner_merge.columns=['id','standard_mid_json','test_mid_json','pass_label']
|
||||
json_standard = inner_merge['standard_mid_json']#check一下是否对齐
|
||||
json_test = inner_merge['test_mid_json']
|
||||
|
||||
|
||||
|
||||
|
||||
'''批量读取中间生成的json文件'''
|
||||
test_inline_equations=[]
|
||||
test_interline_equations=[]
|
||||
test_inline_euqations_bboxs=[]
|
||||
test_interline_equations_bboxs=[]
|
||||
test_dropped_text_bboxes=[]
|
||||
test_dropped_text_tag=[]
|
||||
test_dropped_image_bboxes=[]
|
||||
test_dropped_table_bboxes=[]
|
||||
test_preproc_num=[]#阅读顺序
|
||||
test_para_num=[]
|
||||
test_para_text=[]
|
||||
|
||||
for i in json_test:
|
||||
mid_json=pd.DataFrame(i)
|
||||
mid_json=mid_json.iloc[:,:-1]
|
||||
for j1 in mid_json.loc['inline_equations',:]:
|
||||
page_in_text=[]
|
||||
page_in_bbox=[]
|
||||
for k1 in j1:
|
||||
page_in_text.append(k1['latex_text'])
|
||||
page_in_bbox.append(k1['bbox'])
|
||||
test_inline_equations.append(page_in_text)
|
||||
test_inline_euqations_bboxs.append(page_in_bbox)
|
||||
for j2 in mid_json.loc['interline_equations',:]:
|
||||
page_in_text=[]
|
||||
page_in_bbox=[]
|
||||
for k2 in j2:
|
||||
page_in_text.append(k2['latex_text'])
|
||||
page_in_bbox.append(k2['bbox'])
|
||||
test_interline_equations.append(page_in_text)
|
||||
test_interline_equations_bboxs.append(page_in_bbox)
|
||||
|
||||
for j3 in mid_json.loc['droped_text_block',:]:
|
||||
page_in_bbox=[]
|
||||
page_in_tag=[]
|
||||
for k3 in j3:
|
||||
page_in_bbox.append(k3['bbox'])
|
||||
#如果k3中存在tag这个key
|
||||
if 'tag' in k3.keys():
|
||||
page_in_tag.append(k3['tag'])
|
||||
else:
|
||||
page_in_tag.append('None')
|
||||
test_dropped_text_tag.append(page_in_tag)
|
||||
test_dropped_text_bboxes.append(page_in_bbox)
|
||||
for j4 in mid_json.loc['droped_image_block',:]:
|
||||
test_dropped_image_bboxes.append(j4)
|
||||
for j5 in mid_json.loc['droped_table_block',:]:
|
||||
test_dropped_table_bboxes.append(j5)
|
||||
for j6 in mid_json.loc['preproc_blocks',:]:
|
||||
page_in=[]
|
||||
for k6 in j6:
|
||||
page_in.append(k6['number'])
|
||||
test_preproc_num.append(page_in)
|
||||
|
||||
test_pdf_text=[]
|
||||
for j7 in mid_json.loc['para_blocks',:]:
|
||||
test_para_num.append(len(j7))
|
||||
for k7 in j7:
|
||||
test_pdf_text.append(k7['text'])
|
||||
test_para_text.append(test_pdf_text)
|
||||
|
||||
|
||||
|
||||
standard_inline_equations=[]
|
||||
standard_interline_equations=[]
|
||||
standard_inline_euqations_bboxs=[]
|
||||
standard_interline_equations_bboxs=[]
|
||||
standard_dropped_text_bboxes=[]
|
||||
standard_dropped_text_tag=[]
|
||||
standard_dropped_image_bboxes=[]
|
||||
standard_dropped_table_bboxes=[]
|
||||
standard_preproc_num=[]#阅读顺序
|
||||
standard_para_num=[]
|
||||
standard_para_text=[]
|
||||
|
||||
for i in json_standard:
|
||||
mid_json=pd.DataFrame(i)
|
||||
mid_json=mid_json.iloc[:,:-1]
|
||||
for j1 in mid_json.loc['inline_equations',:]:
|
||||
page_in_text=[]
|
||||
page_in_bbox=[]
|
||||
for k1 in j1:
|
||||
page_in_text.append(k1['latex_text'])
|
||||
page_in_bbox.append(k1['bbox'])
|
||||
standard_inline_equations.append(page_in_text)
|
||||
standard_inline_euqations_bboxs.append(page_in_bbox)
|
||||
for j2 in mid_json.loc['interline_equations',:]:
|
||||
page_in_text=[]
|
||||
page_in_bbox=[]
|
||||
for k2 in j2:
|
||||
page_in_text.append(k2['latex_text'])
|
||||
page_in_bbox.append(k2['bbox'])
|
||||
standard_interline_equations.append(page_in_text)
|
||||
standard_interline_equations_bboxs.append(page_in_bbox)
|
||||
for j3 in mid_json.loc['droped_text_block',:]:
|
||||
page_in_bbox=[]
|
||||
page_in_tag=[]
|
||||
for k3 in j3:
|
||||
page_in_bbox.append(k3['bbox'])
|
||||
if 'tag' in k3.keys():
|
||||
page_in_tag.append(k3['tag'])
|
||||
else:
|
||||
page_in_tag.append('None')
|
||||
standard_dropped_text_bboxes.append(page_in_bbox)
|
||||
standard_dropped_text_tag.append(page_in_tag)
|
||||
for j4 in mid_json.loc['droped_image_block',:]:
|
||||
standard_dropped_image_bboxes.append(j4)
|
||||
for j5 in mid_json.loc['droped_table_block',:]:
|
||||
standard_dropped_table_bboxes.append(j5)
|
||||
for j6 in mid_json.loc['preproc_blocks',:]:
|
||||
page_in=[]
|
||||
for k6 in j6:
|
||||
page_in.append(k6['number'])
|
||||
standard_preproc_num.append(page_in)
|
||||
|
||||
standard_pdf_text=[]
|
||||
for j7 in mid_json.loc['para_blocks',:]:
|
||||
standard_para_num.append(len(j7))
|
||||
for k7 in j7:
|
||||
standard_pdf_text.append(k7['text'])
|
||||
standard_para_text.append(standard_pdf_text)
|
||||
|
||||
|
||||
"""
|
||||
在计算指标之前最好先确认基本统计信息是否一致
|
||||
"""
|
||||
|
||||
|
||||
'''
|
||||
计算pdf之间的总体编辑距离和bleu
|
||||
这里只计算正例的pdf
|
||||
'''
|
||||
|
||||
test_para_text=np.asarray(test_para_text, dtype = object)[inner_merge['pass_label']=='yes']
|
||||
standard_para_text=np.asarray(standard_para_text, dtype = object)[inner_merge['pass_label']=='yes']
|
||||
|
||||
pdf_dis=[]
|
||||
pdf_bleu=[]
|
||||
for a,b in zip(test_para_text,standard_para_text):
|
||||
a1=[ ''.join(i) for i in a]
|
||||
b1=[ ''.join(i) for i in b]
|
||||
pdf_dis.append(Levenshtein_Distance(a1,b1))
|
||||
pdf_bleu.append(sentence_bleu([a1],b1))
|
||||
overall_report['pdf间的平均编辑距离']=np.mean(pdf_dis)
|
||||
overall_report['pdf间的平均bleu']=np.mean(pdf_bleu)
|
||||
|
||||
|
||||
'''行内公式和行间公式的编辑距离和bleu'''
|
||||
inline_equations_edit_bleu=equations_indicator(test_inline_euqations_bboxs,standard_inline_euqations_bboxs,test_inline_equations,standard_inline_equations)
|
||||
interline_equations_edit_bleu=equations_indicator(test_interline_equations_bboxs,standard_interline_equations_bboxs,test_interline_equations,standard_interline_equations)
|
||||
|
||||
'''行内公式bbox匹配相关指标'''
|
||||
inline_equations_bbox_report=bbox_match_indicator(test_inline_euqations_bboxs,standard_inline_euqations_bboxs)
|
||||
|
||||
'''行间公式bbox匹配相关指标'''
|
||||
interline_equations_bbox_report=bbox_match_indicator(test_interline_equations_bboxs,standard_interline_equations_bboxs)
|
||||
|
||||
'''可以先检查page和bbox数量是否一致'''
|
||||
'''dropped_text_block的bbox匹配相关指标'''
|
||||
test_text_bbox=[]
|
||||
standard_text_bbox=[]
|
||||
test_tag=[]
|
||||
standard_tag=[]
|
||||
|
||||
index=0
|
||||
for a,b in zip(test_dropped_text_bboxes,standard_dropped_text_bboxes):
|
||||
test_page_tag=[]
|
||||
standard_page_tag=[]
|
||||
test_page_bbox=[]
|
||||
standard_page_bbox=[]
|
||||
if len(a)==0 and len(b)==0:
|
||||
pass
|
||||
else:
|
||||
for i in range(len(b)):
|
||||
judge=0
|
||||
standard_page_tag.append(standard_dropped_text_tag[index][i])
|
||||
standard_page_bbox.append(1)
|
||||
for j in range(len(a)):
|
||||
if bbox_offset(b[i],a[j]):
|
||||
judge=1
|
||||
test_page_tag.append(test_dropped_text_tag[index][j])
|
||||
test_page_bbox.append(1)
|
||||
break
|
||||
if judge==0:
|
||||
test_page_tag.append('None')
|
||||
test_page_bbox.append(0)
|
||||
|
||||
|
||||
if len(test_dropped_text_tag[index])+test_page_tag.count('None')>len(standard_dropped_text_tag[index]):#有多删的情况出现
|
||||
test_page_tag1=test_page_tag.copy()
|
||||
if 'None' in test_page_tag:
|
||||
test_page_tag1=test_page_tag1.remove('None')
|
||||
else:
|
||||
test_page_tag1=test_page_tag
|
||||
|
||||
diff=list((Counter(test_dropped_text_tag[index]) - Counter(test_page_tag1)).elements())
|
||||
|
||||
test_page_tag.extend(diff)
|
||||
standard_page_tag.extend(['None']*len(diff))
|
||||
test_page_bbox.extend([1]*len(diff))
|
||||
standard_page_bbox.extend([0]*len(diff))
|
||||
|
||||
test_tag.extend(test_page_tag)
|
||||
standard_tag.extend(standard_page_tag)
|
||||
test_text_bbox.extend(test_page_bbox)
|
||||
standard_text_bbox.extend(standard_page_bbox)
|
||||
|
||||
index+=1
|
||||
|
||||
|
||||
text_block_report = {}
|
||||
text_block_report['accuracy']=metrics.accuracy_score(standard_text_bbox,test_text_bbox)
|
||||
text_block_report['precision']=metrics.precision_score(standard_text_bbox,test_text_bbox)
|
||||
text_block_report['recall']=metrics.recall_score(standard_text_bbox,test_text_bbox)
|
||||
text_block_report['f1_score']=metrics.f1_score(standard_text_bbox,test_text_bbox)
|
||||
|
||||
'''删除的text_block的tag的准确率,召回率和f1-score'''
|
||||
text_block_tag_report = classification_report(y_true=standard_tag , y_pred=test_tag,output_dict=True)
|
||||
del text_block_tag_report['None']
|
||||
del text_block_tag_report["macro avg"]
|
||||
del text_block_tag_report["weighted avg"]
|
||||
|
||||
|
||||
'''dropped_image_block的bbox匹配相关指标'''
|
||||
'''有数据格式不一致的问题'''
|
||||
image_block_report=bbox_match_indicator(test_dropped_image_bboxes,standard_dropped_image_bboxes)
|
||||
|
||||
|
||||
'''dropped_table_block的bbox匹配相关指标'''
|
||||
table_block_report=bbox_match_indicator(test_dropped_table_bboxes,standard_dropped_table_bboxes)
|
||||
|
||||
|
||||
'''阅读顺序编辑距离的均值'''
|
||||
preproc_num_dis=[]
|
||||
for a,b in zip(test_preproc_num,standard_preproc_num):
|
||||
preproc_num_dis.append(Levenshtein_Distance(a,b))
|
||||
preproc_num_edit=np.mean(preproc_num_dis)
|
||||
|
||||
|
||||
'''分段准确率'''
|
||||
test_para_num=np.array(test_para_num)
|
||||
standard_para_num=np.array(standard_para_num)
|
||||
acc_para=np.mean(test_para_num==standard_para_num)
|
||||
|
||||
|
||||
output=pd.DataFrame()
|
||||
output['总体指标']=[overall_report]
|
||||
output['行内公式平均编辑距离']=[inline_equations_edit_bleu[0]]
|
||||
output['行内公式平均bleu']=[inline_equations_edit_bleu[1]]
|
||||
output['行间公式平均编辑距离']=[interline_equations_edit_bleu[0]]
|
||||
output['行间公式平均bleu']=[interline_equations_edit_bleu[1]]
|
||||
output['行内公式识别相关指标']=[inline_equations_bbox_report]
|
||||
output['行间公式识别相关指标']=[interline_equations_bbox_report]
|
||||
output['阅读顺序平均编辑距离']=[preproc_num_edit]
|
||||
output['分段准确率']=[acc_para]
|
||||
output['删除的text block的相关指标']=[text_block_report]
|
||||
output['删除的image block的相关指标']=[image_block_report]
|
||||
output['删除的table block的相关指标']=[table_block_report]
|
||||
output['删除的text block的tag相关指标']=[text_block_tag_report]
|
||||
|
||||
|
||||
return output
|
||||
|
||||
|
||||
|
||||
# Edit-distance helper used by the reading-order and equation metrics.
def Levenshtein_Distance(str1, str2):
    """Return the Levenshtein (edit) distance between *str1* and *str2*.

    Classic dynamic-programming table: matrix[i][j] holds the distance
    between str1[:i] and str2[:j]; the first row/column are the pure
    insertion/deletion costs.  O(len(str1) * len(str2)) time and space.
    """
    matrix = [[i + j for j in range(len(str2) + 1)] for i in range(len(str1) + 1)]
    for i in range(1, len(str1) + 1):
        for j in range(1, len(str2) + 1):
            # Substitution is free when the characters already agree.
            cost = 0 if str1[i - 1] == str2[j - 1] else 1
            matrix[i][j] = min(matrix[i - 1][j] + 1,         # deletion
                               matrix[i][j - 1] + 1,         # insertion
                               matrix[i - 1][j - 1] + cost)  # substitution
    return matrix[len(str1)][len(str2)]
|
||||
|
||||
|
||||
# Decide whether two bboxes are close enough to count as the same block.
def bbox_offset(b_t, b_s):
    """Return True when the test bbox *b_t* and standard bbox *b_s*
    essentially coincide.

    Boxes are (x1, y1, x2, y2).  The boxes match when they are identical
    (union equals intersection) or the intersection exceeds 95% of the
    non-shared area (overlap / (union - overlap) > 0.95).
    """
    x1_t, y1_t, x2_t, y2_t = b_t
    x1_s, y1_s, x2_s, y2_s = b_s
    # Intersection rectangle.
    x1 = max(x1_t, x1_s)
    x2 = min(x2_t, x2_s)
    y1 = max(y1_t, y1_s)
    y2 = min(y2_t, y2_s)
    # Bug fix: for disjoint boxes both (x2 - x1) and (y2 - y1) are negative,
    # so their raw product came out positive and two far-apart boxes of
    # similar size were reported as a match.  Clamp the extents to zero.
    area_overlap = max(0, x2 - x1) * max(0, y2 - y1)
    # Area of the union of the two boxes.
    area_union = (x2_t - x1_t) * (y2_t - y1_t) + (x2_s - x1_s) * (y2_s - y1_s) - area_overlap
    if area_union - area_overlap == 0 or area_overlap / (area_union - area_overlap) > 0.95:
        return True
    else:
        return False
|
||||
|
||||
|
||||
# Match test bboxes against standard bboxes and report classification metrics.
# Both inputs are per-page lists of bboxes.
def bbox_match_indicator(test_bbox_list, standard_bbox_list):
    """Align dropped-block bboxes page by page and score the alignment.

    Every well-formed standard bbox contributes ground-truth label 1; its
    prediction is 1 when some test bbox on the same page overlaps it
    (per bbox_offset), else 0.  Test bboxes left over after matching count
    as extra deletions: label 0, prediction 1.  The flattened vectors are
    scored with sklearn's classification metrics.
    """
    predictions = []  # flattened per-bbox predictions (test side)
    labels = []       # flattened per-bbox ground truth (standard side)

    for page_test, page_standard in zip(test_bbox_list, standard_bbox_list):
        page_pred = []
        page_true = []
        # Pages where both sides are empty contribute nothing.
        if len(page_test) != 0 or len(page_standard) != 0:
            for std_box in page_standard:
                # Ignore malformed standard entries that are not 4-tuples.
                if len(std_box) != 4:
                    continue
                page_true.append(1)
                for tst_box in page_test:
                    if bbox_offset(std_box, tst_box):
                        page_pred.append(1)
                        break
                else:
                    page_pred.append(0)

            # Test bboxes beyond what the standard accounts for: the test
            # side deleted more blocks than it should have.
            surplus = len(page_test) + page_pred.count(0) - len(page_standard)
            if surplus > 0:
                page_pred.extend([1] * surplus)
                page_true.extend([0] * surplus)

        predictions.extend(page_pred)
        labels.extend(page_true)

    return {
        'accuracy': metrics.accuracy_score(labels, predictions),
        'precision': metrics.precision_score(labels, predictions),
        'recall': metrics.recall_score(labels, predictions),
        'f1_score': metrics.f1_score(labels, predictions),
    }
|
||||
|
||||
# Mean edit distance and BLEU over equations whose bboxes were matched.
def equations_indicator(test_euqations_bboxs, standard_euqations_bboxs, test_equations, standard_equations):
    """Pair equations by bbox overlap, then average their text similarity.

    Per page, each standard bbox is matched to the first overlapping test
    bbox (per bbox_offset); the corresponding equation strings are collected
    and compared with Levenshtein distance and sentence-level BLEU.

    Returns a (mean_edit_distance, mean_bleu) tuple.
    """
    matched_test = []
    matched_standard = []

    for page, (page_test, page_standard) in enumerate(zip(test_euqations_bboxs, standard_euqations_bboxs)):
        if len(page_test) == 0 and len(page_standard) == 0:
            continue
        for i in range(len(page_standard)):
            for j in range(len(page_test)):
                if bbox_offset(page_standard[i], page_test[j]):
                    matched_standard.append(standard_equations[page][i])
                    matched_test.append(test_equations[page][j])
                    break

    dis = []
    bleu = []
    for pred, ref in zip(matched_test, matched_standard):
        if len(pred) == 0 and len(ref) == 0:
            continue
        if pred == ref:
            # Identical strings: distance 0, perfect BLEU — skip the heavy calls.
            dis.append(0)
            bleu.append(1)
        else:
            dis.append(Levenshtein_Distance(pred, ref))
            bleu.append(sentence_bleu([pred], ref))

    # NOTE(review): np.mean of an empty list yields nan (with a warning) when
    # nothing matched — presumably the caller tolerates that; confirm.
    return (np.mean(dis), np.mean(bleu))
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Parse the CLI here rather than at module level so importing this file
    # for its helper functions has no side effects (the original called
    # parse_args() at import time).
    parser = argparse.ArgumentParser()
    parser.add_argument('--test', type=str,
                        help='path to the JSON-lines output of the run under test')
    parser.add_argument('--standard', type=str,
                        help='path to the ground-truth JSON-lines file')
    args = parser.parse_args()

    # One JSON object per line; use context managers so the file handles are
    # closed deterministically (the original leaked them inside the
    # comprehensions).
    with open(args.test, 'r', encoding='utf-8') as f:
        pdf_json_test = [json.loads(line) for line in f]
    with open(args.standard, 'r', encoding='utf-8') as f:
        pdf_json_standard = [json.loads(line) for line in f]

    overall_indicator = indicator_cal(pdf_json_standard, pdf_json_test)

    # The computed metrics are written to overall_indicator_output.json.
    overall_indicator.to_json('overall_indicator_output.json',
                              orient='records', lines=True, force_ascii=False)
|
||||
|
||||
@@ -3,7 +3,7 @@ import json
|
||||
import os
|
||||
from magic_pdf.libs.commons import fitz
|
||||
|
||||
from app.common.s3 import get_s3_config, get_s3_client
|
||||
from magic_pdf.spark import get_s3_config, get_s3_client
|
||||
from magic_pdf.libs.commons import join_path, json_dump_path, read_file, parse_bucket_key
|
||||
from loguru import logger
|
||||
|
||||
|
||||
Reference in New Issue
Block a user