Compare commits

...

14 Commits

Author SHA1 Message Date
kernel.h@qq.com
5db910260e add __init__.py 2024-03-27 15:11:19 +08:00
kernel.h@qq.com
433684c646 实现多模态markdown拼装 2024-03-27 14:46:56 +08:00
liusilu
fffee0ae97 Merge branch 'master' of https://github.com/myhloli/Magic-PDF 2024-03-27 10:03:46 +08:00
liusilu
e73606250e add pdf tools 2024-03-27 10:03:20 +08:00
kernel.h@qq.com
7162debc38 实现文本拼PDF解析结果装标准格式 2024-03-26 21:19:19 +08:00
赵小蒙
a343175d66 恢复pipeline 2024-03-26 18:10:53 +08:00
赵小蒙
671ce1d97c Merge remote-tracking branch 'origin/master' 2024-03-26 16:52:57 +08:00
赵小蒙
6f80beaa31 原pipeline拆分 2024-03-26 16:51:58 +08:00
许瑞
cb1b02e716 feat: disable auto include table title 2024-03-26 16:46:05 +08:00
赵小蒙
8ebb79a43a standard_format dump逻辑更新 2024-03-26 16:37:38 +08:00
赵小蒙
154eed1ade footnote drop逻辑更新 2024-03-26 16:37:07 +08:00
赵小蒙
b7652171ea make_standard_format_with_para逻辑更新 2024-03-26 16:36:45 +08:00
许瑞
f0c463ed6d Merge branch 'master' of https://github.com/myhloli/Magic-PDF 2024-03-26 10:17:05 +08:00
许瑞
efed5faa53 feat: modify foot note bbox tmp 2024-03-23 14:34:25 +08:00
10 changed files with 462 additions and 343 deletions

View File

@@ -11,7 +11,8 @@ from magic_pdf.dict2md.ocr_mkcontent import (
ocr_mk_nlp_markdown,
ocr_mk_mm_markdown,
ocr_mk_mm_standard_format,
ocr_mk_mm_markdown_with_para_and_pagination
ocr_mk_mm_markdown_with_para_and_pagination,
make_standard_format_with_para
)
from magic_pdf.libs.commons import join_path
from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
@@ -83,10 +84,11 @@ def ocr_parse_core(book_name, ocr_pdf_path, ocr_pdf_model_info, start_page_id=0,
with open(text_content_save_path, "w", encoding="utf-8") as f:
f.write(markdown_content)
standard_format = ocr_mk_mm_standard_format(pdf_info_dict)
standard_format = make_standard_format_with_para(pdf_info_dict)
standard_format_save_path = f"{save_path_with_bookname}/standard_format.txt"
with open(standard_format_save_path, "w", encoding="utf-8") as f:
f.write(str(standard_format))
# 将standard_format dump成json文本并保存
f.write(json.dumps(standard_format, ensure_ascii=False))
if __name__ == '__main__':

View File

@@ -109,12 +109,11 @@ def ocr_mk_mm_markdown_with_para_core(paras_of_layout, mode):
span_type = span.get('type')
content = ''
if span_type == ContentType.Text:
content = split_long_words(span['content'])
# content = span['content']
content = ocr_escape_special_markdown_char(split_long_words(span['content']))
elif span_type == ContentType.InlineEquation:
content = f"${span['content']}$"
content = f"${ocr_escape_special_markdown_char(span['content'])}$"
elif span_type == ContentType.InterlineEquation:
content = f"\n$$\n{span['content']}\n$$\n"
content = f"\n$$\n{ocr_escape_special_markdown_char(span['content'])}\n$$\n"
elif span_type in [ContentType.Image, ContentType.Table]:
if mode == 'mm':
content = f"\n![]({join_path(s3_image_save_path, span['image_path'])})\n"
@@ -129,16 +128,39 @@ def ocr_mk_mm_markdown_with_para_core(paras_of_layout, mode):
return page_markdown
def para_to_standard_format(para):
    """Convert one paragraph (a list of lines) into the standard content format.

    A single-line paragraph is delegated to ``line_to_standard_format``; a
    multi-line paragraph is merged into one ``{'type': 'text', ...}`` entry,
    counting the inline equations it contains.

    Returns an empty dict for an empty paragraph.
    """
    para_content = {}
    if len(para) == 1:
        para_content = line_to_standard_format(para[0])
    elif len(para) > 1:
        para_text = ''
        inline_equation_num = 0
        for line in para:
            for span in line['spans']:
                span_type = span.get('type')
                if span_type == ContentType.Text:
                    content = ocr_escape_special_markdown_char(split_long_words(span['content']))
                elif span_type == ContentType.InlineEquation:
                    content = f"${ocr_escape_special_markdown_char(span['content'])}$"
                    inline_equation_num += 1
                else:
                    # Fix: previously `content` kept its value from the preceding
                    # span (or was unbound on the first span) when an unsupported
                    # span type (image/table/interline equation) appeared,
                    # duplicating text or raising NameError. Skip such spans.
                    continue
                para_text += content + ' '
        para_content = {
            'type': 'text',
            'text': para_text,
            'inline_equation_num': inline_equation_num,
        }
    return para_content
def make_standard_format_with_para(pdf_info_dict: dict):
content_list = []
for _, page_info in pdf_info_dict.items():
paras = page_info.get("para_blocks")
if not paras:
paras_of_layout = page_info.get("para_blocks")
if not paras_of_layout:
continue
for para in paras:
for line in para:
content = line_to_standard_format(line)
content_list.append(content)
for paras in paras_of_layout:
for para in paras:
para_content = para_to_standard_format(para)
content_list.append(para_content)
return content_list

View File

@@ -220,7 +220,7 @@ def parse_pdf_for_train(
# 解析表格并对table_bboxes进行位置的微调,防止表格周围的文字被截断
table_bboxes = parse_tables(page_id, page, model_output_json)
table_bboxes = fix_tables(
page, table_bboxes, include_table_title=True, scan_line_num=2
page, table_bboxes, include_table_title=False, scan_line_num=2
) # 修正
table_bboxes = fix_table_text_block(
text_raw_blocks, table_bboxes

View File

@@ -3,12 +3,9 @@ import sys
import time
from urllib.parse import quote
from magic_pdf.dict2md.ocr_mkcontent import (
ocr_mk_nlp_markdown,
ocr_mk_mm_markdown,
ocr_mk_mm_standard_format,
ocr_mk_mm_markdown_with_para, ocr_mk_mm_markdown_with_para_and_pagination, ocr_mk_nlp_markdown_with_para,
)
from magic_pdf.dict2md.ocr_mkcontent import ocr_mk_mm_markdown, ocr_mk_nlp_markdown_with_para, \
ocr_mk_mm_markdown_with_para_and_pagination, ocr_mk_mm_markdown_with_para, ocr_mk_mm_standard_format, \
make_standard_format_with_para
from magic_pdf.libs.commons import (
read_file,
join_path,
@@ -23,19 +20,14 @@ from magic_pdf.pdf_parse_by_model import parse_pdf_by_model
from magic_pdf.filter.pdf_classify_by_type import classify
from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
from loguru import logger
from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
from magic_pdf.pdf_parse_for_train import parse_pdf_for_train
from magic_pdf.spark.base import exception_handler, get_data_source
from magic_pdf.train_utils.convert_to_train_format import convert_to_train_format
from app.common.s3 import get_s3_config, get_s3_client
from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
def exception_handler(jso: dict, e):
logger.exception(e)
jso["need_drop"] = True
jso["drop_reason"] = DropReason.Exception
jso["exception"] = f"ERROR: {e}"
return jso
def get_data_type(jso: dict):
data_type = jso.get("data_type")
@@ -51,13 +43,6 @@ def get_bookid(jso: dict):
return book_id
def get_data_source(jso: dict):
data_source = jso.get("data_source")
if data_source is None:
data_source = jso.get("file_source")
return data_source
def meta_scan(jso: dict, doc_layout_check=True) -> dict:
s3_pdf_path = jso.get("file_location")
s3_config = get_s3_config(s3_pdf_path)
@@ -400,222 +385,6 @@ def uni_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
jso = ocr_dropped_parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
return jso
# 专门用来跑被drop的pdf跑完之后需要把need_drop字段置为false
def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
if not jso.get("need_drop", False):
return jso
else:
jso = ocr_parse_pdf_core(
jso, start_page_id=start_page_id, debug_mode=debug_mode
)
jso["need_drop"] = False
return jso
def ocr_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
# 检测debug开关
if debug_mode:
pass
else: # 如果debug没开则检测是否有needdrop字段
if jso.get("need_drop", False):
return jso
jso = ocr_parse_pdf_core(jso, start_page_id=start_page_id, debug_mode=debug_mode)
return jso
def ocr_parse_pdf_core(jso: dict, start_page_id=0, debug_mode=False) -> dict:
s3_pdf_path = jso.get("file_location")
s3_config = get_s3_config(s3_pdf_path)
model_output_json_list = jso.get("doc_layout_result")
data_source = get_data_source(jso)
file_id = jso.get("file_id")
book_name = f"{data_source}/{file_id}"
try:
save_path = s3_image_save_path
image_s3_config = get_s3_config(save_path)
start_time = time.time() # 记录开始时间
# 先打印一下book_name和解析开始的时间
logger.info(
f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
file=sys.stderr,
)
pdf_info_dict = parse_pdf_by_ocr(
s3_pdf_path,
s3_config,
model_output_json_list,
save_path,
book_name,
pdf_model_profile=None,
image_s3_config=image_s3_config,
start_page_id=start_page_id,
debug_mode=debug_mode,
)
pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
jso["pdf_intermediate_dict"] = pdf_info_dict
end_time = time.time() # 记录完成时间
parse_time = int(end_time - start_time) # 计算执行时间
# 解析完成后打印一下book_name和耗时
logger.info(
f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
file=sys.stderr,
)
jso["parse_time"] = parse_time
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
if debug_mode:
pass
else: # 如果debug没开则检测是否有needdrop字段
if jso.get("need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
markdown_content = ocr_mk_mm_markdown(pdf_intermediate_dict)
jso["content"] = markdown_content
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
file=sys.stderr,
)
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_pdf_intermediate_dict_to_markdown_with_para(jso: dict, debug_mode=False) -> dict:
if debug_mode:
pass
else: # 如果debug没开则检测是否有needdrop字段
if jso.get("need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
# markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
markdown_content = ocr_mk_nlp_markdown_with_para(pdf_intermediate_dict)
jso["content"] = markdown_content
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
file=sys.stderr,
)
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_pdf_intermediate_dict_to_markdown_with_para_and_pagination(jso: dict, debug_mode=False) -> dict:
if debug_mode:
pass
else: # 如果debug没开则检测是否有needdrop字段
if jso.get("need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
markdown_content = ocr_mk_mm_markdown_with_para_and_pagination(pdf_intermediate_dict)
jso["content"] = markdown_content
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
file=sys.stderr,
)
# 把无用的信息清空
# jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
# jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_pdf_intermediate_dict_to_markdown_with_para_for_qa(
jso: dict, debug_mode=False
) -> dict:
if debug_mode:
pass
else: # 如果debug没开则检测是否有needdrop字段
if jso.get("need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
jso["content_ocr"] = markdown_content
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
file=sys.stderr,
)
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["mid_json_ocr"] = pdf_intermediate_dict
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_pdf_intermediate_dict_to_standard_format(jso: dict, debug_mode=False) -> dict:
if debug_mode:
pass
else: # 如果debug没开则检测是否有needdrop字段
if jso.get("need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
standard_format = ocr_mk_mm_standard_format(pdf_intermediate_dict)
jso["content_list"] = standard_format
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",
file=sys.stderr,
)
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> dict:
# 检测debug开关
if debug_mode:
@@ -696,5 +465,243 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> d
return jso
# Dedicated runner for PDFs that were previously dropped; after a successful
# re-parse the need_drop flag must be reset to False.
def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
    """Re-parse a dropped PDF via OCR and clear its ``need_drop`` flag.

    Records not flagged ``need_drop`` pass through unchanged.
    """
    if not jso.get("need_drop", False):
        return jso
    else:
        jso = ocr_parse_pdf_core(
            jso, start_page_id=start_page_id, debug_mode=debug_mode
        )
        jso["need_drop"] = False
        return jso
def ocr_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
    """Parse a PDF via OCR unless the record is flagged ``need_drop``.

    With ``debug_mode`` on, the ``need_drop`` check is bypassed.
    """
    # check the debug switch
    if debug_mode:
        pass
    else:  # if debug is off, skip records flagged need_drop
        if jso.get("need_drop", False):
            return jso
    jso = ocr_parse_pdf_core(jso, start_page_id=start_page_id, debug_mode=debug_mode)
    return jso
def ocr_parse_pdf_core(jso: dict, start_page_id=0, debug_mode=False) -> dict:
    """Run OCR-based parsing for one PDF and record the result on *jso*.

    Reads the PDF location and layout-model output from *jso*, parses with
    ``parse_pdf_by_ocr``, then stores the JSON-compressed intermediate dict
    under ``jso["pdf_intermediate_dict"]`` and the elapsed seconds under
    ``jso["parse_time"]``. Any exception is recorded via ``exception_handler``.
    """
    s3_pdf_path = jso.get("file_location")
    s3_config = get_s3_config(s3_pdf_path)
    model_output_json_list = jso.get("doc_layout_result")
    data_source = get_data_source(jso)
    file_id = jso.get("file_id")
    book_name = f"{data_source}/{file_id}"
    try:
        save_path = s3_image_save_path
        image_s3_config = get_s3_config(save_path)
        start_time = time.time()  # record start time
        # log book_name and parse start time up front
        logger.info(
            f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
            # NOTE(review): `file=sys.stderr` looks like a leftover print()-style
            # kwarg; loguru routes output via sinks, not a `file` argument —
            # confirm and remove.
            file=sys.stderr,
        )
        pdf_info_dict = parse_pdf_by_ocr(
            s3_pdf_path,
            s3_config,
            model_output_json_list,
            save_path,
            book_name,
            pdf_model_profile=None,
            image_s3_config=image_s3_config,
            start_page_id=start_page_id,
            debug_mode=debug_mode,
        )
        pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
        jso["pdf_intermediate_dict"] = pdf_info_dict
        end_time = time.time()  # record finish time
        parse_time = int(end_time - start_time)  # elapsed seconds
        # log book_name and cost once parsing completes
        logger.info(
            f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
            file=sys.stderr,
        )
        jso["parse_time"] = parse_time
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso
def ocr_pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
    """Render the compressed OCR intermediate dict as multimodal markdown.

    Stores the markdown under ``jso["content"]`` and blanks the bulky
    intermediate fields afterwards.
    """
    if debug_mode:
        pass
    else:  # if debug is off, skip records flagged need_drop
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        markdown_content = ocr_mk_mm_markdown(pdf_intermediate_dict)
        jso["content"] = markdown_content
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
            file=sys.stderr,
        )
        # blank out fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso
def ocr_pdf_intermediate_dict_to_markdown_with_para(jso: dict, debug_mode=False) -> dict:
    """Render the compressed OCR intermediate dict as paragraph-level NLP markdown.

    Stores the markdown under ``jso["content"]`` and blanks the bulky
    intermediate fields afterwards.
    """
    if debug_mode:
        pass
    else:  # if debug is off, skip records flagged need_drop
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        # the multimodal variant is deliberately disabled here in favour of the
        # NLP variant:
        # markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
        markdown_content = ocr_mk_nlp_markdown_with_para(pdf_intermediate_dict)
        jso["content"] = markdown_content
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
            file=sys.stderr,
        )
        # blank out fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso
def ocr_pdf_intermediate_dict_to_markdown_with_para_and_pagination(jso: dict, debug_mode=False) -> dict:
    """Render the compressed OCR intermediate dict as paginated paragraph markdown.

    Stores the result under ``jso["content"]``.
    """
    if debug_mode:
        pass
    else:  # if debug is off, skip records flagged need_drop
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        markdown_content = ocr_mk_mm_markdown_with_para_and_pagination(pdf_intermediate_dict)
        jso["content"] = markdown_content
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
            file=sys.stderr,
        )
        # NOTE(review): unlike the sibling converters, doc_layout_result and
        # pdf_meta are intentionally(?) kept here — confirm this is deliberate.
        # jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        # jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso
def ocr_pdf_intermediate_dict_to_markdown_with_para_for_qa(
    jso: dict, debug_mode=False
) -> dict:
    """Render paragraph-level multimodal markdown for QA pipelines.

    Stores the markdown under ``jso["content_ocr"]`` and keeps the full
    decompressed intermediate dict under ``jso["mid_json_ocr"]`` (the
    compressed copy is blanked).
    """
    if debug_mode:
        pass
    else:  # if debug is off, skip records flagged need_drop
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
        jso["content_ocr"] = markdown_content
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
            file=sys.stderr,
        )
        # blank out fields that are no longer needed; keep the decompressed
        # intermediate dict for downstream QA under mid_json_ocr
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["mid_json_ocr"] = pdf_intermediate_dict
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso
def ocr_pdf_intermediate_dict_to_standard_format(jso: dict, debug_mode=False) -> dict:
    """Convert the compressed OCR intermediate dict to the line-level standard format.

    Stores the content list under ``jso["content_list"]`` and blanks the bulky
    intermediate fields afterwards.
    """
    if debug_mode:
        pass
    else:  # if debug is off, skip records flagged need_drop
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        standard_format = ocr_mk_mm_standard_format(pdf_intermediate_dict)
        jso["content_list"] = standard_format
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",
            file=sys.stderr,
        )
        # blank out fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso
def ocr_pdf_intermediate_dict_to_standard_format_with_para(jso: dict, debug_mode=False) -> dict:
    """Convert the compressed OCR intermediate dict to the paragraph-level standard format.

    Uses ``make_standard_format_with_para`` (paragraph merging) instead of the
    line-level ``ocr_mk_mm_standard_format``; stores the content list under
    ``jso["content_list"]``.
    """
    if debug_mode:
        pass
    else:  # if debug is off, skip records flagged need_drop
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        standard_format = make_standard_format_with_para(pdf_intermediate_dict)
        jso["content_list"] = standard_format
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",
            file=sys.stderr,
        )
        # blank out fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso
if __name__ == "__main__":
pass

68
magic_pdf/pipeline_txt.py Normal file
View File

@@ -0,0 +1,68 @@
"""
文本型pdf转化为统一清洗格式
"""
# TODO 移动到spark/目录下
from loguru import logger
from magic_pdf.dict2md.mkcontent import mk_mm_markdown, mk_universal_format
from magic_pdf.libs.commons import join_path
from magic_pdf.libs.json_compressor import JsonCompressor
from magic_pdf.spark.base import exception_handler, get_data_source
def txt_pdf_to_standard_format(jso: dict, debug_mode=False) -> dict:
    """Convert a parsed text-PDF intermediate dict into the unified standard format.

    Decompresses ``jso["pdf_intermediate_dict"]``, builds the universal
    content list with ``mk_universal_format``, stores it under
    ``jso["content_list"]`` and blanks the bulky intermediate fields.
    Exceptions are recorded via ``exception_handler``.
    """
    if debug_mode:
        pass
    else:  # if debug is off, skip records flagged need_drop
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop")
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        standard_format = mk_universal_format(pdf_intermediate_dict)
        jso["content_list"] = standard_format
        logger.info(f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",)
        # blank out fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso
def txt_pdf_to_mm_markdown_format(jso: dict, debug_mode=False) -> dict:
    """Convert a parsed text-PDF intermediate dict into multimodal markdown.

    Builds the universal content list, renders it to markdown with
    ``mk_mm_markdown`` and stores the result on *jso*.
    """
    if debug_mode:
        pass
    else:  # if debug is off, skip records flagged need_drop
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop")
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        standard_format = mk_universal_format(pdf_intermediate_dict)
        mm_content = mk_mm_markdown(standard_format)
        # NOTE(review): the markdown text is stored under "content_list" (the
        # sibling function uses "content" for markdown), and the log below
        # reports len(standard_format) rather than len(mm_content) — confirm
        # both are intentional.
        jso["content_list"] = mm_content
        logger.info(f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",)
        # blank out fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso

View File

@@ -44,10 +44,15 @@ def remove_spans_by_bboxes_dict(spans, need_remove_spans_bboxes_dict):
# logger.info(f"remove spans by bbox dict, drop_tag: {drop_tag}, removed_bboxes: {removed_bboxes}")
need_remove_spans = []
for span in spans:
# 通过判断span的bbox是否在removed_bboxes中, 判断是否需要删除该span
for removed_bbox in removed_bboxes:
if calculate_overlap_area_in_bbox1_area_ratio(span['bbox'], removed_bbox) > 0.5:
need_remove_spans.append(span)
break
# 当drop_tag为DropTag.FOOTNOTE时, 判断span是否在removed_bboxes中任意一个的下方如果是,则删除该span
elif drop_tag == DropTag.FOOTNOTE and (span['bbox'][1]+span['bbox'][3])/2 > removed_bbox[3] and removed_bbox[0] < (span['bbox'][0]+span['bbox'][2])/2 < removed_bbox[2]:
need_remove_spans.append(span)
break
for span in need_remove_spans:
spans.remove(span)

View File

21
magic_pdf/spark/base.py Normal file
View File

@@ -0,0 +1,21 @@
from loguru import logger
from magic_pdf.libs.drop_reason import DropReason
def get_data_source(jso: dict):
    """Return the record's data source, falling back to ``file_source``.

    Returns ``None`` when neither key carries a value.
    """
    source = jso.get("data_source")
    if source is not None:
        return source
    return jso.get("file_source")
def exception_handler(jso: dict, e):
    """Log exception *e*, mark *jso* as dropped, and return it.

    Records the drop reason and a human-readable error string so downstream
    stages can see why the record was dropped.
    """
    logger.exception(e)
    jso.update(
        need_drop=True,
        drop_reason=DropReason.Exception,
        exception=f"ERROR: {e}",
    )
    return jso

View File

@@ -54,8 +54,8 @@ def convert_to_train_format(jso: dict) -> []:
n_bbox = {"category_id": 10, "bbox": inter_equation["bbox"]}
bboxes.append(n_bbox)
for footnote in v['bak_footer_note_bboxes']:
n_bbox = {"category_id": 5, "bbox": footnote["bbox"]}
for footnote_bbox in v["bak_footer_note_bboxes"]:
n_bbox = {"category_id": 5, "bbox": list(footnote_bbox)}
bboxes.append(n_bbox)
info["bboxes"] = bboxes

View File

@@ -46,6 +46,8 @@ def indicator_cal(json_standard,json_test):
'''批量读取中间生成的json文件'''
test_inline_equations=[]
test_interline_equations=[]
test_inline_euqations_bboxs=[]
test_interline_equations_bboxs=[]
test_dropped_text_bboxes=[]
test_dropped_text_tag=[]
test_dropped_image_bboxes=[]
@@ -58,15 +60,20 @@ def indicator_cal(json_standard,json_test):
mid_json=pd.DataFrame(i)
mid_json=mid_json.iloc[:,:-1]
for j1 in mid_json.loc['inline_equations',:]:
page_in=[]
page_in_text=[]
page_in_bbox=[]
for k1 in j1:
page_in.append(k1['latex_text'])
test_inline_equations.append(page_in)
page_in_text.append(k1['latex_text'])
page_in_bbox.append(k1['bbox'])
test_inline_equations.append(page_in_text)
test_inline_euqations_bboxs.append(page_in_bbox)
for j2 in mid_json.loc['interline_equations',:]:
page_in=[]
page_in_text=[]
page_in_bbox=[]
for k2 in j2:
page_in.append(k2['latex_text'])
test_interline_equations.append(page_in)
page_in_text.append(k2['latex_text'])
test_interline_equations.append(page_in_text)
test_interline_equations_bboxs.append(page_in_bbox)
for j3 in mid_json.loc['droped_text_block',:]:
page_in_bbox=[]
@@ -101,6 +108,8 @@ def indicator_cal(json_standard,json_test):
standard_inline_equations=[]
standard_interline_equations=[]
standard_inline_euqations_bboxs=[]
standard_interline_equations_bboxs=[]
standard_dropped_text_bboxes=[]
standard_dropped_text_tag=[]
standard_dropped_image_bboxes=[]
@@ -113,15 +122,21 @@ def indicator_cal(json_standard,json_test):
mid_json=pd.DataFrame(i)
mid_json=mid_json.iloc[:,:-1]
for j1 in mid_json.loc['inline_equations',:]:
page_in=[]
page_in_text=[]
page_in_bbox=[]
for k1 in j1:
page_in.append(k1['latex_text'])
standard_inline_equations.append(page_in)
page_in_text.append(k1['latex_text'])
page_in_bbox.append(k1['bbox'])
standard_inline_equations.append(page_in_text)
standard_inline_euqations_bboxs.append(page_in_bbox)
for j2 in mid_json.loc['interline_equations',:]:
page_in=[]
page_in_text=[]
page_in_bbox=[]
for k2 in j2:
page_in.append(k2['latex_text'])
standard_interline_equations.append(page_in)
page_in_text.append(k2['latex_text'])
page_in_bbox.append(k2['bbox'])
standard_interline_equations.append(page_in_text)
standard_interline_equations_bboxs.append(page_in_bbox)
for j3 in mid_json.loc['droped_text_block',:]:
page_in_bbox=[]
page_in_tag=[]
@@ -195,6 +210,9 @@ def indicator_cal(json_standard,json_test):
inline_equations_edit=np.mean(dis1)
inline_equations_bleu=np.mean(bleu1)
'''行内公式bbox匹配相关指标'''
inline_equations_bbox_report=bbox_match_indicator(test_inline_euqations_bboxs,standard_inline_euqations_bboxs)
'''行间公式编辑距离和bleu'''
dis2=[]
@@ -217,6 +235,10 @@ def indicator_cal(json_standard,json_test):
interline_equations_bleu=np.mean(bleu2)
'''行间公式bbox匹配相关指标'''
interline_equations_bbox_report=bbox_match_indicator(test_interline_equations_bboxs,standard_interline_equations_bboxs)
'''可以先检查page和bbox数量是否一致'''
@@ -289,87 +311,11 @@ def indicator_cal(json_standard,json_test):
'''dropped_image_block的bbox匹配相关指标'''
'''有数据格式不一致的问题'''
image_block_report=bbox_match_indicator(test_dropped_image_bboxes,standard_dropped_image_bboxes)
test_image_bbox=[]
standard_image_bbox=[]
for a,b in zip(test_dropped_image_bboxes,standard_dropped_image_bboxes):
test_page_bbox=[]
standard_page_bbox=[]
if len(a)==0 and len(b)==0:
pass
else:
for i in b:
if len(i)!=4:
continue
else:
judge=0
standard_page_bbox.append(1)
for j in a:
if bbox_offset(i,j):
judge=1
test_page_bbox.append(1)
break
if judge==0:
test_page_bbox.append(0)
diff_num=len(a)+test_page_bbox.count(0)-len(b)
if diff_num>0:#有多删的情况出现
test_page_bbox.extend([1]*diff_num)
standard_page_bbox.extend([0]*diff_num)
test_image_bbox.extend(test_page_bbox)
standard_image_bbox.extend(standard_page_bbox)
image_block_report = {}
image_block_report['accuracy']=metrics.accuracy_score(standard_image_bbox,test_image_bbox)
image_block_report['precision']=metrics.precision_score(standard_image_bbox,test_image_bbox)
image_block_report['recall']=metrics.recall_score(standard_image_bbox,test_image_bbox)
image_block_report['f1_score']=metrics.f1_score(standard_image_bbox,test_image_bbox)
'''dropped_table_block的bbox匹配相关指标'''
test_table_bbox=[]
standard_table_bbox=[]
for a,b in zip(test_dropped_table_bboxes,standard_dropped_table_bboxes):
test_page_bbox=[]
standard_page_bbox=[]
if len(a)==0 and len(b)==0:
pass
else:
for i in b:
if len(i)!=4:
continue
else:
judge=0
standard_page_bbox.append(1)
for j in a:
if bbox_offset(i,j):
judge=1
test_page_bbox.append(1)
break
if judge==0:
test_page_bbox.append(0)
diff_num=len(a)+test_page_bbox.count(0)-len(b)
if diff_num>0:#有多删的情况出现
test_page_bbox.extend([1]*diff_num)
standard_page_bbox.extend([0]*diff_num)
test_table_bbox.extend(test_page_bbox)
standard_table_bbox.extend(standard_page_bbox)
table_block_report = {}
table_block_report['accuracy']=metrics.accuracy_score(standard_table_bbox,test_table_bbox)
table_block_report['precision']=metrics.precision_score(standard_table_bbox,test_table_bbox)
table_block_report['recall']=metrics.recall_score(standard_table_bbox,test_table_bbox)
table_block_report['f1_score']=metrics.f1_score(standard_table_bbox,test_table_bbox)
table_block_report=bbox_match_indicator(test_dropped_table_bboxes,standard_dropped_table_bboxes)
'''阅读顺序编辑距离的均值'''
@@ -392,6 +338,8 @@ def indicator_cal(json_standard,json_test):
output['行间公式平均编辑距离']=[interline_equations_edit]
output['行内公式平均bleu']=[inline_equations_bleu]
output['行间公式平均bleu']=[interline_equations_bleu]
output['行内公式识别相关指标']=[inline_equations_bbox_report]
output['行间公式识别相关指标']=[interline_equations_bbox_report]
output['阅读顺序平均编辑距离']=[preproc_num_edit]
output['分段准确率']=[acc_para]
output['删除的text block的相关指标']=[text_block_report]
@@ -434,6 +382,52 @@ def bbox_offset(b_t,b_s):
return True
else:
return False
def bbox_match_indicator(test_bbox_list, standard_bbox_list):
    """Align per-page bbox lists and report accuracy/precision/recall/F1.

    Each element of both inputs is one page's list of bboxes. Standard-side
    entries that are not 4-element boxes are skipped; a standard box counts as
    matched when ``bbox_offset`` accepts any test box. Test boxes left over
    beyond the standard count are treated as spurious detections (predicted 1,
    ground truth 0).
    """
    y_pred = []
    y_true = []
    for page_test, page_std in zip(test_bbox_list, standard_bbox_list):
        page_pred = []
        page_true = []
        if len(page_test) != 0 or len(page_std) != 0:
            for std_box in page_std:
                if len(std_box) != 4:
                    continue
                page_true.append(1)
                matched = any(bbox_offset(std_box, t_box) for t_box in page_test)
                page_pred.append(1 if matched else 0)
            # surplus test boxes = over-detections
            surplus = len(page_test) + page_pred.count(0) - len(page_std)
            if surplus > 0:
                page_pred.extend([1] * surplus)
                page_true.extend([0] * surplus)
        y_pred.extend(page_pred)
        y_true.extend(page_true)
    return {
        'accuracy': metrics.accuracy_score(y_true, y_pred),
        'precision': metrics.precision_score(y_true, y_pred),
        'recall': metrics.recall_score(y_true, y_pred),
        'f1_score': metrics.f1_score(y_true, y_pred),
    }