# Copyright (c) Opendatalab. All rights reserved.
import copy
import json
import os
from pathlib import Path

from loguru import logger

from mineru.cli.common import convert_pdf_bytes_to_bytes_by_pypdfium2, prepare_env, read_fn
from mineru.data.data_reader_writer import FileBasedDataWriter
from mineru.utils.draw_bbox import draw_layout_bbox, draw_span_bbox
from mineru.utils.engine_utils import get_vlm_engine
from mineru.utils.enum_class import MakeMode
from mineru.backend.vlm.vlm_analyze import doc_analyze as vlm_doc_analyze
from mineru.backend.pipeline.pipeline_analyze import doc_analyze as pipeline_doc_analyze
from mineru.backend.pipeline.pipeline_middle_json_mkcontent import union_make as pipeline_union_make
from mineru.backend.pipeline.model_json_to_middle_json import result_to_middle_json as pipeline_result_to_middle_json
from mineru.backend.vlm.vlm_middle_json_mkcontent import union_make as vlm_union_make
from mineru.backend.hybrid.hybrid_analyze import doc_analyze as hybrid_doc_analyze
from mineru.utils.guess_suffix_or_lang import guess_suffix_by_path


def do_parse(
    output_dir,  # Output directory for storing parsing results
    pdf_file_names: list[str],  # List of PDF file names to be parsed
    pdf_bytes_list: list[bytes],  # List of PDF bytes to be parsed
    p_lang_list: list[str],  # List of languages for each PDF, default is 'ch' (Chinese)
    backend="hybrid-auto-engine",  # The backend for parsing PDFs, default is 'hybrid-auto-engine'
    parse_method="auto",  # The method for parsing PDFs, default is 'auto'
    formula_enable=True,  # Enable formula parsing
    table_enable=True,  # Enable table parsing
    server_url=None,  # Server URL for the vlm-http-client and hybrid-http-client backends
    f_draw_layout_bbox=True,  # Whether to draw layout bounding boxes
    f_draw_span_bbox=True,  # Whether to draw span bounding boxes
    f_dump_md=True,  # Whether to dump markdown files
    f_dump_middle_json=True,  # Whether to dump middle JSON files
    f_dump_model_output=True,  # Whether to dump model output files
    f_dump_orig_pdf=True,  # Whether to dump original PDF files
    f_dump_content_list=True,  # Whether to dump content list files
    f_make_md_mode=MakeMode.MM_MD,  # The mode for making markdown content, default is MM_MD
    start_page_id=0,  # Start page ID for parsing, default is 0
    end_page_id=None,  # End page ID for parsing, default is None (parse all pages until the end of the document)
):

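    # Pipeline backend: trim each PDF to the requested page range up front,
    # then run the layout/OCR analysis over the whole batch.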
if backend == "pipeline":
|
||
for idx, pdf_bytes in enumerate(pdf_bytes_list):
|
||
new_pdf_bytes = convert_pdf_bytes_to_bytes_by_pypdfium2(pdf_bytes, start_page_id, end_page_id)
|
||
pdf_bytes_list[idx] = new_pdf_bytes
|
||
|
||
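        # pipeline_doc_analyze returns parallel lists, one entry per input document.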
        infer_results, all_image_lists, all_pdf_docs, lang_list, ocr_enabled_list = pipeline_doc_analyze(pdf_bytes_list, p_lang_list, parse_method=parse_method, formula_enable=formula_enable, table_enable=table_enable)

        for idx, model_list in enumerate(infer_results):
            model_json = copy.deepcopy(model_list)
            pdf_file_name = pdf_file_names[idx]
            local_image_dir, local_md_dir = prepare_env(output_dir, pdf_file_name, parse_method)
            image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(local_md_dir)

            images_list = all_image_lists[idx]
            pdf_doc = all_pdf_docs[idx]
            _lang = lang_list[idx]
            _ocr_enable = ocr_enabled_list[idx]
            middle_json = pipeline_result_to_middle_json(model_list, images_list, pdf_doc, image_writer, _lang, _ocr_enable, formula_enable)

            pdf_info = middle_json["pdf_info"]

            pdf_bytes = pdf_bytes_list[idx]
            _process_output(
                pdf_info, pdf_bytes, pdf_file_name, local_md_dir, local_image_dir,
                md_writer, f_draw_layout_bbox, f_draw_span_bbox, f_dump_orig_pdf,
                f_dump_md, f_dump_content_list, f_dump_middle_json, f_dump_model_output,
                f_make_md_mode, middle_json, model_json, is_pipeline=True
            )
    else:
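        # The VLM and hybrid backends do not support span drawing, so it is forced off.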
        f_draw_span_bbox = False

if backend.startswith("vlm-"):
|
||
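            # Strip the "vlm-" prefix, e.g. "vlm-http-client" -> "http-client".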
            backend = backend[4:]

            if backend == "auto-engine":
                backend = get_vlm_engine(inference_engine='auto', is_async=False)

            parse_method = "vlm"
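            # The VLM backend handles one document at a time: trim the page
            # range, analyze, then write the outputs.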
            for idx, pdf_bytes in enumerate(pdf_bytes_list):
                pdf_file_name = pdf_file_names[idx]
                pdf_bytes = convert_pdf_bytes_to_bytes_by_pypdfium2(pdf_bytes, start_page_id, end_page_id)
                local_image_dir, local_md_dir = prepare_env(output_dir, pdf_file_name, parse_method)
                image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(local_md_dir)
                middle_json, infer_result = vlm_doc_analyze(pdf_bytes, image_writer=image_writer, backend=backend, server_url=server_url)

                pdf_info = middle_json["pdf_info"]

                _process_output(
                    pdf_info, pdf_bytes, pdf_file_name, local_md_dir, local_image_dir,
                    md_writer, f_draw_layout_bbox, f_draw_span_bbox, f_dump_orig_pdf,
                    f_dump_md, f_dump_content_list, f_dump_middle_json, f_dump_model_output,
                    f_make_md_mode, middle_json, infer_result, is_pipeline=False
                )
elif backend.startswith("hybrid-"):
|
||
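            # Strip the "hybrid-" prefix, e.g. "hybrid-auto-engine" -> "auto-engine".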
            backend = backend[7:]

            if backend == "auto-engine":
                backend = get_vlm_engine(inference_engine='auto', is_async=False)

            parse_method = f"hybrid_{parse_method}"
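            # Hybrid mode combines pipeline-style options (parse_method, language,
            # formula settings) with a VLM backend in a single analysis call.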
            for idx, pdf_bytes in enumerate(pdf_bytes_list):
                pdf_file_name = pdf_file_names[idx]
                pdf_bytes = convert_pdf_bytes_to_bytes_by_pypdfium2(pdf_bytes, start_page_id, end_page_id)
                local_image_dir, local_md_dir = prepare_env(output_dir, pdf_file_name, parse_method)
                image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(local_md_dir)
                middle_json, infer_result, _vlm_ocr_enable = hybrid_doc_analyze(
                    pdf_bytes,
                    image_writer=image_writer,
                    backend=backend,
                    parse_method=parse_method,
                    language=p_lang_list[idx],
                    inline_formula_enable=formula_enable,
                    server_url=server_url,
                )

                pdf_info = middle_json["pdf_info"]

                _process_output(
                    pdf_info, pdf_bytes, pdf_file_name, local_md_dir, local_image_dir,
                    md_writer, f_draw_layout_bbox, f_draw_span_bbox, f_dump_orig_pdf,
                    f_dump_md, f_dump_content_list, f_dump_middle_json, f_dump_model_output,
                    f_make_md_mode, middle_json, infer_result, is_pipeline=False
                )


def _process_output(
    pdf_info,
    pdf_bytes,
    pdf_file_name,
    local_md_dir,
    local_image_dir,
    md_writer,
    f_draw_layout_bbox,
    f_draw_span_bbox,
    f_dump_orig_pdf,
    f_dump_md,
    f_dump_content_list,
    f_dump_middle_json,
    f_dump_model_output,
    f_make_md_mode,
    middle_json,
    model_output=None,
    is_pipeline=True
):
    """Process and write the output files."""
    if f_draw_layout_bbox:
        draw_layout_bbox(pdf_info, pdf_bytes, local_md_dir, f"{pdf_file_name}_layout.pdf")

    if f_draw_span_bbox:
        draw_span_bbox(pdf_info, pdf_bytes, local_md_dir, f"{pdf_file_name}_span.pdf")

    if f_dump_orig_pdf:
        md_writer.write(
            f"{pdf_file_name}_origin.pdf",
            pdf_bytes,
        )

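    # The markdown and content list reference images by the image directory's
    # base name, i.e. relative to the output directory.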
    image_dir = str(os.path.basename(local_image_dir))

    if f_dump_md:
        make_func = pipeline_union_make if is_pipeline else vlm_union_make
        md_content_str = make_func(pdf_info, f_make_md_mode, image_dir)
        md_writer.write_string(
            f"{pdf_file_name}.md",
            md_content_str,
        )

    if f_dump_content_list:
        make_func = pipeline_union_make if is_pipeline else vlm_union_make
        content_list = make_func(pdf_info, MakeMode.CONTENT_LIST, image_dir)
        md_writer.write_string(
            f"{pdf_file_name}_content_list.json",
            json.dumps(content_list, ensure_ascii=False, indent=4),
        )

    if f_dump_middle_json:
        md_writer.write_string(
            f"{pdf_file_name}_middle.json",
            json.dumps(middle_json, ensure_ascii=False, indent=4),
        )

    if f_dump_model_output:
        md_writer.write_string(
            f"{pdf_file_name}_model.json",
            json.dumps(model_output, ensure_ascii=False, indent=4),
        )

    logger.info(f"local output dir is {local_md_dir}")


def parse_doc(
    path_list: list[Path],
    output_dir,
    lang="ch",
    backend="hybrid-auto-engine",
    method="auto",
    server_url=None,
    start_page_id=0,
    end_page_id=None
):
    """
    Parameter description:
    path_list: List of document paths to be parsed; these can be PDF or image files.
    output_dir: Output directory for storing parsing results.
    lang: Language option, default is 'ch'. Optional values include ['ch', 'ch_server', 'ch_lite', 'en', 'korean', 'japan', 'chinese_cht', 'ta', 'te', 'ka', 'th', 'el',
        'latin', 'arabic', 'east_slavic', 'cyrillic', 'devanagari'].
        Set this to the language of the PDF (if known) to improve OCR accuracy. Optional.
        Applies only when the backend is set to 'pipeline' or 'hybrid-*'.
    backend: The backend for parsing PDFs:
        pipeline: More general.
        vlm-auto-engine: High accuracy via local computing power.
        vlm-http-client: High accuracy via remote computing power (client suitable for OpenAI-compatible servers).
        hybrid-auto-engine: Next-generation high-accuracy solution via local computing power.
        hybrid-http-client: High accuracy, but requires a little local computing power (client suitable for OpenAI-compatible servers).
        Without a backend specified, 'hybrid-auto-engine' is used by default.
    method: The method for parsing PDFs:
        auto: Automatically determine the method based on the file type.
        txt: Use the text extraction method.
        ocr: Use the OCR method for image-based PDFs.
        Without a method specified, 'auto' is used by default.
        Applies only when the backend is set to 'pipeline' or 'hybrid-*'.
    server_url: When the backend is 'vlm-http-client' or 'hybrid-http-client', you need to specify the server_url, for example: 'http://127.0.0.1:30000'.
    start_page_id: Start page ID for parsing, default is 0.
    end_page_id: End page ID for parsing, default is None (parse all pages until the end of the document).
    """
    try:
        file_name_list = []
        pdf_bytes_list = []
        lang_list = []
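        # read_fn returns each document as PDF bytes (image inputs are converted to PDF).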
        for path in path_list:
            file_name = str(Path(path).stem)
            pdf_bytes = read_fn(path)
            file_name_list.append(file_name)
            pdf_bytes_list.append(pdf_bytes)
            lang_list.append(lang)
        do_parse(
            output_dir=output_dir,
            pdf_file_names=file_name_list,
            pdf_bytes_list=pdf_bytes_list,
            p_lang_list=lang_list,
            backend=backend,
            parse_method=method,
            server_url=server_url,
            start_page_id=start_page_id,
            end_page_id=end_page_id
        )
    except Exception as e:
        logger.exception(e)


if __name__ == '__main__':
    # args
    __dir__ = os.path.dirname(os.path.abspath(__file__))
    pdf_files_dir = os.path.join(__dir__, "pdfs")
    output_dir = os.path.join(__dir__, "output")
    pdf_suffixes = ["pdf"]
    image_suffixes = ["png", "jpeg", "jp2", "webp", "gif", "bmp", "jpg"]

    doc_path_list = []
    for doc_path in Path(pdf_files_dir).glob('*'):
        if guess_suffix_by_path(doc_path) in pdf_suffixes + image_suffixes:
            doc_path_list.append(doc_path)

"""如果您由于网络问题无法下载模型,可以设置环境变量MINERU_MODEL_SOURCE为modelscope使用免代理仓库下载模型"""
|
||
# os.environ['MINERU_MODEL_SOURCE'] = "modelscope"
|
||
|
||
"""Use hybrid mode and local computing power to parse documents"""
|
||
parse_doc(doc_path_list, output_dir, backend="hybrid-auto-engine")
|
||
|
||
"""Other backends for parsing documents, you can uncomment and try"""
|
||
# parse_doc(doc_path_list, output_dir, backend="pipeline") # more general.
|
||
# parse_doc(doc_path_list, output_dir, backend="vlm-auto-engine") # high accuracy via local computing power.
|
||
# parse_doc(doc_path_list, output_dir, backend="vlm-http-client", server_url="http://127.0.0.1:30000") # high accuracy via remote computing power(client suitable for openai-compatible servers).
|
||
# parse_doc(doc_path_list, output_dir, backend="hybrid-http-client", server_url="http://127.0.0.1:30000") # high accuracy but requires a little local computing power(client suitable for openai-compatible servers). |
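
    """A rough sketch: parsing can also be limited to a page range via start_page_id/end_page_id (0-based page IDs)"""
    # parse_doc(doc_path_list, output_dir, backend="pipeline", start_page_id=0, end_page_id=9)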