"""用户输入: model数组,每个元素代表一个页面 pdf在s3的路径 截图保存的s3位置.
|
||
|
||
然后:
|
||
1)根据s3路径,调用spark集群的api,拿到ak,sk,endpoint,构造出s3PDFReader
|
||
2)根据用户输入的s3地址,调用spark集群的api,拿到ak,sk,endpoint,构造出s3ImageWriter
|
||
|
||
其余部分至于构造s3cli, 获取ak,sk都在code-clean里写代码完成。不要反向依赖!!!
|
||
"""
# Third-party imports.
from loguru import logger

# Local (magic_pdf) imports, sorted alphabetically per PEP 8.
from magic_pdf.config.constants import PARSE_TYPE_OCR, PARSE_TYPE_TXT
from magic_pdf.data.data_reader_writer import DataWriter
from magic_pdf.data.dataset import Dataset
from magic_pdf.libs.version import __version__
from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
from magic_pdf.pdf_parse_by_txt import parse_pdf_by_txt
def parse_txt_pdf(
    dataset: Dataset,
    model_list: list,
    imageWriter: DataWriter,
    is_debug=False,
    start_page_id=0,
    end_page_id=None,
    lang=None,
    *args,
    **kwargs
):
    """Parse a text-layer PDF.

    Runs the text-extraction pipeline over *dataset* for pages
    ``start_page_id``..``end_page_id`` and tags the result dict with the
    parse type, the package version, and (when given) the language.
    """
    result = parse_pdf_by_txt(
        dataset,
        model_list,
        imageWriter,
        start_page_id=start_page_id,
        end_page_id=end_page_id,
        debug_mode=is_debug,
        lang=lang,
    )

    # Attach provenance metadata to the parse result.
    result['_parse_type'] = PARSE_TYPE_TXT
    result['_version_name'] = __version__
    if lang is not None:
        result['_lang'] = lang

    return result
def parse_ocr_pdf(
    dataset: Dataset,
    model_list: list,
    imageWriter: DataWriter,
    is_debug=False,
    start_page_id=0,
    end_page_id=None,
    lang=None,
    *args,
    **kwargs
):
    """Parse an OCR-type PDF.

    Runs the OCR pipeline over *dataset* for pages
    ``start_page_id``..``end_page_id`` and tags the result dict with the
    parse type, the package version, and (when given) the language.
    """
    result = parse_pdf_by_ocr(
        dataset,
        model_list,
        imageWriter,
        start_page_id=start_page_id,
        end_page_id=end_page_id,
        debug_mode=is_debug,
        lang=lang,
    )

    # Attach provenance metadata to the parse result.
    result['_parse_type'] = PARSE_TYPE_OCR
    result['_version_name'] = __version__
    if lang is not None:
        result['_lang'] = lang

    return result
def parse_union_pdf(
    dataset: Dataset,
    model_list: list,
    imageWriter: DataWriter,
    is_debug=False,
    start_page_id=0,
    end_page_id=None,
    lang=None,
    *args,
    **kwargs
):
    """Parse a mixed text/OCR PDF, extracting everything.

    Tries the text-layer pipeline first; if it fails or asks to be
    dropped, falls back to the OCR pipeline (running model inference on
    the fly when no precomputed ``model_list`` was supplied).

    Raises:
        Exception: when both the text and OCR pipelines fail.
    """

    def run_backend(backend):
        # Invoke one parse backend; log any failure and return None so the
        # caller can decide whether to fall back to the other backend.
        try:
            return backend(
                dataset,
                model_list,
                imageWriter,
                start_page_id=start_page_id,
                end_page_id=end_page_id,
                debug_mode=is_debug,
                lang=lang,
            )
        except Exception as e:
            logger.exception(e)
            return None

    pdf_info_dict = run_backend(parse_pdf_by_txt)
    txt_unusable = pdf_info_dict is None or pdf_info_dict.get('_need_drop', False)

    if txt_unusable:
        logger.warning('parse_pdf_by_txt drop or error, switch to parse_pdf_by_ocr')
        if not model_list:
            # No precomputed model output: run OCR-mode inference now.
            infer_res = doc_analyze(
                dataset,
                ocr=True,
                start_page_id=start_page_id,
                end_page_id=end_page_id,
                lang=lang,
                layout_model=kwargs.get('layout_model', None),
                formula_enable=kwargs.get('formula_enable', None),
                table_enable=kwargs.get('table_enable', None),
            )
            # Rebind so the closure above picks up the fresh inference results.
            model_list = infer_res.get_infer_res()
        pdf_info_dict = run_backend(parse_pdf_by_ocr)
        if pdf_info_dict is None:
            raise Exception('Both parse_pdf_by_txt and parse_pdf_by_ocr failed.')
        pdf_info_dict['_parse_type'] = PARSE_TYPE_OCR
    else:
        pdf_info_dict['_parse_type'] = PARSE_TYPE_TXT

    # Attach provenance metadata to the parse result.
    pdf_info_dict['_version_name'] = __version__
    if lang is not None:
        pdf_info_dict['_lang'] = lang

    return pdf_info_dict