Compare commits

..

87 Commits

Author SHA1 Message Date
Xiaomeng Zhao
61248e2ec9 Merge pull request #4662 from Niujunbo2002/master
docs: add MinerU-Diffusion reference to README
2026-03-26 14:20:39 +08:00
Niujunbo2002
c717a1c83a docs: add MinerU-Diffusion reference to README 2026-03-26 11:15:48 +08:00
Niujunbo2002
daf970af0e docs: update citation entries in README files 2026-03-22 23:59:15 +08:00
Xiaomeng Zhao
077b3101b3 Update base image in mlu.Dockerfile 2026-03-02 17:23:44 +08:00
Xiaomeng Zhao
a12610fb3e Merge pull request #4526 from myhloli/dev
Dev
2026-02-09 17:44:40 +08:00
myhloli
53aad4c900 fix: improve formatting of VastAI reference in index.md 2026-02-09 17:41:50 +08:00
myhloli
345c46a457 fix: update documentation to include Biren platform details 2026-02-09 17:38:15 +08:00
Xiaomeng Zhao
e460f33c95 Merge pull request #4523 from boshi91/dev
feat: add Biren platform documentation for vLLM support
2026-02-09 16:14:06 +08:00
boshi91
e9091876b6 feat: add Biren platform documentation for vLLM support
Signed-off-by: boshi91 <boshi91@163.com>
2026-02-09 16:04:19 +08:00
Xiaomeng Zhao
c68dc3682a Merge pull request #4518 from myhloli/dev
Dev
2026-02-09 10:51:03 +08:00
myhloli
40796b9a7e Merge remote-tracking branch 'origin/dev' into dev 2026-02-09 10:50:23 +08:00
myhloli
31122e655b fix: update index.md to improve AMD reference formatting 2026-02-09 10:50:07 +08:00
Xiaomeng Zhao
3eef5157f8 Merge pull request #4513 from opendatalab/master
master->dev
2026-02-06 19:19:07 +08:00
myhloli
5cc95f3760 Update version.py with new version 2026-02-06 03:35:08 +00:00
Xiaomeng Zhao
e31c0ec34d Merge pull request #4508 from opendatalab/release-2.7.6
Release 2.7.6
2026-02-06 11:32:49 +08:00
Xiaomeng Zhao
3e51cb4e81 Merge pull request #4507 from myhloli/dev
fix: update README and index to reflect support for Kunlunxin and Tec…
2026-02-06 11:28:40 +08:00
myhloli
bc63b17ae4 fix: update README and index to reflect support for Kunlunxin and Tecorigin platforms 2026-02-06 10:59:38 +08:00
Xiaomeng Zhao
7f986fc1e3 Merge pull request #4505 from myhloli/dev
Dev
2026-02-06 01:10:28 +08:00
myhloli
5fb8d50b70 fix: update Tecorigin.md to reflect correct CPU and GPU support information 2026-02-05 20:46:59 +08:00
myhloli
3ce9500894 fix: update index.md to include Kunlunxin and reorder Tecorigin reference 2026-02-05 20:40:49 +08:00
Xiaomeng Zhao
142dc30a03 Merge pull request #4503 from myhloli/dev
Dev
2026-02-05 20:19:54 +08:00
myhloli
5e3db4a472 fix: update MinerU support references for Kunlunxin acceleration cards in documentation 2026-02-05 19:43:48 +08:00
myhloli
90b77a2809 feat: add chunked prefill and prefix caching options to utils.py 2026-02-05 18:10:25 +08:00
myhloli
948161c527 fix: remove outdated tips regarding MinerU support for Cambricon acceleration cards in Kunlunxin.md 2026-02-05 16:58:39 +08:00
Xiaomeng Zhao
5397c74a34 Merge pull request #4500 from myhloli/dev
Dev
2026-02-05 15:46:32 +08:00
myhloli
97450688d6 fix: update status indicators in documentation and improve config handling in utils.py 2026-02-05 15:09:24 +08:00
myhloli
6e7c6b082d feat: add interline region filtering option to batch_predict method 2026-02-05 14:51:05 +08:00
myhloli
6f281be4ff fix: remove outdated notes and unnecessary lines in Tecorigin.md 2026-02-05 14:40:44 +08:00
Xiaomeng Zhao
880cdd02b2 Merge branch 'opendatalab:dev' into dev 2026-02-05 14:30:19 +08:00
myhloli
73b31d1118 feat: add Kunlunxin platform documentation and Dockerfile for vLLM support 2026-02-05 14:25:43 +08:00
Xiaomeng Zhao
74ec4894e0 Merge pull request #4498 from Arrmsgt/master
fix: update TECOT100 accelerator card support and documentation
2026-02-05 14:24:26 +08:00
Arrmsgt
c1022fc3e2 update Tecorigin.md 2026-02-05 13:39:38 +08:00
Arrmsgt
6270b05d3a update Tecorigin.md 2026-02-05 13:39:13 +08:00
Xiaomeng Zhao
bbd214dbc3 Merge pull request #4475 from opendatalab/master
master->dev
2026-02-02 20:08:32 +08:00
myhloli
5fa66202a7 Update version.py with new version 2026-02-02 11:56:17 +00:00
Xiaomeng Zhao
4dc45f6621 Merge pull request #4474 from opendatalab/dev
Dev
2026-02-02 19:54:52 +08:00
Xiaomeng Zhao
65b3204d5a Merge pull request #4473 from myhloli/dev
Dev
2026-02-02 19:53:45 +08:00
myhloli
636bd89b38 fix: remove unnecessary blank line in os_env_config.py 2026-02-02 19:49:30 +08:00
myhloli
586a4fb06b feat: enhance PDF rendering options with thread count and timeout details 2026-02-02 19:30:03 +08:00
myhloli
951ebd8c04 feat: add support for configurable thread count in PDF rendering 2026-02-02 19:21:09 +08:00
Xiaomeng Zhao
30758634e3 Merge pull request #4471 from myhloli/dev
fix: update accelerator card usage instructions across multiple docum…
2026-02-02 15:21:42 +08:00
myhloli
aa960b105a fix: update accelerator card usage instructions across multiple documentation files 2026-02-02 15:17:59 +08:00
Xiaomeng Zhao
eba787c22b Merge pull request #4459 from opendatalab/dev
Dev
2026-01-30 23:36:54 +08:00
Xiaomeng Zhao
0a288743ba Merge pull request #4458 from myhloli/dev
Fix duplicate instruction in Cambricon.md
2026-01-30 23:35:01 +08:00
Xiaomeng Zhao
538280f589 Fix duplicate instruction in Cambricon.md
Removed duplicate instruction about entering the vllm virtual environment.
2026-01-30 23:32:43 +08:00
Xiaomeng Zhao
0af0080c85 Merge pull request #4457 from myhloli/dev
Dev
2026-01-30 23:21:15 +08:00
Xiaomeng Zhao
25058ea982 更新 IluvatarCorex.md 2026-01-30 23:17:58 +08:00
Xiaomeng Zhao
41f0e3e26d 更新 Cambricon.md 2026-01-30 23:17:38 +08:00
Xiaomeng Zhao
0624f7eb5b Merge pull request #4456 from opendatalab/master
master->dev
2026-01-30 21:56:20 +08:00
Xiaomeng Zhao
4fef9e863c Merge pull request #4455 from myhloli/dev
fix: update Cambricon documentation to correct accelerator card refer…
2026-01-30 21:55:43 +08:00
myhloli
97d1a9b1ed fix: update Cambricon documentation to correct accelerator card reference 2026-01-30 21:54:56 +08:00
Xiaomeng Zhao
d17a5ff7f2 Merge pull request #4454 from myhloli/dev
Dev
2026-01-30 21:48:06 +08:00
myhloli
47c207a906 Update version.py with new version 2026-01-30 13:45:23 +00:00
myhloli
a91c35137a fix: correct formatting in Cambricon documentation for clarity 2026-01-30 21:44:27 +08:00
Xiaomeng Zhao
c2c998ae11 Merge pull request #4453 from opendatalab/release-2.7.4
Release 2.7.4
2026-01-30 21:43:18 +08:00
Xiaomeng Zhao
b2aa762b7f Merge pull request #4452 from myhloli/dev
fix: correct release date for 2.7.4 in changelog
2026-01-30 21:42:51 +08:00
myhloli
60ba38e7b9 fix: correct release date for 2.7.4 in changelog 2026-01-30 21:41:48 +08:00
Xiaomeng Zhao
3fff71b76a Merge pull request #4451 from opendatalab/release-2.7.4
Release 2.7.4
2026-01-30 21:38:38 +08:00
Xiaomeng Zhao
fe87f871ef Merge pull request #4450 from myhloli/dev
Dev
2026-01-30 21:37:06 +08:00
myhloli
28433751fe fix: update mineru package version in mlu.Dockerfile to 2.7.4 2026-01-30 21:28:15 +08:00
myhloli
aabed4edc6 feat: add MLU support for compute capability detection in utils.py 2026-01-30 21:27:07 +08:00
myhloli
6d86ce8fcb feat: update changelog to include support for IluvatarCorex and Cambricon platforms 2026-01-30 21:23:50 +08:00
myhloli
a066861c4c fix: update Cambricon documentation for clarity and improve Dockerfile comments 2026-01-30 21:18:37 +08:00
myhloli
2037a59e3e feat: enhance Dockerfile and documentation for vllm and lmdeploy support 2026-01-30 20:42:55 +08:00
myhloli
d02e5b7be4 feat: add MLU support and update documentation for Cambricon integration 2026-01-30 15:19:58 +08:00
myhloli
3e5fa8770f feat: add MLU support and update documentation for Cambricon integration 2026-01-30 15:10:14 +08:00
myhloli
0d6211aa52 fix: add tip regarding vllm memory release issue in Iluvatar documentation 2026-01-30 10:43:41 +08:00
myhloli
eb31d307ae fix: update compilation configuration for corex device type in vlm_analyze.py 2026-01-30 03:08:49 +08:00
myhloli
6d2fe791a5 fix: update compilation configuration for corex device type in vlm_analyze.py 2026-01-30 02:53:03 +08:00
myhloli
b9d2b3de09 fix: update vllm engine configuration for corex device type in vlm_analyze.py 2026-01-30 02:50:41 +08:00
myhloli
56fca04b22 fix: update base image in corex Dockerfile to use the latest version 2026-01-30 02:07:41 +08:00
myhloli
92af1c405d fix: update MinerU support details for Iluvatar accelerator in documentation 2026-01-29 22:06:04 +08:00
myhloli
bdbee2b3ba feat: add Dockerfile for corex environment setup and update vllm server configurations 2026-01-29 22:05:03 +08:00
Xiaomeng Zhao
dc572f4c30 Merge pull request #4438 from myhloli/dev
fix: clarify usage instructions for 310p accelerator in Ascend.md
2026-01-29 16:52:51 +08:00
myhloli
e47dfd9b55 fix: clarify usage instructions for 310p accelerator in Ascend.md 2026-01-29 16:52:11 +08:00
Xiaomeng Zhao
b6f792ec2c Merge pull request #4435 from guguducken/add-clean-task
add background task for clean temp file in api
2026-01-29 16:42:18 +08:00
Xiaomeng Zhao
c77edb27bc Refactor fast_api.py for logging and concurrency 2026-01-29 16:39:50 +08:00
guguducken
df66af3f97 fix background_tasks arg location and move import 2026-01-29 16:37:45 +08:00
Xiaomeng Zhao
7f1639a29c Update mineru/cli/fast_api.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-01-29 16:21:10 +08:00
Xiaomeng Zhao
e5c4881a50 Update mineru/cli/fast_api.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-01-29 16:21:00 +08:00
Xiaomeng Zhao
4b88f2a25e Update mineru/cli/fast_api.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-01-29 16:20:13 +08:00
Xiaomeng Zhao
2697647837 Update mineru/cli/fast_api.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-01-29 16:19:40 +08:00
guguducken
c8c86184cd add background task for clean temp file in api 2026-01-29 15:24:54 +08:00
Xiaomeng Zhao
68ba0a124d Merge pull request #4434 from opendatalab/copilot/analyze-issue-4433 2026-01-29 12:32:57 +08:00
Xiaomeng Zhao
54c930cb24 Merge pull request #4421 from pgoslatara/actup/update-actions-1769428557
chore: Update outdated GitHub Actions versions
2026-01-27 15:42:13 +08:00
Padraic Slattery
2fed8f4f8f chore: Update outdated GitHub Actions versions 2026-01-26 12:55:57 +01:00
Xiaomeng Zhao
c2b3a3a5b2 Merge pull request #4420 from opendatalab/master
master->dev
2026-01-26 19:42:36 +08:00
37 changed files with 1255 additions and 527 deletions

View File

@@ -20,13 +20,13 @@ jobs:
steps:
- name: PDF cli
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
ref: dev
fetch-depth: 2
- name: install uv
uses: astral-sh/setup-uv@v5
uses: astral-sh/setup-uv@v7
- name: install&test
run: |

View File

@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout master
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
ref: dev
- name: Deploy docs

View File

@@ -16,13 +16,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
ref: master
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: "3.10"
@@ -64,7 +64,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
ref: master
fetch-depth: 0
@@ -75,7 +75,7 @@ jobs:
cat mineru/version.py
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
@@ -95,7 +95,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
ref: master
fetch-depth: 0
@@ -110,7 +110,7 @@ jobs:
python -m build --wheel
- name: Upload artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v6
with:
name: wheel-file
path: dist/*.whl
@@ -121,10 +121,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: Download artifact
uses: actions/download-artifact@v4
uses: actions/download-artifact@v7
with:
name: wheel-file
path: dist

View File

@@ -45,16 +45,27 @@
# Changelog
- 2026/01/23 2.7.2 Release
- Added support for domestic computing platforms Hygon, Enflame, and Moore Threads. Currently, the officially supported domestic computing platforms include:
- [Ascend](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Ascend/)
- [T-Head](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/THead/)
- [METAX](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/METAX/)
- 2026/02/06 2.7.6 Release
- Added support for the domestic computing platforms Kunlunxin and Tecorigin; currently, the domestic computing platforms that have been adapted and supported by the official team and vendors include:
- [Ascend](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Ascend)
- [T-Head](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/THead)
- [METAX](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/METAX)
- [Hygon](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Hygon/)
- [Enflame](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Enflame/)
- [MooreThreads](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/MooreThreads/)
- MinerU continues to ensure compatibility with domestic hardware platforms, supporting mainstream chip architectures. With secure and reliable technology, we empower researchers, government, and enterprises to reach new heights in document digitization!
- Cross-page table merging optimization, improving merge success rate and merge quality
- [IluvatarCorex](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/IluvatarCorex/)
- [Cambricon](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Cambricon/)
- [Kunlunxin](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Kunlunxin/)
- [Tecorigin](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Tecorigin/)
- [Biren](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Biren/)
- MinerU continues to support domestic hardware platforms and mainstream chip architectures. With secure and reliable technology, it helps research, government, and enterprise users reach new heights in document digitization!
- 2026/01/30 2.7.4 Release
- Added support for domestic computing platforms IluvatarCorex and Cambricon.
- 2026/01/23 2.7.2 Release
- Added support for domestic computing platforms Hygon, Enflame, and Moore Threads.
- Cross-page table merging optimization, improving merge success rate and merge quality.
- 2026/01/06 2.7.1 Release
- fix bug: #4300
@@ -308,24 +319,25 @@ Currently, some models in this project are trained based on YOLO. However, since
# Citation
```bibtex
@misc{niu2025mineru25decoupledvisionlanguagemodel,
title={MinerU2.5: A Decoupled Vision-Language Model for Efficient High-Resolution Document Parsing},
author={Junbo Niu and Zheng Liu and Zhuangcheng Gu and Bin Wang and Linke Ouyang and Zhiyuan Zhao and Tao Chu and Tianyao He and Fan Wu and Qintong Zhang and Zhenjiang Jin and Guang Liang and Rui Zhang and Wenzheng Zhang and Yuan Qu and Zhifei Ren and Yuefeng Sun and Yuanhong Zheng and Dongsheng Ma and Zirui Tang and Boyu Niu and Ziyang Miao and Hejun Dong and Siyi Qian and Junyuan Zhang and Jingzhou Chen and Fangdong Wang and Xiaomeng Zhao and Liqun Wei and Wei Li and Shasha Wang and Ruiliang Xu and Yuanyuan Cao and Lu Chen and Qianqian Wu and Huaiyu Gu and Lindong Lu and Keming Wang and Dechen Lin and Guanlin Shen and Xuanhe Zhou and Linfeng Zhang and Yuhang Zang and Xiaoyi Dong and Jiaqi Wang and Bo Zhang and Lei Bai and Pei Chu and Weijia Li and Jiang Wu and Lijun Wu and Zhenxiang Li and Guangyu Wang and Zhongying Tu and Chao Xu and Kai Chen and Yu Qiao and Bowen Zhou and Dahua Lin and Wentao Zhang and Conghui He},
year={2025},
eprint={2509.22186},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2509.22186},
@article{dong2026minerudiffusion,
title={MinerU-Diffusion: Rethinking Document OCR as Inverse Rendering via Diffusion Decoding},
author={Dong, Hejun and Niu, Junbo and Wang, Bin and Zeng, Weijun and Zhang, Wentao and He, Conghui},
journal={arXiv preprint arXiv:2603.22458},
year={2026}
}
@misc{wang2024mineruopensourcesolutionprecise,
title={MinerU: An Open-Source Solution for Precise Document Content Extraction},
author={Bin Wang and Chao Xu and Xiaomeng Zhao and Linke Ouyang and Fan Wu and Zhiyuan Zhao and Rui Xu and Kaiwen Liu and Yuan Qu and Fukai Shang and Bo Zhang and Liqun Wei and Zhihao Sui and Wei Li and Botian Shi and Yu Qiao and Dahua Lin and Conghui He},
year={2024},
eprint={2409.18839},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2409.18839},
@article{niu2025mineru2,
title={MinerU2.5: A decoupled vision-language model for efficient high-resolution document parsing},
author={Niu, Junbo and Liu, Zheng and Gu, Zhuangcheng and Wang, Bin and Ouyang, Linke and Zhao, Zhiyuan and Chu, Tao and He, Tianyao and Wu, Fan and Zhang, Qintong and others},
journal={arXiv preprint arXiv:2509.22186},
year={2025}
}
@article{wang2024mineru,
title={Mineru: An open-source solution for precise document content extraction},
author={Wang, Bin and Xu, Chao and Zhao, Xiaomeng and Ouyang, Linke and Wu, Fan and Zhao, Zhiyuan and Xu, Rui and Liu, Kaiwen and Qu, Yuan and Shang, Fukai and others},
journal={arXiv preprint arXiv:2409.18839},
year={2024}
}
@article{he2024opendatalab,
@@ -348,6 +360,7 @@ Currently, some models in this project are trained based on YOLO. However, since
# Links
- [MinerU-Diffusion: Rethinking Document OCR as Inverse Rendering via Diffusion Decoding](https://github.com/opendatalab/MinerU-Diffusion)
- [Easy Data Preparation with latest LLMs-based Operators and Pipelines](https://github.com/OpenDCAI/DataFlow)
- [Vis3 (OSS browser based on s3)](https://github.com/opendatalab/Vis3)
- [LabelU (A Lightweight Multi-modal Data Annotation Tool)](https://github.com/opendatalab/labelU)

View File

@@ -45,15 +45,26 @@
# 更新记录
- 2026/01/23 2.7.2 发布
- 新增国产算力平台海光、燧原、摩尔线程的适配支持,目前已由官方适配并支持的国产算力平台包括:
- 2026/02/06 2.7.6 发布
- 新增国产算力平台昆仑芯、太初元碁的适配支持,目前已由官方和厂商适配并支持的国产算力平台包括:
- [昇腾 Ascend](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Ascend)
- [平头哥 T-Head](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/THead)
- [沐曦 METAX](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/METAX)
- [海光 Hygon](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Hygon/)
- [燧原 Enflame](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Enflame/)
- [摩尔线程 MooreThreads](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/MooreThreads/)
- [天数智芯 IluvatarCorex](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/IluvatarCorex/)
- [寒武纪 Cambricon](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Cambricon/)
- [昆仑芯 Kunlunxin](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Kunlunxin/)
- [太初元碁 Tecorigin](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Tecorigin/)
- [壁仞 Biren](https://opendatalab.github.io/MinerU/zh/usage/acceleration_cards/Biren/)
- MinerU 持续兼容国产硬件平台,支持主流芯片架构。以安全可靠的技术,助力科研、政企用户迈向文档数字化新高度!
- 2026/01/30 2.7.4 发布
- 新增国产算力平台天数智芯、寒武纪的适配支持。
- 2026/01/23 2.7.2 发布
- 新增国产算力平台海光、燧原、摩尔线程的适配支持
- 跨页表合并优化,提升合并成功率与合并效果
- 2026/01/06 2.7.1 发布
@@ -315,24 +326,18 @@ mineru -p <input_path> -o <output_path> -b pipeline
# Citation
```bibtex
@misc{niu2025mineru25decoupledvisionlanguagemodel,
title={MinerU2.5: A Decoupled Vision-Language Model for Efficient High-Resolution Document Parsing},
author={Junbo Niu and Zheng Liu and Zhuangcheng Gu and Bin Wang and Linke Ouyang and Zhiyuan Zhao and Tao Chu and Tianyao He and Fan Wu and Qintong Zhang and Zhenjiang Jin and Guang Liang and Rui Zhang and Wenzheng Zhang and Yuan Qu and Zhifei Ren and Yuefeng Sun and Yuanhong Zheng and Dongsheng Ma and Zirui Tang and Boyu Niu and Ziyang Miao and Hejun Dong and Siyi Qian and Junyuan Zhang and Jingzhou Chen and Fangdong Wang and Xiaomeng Zhao and Liqun Wei and Wei Li and Shasha Wang and Ruiliang Xu and Yuanyuan Cao and Lu Chen and Qianqian Wu and Huaiyu Gu and Lindong Lu and Keming Wang and Dechen Lin and Guanlin Shen and Xuanhe Zhou and Linfeng Zhang and Yuhang Zang and Xiaoyi Dong and Jiaqi Wang and Bo Zhang and Lei Bai and Pei Chu and Weijia Li and Jiang Wu and Lijun Wu and Zhenxiang Li and Guangyu Wang and Zhongying Tu and Chao Xu and Kai Chen and Yu Qiao and Bowen Zhou and Dahua Lin and Wentao Zhang and Conghui He},
year={2025},
eprint={2509.22186},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2509.22186},
@article{niu2025mineru2,
title={MinerU2.5: A decoupled vision-language model for efficient high-resolution document parsing},
author={Niu, Junbo and Liu, Zheng and Gu, Zhuangcheng and Wang, Bin and Ouyang, Linke and Zhao, Zhiyuan and Chu, Tao and He, Tianyao and Wu, Fan and Zhang, Qintong and others},
journal={arXiv preprint arXiv:2509.22186},
year={2025}
}
@misc{wang2024mineruopensourcesolutionprecise,
title={MinerU: An Open-Source Solution for Precise Document Content Extraction},
author={Bin Wang and Chao Xu and Xiaomeng Zhao and Linke Ouyang and Fan Wu and Zhiyuan Zhao and Rui Xu and Kaiwen Liu and Yuan Qu and Fukai Shang and Bo Zhang and Liqun Wei and Zhihao Sui and Wei Li and Botian Shi and Yu Qiao and Dahua Lin and Conghui He},
year={2024},
eprint={2409.18839},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2409.18839},
@article{wang2024mineru,
title={Mineru: An open-source solution for precise document content extraction},
author={Wang, Bin and Xu, Chao and Zhao, Xiaomeng and Ouyang, Linke and Wu, Fan and Zhao, Zhiyuan and Xu, Rui and Liu, Kaiwen and Qu, Yuan and Shang, Fukai and others},
journal={arXiv preprint arXiv:2409.18839},
year={2024}
}
@article{he2024opendatalab,

View File

@@ -0,0 +1,27 @@
# Base image containing the vLLM inference environment, requiring amd64(x86-64) CPU + iluvatar GPU.
FROM crpi-vofi3w62lkohhxsp.cn-shanghai.personal.cr.aliyuncs.com/opendatalab-mineru/corex:4.4.0_torch2.7.1_vllm0.11.2_py3.10
# Install Noto fonts for Chinese characters
RUN apt-get update && \
apt-get install -y \
fonts-noto-core \
fonts-noto-cjk \
fontconfig && \
fc-cache -fv && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install mineru latest
RUN python3 -m pip install -U pip -i https://mirrors.aliyun.com/pypi/simple && \
python3 -m pip install 'mineru[core]>=2.7.4' \
numpy==1.26.4 \
opencv-python==4.11.0.86 \
-i https://mirrors.aliyun.com/pypi/simple && \
python3 -m pip cache purge
# Download models and update the configuration file
RUN /bin/bash -c "mineru-models-download -s modelscope -m all"
# Set the entry point to activate the virtual environment and run the command line tool
ENTRYPOINT ["/bin/bash", "-c", "export MINERU_MODEL_SOURCE=local && exec \"$@\"", "--"]

View File

@@ -2,7 +2,7 @@
FROM harbor.sourcefind.cn:5443/dcu/admin/base/vllm:0.9.2-ubuntu22.04-dtk25.04.2-1226-das1.7-py3.10-20251226
# Install libgl for opencv support & Noto fonts for Chinese characters
# Install Noto fonts for Chinese characters
RUN apt-get update && \
apt-get install -y \
fonts-noto-core \

View File

@@ -2,7 +2,7 @@
FROM crpi-vofi3w62lkohhxsp.cn-shanghai.personal.cr.aliyuncs.com/opendatalab-mineru/gcu:docker_images_topsrider_i3x_3.6.20260106_vllm0.11_pytorch2.8.0
# Install libgl for opencv support & Noto fonts for Chinese characters
# Install Noto fonts for Chinese characters
RUN echo 'deb http://mirrors.aliyun.com/ubuntu/ noble main restricted universe multiverse\n\
deb http://mirrors.aliyun.com/ubuntu/ noble-updates main restricted universe multiverse\n\
deb http://mirrors.aliyun.com/ubuntu/ noble-backports main restricted universe multiverse\n\

View File

@@ -0,0 +1,33 @@
# Base image containing the vLLM inference environment, requiring amd64(x86-64) CPU + Kunlun XPU.
FROM docker.1ms.run/wjie520/vllm_kunlun:v0.10.1.1rc1
# Install Noto fonts for Chinese characters
RUN apt-get update && \
apt-get install -y \
fonts-noto-core \
fonts-noto-cjk \
fontconfig && \
fc-cache -fv && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install mineru latest
RUN python3 -m pip install -U pip -i https://mirrors.aliyun.com/pypi/simple && \
python3 -m pip install "mineru[api,gradio]>=2.7.6" \
"matplotlib>=3.10,<4" \
"ultralytics>=8.3.48,<9" \
"doclayout_yolo==0.0.4" \
"ftfy>=6.3.1,<7" \
"shapely>=2.0.7,<3" \
"pyclipper>=1.3.0,<2" \
"omegaconf>=2.3.0,<3" \
-i https://mirrors.aliyun.com/pypi/simple && \
sed -i '1,200{s/self\.act = act_layer()/self.act = nn.GELU()/;t;b};' /root/miniconda/envs/vllm_kunlun_0.10.1.1/lib/python3.10/site-packages/vllm_kunlun/models/qwen2_vl.py && \
python3 -m pip cache purge
# Download models and update the configuration file
RUN /bin/bash -c "mineru-models-download -s modelscope -m all"
# Set the entry point to activate the virtual environment and run the command line tool
ENTRYPOINT ["/bin/bash", "-c", "export MINERU_MODEL_SOURCE=local && exec \"$@\"", "--"]

View File

@@ -0,0 +1,42 @@
# 基础镜像配置 vLLM 或 LMDeploy ,请根据实际需要选择其中一个,要求 amd64(x86-64) CPU + Cambricon MLU.
# Base image containing the LMDEPLOY inference environment, requiring amd64(x86-64) CPU + Cambricon MLU.
FROM crpi-4crprmm5baj1v8iv.cn-hangzhou.personal.cr.aliyuncs.com/lmdeploy_dlinfer/camb:mineru25
ARG BACKEND=lmdeploy
# Base image containing the vLLM inference environment, requiring amd64(x86-64) CPU + Cambricon MLU.
# FROM crpi-vofi3w62lkohhxsp.cn-shanghai.personal.cr.aliyuncs.com/opendatalab-mineru/mlu:vllm0.8.3-torch2.6.0-torchmlu1.26.1-ubuntu22.04-py310
# ARG BACKEND=vllm
# Install Noto fonts for Chinese characters
RUN apt-get update && \
apt-get install -y \
fonts-noto-core \
fonts-noto-cjk \
fontconfig && \
fc-cache -fv && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install mineru latest
RUN /bin/bash -c '\
if [ "$BACKEND" = "vllm" ]; then \
source /torch/venv3/pytorch_infer/bin/activate; \
fi && \
python3 -m pip install -U pip -i https://mirrors.aliyun.com/pypi/simple && \
python3 -m pip install "mineru[core]>=2.7.4" \
numpy==1.26.4 \
opencv-python==4.11.0.86 \
-i https://mirrors.aliyun.com/pypi/simple && \
python3 -m pip install $(if [ "$BACKEND" = "lmdeploy" ]; then echo "accelerate==1.2.0"; else echo "transformers==4.50.3"; fi) && \
python3 -m pip cache purge'
# Download models and update the configuration file
RUN /bin/bash -c '\
if [ "$BACKEND" = "vllm" ]; then \
source /torch/venv3/pytorch_infer/bin/activate; \
fi && \
mineru-models-download -s modelscope -m all'
WORKDIR /workspace
# Set the entry point to activate the virtual environment and run the command line tool
ENTRYPOINT ["/bin/bash", "-c", "export MINERU_MODEL_SOURCE=local && exec \"$@\"", "--"]

View File

@@ -77,7 +77,8 @@ Here are the environment variables and their descriptions:
- `MINERU_MODEL_SOURCE`:
* Used to specify model source
* supports `huggingface/modelscope/local`
* defaults to `huggingface`, can be switched to `modelscope` or local models through environment variables.
* Default is `huggingface`; you can switch via an environment variable to `modelscope` to use a domestic acceleration mirror, or switch to `local` to use a local model.
- `MINERU_TOOLS_CONFIG_JSON`:
* Used to specify configuration file path
@@ -101,8 +102,14 @@ Here are the environment variables and their descriptions:
* Default is `true`, can be set to `false` via environment variable to disable table merging functionality.
- `MINERU_PDF_RENDER_TIMEOUT`:
* Used to set the timeout period (in seconds) for rendering PDF to images
* Default is `300` seconds, can be set to other values via environment variable to adjust the image rendering timeout.
* Used to set the timeout (in seconds) for rendering PDFs to images.
* Default is `300` seconds; you can set a different value via an environment variable to adjust the rendering timeout.
* Only effective on Linux and macOS systems.
- `MINERU_PDF_RENDER_THREADS`:
* Used to set the number of threads used when rendering PDFs to images.
* Default is `4`; you can set a different value via an environment variable to adjust the number of threads for image rendering.
* Only effective on Linux and macOS systems.
- `MINERU_INTRA_OP_NUM_THREADS`:
* Used to set the intra_op thread count for ONNX models, affects the computation speed of individual operators

View File

@@ -86,7 +86,11 @@ docker run -u root --name mineru_docker --privileged=true \
您也可以直接通过替换`/bin/bash`为服务启动命令来启动MinerU服务详细说明请参考[通过命令启动服务](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#apiwebuihttp-clientserver)。
>[!NOTE]
> 由于310p加速卡不支持bf16精度因此在使用该加速卡时执行任意与`vllm`相关命令需追加`--enforce-eager --dtype float16`参数。
> 由于310p加速卡不支持图模式与bf16精度因此在使用该加速卡时执行任意与`vllm`相关命令需追加`--enforce-eager --dtype float16`参数。
> 例如:
> ```bash
> mineru-openai-server --port 30000 --enforce-eager --dtype float16
> ```
## 4. 注意事项
@@ -171,4 +175,5 @@ docker run -u root --name mineru_docker --privileged=true \
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
>NPU加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[ASCEND_RT_VISIBLE_DEVICES](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/850alpha001/maintenref/envvar/envref_07_0028.html)
> - NPU加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[ASCEND_RT_VISIBLE_DEVICES](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/850alpha001/maintenref/envvar/envref_07_0028.html)
> - 在Ascend平台可以通过`npu-smi info`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -0,0 +1,112 @@
## 1. 测试平台
以下为本指南测试使用的平台信息,供参考:
```
os: Ubuntu 22.04.4 LTS
cpu: Intel x86-64
gpu: Biren 106C
driver: 1.10.0
docker: 28.0.4
```
## 2. 环境准备
### 2.1 下载并加载镜像 vllm
```bash
wget http://birentech.com/xxx/MinerU/mineru-vllm.tar 链接获取请联系壁仞内部人员邮箱MonaLiu@birentech.com
docker load -i mineru-vllm.tar
```
## 3. 启动 Docker 容器
```bash
docker run -it --name mineru_docker \
--privileged \
--network=host \
--shm-size=100G \
-e MINERU_MODEL_SOURCE=local \
-e MINERU_DEVICE_MODEL=supa \
-e SHAPE_TRANSFORM_GRANK=true \
mineru:biren-vllm-latest \
/bin/bash
```
执行该命令后您将进入到Docker容器的交互式终端您可以直接在容器内运行MinerU相关命令来使用MinerU的功能。
您也可以直接通过替换`/bin/bash`为服务启动命令来启动MinerU服务详细说明请参考[通过命令启动服务](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#apiwebuihttp-clientserver)。
## 4. 注意事项
不同环境下MinerU对Biren加速卡的支持情况如下表所示
<table border="1">
<thead>
<tr>
<th rowspan="2" colspan="2">使用场景</th>
<th colspan="2">容器环境</th>
</tr>
<tr>
<th>vllm</th>
</tr>
</thead>
<tbody>
<tr>
<td rowspan="3">命令行工具(mineru)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td rowspan="3">fastapi服务(mineru-api)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td rowspan="3">gradio界面(mineru-gradio)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td colspan="2">openai-server服务mineru-openai-server</td>
<td>🟢</td>
</tr>
<tr>
<td colspan="2">数据并行 (--data-parallel-size)</td>
<td>🔴</td>
</tr>
</tbody>
</table>
注:
🟢: 支持运行较稳定精度与Nvidia GPU基本一致
🟡: 支持但较不稳定,在某些场景下可能出现异常,或精度存在一定差异
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
> - Biren加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明,
>将环境变量`CUDA_VISIBLE_DEVICES`替换为`SUPA_VISIBLE_DEVICES`即可。
> - 在壁仞平台可以通过`brsmi`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -1,253 +1,160 @@
# MinerU
## 1. 环境准备
容器启动方式见第3节
### 1.1 获取代码
## 1. 测试平台
以下为本指南测试使用的平台信息,供参考:
```
git clone https://github.com/opendatalab/MinerU.git
git checkout fa1149cd4abf9db5e0f13e4e074cdb568be189f4
```
### 1.2 安装依赖
```
source /torch/venv3/pytorch_infer/bin/activate
pip install accelerate==1.11.0 doclayout_yolo==0.0.4 thop==0.1.1.post2209072238 ultralytics-thop==2.0.18 ultralytics==8.3.228
# requirements_check.txt具体内容在下面
pip install -r requirements_check.txt
cd MinerU
pip install -e .[core] --no-deps
```
requirements_check.txt
```
# triton==3.0.0+mlu1.3.1
# torch==2.5.0+cpu
# torchvision==0.20.0+cpu
# === 1. 已安装且版本相同 ===
# (这些包已满足要求, 无需操作)
# === 2. 已安装但版本不同 ===
# (运行 pip install -r 将强制更新到左侧的目标版本)
# accelerate==1.11.0 # 0.33.0
beautifulsoup4==4.14.2 # 4.12.3
cffi==2.0.0 # 1.17.1
huggingface-hub==0.36.0 # 0.25.2
jiter==0.12.0 # 0.8.2
openai==2.8.0 # 1.59.7
pillow==11.3.0 # 10.4.0
sympy==1.14.0 # 1.13.1
tokenizers==0.22.1 # 0.21.0
# torch==2.9.1 # 2.5.0+cpu
# torchvision==0.24.1 # 0.20.0+cpu
transformers==4.57.1 # 4.48.0
# triton==3.5.1 # 3.0.0+mlu1.3.1
typing-extensions==4.15.0 # 4.12.2
# === 3. 未安装 ===
# (运行 pip install -r 将安装这些包)
aiofiles==24.1.0
albucore==0.0.24
albumentations==2.0.8
antlr4-python3-runtime==4.9.3
brotli==1.2.0
coloredlogs==15.0.1
colorlog==6.10.1
cryptography==46.0.3
# doclayout_yolo==0.0.4
fast-langdetect==0.2.5
fasttext-predict==0.9.2.4
ffmpy==1.0.0
flatbuffers==25.9.23
ftfy==6.3.1
gradio-client==1.13.3
gradio-pdf==0.0.22
gradio==5.49.1
groovy==0.1.2
hf-xet==1.2.0
httpx-retries==0.4.5
humanfriendly==10.0
imageio==2.37.2
json-repair==0.53.0
magika==0.6.3
markdown-it-py==4.0.0
mdurl==0.1.2
mineru-vl-utils==0.1.15
mineru==2.6.4
modelscope==1.31.0
# nvidia-cublas-cu12==12.8.4.1
# nvidia-cuda-cupti-cu12==12.8.90
# nvidia-cuda-nvrtc-cu12==12.8.93
# nvidia-cuda-runtime-cu12==12.8.90
# nvidia-cudnn-cu12==9.10.2.21
# nvidia-cufft-cu12==11.3.3.83
# nvidia-cufile-cu12==1.13.1.3
# nvidia-curand-cu12==10.3.9.90
# nvidia-cusolver-cu12==11.7.3.90
# nvidia-cusparse-cu12==12.5.8.93
# nvidia-cusparselt-cu12==0.7.1
# nvidia-nccl-cu12==2.27.5
# nvidia-nvjitlink-cu12==12.8.93
# nvidia-nvshmem-cu12==3.3.20
# nvidia-nvtx-cu12==12.8.90
omegaconf==2.3.0
onnxruntime==1.23.2
orjson==3.11.4
pdfminer.six==20250506
pdftext==0.6.3
polars-runtime-32==1.35.2
polars==1.35.2
pyclipper==1.3.0.post6
pydantic-settings==2.12.0
pydub==0.25.1
pypdf==6.2.0
pypdfium2==4.30.0
python-multipart==0.0.20
reportlab==4.4.4
rich==14.2.0
robust-downloader==0.0.2
ruff==0.14.5
safehttpx==0.1.7
scikit-image==0.25.2
seaborn==0.13.2
semantic-version==2.10.0
shapely==2.1.2
shellingham==1.5.4
simsimd==6.5.3
stringzilla==4.2.3
# thop==0.1.1.post2209072238
tifffile==2025.5.10
typer==0.20.0
typing-inspection==0.4.2
# ultralytics-thop==2.0.18
# ultralytics==8.3.228
```
### 1.3 修改代码
/raid_data/home/yqk/mineru-251114/MinerU/mineru/backend/pipeline/pipeline_analyze.py, line 1
添加代码
```
# 添加MLU支持
import torch_mlu.utils.gpu_migration
# 高版本镜像为
# import torch.mlu.utils.gpu_migration
os: Ubuntu 22.04.5 LTS
cpu: Hygon Hygon C86 7490
mlu: MLU590-M9D
driver: v6.2.11
docker: 28.3.0
```
## 2. 使用方法
```
export HF_ENDPOINT=https://hf-mirror.com
mineru-api --host 0.0.0.0 --port 8009
## 2. 环境准备
>[!NOTE]
>Cambricon加速卡支持使用`lmdeploy``vllm`进行VLM模型推理加速。请根据实际需求选择安装和使用其中之一:
### 2.1 使用 Dockerfile 构建镜像 lmdeploy
```bash
wget https://gcore.jsdelivr.net/gh/opendatalab/MinerU@master/docker/china/mlu.Dockerfile
docker build --network=host -t mineru:mlu-lmdeploy-latest -f mlu.Dockerfile .
```
## 3. 其他
### 2.2 使用 Dockerfile 构建镜像 vllm
### 3.1 Dify插件配置问题
给Dify的MinerU插件使用时需将Dify的.env文件中FILES_URL设置为http://{ip}:{dify的网页访问端口}。
根据网上找到的很多回答可能会认为需要将FILES_URL设置为http://{ip}:5001并暴露5001端口但其实设置为Dify的网页访问端口即可。
### 3.2 容器启动方式
```
export MY_CONTAINER="[容器名称]"
num=`docker ps -a|grep "$MY_CONTAINER" | wc -l`
echo $num
echo $MY_CONTAINER
if [ 0 -eq $num ];then
docker run -d \
--privileged \
--pid=host \
--net=host \
--shm-size 64g \
--device /dev/cambricon_dev0 \
--device /dev/cambricon_ipcm0 \
--device /dev/cambricon_ctl \
--name $MY_CONTAINER \
-v [/path/to/your/data:/path/to/your/data] \
-v /usr/bin/cnmon:/usr/bin/cnmon \
[镜像名称] \
sleep infinity
docker exec -ti $MY_CONTAINER /bin/bash
else
docker start $MY_CONTAINER
docker exec -ti $MY_CONTAINER /bin/bash
fi
```bash
wget https://gcore.jsdelivr.net/gh/opendatalab/MinerU@master/docker/china/mlu.Dockerfile
# 将基础镜像从 lmdeploy 切换为 vllm
sed -i -e '3,4s/^/# /' -e '6,7s/^# //' mlu.Dockerfile
docker build --network=host -t mineru:mlu-vllm-latest -f mlu.Dockerfile .
```
### 3.3 将上面的过程进行打包
## 3. 启动 Docker 容器
准备好前面的requirements_check.txt
Dockerfile
```
# 1. 使用指定的基础镜像
FROM cambricon-base/pytorch:v25.01-torch2.5.0-torchmlu1.24.1-ubuntu22.04-py310
# 2. 设置环境变量
ENV HF_ENDPOINT=https://hf-mirror.com
# 3. 定义 venv_pip 路径以便复用
# 基础镜像中的虚拟环境路径
ARG VENV_PIP=/torch/venv3/pytorch_infer/bin/pip
# 4. 设置工作目录
WORKDIR /app
# 5. 安装 git (基础镜像可能不包含)
RUN apt-get update && apt-get install -y git && \
rm -rf /var/lib/apt/lists/*
# 6. 复制 requirements_check.txt 到镜像中
# (这个文件需要您在宿主机上和 Dockerfile 放在同一目录下)
COPY requirements_check.txt .
# 7. 步骤 1.1 & 1.2: 获取代码并安装所有依赖
# 在一个 RUN 层中执行所有安装,以优化镜像大小
RUN \
# 1.1 获取代码
echo "Cloning MinerU repository..." && \
git clone https://gh-proxy.org/https://github.com/opendatalab/MinerU.git && \
cd MinerU && \
git checkout fa1149cd4abf9db5e0f13e4e074cdb568be189f4 && \
cd .. && \
\
# 1.2 安装依赖
# 第1个pip install (来自您的步骤)
echo "Installing initial dependencies..." && \
${VENV_PIP} install accelerate==1.11.0 doclayout_yolo==0.0.4 thop==0.1.1.post2209072238 ultralytics-thop==2.0.18 ultralytics==8.3.228 && \
\
# 第2个pip install (来自 requirements_check.txt)
echo "Installing dependencies from requirements_check.txt..." && \
# 注意:基础镜像已包含 torch 和 tritonrequirements_check.txt 中的注释行会被 pip 自动忽略
${VENV_PIP} install -r requirements_check.txt && \
\
# 第3个pip install (本地安装 MinerU)
echo "Installing MinerU in editable mode..." && \
cd MinerU && \
${VENV_PIP} install -e .[core] --no-deps
# 8. 步骤 1.3: 修改代码
# 将 MLU 支持代码添加到指定文件的开头
RUN echo "Applying MLU patch to pipeline_analyze.py..." && \
sed -i '1i# 添加MLU支持\nimport torch_mlu.utils.gpu_migration\n# 高版本镜像为\n# import torch.mlu.utils.gpu_migration\n' \
/app/MinerU/mineru/backend/pipeline/pipeline_analyze.py
```bash
docker run --name mineru_docker \
--privileged \
--ipc=host \
--network=host \
--shm-size=400g \
--ulimit memlock=-1 \
-v /dev:/dev \
-v /lib/modules:/lib/modules:ro \
-v /usr/bin/cnmon:/usr/bin/cnmon \
-e MINERU_MODEL_SOURCE=local \
-e MINERU_LMDEPLOY_DEVICE=camb \
-it mineru:mlu-lmdeploy-latest \
/bin/bash
```
该镜像的启动
>[!TIP]
> 请根据实际情况选择使用`vllm`或`lmdeploy`版本的镜像,如需使用`vllm`,请执行以下操作:
>
> - 替换上述命令中的`mineru:mlu-lmdeploy-latest`为`mineru:mlu-vllm-latest`
>
> - 进入容器后通过以下命令切换venv环境
> ```bash
> source /torch/venv3/pytorch_infer/bin/activate
> ```
>
> - 切换成功后,您可以在命令行前看到`(pytorch_infer)`的标识,这表示您已成功进入`vllm`的虚拟环境。
```
docker run -d --restart=always \
--privileged \
--pid=host \
--net=host \
--shm-size 64g \
--device /dev/cambricon_dev0 \
--device /dev/cambricon_ipcm0 \
--device /dev/cambricon_ctl \
--name mineru_service \
mineru-mlu:latest \
/torch/venv3/pytorch_infer/bin/python /app/MinerU/mineru/cli/fast_api.py --host 0.0.0.0 --port 8009
```
执行该命令后您将进入到Docker容器的交互式终端您可以直接在容器内运行MinerU相关命令来使用MinerU的功能。
您也可以直接通过替换`/bin/bash`为服务启动命令来启动MinerU服务详细说明请参考[通过命令启动服务](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#apiwebuihttp-clientserver)。
## 4. 注意事项
>[!NOTE]
> **兼容性说明**由于寒武纪Cambricon目前对 vLLM v1 引擎的支持尚待完善MinerU 现阶段采用 v0 引擎作为适配方案。
> 受此限制vLLM 的异步引擎Async Engine功能存在兼容性问题可能导致部分使用场景无法正常运行。
> 我们将持续跟进寒武纪对 vLLM v1 引擎的支持进展,并及时在 MinerU 中进行相应的适配与优化。
不同环境下MinerU对Cambricon加速卡的支持情况如下表所示
>[!TIP]
> - `lmdeploy`黄灯问题为不能输入文件夹使用批量解析功能,输入单个文件时表现正常。
> - `vllm`黄灯问题为在精度未对齐,在部分场景下可能出现预期外结果。
<table border="1">
<thead>
<tr>
<th rowspan="2" colspan="2">使用场景</th>
<th colspan="2">容器环境</th>
</tr>
<tr>
<th>vllm</th>
<th>lmdeploy</th>
</tr>
</thead>
<tbody>
<tr>
<td rowspan="3">命令行工具(mineru)</td>
<td>pipeline</td>
<td>🟢</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟡</td>
<td>🟡</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟡</td>
<td>🟢</td>
</tr>
<tr>
<td rowspan="3">fastapi服务(mineru-api)</td>
<td>pipeline</td>
<td>🟢</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🔴</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟡</td>
<td>🟢</td>
</tr>
<tr>
<td rowspan="3">gradio界面(mineru-gradio)</td>
<td>pipeline</td>
<td>🟢</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🔴</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟡</td>
<td>🟢</td>
</tr>
<tr>
<td colspan="2">openai-server服务mineru-openai-server</td>
<td>🟡</td>
<td>🟢</td>
</tr>
<tr>
<td colspan="2">数据并行 (--data-parallel-size/--dp)</td>
<td>🔴</td>
<td>🔴</td>
</tr>
</tbody>
</table>
注:
🟢: 支持运行较稳定精度与Nvidia GPU基本一致
🟡: 支持但较不稳定,在某些场景下可能出现异常,或精度存在一定差异
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
> - Cambricon加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明,
>将环境变量`CUDA_VISIBLE_DEVICES`替换为`MLU_VISIBLE_DEVICES`即可。
> - 在Cambricon平台可以通过`cnmon`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -105,5 +105,6 @@ docker run -u root --name mineru_docker \
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
>GCU加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明,
>将环境变量`CUDA_VISIBLE_DEVICES`替换为`TOPS_VISIBLE_DEVICES`即可。
> - GCU加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明,
>将环境变量`CUDA_VISIBLE_DEVICES`替换为`TOPS_VISIBLE_DEVICES`即可。
> - 在Enflame平台可以通过`efsmi`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -2,7 +2,7 @@
以下为本指南测试使用的平台信息,供参考:
```
os: Ubuntu 22.04.3 LTS
cpu: Hygon Hygon C86-4G(x86-64)
cpu: Hygon C86-4G(x86-64)
dcu: BW200
driver: 6.3.13-V1.12.0a
docker: 20.10.24
@@ -112,4 +112,5 @@ docker run -u root --name mineru_docker \
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
>DCU加速卡指定可用加速卡的方式与AMD GPU类似请参考[GPU isolation techniques](https://rocm.docs.amd.com/en/docs-6.2.4/conceptual/gpu-isolation.html)
> - DCU加速卡指定可用加速卡的方式与AMD GPU类似请参考[GPU isolation techniques](https://rocm.docs.amd.com/en/docs-6.2.4/conceptual/gpu-isolation.html)
> - 在Hygon平台可以通过`hy-smi`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -0,0 +1,123 @@
## 1. 测试平台
以下为本指南测试使用的平台信息,供参考:
```
os: Ubuntu 22.04.5 LTS
cpu: Intel x86-64
gpu: Iluvatar BI-V150
driver: 4.4.0
docker: 28.1.1
```
## 2. 环境准备
### 2.1 使用 Dockerfile 构建镜像
```bash
wget https://gcore.jsdelivr.net/gh/opendatalab/MinerU@master/docker/china/corex.Dockerfile
docker build --network=host -t mineru:corex-vllm-latest -f corex.Dockerfile .
```
## 3. 启动 Docker 容器
```bash
docker run --name mineru_docker \
-v /usr/src:/usr/src \
-v /lib/modules:/lib/modules \
-v /dev:/dev \
--privileged \
--cap-add=ALL \
--pid=host \
--group-add video \
--network=host \
--shm-size '400gb' \
--ulimit memlock=-1 \
--security-opt seccomp=unconfined \
--security-opt apparmor=unconfined \
-e VLLM_ENFORCE_CUDA_GRAPH=1 \
-e MINERU_MODEL_SOURCE=local \
-e MINERU_VLLM_DEVICE=corex \
-it mineru:corex-vllm-latest \
/bin/bash
```
执行该命令后您将进入到Docker容器的交互式终端您可以直接在容器内运行MinerU相关命令来使用MinerU的功能。
您也可以直接通过替换`/bin/bash`为服务启动命令来启动MinerU服务详细说明请参考[通过命令启动服务](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#apiwebuihttp-clientserver)。
## 4. 注意事项
>[!TIP]
>目前Iluvatar方案使用vllm作为推理引擎时可能出现服务停止后显存无法正常释放的问题如果遇到该问题请重启Docker容器以释放显存。
不同环境下MinerU对Iluvatar加速卡的支持情况如下表所示
<table border="1">
<thead>
<tr>
<th rowspan="2" colspan="2">使用场景</th>
<th>容器环境</th>
</tr>
<tr>
<th>vllm</th>
</tr>
</thead>
<tbody>
<tr>
<td rowspan="3">命令行工具(mineru)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td rowspan="3">fastapi服务(mineru-api)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td rowspan="3">gradio界面(mineru-gradio)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td colspan="2">openai-server服务mineru-openai-server</td>
<td>🟢</td>
</tr>
<tr>
<td colspan="2">数据并行 (--data-parallel-size)</td>
<td>🟢</td>
</tr>
</tbody>
</table>
注:
🟢: 支持运行较稳定精度与Nvidia GPU基本一致
🟡: 支持但较不稳定,在某些场景下可能出现异常,或精度存在一定差异
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
> - Iluvatar加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明
> - 在Iluvatar平台可以通过`ixsmi`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -0,0 +1,124 @@
## 1. 测试平台
以下为本指南测试使用的平台信息,供参考:
```
os: Ubuntu 22.04.5 LTS
cpu: Intel x86-64
xpu: P800
driver: 515.58
docker: 20.10.5
```
## 2. 环境准备
### 2.1 使用 Dockerfile 构建镜像 vllm
```bash
wget https://gcore.jsdelivr.net/gh/opendatalab/MinerU@master/docker/china/kxpu.Dockerfile
docker build --network=host -t mineru:kxpu-vllm-latest -f kxpu.Dockerfile .
```
## 3. 启动 Docker 容器
```bash
docker run -u root --name mineru_docker \
--device=/dev/xpu0:/dev/xpu0 \
--device=/dev/xpu1:/dev/xpu1 \
--device=/dev/xpu2:/dev/xpu2 \
--device=/dev/xpu3:/dev/xpu3 \
--device=/dev/xpu4:/dev/xpu4 \
--device=/dev/xpu5:/dev/xpu5 \
--device=/dev/xpu6:/dev/xpu6 \
--device=/dev/xpu7:/dev/xpu7 \
--device=/dev/xpuctrl:/dev/xpuctrl \
--net=host \
--cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
--tmpfs /dev/shm:rw,nosuid,nodev,exec,size=32g \
--cap-add=SYS_PTRACE \
-v /home/users/vllm-kunlun:/home/vllm-kunlun \
-v /usr/local/bin/xpu-smi:/usr/local/bin/xpu-smi \
-w /workspace \
-e MINERU_MODEL_SOURCE=local \
-e MINERU_FORMULA_CH_SUPPORT=true \
-e MINERU_VLLM_DEVICE=kxpu \
-it mineru:kxpu-vllm-latest \
/bin/bash
```
执行该命令后您将进入到Docker容器的交互式终端您可以直接在容器内运行MinerU相关命令来使用MinerU的功能。
您也可以直接通过替换`/bin/bash`为服务启动命令来启动MinerU服务详细说明请参考[通过命令启动服务](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#apiwebuihttp-clientserver)。
## 4. 注意事项
不同环境下MinerU对Kunlunxin加速卡的支持情况如下表所示
<table border="1">
<thead>
<tr>
<th rowspan="2" colspan="2">使用场景</th>
<th>容器环境</th>
</tr>
<tr>
<th>vllm</th>
</tr>
</thead>
<tbody>
<tr>
<td rowspan="3">命令行工具(mineru)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td rowspan="3">fastapi服务(mineru-api)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td rowspan="3">gradio界面(mineru-gradio)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td colspan="2">openai-server服务mineru-openai-server</td>
<td>🟢</td>
</tr>
<tr>
<td colspan="2">数据并行 (--data-parallel-size)</td>
<td>🔴</td>
</tr>
</tbody>
</table>
注:
🟢: 支持运行较稳定精度与Nvidia GPU基本一致
🟡: 支持但较不稳定,在某些场景下可能出现异常,或精度存在一定差异
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
> - Kunlunxin加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明,
>将环境变量`CUDA_VISIBLE_DEVICES`替换为`XPU_VISIBLE_DEVICES`即可。
> - 在Kunlunxin平台可以通过`xpu-smi`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -148,4 +148,5 @@ docker run --ipc host \
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
>MACA加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明。
> - MACA加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明。
> - 在METAX平台可以通过`mx-smi`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -27,6 +27,7 @@ docker run -u root --name mineru_docker \
--shm-size=80g \
--privileged \
-e MTHREADS_VISIBLE_DEVICES=all \
-e MINERU_VLLM_DEVICE=musa \
-e MINERU_MODEL_SOURCE=local \
-it mineru:musa-vllm-latest \
/bin/bash
@@ -112,4 +113,5 @@ docker run -u root --name mineru_docker \
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
>MooreThreads加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[GPU 枚举](https://docs.mthreads.com/cloud-native/cloud-native-doc-online/install_guide/#gpu-%E6%9E%9A%E4%B8%BE)
> - MooreThreads加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[GPU 枚举](https://docs.mthreads.com/cloud-native/cloud-native-doc-online/install_guide/#gpu-%E6%9E%9A%E4%B8%BE)
> - 在MooreThreads平台可以通过`mthreads-gmi`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -127,7 +127,7 @@ docker run --privileged=true \
</tr>
<tr>
<td colspan="2">数据并行 (--data-parallel-size/--dp)</td>
<td>🟡</td>
<td>🔴</td>
<td>🔴</td>
</tr>
</tbody>
@@ -139,4 +139,5 @@ docker run --privileged=true \
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
>PPU加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明。
> - PPU加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明。
> - 在T-Head平台可以通过`ppu-smi`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -1,73 +1,120 @@
# TECO适配
## 1. 测试平台
以下为本指南测试使用的平台信息,供参考:
```
os: Ubuntu 22.04.5 LTS
cpu: AMD EPYC (amd64)
gpu: T100
driver: 3.0.0
docker: 28.0.4
```
## 快速开始
使用本工具执行推理的主要流程如下:
1. 基础环境安装:介绍推理前需要完成的基础环境检查和安装。
2. 构建Docker环境介绍如何使用Dockerfile创建模型推理时所需的Docker环境。
3. 启动推理:介绍如何启动推理。
## 2. 环境准备
### 1 基础环境安装
请参考[Teco用户手册的安装准备章节](http://docs.tecorigin.com/release/torch_2.4/v2.2.0/#fc980a30f1125aa88bad4246ff0cedcc),完成训练前的基础环境检查和安装。
### 2.1 下载并加载镜像 vllm
### 2 构建docker
#### 2.1 执行以下命令下载Docker镜像至本地Docker镜像包pytorch-3.0.0-torch_sdaa3.0.0.tar
```bash
wget http://wb.tecorigin.com:8082/repository/teco-customer-repo/Course/MinerU/mineru-vllm.tar
wget 镜像下载链接(链接获取请联系太初内部人员)
docker load -i mineru-vllm.tar
```
#### 2.2 校验Docker镜像包执行以下命令生成MD5码是否与官方MD5码b2a7f60508c0d199a99b8b6b35da3954一致
## 3. 启动 Docker 容器
md5sum pytorch-3.0.0-torch_sdaa3.0.0.tar
```bash
docker run -dit --name mineru_docker \
--privileged \
--cap-add SYS_PTRACE \
--cap-add SYS_ADMIN \
--network=host \
--shm-size=500G \
mineru:sdaa-vllm-latest \
/bin/bash
```
#### 2.3 执行以下命令导入Docker镜像
>[!TIP]
> 如需使用`vllm`环境,请执行以下操作:
> - 进入容器后通过以下命令切换到conda环境
> ```bash
> conda activate vllm_env_py310
> ```
>
> - 切换成功后,您可以在命令行前看到`(vllm_env_py310)`的标识,这表示您已成功进入`vllm`的虚拟环境。
docker load < pytorch-3.0.0-torch_sdaa3.0.0.tar
#### 2.4 执行以下命令构建名为MinerU的Docker容器
docker run -itd --name="MinerU" --net=host --device=/dev/tcaicard0 --device=/dev/tcaicard1 --device=/dev/tcaicard2 --device=/dev/tcaicard3 --cap-add SYS_PTRACE --cap-add SYS_ADMIN --shm-size 64g jfrog.tecorigin.net/tecotp-docker/release/ubuntu22.04/x86_64/pytorch:3.0.0-torch_sdaa3.0.0 /bin/bash
#### 2.5 执行以下命令进入名称为MinerU的Docker容器。
docker exec -it MinerU bash
执行该命令后您将进入到Docker容器的交互式终端您可以直接在容器内运行MinerU相关命令来使用MinerU的功能。
您也可以直接通过替换`/bin/bash`为服务启动命令来启动MinerU服务详细说明请参考[通过命令启动服务](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#apiwebuihttp-clientserver)。
### 3 执行以下命令安装MinerU
- 安装前的准备
```
cd <MinerU>
pip install --upgrade pip
pip install uv
```
- 由于镜像中安装了torch并且不需要安装nvidia-nccl-cu12、nvidia-cudnn-cu12等包因此需要注释掉一部分安装依赖。
- 请注释掉<MinerU>/pyproject.toml文件中所有的"doclayout_yolo==0.0.4"依赖并且将torch开头的包也注释掉。
- 执行以下命令安装MinerU
```
uv pip install -e .[core]
```
- 下载安装doclayout_yolo==0.0.4
```
pip install doclayout_yolo==0.0.4 --no-deps
```
- 下载安装其他包(doclayout_yolo==0.0.4的依赖)
```
pip install albumentations py-cpuinfo seaborn thop numpy==1.24.4
```
- 由于部分张量内部内存分布不连续,需要修改如下两个文件
<ultralytics安装路径>/ultralytics/utils/tal.py(330行左右,将view --> reshape)
<doclayout_yolo安装路径>/doclayout_yolo/utils/tal.py(375行左右,将view --> reshape)
### 4 执行推理
- 开启sdaa环境
```
export TORCH_SDAA_AUTOLOAD=cuda_migrate
```
- 首次运行推理命令前请添加以下环境下载模型权重
```
export HF_ENDPOINT=https://hf-mirror.com
```
- 运行以下命令执行推理
```
mineru -p 'input path' -o 'output_path' --lang 'model_name'
```
其中model_name可从'ch', 'ch_server', 'ch_lite', 'en', 'korean', 'japan', 'chinese_cht', 'ta', 'te', 'ka', 'latin', 'arabic', 'east_slavic', 'cyrillic', 'devanagari'选择
### 5 适配用到的软件栈版本列表
使用v3.0.0软件栈版本适配,获取方式联系太初内部人员
## 4. 注意事项
不同环境下MinerU对Tecorigin加速卡的支持情况如下表所示
<table border="1">
<thead>
<tr>
<th rowspan="2" colspan="2">使用场景</th>
<th>容器环境</th>
</tr>
<tr>
<th>vllm</th>
</tr>
</thead>
<tbody>
<tr>
<td rowspan="3">命令行工具(mineru)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td rowspan="3">fastapi服务(mineru-api)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td rowspan="3">gradio界面(mineru-gradio)</td>
<td>pipeline</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-auto-engine</td>
<td>🟢</td>
</tr>
<tr>
<td>&lt;vlm/hybrid&gt;-http-client</td>
<td>🟢</td>
</tr>
<tr>
<td colspan="2">openai-server服务mineru-openai-server</td>
<td>🟢</td>
</tr>
<tr>
<td colspan="2">数据并行 (--data-parallel-size)</td>
<td>🔴</td>
</tr>
</tbody>
</table>
注:
🟢: 支持运行较稳定精度与Nvidia GPU基本一致
🟡: 支持但较不稳定,在某些场景下可能出现异常,或精度存在一定差异
🔴: 不支持,无法运行,或精度存在较大差异
>[!TIP]
> - Tecorigin加速卡指定可用加速卡的方式与NVIDIA GPU类似请参考[使用指定GPU设备](https://opendatalab.github.io/MinerU/zh/usage/advanced_cli_parameters/#cuda_visible_devices)章节说明,
>将环境变量`CUDA_VISIBLE_DEVICES`替换为`SDAA_VISIBLE_DEVICES`即可。
> - 在太初平台可以通过`teco-smi -c`命令查看加速卡的使用情况并根据需要指定空闲的加速卡ID以避免资源冲突。

View File

@@ -72,7 +72,7 @@ MinerU命令行工具的某些参数存在相同功能的环境变量配置
- `MINERU_MODEL_SOURCE`
* 用于指定模型来源
* 支持`huggingface/modelscope/local`
* 默认为`huggingface`可通过环境变量切换为`modelscope`使用本地模型。
* 默认为`huggingface`可通过环境变量切换为`modelscope`使用国内加速源或切换至`local`使用本地模型。
- `MINERU_TOOLS_CONFIG_JSON`
* 用于指定配置文件路径
@@ -98,6 +98,12 @@ MinerU命令行工具的某些参数存在相同功能的环境变量配置
- `MINERU_PDF_RENDER_TIMEOUT`
* 用于设置将PDF渲染为图片的超时时间
* 默认为`300`秒,可通过环境变量设置为其他值以调整渲染图片的超时时间。
* 仅在linux和macOS系统中生效。
- `MINERU_PDF_RENDER_THREADS`
* 用于设置将PDF渲染为图片时使用的线程数
* 默认为`4`,可通过环境变量设置为其他值以调整渲染图片时的线程数。
* 仅在linux和macOS系统中生效。
- `MINERU_INTRA_OP_NUM_THREADS`
* 用于设置onnx模型的intra_op线程数影响单个算子的计算速度

View File

@@ -15,10 +15,13 @@
* [海光 Hygon](acceleration_cards/Hygon.md) 🚀
* [燧原 Enflame](acceleration_cards/Enflame.md) 🚀
* [摩尔线程 MooreThreads](acceleration_cards/MooreThreads.md) 🚀
* [AMD](acceleration_cards/AMD.md) [#3662](https://github.com/opendatalab/MinerU/discussions/3662) ❤️
* [太初元碁 Tecorigin](acceleration_cards/Tecorigin.md) [#3767](https://github.com/opendatalab/MinerU/pull/3767) ❤️
* [寒武纪 Cambricon](acceleration_cards/Cambricon.md) [#4004](https://github.com/opendatalab/MinerU/discussions/4004) ❤️
* [瀚博 VastAI](acceleration_cards/VastAI.md) [#4237](https://github.com/opendatalab/MinerU/discussions/4237)❤️
* [天数智芯 IluvatarCorex](acceleration_cards/IluvatarCorex.md) 🚀
* [寒武纪 Cambricon](acceleration_cards/Cambricon.md) 🚀
* [昆仑芯 Kunlunxin](acceleration_cards/Kunlunxin.md) 🚀
* [太初元碁 Tecorigin](acceleration_cards/Tecorigin.md) ❤️
* [壁仞 Biren](acceleration_cards/Biren.md) ❤️
* [AMD #3662](https://github.com/opendatalab/MinerU/discussions/3662) ❤️
* [瀚博 VastAI #4237](https://github.com/opendatalab/MinerU/discussions/4237) ❤️
- 插件与生态
* [Cherry Studio](plugin/Cherry_Studio.md)
* [Sider](plugin/Sider.md)

View File

@@ -297,7 +297,14 @@ def ocr_det_batch_setting(device):
# 检测torch的版本号
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse("2.8.0") or str(device).startswith('mps'):
device_type = os.getenv("MINERU_LMDEPLOY_DEVICE", "")
if (
version.parse(torch.__version__) >= version.parse("2.8.0")
or str(device).startswith('mps')
or device_type.lower() in ["corex"]
):
enable_ocr_det_batch = False
else:
enable_ocr_det_batch = True

View File

@@ -193,7 +193,12 @@ def batch_image_analyze(
# 检测torch的版本号
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse("2.8.0") or str(device).startswith('mps'):
device_type = os.getenv("MINERU_LMDEPLOY_DEVICE", "")
if (
version.parse(torch.__version__) >= version.parse("2.8.0")
or str(device).startswith('mps')
or device_type.lower() in ["corex"]
):
enable_ocr_det_batch = False
else:
enable_ocr_det_batch = True

View File

@@ -22,6 +22,11 @@ def enable_custom_logits_processors() -> bool:
compute_capability = "8.0"
elif hasattr(torch, 'musa') and torch.musa.is_available():
compute_capability = "8.0"
elif hasattr(torch, 'mlu') and torch.mlu.is_available():
compute_capability = "8.0"
elif hasattr(torch, 'sdaa') and torch.sdaa.is_available():
compute_capability = "8.0"
else:
logger.info("CUDA not available, disabling custom_logits_processors")
return False
@@ -100,4 +105,128 @@ def set_default_batch_size() -> int:
except Exception as e:
logger.warning(f'Error determining VRAM: {e}, using default batch_ratio: 1')
batch_size = 1
return batch_size
return batch_size
def _get_device_config(device_type: str) -> dict | None:
"""获取不同设备类型的配置参数"""
# 各设备类型的配置定义
DEVICE_CONFIGS = {
# "musa": {
# "compilation_config_dict": {
# "cudagraph_capture_sizes": [1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 20, 24, 28, 30],
# "simple_cuda_graph": True
# },
# "block_size": 32,
# },
"corex": {
"compilation_config_dict": {
"cudagraph_mode": "FULL_DECODE_ONLY",
"level": 0
},
},
"kxpu": {
"compilation_config_dict": {
"splitting_ops": [
"vllm.unified_attention", "vllm.unified_attention_with_output",
"vllm.unified_attention_with_output_kunlun", "vllm.mamba_mixer2",
"vllm.mamba_mixer", "vllm.short_conv", "vllm.linear_attention",
"vllm.plamo2_mamba_mixer", "vllm.gdn_attention", "vllm.sparse_attn_indexer"
]
},
"block_size": 128,
"dtype": "float16",
"distributed_executor_backend": "mp",
"enable_chunked_prefill": False,
"enable_prefix_caching": False,
},
}
return DEVICE_CONFIGS.get(device_type.lower())
def _check_server_arg_exists(args: list, arg_name: str) -> bool:
"""检查命令行参数列表中是否已存在指定参数"""
return any(arg == f"--{arg_name}" or arg.startswith(f"--{arg_name}=") for arg in args)
def _add_server_arg_if_missing(args: list, arg_name: str, value: str) -> None:
"""如果参数不存在,则添加到命令行参数列表"""
if not _check_server_arg_exists(args, arg_name):
args.extend([f"--{arg_name}", value])
def _add_server_flag_if_missing(args: list, flag_name: str) -> None:
"""如果 flag 不存在,则添加到命令行参数列表"""
if not _check_server_arg_exists(args, flag_name):
args.append(f"--{flag_name}")
def _add_engine_kwarg_if_missing(kwargs: dict, key: str, value) -> None:
"""如果参数不存在,则添加到 kwargs 字典"""
if key not in kwargs:
kwargs[key] = value
def mod_kwargs_by_device_type(kwargs_or_args: dict | list, vllm_mode: str) -> dict | list:
    """Patch vLLM configuration with accelerator-specific overrides.

    Reads the device type from the ``MINERU_VLLM_DEVICE`` environment
    variable and, when a matching config exists, merges it into the given
    server argument list or engine kwargs dict. Entries the caller already
    supplied always take precedence.

    Args:
        kwargs_or_args: CLI argument list ("server" mode) or engine kwargs
            dict ("sync_engine" / "async_engine" modes); modified in place.
        vllm_mode: vLLM run mode — "server", "sync_engine" or "async_engine".

    Returns:
        The (possibly modified) ``kwargs_or_args`` object.
    """
    device_type = os.getenv("MINERU_VLLM_DEVICE", "")
    device_config = _get_device_config(device_type)
    if device_config is None:
        # Unset / unknown device: leave the caller's configuration untouched.
        return kwargs_or_args
    if vllm_mode == "server":
        _apply_server_config(kwargs_or_args, device_config)
    else:
        _apply_engine_config(kwargs_or_args, device_config, vllm_mode)
    return kwargs_or_args
def _apply_server_config(args: list, config: dict) -> None:
    """Translate a device config dict into ``vllm serve`` CLI options.

    Options already present in *args* are never overridden.
    """
    import json

    # Boolean engine options that vLLM exposes as --no-<name> flags.
    negatable_options = {"enable-chunked-prefill", "enable-prefix-caching"}
    for key, value in config.items():
        if key == "compilation_config_dict":
            # Compact JSON so the whole dict fits into a single CLI token.
            compact = json.dumps(value, separators=(",", ":"))
            _add_server_arg_if_missing(args, "compilation-config", compact)
            continue
        option = key.replace("_", "-")  # e.g. block_size -> block-size
        if value is False and option in negatable_options:
            _add_server_flag_if_missing(args, f"no-{option}")
        else:
            _add_server_arg_if_missing(args, option, str(value))
def _apply_engine_config(kwargs: dict, config: dict, vllm_mode: str) -> None:
    """Translate a device config dict into vLLM engine kwargs.

    Keys the caller already supplied in *kwargs* are never overridden.

    Raises:
        ImportError: When vllm is not installed.
    """
    try:
        from vllm.config import CompilationConfig
    except ImportError:
        raise ImportError("Please install vllm to use the vllm-async-engine backend.")
    for key, value in config.items():
        if key != "compilation_config_dict":
            _add_engine_kwarg_if_missing(kwargs, key, value)
            continue
        # The sync engine accepts a plain dict, the async engine wants a
        # CompilationConfig instance; any other mode gets no compilation config.
        if vllm_mode == "sync_engine":
            _add_engine_kwarg_if_missing(kwargs, "compilation_config", value)
        elif vllm_mode == "async_engine":
            _add_engine_kwarg_if_missing(kwargs, "compilation_config", CompilationConfig(**value))

View File

@@ -6,7 +6,7 @@ import json
from loguru import logger
from .utils import enable_custom_logits_processors, set_default_gpu_memory_utilization, set_default_batch_size, \
set_lmdeploy_backend
set_lmdeploy_backend, mod_kwargs_by_device_type
from .model_output_to_middle_json import result_to_middle_json
from ...data.data_reader_writer import DataWriter
from mineru.utils.pdf_image_tools import load_images_from_pdf
@@ -101,20 +101,7 @@ class ModelSingleton:
except ImportError:
raise ImportError("Please install vllm to use the vllm-engine backend.")
"""
# musa vllm v1 引擎特殊配置
device = get_device()
if device.startswith("musa"):
import torch
if torch.musa.is_available():
compilation_config = {
"cudagraph_capture_sizes": [1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 20, 24, 28, 30],
"simple_cuda_graph": True
}
block_size = 32
kwargs["compilation_config"] = compilation_config
kwargs["block_size"] = block_size
"""
kwargs = mod_kwargs_by_device_type(kwargs, vllm_mode="sync_engine")
if "compilation_config" in kwargs:
if isinstance(kwargs["compilation_config"], str):
@@ -141,20 +128,7 @@ class ModelSingleton:
except ImportError:
raise ImportError("Please install vllm to use the vllm-async-engine backend.")
"""
# musa vllm v1 引擎特殊配置
device = get_device()
if device.startswith("musa"):
import torch
if torch.musa.is_available():
compilation_config = CompilationConfig(
cudagraph_capture_sizes=[1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 20, 24, 28, 30],
simple_cuda_graph=True
)
block_size = 32
kwargs["compilation_config"] = compilation_config
kwargs["block_size"] = block_size
"""
kwargs = mod_kwargs_by_device_type(kwargs, vllm_mode="async_engine")
if "compilation_config" in kwargs:
if isinstance(kwargs["compilation_config"], dict):

View File

@@ -7,12 +7,12 @@ import asyncio
import uvicorn
import click
import zipfile
import shutil
from pathlib import Path
import glob
from fastapi import Depends, FastAPI, HTTPException, UploadFile, File, Form
from fastapi import Depends, FastAPI, HTTPException, UploadFile, File, Form, BackgroundTasks
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.responses import JSONResponse, FileResponse
from starlette.background import BackgroundTask
from typing import List, Optional
from loguru import logger
@@ -30,23 +30,30 @@ from mineru.version import __version__
# 并发控制器
_request_semaphore: Optional[asyncio.Semaphore] = None
# 并发控制依赖函数
async def limit_concurrency():
if _request_semaphore is not None:
if _request_semaphore.locked():
# 检查信号量是否已用尽,如果是则拒绝请求
if _request_semaphore._value == 0:
raise HTTPException(
status_code=503,
detail=f"Server is at maximum capacity: {os.getenv('MINERU_API_MAX_CONCURRENT_REQUESTS', 'unset')}. Please try again later."
detail=f"Server is at maximum capacity: {os.getenv('MINERU_API_MAX_CONCURRENT_REQUESTS', 'unset')}. Please try again later.",
)
async with _request_semaphore:
yield
else:
yield
def create_app():
# By default, the OpenAPI documentation endpoints (openapi_url, docs_url, redoc_url) are enabled.
# To disable the FastAPI docs and schema endpoints, set the environment variable MINERU_API_ENABLE_FASTAPI_DOCS=0.
enable_docs = str(os.getenv("MINERU_API_ENABLE_FASTAPI_DOCS", "1")).lower() in ("1", "true", "yes")
enable_docs = str(os.getenv("MINERU_API_ENABLE_FASTAPI_DOCS", "1")).lower() in (
"1",
"true",
"yes",
)
app = FastAPI(
openapi_url="/openapi.json" if enable_docs else None,
docs_url="/docs" if enable_docs else None,
@@ -56,7 +63,9 @@ def create_app():
# 初始化并发控制器从环境变量MINERU_API_MAX_CONCURRENT_REQUESTS读取
global _request_semaphore
try:
max_concurrent_requests = int(os.getenv("MINERU_API_MAX_CONCURRENT_REQUESTS", "0"))
max_concurrent_requests = int(
os.getenv("MINERU_API_MAX_CONCURRENT_REQUESTS", "0")
)
except ValueError:
max_concurrent_requests = 0
@@ -67,6 +76,7 @@ def create_app():
app.add_middleware(GZipMiddleware, minimum_size=1000)
return app
app = create_app()
@@ -76,27 +86,34 @@ def sanitize_filename(filename: str) -> str:
移除路径遍历字符, 保留 Unicode 字母、数字、._-
禁止隐藏文件
"""
sanitized = re.sub(r'[/\\\.]{2,}|[/\\]', '', filename)
sanitized = re.sub(r'[^\w.-]', '_', sanitized, flags=re.UNICODE)
if sanitized.startswith('.'):
sanitized = '_' + sanitized[1:]
return sanitized or 'unnamed'
sanitized = re.sub(r"[/\\.]{2,}|[/\\]", "", filename)
sanitized = re.sub(r"[^\w.-]", "_", sanitized, flags=re.UNICODE)
if sanitized.startswith("."):
sanitized = "_" + sanitized[1:]
return sanitized or "unnamed"
def cleanup_file(file_path: str) -> None:
"""清理临时 zip 文件"""
"""清理临时文件或目录"""
try:
if os.path.exists(file_path):
os.remove(file_path)
if os.path.isfile(file_path):
os.remove(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
logger.warning(f"fail clean file {file_path}: {e}")
def encode_image(image_path: str) -> str:
"""Encode image using base64"""
with open(image_path, "rb") as f:
return b64encode(f.read()).decode()
def get_infer_result(file_suffix_identifier: str, pdf_name: str, parse_dir: str) -> Optional[str]:
def get_infer_result(
file_suffix_identifier: str, pdf_name: str, parse_dir: str
) -> Optional[str]:
"""从结果文件中读取推理结果"""
result_file_path = os.path.join(parse_dir, f"{pdf_name}{file_suffix_identifier}")
if os.path.exists(result_file_path):
@@ -107,11 +124,14 @@ def get_infer_result(file_suffix_identifier: str, pdf_name: str, parse_dir: str)
@app.post(path="/file_parse", dependencies=[Depends(limit_concurrency)])
async def parse_pdf(
files: List[UploadFile] = File(..., description="Upload pdf or image files for parsing"),
output_dir: str = Form("./output", description="Output local directory"),
lang_list: List[str] = Form(
["ch"],
description="""(Adapted only for pipeline and hybrid backend)Input the languages in the pdf to improve OCR accuracy.Options:
background_tasks: BackgroundTasks,
files: List[UploadFile] = File(
..., description="Upload pdf or image files for parsing"
),
output_dir: str = Form("./output", description="Output local directory"),
lang_list: List[str] = Form(
["ch"],
description="""(Adapted only for pipeline and hybrid backend)Input the languages in the pdf to improve OCR accuracy.Options:
- ch: Chinese, English, Chinese Traditional.
- ch_lite: Chinese, English, Chinese Traditional, Japanese.
- ch_server: Chinese, English, Chinese Traditional, Japanese.
@@ -129,41 +149,54 @@ async def parse_pdf(
- east_slavic: Russian, Belarusian, Ukrainian, English.
- cyrillic: Russian, Belarusian, Ukrainian, Serbian (Cyrillic), Bulgarian, Mongolian, Abkhazian, Adyghe, Kabardian, Avar, Dargin, Ingush, Chechen, Lak, Lezgin, Tabasaran, Kazakh, Kyrgyz, Tajik, Macedonian, Tatar, Chuvash, Bashkir, Malian, Moldovan, Udmurt, Komi, Ossetian, Buryat, Kalmyk, Tuvan, Sakha, Karakalpak, English.
- devanagari: Hindi, Marathi, Nepali, Bihari, Maithili, Angika, Bhojpuri, Magahi, Santali, Newari, Konkani, Sanskrit, Haryanvi, English.
"""
),
backend: str = Form(
"hybrid-auto-engine",
description="""The backend for parsing:
""",
),
backend: str = Form(
"hybrid-auto-engine",
description="""The backend for parsing:
- pipeline: More general, supports multiple languages, hallucination-free.
- vlm-auto-engine: High accuracy via local computing power, supports Chinese and English documents only.
- vlm-http-client: High accuracy via remote computing power(client suitable for openai-compatible servers), supports Chinese and English documents only.
- hybrid-auto-engine: Next-generation high accuracy solution via local computing power, supports multiple languages.
- hybrid-http-client: High accuracy via remote computing power but requires a little local computing power(client suitable for openai-compatible servers), supports multiple languages."""
),
parse_method: str = Form(
"auto",
description="""(Adapted only for pipeline and hybrid backend)The method for parsing PDF:
- hybrid-http-client: High accuracy via remote computing power but requires a little local computing power(client suitable for openai-compatible servers), supports multiple languages.""",
),
parse_method: str = Form(
"auto",
description="""(Adapted only for pipeline and hybrid backend)The method for parsing PDF:
- auto: Automatically determine the method based on the file type
- txt: Use text extraction method
- ocr: Use OCR method for image-based PDFs
"""
),
formula_enable: bool = Form(True, description="Enable formula parsing."),
table_enable: bool = Form(True, description="Enable table parsing."),
server_url: Optional[str] = Form(
None,
description="(Adapted only for <vlm/hybrid>-http-client backend)openai compatible server url, e.g., http://127.0.0.1:30000"
),
return_md: bool = Form(True, description="Return markdown content in response"),
return_middle_json: bool = Form(False, description="Return middle JSON in response"),
return_model_output: bool = Form(False, description="Return model output JSON in response"),
return_content_list: bool = Form(False, description="Return content list JSON in response"),
return_images: bool = Form(False, description="Return extracted images in response"),
response_format_zip: bool = Form(False, description="Return results as a ZIP file instead of JSON"),
start_page_id: int = Form(0, description="The starting page for PDF parsing, beginning from 0"),
end_page_id: int = Form(99999, description="The ending page for PDF parsing, beginning from 0"),
""",
),
formula_enable: bool = Form(True, description="Enable formula parsing."),
table_enable: bool = Form(True, description="Enable table parsing."),
server_url: Optional[str] = Form(
None,
description="(Adapted only for <vlm/hybrid>-http-client backend)openai compatible server url, e.g., http://127.0.0.1:30000",
),
return_md: bool = Form(True, description="Return markdown content in response"),
return_middle_json: bool = Form(
False, description="Return middle JSON in response"
),
return_model_output: bool = Form(
False, description="Return model output JSON in response"
),
return_content_list: bool = Form(
False, description="Return content list JSON in response"
),
return_images: bool = Form(
False, description="Return extracted images in response"
),
response_format_zip: bool = Form(
False, description="Return results as a ZIP file instead of JSON"
),
start_page_id: int = Form(
0, description="The starting page for PDF parsing, beginning from 0"
),
end_page_id: int = Form(
99999, description="The ending page for PDF parsing, beginning from 0"
),
):
# 获取命令行配置参数
config = getattr(app.state, "config", {})
@@ -171,6 +204,7 @@ async def parse_pdf(
# 创建唯一的输出目录
unique_dir = os.path.join(output_dir, str(uuid.uuid4()))
os.makedirs(unique_dir, exist_ok=True)
background_tasks.add_task(cleanup_file, unique_dir)
# 处理上传的PDF文件
pdf_file_names = []
@@ -196,20 +230,21 @@ async def parse_pdf(
except Exception as e:
return JSONResponse(
status_code=400,
content={"error": f"Failed to load file: {str(e)}"}
content={"error": f"Failed to load file: {str(e)}"},
)
else:
return JSONResponse(
status_code=400,
content={"error": f"Unsupported file type: {file_suffix}"}
content={"error": f"Unsupported file type: {file_suffix}"},
)
# 设置语言列表,确保与文件数量一致
actual_lang_list = lang_list
if len(actual_lang_list) != len(pdf_file_names):
# 如果语言列表长度不匹配,使用第一个语言或默认"ch"
actual_lang_list = [actual_lang_list[0] if actual_lang_list else "ch"] * len(pdf_file_names)
actual_lang_list = [
actual_lang_list[0] if actual_lang_list else "ch"
] * len(pdf_file_names)
# 调用异步处理函数
await aio_do_parse(
@@ -231,13 +266,15 @@ async def parse_pdf(
f_dump_content_list=return_content_list,
start_page_id=start_page_id,
end_page_id=end_page_id,
**config
**config,
)
# 根据 response_format_zip 决定返回类型
if response_format_zip:
zip_fd, zip_path = tempfile.mkstemp(suffix=".zip", prefix="mineru_results_")
os.close(zip_fd)
background_tasks.add_task(cleanup_file, zip_path)
with zipfile.ZipFile(zip_path, "w", compression=zipfile.ZIP_DEFLATED) as zf:
for pdf_name in pdf_file_names:
safe_pdf_name = sanitize_filename(pdf_name)
@@ -247,7 +284,13 @@ async def parse_pdf(
elif backend.startswith("vlm"):
parse_dir = os.path.join(unique_dir, pdf_name, "vlm")
elif backend.startswith("hybrid"):
parse_dir = os.path.join(unique_dir, pdf_name, f"hybrid_{parse_method}")
parse_dir = os.path.join(
unique_dir, pdf_name, f"hybrid_{parse_method}"
)
else:
# 未知 backend跳过此文件
logger.warning(f"Unknown backend type: {backend}, skipping {pdf_name}")
continue
if not os.path.exists(parse_dir):
continue
@@ -256,35 +299,63 @@ async def parse_pdf(
if return_md:
path = os.path.join(parse_dir, f"{pdf_name}.md")
if os.path.exists(path):
zf.write(path, arcname=os.path.join(safe_pdf_name, f"{safe_pdf_name}.md"))
zf.write(
path,
arcname=os.path.join(
safe_pdf_name, f"{safe_pdf_name}.md"
),
)
if return_middle_json:
path = os.path.join(parse_dir, f"{pdf_name}_middle.json")
if os.path.exists(path):
zf.write(path, arcname=os.path.join(safe_pdf_name, f"{safe_pdf_name}_middle.json"))
zf.write(
path,
arcname=os.path.join(
safe_pdf_name, f"{safe_pdf_name}_middle.json"
),
)
if return_model_output:
path = os.path.join(parse_dir, f"{pdf_name}_model.json")
if os.path.exists(path):
zf.write(path, arcname=os.path.join(safe_pdf_name, os.path.basename(path)))
zf.write(
path,
arcname=os.path.join(
safe_pdf_name, f"{safe_pdf_name}_model.json"
),
)
if return_content_list:
path = os.path.join(parse_dir, f"{pdf_name}_content_list.json")
if os.path.exists(path):
zf.write(path, arcname=os.path.join(safe_pdf_name, f"{safe_pdf_name}_content_list.json"))
zf.write(
path,
arcname=os.path.join(
safe_pdf_name, f"{safe_pdf_name}_content_list.json"
),
)
# 写入图片
if return_images:
images_dir = os.path.join(parse_dir, "images")
image_paths = glob.glob(os.path.join(glob.escape(images_dir), "*.jpg"))
image_paths = glob.glob(
os.path.join(glob.escape(images_dir), "*.jpg")
)
for image_path in image_paths:
zf.write(image_path, arcname=os.path.join(safe_pdf_name, "images", os.path.basename(image_path)))
zf.write(
image_path,
arcname=os.path.join(
safe_pdf_name,
"images",
os.path.basename(image_path),
),
)
return FileResponse(
path=zip_path,
media_type="application/zip",
filename="results.zip",
background=BackgroundTask(cleanup_file, zip_path)
)
else:
# 构建 JSON 结果
@@ -298,17 +369,31 @@ async def parse_pdf(
elif backend.startswith("vlm"):
parse_dir = os.path.join(unique_dir, pdf_name, "vlm")
elif backend.startswith("hybrid"):
parse_dir = os.path.join(unique_dir, pdf_name, f"hybrid_{parse_method}")
parse_dir = os.path.join(
unique_dir, pdf_name, f"hybrid_{parse_method}"
)
else:
# 未知 backend跳过此文件
logger.warning(f"Unknown backend type: {backend}, skipping {pdf_name}")
continue
if os.path.exists(parse_dir):
if return_md:
data["md_content"] = get_infer_result(".md", pdf_name, parse_dir)
data["md_content"] = get_infer_result(
".md", pdf_name, parse_dir
)
if return_middle_json:
data["middle_json"] = get_infer_result("_middle.json", pdf_name, parse_dir)
data["middle_json"] = get_infer_result(
"_middle.json", pdf_name, parse_dir
)
if return_model_output:
data["model_output"] = get_infer_result("_model.json", pdf_name, parse_dir)
data["model_output"] = get_infer_result(
"_model.json", pdf_name, parse_dir
)
if return_content_list:
data["content_list"] = get_infer_result("_content_list.json", pdf_name, parse_dir)
data["content_list"] = get_infer_result(
"_content_list.json", pdf_name, parse_dir
)
if return_images:
images_dir = os.path.join(parse_dir, "images")
safe_pattern = os.path.join(glob.escape(images_dir), "*.jpg")
@@ -325,24 +410,24 @@ async def parse_pdf(
content={
"backend": backend,
"version": __version__,
"results": result_dict
}
"results": result_dict,
},
)
except Exception as e:
logger.exception(e)
return JSONResponse(
status_code=500,
content={"error": f"Failed to process file: {str(e)}"}
status_code=500, content={"error": f"Failed to process file: {str(e)}"}
)
@click.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True))
@click.command(
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True)
)
@click.pass_context
@click.option('--host', default='127.0.0.1', help='Server host (default: 127.0.0.1)')
@click.option('--port', default=8000, type=int, help='Server port (default: 8000)')
@click.option('--reload', is_flag=True, help='Enable auto-reload (development mode)')
@click.option("--host", default="127.0.0.1", help="Server host (default: 127.0.0.1)")
@click.option("--port", default=8000, type=int, help="Server port (default: 8000)")
@click.option("--reload", is_flag=True, help="Enable auto-reload (development mode)")
def main(ctx, host, port, reload, **kwargs):
kwargs.update(arg_parse(ctx))
# 将配置参数存储到应用状态中
@@ -359,12 +444,7 @@ def main(ctx, host, port, reload, **kwargs):
print(f"Start MinerU FastAPI Service: http://{host}:{port}")
print(f"API documentation: http://{host}:{port}/docs")
uvicorn.run(
"mineru.cli.fast_api:app",
host=host,
port=port,
reload=reload
)
uvicorn.run("mineru.cli.fast_api:app", host=host, port=port, reload=reload)
if __name__ == "__main__":

View File

@@ -89,7 +89,11 @@ class FormulaRecognizer(BaseOCRV20):
return rec_formula
def batch_predict(
self, images_mfd_res: list, images: list, batch_size: int = 64
self,
images_mfd_res: list,
images: list,
batch_size: int = 64,
interline_enable: bool = True,
) -> list:
images_formula_list = []
mf_image_list = []
@@ -105,6 +109,8 @@ class FormulaRecognizer(BaseOCRV20):
for idx, (xyxy, conf, cla) in enumerate(
zip(mfd_res.boxes.xyxy, mfd_res.boxes.conf, mfd_res.boxes.cls)
):
if not interline_enable and cla.item() == 1:
continue # Skip interline regions if not enabled
xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy]
new_item = {
"category_id": 13 + int(cla.item()),

View File

@@ -1,8 +1,8 @@
import os
import sys
from mineru.backend.vlm.utils import set_default_gpu_memory_utilization, enable_custom_logits_processors
from mineru.utils.config_reader import get_device
from mineru.backend.vlm.utils import set_default_gpu_memory_utilization, enable_custom_logits_processors, \
mod_kwargs_by_device_type
from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
from vllm.entrypoints.cli.main import main as vllm_main
@@ -14,8 +14,6 @@ def main():
has_port_arg = False
has_gpu_memory_utilization_arg = False
has_logits_processors_arg = False
has_block_size_arg = False
has_compilation_config = False
model_path = None
model_arg_indices = []
@@ -27,10 +25,6 @@ def main():
has_gpu_memory_utilization_arg = True
if arg == "--logits-processors" or arg.startswith("--logits-processors="):
has_logits_processors_arg = True
if arg == "--block-size" or arg.startswith("--block-size="):
has_block_size_arg = True
if arg == "--compilation-config" or arg.startswith("--compilation-config="):
has_compilation_config = True
if arg == "--model":
if i + 1 < len(args):
model_path = args[i + 1]
@@ -56,17 +50,8 @@ def main():
model_path = auto_download_and_get_model_root_path("/", "vlm")
if (not has_logits_processors_arg) and custom_logits_processors:
args.extend(["--logits-processors", "mineru_vl_utils:MinerULogitsProcessor"])
"""
# musa vllm v1 引擎特殊配置
device = get_device()
if device.startswith("musa"):
import torch
if torch.musa.is_available():
if not has_block_size_arg:
args.extend(["--block-size", "32"])
if not has_compilation_config:
args.extend(["--compilation-config", '{"cudagraph_capture_sizes": [1,2,3,4,5,6,7,8,10,12,14,16,18,20,24,28,30], "simple_cuda_graph": true}'])
"""
args = mod_kwargs_by_device_type(args, vllm_mode="server")
# 重构参数,将模型路径作为位置参数
sys.argv = [sys.argv[0]] + ["serve", model_path] + args

View File

@@ -198,6 +198,14 @@ def model_init(model_name: str):
if hasattr(torch, 'npu') and torch.npu.is_available():
if torch.npu.is_bf16_supported():
bf_16_support = True
elif device_name.startswith("mlu"):
if hasattr(torch, 'mlu') and torch.mlu.is_available():
if torch.mlu.is_bf16_supported():
bf_16_support = True
elif device_name.startswith("sdaa"):
if hasattr(torch, 'sdaa') and torch.sdaa.is_available():
if torch.sdaa.is_bf16_supported():
bf_16_support = True
if model_name == 'layoutreader':
# 检测modelscope的缓存目录是否存在

View File

@@ -94,7 +94,16 @@ def get_device():
if torch.musa.is_available():
return "musa"
except Exception as e:
pass
try:
if torch.mlu.is_available():
return "mlu"
except Exception as e:
try:
if torch.sdaa.is_available():
return "sdaa"
except Exception as e:
pass
return "cpu"

View File

@@ -429,6 +429,12 @@ def clean_memory(device='cuda'):
elif str(device).startswith("musa"):
if torch.musa.is_available():
torch.musa.empty_cache()
elif str(device).startswith("mlu"):
if torch.mlu.is_available():
torch.mlu.empty_cache()
elif str(device).startswith("sdaa"):
if torch.sdaa.is_available():
torch.sdaa.empty_cache()
gc.collect()
@@ -470,5 +476,11 @@ def get_vram(device) -> int:
elif str(device).startswith("musa"):
if torch.musa.is_available():
total_memory = round(torch.musa.get_device_properties(device).total_memory / (1024 ** 3)) # 转为 GB
elif str(device).startswith("mlu"):
if torch.mlu.is_available():
total_memory = round(torch.mlu.get_device_properties(device).total_memory / (1024 ** 3)) # 转为 GB
elif str(device).startswith("sdaa"):
if torch.sdaa.is_available():
total_memory = round(torch.sdaa.get_device_properties(device).total_memory / (1024 ** 3)) # 转为 GB
return total_memory

View File

@@ -11,6 +11,11 @@ def get_load_images_timeout() -> int:
return get_value_from_string(env_value, 300)
def get_load_images_threads() -> int:
env_value = os.getenv('MINERU_PDF_RENDER_THREADS', None)
return get_value_from_string(env_value, 4)
def get_value_from_string(env_value: str, default_value: int) -> int:
if env_value is not None:
try:

View File

@@ -1,5 +1,7 @@
# Copyright (c) Opendatalab. All rights reserved.
import os
import signal
import time
from io import BytesIO
import numpy as np
@@ -9,13 +11,13 @@ from PIL import Image, ImageOps
from mineru.data.data_reader_writer import FileBasedDataWriter
from mineru.utils.check_sys_env import is_windows_environment
from mineru.utils.os_env_config import get_load_images_timeout
from mineru.utils.os_env_config import get_load_images_timeout, get_load_images_threads
from mineru.utils.pdf_reader import image_to_b64str, image_to_bytes, page_to_image
from mineru.utils.enum_class import ImageType
from mineru.utils.hash_utils import str_sha256
from mineru.utils.pdf_page_id import get_end_page_id
from concurrent.futures import ProcessPoolExecutor, TimeoutError as FuturesTimeoutError
from concurrent.futures import ProcessPoolExecutor, wait, ALL_COMPLETED
def pdf_page_to_image(page: pdfium.PdfPage, dpi=200, image_type=ImageType.PIL) -> dict:
@@ -57,7 +59,7 @@ def load_images_from_pdf(
end_page_id=None,
image_type=ImageType.PIL,
timeout=None,
threads=4,
threads=None,
):
"""带超时控制的 PDF 转图片函数,支持多进程加速
@@ -67,8 +69,8 @@ def load_images_from_pdf(
start_page_id (int, optional): 起始页码. Defaults to 0.
end_page_id (int | None, optional): 结束页码. Defaults to None.
image_type (ImageType, optional): 图片类型. Defaults to ImageType.PIL.
timeout (int | None, optional): 超时时间(秒)。如果为 None则从环境变量 MINERU_PDF_LOAD_IMAGES_TIMEOUT 读取,若未设置则默认为 300 秒。
threads (int): 进程数,默认 4
timeout (int | None, optional): 超时时间(秒)。如果为 None则从环境变量 MINERU_PDF_RENDER_TIMEOUT 读取,若未设置则默认为 300 秒。
threads (int): 进程数, 如果为 None则从环境变量 MINERU_PDF_RENDER_THREADS 读取,若未设置则默认 4.
Raises:
TimeoutError: 当转换超时时抛出
@@ -86,6 +88,9 @@ def load_images_from_pdf(
else:
if timeout is None:
timeout = get_load_images_timeout()
if threads is None:
threads = get_load_images_threads()
end_page_id = get_end_page_id(end_page_id, len(pdf_doc))
# 计算总页数
@@ -108,11 +113,13 @@ def load_images_from_pdf(
page_ranges.append((range_start, range_end))
# logger.debug(f"PDF to images using {actual_threads} processes, page ranges: {page_ranges}")
logger.debug(f"PDF to images using {actual_threads} processes, page ranges: {page_ranges}")
with ProcessPoolExecutor(max_workers=actual_threads) as executor:
executor = ProcessPoolExecutor(max_workers=actual_threads)
try:
# 提交所有任务
futures = []
future_to_range = {}
for range_start, range_end in page_ranges:
future = executor.submit(
_load_images_from_pdf_worker,
@@ -122,27 +129,68 @@ def load_images_from_pdf(
range_end,
image_type,
)
futures.append((range_start, future))
futures.append(future)
future_to_range[future] = range_start
try:
# 收集结果并按页码排序
all_results = []
for range_start, future in futures:
images_list = future.result(timeout=timeout)
all_results.append((range_start, images_list))
# 使用 wait() 设置单一全局超时
done, not_done = wait(futures, timeout=timeout, return_when=ALL_COMPLETED)
# 按起始页码排序并合并结果
all_results.sort(key=lambda x: x[0])
images_list = []
for _, imgs in all_results:
images_list.extend(imgs)
return images_list, pdf_doc
except FuturesTimeoutError:
# 检查是否有未完成的任务(超时情况)
if not_done:
# 超时:强制终止所有子进程
_terminate_executor_processes(executor)
pdf_doc.close()
executor.shutdown(wait=False, cancel_futures=True)
raise TimeoutError(f"PDF to images conversion timeout after {timeout}s")
# 所有任务完成,收集结果
all_results = []
for future in futures:
range_start = future_to_range[future]
# 这里不需要 timeout因为任务已完成
images_list = future.result()
all_results.append((range_start, images_list))
# 按起始页码排序并合并结果
all_results.sort(key=lambda x: x[0])
images_list = []
for _, imgs in all_results:
images_list.extend(imgs)
return images_list, pdf_doc
except Exception as e:
# 发生任何异常时,确保清理子进程
_terminate_executor_processes(executor)
pdf_doc.close()
if isinstance(e, TimeoutError):
raise
raise
finally:
executor.shutdown(wait=False, cancel_futures=True)
def _terminate_executor_processes(executor):
"""强制终止 ProcessPoolExecutor 中的所有子进程"""
if hasattr(executor, '_processes'):
for pid, process in executor._processes.items():
if process.is_alive():
try:
# 先发送 SIGTERM 允许优雅退出
os.kill(pid, signal.SIGTERM)
except (ProcessLookupError, OSError):
pass
# 给子进程一点时间响应 SIGTERM
time.sleep(0.1)
# 对仍然存活的进程发送 SIGKILL 强制终止
for pid, process in executor._processes.items():
if process.is_alive():
try:
os.kill(pid, signal.SIGKILL)
except (ProcessLookupError, OSError):
pass
def load_images_from_pdf_core(
pdf_bytes: bytes,

View File

@@ -1 +1 @@
__version__ = "2.7.3"
__version__ = "2.7.6"