Compare commits

...

56 Commits

Author SHA1 Message Date
Xiaomeng Zhao
57ba7fab01 Merge pull request #3155 from opendatalab/dev
Dev
2025-07-23 15:48:37 +08:00
Xiaomeng Zhao
c39606c188 Merge pull request #3154 from myhloli/dev
chore: update version number to 2.1.4 in README files
2025-07-23 15:48:08 +08:00
myhloli
318bdf0d7c chore: update version number to 2.1.4 in README files 2025-07-23 15:47:16 +08:00
Xiaomeng Zhao
3da26d1c6b Merge pull request #3150 from myhloli/dev
fix: remove unused import in pipeline_magic_model.py
2025-07-23 15:33:46 +08:00
myhloli
832f37271e fix: remove unused import in pipeline_magic_model.py 2025-07-23 15:20:35 +08:00
Xiaomeng Zhao
a5583ff4fb fix: improve candidate sorting logic in vlm_magic_model.py
fix: improve candidate sorting logic in vlm_magic_model.py
2025-07-23 15:14:52 +08:00
myhloli
1906643c67 refactor: streamline bbox processing and enhance category tying logic in magic_model_utils.py 2025-07-23 15:03:56 +08:00
myhloli
ee6d557fcc fix: correct comment formatting for batch_size logic in Unimernet.py 2025-07-23 11:21:17 +08:00
myhloli
a636b34324 fix: ensure batch_size is at least 1 when sorted_images is empty in Unimernet.py 2025-07-23 11:20:39 +08:00
Xiaomeng Zhao
9e5cb12967 Update mineru/utils/magic_model_utils.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-23 11:05:15 +08:00
myhloli
9bf8be9861 feat: add utility functions for bounding box processing in magic_model_utils.py 2025-07-23 10:59:14 +08:00
myhloli
4f612cbc1d fix: improve candidate sorting logic in vlm_magic_model.py 2025-07-23 09:58:26 +08:00
Xiaomeng Zhao
beacccb614 Merge pull request #3145 from opendatalab/master
master->dev
2025-07-23 00:30:20 +08:00
myhloli
dcd18c67a8 Update version.py with new version 2025-07-22 16:24:10 +00:00
Xiaomeng Zhao
308f1b0424 Merge pull request #3144 from opendatalab/release-2.1.3
Release 2.1.3
2025-07-23 00:21:28 +08:00
Xiaomeng Zhao
fae9fb9820 Update mineru/model/mfr/unimernet/Unimernet.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-23 00:20:08 +08:00
Xiaomeng Zhao
58268df3ce Merge pull request #3143 from myhloli/dev
docs: update changelog for version 2.1.3 with bug fixes and improvements
2025-07-23 00:12:51 +08:00
myhloli
f1ae0afcd6 docs: update changelog for version 2.1.3 with bug fixes and improvements 2025-07-23 00:11:52 +08:00
myhloli
139fd3ca65 Update version.py with new version 2025-07-22 14:38:29 +00:00
Xiaomeng Zhao
07f6ba7299 Merge pull request #3139 from opendatalab/release-2.1.2
Release 2.1.2
2025-07-22 22:27:57 +08:00
Xiaomeng Zhao
973443d6d4 Merge branch 'master' into release-2.1.2 2025-07-22 22:27:37 +08:00
Xiaomeng Zhao
7714e4c41a Merge pull request #3141 from opendatalab/dev
Dev
2025-07-22 22:23:57 +08:00
Xiaomeng Zhao
4618ff6fea Merge pull request #3140 from myhloli/dev
docs: update changelog for version 2.1.2 with bug fixes
2025-07-22 22:23:24 +08:00
myhloli
e4639e1321 docs: update changelog for version 2.1.2 with bug fixes 2025-07-22 22:22:12 +08:00
Xiaomeng Zhao
1c2b233f5d Merge pull request #3138 from myhloli/dev
fix: update GITHUB_TOKEN usage in CLA action and change branch to 'cla'
2025-07-22 21:15:38 +08:00
Xiaomeng Zhao
38af25776f Merge branch 'dev' into dev 2025-07-22 21:15:17 +08:00
myhloli
fffb68797c fix: update GITHUB_TOKEN usage in CLA action and change branch to 'cla' 2025-07-22 21:12:42 +08:00
Xiaomeng Zhao
7e8cb108f9 Merge pull request #3129 from huazZeng/master
improve the caption match algorithm
2025-07-22 21:08:06 +08:00
Xiaomeng Zhao
2bf2337e76 @myhloli has signed the CLA in opendatalab/MinerU#3129 2025-07-22 21:06:30 +08:00
Xiaomeng Zhao
300b691713 Update mineru/backend/pipeline/pipeline_magic_model.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-22 21:06:21 +08:00
Xiaomeng Zhao
b25f42a2b3 Merge pull request #3137 from myhloli/dev
fix: adjust batch sizes and improve performance settings in various modules
2025-07-22 21:04:41 +08:00
myhloli
2c0d971e24 fix: update CLA Assistant action version and adjust token usage 2025-07-22 21:03:22 +08:00
myhloli
a5e4179139 fix: adjust batch sizes and improve performance settings in various modules 2025-07-22 20:57:09 +08:00
huazZeng
725fdbff1c improve the caption match algorithm 2025-07-22 10:58:10 +08:00
Xiaomeng Zhao
86bef485b5 Merge pull request #3125 from opendatalab/dev
Dev
2025-07-21 17:20:43 +08:00
Xiaomeng Zhao
a7ce4fca35 Merge branch 'master' into dev 2025-07-21 17:20:14 +08:00
Xiaomeng Zhao
f7f351892a Merge pull request #3124 from myhloli/dev
Dev
2025-07-21 17:12:48 +08:00
Xiaomeng Zhao
b77f8fdc39 Merge branch 'dev' into dev 2025-07-21 17:12:23 +08:00
myhloli
21c4b3fe93 Merge remote-tracking branch 'origin/dev' into dev 2025-07-21 17:02:56 +08:00
myhloli
f522c67b71 fix: update project URLs in pyproject.toml for consistency and clarity 2025-07-21 17:02:37 +08:00
Xiaomeng Zhao
989fb14965 Merge pull request #3105 from Sidney233/dev
test: remove huigui.yml
2025-07-18 16:19:58 +08:00
Sidney233
2d23d70e7b Merge remote-tracking branch 'origin/dev' into dev 2025-07-18 15:15:21 +08:00
Sidney233
2683991e32 test: remove huigui.yml 2025-07-18 15:14:16 +08:00
Xiaomeng Zhao
3facd62832 Merge pull request #3100 from opendatalab/dev
docs: update README files with new documentation links for usage instructions
2025-07-18 00:27:52 +08:00
Xiaomeng Zhao
40c09296ed Merge pull request #3099 from myhloli/dev
docs: update README files with new documentation links for usage instructions
2025-07-18 00:26:42 +08:00
myhloli
bd6c58f31d docs: update README files with new documentation links for usage instructions 2025-07-18 00:24:57 +08:00
Xiaomeng Zhao
1842ab520e Merge pull request #3096 from opendatalab/dev
fix: improve Dockerfile by adding fontconfig and purging pip cache
2025-07-17 18:33:05 +08:00
Xiaomeng Zhao
886b2dbd20 Merge pull request #3095 from myhloli/dev
fix: improve Dockerfile by adding fontconfig and purging pip cache
2025-07-17 18:29:36 +08:00
myhloli
707ef452f5 fix: update Dockerfile to install fonts without no-install-recommends option 2025-07-17 18:28:05 +08:00
myhloli
306c9b7a9c fix: improve Dockerfile by adding fontconfig and purging pip cache 2025-07-17 18:19:05 +08:00
Xiaomeng Zhao
c86a07d564 Merge pull request #3093 from myhloli/dev
fix: update docker deployment instructions for service profiles
2025-07-17 17:22:15 +08:00
myhloli
1895f67161 fix: update docker deployment instructions for service profiles 2025-07-17 17:20:42 +08:00
myhloli
c7dac10bfd feat: add new files for package structure and entry points 2025-07-17 09:52:47 +08:00
Xiaomeng Zhao
107308ac66 Merge pull request #3077 from opendatalab/master
master->dev
2025-07-16 18:18:14 +08:00
myhloli
fd49b9f7bf Update version.py with new version 2025-07-16 10:17:30 +00:00
Xiaomeng Zhao
0bdbdffc99 Merge pull request #3076 from opendatalab/release-2.1.1
Release 2.1.1
2025-07-16 18:12:41 +08:00
20 changed files with 326 additions and 436 deletions

View File

@@ -18,9 +18,9 @@ jobs:
steps:
- name: "CLA Assistant"
if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
uses: contributor-assistant/github-action@v2.5.0
uses: contributor-assistant/github-action@v2.6.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# the below token should have repo scope and must be manually added by you in the repository's secret
# This token is required only if you have configured to store the signatures in a remote repository/organization
PERSONAL_ACCESS_TOKEN: ${{ secrets.RELEASE_TOKEN }}
@@ -28,7 +28,7 @@ jobs:
path-to-signatures: 'signatures/version1/cla.json'
path-to-document: 'https://github.com/opendatalab/MinerU/blob/master/MinerU_CLA.md' # e.g. a CLA or a DCO document
# branch should not be protected
branch: 'master'
branch: 'cla'
allowlist: myhloli,dt-yy,Focusshang,renpengli01,icecraft,drunkpig,wangbinDL,qiangqiang199,GDDGCZ518,papayalove,conghui,quyuan,LollipopsAndWine,Sidney233
# the followings are the optional inputs - If the optional inputs are not given, then default values will be taken

View File

@@ -1,16 +1,15 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
name: mineru
name: mineru-cli-test
on:
pull_request:
push:
branches:
- "master"
- "dev"
paths-ignore:
- "cmds/**"
- "**.md"
workflow_dispatch:
jobs:
cli-test:
if: github.repository == 'opendatalab/MinerU'
@@ -20,31 +19,30 @@ jobs:
fail-fast: true
steps:
- name: PDF cli
uses: actions/checkout@v4
with:
ref: dev
fetch-depth: 2
- name: PDF cli
uses: actions/checkout@v4
with:
ref: dev
fetch-depth: 2
- name: install uv
uses: astral-sh/setup-uv@v5
- name: install&test
run: |
uv --version
uv venv --python 3.12
source .venv/bin/activate
uv pip install .[test]
cd $GITHUB_WORKSPACE && python tests/clean_coverage.py
cd $GITHUB_WORKSPACE && coverage run
cd $GITHUB_WORKSPACE && python tests/get_coverage.py
- name: install uv
uses: astral-sh/setup-uv@v5
- name: install&test
run: |
uv --version
uv venv --python 3.12
source .venv/bin/activate
uv pip install .[test]
cd $GITHUB_WORKSPACE && python tests/clean_coverage.py
cd $GITHUB_WORKSPACE && coverage run
cd $GITHUB_WORKSPACE && python tests/get_coverage.py
notify_to_feishu:
if: ${{ always() && !cancelled() && contains(needs.*.result, 'failure')}}
needs: cli-test
runs-on: ubuntu-latest
steps:
- name: notify
run: |
curl -X POST -H "Content-Type: application/json" -d '{"msg_type":"post","content":{"post":{"zh_cn":{"title":"'${{ github.repository }}' GitHubAction Failed","content":[[{"tag":"text","text":""},{"tag":"a","text":"Please click here for details ","href":"https://github.com/'${{ github.repository }}'/actions/runs/'${GITHUB_RUN_ID}'"}]]}}}}' ${{ secrets.FEISHU_WEBHOOK_URL }}
- name: notify
run: |
curl -X POST -H "Content-Type: application/json" -d '{"msg_type":"post","content":{"post":{"zh_cn":{"title":"'${{ github.repository }}' GitHubAction Failed","content":[[{"tag":"text","text":""},{"tag":"a","text":"Please click here for details ","href":"https://github.com/'${{ github.repository }}'/actions/runs/'${GITHUB_RUN_ID}'"}]]}}}}' ${{ secrets.FEISHU_WEBHOOK_URL }}

View File

@@ -1,48 +0,0 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
name: mineru
on:
push:
branches:
- "master"
- "dev"
paths-ignore:
- "cmds/**"
- "**.md"
jobs:
cli-test:
if: github.repository == 'opendatalab/MinerU'
runs-on: ubuntu-latest
timeout-minutes: 240
strategy:
fail-fast: true
steps:
- name: PDF cli
uses: actions/checkout@v4
with:
ref: dev
fetch-depth: 2
- name: install uv
uses: astral-sh/setup-uv@v5
- name: install&test
run: |
uv --version
uv venv --python 3.12
source .venv/bin/activate
uv pip install .[test]
cd $GITHUB_WORKSPACE && python tests/clean_coverage.py
cd $GITHUB_WORKSPACE && coverage run
cd $GITHUB_WORKSPACE && python tests/get_coverage.py
notify_to_feishu:
if: ${{ always() && !cancelled() && contains(needs.*.result, 'failure')}}
needs: cli-test
runs-on: ubuntu-latest
steps:
- name: notify
run: |
curl -X POST -H "Content-Type: application/json" -d '{"msg_type":"post","content":{"post":{"zh_cn":{"title":"'${{ github.repository }}' GitHubAction Failed","content":[[{"tag":"text","text":""},{"tag":"a","text":"Please click here for details ","href":"https://github.com/'${{ github.repository }}'/actions/runs/'${GITHUB_RUN_ID}'"}]]}}}}' ${{ secrets.FEISHU_WEBHOOK_URL }}

View File

@@ -43,7 +43,10 @@
</div>
# Changelog
- 2025/07/23 2.1.4 Released
- Bug Fixes
- Fixed the issue of excessive memory consumption during the `MFR` step in the `pipeline` backend under certain scenarios #2771
- Fixed the inaccurate matching between `image`/`table` and `caption`/`footnote` under certain conditions #3129
- 2025/07/16 2.1.1 Released
- Bug fixes
- Fixed text block content loss issue that could occur in certain `pipeline` scenarios #3005
@@ -59,10 +62,10 @@
- Greatly enhanced post-processing speed when the `pipeline` backend handles batch processing of documents with fewer pages (<10 pages).
- Layout analysis speed of the `pipeline` backend has been increased by approximately 20%.
- **Experience Enhancements:**
- Built-in ready-to-use `fastapi service` and `gradio webui`. For detailed usage instructions, please refer to [Documentation](#3-api-calls-or-visual-invocation).
- Built-in ready-to-use `fastapi service` and `gradio webui`. For detailed usage instructions, please refer to [Documentation](https://opendatalab.github.io/MinerU/usage/quick_usage/#advanced-usage-via-api-webui-sglang-clientserver).
- Adapted to `sglang` version `0.4.8`, significantly reducing the GPU memory requirements for the `vlm-sglang` backend. It can now run on graphics cards with as little as `8GB GPU memory` (Turing architecture or newer).
- Added transparent parameter passing for all commands related to `sglang`, allowing the `sglang-engine` backend to receive all `sglang` parameters consistently with the `sglang-server`.
- Supports feature extensions based on configuration files, including `custom formula delimiters`, `enabling heading classification`, and `customizing local model directories`. For detailed usage instructions, please refer to [Documentation](#4-extending-mineru-functionality-through-configuration-files).
- Supports feature extensions based on configuration files, including `custom formula delimiters`, `enabling heading classification`, and `customizing local model directories`. For detailed usage instructions, please refer to [Documentation](https://opendatalab.github.io/MinerU/usage/quick_usage/#extending-mineru-functionality-with-configuration-files).
- **New Features:**
- Updated the `pipeline` backend with the PP-OCRv5 multilingual text recognition model, supporting text recognition in 37 languages such as French, Spanish, Portuguese, Russian, and Korean, with an average accuracy improvement of over 30%. [Details](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/algorithm/PP-OCRv5/PP-OCRv5_multi_languages.html)
- Introduced limited support for vertical text layout in the `pipeline` backend.

View File

@@ -43,6 +43,10 @@
</div>
# 更新记录
- 2025/07/23 2.1.4发布
- bug修复
- 修复`pipeline`后端中`MFR`步骤在某些情况下显存消耗过大的问题 #2771
- 修复某些情况下`image`/`table``caption`/`footnote`匹配不准确的问题 #3129
- 2025/07/16 2.1.1发布
- bug修复
- 修复`pipeline`在某些情况可能发生的文本块内容丢失问题 #3005
@@ -56,12 +60,12 @@
- 性能优化:
- 大幅提升某些特定分辨率长边2000像素左右文档的预处理速度
- 大幅提升`pipeline`后端批量处理大量页数较少(<10文档时的后处理速度
- `pipline`后端的layout分析速度提升约20%
- `pipeline`后端的layout分析速度提升约20%
- 体验优化:
- 内置开箱即用的`fastapi服务``gradio webui`,详细使用方法请参考[文档](#3-api-调用-或-可视化调用)
- 内置开箱即用的`fastapi服务``gradio webui`,详细使用方法请参考[文档](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#apiwebuisglang-clientserver)
- `sglang`适配`0.4.8`版本,大幅降低`vlm-sglang`后端的显存要求,最低可在`8G显存`(Turing及以后架构)的显卡上运行
- 对所有命令增加`sglang`的参数透传,使得`sglang-engine`后端可以与`sglang-server`一致,接收`sglang`的所有参数
- 支持基于配置文件的功能扩展,包含`自定义公式标识符``开启标题分级功能``自定义本地模型目录`,详细使用方法请参考[文档](#4-基于配置文件扩展-mineru-功能)
- 支持基于配置文件的功能扩展,包含`自定义公式标识符``开启标题分级功能``自定义本地模型目录`,详细使用方法请参考[文档](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#mineru_1)
- 新特性:
- `pipeline`后端更新 PP-OCRv5 多语种文本识别模型,支持法语、西班牙语、葡萄牙语、俄语、韩语等 37 种语言的文字识别平均精度涨幅超30%。[详情](https://paddlepaddle.github.io/PaddleOCR/latest/version3.x/algorithm/PP-OCRv5/PP-OCRv5_multi_languages.html)
- `pipeline`后端增加对竖排文本的有限支持

View File

@@ -3,14 +3,18 @@ FROM lmsysorg/sglang:v0.4.8.post1-cu126
# Install libgl for opencv support & Noto fonts for Chinese characters
RUN apt-get update && \
apt-get install -y fonts-noto-core fonts-noto-cjk && \
apt-get install -y libgl1 && \
apt-get clean && \
apt-get install -y \
fonts-noto-core \
fonts-noto-cjk \
fontconfig \
libgl1 && \
fc-cache -fv && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install mineru latest
RUN python3 -m pip install -U 'mineru[core]' -i https://mirrors.aliyun.com/pypi/simple --break-system-packages
RUN python3 -m pip install -U 'mineru[core]' -i https://mirrors.aliyun.com/pypi/simple --break-system-packages && \
python3 -m pip cache purge
# Download models and update the configuration file
RUN /bin/bash -c "mineru-models-download -s modelscope -m all"

View File

@@ -1,16 +1,20 @@
# Use the official sglang image
FROM lmsysorg/sglang:v0.4.8.post1-cu126
# Install libgl for opencv support
# Install libgl for opencv support & Noto fonts for Chinese characters
RUN apt-get update && \
apt-get install -y fonts-noto-core fonts-noto-cjk && \
apt-get install -y libgl1 && \
apt-get clean && \
apt-get install -y \
fonts-noto-core \
fonts-noto-cjk \
fontconfig \
libgl1 && \
fc-cache -fv && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install mineru latest
RUN python3 -m pip install -U 'mineru[core]' --break-system-packages
RUN python3 -m pip install -U 'mineru[core]' --break-system-packages && \
python3 -m pip cache purge
# Download models and update the configuration file
RUN /bin/bash -c "mineru-models-download -s huggingface -m all"

View File

@@ -26,7 +26,7 @@ MinerU's Docker uses `lmsysorg/sglang` as the base image, so it includes the `sg
>
> If your device doesn't meet the above requirements, you can still use other features of MinerU, but cannot use `sglang` to accelerate VLM model inference, meaning you cannot use the `vlm-sglang-engine` backend or start the `vlm-sglang-server` service.
## Start Docker Container:
## Start Docker Container
```bash
docker run --gpus all \
@@ -60,7 +60,7 @@ wget https://gcore.jsdelivr.net/gh/opendatalab/MinerU@master/docker/compose.yaml
### Start sglang-server service
connect to `sglang-server` via `vlm-sglang-client` backend
```bash
docker compose -f compose.yaml --profile mineru-sglang-server up -d
docker compose -f compose.yaml --profile sglang-server up -d
```
>[!TIP]
>In another terminal, connect to sglang server via sglang client (only requires CPU and network, no sglang environment needed)
@@ -72,7 +72,7 @@ connect to `sglang-server` via `vlm-sglang-client` backend
### Start Web API service
```bash
docker compose -f compose.yaml --profile mineru-api up -d
docker compose -f compose.yaml --profile api up -d
```
>[!TIP]
>Access `http://<server_ip>:8000/docs` in your browser to view the API documentation.
@@ -81,7 +81,7 @@ connect to `sglang-server` via `vlm-sglang-client` backend
### Start Gradio WebUI service
```bash
docker compose -f compose.yaml --profile mineru-gradio up -d
docker compose -f compose.yaml --profile gradio up -d
```
>[!TIP]
>

View File

@@ -25,7 +25,7 @@ Mineru的docker使用了`lmsysorg/sglang`作为基础镜像因此在docker中
>
> 如果您的设备不满足上述条件您仍然可以使用MinerU的其他功能但无法使用`sglang`加速VLM模型推理即无法使用`vlm-sglang-engine`后端和启动`vlm-sglang-server`服务。
## 启动 Docker 容器
## 启动 Docker 容器
```bash
docker run --gpus all \
@@ -58,7 +58,7 @@ wget https://gcore.jsdelivr.net/gh/opendatalab/MinerU@master/docker/compose.yaml
### 启动 sglang-server 服务
并通过`vlm-sglang-client`后端连接`sglang-server`
```bash
docker compose -f compose.yaml --profile mineru-sglang-server up -d
docker compose -f compose.yaml --profile sglang-server up -d
```
>[!TIP]
>在另一个终端中通过sglang client连接sglang server只需cpu与网络不需要sglang环境
@@ -70,7 +70,7 @@ wget https://gcore.jsdelivr.net/gh/opendatalab/MinerU@master/docker/compose.yaml
### 启动 Web API 服务
```bash
docker compose -f compose.yaml --profile mineru-api up -d
docker compose -f compose.yaml --profile api up -d
```
>[!TIP]
>在浏览器中访问 `http://<server_ip>:8000/docs` 查看API文档。
@@ -79,7 +79,7 @@ wget https://gcore.jsdelivr.net/gh/opendatalab/MinerU@master/docker/compose.yaml
### 启动 Gradio WebUI 服务
```bash
docker compose -f compose.yaml --profile mineru-gradio up -d
docker compose -f compose.yaml --profile gradio up -d
```
>[!TIP]
>

View File

@@ -12,6 +12,7 @@ from ...utils.ocr_utils import get_adjusted_mfdetrec_res, get_ocr_result_list, O
YOLO_LAYOUT_BASE_BATCH_SIZE = 8
MFD_BASE_BATCH_SIZE = 1
MFR_BASE_BATCH_SIZE = 16
OCR_DET_BASE_BATCH_SIZE = 16
class BatchAnalyze:
@@ -170,9 +171,9 @@ class BatchAnalyze:
batch_images.append(padded_img)
# 批处理检测
batch_size = min(len(batch_images), self.batch_ratio * 16) # 增加批处理大小
# logger.debug(f"OCR-det batch: {batch_size} images, target size: {target_h}x{target_w}")
batch_results = ocr_model.text_detector.batch_predict(batch_images, batch_size)
det_batch_size = min(len(batch_images), self.batch_ratio * OCR_DET_BASE_BATCH_SIZE) # 增加批处理大小
# logger.debug(f"OCR-det batch: {det_batch_size} images, target size: {target_h}x{target_w}")
batch_results = ocr_model.text_detector.batch_predict(batch_images, det_batch_size)
# 处理批处理结果
for i, (crop_info, (dt_boxes, elapse)) in enumerate(zip(group_crops, batch_results)):

View File

@@ -74,10 +74,10 @@ def doc_analyze(
table_enable=True,
):
"""
适当调大MIN_BATCH_INFERENCE_SIZE可以提高性能可能会增加显存使用量
可通过环境变量MINERU_MIN_BATCH_INFERENCE_SIZE设置默认值为128
适当调大MIN_BATCH_INFERENCE_SIZE可以提高性能更大的 MIN_BATCH_INFERENCE_SIZE会消耗更多内存
可通过环境变量MINERU_MIN_BATCH_INFERENCE_SIZE设置默认值为384
"""
min_batch_inference_size = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 128))
min_batch_inference_size = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 384))
# 收集所有页面信息
all_pages_info = [] # 存储(dataset_index, page_index, img, ocr, lang, width, height)

View File

@@ -1,5 +1,6 @@
from mineru.utils.boxbase import bbox_relative_pos, calculate_iou, bbox_distance, is_in, get_minbox_if_overlap_by_ratio
from mineru.utils.boxbase import bbox_relative_pos, calculate_iou, bbox_distance, get_minbox_if_overlap_by_ratio
from mineru.utils.enum_class import CategoryId, ContentType
from mineru.utils.magic_model_utils import tie_up_category_by_distance_v3, reduct_overlap
class MagicModel:
@@ -208,168 +209,39 @@ class MagicModel:
return bbox_distance(bbox1, bbox2)
def __reduct_overlap(self, bboxes):
N = len(bboxes)
keep = [True] * N
for i in range(N):
for j in range(N):
if i == j:
continue
if is_in(bboxes[i]['bbox'], bboxes[j]['bbox']):
keep[i] = False
return [bboxes[i] for i in range(N) if keep[i]]
def __tie_up_category_by_distance_v3(
self,
subject_category_id: int,
object_category_id: int,
):
subjects = self.__reduct_overlap(
list(
map(
lambda x: {'bbox': x['bbox'], 'score': x['score']},
filter(
lambda x: x['category_id'] == subject_category_id,
self.__page_model_info['layout_dets'],
),
)
)
)
objects = self.__reduct_overlap(
list(
map(
lambda x: {'bbox': x['bbox'], 'score': x['score']},
filter(
lambda x: x['category_id'] == object_category_id,
self.__page_model_info['layout_dets'],
),
)
)
)
ret = []
N, M = len(subjects), len(objects)
subjects.sort(key=lambda x: x['bbox'][0] ** 2 + x['bbox'][1] ** 2)
objects.sort(key=lambda x: x['bbox'][0] ** 2 + x['bbox'][1] ** 2)
OBJ_IDX_OFFSET = 10000
SUB_BIT_KIND, OBJ_BIT_KIND = 0, 1
all_boxes_with_idx = [(i, SUB_BIT_KIND, sub['bbox'][0], sub['bbox'][1]) for i, sub in enumerate(subjects)] + [(i + OBJ_IDX_OFFSET , OBJ_BIT_KIND, obj['bbox'][0], obj['bbox'][1]) for i, obj in enumerate(objects)]
seen_idx = set()
seen_sub_idx = set()
while N > len(seen_sub_idx):
candidates = []
for idx, kind, x0, y0 in all_boxes_with_idx:
if idx in seen_idx:
continue
candidates.append((idx, kind, x0, y0))
if len(candidates) == 0:
break
left_x = min([v[2] for v in candidates])
top_y = min([v[3] for v in candidates])
candidates.sort(key=lambda x: (x[2]-left_x) ** 2 + (x[3] - top_y) ** 2)
fst_idx, fst_kind, left_x, top_y = candidates[0]
candidates.sort(key=lambda x: (x[2] - left_x) ** 2 + (x[3] - top_y)**2)
nxt = None
for i in range(1, len(candidates)):
if candidates[i][1] ^ fst_kind == 1:
nxt = candidates[i]
break
if nxt is None:
break
if fst_kind == SUB_BIT_KIND:
sub_idx, obj_idx = fst_idx, nxt[0] - OBJ_IDX_OFFSET
else:
sub_idx, obj_idx = nxt[0], fst_idx - OBJ_IDX_OFFSET
pair_dis = bbox_distance(subjects[sub_idx]['bbox'], objects[obj_idx]['bbox'])
nearest_dis = float('inf')
for i in range(N):
if i in seen_idx or i == sub_idx:continue
nearest_dis = min(nearest_dis, bbox_distance(subjects[i]['bbox'], objects[obj_idx]['bbox']))
if pair_dis >= 3*nearest_dis:
seen_idx.add(sub_idx)
continue
seen_idx.add(sub_idx)
seen_idx.add(obj_idx + OBJ_IDX_OFFSET)
seen_sub_idx.add(sub_idx)
ret.append(
{
'sub_bbox': {
'bbox': subjects[sub_idx]['bbox'],
'score': subjects[sub_idx]['score'],
},
'obj_bboxes': [
{'score': objects[obj_idx]['score'], 'bbox': objects[obj_idx]['bbox']}
],
'sub_idx': sub_idx,
}
)
for i in range(len(objects)):
j = i + OBJ_IDX_OFFSET
if j in seen_idx:
continue
seen_idx.add(j)
nearest_dis, nearest_sub_idx = float('inf'), -1
for k in range(len(subjects)):
dis = bbox_distance(objects[i]['bbox'], subjects[k]['bbox'])
if dis < nearest_dis:
nearest_dis = dis
nearest_sub_idx = k
for k in range(len(subjects)):
if k != nearest_sub_idx: continue
if k in seen_sub_idx:
for kk in range(len(ret)):
if ret[kk]['sub_idx'] == k:
ret[kk]['obj_bboxes'].append({'score': objects[i]['score'], 'bbox': objects[i]['bbox']})
break
else:
ret.append(
{
'sub_bbox': {
'bbox': subjects[k]['bbox'],
'score': subjects[k]['score'],
},
'obj_bboxes': [
{'score': objects[i]['score'], 'bbox': objects[i]['bbox']}
],
'sub_idx': k,
}
def __tie_up_category_by_distance_v3(self, subject_category_id, object_category_id):
# 定义获取主体和客体对象的函数
def get_subjects():
return reduct_overlap(
list(
map(
lambda x: {'bbox': x['bbox'], 'score': x['score']},
filter(
lambda x: x['category_id'] == subject_category_id,
self.__page_model_info['layout_dets'],
),
)
seen_sub_idx.add(k)
seen_idx.add(k)
for i in range(len(subjects)):
if i in seen_sub_idx:
continue
ret.append(
{
'sub_bbox': {
'bbox': subjects[i]['bbox'],
'score': subjects[i]['score'],
},
'obj_bboxes': [],
'sub_idx': i,
}
)
)
def get_objects():
return reduct_overlap(
list(
map(
lambda x: {'bbox': x['bbox'], 'score': x['score']},
filter(
lambda x: x['category_id'] == object_category_id,
self.__page_model_info['layout_dets'],
),
)
)
)
return ret
# 调用通用方法
return tie_up_category_by_distance_v3(
get_subjects,
get_objects
)
def get_imgs(self):
with_captions = self.__tie_up_category_by_distance_v3(

View File

@@ -3,10 +3,10 @@ from typing import Literal
from loguru import logger
from mineru.utils.boxbase import bbox_distance, is_in
from mineru.utils.enum_class import ContentType, BlockType, SplitFlag
from mineru.backend.vlm.vlm_middle_json_mkcontent import merge_para_with_text
from mineru.utils.format_utils import convert_otsl_to_html
from mineru.utils.magic_model_utils import reduct_overlap, tie_up_category_by_distance_v3
class MagicModel:
@@ -251,175 +251,39 @@ def latex_fix(latex):
return latex
def __reduct_overlap(bboxes):
N = len(bboxes)
keep = [True] * N
for i in range(N):
for j in range(N):
if i == j:
continue
if is_in(bboxes[i]["bbox"], bboxes[j]["bbox"]):
keep[i] = False
return [bboxes[i] for i in range(N) if keep[i]]
def __tie_up_category_by_distance_v3(
blocks: list,
subject_block_type: str,
object_block_type: str,
):
subjects = __reduct_overlap(
list(
map(
lambda x: {"bbox": x["bbox"], "lines": x["lines"], "index": x["index"]},
filter(
lambda x: x["type"] == subject_block_type,
blocks,
),
)
)
)
objects = __reduct_overlap(
list(
map(
lambda x: {"bbox": x["bbox"], "lines": x["lines"], "index": x["index"]},
filter(
lambda x: x["type"] == object_block_type,
blocks,
),
)
)
)
ret = []
N, M = len(subjects), len(objects)
subjects.sort(key=lambda x: x["bbox"][0] ** 2 + x["bbox"][1] ** 2)
objects.sort(key=lambda x: x["bbox"][0] ** 2 + x["bbox"][1] ** 2)
OBJ_IDX_OFFSET = 10000
SUB_BIT_KIND, OBJ_BIT_KIND = 0, 1
all_boxes_with_idx = [(i, SUB_BIT_KIND, sub["bbox"][0], sub["bbox"][1]) for i, sub in enumerate(subjects)] + [
(i + OBJ_IDX_OFFSET, OBJ_BIT_KIND, obj["bbox"][0], obj["bbox"][1]) for i, obj in enumerate(objects)
]
seen_idx = set()
seen_sub_idx = set()
while N > len(seen_sub_idx):
candidates = []
for idx, kind, x0, y0 in all_boxes_with_idx:
if idx in seen_idx:
continue
candidates.append((idx, kind, x0, y0))
if len(candidates) == 0:
break
left_x = min([v[2] for v in candidates])
top_y = min([v[3] for v in candidates])
candidates.sort(key=lambda x: (x[2] - left_x) ** 2 + (x[3] - top_y) ** 2)
fst_idx, fst_kind, left_x, top_y = candidates[0]
candidates.sort(key=lambda x: (x[2] - left_x) ** 2 + (x[3] - top_y) ** 2)
nxt = None
for i in range(1, len(candidates)):
if candidates[i][1] ^ fst_kind == 1:
nxt = candidates[i]
break
if nxt is None:
break
if fst_kind == SUB_BIT_KIND:
sub_idx, obj_idx = fst_idx, nxt[0] - OBJ_IDX_OFFSET
else:
sub_idx, obj_idx = nxt[0], fst_idx - OBJ_IDX_OFFSET
pair_dis = bbox_distance(subjects[sub_idx]["bbox"], objects[obj_idx]["bbox"])
nearest_dis = float("inf")
for i in range(N):
if i in seen_idx or i == sub_idx:
continue
nearest_dis = min(nearest_dis, bbox_distance(subjects[i]["bbox"], objects[obj_idx]["bbox"]))
if pair_dis >= 3 * nearest_dis:
seen_idx.add(sub_idx)
continue
seen_idx.add(sub_idx)
seen_idx.add(obj_idx + OBJ_IDX_OFFSET)
seen_sub_idx.add(sub_idx)
ret.append(
{
"sub_bbox": {
"bbox": subjects[sub_idx]["bbox"],
"lines": subjects[sub_idx]["lines"],
"index": subjects[sub_idx]["index"],
},
"obj_bboxes": [
{"bbox": objects[obj_idx]["bbox"], "lines": objects[obj_idx]["lines"], "index": objects[obj_idx]["index"]}
],
"sub_idx": sub_idx,
}
)
for i in range(len(objects)):
j = i + OBJ_IDX_OFFSET
if j in seen_idx:
continue
seen_idx.add(j)
nearest_dis, nearest_sub_idx = float("inf"), -1
for k in range(len(subjects)):
dis = bbox_distance(objects[i]["bbox"], subjects[k]["bbox"])
if dis < nearest_dis:
nearest_dis = dis
nearest_sub_idx = k
for k in range(len(subjects)):
if k != nearest_sub_idx:
continue
if k in seen_sub_idx:
for kk in range(len(ret)):
if ret[kk]["sub_idx"] == k:
ret[kk]["obj_bboxes"].append(
{"bbox": objects[i]["bbox"], "lines": objects[i]["lines"], "index": objects[i]["index"]}
)
break
else:
ret.append(
{
"sub_bbox": {
"bbox": subjects[k]["bbox"],
"lines": subjects[k]["lines"],
"index": subjects[k]["index"],
},
"obj_bboxes": [
{"bbox": objects[i]["bbox"], "lines": objects[i]["lines"], "index": objects[i]["index"]}
],
"sub_idx": k,
}
def __tie_up_category_by_distance_v3(blocks, subject_block_type, object_block_type):
# 定义获取主体和客体对象的函数
def get_subjects():
return reduct_overlap(
list(
map(
lambda x: {"bbox": x["bbox"], "lines": x["lines"], "index": x["index"]},
filter(
lambda x: x["type"] == subject_block_type,
blocks,
),
)
seen_sub_idx.add(k)
seen_idx.add(k)
for i in range(len(subjects)):
if i in seen_sub_idx:
continue
ret.append(
{
"sub_bbox": {
"bbox": subjects[i]["bbox"],
"lines": subjects[i]["lines"],
"index": subjects[i]["index"],
},
"obj_bboxes": [],
"sub_idx": i,
}
)
)
return ret
def get_objects():
return reduct_overlap(
list(
map(
lambda x: {"bbox": x["bbox"], "lines": x["lines"], "index": x["index"]},
filter(
lambda x: x["type"] == object_block_type,
blocks,
),
)
)
)
# 调用通用方法
return tie_up_category_by_distance_v3(
get_subjects,
get_objects
)
def get_type_blocks(blocks, block_type: Literal["image", "table"]):

View File

@@ -104,6 +104,10 @@ class UnimernetModel(object):
# Create dataset with sorted images
dataset = MathDataset(sorted_images, transform=self.model.transform)
# 如果batch_size > len(sorted_images)则设置为不超过len(sorted_images)的2的幂
batch_size = min(batch_size, max(1, 2 ** (len(sorted_images).bit_length() - 1))) if sorted_images else 1
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=0)
# Process batches and store results
@@ -115,7 +119,7 @@ class UnimernetModel(object):
mf_img = mf_img.to(dtype=self.model.dtype)
mf_img = mf_img.to(self.device)
with torch.no_grad():
output = self.model.generate({"image": mf_img})
output = self.model.generate({"image": mf_img}, batch_size=batch_size)
mfr_res.extend(output["fixed_str"])
# 更新进度条每次增加batch_size但要注意最后一个batch可能不足batch_size

View File

@@ -468,7 +468,7 @@ class UnimernetModel(VisionEncoderDecoderModel):
).loss
return {"loss": loss}
def generate(self, samples, do_sample: bool = False, temperature: float = 0.2, top_p: float = 0.95):
def generate(self, samples, do_sample: bool = False, temperature: float = 0.2, top_p: float = 0.95, batch_size=64):
pixel_values = samples["image"]
num_channels = pixel_values.shape[1]
if num_channels == 1:
@@ -478,7 +478,13 @@ class UnimernetModel(VisionEncoderDecoderModel):
if do_sample:
kwargs["temperature"] = temperature
kwargs["top_p"] = top_p
if self.tokenizer.tokenizer.model_max_length > 1152:
if batch_size <= 32:
self.tokenizer.tokenizer.model_max_length = 1152 # 6g
else:
self.tokenizer.tokenizer.model_max_length = 1344 # 8g
outputs = super().generate(
pixel_values=pixel_values,
max_new_tokens=self.tokenizer.tokenizer.model_max_length, # required

View File

@@ -88,7 +88,7 @@ class PytorchPaddleOCR(TextSystem):
kwargs['det_model_path'] = det_model_path
kwargs['rec_model_path'] = rec_model_path
kwargs['rec_char_dict_path'] = os.path.join(root_dir, 'pytorchocr', 'utils', 'resources', 'dict', dict_file)
# kwargs['rec_batch_num'] = 8
kwargs['rec_batch_num'] = 16
kwargs['device'] = device

View File

@@ -0,0 +1,168 @@
"""
包含两个MagicModel类中重复使用的方法和逻辑
"""
from typing import List, Dict, Any, Callable
from mineru.utils.boxbase import bbox_distance, is_in
def reduct_overlap(bboxes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Drop every bbox entry that is fully contained inside another entry.

    Args:
        bboxes: list of dicts, each carrying a 'bbox' value.

    Returns:
        The surviving entries, preserving their original order.
    """
    survivors = []
    for idx, candidate in enumerate(bboxes):
        # An entry is discarded as soon as any *other* entry contains it.
        swallowed = any(
            idx != other_idx and is_in(candidate['bbox'], other['bbox'])
            for other_idx, other in enumerate(bboxes)
        )
        if not swallowed:
            survivors.append(candidate)
    return survivors
def tie_up_category_by_distance_v3(
    get_subjects_func: Callable,
    get_objects_func: Callable,
    extract_subject_func: Callable = None,
    extract_object_func: Callable = None
):
    """
    Generic category-association routine: pairs each "subject" block with
    nearby "object" blocks (e.g. an image body with its caption/footnote)
    using bbox distance.

    Args:
        get_subjects_func: callable returning the list of subject entries
            (each a dict with at least a "bbox" key; bbox = [x0, y0, ...]).
        get_objects_func: callable returning the list of object entries.
        extract_subject_func: optional callable mapping a subject entry to the
            payload stored under "sub_bbox"; defaults to identity.
        extract_object_func: optional callable mapping an object entry to the
            payload stored under "obj_bboxes"; defaults to identity.

    Returns:
        List of dicts {"sub_bbox": ..., "obj_bboxes": [...], "sub_idx": int},
        one entry per subject; subjects that never received an object get an
        empty "obj_bboxes" list. "sub_idx" indexes the *sorted* subject list.
    """
    subjects = get_subjects_func()
    objects = get_objects_func()
    # Fall back to identity extractors when no custom ones are supplied.
    if extract_subject_func is None:
        extract_subject_func = lambda x: x
    if extract_object_func is None:
        extract_object_func = lambda x: x
    ret = []
    N, M = len(subjects), len(objects)
    # Sort both lists by squared distance of the bbox's (x0, y0) corner from
    # the origin, so indices used below are stable reading-order-ish indices.
    subjects.sort(key=lambda x: x["bbox"][0] ** 2 + x["bbox"][1] ** 2)
    objects.sort(key=lambda x: x["bbox"][0] ** 2 + x["bbox"][1] ** 2)
    # Object indices are shifted by this offset so subjects and objects can
    # share one candidate pool / one `seen_idx` set without colliding.
    # NOTE(review): assumes fewer than 10000 subjects per page — confirm.
    OBJ_IDX_OFFSET = 10000
    SUB_BIT_KIND, OBJ_BIT_KIND = 0, 1
    all_boxes_with_idx = [(i, SUB_BIT_KIND, sub["bbox"][0], sub["bbox"][1]) for i, sub in enumerate(subjects)] + [
        (i + OBJ_IDX_OFFSET, OBJ_BIT_KIND, obj["bbox"][0], obj["bbox"][1]) for i, obj in enumerate(objects)
    ]
    seen_idx = set()
    seen_sub_idx = set()
    # Pass 1 — greedy pairing: repeatedly pick the unseen box closest to the
    # top-left of the remaining pool, then match it with the nearest unseen
    # box of the opposite kind.
    while N > len(seen_sub_idx):
        candidates = []
        for idx, kind, x0, y0 in all_boxes_with_idx:
            if idx in seen_idx:
                continue
            candidates.append((idx, kind, x0, y0))
        if len(candidates) == 0:
            break
        left_x = min([v[2] for v in candidates])
        top_y = min([v[3] for v in candidates])
        candidates.sort(key=lambda x: (x[2] - left_x) ** 2 + (x[3] - top_y) ** 2)
        fst_idx, fst_kind, left_x, top_y = candidates[0]
        fst_bbox = subjects[fst_idx]['bbox'] if fst_kind == SUB_BIT_KIND else objects[fst_idx - OBJ_IDX_OFFSET]['bbox']
        # Re-rank the pool by bbox distance to the chosen anchor box.
        candidates.sort(
            key=lambda x: bbox_distance(fst_bbox, subjects[x[0]]['bbox']) if x[1] == SUB_BIT_KIND else bbox_distance(
                fst_bbox, objects[x[0] - OBJ_IDX_OFFSET]['bbox']))
        nxt = None
        for i in range(1, len(candidates)):
            # kind XOR anchor-kind == 1 means "the opposite kind".
            if candidates[i][1] ^ fst_kind == 1:
                nxt = candidates[i]
                break
        if nxt is None:
            break
        if fst_kind == SUB_BIT_KIND:
            sub_idx, obj_idx = fst_idx, nxt[0] - OBJ_IDX_OFFSET
        else:
            sub_idx, obj_idx = nxt[0], fst_idx - OBJ_IDX_OFFSET
        pair_dis = bbox_distance(subjects[sub_idx]["bbox"], objects[obj_idx]["bbox"])
        nearest_dis = float("inf")
        for i in range(N):
            # The 1-to-1 matching bias of the original algorithm was removed:
            # if i in seen_idx or i == sub_idx:continue
            nearest_dis = min(nearest_dis, bbox_distance(subjects[i]["bbox"], objects[obj_idx]["bbox"]))
        # Reject the pair when some subject is much (3x) closer to this object;
        # the subject is still consumed so the loop keeps making progress.
        if pair_dis >= 3 * nearest_dis:
            seen_idx.add(sub_idx)
            continue
        seen_idx.add(sub_idx)
        seen_idx.add(obj_idx + OBJ_IDX_OFFSET)
        seen_sub_idx.add(sub_idx)
        ret.append(
            {
                "sub_bbox": extract_subject_func(subjects[sub_idx]),
                "obj_bboxes": [extract_object_func(objects[obj_idx])],
                "sub_idx": sub_idx,
            }
        )
    # Pass 2 — attach every still-unmatched object to its nearest subject,
    # either appending to that subject's existing result entry or creating one.
    for i in range(len(objects)):
        j = i + OBJ_IDX_OFFSET
        if j in seen_idx:
            continue
        seen_idx.add(j)
        nearest_dis, nearest_sub_idx = float("inf"), -1
        for k in range(len(subjects)):
            dis = bbox_distance(objects[i]["bbox"], subjects[k]["bbox"])
            if dis < nearest_dis:
                nearest_dis = dis
                nearest_sub_idx = k
        for k in range(len(subjects)):
            if k != nearest_sub_idx:
                continue
            if k in seen_sub_idx:
                # Subject already has a result entry: append this object to it.
                for kk in range(len(ret)):
                    if ret[kk]["sub_idx"] == k:
                        ret[kk]["obj_bboxes"].append(extract_object_func(objects[i]))
                        break
            else:
                ret.append(
                    {
                        "sub_bbox": extract_subject_func(subjects[k]),
                        "obj_bboxes": [extract_object_func(objects[i])],
                        "sub_idx": k,
                    }
                )
                seen_sub_idx.add(k)
                seen_idx.add(k)
    # Pass 3 — emit subjects that never received any object, with an empty list.
    for i in range(len(subjects)):
        if i in seen_sub_idx:
            continue
        ret.append(
            {
                "sub_bbox": extract_subject_func(subjects[i]),
                "obj_bboxes": [],
                "sub_idx": i,
            }
        )
    return ret

View File

@@ -1 +1 @@
__version__ = "2.1.0"
__version__ = "2.1.3"

View File

@@ -109,8 +109,10 @@ pipeline_old_linux = [
]
[project.urls]
Home = "https://mineru.net/"
Repository = "https://github.com/opendatalab/MinerU"
homepage = "https://mineru.net/"
documentation = "https://opendatalab.github.io/MinerU/"
repository = "https://github.com/opendatalab/MinerU"
issues = "https://github.com/opendatalab/MinerU/issues"
[project.scripts]
mineru = "mineru.cli:client.main"

View File

@@ -391,6 +391,14 @@
"created_at": "2025-07-16T08:53:24Z",
"repoId": 765083837,
"pullRequestNo": 3070
},
{
"name": "huazZeng",
"id": 125243371,
"comment_id": 3100630363,
"created_at": "2025-07-22T03:04:40Z",
"repoId": 765083837,
"pullRequestNo": 3129
}
]
}