Compare commits

..

40 Commits

Author SHA1 Message Date
Xiaomeng Zhao
e96e4a0ce4 Merge pull request #3557 from opendatalab/dev
Dev
2025-09-20 15:30:40 +08:00
Xiaomeng Zhao
c7bde0ab39 Merge pull request #3556 from myhloli/dev
Refactor batch image orientation classification logic for improved cl…
2025-09-20 15:30:08 +08:00
myhloli
8754c24e42 Refactor batch image orientation classification logic for improved clarity and performance 2025-09-20 15:24:28 +08:00
Xiaomeng Zhao
4f8c00cc34 Merge pull request #3555 from opendatalab/dev
Dev
2025-09-20 15:18:19 +08:00
Xiaomeng Zhao
89681f98ad Merge pull request #3554 from myhloli/dev
Fix formatting in changelog sections of README.md and README_zh-CN.md…
2025-09-20 15:14:16 +08:00
myhloli
66d328dbc5 Fix formatting in changelog sections of README.md and README_zh-CN.md for improved readability 2025-09-20 15:13:29 +08:00
Xiaomeng Zhao
f0c1318545 Merge pull request #3553 from myhloli/dev
Fix formatting in changelog sections of README.md and README_zh-CN.md…
2025-09-20 15:11:43 +08:00
myhloli
6e97f3cf70 Fix formatting in changelog sections of README.md and README_zh-CN.md for improved readability 2025-09-20 15:10:25 +08:00
Xiaomeng Zhao
aede62167e Merge pull request #3552 from opendatalab/dev
Dev
2025-09-20 15:08:40 +08:00
Xiaomeng Zhao
5f2740f743 Merge pull request #3551 from myhloli/dev
Fix compute capability comparison in custom_logits_processors.py for …
2025-09-20 15:08:14 +08:00
myhloli
a888d2b625 Fix compute capability comparison in custom_logits_processors.py for correct version handling 2025-09-20 15:06:49 +08:00
Xiaomeng Zhao
4275876331 Merge pull request #3550 from opendatalab/dev
Dev
2025-09-20 15:01:39 +08:00
Xiaomeng Zhao
ec9f7f54ab Merge pull request #3549 from myhloli/dev
Update README.md and README_zh-CN.md to include changelog for v2.5.3 …
2025-09-20 15:00:50 +08:00
myhloli
7861e5e369 Remove redundant newline in README.md for improved formatting 2025-09-20 15:00:12 +08:00
myhloli
159f3a89a3 Update README.md and README_zh-CN.md to include changelog for v2.5.3 release with compatibility fixes and performance adjustments 2025-09-20 14:57:54 +08:00
Xiaomeng Zhao
d9452bbeb9 Merge pull request #3546 from myhloli/dev
Update docker_deployment.md for improved clarity on base image usage …
2025-09-20 14:48:50 +08:00
myhloli
d808a32c0b Update docker_deployment.md for improved clarity on base image usage and GPU support 2025-09-20 13:52:16 +08:00
Xiaomeng Zhao
12ce3bd024 Merge pull request #3544 from myhloli/dev
Dev
2025-09-20 13:26:18 +08:00
myhloli
e3d7aece50 Remove warning log for default VLLM_USE_V1 value in custom_logits_processors.py 2025-09-20 13:25:11 +08:00
Xiaomeng Zhao
7c55a0ea65 Update mineru/backend/vlm/custom_logits_processors.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-09-20 13:22:40 +08:00
myhloli
f1659eb7a7 Refactor logits processor handling in server.py and vlm_analyze.py for improved clarity and consistency 2025-09-20 13:21:05 +08:00
myhloli
c6bffd9382 Restrict vllm version to <0.11 for compatibility 2025-09-20 11:49:06 +08:00
myhloli
857dcb2ef5 Update docker_deployment.md to clarify GPU model support and base image options for vLLM 2025-09-20 11:45:33 +08:00
myhloli
ef69f98cd6 Update Dockerfile to include comments for GPU architecture compatibility based on Compute Capability 2025-09-20 03:15:58 +08:00
myhloli
6d5d1cf26b Refactor image rotation handling in batch_analyze.py and paddle_ori_cls.py for improved compatibility with torch versions 2025-09-20 03:07:47 +08:00
myhloli
7c481796f8 Refactor custom logits processors to include vllm version checks and improve logging 2025-09-20 01:22:06 +08:00
myhloli
7d62b7b7cc Update mineru-vl-utils dependency version to 0.1.8 2025-09-20 00:31:14 +08:00
myhloli
5a0cf9af7f Enhance custom logits processors with improved compute capability checks and environment variable handling 2025-09-20 00:21:43 +08:00
myhloli
f5e0e67545 Add custom logits processors functionality with compute capability check 2025-09-19 19:21:56 +08:00
myhloli
a4cac624df Add compute capability check for custom logits processors in server.py and vlm_analyze.py 2025-09-19 19:00:41 +08:00
Xiaomeng Zhao
e1eb318b9b Merge pull request #3535 from opendatalab/master
master->dev
2025-09-19 16:51:13 +08:00
myhloli
31834b1e68 Update version.py with new version 2025-09-19 08:48:17 +00:00
Xiaomeng Zhao
100ace2e99 Merge pull request #3534 from opendatalab/release-2.5.2
Release 2.5.2
2025-09-19 16:45:57 +08:00
Xiaomeng Zhao
6aac639686 Merge pull request #3533 from myhloli/dev
Update ModelScope link in README_zh-CN.md for MinerU2.5 release
2025-09-19 16:39:40 +08:00
myhloli
82f94a9a84 Update ModelScope link in README_zh-CN.md for MinerU2.5 release 2025-09-19 16:36:42 +08:00
Xiaomeng Zhao
d928334c61 Merge pull request #3532 from myhloli/dev
Fix formatting in vlm_middle_json_mkcontent.py to ensure proper line breaks in list items
2025-09-19 16:34:29 +08:00
myhloli
ebad82bd8c Update version in README to 2.5.2 for MinerU2.5 release 2025-09-19 16:31:30 +08:00
myhloli
b03c5fb449 Fix formatting in vlm_middle_json_mkcontent.py to ensure proper line breaks in list items 2025-09-19 16:30:43 +08:00
myhloli
c343afd20c Update version.py with new version 2025-09-19 03:45:08 +00:00
Xiaomeng Zhao
6586c7c01e Merge pull request #3529 from opendatalab/release-2.5.1
Release 2.5.1
2025-09-19 11:43:51 +08:00
14 changed files with 117 additions and 44 deletions

View File

@@ -44,7 +44,13 @@
# Changelog
- 2025/09/19 2.5.1 Released
- 2025/09/20 2.5.3 Released
- Dependency version range adjustment to enable Turing and earlier architecture GPUs to use vLLM acceleration for MinerU2.5 model inference.
- `pipeline` backend compatibility fixes for torch 2.8.0.
- Reduced default concurrency for vLLM async backend to lower server pressure and avoid connection closure issues caused by high load.
- More compatibility-related details can be found in the [announcement](https://github.com/opendatalab/MinerU/discussions/3548)
- 2025/09/19 2.5.2 Released
We are officially releasing MinerU2.5, currently the most powerful multimodal large model for document parsing.
With only 1.2B parameters, MinerU2.5's accuracy on the OmniDocBench benchmark comprehensively surpasses top-tier multimodal models like Gemini 2.5 Pro, GPT-4o, and Qwen2.5-VL-72B. It also significantly outperforms leading specialized models such as dots.ocr, MonkeyOCR, and PP-StructureV3.

View File

@@ -43,10 +43,15 @@
</div>
# 更新记录
- 2025/09/20 2.5.3 发布
- 依赖版本范围调整使得Turing及更早架构显卡可以使用vLLM加速推理MinerU2.5模型。
- `pipeline`后端对torch 2.8.0的一些兼容性修复。
- 降低vLLM异步后端默认的并发数降低服务端压力以避免高压导致的连接关闭问题。
- 更多兼容性相关内容详见[公告](https://github.com/opendatalab/MinerU/discussions/3547)
- 2025/09/19 2.5.1 发布
- 2025/09/19 2.5.2 发布
我们正式发布 MinerU2.5,当前最强文档解析多模态大模型。仅凭 1.2B 参数MinerU2.5 在 OmniDocBench 文档解析评测中,精度已全面超越 Gemini2.5-Pro、GPT-4o、Qwen2.5-VL-72B等顶级多模态大模型并显著领先于主流文档解析专用模型如 dots.ocr, MonkeyOCR, PP-StructureV3 等)。
模型已发布至[HuggingFace](https://huggingface.co/opendatalab/MinerU2.5-2509-1.2B)和[ModelScope](https://huggingface.co/opendatalab/MinerU2.5-2509-1.2B)平台,欢迎大家下载使用!
模型已发布至[HuggingFace](https://huggingface.co/opendatalab/MinerU2.5-2509-1.2B)和[ModelScope](https://modelscope.cn/models/opendatalab/MinerU2.5-2509-1.2B)平台,欢迎大家下载使用!
- 核心亮点
- 极致能效性能SOTA: 以 1.2B 的轻量化规模实现了超越百亿乃至千亿级模型的SOTA性能重新定义了文档解析的能效比。
- 先进架构,全面领先: 通过 “两阶段推理” (解耦布局分析与内容识别) 与 原生高分辨率架构 的结合,在布局分析、文本识别、公式识别、表格识别及阅读顺序五大方面均达到 SOTA 水平。

View File

@@ -1,9 +1,16 @@
# Use DaoCloud mirrored vllm image for China region
# Use DaoCloud mirrored vllm image for China region for gpu with Ampere architecture and above (Compute Capability>=8.0)
# Compute Capability version query (https://developer.nvidia.com/cuda-gpus)
FROM docker.m.daocloud.io/vllm/vllm-openai:v0.10.1.1
# Use the official vllm image
# FROM vllm/vllm-openai:v0.10.1.1
# Use DaoCloud mirrored vllm image for China region for gpu with Turing architecture and below (Compute Capability<8.0)
# FROM docker.m.daocloud.io/vllm/vllm-openai:v0.10.2
# Use the official vllm image
# FROM vllm/vllm-openai:v0.10.2
# Install libgl for opencv support & Noto fonts for Chinese characters
RUN apt-get update && \
apt-get install -y \

View File

@@ -1,6 +1,10 @@
# Use the official vllm image
# Use the official vllm image for gpu with Ampere architecture and above (Compute Capability>=8.0)
# Compute Capability version query (https://developer.nvidia.com/cuda-gpus)
FROM vllm/vllm-openai:v0.10.1.1
# Use the official vllm image for gpu with Turing architecture and below (Compute Capability<8.0)
# FROM vllm/vllm-openai:v0.10.2
# Install libgl for opencv support & Noto fonts for Chinese characters
RUN apt-get update && \
apt-get install -y \

View File

@@ -10,7 +10,8 @@ docker build -t mineru-vllm:latest -f Dockerfile .
```
> [!TIP]
> The [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/global/Dockerfile) uses `vllm/vllm-openai:v0.10.1.1` as the base image by default, supporting Turing/Ampere/Ada Lovelace/Hopper/Blackwell platforms.
> The [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/global/Dockerfile) uses `vllm/vllm-openai:v0.10.1.1` as the base image by default. This version of vLLM v1 engine has limited support for GPU models.
> If you cannot use vLLM accelerated inference on Turing and earlier architecture GPUs, you can resolve this issue by changing the base image to `vllm/vllm-openai:v0.10.2`.
## Docker Description

View File

@@ -10,7 +10,8 @@ docker build -t mineru-vllm:latest -f Dockerfile .
```
> [!TIP]
> [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/china/Dockerfile)默认使用`vllm/vllm-openai:v0.10.1.1`作为基础镜像,支持Turing/Ampere/Ada Lovelace/Hopper/Blackwell平台
> [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/china/Dockerfile)默认使用`vllm/vllm-openai:v0.10.1.1`作为基础镜像,
> 该版本的vLLM v1 engine对显卡型号支持有限如您无法在Turing及更早架构的显卡上使用vLLM加速推理可通过更改基础镜像为`vllm/vllm-openai:v0.10.2`来解决该问题。
## Docker说明

View File

@@ -116,9 +116,14 @@ class BatchAnalyze:
atom_model_name=AtomicModel.ImgOrientationCls,
)
try:
img_orientation_cls_model.batch_predict(table_res_list_all_page,
det_batch_size=self.batch_ratio * OCR_DET_BASE_BATCH_SIZE,
batch_size=TABLE_ORI_CLS_BATCH_SIZE)
if self.enable_ocr_det_batch:
img_orientation_cls_model.batch_predict(table_res_list_all_page,
det_batch_size=self.batch_ratio * OCR_DET_BASE_BATCH_SIZE,
batch_size=TABLE_ORI_CLS_BATCH_SIZE)
else:
for table_res in table_res_list_all_page:
rotate_label = img_orientation_cls_model.predict(table_res['table_img'])
img_orientation_cls_model.img_rotate(table_res, rotate_label)
except Exception as e:
logger.warning(
f"Image orientation classification failed: {e}, using original image"

View File

@@ -0,0 +1,41 @@
import os
from loguru import logger
from packaging import version
def enable_custom_logits_processors():
    """Decide whether MinerU's custom logits processors can be enabled.

    The decision combines three runtime facts: CUDA availability, the GPU's
    compute capability, and the installed vllm version (together with the
    VLLM_USE_V1 environment switch).

    Returns:
        bool: True when the custom logits processors are safe to enable.
    """
    import torch
    from vllm import __version__ as vllm_version

    # Without a CUDA device the processors cannot run at all.
    if not torch.cuda.is_available():
        logger.info("CUDA not available, disabling custom_logits_processors")
        return False

    # Compute Capability as "<major>.<minor>", e.g. "7.5" for Turing.
    cc_major, cc_minor = torch.cuda.get_device_capability()
    device_capability = f"{cc_major}.{cc_minor}"

    # VLLM_USE_V1 defaults to enabled; any non-numeric value is treated as "1".
    raw_flag = os.getenv('VLLM_USE_V1', "1")
    use_v1 = int(raw_flag) if raw_flag.isdigit() else 1

    if use_v1 == 0:
        logger.info("VLLM_USE_V1 is set to 0, disabling custom_logits_processors")
        return False

    parsed_vllm = version.parse(vllm_version)
    if parsed_vllm < version.parse("0.10.1"):
        logger.info(f"vllm version: {vllm_version} < 0.10.1, disable custom_logits_processors")
        return False

    if version.parse(device_capability) < version.parse("8.0"):
        # Pre-Ampere GPUs (Compute Capability < 8.0) are only supported by the
        # processors on vllm >= 0.10.2.
        if parsed_vllm >= version.parse("0.10.2"):
            logger.info(f"compute_capability: {device_capability} < 8.0, but vllm version: {vllm_version} >= 0.10.2, enable custom_logits_processors")
            return True
        logger.info(f"compute_capability: {device_capability} < 8.0 and vllm version: {vllm_version} < 0.10.2, disable custom_logits_processors")
        return False

    logger.info(f"compute_capability: {device_capability} >= 8.0 and vllm version: {vllm_version} >= 0.10.1, enable custom_logits_processors")
    return True

View File

@@ -4,6 +4,7 @@ import time
from loguru import logger
from .custom_logits_processors import enable_custom_logits_processors
from .model_output_to_middle_json import result_to_middle_json
from ...data.data_reader_writer import DataWriter
from mineru.utils.pdf_image_tools import load_images_from_pdf
@@ -88,7 +89,6 @@ class ModelSingleton:
elif backend == "vllm-engine":
try:
import vllm
vllm_version = vllm.__version__
from mineru_vl_utils import MinerULogitsProcessor
except ImportError:
raise ImportError("Please install vllm to use the vllm-engine backend.")
@@ -96,7 +96,7 @@ class ModelSingleton:
kwargs["gpu_memory_utilization"] = 0.5
if "model" not in kwargs:
kwargs["model"] = model_path
if version.parse(vllm_version) >= version.parse("0.10.1") and "logits_processors" not in kwargs:
if enable_custom_logits_processors() and ("logits_processors" not in kwargs):
kwargs["logits_processors"] = [MinerULogitsProcessor]
# 使用kwargs为 vllm初始化参数
vllm_llm = vllm.LLM(**kwargs)
@@ -104,7 +104,6 @@ class ModelSingleton:
try:
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.v1.engine.async_llm import AsyncLLM
from vllm import __version__ as vllm_version
from mineru_vl_utils import MinerULogitsProcessor
except ImportError:
raise ImportError("Please install vllm to use the vllm-async-engine backend.")
@@ -112,7 +111,7 @@ class ModelSingleton:
kwargs["gpu_memory_utilization"] = 0.5
if "model" not in kwargs:
kwargs["model"] = model_path
if version.parse(vllm_version) >= version.parse("0.10.1") and "logits_processors" not in kwargs:
if enable_custom_logits_processors() and ("logits_processors" not in kwargs):
kwargs["logits_processors"] = [MinerULogitsProcessor]
# 使用kwargs为 vllm初始化参数
vllm_async_llm = AsyncLLM.from_engine_args(AsyncEngineArgs(**kwargs))

View File

@@ -54,7 +54,7 @@ def mk_blocks_to_markdown(para_blocks, make_mode, formula_enable, table_enable,
elif para_type == BlockType.LIST:
for block in para_block['blocks']:
item_text = merge_para_with_text(block, formula_enable=formula_enable, img_buket_path=img_buket_path)
para_text += f"{item_text}\n"
para_text += f"{item_text} \n"
elif para_type == BlockType.TITLE:
title_level = get_title_level(para_block)
para_text = f'{"#" * title_level} {merge_para_with_text(para_block)}'

View File

@@ -255,25 +255,28 @@ class PaddleOrientationClsModel:
results = self.sess.run(None, {"x": x})
for img_info, res in zip(rotated_imgs, results[0]):
label = self.labels[np.argmax(res)]
if label == "270":
img_info["table_img"] = cv2.rotate(
np.asarray(img_info["table_img"]),
cv2.ROTATE_90_CLOCKWISE,
)
img_info["wired_table_img"] = cv2.rotate(
np.asarray(img_info["wired_table_img"]),
cv2.ROTATE_90_CLOCKWISE,
)
elif label == "90":
img_info["table_img"] = cv2.rotate(
np.asarray(img_info["table_img"]),
cv2.ROTATE_90_COUNTERCLOCKWISE,
)
img_info["wired_table_img"] = cv2.rotate(
np.asarray(img_info["wired_table_img"]),
cv2.ROTATE_90_COUNTERCLOCKWISE,
)
else:
# 180度和0度不做处理
pass
self.img_rotate(img_info, label)
pbar.update(1)
def img_rotate(self, img_info, label):
    """Rotate the table crops inside *img_info* in place based on *label*.

    A label of "270" means the table is rotated 270 degrees, so it is
    corrected with a 90-degree clockwise rotation; "90" is corrected with a
    90-degree counter-clockwise rotation. Labels "0" and "180" are left
    untouched.
    """
    rotation_by_label = {
        "270": cv2.ROTATE_90_CLOCKWISE,
        "90": cv2.ROTATE_90_COUNTERCLOCKWISE,
    }
    rotation = rotation_by_label.get(label)
    if rotation is None:
        # 0 and 180 degrees: no correction applied (matches original behavior).
        return
    for key in ("table_img", "wired_table_img"):
        img_info[key] = cv2.rotate(np.asarray(img_info[key]), rotation)

View File

@@ -1,10 +1,9 @@
import sys
from mineru.backend.vlm.custom_logits_processors import enable_custom_logits_processors
from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
from vllm.entrypoints.cli.main import main as vllm_main
from vllm import __version__ as vllm_version
from packaging import version
def main():
@@ -37,6 +36,8 @@ def main():
for index in sorted(model_arg_indices, reverse=True):
args.pop(index)
custom_logits_processors = enable_custom_logits_processors()
# 添加默认参数
if not has_port_arg:
args.extend(["--port", "30000"])
@@ -44,7 +45,7 @@ def main():
args.extend(["--gpu-memory-utilization", "0.5"])
if not model_path:
model_path = auto_download_and_get_model_root_path("/", "vlm")
if not has_logits_processors_arg and version.parse(vllm_version) >= version.parse("0.10.1"):
if (not has_logits_processors_arg) and custom_logits_processors:
args.extend(["--logits-processors", "mineru_vl_utils:MinerULogitsProcessor"])
# 重构参数,将模型路径作为位置参数

View File

@@ -1 +1 @@
__version__ = "2.5.0"
__version__ = "2.5.2"

View File

@@ -39,7 +39,7 @@ dependencies = [
"openai>=1.70.0,<2",
"beautifulsoup4>=4.13.5,<5",
"magika>=0.6.2,<0.7.0",
"mineru-vl-utils>=0.1.7,<1",
"mineru-vl-utils>=0.1.8,<1",
]
[project.optional-dependencies]
@@ -51,12 +51,12 @@ test = [
"fuzzywuzzy"
]
vlm = [
"torch>=2.6.0,<2.8.0",
"torch>=2.6.0,<3",
"transformers>=4.51.1,<5.0.0",
"accelerate>=1.5.1",
]
vllm = [
"vllm==0.10.1.1",
"vllm>=0.10.1.1,<0.11",
]
pipeline = [
"matplotlib>=3.10,<4",
@@ -68,7 +68,7 @@ pipeline = [
"shapely>=2.0.7,<3",
"pyclipper>=1.3.0,<2",
"omegaconf>=2.3.0,<3",
"torch>=2.6.0,<2.8.0",
"torch>=2.6.0,<3",
"torchvision",
"transformers>=4.49.0,!=4.51.0,<5.0.0",
"onnxruntime>1.17.0",