Compare commits

...

9 Commits

Author SHA1 Message Date
Xiaomeng Zhao
a56bd6c334 Merge pull request #3831 from opendatalab/dev
Dev
2025-10-24 17:25:03 +08:00
Xiaomeng Zhao
f5400f0c94 Merge pull request #3830 from myhloli/dev
fix: correct spelling of set_default_gpu_memory_utilization and set_default_batch_size functions
2025-10-24 17:24:31 +08:00
myhloli
6a6c650062 fix: correct spelling of set_default_gpu_memory_utilization and set_default_batch_size functions 2025-10-24 17:23:13 +08:00
Xiaomeng Zhao
ae084eb317 Merge pull request #3828 from myhloli/dev
Dev
2025-10-24 17:17:23 +08:00
myhloli
7c77db7135 fix: import enable_custom_logits_processors in server.py 2025-10-24 17:16:07 +08:00
myhloli
7b14a87b9d fix: update version number to 2.6.1 in README and README_zh-CN 2025-10-24 17:13:08 +08:00
myhloli
0d0ebfd7bc fix: improve GPU memory utilization handling and ensure OMP_NUM_THREADS is set only if not defined 2025-10-24 17:11:19 +08:00
Xiaomeng Zhao
91cc2524d5 Merge pull request #3824 from myhloli/dev
fix: update README and Chinese README to include GitHub link for optimization contributor
2025-10-24 16:00:54 +08:00
myhloli
e504e5e012 fix: update README and Chinese README to include GitHub link for optimization contributor 2025-10-24 15:58:23 +08:00
5 changed files with 18 additions and 14 deletions

View File

@@ -44,10 +44,10 @@
</div>
# Changelog
- 2025/10/24 2.6.0 Release
- 2025/10/24 2.6.1 Release
- `pipeline` backend optimizations
- Added experimental support for Chinese formulas, which can be enabled by setting the environment variable `export MINERU_FORMULA_CH_SUPPORT=1`. This feature may cause a slight decrease in MFR speed and failures in recognizing some long formulas. It is recommended to enable it only when parsing Chinese formulas is needed. To disable this feature, set the environment variable to `0`.
- `OCR` speed significantly improved by 200%~300%, thanks to the optimization solution provided by @cjsdurj
- `OCR` speed significantly improved by 200%~300%, thanks to the optimization solution provided by [@cjsdurj](https://github.com/cjsdurj)
- `OCR` models updated to `ppocr-v5` version for Cyrillic, Arabic, Devanagari, Telugu (te), and Tamil (ta) languages, with accuracy improved by over 40% compared to previous models
- `vlm` backend optimizations
- `table_caption` and `table_footnote` matching logic optimized to improve the accuracy of table caption and footnote matching and reading order rationality in scenarios with multiple consecutive tables on a page

View File

@@ -44,10 +44,10 @@
</div>
# 更新记录
- 2025/10/24 2.6.0 发布
- 2025/10/24 2.6.1 发布
- `pipeline`后端优化
- 增加对中文公式的实验性支持,可通过配置环境变量`export MINERU_FORMULA_CH_SUPPORT=1`开启。该功能可能会导致MFR速率略微下降、部分长公式识别失败等问题,建议仅在需要解析中文公式的场景下开启。如需关闭该功能,可将环境变量设置为`0`
- `OCR`速度大幅提升200%~300%,感谢 @cjsdurj 提供的优化方案
- `OCR`速度大幅提升200%~300%,感谢 [@cjsdurj](https://github.com/cjsdurj) 提供的优化方案
- `OCR`模型更新西里尔文(cyrillic)、阿拉伯文(arabic)、天城文(devanagari)、泰卢固语(te)、泰米尔语(ta)语系至`ppocr-v5`版本精度相比上代模型提升40%以上
- `vlm`后端优化
- `table_caption``table_footnote`匹配逻辑优化,提升页内多张连续表场景下的表格标题和脚注的匹配准确率和阅读顺序合理性

View File

@@ -44,7 +44,7 @@ def enable_custom_logits_processors() -> bool:
return True
def set_defult_gpu_memory_utilization() -> float:
def set_default_gpu_memory_utilization() -> float:
from vllm import __version__ as vllm_version
if version.parse(vllm_version) >= version.parse("0.11.0"):
return 0.7
@@ -52,7 +52,7 @@ def set_defult_gpu_memory_utilization() -> float:
return 0.5
def set_defult_batch_size() -> int:
def set_default_batch_size() -> int:
try:
device = get_device()
vram = get_vram(device)

View File

@@ -4,7 +4,7 @@ import time
from loguru import logger
from .utils import enable_custom_logits_processors, set_defult_gpu_memory_utilization, set_defult_batch_size
from .utils import enable_custom_logits_processors, set_default_gpu_memory_utilization, set_default_batch_size
from .model_output_to_middle_json import result_to_middle_json
from ...data.data_reader_writer import DataWriter
from mineru.utils.pdf_image_tools import load_images_from_pdf
@@ -74,9 +74,11 @@ class ModelSingleton:
use_fast=True,
)
if batch_size == 0:
batch_size = set_defult_batch_size()
batch_size = set_default_batch_size()
else:
os.environ["OMP_NUM_THREADS"] = "1"
if os.getenv('OMP_NUM_THREADS') is None:
os.environ["OMP_NUM_THREADS"] = "1"
if backend == "vllm-engine":
try:
import vllm
@@ -84,7 +86,7 @@ class ModelSingleton:
except ImportError:
raise ImportError("Please install vllm to use the vllm-engine backend.")
if "gpu_memory_utilization" not in kwargs:
kwargs["gpu_memory_utilization"] = set_defult_gpu_memory_utilization()
kwargs["gpu_memory_utilization"] = set_default_gpu_memory_utilization()
if "model" not in kwargs:
kwargs["model"] = model_path
if enable_custom_logits_processors() and ("logits_processors" not in kwargs):
@@ -99,7 +101,7 @@ class ModelSingleton:
except ImportError:
raise ImportError("Please install vllm to use the vllm-async-engine backend.")
if "gpu_memory_utilization" not in kwargs:
kwargs["gpu_memory_utilization"] = set_defult_gpu_memory_utilization()
kwargs["gpu_memory_utilization"] = set_default_gpu_memory_utilization()
if "model" not in kwargs:
kwargs["model"] = model_path
if enable_custom_logits_processors() and ("logits_processors" not in kwargs):

View File

@@ -1,7 +1,7 @@
import os
import sys
from mineru.backend.vlm.custom_logits_processors import enable_custom_logits_processors
from mineru.backend.vlm.utils import set_default_gpu_memory_utilization, enable_custom_logits_processors
from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
from vllm.entrypoints.cli.main import main as vllm_main
@@ -43,7 +43,8 @@ def main():
if not has_port_arg:
args.extend(["--port", "30000"])
if not has_gpu_memory_utilization_arg:
args.extend(["--gpu-memory-utilization", "0.7"])
gpu_memory_utilization = str(set_default_gpu_memory_utilization())
args.extend(["--gpu-memory-utilization", gpu_memory_utilization])
if not model_path:
model_path = auto_download_and_get_model_root_path("/", "vlm")
if (not has_logits_processors_arg) and custom_logits_processors:
@@ -52,7 +53,8 @@ def main():
# 重构参数,将模型路径作为位置参数
sys.argv = [sys.argv[0]] + ["serve", model_path] + args
os.environ["OMP_NUM_THREADS"] = "1"
if os.getenv('OMP_NUM_THREADS') is None:
os.environ["OMP_NUM_THREADS"] = "1"
# 启动vllm服务器
print(f"start vllm server: {sys.argv}")