Compare commits

...

44 Commits

Author SHA1 Message Date
Xiaomeng Zhao
1a773b01fb Merge pull request #3182 from myhloli/dev
Dev
2025-07-26 04:46:10 +08:00
myhloli
4ca963c860 Merge remote-tracking branch 'origin/dev' into dev 2025-07-26 04:36:43 +08:00
myhloli
050e1dbc70 chore: update changelog for version 2.1.6 release with fixes for table parsing and visualization box position 2025-07-26 04:36:33 +08:00
Xiaomeng Zhao
9d0ebd8a19 Merge pull request #3181 from myhloli/dev
refactor: enhance HTML table extraction and add block content conversion to HTML
2025-07-26 04:20:56 +08:00
Xiaomeng Zhao
6ed75347a6 Update mineru/utils/format_utils.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-26 04:20:30 +08:00
myhloli
825fc95a8a fix: ensure new pages are created for overlay merging in draw_bbox.py 2025-07-26 04:10:40 +08:00
myhloli
2ae1e3af16 Merge remote-tracking branch 'origin/dev' into dev 2025-07-26 03:49:59 +08:00
Xiaomeng Zhao
64e32bb57f Merge pull request #3175 from jinghuan-Chen/handle-pdf-rotation
Fix draw bbox when pdf rotate.
2025-07-26 03:49:21 +08:00
Xiaomeng Zhao
d0726b73c6 Update draw_bbox.py 2025-07-26 03:35:35 +08:00
Xiaomeng Zhao
8b552460da Update draw_bbox.py 2025-07-26 02:47:56 +08:00
myhloli
28553212c3 refactor: enhance HTML table extraction and add block content conversion to HTML 2025-07-26 02:37:10 +08:00
jinghuan-Chen
9c868484f5 Fix draw bbox when pdf rotate. 2025-07-25 00:12:45 +08:00
Xiaomeng Zhao
d878c7a3aa Merge pull request #3174 from opendatalab/master
master->dev
2025-07-24 21:39:21 +08:00
myhloli
30155bde3f Update version.py with new version 2025-07-24 13:36:50 +00:00
Xiaomeng Zhao
10e76530a5 Merge pull request #3173 from opendatalab/release-2.1.5
Release 2.1.5
2025-07-24 21:35:05 +08:00
Xiaomeng Zhao
bd80ce2ea2 Merge pull request #3172 from myhloli/dev
refactor: update imports and adapt to sglang version changes in processing logic
2025-07-24 21:33:29 +08:00
Xiaomeng Zhao
86690409ab Update mineru/model/vlm_sglang_model/model.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-24 21:33:16 +08:00
myhloli
62d1ef184e refactor: update sglang version checks to use packaging.version for comparison 2025-07-24 21:29:54 +08:00
myhloli
6d9380323b chore: update Dockerfile and documentation to use sglang v0.4.9 2025-07-24 21:14:25 +08:00
myhloli
56f25a4e90 refactor: update imports and adapt to sglang version changes in processing logic 2025-07-24 20:59:25 +08:00
Xiaomeng Zhao
f85f53d805 Merge pull request #3158 from opendatalab/master
master->dev
2025-07-23 15:53:04 +08:00
myhloli
f7ee044bf3 Update version.py with new version 2025-07-23 07:50:20 +00:00
Xiaomeng Zhao
715ccbb08e Merge pull request #3156 from opendatalab/release-2.1.4
Release 2.1.4
2025-07-23 15:49:02 +08:00
Xiaomeng Zhao
57ba7fab01 Merge pull request #3155 from opendatalab/dev
Dev
2025-07-23 15:48:37 +08:00
Xiaomeng Zhao
c39606c188 Merge pull request #3154 from myhloli/dev
chore: update version number to 2.1.4 in README files
2025-07-23 15:48:08 +08:00
myhloli
318bdf0d7c chore: update version number to 2.1.4 in README files 2025-07-23 15:47:16 +08:00
Xiaomeng Zhao
03f8e91889 Merge pull request #3151 from opendatalab/release-2.1.4
Release 2.1.4
2025-07-23 15:46:35 +08:00
Xiaomeng Zhao
3da26d1c6b Merge pull request #3150 from myhloli/dev
fix: remove unused import in pipeline_magic_model.py
2025-07-23 15:33:46 +08:00
myhloli
832f37271e fix: remove unused import in pipeline_magic_model.py 2025-07-23 15:20:35 +08:00
Xiaomeng Zhao
a5583ff4fb fix: improve candidate sorting logic in vlm_magic_model.py
fix: improve candidate sorting logic in vlm_magic_model.py
2025-07-23 15:14:52 +08:00
myhloli
1906643c67 refactor: streamline bbox processing and enhance category tying logic in magic_model_utils.py 2025-07-23 15:03:56 +08:00
myhloli
ee6d557fcc fix: correct comment formatting for batch_size logic in Unimernet.py 2025-07-23 11:21:17 +08:00
myhloli
a636b34324 fix: ensure batch_size is at least 1 when sorted_images is empty in Unimernet.py 2025-07-23 11:20:39 +08:00
Xiaomeng Zhao
9e5cb12967 Update mineru/utils/magic_model_utils.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-23 11:05:15 +08:00
myhloli
9bf8be9861 feat: add utility functions for bounding box processing in magic_model_utils.py 2025-07-23 10:59:14 +08:00
myhloli
4f612cbc1d fix: improve candidate sorting logic in vlm_magic_model.py 2025-07-23 09:58:26 +08:00
Xiaomeng Zhao
beacccb614 Merge pull request #3145 from opendatalab/master
master->dev
2025-07-23 00:30:20 +08:00
myhloli
dcd18c67a8 Update version.py with new version 2025-07-22 16:24:10 +00:00
Xiaomeng Zhao
308f1b0424 Merge pull request #3144 from opendatalab/release-2.1.3
Release 2.1.3
2025-07-23 00:21:28 +08:00
Xiaomeng Zhao
fae9fb9820 Update mineru/model/mfr/unimernet/Unimernet.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-23 00:20:08 +08:00
Xiaomeng Zhao
58268df3ce Merge pull request #3143 from myhloli/dev
docs: update changelog for version 2.1.3 with bug fixes and improvements
2025-07-23 00:12:51 +08:00
myhloli
f1ae0afcd6 docs: update changelog for version 2.1.3 with bug fixes and improvements 2025-07-23 00:11:52 +08:00
myhloli
139fd3ca65 Update version.py with new version 2025-07-22 14:38:29 +00:00
Xiaomeng Zhao
07f6ba7299 Merge pull request #3139 from opendatalab/release-2.1.2
Release 2.1.2
2025-07-22 22:27:57 +08:00
18 changed files with 479 additions and 486 deletions

View File

@@ -43,7 +43,12 @@
</div>
# Changelog
- 2025/07/22 2.1.2 Released
- 2025/07/26 2.1.6 Released
- Fixed table parsing issues in handwritten documents when using `vlm` backend
- Fixed visualization box position drift issue when document is rotated #3175
- 2025/07/24 2.1.5 Released
- `sglang` 0.4.9 version adaptation, synchronously upgrading the dockerfile base image to sglang 0.4.9.post3
- 2025/07/23 2.1.4 Released
- Bug Fixes
- Fixed the issue of excessive memory consumption during the `MFR` step in the `pipeline` backend under certain scenarios #2771
- Fixed the inaccurate matching between `image`/`table` and `caption`/`footnote` under certain conditions #3129

View File

@@ -43,7 +43,12 @@
</div>
# 更新记录
- 2025/07/22 2.1.2发布
- 2025/07/26 2.1.6发布
- 修复`vlm`后端解析部分手写文档时的表格异常问题
- 修复文档旋转时可视化框位置漂移问题 #3175
- 2025/07/24 2.1.5发布
- `sglang` 0.4.9 版本适配,同步升级dockerfile基础镜像为sglang 0.4.9.post3
- 2025/07/23 2.1.4发布
- bug修复
- 修复`pipeline`后端中`MFR`步骤在某些情况下显存消耗过大的问题 #2771
- 修复某些情况下`image`/`table`与`caption`/`footnote`匹配不准确的问题 #3129

View File

@@ -1,5 +1,7 @@
# Use the official sglang image
FROM lmsysorg/sglang:v0.4.8.post1-cu126
FROM lmsysorg/sglang:v0.4.9.post3-cu126
# For blackwell GPU, use the following line instead:
# FROM lmsysorg/sglang:v0.4.9.post3-cu128-b200
# Install libgl for opencv support & Noto fonts for Chinese characters
RUN apt-get update && \

View File

@@ -1,5 +1,7 @@
# Use the official sglang image
FROM lmsysorg/sglang:v0.4.8.post1-cu126
FROM lmsysorg/sglang:v0.4.9.post3-cu126
# For blackwell GPU, use the following line instead:
# FROM lmsysorg/sglang:v0.4.9.post3-cu128-b200
# Install libgl for opencv support & Noto fonts for Chinese characters
RUN apt-get update && \

View File

@@ -10,8 +10,8 @@ docker build -t mineru-sglang:latest -f Dockerfile .
```
> [!TIP]
> The [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/global/Dockerfile) uses `lmsysorg/sglang:v0.4.8.post1-cu126` as the base image by default, supporting Turing/Ampere/Ada Lovelace/Hopper platforms.
> If you are using the newer `Blackwell` platform, please modify the base image to `lmsysorg/sglang:v0.4.8.post1-cu128-b200` before executing the build operation.
> The [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/global/Dockerfile) uses `lmsysorg/sglang:v0.4.9.post3-cu126` as the base image by default, supporting Turing/Ampere/Ada Lovelace/Hopper platforms.
> If you are using the newer `Blackwell` platform, please modify the base image to `lmsysorg/sglang:v0.4.9.post3-cu128-b200` before executing the build operation.
## Docker Description

View File

@@ -10,8 +10,8 @@ docker build -t mineru-sglang:latest -f Dockerfile .
```
> [!TIP]
> [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/china/Dockerfile)默认使用`lmsysorg/sglang:v0.4.8.post1-cu126`作为基础镜像,支持Turing/Ampere/Ada Lovelace/Hopper平台,
> 如您使用较新的`Blackwell`平台,请将基础镜像修改为`lmsysorg/sglang:v0.4.8.post1-cu128-b200` 再执行build操作。
> [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/china/Dockerfile)默认使用`lmsysorg/sglang:v0.4.9.post3-cu126`作为基础镜像,支持Turing/Ampere/Ada Lovelace/Hopper平台,
> 如您使用较新的`Blackwell`平台,请将基础镜像修改为`lmsysorg/sglang:v0.4.9.post3-cu128-b200` 再执行build操作。
## Docker说明

View File

@@ -256,9 +256,12 @@ class BatchAnalyze:
html_code, table_cell_bboxes, logic_points, elapse = table_model.predict(table_res_dict['table_img'])
# 判断是否返回正常
if html_code:
expected_ending = html_code.strip().endswith('</html>') or html_code.strip().endswith('</table>')
if expected_ending:
table_res_dict['table_res']['html'] = html_code
# 检查html_code是否包含'<table>'和'</table>'
if '<table>' in html_code and '</table>' in html_code:
# 选用<table>到</table>的内容放入table_res_dict['table_res']['html']
start_index = html_code.find('<table>')
end_index = html_code.rfind('</table>') + len('</table>')
table_res_dict['table_res']['html'] = html_code[start_index:end_index]
else:
logger.warning(
'table recognition processing fails, not found expected HTML table end'

View File

@@ -1,5 +1,6 @@
from mineru.utils.boxbase import bbox_relative_pos, calculate_iou, bbox_distance, is_in, get_minbox_if_overlap_by_ratio
from mineru.utils.boxbase import bbox_relative_pos, calculate_iou, bbox_distance, get_minbox_if_overlap_by_ratio
from mineru.utils.enum_class import CategoryId, ContentType
from mineru.utils.magic_model_utils import tie_up_category_by_distance_v3, reduct_overlap
class MagicModel:
@@ -208,170 +209,39 @@ class MagicModel:
return bbox_distance(bbox1, bbox2)
def __reduct_overlap(self, bboxes):
N = len(bboxes)
keep = [True] * N
for i in range(N):
for j in range(N):
if i == j:
continue
if is_in(bboxes[i]['bbox'], bboxes[j]['bbox']):
keep[i] = False
return [bboxes[i] for i in range(N) if keep[i]]
def __tie_up_category_by_distance_v3(
self,
subject_category_id: int,
object_category_id: int,
):
subjects = self.__reduct_overlap(
list(
map(
lambda x: {'bbox': x['bbox'], 'score': x['score']},
filter(
lambda x: x['category_id'] == subject_category_id,
self.__page_model_info['layout_dets'],
),
)
)
)
objects = self.__reduct_overlap(
list(
map(
lambda x: {'bbox': x['bbox'], 'score': x['score']},
filter(
lambda x: x['category_id'] == object_category_id,
self.__page_model_info['layout_dets'],
),
)
)
)
ret = []
N, M = len(subjects), len(objects)
subjects.sort(key=lambda x: x['bbox'][0] ** 2 + x['bbox'][1] ** 2)
objects.sort(key=lambda x: x['bbox'][0] ** 2 + x['bbox'][1] ** 2)
OBJ_IDX_OFFSET = 10000
SUB_BIT_KIND, OBJ_BIT_KIND = 0, 1
all_boxes_with_idx = [(i, SUB_BIT_KIND, sub['bbox'][0], sub['bbox'][1]) for i, sub in enumerate(subjects)] + [(i + OBJ_IDX_OFFSET , OBJ_BIT_KIND, obj['bbox'][0], obj['bbox'][1]) for i, obj in enumerate(objects)]
seen_idx = set()
seen_sub_idx = set()
while N > len(seen_sub_idx):
candidates = []
for idx, kind, x0, y0 in all_boxes_with_idx:
if idx in seen_idx:
continue
candidates.append((idx, kind, x0, y0))
if len(candidates) == 0:
break
left_x = min([v[2] for v in candidates])
top_y = min([v[3] for v in candidates])
candidates.sort(key=lambda x: (x[2]-left_x) ** 2 + (x[3] - top_y) ** 2)
fst_idx, fst_kind, left_x, top_y = candidates[0]
fst_bbox = subjects[fst_idx]['bbox'] if fst_kind == SUB_BIT_KIND else objects[fst_idx - OBJ_IDX_OFFSET]['bbox']
candidates.sort(key=lambda x: bbox_distance(fst_bbox, subjects[x[0]]['bbox']) if x[1] == SUB_BIT_KIND else bbox_distance(fst_bbox, objects[x[0] - OBJ_IDX_OFFSET]['bbox']))
nxt = None
for i in range(1, len(candidates)):
if candidates[i][1] ^ fst_kind == 1:
nxt = candidates[i]
break
if nxt is None:
break
if fst_kind == SUB_BIT_KIND:
sub_idx, obj_idx = fst_idx, nxt[0] - OBJ_IDX_OFFSET
else:
sub_idx, obj_idx = nxt[0], fst_idx - OBJ_IDX_OFFSET
pair_dis = bbox_distance(subjects[sub_idx]['bbox'], objects[obj_idx]['bbox'])
nearest_dis = float('inf')
for i in range(N):
# 取消原先算法中 1对1 匹配的偏置
# if i in seen_idx or i == sub_idx:continue
nearest_dis = min(nearest_dis, bbox_distance(subjects[i]['bbox'], objects[obj_idx]['bbox']))
if pair_dis >= 3*nearest_dis:
seen_idx.add(sub_idx)
continue
seen_idx.add(sub_idx)
seen_idx.add(obj_idx + OBJ_IDX_OFFSET)
seen_sub_idx.add(sub_idx)
ret.append(
{
'sub_bbox': {
'bbox': subjects[sub_idx]['bbox'],
'score': subjects[sub_idx]['score'],
},
'obj_bboxes': [
{'score': objects[obj_idx]['score'], 'bbox': objects[obj_idx]['bbox']}
],
'sub_idx': sub_idx,
}
)
for i in range(len(objects)):
j = i + OBJ_IDX_OFFSET
if j in seen_idx:
continue
seen_idx.add(j)
nearest_dis, nearest_sub_idx = float('inf'), -1
for k in range(len(subjects)):
dis = bbox_distance(objects[i]['bbox'], subjects[k]['bbox'])
if dis < nearest_dis:
nearest_dis = dis
nearest_sub_idx = k
for k in range(len(subjects)):
if k != nearest_sub_idx: continue
if k in seen_sub_idx:
for kk in range(len(ret)):
if ret[kk]['sub_idx'] == k:
ret[kk]['obj_bboxes'].append({'score': objects[i]['score'], 'bbox': objects[i]['bbox']})
break
else:
ret.append(
{
'sub_bbox': {
'bbox': subjects[k]['bbox'],
'score': subjects[k]['score'],
},
'obj_bboxes': [
{'score': objects[i]['score'], 'bbox': objects[i]['bbox']}
],
'sub_idx': k,
}
def __tie_up_category_by_distance_v3(self, subject_category_id, object_category_id):
# 定义获取主体和客体对象的函数
def get_subjects():
return reduct_overlap(
list(
map(
lambda x: {'bbox': x['bbox'], 'score': x['score']},
filter(
lambda x: x['category_id'] == subject_category_id,
self.__page_model_info['layout_dets'],
),
)
seen_sub_idx.add(k)
seen_idx.add(k)
for i in range(len(subjects)):
if i in seen_sub_idx:
continue
ret.append(
{
'sub_bbox': {
'bbox': subjects[i]['bbox'],
'score': subjects[i]['score'],
},
'obj_bboxes': [],
'sub_idx': i,
}
)
)
def get_objects():
return reduct_overlap(
list(
map(
lambda x: {'bbox': x['bbox'], 'score': x['score']},
filter(
lambda x: x['category_id'] == object_category_id,
self.__page_model_info['layout_dets'],
),
)
)
)
return ret
# 调用通用方法
return tie_up_category_by_distance_v3(
get_subjects,
get_objects
)
def get_imgs(self):
with_captions = self.__tie_up_category_by_distance_v3(

View File

@@ -3,10 +3,10 @@ from typing import Literal
from loguru import logger
from mineru.utils.boxbase import bbox_distance, is_in
from mineru.utils.enum_class import ContentType, BlockType, SplitFlag
from mineru.backend.vlm.vlm_middle_json_mkcontent import merge_para_with_text
from mineru.utils.format_utils import convert_otsl_to_html
from mineru.utils.format_utils import block_content_to_html
from mineru.utils.magic_model_utils import reduct_overlap, tie_up_category_by_distance_v3
class MagicModel:
@@ -40,6 +40,10 @@ class MagicModel:
block_type = block_info[1].strip()
block_content = block_info[2].strip()
# 如果bbox是0,0,999,999且type为text按notes增加表格处理
if x1 == 0 and y1 == 0 and x2 == 999 and y2 == 999 and block_type == "text":
block_content = block_content_to_html(block_content)
# print(f"坐标: {block_bbox}")
# print(f"类型: {block_type}")
# print(f"内容: {block_content}")
@@ -77,16 +81,7 @@ class MagicModel:
"type": span_type,
}
if span_type == ContentType.TABLE:
if "<fcel>" in block_content or "<ecel>" in block_content:
lines = block_content.split("\n\n")
new_lines = []
for line in lines:
if "<fcel>" in line or "<ecel>" in line:
line = convert_otsl_to_html(line)
new_lines.append(line)
span["html"] = "\n\n".join(new_lines)
else:
span["html"] = block_content
span["html"] = block_content_to_html(block_content)
elif span_type in [ContentType.INTERLINE_EQUATION]:
span = {
"bbox": block_bbox,
@@ -251,175 +246,39 @@ def latex_fix(latex):
return latex
def __reduct_overlap(bboxes):
N = len(bboxes)
keep = [True] * N
for i in range(N):
for j in range(N):
if i == j:
continue
if is_in(bboxes[i]["bbox"], bboxes[j]["bbox"]):
keep[i] = False
return [bboxes[i] for i in range(N) if keep[i]]
def __tie_up_category_by_distance_v3(
blocks: list,
subject_block_type: str,
object_block_type: str,
):
subjects = __reduct_overlap(
list(
map(
lambda x: {"bbox": x["bbox"], "lines": x["lines"], "index": x["index"]},
filter(
lambda x: x["type"] == subject_block_type,
blocks,
),
)
)
)
objects = __reduct_overlap(
list(
map(
lambda x: {"bbox": x["bbox"], "lines": x["lines"], "index": x["index"]},
filter(
lambda x: x["type"] == object_block_type,
blocks,
),
)
)
)
ret = []
N, M = len(subjects), len(objects)
subjects.sort(key=lambda x: x["bbox"][0] ** 2 + x["bbox"][1] ** 2)
objects.sort(key=lambda x: x["bbox"][0] ** 2 + x["bbox"][1] ** 2)
OBJ_IDX_OFFSET = 10000
SUB_BIT_KIND, OBJ_BIT_KIND = 0, 1
all_boxes_with_idx = [(i, SUB_BIT_KIND, sub["bbox"][0], sub["bbox"][1]) for i, sub in enumerate(subjects)] + [
(i + OBJ_IDX_OFFSET, OBJ_BIT_KIND, obj["bbox"][0], obj["bbox"][1]) for i, obj in enumerate(objects)
]
seen_idx = set()
seen_sub_idx = set()
while N > len(seen_sub_idx):
candidates = []
for idx, kind, x0, y0 in all_boxes_with_idx:
if idx in seen_idx:
continue
candidates.append((idx, kind, x0, y0))
if len(candidates) == 0:
break
left_x = min([v[2] for v in candidates])
top_y = min([v[3] for v in candidates])
candidates.sort(key=lambda x: (x[2] - left_x) ** 2 + (x[3] - top_y) ** 2)
fst_idx, fst_kind, left_x, top_y = candidates[0]
candidates.sort(key=lambda x: (x[2] - left_x) ** 2 + (x[3] - top_y) ** 2)
nxt = None
for i in range(1, len(candidates)):
if candidates[i][1] ^ fst_kind == 1:
nxt = candidates[i]
break
if nxt is None:
break
if fst_kind == SUB_BIT_KIND:
sub_idx, obj_idx = fst_idx, nxt[0] - OBJ_IDX_OFFSET
else:
sub_idx, obj_idx = nxt[0], fst_idx - OBJ_IDX_OFFSET
pair_dis = bbox_distance(subjects[sub_idx]["bbox"], objects[obj_idx]["bbox"])
nearest_dis = float("inf")
for i in range(N):
if i in seen_idx or i == sub_idx:
continue
nearest_dis = min(nearest_dis, bbox_distance(subjects[i]["bbox"], objects[obj_idx]["bbox"]))
if pair_dis >= 3 * nearest_dis:
seen_idx.add(sub_idx)
continue
seen_idx.add(sub_idx)
seen_idx.add(obj_idx + OBJ_IDX_OFFSET)
seen_sub_idx.add(sub_idx)
ret.append(
{
"sub_bbox": {
"bbox": subjects[sub_idx]["bbox"],
"lines": subjects[sub_idx]["lines"],
"index": subjects[sub_idx]["index"],
},
"obj_bboxes": [
{"bbox": objects[obj_idx]["bbox"], "lines": objects[obj_idx]["lines"], "index": objects[obj_idx]["index"]}
],
"sub_idx": sub_idx,
}
)
for i in range(len(objects)):
j = i + OBJ_IDX_OFFSET
if j in seen_idx:
continue
seen_idx.add(j)
nearest_dis, nearest_sub_idx = float("inf"), -1
for k in range(len(subjects)):
dis = bbox_distance(objects[i]["bbox"], subjects[k]["bbox"])
if dis < nearest_dis:
nearest_dis = dis
nearest_sub_idx = k
for k in range(len(subjects)):
if k != nearest_sub_idx:
continue
if k in seen_sub_idx:
for kk in range(len(ret)):
if ret[kk]["sub_idx"] == k:
ret[kk]["obj_bboxes"].append(
{"bbox": objects[i]["bbox"], "lines": objects[i]["lines"], "index": objects[i]["index"]}
)
break
else:
ret.append(
{
"sub_bbox": {
"bbox": subjects[k]["bbox"],
"lines": subjects[k]["lines"],
"index": subjects[k]["index"],
},
"obj_bboxes": [
{"bbox": objects[i]["bbox"], "lines": objects[i]["lines"], "index": objects[i]["index"]}
],
"sub_idx": k,
}
def __tie_up_category_by_distance_v3(blocks, subject_block_type, object_block_type):
# 定义获取主体和客体对象的函数
def get_subjects():
return reduct_overlap(
list(
map(
lambda x: {"bbox": x["bbox"], "lines": x["lines"], "index": x["index"]},
filter(
lambda x: x["type"] == subject_block_type,
blocks,
),
)
seen_sub_idx.add(k)
seen_idx.add(k)
for i in range(len(subjects)):
if i in seen_sub_idx:
continue
ret.append(
{
"sub_bbox": {
"bbox": subjects[i]["bbox"],
"lines": subjects[i]["lines"],
"index": subjects[i]["index"],
},
"obj_bboxes": [],
"sub_idx": i,
}
)
)
return ret
def get_objects():
return reduct_overlap(
list(
map(
lambda x: {"bbox": x["bbox"], "lines": x["lines"], "index": x["index"]},
filter(
lambda x: x["type"] == object_block_type,
blocks,
),
)
)
)
# 调用通用方法
return tie_up_category_by_distance_v3(
get_subjects,
get_objects
)
def get_type_blocks(blocks, block_type: Literal["image", "table"]):

View File

@@ -104,6 +104,10 @@ class UnimernetModel(object):
# Create dataset with sorted images
dataset = MathDataset(sorted_images, transform=self.model.transform)
# 如果batch_size > len(sorted_images)则设置为不超过len(sorted_images)的2的幂
batch_size = min(batch_size, max(1, 2 ** (len(sorted_images).bit_length() - 1))) if sorted_images else 1
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=0)
# Process batches and store results

View File

@@ -1,16 +1,9 @@
from sglang.srt.configs.model_config import multimodal_model_archs
from sglang.srt.models.registry import ModelRegistry
try:
# sglang==0.4.5.post3
from sglang.srt.managers.multimodal_processor import (
PROCESSOR_MAPPING as PROCESSOR_MAPPING,
)
except ImportError:
# sglang==0.4.4.post1
from sglang.srt.managers.image_processor import (
IMAGE_PROCESSOR_MAPPING as PROCESSOR_MAPPING,
)
from sglang.srt.managers.multimodal_processor import (
PROCESSOR_MAPPING as PROCESSOR_MAPPING,
)
from .. import vlm_hf_model as _
from .image_processor import Mineru2ImageProcessor

View File

@@ -5,21 +5,22 @@ from typing import List, Optional, Union
import numpy as np
try:
# sglang==0.4.5.post3
from sglang.version import __version__ as sglang_version
from packaging import version
if version.parse(sglang_version) >= version.parse("0.4.9"):
# sglang >= 0.4.9
from sglang.srt.multimodal.processors.base_processor import (
BaseMultimodalProcessor as BaseProcessor,
)
from sglang.srt.multimodal.mm_utils import divide_to_patches, expand2square, select_best_resolution
else:
# 0.4.7 <= sglang < 0.4.9
from sglang.srt.managers.multimodal_processors.base_processor import (
BaseMultimodalProcessor as BaseProcessor,
)
from sglang.srt.mm_utils import divide_to_patches, expand2square, select_best_resolution
get_global_processor = None
except ImportError:
# sglang==0.4.4.post1
from sglang.srt.managers.image_processors.base_image_processor import (
BaseImageProcessor as BaseProcessor,
get_global_processor,
)
from sglang.srt.mm_utils import divide_to_patches, expand2square, select_best_resolution
get_global_processor = None
from sglang.srt.utils import load_image, logger
from sglang.utils import get_exception_traceback
@@ -123,64 +124,6 @@ class Mineru2ImageProcessor(BaseProcessor):
image_processor,
)
# sglang==0.4.4.post1
async def process_images_async(
self,
image_data: List[Union[str, bytes]],
input_text,
request_obj,
*args,
**kwargs,
):
if not image_data:
return None
modalities = request_obj.modalities or ["image"]
aspect_ratio = getattr(self.hf_config, "image_aspect_ratio", "")
grid_pinpoints = (
self.hf_config.image_grid_pinpoints
if hasattr(self.hf_config, "image_grid_pinpoints") and "anyres" in aspect_ratio
else None
)
if isinstance(image_data, str):
image_data = [image_data]
if isinstance(image_data, list) and len(image_data) > 0:
if "multi-images" in modalities or "video" in modalities:
# Multiple images
aspect_ratio = "pad" # LLaVA OneVision Handling: more than one image --> interleaved image mode or video mode. We do not use anyres
pixel_values, image_hashes, image_sizes = [], [], []
res = []
for img_data in image_data:
res.append(self._process_single_image(img_data, aspect_ratio, grid_pinpoints))
res = await asyncio.gather(*res)
for pixel_v, image_h, image_s in res:
pixel_values.append(pixel_v)
image_hashes.append(image_h)
image_sizes.append(image_s)
if isinstance(pixel_values[0], np.ndarray):
pixel_values = np.stack(pixel_values, axis=0)
else:
# A single image
pixel_values, image_hash, image_size = await self._process_single_image(
image_data[0], aspect_ratio, grid_pinpoints
)
image_hashes = [image_hash]
image_sizes = [image_size]
else:
raise ValueError(f"Invalid image data: {image_data}")
return {
"pixel_values": pixel_values,
"image_hashes": image_hashes,
"image_sizes": image_sizes,
"modalities": request_obj.modalities or ["image"],
}
# sglang==0.4.5.post3
async def process_mm_data_async(
self,
image_data: List[Union[str, bytes]],
@@ -191,11 +134,50 @@ class Mineru2ImageProcessor(BaseProcessor):
):
from sglang.srt.managers.schedule_batch import Modality, MultimodalDataItem
result = await self.process_images_async(image_data, input_text, request_obj, *args, **kwargs)
if result is None:
if not image_data:
return None
modalities = request_obj.modalities or ["image"]
aspect_ratio = getattr(self.hf_config, "image_aspect_ratio", None)
grid_pinpoints = (
self.hf_config.image_grid_pinpoints
if hasattr(self.hf_config, "image_grid_pinpoints")
and "anyres" in aspect_ratio
else None
)
if isinstance(image_data, str):
image_data = [image_data]
if isinstance(image_data, list) and len(image_data) > 0:
if "multi-images" in modalities or "video" in modalities:
# Multiple images
aspect_ratio = "pad" # LLaVA OneVision Handling: more than one image --> interleaved image mode or video mode. We do not use anyres
pixel_values, data_hashes, image_sizes = [], [], []
res = []
for img_data in image_data:
res.append(
self._process_single_image(
img_data, aspect_ratio, grid_pinpoints
)
)
res = await asyncio.gather(*res)
for pixel_v, image_h, image_s in res:
pixel_values.append(pixel_v)
data_hashes.append(image_h)
image_sizes.append(image_s)
if isinstance(pixel_values[0], np.ndarray):
pixel_values = np.stack(pixel_values, axis=0)
else:
# A single image
pixel_values, image_hash, image_size = await self._process_single_image(
image_data[0], aspect_ratio, grid_pinpoints
)
image_sizes = [image_size]
else:
raise ValueError(f"Invalid image data: {image_data}")
modality = Modality.IMAGE
if isinstance(request_obj.modalities, list):
if request_obj.modalities[0] == "multi-images":
@@ -203,15 +185,29 @@ class Mineru2ImageProcessor(BaseProcessor):
elif request_obj.modalities[0] == "video":
modality = Modality.VIDEO
return {
"mm_items": [
MultimodalDataItem(
pixel_values=result["pixel_values"],
image_sizes=result["image_sizes"],
modality=modality,
)
],
}
if version.parse(sglang_version) >= version.parse("0.4.9.post3"):
# sglang >= 0.4.9.post3
return {
"mm_items": [
MultimodalDataItem(
feature=pixel_values,
model_specific_data={
"image_sizes": image_sizes,
},
modality=modality,
)
],
}
else:
# 0.4.7 <= sglang <= 0.4.9.post2
return {
"mm_items": [
MultimodalDataItem(
pixel_values=pixel_values,
image_sizes=image_sizes,
modality=modality,
)
],
}
ImageProcessorMapping = {Mineru2QwenForCausalLM: Mineru2ImageProcessor}

View File

@@ -5,9 +5,20 @@ from typing import Iterable, List, Optional, Tuple
import numpy as np
import torch
from sglang.srt.layers.quantization.base_config import QuantizationConfig
from sglang.srt.mm_utils import (
get_anyres_image_grid_shape, # unpad_image, unpad_image_shape
)
from sglang.version import __version__ as sglang_version
from packaging import version
if version.parse(sglang_version) >= version.parse("0.4.9"):
# sglang >= 0.4.9
from sglang.srt.multimodal.mm_utils import (
get_anyres_image_grid_shape,
)
else:
# 0.4.7 <= sglang < 0.4.9
from sglang.srt.mm_utils import (
get_anyres_image_grid_shape,
)
from sglang.srt.model_executor.forward_batch_info import ForwardBatch
from sglang.srt.model_loader.weight_utils import default_weight_loader
from sglang.srt.models.qwen2 import Qwen2ForCausalLM
@@ -111,14 +122,9 @@ class Mineru2QwenForCausalLM(nn.Module):
raise ValueError(f"Unexpected select feature: {self.select_feature}")
def pad_input_ids(self, input_ids: List[int], image_inputs):
if hasattr(image_inputs, "mm_items"): # MultimodalInputs
# sglang==0.4.5.post3
image_sizes = flatten_nested_list([item.image_sizes for item in image_inputs.mm_items])
pad_values = [item.pad_value for item in image_inputs.mm_items]
else: # ImageInputs
# sglang==0.4.4.post1
image_sizes = image_inputs.image_sizes
pad_values = image_inputs.pad_values
image_sizes = flatten_nested_list([item.image_sizes for item in image_inputs.mm_items])
pad_values = [item.pad_value for item in image_inputs.mm_items]
# hardcode for spatial_unpad + anyres
# if image_inputs.modalities is not None and (
@@ -196,14 +202,8 @@ class Mineru2QwenForCausalLM(nn.Module):
positions: torch.Tensor,
forward_batch: ForwardBatch,
) -> torch.Tensor:
if hasattr(forward_batch, "mm_inputs"):
# sglang==0.4.5.post3
image_inputs = forward_batch.mm_inputs
is_sglang_mm_inputs = True
else:
# sglang==0.4.4.post1
image_inputs = forward_batch.image_inputs
is_sglang_mm_inputs = False
image_inputs = forward_batch.mm_inputs
if image_inputs is None:
image_inputs = []
@@ -223,12 +223,7 @@ class Mineru2QwenForCausalLM(nn.Module):
max_image_offset = []
for im in image_inputs:
if im:
if hasattr(im, "mm_items"):
# sglang==0.4.5.post3
modalities_list.extend([downgrade_modality(item.modality) for item in im.mm_items])
elif im.modalities is not None:
# sglang==0.4.4.post1
modalities_list.extend(im.modalities)
modalities_list.extend([downgrade_modality(item.modality) for item in im.mm_items])
if im and im.image_offsets:
max_image_offset.append(np.max(np.array(im.image_offsets) + np.array(im.image_pad_len)))
else:
@@ -240,8 +235,18 @@ class Mineru2QwenForCausalLM(nn.Module):
if need_vision.any():
bs = forward_batch.batch_size
if is_sglang_mm_inputs:
# sglang==0.4.5.post3
if version.parse(sglang_version) >= version.parse("0.4.9.post3"):
# sglang >= 0.4.9.post3
pixel_values = flatten_nested_list(
[[item.feature for item in image_inputs[i].mm_items] for i in range(bs) if need_vision[i]]
) # image_inputs[batch_idx].mm_items[item_idx].pixel_values is Tensor
image_sizes = [
flatten_nested_list([item.model_specific_data["image_sizes"] for item in image_inputs[i].mm_items])
for i in range(bs)
if need_vision[i]
] # image_inputs[batch_idx].mm_items[item_idx].image_sizes should be tuple, but is list of tuple for now.
else:
# 0.4.7 <= sglang <= 0.4.9.post2
pixel_values = flatten_nested_list(
[[item.pixel_values for item in image_inputs[i].mm_items] for i in range(bs) if need_vision[i]]
) # image_inputs[batch_idx].mm_items[item_idx].pixel_values is Tensor
@@ -250,10 +255,6 @@ class Mineru2QwenForCausalLM(nn.Module):
for i in range(bs)
if need_vision[i]
] # image_inputs[batch_idx].mm_items[item_idx].image_sizes should be tuple, but is list of tuple for now.
else:
# sglang==0.4.4.post1
pixel_values = [image_inputs[i].pixel_values for i in range(bs) if need_vision[i]]
image_sizes = [image_inputs[i].image_sizes for i in range(bs) if need_vision[i]]
########## Encode Image ########

View File

@@ -2,21 +2,64 @@ import json
from io import BytesIO
from loguru import logger
from pypdf import PdfReader, PdfWriter
from pypdf import PdfReader, PdfWriter, PageObject
from reportlab.pdfgen import canvas
from .enum_class import BlockType, ContentType
def cal_canvas_rect(page, bbox):
    """Map a top-left-origin bbox onto canvas coordinates for a (possibly rotated) PDF page.

    Args:
        page: A pypdf page object; exposes ``cropbox`` and the ``/Rotate`` entry.
        bbox: ``[x0, y0, x1, y1]`` bounding box coordinates.

    Returns:
        ``[x, y, width, height]`` rectangle for drawing on the canvas.
    """
    # NOTE(review): uses cropbox[2]/[3] directly as page width/height — assumes the
    # cropbox origin is (0, 0); confirm for PDFs with a shifted cropbox.
    page_width = float(page.cropbox[2])
    page_height = float(page.cropbox[3])
    # /Rotate may be absent (defaults to 0) or a multiple of 360; normalize to 0/90/180/270.
    rotation = page.get("/Rotate", 0) % 360
    # Displayed page dimensions: a 90/270 rotation swaps width and height.
    if rotation in (90, 270):
        actual_width, actual_height = page_height, page_width
    else:
        actual_width, actual_height = page_width, page_height
    x0, y0, x1, y1 = bbox
    rect_w = abs(x1 - x0)
    rect_h = abs(y1 - y0)
    if rotation == 90:
        # Quarter turn: swap the extent and exchange the corner coordinates.
        rect_w, rect_h = rect_h, rect_w
        x0, y0 = y0, x0
    elif rotation == 180:
        # Half turn: mirror x across the page width; y is kept as-is here.
        x0 = page_width - x1
    elif rotation == 270:
        # Three-quarter turn: swap the extent and reflect both axes against the
        # displayed page dimensions.
        rect_w, rect_h = rect_h, rect_w
        x0, y0 = actual_height - y1, actual_width - x1
    else:
        # No rotation: convert from top-left origin to the canvas's bottom-left origin.
        y0 = page_height - y1
    return [x0, y0, rect_w, rect_h]
def draw_bbox_without_number(i, bbox_list, page, c, rgb_config, fill_config):
new_rgb = [float(color) / 255 for color in rgb_config]
page_data = bbox_list[i]
page_width, page_height = page.cropbox[2], page.cropbox[3]
for bbox in page_data:
width = bbox[2] - bbox[0]
height = bbox[3] - bbox[1]
rect = [bbox[0], page_height - bbox[3], width, height] # Define the rectangle
rect = cal_canvas_rect(page, bbox) # Define the rectangle
if fill_config: # filled rectangle
c.setFillColorRGB(new_rgb[0], new_rgb[1], new_rgb[2], 0.3)
@@ -35,10 +78,8 @@ def draw_bbox_with_number(i, bbox_list, page, c, rgb_config, fill_config, draw_b
for j, bbox in enumerate(page_data):
# 确保bbox的每个元素都是float
x0, y0, x1, y1 = map(float, bbox)
width = x1 - x0
height = y1 - y0
rect = [x0, page_height - y1, width, height]
rect = cal_canvas_rect(page, bbox) # Define the rectangle
if draw_bbox:
if fill_config:
c.setFillColorRGB(*new_rgb, 0.3)
@@ -48,8 +89,23 @@ def draw_bbox_with_number(i, bbox_list, page, c, rgb_config, fill_config, draw_b
c.rect(rect[0], rect[1], rect[2], rect[3], stroke=1, fill=0)
c.setFillColorRGB(*new_rgb, 1.0)
c.setFontSize(size=10)
# 这里也要用float
c.drawString(x1 + 2, page_height - y0 - 10, str(j + 1))
c.saveState()
rotation = page.get("/Rotate", 0)
rotation = rotation % 360
if 0 == rotation:
c.translate(rect[0] + rect[2] + 2, rect[1] + rect[3] - 10)
elif 90 == rotation:
c.translate(rect[0] + 10, rect[1] + rect[3] + 2)
elif 180 == rotation:
c.translate(rect[0] - 2, rect[1] + 10)
elif 270 == rotation:
c.translate(rect[0] + rect[2] - 10, rect[1] - 2)
c.rotate(rotation)
c.drawString(0, 0, str(j + 1))
c.restoreState()
return c
@@ -185,6 +241,9 @@ def draw_layout_bbox(pdf_info, pdf_bytes, out_path, filename):
# 添加检查确保overlay_pdf.pages不为空
if len(overlay_pdf.pages) > 0:
new_page = PageObject(pdf=None)
new_page.update(page)
page = new_page
page.merge_page(overlay_pdf.pages[0])
else:
# 记录日志并继续处理下一个页面
@@ -300,6 +359,9 @@ def draw_span_bbox(pdf_info, pdf_bytes, out_path, filename):
# 添加检查确保overlay_pdf.pages不为空
if len(overlay_pdf.pages) > 0:
new_page = PageObject(pdf=None)
new_page.update(page)
page = new_page
page.merge_page(overlay_pdf.pages[0])
else:
# 记录日志并继续处理下一个页面

View File

@@ -317,3 +317,26 @@ def convert_otsl_to_html(otsl_content: str):
)
return export_to_html(table_data)
def block_content_to_html(block_content: str) -> str:
    """Convert OTSL table fragments inside block content to HTML.

    The content is split on double-newline boundaries; any segment containing
    OTSL cell tags (<fcel> or <ecel>) is rendered to an HTML table via
    ``convert_otsl_to_html``, while other segments pass through untouched.

    Parameters:
        block_content (str): Text that may embed OTSL-tagged table segments.

    Returns:
        str: The same content with OTSL segments replaced by HTML tables.
    """
    converted = [
        convert_otsl_to_html(segment)
        if ("<fcel>" in segment or "<ecel>" in segment)
        else segment
        for segment in block_content.split("\n\n")
    ]
    return "\n\n".join(converted)

View File

@@ -0,0 +1,168 @@
"""
包含两个MagicModel类中重复使用的方法和逻辑
"""
from typing import List, Dict, Any, Callable
from mineru.utils.boxbase import bbox_distance, is_in
def reduct_overlap(bboxes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Drop every bbox that lies inside another bbox in the list.

    Args:
        bboxes: list of dicts, each carrying a 'bbox' entry.

    Returns:
        The entries whose bbox is not contained in any other entry's bbox.

    NOTE(review): two identical bboxes contain each other, so both are
    discarded — behavior preserved from the original implementation.
    """
    total = len(bboxes)
    return [
        candidate
        for pos, candidate in enumerate(bboxes)
        if not any(
            pos != other and is_in(candidate['bbox'], bboxes[other]['bbox'])
            for other in range(total)
        )
    ]
def tie_up_category_by_distance_v3(
    get_subjects_func: Callable,
    get_objects_func: Callable,
    extract_subject_func: Callable = None,
    extract_object_func: Callable = None
):
    """Generic category-association routine: pair subject boxes with object boxes.

    Runs a greedy nearest-neighbour matching over the two box sets, then two
    sweep passes that attach any leftover objects and emit empty entries for
    unmatched subjects.

    Args:
        get_subjects_func: callable returning the subject dicts (each with a "bbox").
        get_objects_func: callable returning the object dicts (each with a "bbox").
        extract_subject_func: optional transform applied to a matched subject
            before it is stored; defaults to identity.
        extract_object_func: optional transform applied to a matched object
            before it is stored; defaults to identity.

    Returns:
        List of dicts shaped {"sub_bbox": ..., "obj_bboxes": [...], "sub_idx": int}.
    """
    subjects = get_subjects_func()
    objects = get_objects_func()
    # Fall back to identity when no custom extractors are supplied.
    if extract_subject_func is None:
        extract_subject_func = lambda x: x
    if extract_object_func is None:
        extract_object_func = lambda x: x
    ret = []
    N, M = len(subjects), len(objects)
    # Sort both sets by squared distance of the box's top-left corner from the
    # page origin so scanning proceeds roughly top-left to bottom-right.
    subjects.sort(key=lambda x: x["bbox"][0] ** 2 + x["bbox"][1] ** 2)
    objects.sort(key=lambda x: x["bbox"][0] ** 2 + x["bbox"][1] ** 2)
    # Object indices are offset so subject and object ids share one namespace.
    OBJ_IDX_OFFSET = 10000
    SUB_BIT_KIND, OBJ_BIT_KIND = 0, 1
    all_boxes_with_idx = [(i, SUB_BIT_KIND, sub["bbox"][0], sub["bbox"][1]) for i, sub in enumerate(subjects)] + [
        (i + OBJ_IDX_OFFSET, OBJ_BIT_KIND, obj["bbox"][0], obj["bbox"][1]) for i, obj in enumerate(objects)
    ]
    seen_idx = set()      # ids (offset namespace) already consumed by a pairing
    seen_sub_idx = set()  # subject indices that already own at least one object
    # Greedy phase: repeatedly take the unmatched box closest to the top-left
    # frontier and pair it with the nearest box of the opposite kind.
    while N > len(seen_sub_idx):
        candidates = []
        for idx, kind, x0, y0 in all_boxes_with_idx:
            if idx in seen_idx:
                continue
            candidates.append((idx, kind, x0, y0))
        if len(candidates) == 0:
            break
        left_x = min([v[2] for v in candidates])
        top_y = min([v[3] for v in candidates])
        # Anchor = remaining box closest to the top-left frontier of the set.
        candidates.sort(key=lambda x: (x[2] - left_x) ** 2 + (x[3] - top_y) ** 2)
        fst_idx, fst_kind, left_x, top_y = candidates[0]
        fst_bbox = subjects[fst_idx]['bbox'] if fst_kind == SUB_BIT_KIND else objects[fst_idx - OBJ_IDX_OFFSET]['bbox']
        # Re-rank all candidates by their bbox distance to the anchor box.
        candidates.sort(
            key=lambda x: bbox_distance(fst_bbox, subjects[x[0]]['bbox']) if x[1] == SUB_BIT_KIND else bbox_distance(
                fst_bbox, objects[x[0] - OBJ_IDX_OFFSET]['bbox']))
        nxt = None
        for i in range(1, len(candidates)):
            # XOR of the kind bits equals 1 exactly when the kinds differ.
            if candidates[i][1] ^ fst_kind == 1:
                nxt = candidates[i]
                break
        if nxt is None:
            break
        if fst_kind == SUB_BIT_KIND:
            sub_idx, obj_idx = fst_idx, nxt[0] - OBJ_IDX_OFFSET
        else:
            sub_idx, obj_idx = nxt[0], fst_idx - OBJ_IDX_OFFSET
        pair_dis = bbox_distance(subjects[sub_idx]["bbox"], objects[obj_idx]["bbox"])
        nearest_dis = float("inf")
        for i in range(N):
            # Removed the 1-to-1 matching bias that the original algorithm had:
            # if i in seen_idx or i == sub_idx:continue
            nearest_dis = min(nearest_dis, bbox_distance(subjects[i]["bbox"], objects[obj_idx]["bbox"]))
        # Reject the pair when some subject is much (3x) closer to this object;
        # the subject is still marked seen so the loop keeps making progress.
        if pair_dis >= 3 * nearest_dis:
            seen_idx.add(sub_idx)
            continue
        seen_idx.add(sub_idx)
        seen_idx.add(obj_idx + OBJ_IDX_OFFSET)
        seen_sub_idx.add(sub_idx)
        ret.append(
            {
                "sub_bbox": extract_subject_func(subjects[sub_idx]),
                "obj_bboxes": [extract_object_func(objects[obj_idx])],
                "sub_idx": sub_idx,
            }
        )
    # Sweep 1: attach every still-unmatched object to its nearest subject,
    # appending to that subject's existing entry or creating a new one.
    for i in range(len(objects)):
        j = i + OBJ_IDX_OFFSET
        if j in seen_idx:
            continue
        seen_idx.add(j)
        nearest_dis, nearest_sub_idx = float("inf"), -1
        for k in range(len(subjects)):
            dis = bbox_distance(objects[i]["bbox"], subjects[k]["bbox"])
            if dis < nearest_dis:
                nearest_dis = dis
                nearest_sub_idx = k
        for k in range(len(subjects)):
            if k != nearest_sub_idx:
                continue
            if k in seen_sub_idx:
                # Subject already has an entry: append this object to it.
                for kk in range(len(ret)):
                    if ret[kk]["sub_idx"] == k:
                        ret[kk]["obj_bboxes"].append(extract_object_func(objects[i]))
                        break
            else:
                ret.append(
                    {
                        "sub_bbox": extract_subject_func(subjects[k]),
                        "obj_bboxes": [extract_object_func(objects[i])],
                        "sub_idx": k,
                    }
                )
                seen_sub_idx.add(k)
                seen_idx.add(k)
    # Sweep 2: emit an empty entry for every subject that matched nothing.
    for i in range(len(subjects)):
        if i in seen_sub_idx:
            continue
        ret.append(
            {
                "sub_bbox": extract_subject_func(subjects[i]),
                "obj_bboxes": [],
                "sub_idx": i,
            }
        )
    return ret

View File

@@ -1 +1 @@
__version__ = "2.1.1"
__version__ = "2.1.5"

View File

@@ -53,7 +53,7 @@ vlm = [
"pydantic",
]
sglang = [
"sglang[all]>=0.4.8,<0.4.9",
"sglang[all]>=0.4.7,<0.4.10",
]
pipeline = [
"matplotlib>=3.10,<4",