Compare commits

...

48 Commits

Author SHA1 Message Date
Xiaomeng Zhao
68e8a00d8b Merge pull request #2705 from opendatalab/dev
update docs
2025-06-17 21:59:00 +08:00
Xiaomeng Zhao
c0cf62e4cc Merge pull request #2704 from myhloli/dev
fix: update version to 2.0.5 and clarify changelog entries in README files
2025-06-17 21:57:57 +08:00
myhloli
21ff17a65d fix: update version to 2.0.5 and clarify changelog entries in README files 2025-06-17 21:57:00 +08:00
Xiaomeng Zhao
cdf6e0cfd0 Merge pull request #2703 from myhloli/dev
Dev
2025-06-17 21:46:27 +08:00
myhloli
ec3adde809 fix: remove unused import of ModelPath in vlm_analyze.py 2025-06-17 21:36:06 +08:00
myhloli
d58b24b5dd fix: add conditional imports for torch and torch_npu in model_utils.py 2025-06-17 20:59:41 +08:00
myhloli
bd5252d946 fix: add conditional import for torch and torch_npu in config_reader.py 2025-06-17 20:58:46 +08:00
myhloli
b398a2d2b8 fix: update NPU compile mode handling in pipeline_analyze.py 2025-06-17 20:56:47 +08:00
myhloli
bfaf07c69f fix: refactor device mode and virtual VRAM size handling in client.py and common.py 2025-06-17 20:16:49 +08:00
Xiaomeng Zhao
c8904da6d3 Merge pull request #2698 from hsia/escape-html
Fix: 表格内容中的HTML Entity会导致表格内容错乱 #2694
2025-06-17 19:07:11 +08:00
Li Xia
3854bd0fa0 Fix: 表格内容中的HTML Entity会导致表格内容错乱 [#2694] 2025-06-17 18:00:45 +08:00
Xiaomeng Zhao
38dfe835e4 Merge pull request #2691 from zjx20/lazily-import
chore: speed up "mineru --help"
2025-06-17 16:15:38 +08:00
zjx20
5b26a38726 chore: speed up "mineru --help" 2025-06-17 07:37:44 +00:00
Xiaomeng Zhao
80b5e4fe8a Merge pull request #2688 from opendatalab/master
master->dev
2025-06-17 14:48:07 +08:00
myhloli
45a282fa4e Update version.py with new version 2025-06-17 06:19:58 +00:00
Xiaomeng Zhao
e9175b1937 Merge pull request #2686 from opendatalab/release-2.0.4
Release 2.0.4
2025-06-17 14:17:52 +08:00
Xiaomeng Zhao
13c23c475d Merge pull request #2681 from opendatalab/dev
Dev
2025-06-17 12:06:33 +08:00
Xiaomeng Zhao
039cf27fd5 Merge pull request #2680 from myhloli/dev
fix: include model_path in key for backend model retrieval in vlm_analyze.py
2025-06-17 12:05:13 +08:00
myhloli
72c946a9ec fix: include model_path in key for backend model retrieval in vlm_analyze.py 2025-06-17 12:03:07 +08:00
Xiaomeng Zhao
1bec25b1e1 Merge pull request #2679 from myhloli/dev
fix: support NPU device in UnimernetModel initialization
2025-06-17 11:26:08 +08:00
myhloli
2785f60424 fix: support NPU device in UnimernetModel initialization 2025-06-17 11:24:43 +08:00
myhloli
3cdcd76c34 docs: update changelog for version 2.0.4 and add installation instructions for mineru client 2025-06-17 11:21:46 +08:00
Xiaomeng Zhao
252255166f Merge pull request #2676 from hotelll/patch-1
correct the chat template of sglang_server
2025-06-17 11:16:58 +08:00
github-actions[bot]
8dae3ff1ad @hotelll has signed the CLA in opendatalab/MinerU#2676 2025-06-17 03:10:08 +00:00
He Tianyao
8deb3b5253 correct server chat template
MinerU2.0's chat template during training has additional "\n" compared with chatml's. This difference may slightly affect the performance under server mode. (because sglang_server use sglang's chat template).
2025-06-17 10:56:54 +08:00
Xiaomeng Zhao
f34644eb61 Merge pull request #2675 from myhloli/dev
add new enum values and improve MIN_BATCH_INFERENCE_SIZE documentation in pipeline_analyze.py
2025-06-17 02:04:04 +08:00
myhloli
5b88eba7a1 Merge remote-tracking branch 'origin/dev' into dev 2025-06-17 02:01:35 +08:00
myhloli
58b8e8a912 fix: add new enum values and improve MIN_BATCH_INFERENCE_SIZE documentation in pipeline_analyze.py 2025-06-17 02:01:24 +08:00
Xiaomeng Zhao
7960d8b799 Merge pull request #2668 from myhloli/dev
fix: include server_url in model key for backend model retrieval in vlm_analyze.py
2025-06-16 16:59:17 +08:00
myhloli
20dcbd2164 fix: include server_url in model key for backend model retrieval in vlm_analyze.py 2025-06-16 16:58:09 +08:00
Xiaomeng Zhao
e4e58fa2de Merge pull request #2667 from myhloli/dev
fix: refine model path condition for transformers and sglang-engine backends in vlm_analyze.py
2025-06-16 16:52:13 +08:00
myhloli
013ddc02b2 fix: refine model path condition for transformers and sglang-engine backends in vlm_analyze.py 2025-06-16 16:50:52 +08:00
Xiaomeng Zhao
390ddd8b96 Merge pull request #2660 from opendatalab/dev
fix: correct syntax error in demo.py for VLM client backend
2025-06-15 18:21:24 +08:00
Xiaomeng Zhao
a1e377be05 Merge pull request #2659 from myhloli/dev
fix: correct syntax error in demo.py for VLM client backend
2025-06-15 18:20:05 +08:00
myhloli
b7d7a1bf99 fix: correct syntax error in demo.py for VLM client backend 2025-06-15 18:19:19 +08:00
Xiaomeng Zhao
d71c7f3a7d Merge pull request #2656 from opendatalab/master
master->dev
2025-06-15 11:32:07 +08:00
Xiaomeng Zhao
2abcc43493 Merge pull request #2655 from opendatalab/dev
update doc
2025-06-15 11:29:37 +08:00
Xiaomeng Zhao
f07ac1c8a2 Merge pull request #2654 from myhloli/dev
update doc
2025-06-15 11:29:04 +08:00
myhloli
af6a2166fe fix: improve clarity in changelog entries for feature toggle issues and local deployment 2025-06-15 11:27:42 +08:00
myhloli
0f7d960885 Update version.py with new version 2025-06-15 03:12:31 +00:00
Xiaomeng Zhao
9f0008acff Merge pull request #2653 from opendatalab/release-2.0.3
Release 2.0.3
2025-06-15 11:10:10 +08:00
Xiaomeng Zhao
ccd2a71fbb Merge pull request #2652 from myhloli/dev
fix: update sglang version to 0.4.7 and adjust changelog for compatibility issues
2025-06-15 11:07:17 +08:00
myhloli
e1181ba814 fix: update sglang version to 0.4.7 and adjust changelog for compatibility issues 2025-06-15 11:03:07 +08:00
myhloli
6717712e91 Merge remote-tracking branch 'origin/dev' into dev 2025-06-15 10:57:52 +08:00
myhloli
a49c605f12 fix: update request handling to pass state from send_one_request to wait_one_response 2025-06-15 10:57:30 +08:00
Xiaomeng Zhao
740c5f6f5c Merge pull request #2650 from opendatalab/master
master->dev
2025-06-15 03:28:45 +08:00
myhloli
8230995a50 Update version.py with new version 2025-06-14 19:12:08 +00:00
Xiaomeng Zhao
8cf86e3818 Merge pull request #2649 from opendatalab/release-2.0.2
Release 2.0.2
2025-06-15 03:10:30 +08:00
17 changed files with 129 additions and 53 deletions

View File

@@ -51,9 +51,14 @@ Easier to use: Just grab MinerU Desktop. No coding, no login, just a simple inte
</div>
# Changelog
- 2025/06/15 2.0.2 released
- 2025/06/17 2.0.5 Released
- Fixed the issue where models were still required to be downloaded in the `sglang-client` mode
- Fixed the issue where the `sglang-client` mode unnecessarily depended on packages like `torch` during runtime.
- Fixed the issue where only the first instance would take effect when attempting to launch multiple `sglang-client` instances via multiple URLs within the same process
- 2025/06/15 2.0.3 released
- Fixed a configuration file key-value update error that occurred when downloading model type was set to `all`
- Fixed an issue where formula and table feature toggle parameters in the command line interface could not be effectively disabled
- Fixed the issue where the formula and table feature toggle switches were not working in `command line mode`, causing the features to remain enabled.
- Fixed compatibility issues with sglang version 0.4.7 in the `sglang-engine` mode.
- Updated Dockerfile and installation documentation for deploying the full version of MinerU in sglang environment
- 2025/06/13 2.0.0 Released
- MinerU 2.0 represents a comprehensive reconstruction and upgrade from architecture to functionality, delivering a more streamlined design, enhanced performance, and more flexible user experience.
@@ -366,7 +371,7 @@ Easier to use: Just grab MinerU Desktop. No coding, no login, just a simple inte
<li><a href="#quick-start">Quick Start</a>
<ul>
<li><a href="#online-demo">Online Demo</a></li>
<li><a href="#quick-cpu-demo">Local Deployment</a></li>
<li><a href="#local-deployment">Local Deployment</a></li>
</ul>
</li>
</ul>
@@ -532,6 +537,14 @@ If you need to use **sglang to accelerate VLM model inference**, you can choose
> [!TIP]
> The Dockerfile uses `lmsysorg/sglang:v0.4.7-cu124` as the default base image. If necessary, you can modify it to another platform version.
#### 1.4 Install client (for connecting to sglang-server on edge devices that require only CPU and network connectivity)
```bash
uv pip install -U mineru
mineru -p <input_path> -o <output_path> -b vlm-sglang-client -u http://<host_ip>:<port>
```
---
### 2. Using MinerU

View File

@@ -50,9 +50,14 @@
</div>
# 更新记录
- 2025/06/15 2.0.2发布
- 2025/06/17 2.0.5发布
- 修复了`sglang-client`模式下依然需要下载模型的问题
- 修复了`sglang-client`模式需要依赖`torch`等实际运行不需要的包的问题
- 修复了同一进程内尝试通过多个url启动多个`sglang-client`实例时,只有第一个生效的问题
- 2025/06/15 2.0.3发布
- 修复了当下载模型类型设置为`all`时,配置文件出现键值更新错误的问题
- 修复了命令行接口中公式和表格功能开关参数实际无法关闭的问题
- 修复了命令行模式下公式和表格功能开关不生效导致功能无法关闭的问题
- 修复了`sglang-engine`模式下0.4.7版本sglang的兼容性问题
- 更新了sglang环境下部署完整版MinerU的Dockerfile和相关安装文档
- 2025/06/13 2.0.0发布
- MinerU 2.0 是一次从架构到功能的全面重构与升级,带来了更简洁的设计、更强的性能以及更灵活的使用体验。
@@ -521,6 +526,13 @@ uv pip install -e .[core] -i https://mirrors.aliyun.com/pypi/simple
> [!TIP]
> Dockerfile默认使用`lmsysorg/sglang:v0.4.7-cu124`作为基础镜像,如有需要,您可以自行修改为其他平台版本。
#### 1.4 安装client(用于在仅需 CPU 和网络连接的边缘设备上连接 sglang-server)
```bash
uv pip install -U mineru -i https://mirrors.aliyun.com/pypi/simple
mineru -p <input_path> -o <output_path> -b vlm-sglang-client -u http://<host_ip>:<port>
```
---

View File

@@ -240,4 +240,4 @@ if __name__ == '__main__':
"""To enable VLM mode, change the backend to 'vlm-xxx'"""
# parse_doc(doc_path_list, output_dir, backend="vlm-transformers") # more general.
# parse_doc(doc_path_list, output_dir, backend="vlm-sglang-engine") # faster(engine).
# parse_doc(doc_path_list, output_dir, backend="vlm-sglang-client", server_url="http://127.0.0.1:30000" # faster(client).
# parse_doc(doc_path_list, output_dir, backend="vlm-sglang-client", server_url="http://127.0.0.1:30000") # faster(client).

View File

@@ -2,15 +2,12 @@ import os
import time
from typing import List, Tuple
import PIL.Image
import torch
from loguru import logger
from .model_init import MineruPipelineModel
from mineru.utils.config_reader import get_device
from ...utils.pdf_classify import classify
from ...utils.pdf_image_tools import load_images_from_pdf
from loguru import logger
from ...utils.model_utils import get_vram, clean_memory
@@ -76,7 +73,11 @@ def doc_analyze(
formula_enable=True,
table_enable=True,
):
MIN_BATCH_INFERENCE_SIZE = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 100))
"""
适当调大MIN_BATCH_INFERENCE_SIZE可以提高性能,但可能会增加显存使用量。
可通过环境变量MINERU_MIN_BATCH_INFERENCE_SIZE设置默认值为100。
"""
min_batch_inference_size = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 100))
# 收集所有页面信息
all_pages_info = [] # 存储(dataset_index, page_index, img, ocr, lang, width, height)
@@ -109,7 +110,7 @@ def doc_analyze(
# 准备批处理
images_with_extra_info = [(info[2], info[3], info[4]) for info in all_pages_info]
batch_size = MIN_BATCH_INFERENCE_SIZE
batch_size = min_batch_inference_size
batch_images = [
images_with_extra_info[i:i + batch_size]
for i in range(0, len(images_with_extra_info), batch_size)
@@ -162,7 +163,7 @@ def batch_image_analyze(
try:
import torch_npu
if torch_npu.npu.is_available():
torch.npu.set_compile_mode(jit_compile=False)
torch_npu.npu.set_compile_mode(jit_compile=False)
except Exception as e:
raise RuntimeError(
"NPU is selected as device, but torch_npu is not available. "

View File

@@ -8,7 +8,6 @@ from mineru.utils.pdf_image_tools import load_images_from_pdf
from .base_predictor import BasePredictor
from .predictor import get_predictor
from .token_to_middle_json import result_to_middle_json
from ...utils.enum_class import ModelPath
from ...utils.models_download_utils import auto_download_and_get_model_root_path
@@ -27,9 +26,9 @@ class ModelSingleton:
model_path: str | None,
server_url: str | None,
) -> BasePredictor:
key = (backend,)
key = (backend, model_path, server_url)
if key not in self._models:
if not model_path:
if backend in ['transformers', 'sglang-engine'] and not model_path:
model_path = auto_download_and_get_model_root_path("/","vlm")
self._models[key] = get_predictor(
backend=backend,

View File

@@ -9,7 +9,6 @@ from mineru.utils.model_utils import get_vram
from ..version import __version__
from .common import do_parse, read_fn, pdf_suffixes, image_suffixes
@click.command()
@click.version_option(__version__,
'--version',
@@ -139,25 +138,26 @@ from .common import do_parse, read_fn, pdf_suffixes, image_suffixes
def main(input_path, output_dir, method, backend, lang, server_url, start_page_id, end_page_id, formula_enable, table_enable, device_mode, virtual_vram, model_source):
def get_device_mode() -> str:
if device_mode is not None:
return device_mode
else:
return get_device()
if os.getenv('MINERU_DEVICE_MODE', None) is None:
os.environ['MINERU_DEVICE_MODE'] = get_device_mode()
if not backend.endswith('-client'):
def get_device_mode() -> str:
if device_mode is not None:
return device_mode
else:
return get_device()
if os.getenv('MINERU_DEVICE_MODE', None) is None:
os.environ['MINERU_DEVICE_MODE'] = get_device_mode()
def get_virtual_vram_size() -> int:
if virtual_vram is not None:
return virtual_vram
if get_device_mode().startswith("cuda") or get_device_mode().startswith("npu"):
return round(get_vram(get_device_mode()))
return 1
if os.getenv('MINERU_VIRTUAL_VRAM_SIZE', None) is None:
os.environ['MINERU_VIRTUAL_VRAM_SIZE']= str(get_virtual_vram_size())
def get_virtual_vram_size() -> int:
if virtual_vram is not None:
return virtual_vram
if get_device_mode().startswith("cuda") or get_device_mode().startswith("npu"):
return round(get_vram(get_device_mode()))
return 1
if os.getenv('MINERU_VIRTUAL_VRAM_SIZE', None) is None:
os.environ['MINERU_VIRTUAL_VRAM_SIZE']= str(get_virtual_vram_size())
if os.getenv('MINERU_MODEL_SOURCE', None) is None:
os.environ['MINERU_MODEL_SOURCE'] = model_source
if os.getenv('MINERU_MODEL_SOURCE', None) is None:
os.environ['MINERU_MODEL_SOURCE'] = model_source
os.makedirs(output_dir, exist_ok=True)

View File

@@ -8,15 +8,12 @@ from pathlib import Path
import pypdfium2 as pdfium
from loguru import logger
from mineru.backend.pipeline.pipeline_middle_json_mkcontent import union_make as pipeline_union_make
from mineru.backend.pipeline.model_json_to_middle_json import result_to_middle_json as pipeline_result_to_middle_json
from mineru.backend.vlm.vlm_middle_json_mkcontent import union_make as vlm_union_make
from mineru.backend.vlm.vlm_analyze import doc_analyze as vlm_doc_analyze
from mineru.backend.pipeline.pipeline_analyze import doc_analyze as pipeline_doc_analyze
from mineru.data.data_reader_writer import FileBasedDataWriter
from mineru.utils.draw_bbox import draw_layout_bbox, draw_span_bbox
from mineru.utils.enum_class import MakeMode
from mineru.utils.pdf_image_tools import images_bytes_to_pdf_bytes
from mineru.backend.vlm.vlm_middle_json_mkcontent import union_make as vlm_union_make
from mineru.backend.vlm.vlm_analyze import doc_analyze as vlm_doc_analyze
pdf_suffixes = [".pdf"]
image_suffixes = [".png", ".jpeg", ".jpg"]
@@ -99,6 +96,11 @@ def do_parse(
):
if backend == "pipeline":
from mineru.backend.pipeline.pipeline_middle_json_mkcontent import union_make as pipeline_union_make
from mineru.backend.pipeline.model_json_to_middle_json import result_to_middle_json as pipeline_result_to_middle_json
from mineru.backend.pipeline.pipeline_analyze import doc_analyze as pipeline_doc_analyze
for idx, pdf_bytes in enumerate(pdf_bytes_list):
new_pdf_bytes = convert_pdf_bytes_to_bytes_by_pypdfium2(pdf_bytes, start_page_id, end_page_id)
pdf_bytes_list[idx] = new_pdf_bytes
@@ -163,6 +165,7 @@ def do_parse(
logger.info(f"local output dir is {local_md_dir}")
else:
if backend.startswith("vlm-"):
backend = backend[4:]

View File

@@ -21,7 +21,7 @@ class MathDataset(Dataset):
class UnimernetModel(object):
def __init__(self, weight_dir, _device_="cpu"):
from .unimernet_hf import UnimernetModel
if _device_.startswith("mps"):
if _device_.startswith("mps") or _device_.startswith("npu"):
self.model = UnimernetModel.from_pretrained(weight_dir, attn_implementation="eager")
else:
self.model = UnimernetModel.from_pretrained(weight_dir)

View File

@@ -1,4 +1,5 @@
import os
import html
import cv2
import numpy as np
from loguru import logger
@@ -8,6 +9,11 @@ from mineru.utils.enum_class import ModelPath
from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
def escape_html(input_string):
"""Escape HTML Entities."""
return html.escape(input_string)
class RapidTableModel(object):
def __init__(self, ocr_engine):
slanet_plus_model_path = os.path.join(auto_download_and_get_model_root_path(ModelPath.slanet_plus), ModelPath.slanet_plus)
@@ -63,7 +69,7 @@ class RapidTableModel(object):
# Continue with OCR on potentially rotated image
ocr_result = self.ocr_engine.ocr(bgr_image)[0]
if ocr_result:
ocr_result = [[item[0], item[1][0], item[1][1]] for item in ocr_result if
ocr_result = [[item[0], escape_html(item[1][0]), item[1][1]] for item in ocr_result if
len(item) == 2 and isinstance(item[1], tuple)]
else:
ocr_result = None

View File

@@ -183,8 +183,8 @@ async def _one_request(
created_time: Optional[float],
):
tokenized_obj = await self._tokenize_one_request(obj)
self._send_one_request(obj, tokenized_obj, created_time)
async for out in self._wait_one_response(obj, request):
state = self._send_one_request(obj, tokenized_obj, created_time)
async for out in self._wait_one_response(obj, state, request):
yield out
@@ -256,8 +256,8 @@ async def _generate_request(
is_single = obj.is_single
if is_single:
tokenized_obj = await self._tokenize_one_request(obj)
self._send_one_request(obj, tokenized_obj, created_time)
async for response in self._wait_one_response(obj, request):
state = self._send_one_request(obj, tokenized_obj, created_time)
async for response in self._wait_one_response(obj, state, request):
yield response
else:
async for response in _handle_batch_request(self, obj, request, created_time):

View File

@@ -6,10 +6,26 @@ from sglang.srt.entrypoints.http_server import app, generate_request, launch_ser
from sglang.srt.managers.io_struct import GenerateReqInput
from sglang.srt.server_args import prepare_server_args
from sglang.srt.utils import kill_process_tree
from sglang.srt.conversation import Conversation
from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
from .logit_processor import Mineru2LogitProcessor
# mineru2.0的chat_template与chatml在换行上有微小区别
def custom_get_prompt(self) -> str:
system_prompt = self.system_template.format(system_message=self.system_message)
if self.system_message == "":
ret = ""
else:
ret = system_prompt + self.sep
for role, message in self.messages:
if message:
ret += role + "\n" + message + self.sep
else:
ret += role + "\n"
return ret
_custom_logit_processor_str = Mineru2LogitProcessor().to_str()
# remove the existing /generate route
@@ -45,6 +61,7 @@ def main():
if server_args.chat_template is None:
server_args.chat_template = "chatml"
Conversation.get_prompt = custom_get_prompt
server_args.enable_custom_logit_processor = True

View File

@@ -1,10 +1,15 @@
# Copyright (c) Opendatalab. All rights reserved.
import json
import os
import torch
from loguru import logger
try:
import torch
import torch_npu
except ImportError:
pass
# 定义配置文件名常量
CONFIG_FILE_NAME = os.getenv('MINERU_TOOLS_CONFIG_JSON', 'mineru.json')
@@ -78,7 +83,6 @@ def get_device():
return "mps"
else:
try:
import torch_npu
if torch_npu.npu.is_available():
return "npu"
except Exception as e:

View File

@@ -33,9 +33,11 @@ class CategoryId:
TableCaption = 6
TableFootnote = 7
InterlineEquation_Layout = 8
InterlineEquationNumber_Layout = 9
InlineEquation = 13
InterlineEquation_YOLO = 14
OcrText = 15
LowScoreText = 16
ImageFootnote = 101

View File

@@ -1,5 +1,4 @@
import time
import torch
import gc
from PIL import Image
from loguru import logger
@@ -7,6 +6,12 @@ import numpy as np
from mineru.utils.boxbase import get_minbox_if_overlap_by_ratio
try:
import torch
import torch_npu
except ImportError:
pass
def crop_img(input_res, input_img, crop_paste_x=0, crop_paste_y=0):
@@ -303,7 +308,6 @@ def clean_memory(device='cuda'):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
elif str(device).startswith("npu"):
import torch_npu
if torch_npu.npu.is_available():
torch_npu.npu.empty_cache()
elif str(device).startswith("mps"):
@@ -325,7 +329,6 @@ def get_vram(device):
total_memory = torch.cuda.get_device_properties(device).total_memory / (1024 ** 3) # 将字节转换为 GB
return total_memory
elif str(device).startswith("npu"):
import torch_npu
if torch_npu.npu.is_available():
total_memory = torch_npu.npu.get_device_properties(device).total_memory / (1024 ** 3) # 转为 GB
return total_memory

View File

@@ -1 +1 @@
__version__ = "2.0.1"
__version__ = "2.0.4"

View File

@@ -43,7 +43,7 @@ vlm = [
"pydantic",
]
sglang = [
"sglang[all]>=0.4.7",
"sglang[all]==0.4.7",
]
pipeline = [
"matplotlib>=3.10,<4",

View File

@@ -311,6 +311,22 @@
"created_at": "2025-06-13T14:02:16Z",
"repoId": 765083837,
"pullRequestNo": 2634
},
{
"name": "hotelll",
"id": 45009029,
"comment_id": 2978780331,
"created_at": "2025-06-17T03:09:54Z",
"repoId": 765083837,
"pullRequestNo": 2676
},
{
"name": "hsia",
"id": 654127,
"comment_id": 2979415817,
"created_at": "2025-06-17T17:35:10Z",
"repoId": 765083837,
"pullRequestNo": 2699
}
]
}