diff --git a/Dockerfile b/Dockerfile
index 82cd0a5556..6e53d29117 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -8,24 +8,22 @@ ARG USE_CN_MIRROR

 ENV DEBIAN_FRONTEND="noninteractive"

-RUN <<'EOF'
-set -e
-if [ "${USE_CN_MIRROR:-false}" = "true" ]; then
-  sed -i "s/deb.debian.org/mirrors.ustc.edu.cn/g" "/etc/apt/sources.list.d/debian.sources"
-fi
-apt update
-apt install ca-certificates proxychains-ng -qy
-mkdir -p /distroless/bin /distroless/etc /distroless/etc/ssl/certs /distroless/lib
-cp /usr/lib/$(arch)-linux-gnu/libproxychains.so.4 /distroless/lib/libproxychains.so.4
-cp /usr/lib/$(arch)-linux-gnu/libdl.so.2 /distroless/lib/libdl.so.2
-cp /usr/bin/proxychains4 /distroless/bin/proxychains
-cp /etc/proxychains4.conf /distroless/etc/proxychains4.conf
-cp /usr/lib/$(arch)-linux-gnu/libstdc++.so.6 /distroless/lib/libstdc++.so.6
-cp /usr/lib/$(arch)-linux-gnu/libgcc_s.so.1 /distroless/lib/libgcc_s.so.1
-cp /usr/local/bin/node /distroless/bin/node
-cp /etc/ssl/certs/ca-certificates.crt /distroless/etc/ssl/certs/ca-certificates.crt
-rm -rf /tmp/* /var/lib/apt/lists/* /var/tmp/*
-EOF
+RUN set -e && \
+  if [ "${USE_CN_MIRROR:-false}" = "true" ]; then \
+    sed -i "s/deb.debian.org/mirrors.ustc.edu.cn/g" "/etc/apt/sources.list.d/debian.sources"; \
+  fi && \
+  apt update && \
+  apt install ca-certificates proxychains-ng -qy && \
+  mkdir -p /distroless/bin /distroless/etc /distroless/etc/ssl/certs /distroless/lib && \
+  cp /usr/lib/$(arch)-linux-gnu/libproxychains.so.4 /distroless/lib/libproxychains.so.4 && \
+  cp /usr/lib/$(arch)-linux-gnu/libdl.so.2 /distroless/lib/libdl.so.2 && \
+  cp /usr/bin/proxychains4 /distroless/bin/proxychains && \
+  cp /etc/proxychains4.conf /distroless/etc/proxychains4.conf && \
+  cp /usr/lib/$(arch)-linux-gnu/libstdc++.so.6 /distroless/lib/libstdc++.so.6 && \
+  cp /usr/lib/$(arch)-linux-gnu/libgcc_s.so.1 /distroless/lib/libgcc_s.so.1 && \
+  cp /usr/local/bin/node /distroless/bin/node && \
+  cp /etc/ssl/certs/ca-certificates.crt /distroless/etc/ssl/certs/ca-certificates.crt && \
+  rm -rf /tmp/* /var/lib/apt/lists/* /var/tmp/*

 ## Builder image, install all the dependencies and build the app
 FROM base AS builder
@@ -77,23 +75,21 @@ COPY patches ./patches
 # bring in desktop workspace manifest so pnpm can resolve it
 COPY apps/desktop/src/main/package.json ./apps/desktop/src/main/package.json

-RUN <<'EOF'
-set -e
-if [ "${USE_CN_MIRROR:-false}" = "true" ]; then
-  export SENTRYCLI_CDNURL="https://npmmirror.com/mirrors/sentry-cli"
-  npm config set registry "https://registry.npmmirror.com/"
-  echo 'canvas_binary_host_mirror=https://npmmirror.com/mirrors/canvas' >> .npmrc
-fi
-export COREPACK_NPM_REGISTRY=$(npm config get registry | sed 's/\/$//')
-npm i -g corepack@latest
-corepack enable
-corepack use $(sed -n 's/.*"packageManager": "\(.*\)".*/\1/p' package.json)
-pnpm i
-mkdir -p /deps
-cd /deps
-pnpm init
-pnpm add pg drizzle-orm
-EOF
+RUN set -e && \
+  if [ "${USE_CN_MIRROR:-false}" = "true" ]; then \
+    export SENTRYCLI_CDNURL="https://npmmirror.com/mirrors/sentry-cli"; \
+    npm config set registry "https://registry.npmmirror.com/"; \
+    echo 'canvas_binary_host_mirror=https://npmmirror.com/mirrors/canvas' >> .npmrc; \
+  fi && \
+  export COREPACK_NPM_REGISTRY=$(npm config get registry | sed 's/\/$//') && \
+  npm i -g corepack@latest && \
+  corepack enable && \
+  corepack use $(sed -n 's/.*"packageManager": "\(.*\)".*/\1/p' package.json) && \
+  pnpm i && \
+  mkdir -p /deps && \
+  cd /deps && \
+  pnpm init && \
+  pnpm add pg drizzle-orm

 COPY . .

@@ -101,17 +97,15 @@ COPY . .
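Note on the pattern above: the heredoc RUN form being removed requires BuildKit's Dockerfile 1.4+ frontend, while a plain `&&` chain builds under any builder, which appears to be the motivation for the rewrite. A minimal sketch of the two equivalent forms, with an illustrative package name:

    # heredoc form - needs a BuildKit frontend (syntax=docker/dockerfile:1.4 or newer)
    RUN <<'EOF'
    set -e
    apt update
    apt install -qy ca-certificates
    EOF

    # chained form - portable; && already stops at the first failing command,
    # so the leading `set -e` retained in the rewritten RUN lines is harmless but redundant
    RUN apt update && \
        apt install -qy ca-certificates

Neither form traps failures inside command substitutions such as `$(arch)`.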
 RUN npm run build:docker

 # Prepare desktop export assets for Electron packaging (if generated)
-RUN <<'EOF'
-set -e
-if [ -d "/app/out" ]; then
-  mkdir -p /app/apps/desktop/dist/next
-  cp -a /app/out/. /app/apps/desktop/dist/next/
-  echo "✅ Copied Next export output into /app/apps/desktop/dist/next"
-else
-  echo "ℹ️ No Next export output found at /app/out, creating empty directory"
-  mkdir -p /app/apps/desktop/dist/next
-fi
-EOF
+RUN set -e && \
+  if [ -d "/app/out" ]; then \
+    mkdir -p /app/apps/desktop/dist/next && \
+    cp -a /app/out/. /app/apps/desktop/dist/next/ && \
+    echo "Copied Next export output into /app/apps/desktop/dist/next"; \
+  else \
+    echo "No Next export output found at /app/out, creating empty directory" && \
+    mkdir -p /app/apps/desktop/dist/next; \
+  fi

 ## Application image, copy all the files for production
 FROM busybox:latest AS app
@@ -138,12 +132,10 @@ COPY --from=builder /deps/node_modules/drizzle-orm /app/node_modules/drizzle-orm
 COPY --from=builder /app/scripts/serverLauncher/startServer.js /app/startServer.js
 COPY --from=builder /app/scripts/_shared /app/scripts/_shared

-RUN <<'EOF'
-set -e
-addgroup -S -g 1001 nodejs
-adduser -D -G nodejs -H -S -h /app -u 1001 nextjs
-chown -R nextjs:nodejs /app /etc/proxychains4.conf
-EOF
+RUN set -e && \
+  addgroup -S -g 1001 nodejs && \
+  adduser -D -G nodejs -H -S -h /app -u 1001 nextjs && \
+  chown -R nextjs:nodejs /app /etc/proxychains4.conf

 ## Production image, copy all the files and run next
 FROM scratch
diff --git a/locales/ar/chat.json b/locales/ar/chat.json
index e875c3c788..cc969ec650 100644
--- a/locales/ar/chat.json
+++ b/locales/ar/chat.json
@@ -338,6 +338,9 @@
   "task.activity.toolCalling": "جارٍ استدعاء {{toolName}}...",
   "task.activity.toolResult": "تم استلام نتيجة {{toolName}}",
   "task.batchTasks": "{{count}} مهمة فرعية مجمعة",
+  "task.groupTasks": "{{count}} مهام متوازية",
+  "task.groupTasksTitle": "{{agents}} و{{count}} مهام للوكلاء",
+  "task.groupTasksTitleSimple": "{{agents}} {{count}} مهام",
   "task.instruction": "تعليمات المهمة",
   "task.intermediateSteps": "{{count}} خطوة وسيطة",
   "task.metrics.duration": "(استغرق {{duration}})",
@@ -345,6 +348,7 @@
   "task.metrics.toolCallsShort": "استخدامات الأداة",
   "task.status.cancelled": "تم إلغاء المهمة",
   "task.status.failed": "فشلت المهمة",
+  "task.status.fetchingDetails": "جارٍ جلب التفاصيل...",
   "task.status.initializing": "جارٍ تهيئة المهمة...",
   "task.subtask": "مهمة فرعية",
   "thread.divider": "موضوع فرعي",
diff --git a/locales/ar/models.json b/locales/ar/models.json
index 650efc003f..3baaa63184 100644
--- a/locales/ar/models.json
+++ b/locales/ar/models.json
@@ -906,6 +906,9 @@
   "mistralai/Mixtral-8x7B-Instruct-v0.1.description": "Mixtral-8x7B Instruct (46.7B) يوفر قدرة عالية لمعالجة البيانات على نطاق واسع.",
   "mistralai/Mixtral-8x7B-v0.1.description": "Mixtral 8x7B هو نموذج MoE متفرق يعزز سرعة الاستدلال، مناسب للمهام متعددة اللغات وتوليد الكود.",
   "mistralai/mistral-nemo.description": "Mistral Nemo هو نموذج يحتوي على 7.3 مليار معامل يدعم لغات متعددة ويتميز بأداء قوي في البرمجة.",
+  "mixtral-8x7b-32768.description": "Mixtral 8x7B يوفر حوسبة متوازية مقاومة للأخطاء للمهام المعقدة.",
+  "mixtral.description": "Mixtral هو نموذج MoE من Mistral AI بوزن مفتوح، يدعم توليد الشيفرات وفهم اللغة.",
+  "mixtral:8x22b.description": "Mixtral هو نموذج MoE من Mistral AI بوزن مفتوح، يدعم توليد الشيفرات وفهم اللغة.",
   "moonshot-v1-128k-vision-preview.description": "نماذج Kimi للرؤية (بما في ذلك moonshot-v1-8k-vision-preview/moonshot-v1-32k-vision-preview/moonshot-v1-128k-vision-preview) قادرة على فهم محتوى الصور مثل النصوص، الألوان، وأشكال الكائنات.",
   "moonshot-v1-128k.description": "Moonshot V1 128K يوفر سياقًا طويلًا للغاية لتوليد نصوص طويلة جدًا، حيث يتعامل مع ما يصل إلى 128,000 رمز، مما يجعله مثاليًا للبحث، والأكاديميا، والوثائق الكبيرة.",
   "moonshot-v1-32k-vision-preview.description": "نماذج Kimi للرؤية (بما في ذلك moonshot-v1-8k-vision-preview/moonshot-v1-32k-vision-preview/moonshot-v1-128k-vision-preview) قادرة على فهم محتوى الصور مثل النصوص، الألوان، وأشكال الكائنات.",
@@ -941,6 +944,68 @@
   "o4-mini-2025-04-16.description": "o4-mini هو نموذج تفكير من OpenAI بمدخلات نصية وصورية ومخرجات نصية، مناسب للمهام المعقدة التي تتطلب معرفة واسعة، مع نافذة سياق بحجم 200K.",
   "o4-mini-deep-research.description": "o4-mini-deep-research هو نموذج بحث عميق أسرع وأكثر توفيرًا للمهام البحثية متعددة الخطوات المعقدة. يمكنه البحث في الويب والوصول إلى بياناتك عبر موصلات MCP.",
   "o4-mini.description": "o4-mini هو أحدث نموذج صغير من سلسلة o، مُحسّن للتفكير السريع والفعال مع كفاءة عالية في مهام البرمجة والرؤية.",
+  "open-codestral-mamba.description": "Codestral Mamba هو نموذج لغة من نوع Mamba 2 يركز على توليد الشيفرات، ويدعم مهام البرمجة المتقدمة والتفكير المنطقي.",
+  "open-mistral-7b.description": "Mistral 7B هو نموذج صغير الحجم عالي الأداء، قوي في المعالجة الدُفعية والمهام البسيطة مثل التصنيف وتوليد النصوص، مع قدرات منطقية قوية.",
+  "open-mistral-nemo.description": "Mistral Nemo هو نموذج بحجم 12B تم تطويره بالتعاون مع Nvidia، ويقدم أداءً قويًا في التفكير والبرمجة مع سهولة في التكامل.",
+  "open-mixtral-8x22b.description": "Mixtral 8x22B هو نموذج MoE أكبر للمهام المعقدة، يوفر قدرات منطقية قوية وإنتاجية أعلى.",
+  "open-mixtral-8x7b.description": "Mixtral 8x7B هو نموذج MoE متفرق يعزز سرعة الاستدلال، مناسب للمهام متعددة اللغات وتوليد الشيفرات.",
+  "openai/gpt-3.5-turbo-instruct.description": "قدرات مشابهة لنماذج GPT-3، متوافقة مع نقاط نهاية الإكمال القديمة بدلاً من الدردشة.",
+  "openai/gpt-3.5-turbo.description": "أقوى وأوفر نماذج GPT-3.5 من OpenAI، محسّن للدردشة مع أداء قوي في الإكمالات التقليدية.",
+  "openai/gpt-4-turbo.description": "gpt-4-turbo من OpenAI يتمتع بمعرفة عامة واسعة وخبرة تخصصية، يتبع تعليمات اللغة الطبيعية المعقدة ويحل المشكلات الصعبة بدقة. تاريخ التحديث المعرفي: أبريل 2023، مع نافذة سياق 128K.",
+  "openai/gpt-4.1-mini.description": "GPT-4.1 Mini يقدم زمن استجابة أقل وقيمة أفضل للمهام متوسطة السياق.",
+  "openai/gpt-4.1-nano.description": "GPT-4.1 Nano هو خيار منخفض التكلفة وزمن استجابة منخفض للمحادثات القصيرة المتكررة أو التصنيف.",
+  "openai/gpt-4.1.description": "سلسلة GPT-4.1 توفر نوافذ سياق أكبر وقدرات هندسية ومنطقية أقوى.",
+  "openai/gpt-4o-mini.description": "GPT-4o-mini هو إصدار صغير وسريع من GPT-4o للاستخدام متعدد الوسائط بزمن استجابة منخفض.",
+  "openai/gpt-4o.description": "عائلة GPT-4o هي نموذج Omni من OpenAI يدعم إدخال نصوص وصور وإخراج نصي.",
+  "openai/gpt-5-chat.description": "GPT-5 Chat هو إصدار من GPT-5 محسّن للمحادثات بزمن استجابة أقل لتفاعل أفضل.",
+  "openai/gpt-5-codex.description": "GPT-5-Codex هو إصدار من GPT-5 محسّن أكثر للبرمجة وتدفقات العمل البرمجية واسعة النطاق.",
+  "openai/gpt-5-mini.description": "GPT-5 Mini هو إصدار أصغر من GPT-5 للسيناريوهات منخفضة التكلفة وزمن الاستجابة.",
+  "openai/gpt-5-nano.description": "GPT-5 Nano هو الإصدار فائق الصغر للسيناريوهات ذات القيود الصارمة على التكلفة وزمن الاستجابة.",
+  "openai/gpt-5-pro.description": "GPT-5 Pro هو النموذج الرائد من OpenAI، يوفر قدرات منطقية قوية وتوليد شيفرات وميزات على مستوى المؤسسات، مع توجيه ذكي أثناء وقت التنفيذ وسياسات أمان أكثر صرامة.",
+  "openai/gpt-5.1-chat.description": "GPT-5.1 Chat هو العضو الخفيف في عائلة GPT-5.1، محسّن للمحادثات بزمن استجابة منخفض مع الحفاظ على قدرات منطقية قوية وتنفيذ التعليمات.",
+  "openai/gpt-5.1-codex-mini.description": "GPT-5.1-Codex-Mini هو إصدار أصغر وأسرع من GPT-5.1-Codex، مثالي للبرمجة في سيناريوهات حساسة للتكلفة وزمن الاستجابة.",
+  "openai/gpt-5.1-codex.description": "GPT-5.1-Codex هو إصدار من GPT-5.1 محسّن لهندسة البرمجيات وتدفقات العمل البرمجية، مناسب لإعادة هيكلة واسعة، وتصحيح الأخطاء المعقدة، ومهام البرمجة الذاتية الطويلة.",
+  "openai/gpt-5.1.description": "GPT-5.1 هو النموذج الرائد الأحدث في سلسلة GPT-5، مع تحسينات كبيرة في التفكير العام، اتباع التعليمات، وطبيعية المحادثة، مناسب لمجموعة واسعة من المهام.",
+  "openai/gpt-5.description": "GPT-5 هو نموذج عالي الأداء من OpenAI لمجموعة واسعة من المهام الإنتاجية والبحثية.",
+  "openai/gpt-oss-120b.description": "نموذج لغة كبير عام القدرات يتمتع بتفكير قوي وقابل للتحكم.",
+  "openai/gpt-oss-20b.description": "نموذج لغة صغير الحجم بوزن مفتوح، محسّن لزمن استجابة منخفض وبيئات محدودة الموارد، بما في ذلك النشر المحلي وعلى الأطراف.",
+  "openai/o1-mini.description": "o1-mini هو نموذج تفكير سريع وفعال من حيث التكلفة مصمم للبرمجة والرياضيات والعلوم. يدعم سياق 128K وتاريخ معرفة حتى أكتوبر 2023.",
+  "openai/o1-preview.description": "o1 هو نموذج التفكير الجديد من OpenAI للمهام المعقدة التي تتطلب معرفة واسعة. يدعم سياق 128K وتاريخ معرفة حتى أكتوبر 2023.",
+  "openai/o1.description": "OpenAI o1 هو نموذج تفكير رائد مصمم لحل المشكلات المعقدة التي تتطلب تفكيرًا عميقًا، ويقدم تفكيرًا قويًا ودقة أعلى في المهام متعددة الخطوات.",
+  "openai/o3-mini-high.description": "o3-mini (تفكير عالي) يقدم ذكاءً أعلى بنفس تكلفة وزمن استجابة o1-mini.",
+  "openai/o3-mini.description": "o3-mini هو أحدث نموذج تفكير صغير من OpenAI، يقدم ذكاءً أعلى بنفس تكلفة وزمن استجابة o1-mini.",
+  "openai/o3.description": "OpenAI o3 هو أقوى نموذج تفكير، يحقق أداءً رائدًا في البرمجة والرياضيات والعلوم والإدراك البصري. يتفوق في الاستفسارات المعقدة متعددة الأبعاد، ويتميز بتحليل الصور والمخططات والرسوم البيانية.",
+  "openai/o4-mini-high.description": "o4-mini فئة التفكير العالي، محسّن للتفكير السريع والفعال مع أداء قوي في البرمجة والرؤية.",
+  "openai/o4-mini.description": "OpenAI o4-mini هو نموذج تفكير صغير وفعال للسيناريوهات ذات زمن الاستجابة المنخفض.",
+  "openai/text-embedding-3-large.description": "أقوى نموذج تضمين من OpenAI للمهام باللغة الإنجليزية وغير الإنجليزية.",
+  "openai/text-embedding-3-small.description": "إصدار محسّن عالي الأداء من نموذج تضمين ada.",
+  "openai/text-embedding-ada-002.description": "نموذج تضمين النصوص القديم من OpenAI.",
+  "openrouter/auto.description": "استنادًا إلى طول السياق والموضوع والتعقيد، يتم توجيه طلبك إلى Llama 3 70B Instruct أو Claude 3.5 Sonnet (بمراقبة ذاتية) أو GPT-4o.",
+  "perplexity/sonar-pro.description": "المنتج الرائد من Perplexity مع دعم البحث، يدعم الاستفسارات المتقدمة والمتابعة.",
+  "perplexity/sonar-reasoning-pro.description": "نموذج متقدم يركز على التفكير، ينتج سلسلة تفكير (CoT) مع بحث محسّن، بما في ذلك استعلامات بحث متعددة لكل طلب.",
+  "perplexity/sonar-reasoning.description": "نموذج يركز على التفكير، ينتج سلسلة تفكير (CoT) مع شروحات مفصلة مدعومة بالبحث.",
+  "perplexity/sonar.description": "منتج خفيف من Perplexity مع دعم البحث، أسرع وأرخص من Sonar Pro.",
+  "phi3.description": "Phi-3 هو نموذج مفتوح وخفيف من Microsoft للتكامل الفعال والتفكير واسع النطاق.",
+  "phi3:14b.description": "Phi-3 هو نموذج مفتوح وخفيف من Microsoft للتكامل الفعال والتفكير واسع النطاق.",
+  "pixtral-12b-2409.description": "Pixtral يتميز بفهم الرسوم البيانية/الصور، والإجابة على الأسئلة من المستندات، والتفكير متعدد الوسائط، واتباع التعليمات. يستوعب الصور بدقة ونسبة أبعاد أصلية ويدعم أي عدد من الصور ضمن نافذة سياق 128K.",
+  "pixtral-large-latest.description": "Pixtral Large هو نموذج متعدد الوسائط مفتوح يحتوي على 124 مليار معامل، مبني على Mistral Large 2، الثاني في عائلتنا متعددة الوسائط مع فهم متقدم للصور.",
+  "pro-128k.description": "Spark Pro 128K يوفر سعة سياق كبيرة جدًا تصل إلى 128K، مثالي للمستندات الطويلة التي تتطلب تحليل نص كامل وتماسك بعيد المدى، مع منطق سلس ودعم استشهاد متنوع في المناقشات المعقدة.",
+  "pro-deepseek-r1.description": "نموذج خدمة مخصص للمؤسسات مع تزامن مدمج.",
+  "pro-deepseek-v3.description": "نموذج خدمة مخصص للمؤسسات مع تزامن مدمج.",
+  "qianfan-70b.description": "Qianfan 70B هو نموذج صيني كبير لتوليد عالي الجودة وتفكير معقد.",
+  "qianfan-8b.description": "Qianfan 8B هو نموذج عام متوسط الحجم يوازن بين التكلفة والجودة لتوليد النصوص والإجابة على الأسئلة.",
+  "qianfan-agent-intent-32k.description": "Qianfan Agent Intent 32K يستهدف التعرف على النوايا وتنظيم الوكلاء مع دعم سياق طويل.",
+  "qianfan-agent-lite-8k.description": "Qianfan Agent Lite 8K هو نموذج وكيل خفيف للحوار متعدد الأدوار منخفض التكلفة وتدفقات العمل.",
+  "qianfan-agent-speed-32k.description": "Qianfan Agent Speed 32K هو نموذج وكيل عالي الإنتاجية لتطبيقات الوكلاء متعددة المهام واسعة النطاق.",
+  "qianfan-agent-speed-8k.description": "Qianfan Agent Speed 8K هو نموذج وكيل عالي التزامن للمحادثات القصيرة إلى المتوسطة والاستجابة السريعة.",
+  "qianfan-check-vl.description": "Qianfan Check VL هو نموذج مراجعة محتوى متعدد الوسائط لمهام التوافق والتعرف على الصور والنصوص.",
+  "qianfan-composition.description": "Qianfan Composition هو نموذج إنشاء متعدد الوسائط لفهم وتوليد الصور والنصوص المختلطة.",
+  "qianfan-engcard-vl.description": "Qianfan EngCard VL هو نموذج تعرف متعدد الوسائط يركز على السيناريوهات الإنجليزية.",
+  "qianfan-lightning-128b-a19b.description": "Qianfan Lightning 128B A19B هو نموذج صيني عام عالي الأداء للأسئلة المعقدة والتفكير واسع النطاق.",
+  "qianfan-llama-vl-8b.description": "Qianfan Llama VL 8B هو نموذج متعدد الوسائط مبني على Llama لفهم عام للصور والنصوص.",
+  "qianfan-multipicocr.description": "Qianfan MultiPicOCR هو نموذج OCR متعدد الصور لاكتشاف النصوص والتعرف عليها عبر الصور.",
+  "qianfan-qi-vl.description": "Qianfan QI VL هو نموذج سؤال وجواب متعدد الوسائط للاسترجاع الدقيق والإجابة في سيناريوهات الصور والنصوص المعقدة.",
+  "qianfan-singlepicocr.description": "Qianfan SinglePicOCR هو نموذج OCR لصورة واحدة بدقة عالية في التعرف على الأحرف.",
   "qianfan-vl-70b.description": "Qianfan VL 70B هو نموذج لغة بصري كبير لفهم معقد للنصوص والصور.",
   "qianfan-vl-8b.description": "Qianfan VL 8B هو نموذج لغة بصري خفيف الوزن مخصص للإجابة اليومية على الأسئلة وتحليل الصور والنصوص.",
   "qvq-72b-preview.description": "QVQ-72B-Preview هو نموذج بحث تجريبي من Qwen يركز على تحسين الاستدلال البصري.",
diff --git a/locales/bg-BG/chat.json b/locales/bg-BG/chat.json
index 048fc2cf78..721512d130 100644
--- a/locales/bg-BG/chat.json
+++ b/locales/bg-BG/chat.json
@@ -338,6 +338,9 @@
   "task.activity.toolCalling": "Извикване на {{toolName}}...",
   "task.activity.toolResult": "Получен резултат от {{toolName}}",
   "task.batchTasks": "{{count}} групови подзадачи",
+  "task.groupTasks": "{{count}} паралелни задачи",
+  "task.groupTasksTitle": "{{agents}} и {{count}} задачи на агенти",
+  "task.groupTasksTitleSimple": "{{agents}} {{count}} задачи",
   "task.instruction": "Инструкция за задача",
   "task.intermediateSteps": "{{count}} междинни стъпки",
   "task.metrics.duration": "(отне {{duration}})",
@@ -345,6 +348,7 @@
   "task.metrics.toolCallsShort": "използвания на инструменти",
   "task.status.cancelled": "Задачата е отменена",
   "task.status.failed": "Задачата е неуспешна",
+  "task.status.fetchingDetails": "Извличане на подробности...",
   "task.status.initializing": "Инициализиране на задачата...",
   "task.subtask": "Подзадача",
   "thread.divider": "Подтема",
diff --git a/locales/bg-BG/models.json b/locales/bg-BG/models.json
index c0dd9c98f5..73d6b005be 100644
--- a/locales/bg-BG/models.json
+++ b/locales/bg-BG/models.json
@@ -724,6 +724,16 @@
   "imagen-4.0-ultra-generate-001.description": "Ultra версията от четвъртото поколение модели Imagen за преобразуване от текст към изображение.",
   "imagen-4.0-ultra-generate-preview-06-06.description": "Ultra вариант от четвъртото поколение модели Imagen за преобразуване от текст към изображение.",
   "inception/mercury-coder-small.description": "Mercury Coder Small е идеален за генериране на код, отстраняване на грешки и рефакториране с минимално закъснение.",
+  "inclusionAI/Ling-flash-2.0.description": "Ling-flash-2.0 е третият модел от архитектурата Ling 2.0, разработен от екипа Bailing на Ant Group. Това е MoE модел със 100 милиарда общи параметри, от които само 6.1 милиарда са активни на токен (4.8 милиарда без вграждане). Въпреки леката си конфигурация, той съперничи или надминава плътни модели с 40 милиарда параметри и дори по-големи MoE модели в множество бенчмаркове, като постига висока ефективност чрез архитектура и стратегия на обучение.",
+  "inclusionAI/Ling-mini-2.0.description": "Ling-mini-2.0 е малък, високоефективен MoE LLM с 16 милиарда общи параметри и само 1.4 милиарда активни на токен (789 милиона без вграждане), осигуряващ изключително бързо генериране. Благодарение на ефективния MoE дизайн и голям обем висококачествени обучителни данни, той постига водеща производителност, сравнима с плътни модели под 10 милиарда и по-големи MoE модели.",
+  "inclusionAI/Ring-flash-2.0.description": "Ring-flash-2.0 е високоефективен мислещ модел, оптимизиран от базовия Ling-flash-2.0. Използва MoE архитектура със 100 милиарда общи параметри и само 6.1 милиарда активни при всяка инференция. Алгоритъмът icepop стабилизира обучението с подсилване (RL) за MoE модели, позволявайки допълнителни подобрения в сложното разсъждение. Постига значителни пробиви в трудни бенчмаркове (математически състезания, генериране на код, логическо мислене), надминавайки водещи плътни модели под 40 милиарда и съперничейки на по-големи отворени и затворени MoE модели за разсъждение. Също така се представя добре в творческо писане, а ефективната му архитектура осигурява бърза инференция с по-ниски разходи за внедряване при висока едновременност.",
+  "inclusionai/ling-1t.description": "Ling-1T е MoE модел на inclusionAI с 1 трилион параметри, оптимизиран за задачи с висока интензивност на разсъждение и работа с голям контекст.",
+  "inclusionai/ling-flash-2.0.description": "Ling-flash-2.0 е MoE модел на inclusionAI, оптимизиран за ефективност и производителност при разсъждение, подходящ за средни до големи задачи.",
+  "inclusionai/ling-mini-2.0.description": "Ling-mini-2.0 е лек MoE модел на inclusionAI, който значително намалява разходите, като същевременно запазва способността за разсъждение.",
+  "inclusionai/ming-flash-omini-preview.description": "Ming-flash-omni Preview е мултимодален модел на inclusionAI, поддържащ вход от реч, изображение и видео, с подобрено визуализиране на изображения и разпознаване на реч.",
+  "inclusionai/ring-1t.description": "Ring-1T е MoE модел на inclusionAI с трилион параметри, предназначен за мащабни задачи по разсъждение и научни изследвания.",
+  "inclusionai/ring-flash-2.0.description": "Ring-flash-2.0 е вариант на модела Ring от inclusionAI за сценарии с висока пропускателна способност, с акцент върху скоростта и ефективността на разходите.",
+  "inclusionai/ring-mini-2.0.description": "Ring-mini-2.0 е лек MoE модел на inclusionAI с висока пропускателна способност, създаден за едновременна работа.",
   "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3 е отворен LLM, предназначен за разработчици, изследователи и предприятия, създаден да им помага да изграждат, експериментират и отговорно мащабират идеи за генеративен ИИ.
Като част от основата за глобални иновации в общността, той е подходящ за среди с ограничени изчислителни ресурси, крайни устройства и по-бързо обучение.",
   "meta/Llama-3.2-11B-Vision-Instruct.description": "Силен визуален анализ на изображения с висока резолюция, подходящ за приложения за визуално разбиране.",
   "meta/Llama-3.2-90B-Vision-Instruct.description": "Разширен визуален анализ за приложения с агенти за визуално разбиране.",
diff --git a/locales/de-DE/chat.json b/locales/de-DE/chat.json
index 08a1ca6f40..e73475667d 100644
--- a/locales/de-DE/chat.json
+++ b/locales/de-DE/chat.json
@@ -338,6 +338,9 @@
   "task.activity.toolCalling": "{{toolName}} wird aufgerufen...",
   "task.activity.toolResult": "{{toolName}}-Ergebnis empfangen",
   "task.batchTasks": "{{count}} Teilaufgaben",
+  "task.groupTasks": "{{count}} parallele Aufgaben",
+  "task.groupTasksTitle": "{{agents}} und {{count}} Aufgaben von Agenten",
+  "task.groupTasksTitleSimple": "{{agents}} {{count}} Aufgaben",
   "task.instruction": "Aufgabenanweisung",
   "task.intermediateSteps": "{{count}} Zwischenschritte",
   "task.metrics.duration": "(dauerte {{duration}})",
@@ -345,6 +348,7 @@
   "task.metrics.toolCallsShort": "Tool-Nutzungen",
   "task.status.cancelled": "Aufgabe abgebrochen",
   "task.status.failed": "Aufgabe fehlgeschlagen",
+  "task.status.fetchingDetails": "Details werden abgerufen...",
   "task.status.initializing": "Aufgabe wird initialisiert...",
   "task.subtask": "Teilaufgabe",
   "thread.divider": "Unterthema",
diff --git a/locales/de-DE/models.json b/locales/de-DE/models.json
index 5b1d7ed191..e89f19a3e5 100644
--- a/locales/de-DE/models.json
+++ b/locales/de-DE/models.json
@@ -1118,6 +1118,46 @@
   "qwen3-vl-flash.description": "Qwen3 VL Flash: leichtgewichtige, hochschnelle Denkversion für latenzempfindliche oder hochvolumige Anfragen.",
   "qwen3-vl-plus.description": "Qwen VL ist ein Textgenerierungsmodell mit Bildverständnis. Es kann OCR durchführen sowie zusammenfassen und schlussfolgern, z. B. Attribute aus Produktfotos extrahieren oder Probleme aus Bildern lösen.",
   "qwen3.description": "Qwen3 ist Alibabas nächste Generation eines großen Sprachmodells mit starker Leistung in vielfältigen Anwendungsfällen.",
+  "qwq-32b-preview.description": "QwQ ist ein experimentelles Forschungsmodell von Qwen mit Fokus auf verbesserte Schlussfolgerungen.",
+  "qwq-32b.description": "QwQ ist ein Schlussfolgerungsmodell aus der Qwen-Familie. Im Vergleich zu standardmäßig instruktionstunierten Modellen bietet es überlegene Denk- und Schlussfolgerungsfähigkeiten, die die Leistung bei nachgelagerten Aufgaben deutlich steigern – insbesondere bei komplexen Problemen. QwQ-32B ist ein mittelgroßes Modell, das mit führenden Schlussfolgerungsmodellen wie DeepSeek-R1 und o1-mini konkurriert.",
+  "qwq-plus.description": "Das auf Qwen2.5 trainierte QwQ-Schlussfolgerungsmodell nutzt Reinforcement Learning (RL), um die Denkfähigkeiten erheblich zu verbessern. Zentrale Kennzahlen in Mathematik/Code (AIME 24/25, LiveCodeBench) sowie in allgemeinen Benchmarks (IFEval, LiveBench) erreichen das Niveau von DeepSeek-R1.",
+  "qwq.description": "QwQ ist ein Schlussfolgerungsmodell aus der Qwen-Familie. Im Vergleich zu standardmäßig instruktionstunierten Modellen bietet es überlegene Denk- und Schlussfolgerungsfähigkeiten, die die Leistung bei nachgelagerten Aufgaben deutlich verbessern – insbesondere bei schwierigen Problemen. QwQ-32B ist ein mittelgroßes Modell, das mit führenden Schlussfolgerungsmodellen wie DeepSeek-R1 und o1-mini mithalten kann.",
+  "qwq_32b.description": "Mittelgroßes Schlussfolgerungsmodell aus der Qwen-Familie. Im Vergleich zu standardmäßig instruktionstunierten Modellen steigern QwQs Denk- und Schlussfolgerungsfähigkeiten die Leistung bei nachgelagerten Aufgaben deutlich – insbesondere bei schwierigen Problemen.",
+  "r1-1776.description": "R1-1776 ist eine nachtrainierte Variante von DeepSeek R1, die darauf ausgelegt ist, unzensierte, objektive und faktenbasierte Informationen bereitzustellen.",
+  "solar-mini-ja.description": "Solar Mini (Ja) erweitert Solar Mini mit einem Fokus auf Japanisch und behält dabei eine effiziente und starke Leistung in Englisch und Koreanisch bei.",
+  "solar-mini.description": "Solar Mini ist ein kompaktes LLM, das GPT-3.5 übertrifft. Es bietet starke mehrsprachige Fähigkeiten in Englisch und Koreanisch und ist eine effiziente Lösung mit kleinem Ressourcenbedarf.",
+  "solar-pro.description": "Solar Pro ist ein hochintelligentes LLM von Upstage, das auf Befolgen von Anweisungen auf einer einzelnen GPU ausgelegt ist und IFEval-Werte über 80 erreicht. Derzeit wird Englisch unterstützt; die vollständige Veröffentlichung mit erweitertem Sprachsupport und längeren Kontexten war für November 2024 geplant.",
+  "sonar-deep-research.description": "Deep Research führt umfassende Expertenrecherchen durch und bereitet diese in zugänglichen, umsetzbaren Berichten auf.",
+  "sonar-pro.description": "Ein fortschrittliches Suchprodukt mit fundierter Suche für komplexe Anfragen und Folgefragen.",
+  "sonar-reasoning-pro.description": "Ein fortschrittliches Suchprodukt mit fundierter Suche für komplexe Anfragen und Folgefragen.",
+  "sonar-reasoning.description": "Ein fortschrittliches Suchprodukt mit fundierter Suche für komplexe Anfragen und Folgefragen.",
+  "sonar.description": "Ein leichtgewichtiges, suchbasiertes Produkt – schneller und kostengünstiger als Sonar Pro.",
+  "spark-x.description": "X1.5-Updates: (1) Einführung eines dynamischen Denkmodus, gesteuert über das Feld `thinking`; (2) größere Kontextlänge mit 64K Eingabe und 64K Ausgabe; (3) Unterstützung für FunctionCall.",
+  "stable-diffusion-3-medium.description": "Das neueste Text-zu-Bild-Modell von Stability AI. Diese Version verbessert die Bildqualität, das Textverständnis und die Stilvielfalt erheblich, interpretiert komplexe Spracheingaben präziser und erzeugt genauere, vielfältigere Bilder.",
+  "stable-diffusion-3.5-large-turbo.description": "stable-diffusion-3.5-large-turbo nutzt Adversarial Diffusion Distillation (ADD) auf stable-diffusion-3.5-large für höhere Geschwindigkeit.",
+  "stable-diffusion-3.5-large.description": "stable-diffusion-3.5-large ist ein MMDiT Text-zu-Bild-Modell mit 800 Millionen Parametern, das hervorragende Qualität und präzise Prompt-Umsetzung bietet. Es unterstützt 1-Megapixel-Bilder und läuft effizient auf Consumer-Hardware.",
+  "stable-diffusion-v1.5.description": "stable-diffusion-v1.5 basiert auf dem v1.2-Checkpoint und wurde 595.000 Schritte lang auf „laion-aesthetics v2 5+“ bei 512x512 Auflösung feinjustiert. Die Textkonditionierung wurde um 10 % reduziert, um das classifier-free guidance sampling zu verbessern.",
+  "stable-diffusion-xl-base-1.0.description": "Ein Open-Source-Text-zu-Bild-Modell von Stability AI mit branchenführender kreativer Bildgenerierung. Es versteht Anweisungen sehr gut und unterstützt umgekehrte Prompt-Definitionen für präzise Generierung.",
+  "stable-diffusion-xl.description": "stable-diffusion-xl bringt große Verbesserungen gegenüber v1.5 und erreicht Ergebnisse auf dem Niveau der besten offenen Text-zu-Bild-Modelle. Zu den Verbesserungen gehören ein dreimal größerer UNet-Backbone, ein Verfeinerungsmodul für bessere Bildqualität und effizientere Trainingstechniken.",
+  "step-1-128k.description": "Ausgewogenes Verhältnis von Leistung und Kosten für allgemeine Anwendungsfälle.",
+  "step-1-256k.description": "Verarbeitung extralanger Kontexte – ideal für die Analyse langer Dokumente.",
+  "step-1-32k.description": "Unterstützt mittellange Konversationen für vielfältige Szenarien.",
+  "step-1-8k.description": "Kleines Modell für einfache Aufgaben mit geringem Ressourcenbedarf.",
+  "step-1-flash.description": "Hochgeschwindigkeitsmodell für Echtzeit-Chats geeignet.",
+  "step-1.5v-mini.description": "Starke Fähigkeiten im Videoverständnis.",
+  "step-1o-turbo-vision.description": "Starkes Bildverständnis, übertrifft 1o in Mathematik und Programmierung. Kleiner als 1o mit schnellerer Ausgabe.",
+  "step-1o-vision-32k.description": "Starkes Bildverständnis mit besserer visueller Leistung als die Step-1V-Serie.",
+  "step-1v-32k.description": "Unterstützt visuelle Eingaben für reichhaltige multimodale Interaktionen.",
+  "step-1v-8k.description": "Kleines Vision-Modell für grundlegende Bild-und-Text-Aufgaben.",
+  "step-1x-edit.description": "Dieses Modell konzentriert sich auf die Bildbearbeitung – es verändert und verbessert Bilder basierend auf benutzerdefinierten Bildern und Texten. Es unterstützt mehrere Eingabeformate, darunter Textbeschreibungen und Beispielbilder, und erzeugt Bearbeitungen, die der Benutzerabsicht entsprechen.",
+  "step-1x-medium.description": "Dieses Modell bietet starke Bildgenerierung auf Basis von Texteingaben. Mit nativer Unterstützung für Chinesisch versteht es chinesische Beschreibungen besser, erfasst deren Bedeutung und wandelt sie in visuelle Merkmale für eine präzisere Generierung um. Es erzeugt hochauflösende, qualitativ hochwertige Bilder und unterstützt einen gewissen Grad an Stilübertragung.",
+  "step-2-16k-exp.description": "Experimentelle Step-2-Version mit den neuesten Funktionen und laufenden Updates. Nicht für den Produktionseinsatz empfohlen.",
+  "step-2-16k.description": "Unterstützt Interaktionen mit großem Kontext für komplexe Dialoge.",
+  "step-2-mini.description": "Basierend auf der nächsten Generation der internen MFA-Attention-Architektur liefert es Ergebnisse auf Step-1-Niveau bei deutlich geringeren Kosten, höherem Durchsatz und geringerer Latenz. Bewältigt allgemeine Aufgaben mit starker Programmierleistung.",
+  "step-2x-large.description": "Ein neues StepFun-Bildmodell der nächsten Generation mit Fokus auf Bildgenerierung. Es erzeugt hochwertige Bilder aus Texteingaben mit realistischeren Texturen und besserer Darstellung chinesischer/englischer Texte.",
+  "step-3.description": "Dieses Modell verfügt über starke visuelle Wahrnehmung und komplexe Schlussfolgerungsfähigkeiten. Es verarbeitet domänenübergreifendes Wissen, analysiert Mathematik und visuelle Inhalte gemeinsam und bewältigt eine Vielzahl alltäglicher visueller Analyseaufgaben.",
+  "step-r1-v-mini.description": "Ein Schlussfolgerungsmodell mit starkem Bildverständnis, das Bilder und Texte verarbeiten und anschließend durch tiefes Denken Text generieren kann. Es glänzt im visuellen Denken und liefert Spitzenleistungen in Mathematik, Programmierung und Textverständnis – mit einem Kontextfenster von 100K.",
+  "stepfun-ai/step3.description": "Step3 ist ein hochmodernes multimodales Schlussfolgerungsmodell von StepFun, basierend auf einer MoE-Architektur mit insgesamt 321B und 38B aktiven Parametern. Sein End-to-End-Design minimiert die Dekodierungskosten und liefert erstklassige Vision-Language-Schlussfolgerungen. Dank MFA- und AFD-Design bleibt es sowohl auf High-End- als auch auf Low-End-Beschleunigern effizient. Das Pretraining umfasst über 20T Text-Tokens und 4T Bild-Text-Tokens in vielen Sprachen. Es erreicht führende Leistungen bei offenen Modellen in Mathematik, Code und multimodalen Benchmarks.",
   "taichu_llm.description": "Trainiert mit umfangreichen hochwertigen Daten, mit verbesserter Textverständnis, Inhaltserstellung und dialogbasierter Fragebeantwortung.",
   "taichu_o1.description": "taichu_o1 ist ein Next-Gen-Reasoning-Modell, das multimodale Interaktion und Reinforcement Learning nutzt, um menschenähnliches Denken in Ketten zu ermöglichen. Es unterstützt komplexe Entscheidungssimulationen, legt Denkpfade offen und liefert hochpräzise Ergebnisse – ideal für strategische Analysen und tiefgehendes Denken.",
   "taichu_vl.description": "Kombiniert Bildverständnis, Wissensübertragung und logische Zuordnung – herausragend bei Bild-Text-Fragen.",
@@ -1182,6 +1222,7 @@
   "z-ai/glm-4.5-air.description": "GLM 4.5 Air ist eine leichtgewichtige Variante von GLM 4.5 für kostensensitive Szenarien bei gleichzeitig starker Reasoning-Leistung.",
   "z-ai/glm-4.5.description": "GLM 4.5 ist Z.AIs Flaggschiffmodell mit hybridem Reasoning, optimiert für Engineering- und Langkontextaufgaben.",
   "z-ai/glm-4.6.description": "GLM 4.6 ist Z.AIs Flaggschiffmodell mit erweitertem Kontextumfang und Codierungsfähigkeiten.",
+  "z-ai/glm-4.7.description": "GLM-4.7 ist das neueste Flaggschiffmodell von Zhipu mit verbesserten allgemeinen Fähigkeiten, natürlicheren und einfacheren Antworten sowie einem immersiveren Schreiberlebnis.",
   "zai-glm-4.6.description": "Leistungsstark bei Codierungs- und Reasoning-Aufgaben, unterstützt Streaming und Toolaufrufe – ideal für agentenbasiertes Codieren und komplexes Denken.",
   "zai-org/GLM-4.5-Air.description": "GLM-4.5-Air ist ein Basismodell für Agentenanwendungen mit Mixture-of-Experts-Architektur. Es ist optimiert für Toolnutzung, Web-Browsing, Softwareentwicklung und Frontend-Codierung und integriert sich mit Code-Agenten wie Claude Code und Roo Code. Es nutzt hybrides Reasoning für komplexe und alltägliche Szenarien.",
   "zai-org/GLM-4.5.description": "GLM-4.5 ist ein Basismodell für Agentenanwendungen mit Mixture-of-Experts-Architektur. Es ist tiefgreifend optimiert für Toolnutzung, Web-Browsing, Softwareentwicklung und Frontend-Codierung und integriert sich mit Code-Agenten wie Claude Code und Roo Code.
Es nutzt hybrides Reasoning für komplexe und alltägliche Szenarien.",
diff --git a/locales/en-US/chat.json b/locales/en-US/chat.json
index e340ea717d..9e6899ed11 100644
--- a/locales/en-US/chat.json
+++ b/locales/en-US/chat.json
@@ -338,6 +338,9 @@
   "task.activity.toolCalling": "Calling {{toolName}}...",
   "task.activity.toolResult": "{{toolName}} result received",
   "task.batchTasks": "{{count}} Batch Subtasks",
+  "task.groupTasks": "{{count}} Parallel Tasks",
+  "task.groupTasksTitle": "{{agents}} and {{count}} agent tasks",
+  "task.groupTasksTitleSimple": "{{agents}} {{count}} tasks",
   "task.instruction": "Task Instruction",
   "task.intermediateSteps": "{{count}} intermediate steps",
   "task.metrics.duration": "(took {{duration}})",
@@ -345,6 +348,7 @@
   "task.metrics.toolCallsShort": "skill uses",
   "task.status.cancelled": "Task Cancelled",
   "task.status.failed": "Task Failed",
+  "task.status.fetchingDetails": "Fetching details...",
   "task.status.initializing": "Initializing task...",
   "task.subtask": "Subtask",
   "thread.divider": "Subtopic",
diff --git a/locales/es-ES/chat.json b/locales/es-ES/chat.json
index a33c08731b..4003cfed9d 100644
--- a/locales/es-ES/chat.json
+++ b/locales/es-ES/chat.json
@@ -338,6 +338,9 @@
   "task.activity.toolCalling": "Llamando a {{toolName}}...",
   "task.activity.toolResult": "Resultado de {{toolName}} recibido",
   "task.batchTasks": "{{count}} subtareas en lote",
+  "task.groupTasks": "{{count}} Tareas Paralelas",
+  "task.groupTasksTitle": "{{agents}} y {{count}} tareas de agentes",
+  "task.groupTasksTitleSimple": "{{agents}} {{count}} tareas",
   "task.instruction": "Instrucciones de la tarea",
   "task.intermediateSteps": "{{count}} pasos intermedios",
   "task.metrics.duration": "(duró {{duration}})",
@@ -345,6 +348,7 @@
   "task.metrics.toolCallsShort": "usos de herramientas",
   "task.status.cancelled": "Tarea cancelada",
   "task.status.failed": "Tarea fallida",
+  "task.status.fetchingDetails": "Obteniendo detalles...",
   "task.status.initializing": "Inicializando tarea...",
   "task.subtask": "Subtarea",
   "thread.divider": "Subtema",
diff --git a/locales/es-ES/models.json b/locales/es-ES/models.json
index 895945eefd..ffe8e3f9e9 100644
--- a/locales/es-ES/models.json
+++ b/locales/es-ES/models.json
@@ -1034,6 +1034,18 @@
   "qwen-vl-plus-latest.description": "Modelo Qwen visión-lenguaje mejorado a gran escala con importantes mejoras en detalle y reconocimiento de texto, compatible con resoluciones superiores a un megapíxel y relaciones de aspecto arbitrarias.",
   "qwen-vl-plus.description": "Modelo Qwen visión-lenguaje mejorado a gran escala con importantes mejoras en detalle y reconocimiento de texto, compatible con resoluciones superiores a un megapíxel y relaciones de aspecto arbitrarias.",
   "qwen-vl-v1.description": "Modelo preentrenado inicializado desde Qwen-7B con un módulo de visión añadido y entrada de imagen de resolución 448.",
+  "qwen/qwen-2-7b-instruct.description": "Qwen2 es la nueva serie de modelos LLM de Qwen. Qwen2 7B es un modelo basado en transformadores que destaca en comprensión del lenguaje, capacidad multilingüe, programación, matemáticas y razonamiento.",
+  "qwen/qwen-2-7b-instruct:free.description": "Qwen2 es una nueva familia de modelos de lenguaje de gran tamaño con mayor capacidad de comprensión y generación.",
+  "qwen/qwen-2-vl-72b-instruct.description": "Qwen2-VL es la última iteración de Qwen-VL, alcanzando un rendimiento de vanguardia en pruebas de visión como MathVista, DocVQA, RealWorldQA y MTVQA. Puede comprender más de 20 minutos de video para preguntas y respuestas de alta calidad, diálogos y creación de contenido. También maneja razonamiento complejo y toma de decisiones, integrándose con dispositivos móviles y robots para actuar según el contexto visual e instrucciones de texto. Además del inglés y chino, también lee texto en imágenes en muchos idiomas, incluidos la mayoría de los idiomas europeos, japonés, coreano, árabe y vietnamita.",
+  "qwen/qwen-2.5-72b-instruct.description": "Qwen2.5-72B-Instruct es uno de los últimos lanzamientos de modelos LLM de Alibaba Cloud. El modelo de 72B ofrece mejoras notables en programación y matemáticas, admite más de 29 idiomas (incluidos chino e inglés) y mejora significativamente el seguimiento de instrucciones, la comprensión de datos estructurados y la generación de salidas estructuradas (especialmente JSON).",
+  "qwen/qwen2.5-32b-instruct.description": "Qwen2.5-32B-Instruct es uno de los últimos lanzamientos de modelos LLM de Alibaba Cloud. El modelo de 32B ofrece mejoras notables en programación y matemáticas, admite más de 29 idiomas (incluidos chino e inglés) y mejora significativamente el seguimiento de instrucciones, la comprensión de datos estructurados y la generación de salidas estructuradas (especialmente JSON).",
+  "qwen/qwen2.5-7b-instruct.description": "Un modelo LLM bilingüe para chino e inglés que abarca lenguaje, programación, matemáticas y razonamiento.",
+  "qwen/qwen2.5-coder-32b-instruct.description": "Un modelo LLM avanzado para generación, razonamiento y corrección de código en lenguajes de programación principales.",
+  "qwen/qwen2.5-coder-7b-instruct.description": "Un modelo de código de tamaño medio con contexto de 32K, excelente en programación multilingüe.",
+  "qwen/qwen3-14b.description": "Qwen3-14B es la variante de 14B para razonamiento general y escenarios de conversación.",
+  "qwen/qwen3-14b:free.description": "Qwen3-14B es un modelo LLM denso de 14.8B parámetros diseñado para razonamiento complejo y conversación eficiente. Alterna entre un modo de pensamiento para matemáticas, programación y lógica, y un modo sin pensamiento para conversación general. Ajustado para seguir instrucciones, uso de herramientas de agentes y escritura creativa en más de 100 idiomas y dialectos. Maneja nativamente contexto de 32K y escala hasta 131K con YaRN.",
+  "qwen/qwen3-235b-a22b-2507.description": "Qwen3-235B-A22B-Instruct-2507 es la variante Instruct de la serie Qwen3, equilibrando el uso multilingüe de instrucciones con escenarios de contexto largo.",
+  "qwen/qwen3-235b-a22b-thinking-2507.description": "Qwen3-235B-A22B-Thinking-2507 es la variante de pensamiento de Qwen3, reforzada para tareas complejas de matemáticas y razonamiento.",
   "qwen/qwen3-235b-a22b.description": "Qwen3-235B-A22B es un modelo MoE de 235 mil millones de parámetros de Qwen, con 22 mil millones activos por pasada. Alterna entre un modo de pensamiento para razonamiento complejo, matemáticas y programación, y un modo sin pensamiento para chats eficientes. Ofrece un razonamiento sólido, soporte multilingüe (más de 100 idiomas y dialectos), seguimiento avanzado de instrucciones y uso de herramientas de agentes. Maneja de forma nativa contextos de 32K y escala hasta 131K con YaRN.",
   "qwen/qwen3-235b-a22b:free.description": "Qwen3-235B-A22B es un modelo MoE de 235 mil millones de parámetros de Qwen, con 22 mil millones activos por pasada. Alterna entre un modo de pensamiento para razonamiento complejo, matemáticas y programación, y un modo sin pensamiento para chats eficientes. Ofrece un razonamiento sólido, soporte multilingüe (más de 100 idiomas y dialectos), seguimiento avanzado de instrucciones y uso de herramientas de agentes. Maneja de forma nativa contextos de 32K y escala hasta 131K con YaRN.",
   "qwen/qwen3-30b-a3b.description": "Qwen3 es la última generación de modelos LLM de Qwen con arquitecturas densas y MoE, destacando en razonamiento, soporte multilingüe y tareas avanzadas de agentes. Su capacidad única de alternar entre un modo de pensamiento para razonamiento complejo y un modo sin pensamiento para chats eficientes garantiza un rendimiento versátil y de alta calidad.\n\nQwen3 supera significativamente a modelos anteriores como QwQ y Qwen2.5, ofreciendo excelentes resultados en matemáticas, programación, razonamiento de sentido común, escritura creativa y chat interactivo. La variante Qwen3-30B-A3B tiene 30.5 mil millones de parámetros (3.3 mil millones activos), 48 capas, 128 expertos (8 activos por tarea) y admite contextos de hasta 131K con YaRN, estableciendo un nuevo estándar para modelos abiertos.",
@@ -1073,6 +1085,44 @@
   "qwen2:0.5b.description": "Qwen2 es el modelo de lenguaje de nueva generación de Alibaba con un rendimiento sólido en diversos casos de uso.",
   "qwen2:1.5b.description": "Qwen2 es el modelo de lenguaje de nueva generación de Alibaba con un rendimiento sólido en diversos casos de uso.",
   "qwen2:72b.description": "Qwen2 es el modelo de lenguaje de nueva generación de Alibaba con un rendimiento sólido en diversos casos de uso.",
+  "qwen3-0.6b.description": "Qwen3 0.6B es un modelo de nivel inicial para razonamiento simple y entornos muy limitados.",
+  "qwen3-1.7b.description": "Qwen3 1.7B es un modelo ultraligero para implementación en dispositivos y entornos periféricos.",
+  "qwen3-14b.description": "Qwen3 14B es un modelo de tamaño medio para preguntas y respuestas multilingües y generación de texto.",
+  "qwen3-235b-a22b-instruct-2507.description": "Qwen3 235B A22B Instruct 2507 es un modelo instructivo insignia para una amplia gama de tareas de generación y razonamiento.",
+  "qwen3-235b-a22b-thinking-2507.description": "Qwen3 235B A22B Thinking 2507 es un modelo de pensamiento ultra grande para razonamiento complejo.",
+  "qwen3-235b-a22b.description": "Qwen3 235B A22B es un modelo general de gran tamaño para tareas complejas.",
+  "qwen3-30b-a3b-instruct-2507.description": "Qwen3 30B A3B Instruct 2507 es un modelo instructivo de tamaño medio-grande para generación de alta calidad y preguntas y respuestas.",
+  "qwen3-30b-a3b-thinking-2507.description": "Qwen3 30B A3B Thinking 2507 es un modelo de pensamiento de tamaño medio-grande que equilibra precisión y coste.",
+  "qwen3-30b-a3b.description": "Qwen3 30B A3B es un modelo general de tamaño medio-grande que equilibra coste y calidad.",
+  "qwen3-32b.description": "Qwen3 32B es adecuado para tareas generales que requieren mayor comprensión.",
+  "qwen3-4b.description": "Qwen3 4B es adecuado para aplicaciones pequeñas a medianas e inferencia local.",
+  "qwen3-8b.description": "Qwen3 8B es un modelo ligero con implementación flexible para cargas de trabajo de alta concurrencia.",
+  "qwen3-coder-30b-a3b-instruct.description": "Modelo de código Qwen de código abierto. El último qwen3-coder-30b-a3b-instruct se basa en Qwen3 y ofrece sólidas capacidades de agente de codificación, uso de herramientas e interacción con entornos para programación autónoma, con excelente rendimiento en código y capacidad general sólida.",
+  "qwen3-coder-480b-a35b-instruct.description": "Qwen3 Coder 480B A35B Instruct es un modelo de código insignia para programación multilingüe y comprensión de código complejo.",
+  "qwen3-coder-flash.description": "Modelo de código Qwen. La última serie Qwen3-Coder se basa en Qwen3 y ofrece sólidas capacidades de agente de codificación, uso de herramientas e interacción con entornos para programación autónoma, con excelente rendimiento en código y capacidad general sólida.",
+  "qwen3-coder-plus.description": "Modelo de código Qwen. La última serie Qwen3-Coder se basa en Qwen3 y ofrece sólidas capacidades de agente de codificación, uso de herramientas e interacción con entornos para programación autónoma, con excelente rendimiento en código y capacidad general sólida.",
+  "qwen3-coder:480b.description": "Modelo de alto rendimiento de Alibaba para tareas de agente y programación con contexto largo.",
+  "qwen3-max-preview.description": "Modelo Qwen con mejor rendimiento para tareas complejas y de múltiples pasos. La vista previa admite razonamiento.",
+  "qwen3-max.description": "Los modelos Qwen3 Max ofrecen grandes mejoras sobre la serie 2.5 en capacidad general, comprensión en chino/inglés, seguimiento de instrucciones complejas, tareas abiertas subjetivas, capacidad multilingüe y uso de herramientas, con menos alucinaciones. La última versión qwen3-max mejora la programación agente y el uso de herramientas respecto a qwen3-max-preview. Este lanzamiento alcanza el estado del arte en el campo y está dirigido a necesidades de agentes más complejas.",
+  "qwen3-next-80b-a3b-instruct.description": "Modelo de próxima generación Qwen3 de código abierto sin razonamiento. En comparación con la versión anterior (Qwen3-235B-A22B-Instruct-2507), mejora la comprensión del chino, el razonamiento lógico y la generación de texto.",
+  "qwen3-next-80b-a3b-thinking.description": "Qwen3 Next 80B A3B Thinking es una versión insignia de razonamiento para tareas complejas.",
+  "qwen3-omni-flash.description": "Qwen-Omni acepta entradas combinadas de texto, imágenes, audio y video, y genera texto o voz. Ofrece múltiples estilos de voz natural, admite habla multilingüe y dialectal, y se adapta a casos como redacción, reconocimiento visual y asistentes de voz.",
+  "qwen3-vl-235b-a22b-instruct.description": "Qwen3 VL 235B A22B Instruct es un modelo multimodal insignia para comprensión y creación exigentes.",
+  "qwen3-vl-235b-a22b-thinking.description": "Qwen3 VL 235B A22B Thinking es la versión de razonamiento insignia para planificación y razonamiento multimodal complejo.",
+  "qwen3-vl-30b-a3b-instruct.description": "Qwen3 VL 30B A3B Instruct es un modelo multimodal grande que equilibra precisión y rendimiento en razonamiento.",
+  "qwen3-vl-30b-a3b-thinking.description": "Qwen3 VL 30B A3B Thinking es una versión de pensamiento profundo para tareas multimodales complejas.",
+  "qwen3-vl-32b-instruct.description": "Qwen3 VL 32B Instruct es un modelo multimodal ajustado para instrucciones, ideal para preguntas y respuestas imagen-texto de alta calidad y creación.",
+  "qwen3-vl-32b-thinking.description": "Qwen3 VL 32B Thinking es una versión multimodal de pensamiento profundo para razonamiento complejo y análisis de cadenas largas.",
+  "qwen3-vl-8b-instruct.description": "Qwen3 VL 8B Instruct es un modelo multimodal ligero para preguntas visuales diarias e integración en aplicaciones.",
+  "qwen3-vl-8b-thinking.description": "Qwen3 VL 8B Thinking es un modelo multimodal de cadena de pensamiento para razonamiento visual detallado.",
+  "qwen3-vl-flash.description": "Qwen3 VL Flash: versión ligera y de razonamiento rápido para solicitudes sensibles a la latencia o de alto volumen.",
+  "qwen3-vl-plus.description": "Qwen VL es un modelo de generación de texto con comprensión visual. Puede realizar OCR, resumir y razonar, como extraer atributos de fotos de productos o resolver problemas a partir de imágenes.",
+  "qwen3.description": "Qwen3 es el modelo de lenguaje de próxima generación de Alibaba con alto rendimiento en diversos casos de uso.",
+  "qwq-32b-preview.description": "QwQ es un modelo experimental de investigación de Qwen centrado en mejorar el razonamiento.",
+  "qwq-32b.description": "QwQ es un modelo de razonamiento de la familia Qwen. En comparación con los modelos estándar ajustados por instrucciones, ofrece capacidades de pensamiento y razonamiento que mejoran significativamente el rendimiento en tareas complejas. QwQ-32B es un modelo de razonamiento de tamaño medio que rivaliza con los mejores modelos como DeepSeek-R1 y o1-mini.",
+  "qwq-plus.description": "El modelo de razonamiento QwQ entrenado sobre Qwen2.5 utiliza aprendizaje por refuerzo (RL) para mejorar significativamente el razonamiento. Las métricas clave en matemáticas/código (AIME 24/25, LiveCodeBench) y algunos benchmarks generales (IFEval, LiveBench) alcanzan el nivel completo de DeepSeek-R1.",
+  "qwq.description": "QwQ es un modelo de razonamiento de la familia Qwen. En comparación con los modelos estándar ajustados por instrucciones, ofrece capacidades de pensamiento y razonamiento que mejoran significativamente el rendimiento en tareas difíciles. QwQ-32B es un modelo de razonamiento de tamaño medio que compite con los mejores modelos como DeepSeek-R1 y o1-mini.",
+  "qwq_32b.description": "Modelo de razonamiento de tamaño medio de la familia Qwen. En comparación con los modelos estándar ajustados por instrucciones, las capacidades de pensamiento y razonamiento de QwQ mejoran significativamente el rendimiento en tareas difíciles.",
   "stable-diffusion-3.5-large.description": "stable-diffusion-3.5-large es un modelo de texto a imagen MMDiT con 800 millones de parámetros que ofrece una excelente calidad y alineación con los prompts, compatible con imágenes de 1 megapíxel y ejecución eficiente en hardware de consumo.",
   "stable-diffusion-v1.5.description": "stable-diffusion-v1.5 se inicializa desde el checkpoint v1.2 y se afina durante 595k pasos en \"laion-aesthetics v2 5+\" a una resolución de 512x512, reduciendo el condicionamiento de texto en un 10% para mejorar el muestreo sin clasificador.",
   "stable-diffusion-xl-base-1.0.description": "Un modelo de texto a imagen de código abierto de Stability AI con generación creativa de imágenes líder en la industria. Posee una sólida comprensión de instrucciones y admite definiciones inversas de prompts para una generación precisa.",
diff --git a/locales/fa-IR/chat.json b/locales/fa-IR/chat.json
index a7c30cb5ff..23c61c08dd 100644
--- a/locales/fa-IR/chat.json
+++ b/locales/fa-IR/chat.json
@@ -338,6 +338,9 @@
   "task.activity.toolCalling": "در حال فراخوانی {{toolName}}...",
   "task.activity.toolResult": "نتیجه {{toolName}} دریافت شد",
   "task.batchTasks": "{{count}} زیرکار گروهی",
+  "task.groupTasks": "{{count}} وظیفه هم‌زمان",
+  "task.groupTasksTitle": "{{agents}} و {{count}} وظیفه برای نمایندگان",
+  "task.groupTasksTitleSimple": "{{agents}} {{count}} وظیفه",
   "task.instruction": "دستورالعمل وظیفه",
   "task.intermediateSteps": "{{count}} مرحله میانی",
   "task.metrics.duration": "(مدت زمان: {{duration}})",
@@ -345,6 +348,7 @@
   "task.metrics.toolCallsShort": "استفاده از ابزار",
   "task.status.cancelled": "وظیفه لغو شد",
   "task.status.failed": "وظیفه ناموفق بود",
+  "task.status.fetchingDetails": "در حال دریافت جزئیات...",
   "task.status.initializing": "در حال آغاز وظیفه...",
   "task.subtask": "زیرکار",
   "thread.divider": "زیرموضوع",
diff --git a/locales/fa-IR/models.json b/locales/fa-IR/models.json
index e74491ac34..64bcef1464 100644
--- a/locales/fa-IR/models.json
+++ b/locales/fa-IR/models.json
@@ -1049,6 +1049,29 @@
   "qwen/qwen3-235b-a22b.description": "Qwen3-235B-A22B یک مدل MoE با ۲۳۵ میلیارد پارامتر از Qwen است که در هر عبور رو به جلو ۲۲ میلیارد پارامتر فعال دارد. این مدل بین حالت تفکر (برای استدلال پیچیده، ریاضی و کدنویسی) و حالت غیرتفکر (برای چت کارآمد) جابجا می‌شود. از استدلال قوی، پشتیبانی چندزبانه (بیش از ۱۰۰ زبان/گویش)، پیروی پیشرفته از دستورالعمل‌ها و استفاده از ابزارهای عامل پشتیبانی می‌کند. به‌صورت بومی از حافظه متنی ۳۲ هزار توکن پشتیبانی می‌کند و با YaRN تا ۱۳۱ هزار توکن گسترش می‌یابد.",
   "qwen/qwen3-235b-a22b:free.description": "Qwen3-235B-A22B یک مدل MoE با ۲۳۵ میلیارد پارامتر از Qwen است که در هر عبور رو به جلو ۲۲ میلیارد پارامتر فعال دارد. این مدل بین حالت تفکر (برای استدلال پیچیده، ریاضی و کدنویسی) و حالت غیرتفکر (برای چت کارآمد) جابجا می‌شود. از استدلال قوی، پشتیبانی چندزبانه (بیش از ۱۰۰ زبان/گویش)، پیروی پیشرفته از دستورالعمل‌ها و استفاده از ابزارهای عامل پشتیبانی می‌کند. به‌صورت بومی از حافظه متنی ۳۲ هزار توکن پشتیبانی می‌کند و با YaRN تا ۱۳۱ هزار توکن گسترش می‌یابد.",
   "qwen/qwen3-30b-a3b.description": "Qwen3 جدیدترین نسل مدل‌های LLM Qwen با معماری‌های متراکم و MoE است که در استدلال، پشتیبانی چندزبانه و وظایف پیشرفته عامل بسیار توانمند است.
توانایی منحصربه‌فرد آن در جابجایی بین حالت تفکر برای استدلال پیچیده و حالت غیرتفکر برای چت کارآمد، عملکردی همه‌جانبه و با کیفیت بالا را تضمین می‌کند.\n\nQwen3 به‌طور قابل‌توجهی از مدل‌های قبلی مانند QwQ و Qwen2.5 پیشی می‌گیرد و عملکردی عالی در ریاضی، برنامه‌نویسی، استدلال عقل سلیم، نوشتن خلاقانه و چت تعاملی ارائه می‌دهد. نسخه Qwen3-30B-A3B دارای ۳۰.۵ میلیارد پارامتر (۳.۳ میلیارد فعال)، ۴۸ لایه، ۱۲۸ متخصص (۸ فعال در هر وظیفه) است و از حافظه متنی تا ۱۳۱ هزار توکن با YaRN پشتیبانی می‌کند و استاندارد جدیدی برای مدل‌های متن‌باز تعیین می‌کند.", + "qwen/qwen3-30b-a3b:free.description": "Qwen3 جدیدترین نسل مدل‌های زبانی Qwen با معماری‌های متراکم و MoE است که در استدلال، پشتیبانی چندزبانه و وظایف پیشرفته عامل‌ها عملکردی برجسته دارد. توانایی منحصربه‌فرد آن در جابجایی بین حالت تفکر برای استدلال پیچیده و حالت بدون تفکر برای گفت‌وگوی سریع، عملکردی همه‌جانبه و با کیفیت بالا را تضمین می‌کند.\n\nQwen3 به‌طور قابل‌توجهی از مدل‌های قبلی مانند QwQ و Qwen2.5 پیشی می‌گیرد و در ریاضیات، برنامه‌نویسی، استدلال عقل سلیم، نویسندگی خلاق و گفت‌وگوی تعاملی عملکردی عالی دارد. نسخه Qwen3-30B-A3B دارای ۳۰.۵ میلیارد پارامتر (۳.۳ میلیارد فعال)، ۴۸ لایه، ۱۲۸ کارشناس (۸ فعال در هر وظیفه) است و با پشتیبانی از زمینه تا ۱۳۱ هزار توکن با استفاده از YaRN، استاندارد جدیدی برای مدل‌های باز ایجاد کرده است.", + "qwen/qwen3-32b.description": "Qwen3-32B یک مدل زبانی علّی متراکم با ۳۲.۸ میلیارد پارامتر است که برای استدلال پیچیده و گفت‌وگوی کارآمد بهینه‌سازی شده است. این مدل بین حالت تفکر برای ریاضی، برنامه‌نویسی و منطق و حالت بدون تفکر برای گفت‌وگوی عمومی سریع جابجا می‌شود. در پیروی از دستورالعمل‌ها، استفاده از ابزارهای عامل و نویسندگی خلاق در بیش از ۱۰۰ زبان و گویش عملکردی قوی دارد. به‌صورت بومی از زمینه ۳۲ هزار توکن پشتیبانی می‌کند و با YaRN تا ۱۳۱ هزار توکن گسترش می‌یابد.", + "qwen/qwen3-32b:free.description": "Qwen3-32B یک مدل زبانی علّی متراکم با ۳۲.۸ میلیارد پارامتر است که برای استدلال پیچیده و گفت‌وگوی کارآمد بهینه‌سازی شده است. این مدل بین حالت تفکر برای ریاضی، برنامه‌نویسی و منطق و حالت بدون تفکر برای گفت‌وگوی عمومی سریع جابجا می‌شود. در پیروی از دستورالعمل‌ها، استفاده از ابزارهای عامل و نویسندگی خلاق در بیش از ۱۰۰ زبان و گویش عملکردی قوی دارد. به‌صورت بومی از زمینه ۳۲ هزار توکن پشتیبانی می‌کند و با YaRN تا ۱۳۱ هزار توکن گسترش می‌یابد.", + "qwen/qwen3-8b:free.description": "Qwen3-8B یک مدل زبانی علّی متراکم با ۸.۲ میلیارد پارامتر است که برای وظایف مبتنی بر استدلال و گفت‌وگوی کارآمد طراحی شده است. این مدل بین حالت تفکر برای ریاضی، برنامه‌نویسی و منطق و حالت بدون تفکر برای گفت‌وگوی عمومی جابجا می‌شود. برای پیروی از دستورالعمل‌ها، یکپارچه‌سازی با عامل‌ها و نویسندگی خلاق در بیش از ۱۰۰ زبان و گویش به‌خوبی تنظیم شده است. 
به‌صورت بومی از زمینه ۳۲ هزار توکن پشتیبانی می‌کند و با YaRN تا ۱۳۱ هزار توکن گسترش می‌یابد.", + "qwen/qwen3-coder-plus.description": "Qwen3-Coder-Plus یک مدل عامل برنامه‌نویسی از سری Qwen است که برای استفاده از ابزارهای پیچیده‌تر و جلسات طولانی بهینه‌سازی شده است.", + "qwen/qwen3-coder.description": "Qwen3-Coder خانواده تولید کد Qwen3 است که در درک و تولید کد در اسناد طولانی عملکردی قوی دارد.", + "qwen/qwen3-max-preview.description": "Qwen3 Max (پیش‌نمایش) نسخه Max برای استدلال پیشرفته و یکپارچه‌سازی ابزارها است.", + "qwen/qwen3-max.description": "Qwen3 Max مدل استدلال سطح بالا در سری Qwen3 برای استدلال چندزبانه و یکپارچه‌سازی ابزارها است.", + "qwen/qwen3-vl-plus.description": "Qwen3 VL-Plus نسخه تقویت‌شده Qwen3 با قابلیت‌های بینایی است که در استدلال چندوجهی و پردازش ویدئو بهبود یافته است.", + "qwen2.5-14b-instruct-1m.description": "مدل متن‌باز Qwen2.5 با ۱۴ میلیارد پارامتر.", + "qwen2.5-14b-instruct.description": "مدل متن‌باز Qwen2.5 با ۱۴ میلیارد پارامتر.", + "qwen2.5-32b-instruct.description": "مدل متن‌باز Qwen2.5 با ۳۲ میلیارد پارامتر.", + "qwen2.5-72b-instruct.description": "مدل متن‌باز Qwen2.5 با ۷۲ میلیارد پارامتر.", + "qwen2.5-7b-instruct.description": "Qwen2.5 7B Instruct یک مدل متن‌باز بالغ برای گفت‌وگو و تولید در سناریوهای مختلف است.", + "qwen2.5-coder-1.5b-instruct.description": "مدل برنامه‌نویسی متن‌باز Qwen.", + "qwen2.5-coder-14b-instruct.description": "مدل برنامه‌نویسی متن‌باز Qwen.", + "qwen2.5-coder-32b-instruct.description": "مدل برنامه‌نویسی متن‌باز Qwen.", + "qwen2.5-coder-7b-instruct.description": "مدل برنامه‌نویسی متن‌باز Qwen.", + "qwen2.5-coder-instruct.description": "Qwen2.5-Coder جدیدترین مدل متمرکز بر کدنویسی در خانواده Qwen (قبلاً با نام CodeQwen شناخته می‌شد) است.", + "qwen2.5-instruct.description": "Qwen2.5 جدیدترین سری مدل‌های زبانی Qwen است که شامل مدل‌های پایه و تنظیم‌شده برای دستورالعمل‌ها با اندازه‌هایی از ۰.۵ تا ۷۲ میلیارد پارامتر می‌باشد.", + "qwen2.5-math-1.5b-instruct.description": "Qwen-Math در حل مسائل ریاضی عملکردی قوی دارد.", + "qwen2.5-math-72b-instruct.description": "Qwen-Math در حل مسائل ریاضی عملکردی قوی دارد.", + "qwen2.5-math-7b-instruct.description": "Qwen-Math در حل مسائل ریاضی عملکردی قوی دارد.", "qwen2.5-omni-7b.description": "مدل‌های Qwen-Omni از ورودی‌های چندرسانه‌ای (ویدیو، صدا، تصویر، متن) پشتیبانی می‌کنند و خروجی آن‌ها به صورت صوتی و متنی است.", "qwen2.5-vl-32b-instruct.description": "Qwen2.5 VL 32B Instruct یک مدل چندرسانه‌ای متن‌باز است که برای استقرار خصوصی و استفاده در سناریوهای مختلف مناسب می‌باشد.", "qwen2.5-vl-72b-instruct.description": "بهبود در پیروی از دستورالعمل‌ها، ریاضیات، حل مسئله و برنامه‌نویسی، با توانایی قوی‌تر در شناسایی اشیاء عمومی. از مکان‌یابی دقیق عناصر بصری در قالب‌های مختلف، درک ویدیوهای طولانی (تا ۱۰ دقیقه) با زمان‌بندی رویداد در سطح ثانیه، ترتیب زمانی و درک سرعت، و عامل‌هایی که می‌توانند سیستم‌عامل یا موبایل را از طریق تجزیه و تحلیل و مکان‌یابی کنترل کنند، پشتیبانی می‌کند. استخراج اطلاعات کلیدی قوی و خروجی JSON. این نسخه ۷۲B، قوی‌ترین نسخه در این سری است.", @@ -1100,6 +1123,22 @@ "qwq-plus.description": "مدل استدلال QwQ که بر پایه Qwen2.5 آموزش دیده و با استفاده از یادگیری تقویتی (RL) توانایی استدلال را به‌طور چشمگیری بهبود داده است. در معیارهای اصلی ریاضی/کد (AIME 24/25، LiveCodeBench) و برخی معیارهای عمومی (IFEval، LiveBench) به سطح کامل DeepSeek-R1 رسیده است.", "qwq.description": "QwQ یک مدل استدلال در خانواده Qwen است. در مقایسه با مدل‌های تنظیم‌شده با دستورالعمل استاندارد، توانایی تفکر و استدلال آن عملکرد پایین‌دستی را به‌ویژه در مسائل دشوار به‌طور قابل توجهی بهبود می‌بخشد.
QwQ-32B یک مدل استدلال میان‌رده است که با مدل‌های برتر مانند DeepSeek-R1 و o1-mini رقابت می‌کند.", "qwq_32b.description": "مدل استدلال میان‌رده در خانواده Qwen. در مقایسه با مدل‌های تنظیم‌شده با دستورالعمل استاندارد، توانایی تفکر و استدلال QwQ عملکرد پایین‌دستی را به‌ویژه در مسائل دشوار به‌طور قابل توجهی بهبود می‌بخشد.", + "r1-1776.description": "R1-1776 نسخه پس‌آموزشی مدل DeepSeek R1 است که برای ارائه اطلاعات واقعی، بدون سانسور و بی‌طرف طراحی شده است.", + "solar-mini-ja.description": "Solar Mini (ژاپنی) نسخه‌ای از Solar Mini با تمرکز بر زبان ژاپنی است که در عین حال عملکرد قوی و کارآمدی را در زبان‌های انگلیسی و کره‌ای حفظ می‌کند.", + "solar-mini.description": "Solar Mini یک مدل زبانی فشرده است که عملکردی بهتر از GPT-3.5 دارد و با پشتیبانی چندزبانه قوی از زبان‌های انگلیسی و کره‌ای، راه‌حلی کارآمد با حجم کم ارائه می‌دهد.", + "solar-pro.description": "Solar Pro یک مدل زبانی هوشمند از Upstage است که برای پیروی از دستورالعمل‌ها روی یک GPU طراحی شده و امتیاز IFEval بالای ۸۰ دارد. در حال حاضر از زبان انگلیسی پشتیبانی می‌کند؛ انتشار کامل آن برای نوامبر ۲۰۲۴ با پشتیبانی زبانی گسترده‌تر و زمینه طولانی‌تر برنامه‌ریزی شده است.", + "sonar-deep-research.description": "Deep Research پژوهشی جامع در سطح تخصصی انجام داده و آن را به گزارش‌هایی قابل‌فهم و قابل‌اقدام تبدیل می‌کند.", + "sonar-pro.description": "یک محصول جستجوی پیشرفته با پشتیبانی از جستجوی مبتنی بر زمینه برای پرس‌وجوهای پیچیده و پیگیری‌ها.", + "sonar-reasoning-pro.description": "یک محصول جستجوی پیشرفته با پشتیبانی از جستجوی مبتنی بر زمینه برای پرس‌وجوهای پیچیده و پیگیری‌ها.", + "sonar-reasoning.description": "یک محصول جستجوی پیشرفته با پشتیبانی از جستجوی مبتنی بر زمینه برای پرس‌وجوهای پیچیده و پیگیری‌ها.", + "sonar.description": "یک محصول سبک‌وزن با جستجوی مبتنی بر زمینه، سریع‌تر و ارزان‌تر از Sonar Pro.", + "spark-x.description": "به‌روزرسانی‌های X1.5: (۱) افزودن حالت تفکر پویا با کنترل از طریق فیلد `thinking`؛ (۲) طول زمینه بزرگ‌تر با ورودی ۶۴K و خروجی ۶۴K؛ (۳) پشتیبانی از FunctionCall.", + "stable-diffusion-3-medium.description": "جدیدترین مدل تبدیل متن به تصویر از Stability AI. این نسخه کیفیت تصویر، درک متن و تنوع سبک را به‌طور قابل‌توجهی بهبود می‌بخشد، دستورات زبان طبیعی پیچیده را دقیق‌تر تفسیر کرده و تصاویر متنوع‌تری تولید می‌کند.", + "stable-diffusion-3.5-large-turbo.description": "stable-diffusion-3.5-large-turbo از تکنیک تقطیر انتشار خصمانه (ADD) برای افزایش سرعت در stable-diffusion-3.5-large استفاده می‌کند.", + "stable-diffusion-3.5-large.description": "stable-diffusion-3.5-large یک مدل تبدیل متن به تصویر MMDiT با ۸ میلیارد پارامتر است که کیفیت بالا و تطابق دقیق با دستورات را ارائه می‌دهد و از تصاویر ۱ مگاپیکسلی و اجرای کارآمد روی سخت‌افزار مصرفی پشتیبانی می‌کند.", + "stable-diffusion-v1.5.description": "stable-diffusion-v1.5 از چک‌پوینت v1.2 آغاز شده و به مدت ۵۹۵ هزار مرحله روی مجموعه «laion-aesthetics v2 5+» با وضوح ۵۱۲x۵۱۲ آموزش دیده است. با کاهش ۱۰٪ در شرط‌گذاری متنی، نمونه‌گیری بدون طبقه‌بندی را بهبود می‌بخشد.", + "stable-diffusion-xl-base-1.0.description": "یک مدل متن‌باز تبدیل متن به تصویر از Stability AI با قابلیت‌های خلاقانه پیشرو در صنعت. درک قوی از دستورالعمل‌ها دارد و از تعریف معکوس دستورات برای تولید دقیق پشتیبانی می‌کند.", + "stable-diffusion-xl.description": "stable-diffusion-xl بهبودهای عمده‌ای نسبت به نسخه v1.5 دارد و با بهترین نتایج متن‌باز تبدیل متن به تصویر برابری می‌کند.
بهبودها شامل ستون فقرات UNet سه برابر بزرگ‌تر، ماژول پالایش برای کیفیت بهتر تصویر و تکنیک‌های آموزشی کارآمدتر است.", "tencent/Hunyuan-A13B-Instruct.description": "Hunyuan-A13B-Instruct با استفاده از ۸۰ میلیارد پارامتر کلی و ۱۳ میلیارد پارامتر فعال، عملکردی هم‌تراز با مدل‌های بزرگ‌تر ارائه می‌دهد. این مدل از استدلال ترکیبی سریع/کند، درک پایدار متون بلند و توانایی پیشرو در عامل‌ها در آزمون‌های BFCL-v3 و τ-Bench پشتیبانی می‌کند. فرمت‌های GQA و چندکوانتیزه‌سازی، استنتاج کارآمد را ممکن می‌سازند.", "tencent/Hunyuan-MT-7B.description": "مدل ترجمه Hunyuan شامل Hunyuan-MT-7B و مدل ترکیبی Hunyuan-MT-Chimera است. Hunyuan-MT-7B یک مدل ترجمه سبک با ۷ میلیارد پارامتر است که از ۳۳ زبان به‌علاوه ۵ زبان اقلیت چینی پشتیبانی می‌کند. در رقابت WMT25، در ۳۰ جفت‌زبان از ۳۱ مورد، رتبه اول را کسب کرد. Hunyuan از یک زنجیره کامل آموزش شامل پیش‌آموزش، SFT، تقویت یادگیری ترجمه و تقویت یادگیری ترکیبی استفاده می‌کند و با عملکردی پیشرو در اندازه خود، به‌راحتی قابل استقرار است.", "text-embedding-3-large.description": "قوی‌ترین مدل تعبیه‌سازی برای وظایف انگلیسی و غیرانگلیسی.", diff --git a/locales/fr-FR/chat.json b/locales/fr-FR/chat.json index 3fea441ebc..75f1fb7686 100644 --- a/locales/fr-FR/chat.json +++ b/locales/fr-FR/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "Appel de {{toolName}}...", "task.activity.toolResult": "Résultat de {{toolName}} reçu", "task.batchTasks": "{{count}} sous-tâches groupées", + "task.groupTasks": "{{count}} tâches parallèles", + "task.groupTasksTitle": "{{agents}} et {{count}} tâches d'agents", + "task.groupTasksTitleSimple": "{{agents}} {{count}} tâches", "task.instruction": "Instruction de la tâche", "task.intermediateSteps": "{{count}} étapes intermédiaires", "task.metrics.duration": "(durée : {{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "utilisations d'outil", "task.status.cancelled": "Tâche annulée", "task.status.failed": "Échec de la tâche", + "task.status.fetchingDetails": "Récupération des détails...", "task.status.initializing": "Initialisation de la tâche...", "task.subtask": "Sous-tâche", "thread.divider": "Sous-sujet", diff --git a/locales/fr-FR/models.json b/locales/fr-FR/models.json index 1cd486d235..4ede2de0e7 100644 --- a/locales/fr-FR/models.json +++ b/locales/fr-FR/models.json @@ -817,6 +817,15 @@ "meta-llama/llama-3.1-70b-instruct.description": "La dernière série Llama 3.1 de Meta, la variante 70B ajustée par instruction, est optimisée pour des dialogues de haute qualité. Elle affiche de solides performances face aux modèles fermés leaders dans les évaluations industrielles. (Disponible uniquement pour les entités vérifiées en entreprise.)", "meta-llama/llama-3.1-8b-instruct.description": "La dernière série Llama 3.1 de Meta, la variante 8B ajustée par instruction, est particulièrement rapide et efficace. Elle offre de solides performances dans les évaluations industrielles, surpassant de nombreux modèles fermés leaders. (Disponible uniquement pour les entités vérifiées en entreprise.)", "meta-llama/llama-3.1-8b-instruct:free.description": "LLaMA 3.1 offre un support multilingue et fait partie des modèles génératifs les plus performants.", + "meta-llama/llama-3.2-11b-vision-instruct.description": "LLaMA 3.2 est conçu pour les tâches combinant vision et texte. 
Il excelle en légendage d’images et en questions-réponses visuelles, faisant le lien entre génération de langage et raisonnement visuel.", + "meta-llama/llama-3.2-3b-instruct.description": "meta-llama/llama-3.2-3b-instruct", + "meta-llama/llama-3.2-90b-vision-instruct.description": "LLaMA 3.2 est conçu pour les tâches combinant vision et texte. Il excelle en légendage d’images et en questions-réponses visuelles, faisant le lien entre génération de langage et raisonnement visuel.", + "meta-llama/llama-3.3-70b-instruct.description": "Llama 3.3 est le modèle Llama multilingue open source le plus avancé, offrant des performances proches de 405B à très faible coût. Basé sur l’architecture Transformer, il est amélioré par SFT et RLHF pour une meilleure utilité et sécurité. La version optimisée pour les instructions est conçue pour les conversations multilingues et surpasse de nombreux modèles ouverts et fermés selon les benchmarks industriels. Date de coupure des connaissances : décembre 2023.", + "meta-llama/llama-3.3-70b-instruct:free.description": "Llama 3.3 est le modèle Llama multilingue open source le plus avancé, offrant des performances proches de 405B à très faible coût. Basé sur l’architecture Transformer, il est amélioré par SFT et RLHF pour une meilleure utilité et sécurité. La version optimisée pour les instructions est conçue pour les conversations multilingues et surpasse de nombreux modèles ouverts et fermés selon les benchmarks industriels. Date de coupure des connaissances : décembre 2023.", + "meta.llama3-1-405b-instruct-v1:0.description": "Meta Llama 3.1 405B Instruct est le plus grand et le plus puissant modèle Llama 3.1 Instruct, conçu pour le raisonnement conversationnel et la génération de données synthétiques. Il constitue une base solide pour un pré-entraînement ou un ajustement spécifique à un domaine. Les LLMs multilingues Llama 3.1 sont des modèles de génération pré-entraînés et ajustés par instruction, disponibles en tailles 8B, 70B et 405B (texte en entrée/sortie). Les modèles ajustés sont optimisés pour le dialogue multilingue et surpassent de nombreux modèles de chat ouverts sur les benchmarks industriels. Llama 3.1 est destiné à un usage commercial et de recherche dans plusieurs langues. Les modèles ajustés conviennent aux assistants conversationnels, tandis que les modèles pré-entraînés sont adaptés à des tâches plus générales de génération de texte. Les sorties de Llama 3.1 peuvent également être utilisées pour améliorer d'autres modèles, notamment via la génération et le raffinement de données synthétiques. Llama 3.1 est un modèle Transformer autorégressif avec une architecture optimisée. Les versions ajustées utilisent un apprentissage supervisé (SFT) et un apprentissage par renforcement avec retour humain (RLHF) pour s’aligner sur les préférences humaines en matière d’utilité et de sécurité.", + "meta.llama3-1-70b-instruct-v1:0.description": "Une version mise à jour de Meta Llama 3.1 70B Instruct avec une fenêtre de contexte étendue à 128K, un support multilingue et un raisonnement amélioré. Les LLMs multilingues Llama 3.1 sont des modèles de génération pré-entraînés et ajustés par instruction, disponibles en tailles 8B, 70B et 405B (texte en entrée/sortie). Les modèles ajustés sont optimisés pour le dialogue multilingue et surpassent de nombreux modèles de chat ouverts sur les benchmarks industriels. Llama 3.1 est destiné à un usage commercial et de recherche dans plusieurs langues. 
Les modèles ajustés conviennent aux assistants conversationnels, tandis que les modèles pré-entraînés sont adaptés à des tâches plus générales de génération de texte. Les sorties de Llama 3.1 peuvent également être utilisées pour améliorer d'autres modèles, notamment via la génération et le raffinement de données synthétiques. Llama 3.1 est un modèle Transformer autorégressif avec une architecture optimisée. Les versions ajustées utilisent un apprentissage supervisé (SFT) et un apprentissage par renforcement avec retour humain (RLHF) pour s’aligner sur les préférences humaines en matière d’utilité et de sécurité.", + "meta.llama3-1-8b-instruct-v1:0.description": "Une version mise à jour de Meta Llama 3.1 8B Instruct avec une fenêtre de contexte de 128K, un support multilingue et un raisonnement amélioré. La famille Llama 3.1 comprend des modèles de texte ajustés par instruction en 8B, 70B et 405B, optimisés pour le chat multilingue et des performances solides sur les benchmarks. Conçu pour un usage commercial et de recherche dans plusieurs langues ; les modèles ajustés conviennent aux assistants conversationnels, tandis que les modèles pré-entraînés sont adaptés à des tâches de génération plus générales. Les sorties de Llama 3.1 peuvent également être utilisées pour améliorer d'autres modèles (par exemple, données synthétiques et raffinement). Il s'agit d'un modèle Transformer autorégressif, avec SFT et RLHF pour s’aligner sur l’utilité et la sécurité.", + "meta.llama3-70b-instruct-v1:0.description": "Meta Llama 3 est un LLM ouvert destiné aux développeurs, chercheurs et entreprises, conçu pour les aider à créer, expérimenter et faire évoluer de manière responsable leurs idées en IA générative. En tant que fondation pour l’innovation communautaire mondiale, il est bien adapté à la création de contenu, à l’IA conversationnelle, à la compréhension du langage, à la R&D et aux applications d’entreprise.", "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3 est un modèle LLM ouvert destiné aux développeurs, chercheurs et entreprises, conçu pour les aider à créer, expérimenter et faire évoluer de manière responsable des idées d'IA générative. 
Faisant partie de la base de l'innovation communautaire mondiale, il est particulièrement adapté aux environnements à ressources limitées, aux appareils en périphérie et aux temps d'entraînement réduits.", "meta/Llama-3.2-11B-Vision-Instruct.description": "Raisonnement visuel performant sur des images haute résolution, idéal pour les applications de compréhension visuelle.", "meta/Llama-3.2-90B-Vision-Instruct.description": "Raisonnement visuel avancé pour les agents d'applications de compréhension visuelle.", diff --git a/locales/it-IT/chat.json b/locales/it-IT/chat.json index e298cdb3a6..acb3216567 100644 --- a/locales/it-IT/chat.json +++ b/locales/it-IT/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "Chiamata {{toolName}}...", "task.activity.toolResult": "Risultato {{toolName}} ricevuto", "task.batchTasks": "{{count}} sottocompiti in batch", + "task.groupTasks": "{{count}} Attività Parallele", + "task.groupTasksTitle": "{{agents}} e {{count}} attività degli agenti", + "task.groupTasksTitleSimple": "{{agents}} {{count}} attività", "task.instruction": "Istruzioni dell'attività", "task.intermediateSteps": "{{count}} passaggi intermedi", "task.metrics.duration": "(durata: {{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "usi strumento", "task.status.cancelled": "Attività annullata", "task.status.failed": "Attività non riuscita", + "task.status.fetchingDetails": "Recupero dei dettagli in corso...", "task.status.initializing": "Inizializzazione attività...", "task.subtask": "Sottocompito", "thread.divider": "Sottotema", diff --git a/locales/it-IT/models.json b/locales/it-IT/models.json index 63b1e10e1b..9a7b9994e2 100644 --- a/locales/it-IT/models.json +++ b/locales/it-IT/models.json @@ -760,6 +760,68 @@ "learnlm-1.5-pro-experimental.description": "LearnLM è un modello sperimentale e specifico per compiti, addestrato secondo i principi della scienza dell'apprendimento per seguire istruzioni di sistema in scenari di insegnamento/apprendimento, agendo come un tutor esperto.", "learnlm-2.0-flash-experimental.description": "LearnLM è un modello sperimentale e specifico per compiti, addestrato secondo i principi della scienza dell'apprendimento per seguire istruzioni di sistema in scenari di insegnamento/apprendimento, agendo come un tutor esperto.", "lite.description": "Spark Lite è un LLM leggero con latenza ultra-bassa ed elaborazione efficiente. È completamente gratuito e supporta la ricerca web in tempo reale. 
Le sue risposte rapide si comportano bene su dispositivi a bassa potenza e per il fine-tuning dei modelli, offrendo un'esperienza intelligente e conveniente, soprattutto per domande e risposte, generazione di contenuti e scenari di ricerca.", + "llama-3.1-70b-versatile.description": "Llama 3.1 70B offre un ragionamento AI avanzato per applicazioni complesse, supportando carichi computazionali elevati con alta efficienza e precisione.", + "llama-3.1-8b-instant.description": "Llama 3.1 8B è un modello ad alta efficienza con generazione di testo rapida, ideale per applicazioni su larga scala e a basso costo.", + "llama-3.1-instruct.description": "Il modello Llama 3.1 ottimizzato per istruzioni è progettato per chat e supera molti modelli open source nei benchmark di settore.", + "llama-3.2-11b-vision-instruct.description": "Potente ragionamento visivo su immagini ad alta risoluzione, ideale per applicazioni di comprensione visiva.", + "llama-3.2-11b-vision-preview.description": "Llama 3.2 è progettato per compiti che combinano visione e testo, eccellendo in didascalie di immagini e domande e risposte visive, unendo generazione linguistica e ragionamento visivo.", + "llama-3.2-90b-vision-instruct.description": "Ragionamento visivo avanzato per applicazioni di agenti con comprensione visiva.", + "llama-3.2-90b-vision-preview.description": "Llama 3.2 è progettato per compiti che combinano visione e testo, eccellendo in didascalie di immagini e domande e risposte visive, unendo generazione linguistica e ragionamento visivo.", + "llama-3.2-vision-instruct.description": "Il modello Llama 3.2-Vision ottimizzato per istruzioni è progettato per riconoscimento visivo, ragionamento su immagini, generazione di didascalie e domande e risposte generali su immagini.", + "llama-3.3-70b-versatile.description": "Meta Llama 3.3 è un LLM multilingue con 70 miliardi di parametri (input/output testuale), disponibile in versioni pre-addestrate e ottimizzate per istruzioni. 
Il modello testuale ottimizzato per istruzioni è pensato per dialoghi multilingue e supera molti modelli open e closed nei benchmark di settore.", + "llama-3.3-70b.description": "Llama 3.3 70B: un modello Llama di dimensioni medio-grandi che bilancia ragionamento e throughput.", + "llama-3.3-instruct.description": "Il modello Llama 3.3 ottimizzato per istruzioni è progettato per chat e supera molti modelli open source nei benchmark di settore.", + "llama3-70b-8192.description": "Meta Llama 3 70B offre una gestione eccezionale della complessità per progetti impegnativi.", + "llama3-8b-8192.description": "Meta Llama 3 8B garantisce prestazioni di ragionamento solide in scenari diversificati.", + "llama3-groq-70b-8192-tool-use-preview.description": "Llama 3 Groq 70B Tool Use offre un uso efficiente degli strumenti per la gestione di compiti complessi.", + "llama3-groq-8b-8192-tool-use-preview.description": "Llama 3 Groq 8B Tool Use è ottimizzato per un uso efficiente degli strumenti con calcolo parallelo veloce.", + "llama3.1-8b.description": "Llama 3.1 8B: una variante Llama compatta e a bassa latenza per inferenza online leggera e chat.", + "llama3.1.description": "Llama 3.1 è il modello di punta di Meta, scalabile fino a 405 miliardi di parametri per dialoghi complessi, traduzioni multilingue e analisi dei dati.", + "llama3.1:405b.description": "Llama 3.1 è il modello di punta di Meta, scalabile fino a 405 miliardi di parametri per dialoghi complessi, traduzioni multilingue e analisi dei dati.", + "llama3.1:70b.description": "Llama 3.1 è il modello di punta di Meta, scalabile fino a 405 miliardi di parametri per dialoghi complessi, traduzioni multilingue e analisi dei dati.", + "llava-v1.5-7b-4096-preview.description": "LLaVA 1.5 7B integra l'elaborazione visiva per generare output complessi da input visivi.", + "llava.description": "LLaVA è un modello multimodale che combina un encoder visivo e Vicuna per una solida comprensione visione-linguaggio.", + "llava:13b.description": "LLaVA è un modello multimodale che combina un encoder visivo e Vicuna per una solida comprensione visione-linguaggio.", + "llava:34b.description": "LLaVA è un modello multimodale che combina un encoder visivo e Vicuna per una solida comprensione visione-linguaggio.", + "magistral-medium-latest.description": "Magistral Medium 1.2 è un modello di ragionamento avanzato di Mistral AI (settembre 2025) con supporto visivo.", + "magistral-small-2509.description": "Magistral Small 1.2 è un modello open source compatto di Mistral AI (settembre 2025) con supporto visivo.", + "mathstral.description": "MathΣtral è progettato per la ricerca scientifica e il ragionamento matematico, con forti capacità di calcolo e spiegazione.", + "max-32k.description": "Spark Max 32K offre elaborazione di contesti estesi con una comprensione logica e contestuale avanzata, supportando input fino a 32.000 token per lettura di documenti lunghi e domande su conoscenze private.", + "megrez-3b-instruct.description": "Megrez 3B Instruct è un modello compatto ed efficiente sviluppato da Wuwen Xinqiong.", + "meituan/longcat-flash-chat.description": "Un modello base open source non riflessivo di Meituan, ottimizzato per dialoghi e compiti agentici, eccellente nell'uso di strumenti e interazioni complesse multi-turno.", + "meta-llama-3-70b-instruct.description": "Un potente modello da 70 miliardi di parametri che eccelle nel ragionamento, nella programmazione e nei compiti linguistici generali.", + "meta-llama-3-8b-instruct.description": "Un modello 
versatile da 8 miliardi di parametri ottimizzato per chat e generazione di testo.", + "meta-llama-3.1-405b-instruct.description": "Modello testuale Llama 3.1 ottimizzato per istruzioni, progettato per chat multilingue, con prestazioni elevate nei benchmark di settore tra i modelli open e closed.", + "meta-llama-3.1-70b-instruct.description": "Modello testuale Llama 3.1 ottimizzato per istruzioni, progettato per chat multilingue, con prestazioni elevate nei benchmark di settore tra i modelli open e closed.", + "meta-llama-3.1-8b-instruct.description": "Modello testuale Llama 3.1 ottimizzato per istruzioni, progettato per chat multilingue, con prestazioni elevate nei benchmark di settore tra i modelli open e closed.", + "meta-llama/Llama-2-13b-chat-hf.description": "LLaMA-2 Chat (13B) offre una solida gestione linguistica e un'esperienza di chat affidabile.", + "meta-llama/Llama-2-70b-hf.description": "LLaMA-2 offre una solida gestione linguistica e un'interazione fluida.", + "meta-llama/Llama-3-70b-chat-hf.description": "Llama 3 70B Instruct Reference è un potente modello di chat per dialoghi complessi.", + "meta-llama/Llama-3-8b-chat-hf.description": "Llama 3 8B Instruct Reference offre supporto multilingue e conoscenza su ampi domini.", + "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo.description": "LLaMA 3.2 è progettato per compiti che combinano visione e testo. Eccelle in didascalie di immagini e domande e risposte visive, unendo generazione linguistica e ragionamento visivo.", + "meta-llama/Llama-3.2-3B-Instruct-Turbo.description": "LLaMA 3.2 è progettato per compiti che combinano visione e testo. Eccelle in didascalie di immagini e domande e risposte visive, unendo generazione linguistica e ragionamento visivo.", + "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo.description": "LLaMA 3.2 è progettato per compiti che combinano visione e testo. Eccelle in didascalie di immagini e domande e risposte visive, unendo generazione linguistica e ragionamento visivo.", + "meta-llama/Llama-3.3-70B-Instruct-Turbo.description": "Meta Llama 3.3 è un LLM multilingue da 70B (input/output testuale), pre-addestrato e ottimizzato per istruzioni. La versione testuale ottimizzata per istruzioni è pensata per chat multilingue e supera molti modelli open e closed nei benchmark di settore.", + "meta-llama/Llama-Vision-Free.description": "LLaMA 3.2 è progettato per compiti che combinano visione e testo. 
Eccelle in didascalie di immagini e domande e risposte visive, unendo generazione linguistica e ragionamento visivo.", + "meta-llama/Meta-Llama-3-70B-Instruct-Lite.description": "Llama 3 70B Instruct Lite è progettato per alte prestazioni con bassa latenza.", + "meta-llama/Meta-Llama-3-70B-Instruct-Turbo.description": "Llama 3 70B Instruct Turbo offre comprensione e generazione avanzate per i carichi di lavoro più esigenti.", + "meta-llama/Meta-Llama-3-8B-Instruct-Lite.description": "Llama 3 8B Instruct Lite bilancia prestazioni e risorse per ambienti con vincoli.", + "meta-llama/Meta-Llama-3-8B-Instruct-Turbo.description": "Llama 3 8B Instruct Turbo è un LLM ad alte prestazioni per un'ampia gamma di casi d'uso.", + "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo.description": "Il modello Llama 3.1 Turbo da 405B offre una capacità di contesto enorme per l'elaborazione di big data ed eccelle in applicazioni AI su scala ultra.", + "meta-llama/Meta-Llama-3.1-405B-Instruct.description": "Llama 3.1 è la famiglia di modelli di punta di Meta, scalabile fino a 405 miliardi di parametri per dialoghi complessi, traduzioni multilingue e analisi dei dati.", + "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo.description": "Llama 3.1 70B è finemente ottimizzato per applicazioni ad alto carico; la quantizzazione FP8 garantisce calcolo efficiente e precisione in scenari complessi.", + "meta-llama/Meta-Llama-3.1-70B.description": "Llama 3.1 è la famiglia di modelli di punta di Meta, scalabile fino a 405 miliardi di parametri per dialoghi complessi, traduzioni multilingue e analisi dei dati.", + "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo.description": "Llama 3.1 8B utilizza quantizzazione FP8, supporta fino a 131.072 token di contesto e si posiziona tra i migliori modelli open per compiti complessi su numerosi benchmark.", + "meta-llama/llama-3-70b-instruct.description": "Llama 3 70B Instruct è ottimizzato per dialoghi di alta qualità e mostra ottime prestazioni nelle valutazioni umane.", + "meta-llama/llama-3-8b-instruct.description": "Llama 3 8B Instruct è ottimizzato per dialoghi di alta qualità, superando molti modelli closed.", + "meta-llama/llama-3.1-70b-instruct.description": "La serie Llama 3.1 più recente di Meta, variante da 70B ottimizzata per istruzioni e dialoghi di alta qualità. Nei benchmark di settore, mostra prestazioni elevate rispetto ai principali modelli closed. (Disponibile solo per entità verificate aziendalmente.)", + "meta-llama/llama-3.1-8b-instruct.description": "La serie Llama 3.1 più recente di Meta, variante da 8B ottimizzata per istruzioni, particolarmente veloce ed efficiente. Nei benchmark di settore, offre prestazioni elevate, superando molti modelli closed. (Disponibile solo per entità verificate aziendalmente.)", + "meta-llama/llama-3.1-8b-instruct:free.description": "LLaMA 3.1 offre supporto multilingue ed è uno dei principali modelli generativi.", + "meta-llama/llama-3.2-11b-vision-instruct.description": "LLaMA 3.2 è progettato per compiti che combinano visione e testo. Eccelle in didascalie di immagini e domande e risposte visive, unendo generazione linguistica e ragionamento visivo.", + "meta-llama/llama-3.2-3b-instruct.description": "meta-llama/llama-3.2-3b-instruct", + "meta-llama/llama-3.2-90b-vision-instruct.description": "LLaMA 3.2 è progettato per compiti che combinano visione e testo. 
Eccelle in didascalie di immagini e domande e risposte visive, unendo generazione linguistica e ragionamento visivo.", + "meta-llama/llama-3.3-70b-instruct.description": "Llama 3.3 è il modello Llama open source multilingue più avanzato, con prestazioni vicine a quelle dei modelli da 405B a costi molto contenuti. Basato su Transformer, è migliorato con SFT e RLHF per utilità e sicurezza. La versione ottimizzata per istruzioni è pensata per chat multilingue e supera molti modelli open e closed nei benchmark di settore. Data di cutoff: dicembre 2023.", + "meta-llama/llama-3.3-70b-instruct:free.description": "Llama 3.3 è il modello Llama open source multilingue più avanzato, con prestazioni vicine a quelle dei modelli da 405B a costi molto contenuti. Basato su Transformer, è migliorato con SFT e RLHF per utilità e sicurezza. La versione ottimizzata per istruzioni è pensata per chat multilingue e supera molti modelli open e closed nei benchmark di settore. Data di cutoff: dicembre 2023.", "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3 è un LLM open-source pensato per sviluppatori, ricercatori e aziende, progettato per supportare la creazione, la sperimentazione e la scalabilità responsabile di idee basate su IA generativa. Parte integrante dell’ecosistema globale per l’innovazione comunitaria, è ideale per ambienti con risorse limitate, dispositivi edge e tempi di addestramento ridotti.", "meta/Llama-3.2-11B-Vision-Instruct.description": "Solido ragionamento visivo su immagini ad alta risoluzione, ideale per applicazioni di comprensione visiva.", "meta/Llama-3.2-90B-Vision-Instruct.description": "Ragionamento visivo avanzato per applicazioni agenti di comprensione visiva.", diff --git a/locales/ja-JP/chat.json b/locales/ja-JP/chat.json index b6f69f3520..f2cd289524 100644 --- a/locales/ja-JP/chat.json +++ b/locales/ja-JP/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "{{toolName}} を呼び出し中…", "task.activity.toolResult": "{{toolName}} の結果を取得しました", "task.batchTasks": "{{count}} 件のバッチサブタスク", + "task.groupTasks": "{{count}} 個の並列タスク", + "task.groupTasksTitle": "{{agents}} と {{count}} 件のエージェントタスク", + "task.groupTasksTitleSimple": "{{agents}} {{count}} 件のタスク", "task.instruction": "タスクの指示", "task.intermediateSteps": "{{count}} 件の中間ステップ", "task.metrics.duration": "(所要時間:{{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "回のスキル呼び出し", "task.status.cancelled": "タスクがキャンセルされました", "task.status.failed": "タスクが失敗しました", + "task.status.fetchingDetails": "詳細を取得中...", "task.status.initializing": "タスクを起動中…", "task.subtask": "サブタスク", "thread.divider": "サブトピック", diff --git a/locales/ja-JP/models.json b/locales/ja-JP/models.json index 256d0b7652..5ce401487b 100644 --- a/locales/ja-JP/models.json +++ b/locales/ja-JP/models.json @@ -827,6 +827,7 @@ "meta.llama3-1-8b-instruct-v1:0.description": "Meta Llama 3.1 8B Instructのアップデート版で、128Kのコンテキストウィンドウ、多言語対応、推論能力の向上を備えています。Llama 3.1ファミリーには、8B、70B、405Bの命令調整テキストモデルが含まれ、多言語チャットと高いベンチマーク性能に最適化されています。商用および研究用途に対応し、命令調整モデルはアシスタント型チャットに、事前学習モデルはより広範な生成タスクに適しています。Llama 3.1の出力は、合成データ生成や精緻化など、他のモデルの改善にも活用可能です。自己回帰型Transformerモデルであり、SFTとRLHFにより有用性と安全性を実現しています。", "meta.llama3-70b-instruct-v1:0.description": "Meta Llama 3は、開発者、研究者、企業向けのオープンLLMであり、生成AIのアイデアを構築・実験・拡張するための基盤です。グローバルなコミュニティによるイノベーションの基礎として、コンテンツ生成、対話型AI、言語理解、研究開発、企業アプリケーションに最適です。", "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3は、開発者、研究者、企業向けのオープンLLMであり、生成AIのアイデアを構築・実験・拡張するための基盤です。限られた計算資源やエッジデバイス、短時間のトレーニングに適しており、効率的な運用が可能です。", + "meta/Llama-3.2-11B-Vision-Instruct.description": 
"高解像度画像における優れた画像推論能力を持ち、視覚理解アプリケーションに最適です。", "meta/Llama-3.2-90B-Vision-Instruct.description": "視覚理解エージェント向けの高度な画像推論機能。", "meta/Llama-3.3-70B-Instruct.description": "Llama 3.3 は、最先端の多言語対応オープンソース Llama モデルであり、非常に低コストで 405B に近い性能を実現します。Transformer ベースで、SFT および RLHF により有用性と安全性が向上しています。命令調整版は多言語チャットに最適化されており、業界ベンチマークで多くのオープン・クローズドチャットモデルを上回ります。知識カットオフ:2023年12月。", "meta/Meta-Llama-3-70B-Instruct.description": "推論、コーディング、幅広い言語タスクに優れた 70B パラメータの強力なモデル。", @@ -863,9 +864,48 @@ "microsoft/Phi-3.5-vision-instruct.description": "Phi-3-vision モデルの更新版。", "microsoft/WizardLM-2-8x22B.description": "WizardLM 2 は Microsoft AI による言語モデルで、複雑な対話、多言語タスク、推論、アシスタントに優れています。", "microsoft/wizardlm-2-8x22b.description": "WizardLM-2 8x22B は Microsoft AI による最先端の Wizard モデルで、非常に競争力のある性能を発揮します。", + "mimo-v2-flash.description": "MiMo-V2-Flash:推論、コーディング、エージェント基盤に特化した高効率モデルです。", "minicpm-v.description": "MiniCPM-V は OpenBMB の次世代マルチモーダルモデルで、OCR とマルチモーダル理解に優れ、幅広い用途に対応します。", + "minimax-m2.1.description": "MiniMax-M2.1はMiniMaxシリーズの最新バージョンで、多言語プログラミングや現実世界の複雑なタスクに最適化されています。AIネイティブモデルとして、モデル性能、エージェントフレームワークのサポート、多様なシナリオへの適応性において大幅な向上を実現し、企業や個人がAIネイティブな働き方やライフスタイルをより迅速に実現できるよう支援します。", + "minimax-m2.description": "MiniMax M2は、コーディングやエージェントワークフローに特化して構築された高効率な大規模言語モデルです。", + "minimax/minimax-m2.1.description": "MiniMax-M2.1は、コーディング、プロキシワークフロー、現代的なアプリ開発に最適化された軽量かつ最先端の大規模言語モデルで、より簡潔で明瞭な出力と高速な応答を提供します。", "minimax/minimax-m2.description": "MiniMax-M2 は、エンジニアリングシナリオにおけるコーディングとエージェントタスクに優れた高価値モデルです。", "minimaxai/minimax-m2.description": "MiniMax-M2 は、230B 総パラメータ中 10B アクティブのコンパクトで高速、コスト効率の高い MoE モデルで、マルチファイル編集、コード実行・修正ループ、テスト検証、複雑なツールチェーンに優れた性能を発揮します。", + "ministral-3b-latest.description": "Ministral 3Bは、Mistralの最上位エッジモデルです。", + "ministral-8b-latest.description": "Ministral 8Bは、Mistralによる高コストパフォーマンスのエッジモデルです。", + "mistral-ai/Mistral-Large-2411.description": "Mistralのフラッグシップモデルで、大規模な推論や専門性を要する複雑なタスク(合成テキスト生成、コード生成、RAG、エージェントなど)に対応します。", + "mistral-ai/Mistral-Nemo.description": "Mistral Nemoは、同サイズ帯で最先端の推論力、世界知識、コーディング能力を備えた先進的なLLMです。", + "mistral-ai/mistral-small-2503.description": "Mistral Smallは、高効率かつ低遅延を求めるあらゆる言語タスクに適しています。", + "mistral-large-instruct.description": "Mistral-Large-Instruct-2407は、123Bパラメータを持つ高密度LLMで、最先端の推論力、知識、コーディング能力を備えています。", + "mistral-large-latest.description": "Mistral Largeは、マルチリンガルタスク、複雑な推論、コード生成に強く、高度なアプリケーションに最適なフラッグシップモデルです。", + "mistral-large.description": "Mixtral Largeは、Mistralのフラッグシップモデルで、コード生成、数学、推論を128Kのコンテキストウィンドウで実現します。", + "mistral-medium-latest.description": "Mistral Medium 3は、8倍のコスト削減で最先端の性能を提供し、企業導入を簡素化します。", + "mistral-nemo-instruct.description": "Mistral-Nemo-Instruct-2407は、Mistral-Nemo-Base-2407の命令調整版です。", + "mistral-nemo.description": "Mistral Nemoは、Mistral AIとNVIDIAによる高効率な12Bモデルです。", + "mistral-small-latest.description": "Mistral Smallは、翻訳、要約、感情分析においてコスト効率が高く、迅速かつ信頼性の高い選択肢です。", + "mistral-small.description": "Mistral Smallは、高効率かつ低遅延を求めるあらゆる言語タスクに適しています。", + "mistral.description": "Mistralは、Mistral AIによる7Bモデルで、多様な言語タスクに対応します。", + "mistral/codestral-embed.description": "コードベースやリポジトリの埋め込みに対応したコード埋め込みモデルで、コーディングアシスタントを支援します。", + "mistral/codestral.description": "Mistral Codestral 25.01は、低遅延・高頻度利用に最適化された最先端のコーディングモデルで、80以上の言語をサポートし、FIM、コード修正、テスト生成に優れています。", + "mistral/devstral-small.description": "Devstralは、ソフトウェアエンジニアリングタスク向けのエージェント型LLMで、ソフトウェアエンジニアリングエージェントに最適です。", + "mistral/magistral-medium.description": "深い理解に基づく複雑な思考を支援し、透明性のある推論を提供します。タスク中でも言語間で高精度な推論を維持します。", + "mistral/magistral-small.description": "深い理解に基づく複雑な思考を支援し、透明性のある推論を提供します。タスク中でも言語間で高精度な推論を維持します。", + 
"mistral/ministral-3b.description": "アシスタントやローカル分析などのオンデバイスタスク向けのコンパクトで高効率なモデルで、低遅延性能を実現します。", + "mistral/ministral-8b.description": "より高性能でメモリ効率の良い推論を実現し、複雑なワークフローや高負荷なエッジアプリケーションに最適です。", + "mistral/mistral-embed.description": "意味検索、類似性評価、クラスタリング、RAGワークフローに対応した汎用テキスト埋め込みモデルです。", + "mistral/mistral-large.description": "Mistral Largeは、合成テキスト生成、コード生成、RAG、エージェントなど、強力な推論や専門性を要する複雑なタスクに最適です。", + "mistral/mistral-small.description": "Mistral Smallは、分類、カスタマーサポート、テキスト生成などのシンプルでバッチ処理可能なタスクに最適で、手頃な価格で優れた性能を発揮します。", + "mistral/mixtral-8x22b-instruct.description": "8x22B Instructモデル。8x22Bは、Mistralが提供するオープンなMoEモデルです。", + "mistral/pixtral-12b.description": "画像理解とテキスト処理を備えた12Bモデルです。", + "mistral/pixtral-large.description": "Pixtral Largeは、マルチモーダルファミリーの第2弾で、最先端の画像理解を備えています。文書、チャート、自然画像を処理しつつ、Mistral Large 2の優れたテキスト理解力を維持します。", + "mistralai/Mistral-7B-Instruct-v0.1.description": "Mistral(7B)Instructは、多くの言語タスクで優れた性能を発揮します。", + "mistralai/Mistral-7B-Instruct-v0.2.description": "Mistral(7B)Instruct v0.2は、命令処理と結果の正確性を向上させています。", + "mistralai/Mistral-7B-Instruct-v0.3.description": "Mistral(7B)Instruct v0.3は、効率的な計算と優れた言語理解を提供し、多様な用途に対応します。", + "mistralai/Mistral-7B-v0.1.description": "Mistral 7Bはコンパクトながら高性能で、分類やテキスト生成などのバッチ処理やシンプルなタスクに強く、堅実な推論力を備えています。", + "mistralai/Mixtral-8x22B-Instruct-v0.1.description": "Mixtral-8x22B Instruct(141B)は、大規模なワークロードに対応する非常に大きなLLMです。", + "mistralai/Mixtral-8x7B-Instruct-v0.1.description": "Mixtral-8x7B Instruct(46.7B)は、大規模データ処理に対応する高容量モデルです。", + "mistralai/Mixtral-8x7B-v0.1.description": "Mixtral 8x7Bは、推論速度を向上させるスパースMoEモデルで、多言語およびコード生成タスクに適しています。", + "mistralai/mistral-nemo.description": "Mistral Nemoは、マルチリンガル対応と優れたコーディング性能を備えた7.3Bモデルです。", "moonshot-v1-32k.description": "Moonshot V1 32Kは、32,768トークンの中程度の長さのコンテキストをサポートし、長文ドキュメントや複雑な対話に最適で、コンテンツ制作、レポート、チャットシステムに適しています。", "moonshot-v1-8k-vision-preview.description": "Kimi Visionモデル(moonshot-v1-8k-vision-preview、moonshot-v1-32k-vision-preview、moonshot-v1-128k-vision-previewを含む)は、テキスト、色、物体の形状などの画像内容を理解できます。", "moonshot-v1-8k.description": "Moonshot V1 8Kは、短文生成に最適化されており、効率的なパフォーマンスで8,192トークンを処理し、短いチャット、メモ、迅速なコンテンツ作成に適しています。", diff --git a/locales/ko-KR/chat.json b/locales/ko-KR/chat.json index a02b400979..dea0d0b0da 100644 --- a/locales/ko-KR/chat.json +++ b/locales/ko-KR/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "{{toolName}} 호출 중…", "task.activity.toolResult": "{{toolName}} 결과를 가져왔습니다", "task.batchTasks": "{{count}}개의 일괄 하위 작업", + "task.groupTasks": "{{count}}개의 병렬 작업", + "task.groupTasksTitle": "{{agents}} 및 {{count}}명의 에이전트 작업", + "task.groupTasksTitleSimple": "{{agents}} {{count}}개의 작업", "task.instruction": "작업 지침", "task.intermediateSteps": "{{count}}단계 중간 과정", "task.metrics.duration": "(소요 시간: {{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "회 기능 호출", "task.status.cancelled": "작업이 취소되었습니다", "task.status.failed": "작업에 실패했습니다", + "task.status.fetchingDetails": "세부 정보를 불러오는 중...", "task.status.initializing": "작업 시작 중…", "task.subtask": "하위 작업", "thread.divider": "하위 주제", diff --git a/locales/ko-KR/models.json b/locales/ko-KR/models.json index b1654f982b..bf1b4b31f4 100644 --- a/locales/ko-KR/models.json +++ b/locales/ko-KR/models.json @@ -822,6 +822,12 @@ "meta-llama/llama-3.2-90b-vision-instruct.description": "LLaMA 3.2는 시각과 텍스트를 결합한 작업을 위해 설계되었으며, 이미지 캡셔닝과 시각적 질의응답에 탁월합니다.", "meta-llama/llama-3.3-70b-instruct.description": "Llama 3.3은 가장 진보된 다국어 오픈소스 Llama 모델로, 매우 낮은 비용으로 405B에 근접한 성능을 제공합니다. Transformer 기반이며, SFT 및 RLHF를 통해 유용성과 안전성이 향상되었습니다. 
인스트럭션 튜닝 버전은 다국어 채팅에 최적화되어 있으며, 업계 벤치마크에서 많은 오픈 및 클로즈드 챗 모델을 능가합니다. 지식 기준일: 2023년 12월", "meta-llama/llama-3.3-70b-instruct:free.description": "Llama 3.3은 가장 진보된 다국어 오픈소스 Llama 모델로, 매우 낮은 비용으로 405B에 근접한 성능을 제공합니다. Transformer 기반이며, SFT 및 RLHF를 통해 유용성과 안전성이 향상되었습니다. 인스트럭션 튜닝 버전은 다국어 채팅에 최적화되어 있으며, 업계 벤치마크에서 많은 오픈 및 클로즈드 챗 모델을 능가합니다. 지식 기준일: 2023년 12월", + "meta.llama3-1-405b-instruct-v1:0.description": "Meta Llama 3.1 405B Instruct는 Llama 3.1 Instruct 모델 중 가장 크고 강력한 모델로, 대화 추론 및 합성 데이터 생성에 특화된 고급 모델입니다. 도메인 특화 사전 학습 또는 파인튜닝의 강력한 기반이 됩니다. Llama 3.1 다국어 LLM은 8B, 70B, 405B 크기로 사전 학습 및 지시 기반 튜닝된 생성 모델 세트이며, 텍스트 입력/출력을 지원합니다. 이 지시 기반 텍스트 모델은 다국어 대화에 최적화되어 있으며, 업계 표준 벤치마크에서 많은 오픈 챗 모델을 능가합니다. Llama 3.1은 상업적 및 연구 목적으로 다양한 언어에서 사용 가능하도록 설계되었습니다. 지시 기반 모델은 어시스턴트 스타일의 채팅에 적합하며, 사전 학습 모델은 보다 일반적인 자연어 생성 작업에 적합합니다. Llama 3.1의 출력은 합성 데이터 생성 및 정제 등 다른 모델의 성능 향상에도 활용될 수 있습니다. Llama 3.1은 최적화된 아키텍처를 갖춘 자기회귀 Transformer 모델이며, 튜닝된 버전은 SFT(지도형 파인튜닝)와 RLHF(인간 피드백 기반 강화 학습)를 통해 유용성과 안전성 측면에서 인간의 선호에 맞춰 조정됩니다.", + "meta.llama3-1-70b-instruct-v1:0.description": "Meta Llama 3.1 70B Instruct는 128K 컨텍스트 윈도우 확장, 다국어 지원, 향상된 추론 능력을 갖춘 최신 버전입니다. Llama 3.1 다국어 LLM은 8B, 70B, 405B 크기의 사전 학습 및 지시 기반 튜닝된 생성 모델 세트로, 텍스트 입력/출력을 지원합니다. 이 지시 기반 텍스트 모델은 다국어 대화에 최적화되어 있으며, 업계 표준 벤치마크에서 많은 오픈 챗 모델을 능가합니다. Llama 3.1은 다양한 언어에서 상업적 및 연구 목적으로 사용되도록 설계되었습니다. 지시 기반 모델은 어시스턴트 스타일의 채팅에 적합하며, 사전 학습 모델은 보다 일반적인 자연어 생성 작업에 적합합니다. Llama 3.1의 출력은 합성 데이터 생성 및 정제 등 다른 모델의 성능 향상에도 활용될 수 있습니다. Llama 3.1은 최적화된 아키텍처를 갖춘 자기회귀 Transformer 모델이며, 튜닝된 버전은 SFT(지도형 파인튜닝)와 RLHF(인간 피드백 기반 강화 학습)를 통해 유용성과 안전성 측면에서 인간의 선호에 맞춰 조정됩니다.", + "meta.llama3-1-8b-instruct-v1:0.description": "Meta Llama 3.1 8B Instruct는 128K 컨텍스트 윈도우, 다국어 지원, 향상된 추론 능력을 갖춘 최신 버전입니다. Llama 3.1 제품군은 8B, 70B, 405B 크기의 지시 기반 튜닝된 텍스트 모델로, 다국어 채팅과 강력한 벤치마크 성능에 최적화되어 있습니다. 다양한 언어에서 상업적 및 연구 목적으로 사용되며, 지시 기반 모델은 어시스턴트 스타일의 채팅에 적합하고, 사전 학습 모델은 보다 일반적인 생성 작업에 적합합니다. Llama 3.1의 출력은 합성 데이터 생성 및 정제 등 다른 모델의 성능 향상에도 활용될 수 있습니다. 이 모델은 자기회귀 Transformer 구조를 기반으로 하며, SFT와 RLHF를 통해 유용성과 안전성에 맞춰 조정됩니다.", + "meta.llama3-70b-instruct-v1:0.description": "Meta Llama 3는 개발자, 연구자, 기업을 위한 오픈 LLM으로, 생성형 AI 아이디어를 구축하고 실험하며 책임감 있게 확장할 수 있도록 설계되었습니다. 글로벌 커뮤니티 혁신의 기반으로서 콘텐츠 생성, 대화형 AI, 언어 이해, 연구 개발, 기업용 애플리케이션에 적합합니다.", + "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3는 개발자, 연구자, 기업을 위한 오픈 LLM으로, 생성형 AI 아이디어를 구축하고 실험하며 책임감 있게 확장할 수 있도록 설계되었습니다. 제한된 컴퓨팅 자원, 엣지 디바이스, 빠른 학습 시간에 적합한 모델입니다.", + "meta/Llama-3.2-11B-Vision-Instruct.description": "고해상도 이미지에 대한 강력한 이미지 추론 능력을 갖춘 모델로, 시각적 이해 애플리케이션에 적합합니다.", "meta/Llama-3.2-90B-Vision-Instruct.description": "시각 이해 에이전트 애플리케이션을 위한 고급 이미지 추론 기능.", "meta/Llama-3.3-70B-Instruct.description": "Llama 3.3은 가장 진보된 다국어 오픈소스 Llama 모델로, 매우 낮은 비용으로 405B에 근접한 성능을 제공합니다. Transformer 기반이며, 유용성과 안전성을 위해 SFT 및 RLHF로 개선되었습니다. 명령어 튜닝 버전은 다국어 채팅에 최적화되어 있으며, 산업 벤치마크에서 많은 오픈 및 클로즈드 채팅 모델을 능가합니다.
지식 기준일: 2023년 12월.", "meta/Meta-Llama-3-70B-Instruct.description": "추론, 코딩, 다양한 언어 작업에 뛰어난 성능을 보이는 강력한 70B 파라미터 모델.", @@ -858,7 +864,11 @@ "microsoft/Phi-3.5-vision-instruct.description": "Phi-3-vision 모델의 업데이트 버전.", "microsoft/WizardLM-2-8x22B.description": "WizardLM 2는 Microsoft AI의 언어 모델로, 복잡한 대화, 다국어 작업, 추론, 어시스턴트에 뛰어납니다.", "microsoft/wizardlm-2-8x22b.description": "WizardLM-2 8x22B는 Microsoft AI의 가장 진보된 Wizard 모델로, 매우 경쟁력 있는 성능을 제공합니다.", + "mimo-v2-flash.description": "MiMo-V2-Flash: 추론, 코딩, 에이전트 기반 작업에 최적화된 효율적인 모델입니다.", "minicpm-v.description": "MiniCPM-V는 OpenBMB의 차세대 멀티모달 모델로, 광범위한 사용 사례에 대해 뛰어난 OCR 및 멀티모달 이해력을 제공합니다.", + "minimax-m2.1.description": "MiniMax-M2.1은 MiniMax 시리즈의 최신 버전으로, 다국어 프로그래밍 및 실제 복잡한 작업에 최적화되어 있습니다. AI-네이티브 모델로서 모델 성능, 에이전트 프레임워크 지원, 다중 시나리오 적응력에서 큰 향상을 이루었으며, 기업과 개인이 AI-기반의 업무 및 라이프스타일을 더 빠르게 실현할 수 있도록 돕습니다.", + "minimax-m2.description": "MiniMax M2는 코딩 및 에이전트 워크플로우에 특화된 효율적인 대형 언어 모델입니다.", + "minimax/minimax-m2.1.description": "MiniMax-M2.1은 코딩, 프록시 워크플로우, 최신 애플리케이션 개발에 최적화된 경량 고성능 대형 언어 모델로, 더 간결하고 깔끔한 출력과 빠른 반응 속도를 제공합니다.", "minimax/minimax-m2.description": "MiniMax-M2는 다양한 엔지니어링 시나리오에서 코딩 및 에이전트 작업에 뛰어난 고가치 모델입니다.", "minimaxai/minimax-m2.description": "MiniMax-M2는 컴팩트하고 빠르며 비용 효율적인 MoE 모델(총 230B, 활성 10B)로, 다중 파일 편집, 코드 실행-수정 루프, 테스트 검증, 복잡한 툴체인에서 뛰어난 성능을 발휘하며 강력한 일반 지능을 유지합니다.", "ministral-3b-latest.description": "Ministral 3B는 Mistral의 최고급 엣지 모델입니다.", @@ -875,6 +885,27 @@ "mistral-small-latest.description": "Mistral Small은 번역, 요약, 감정 분석에 적합한 비용 효율적이고 빠르며 신뢰할 수 있는 옵션입니다.", "mistral-small.description": "Mistral Small은 고효율 및 낮은 지연 시간이 필요한 모든 언어 기반 작업에 적합합니다.", "mistral.description": "Mistral은 다양한 언어 작업에 적합한 Mistral AI의 7B 모델입니다.", + "mistral/codestral-embed.description": "코드베이스 및 저장소 임베딩을 위한 코드 임베딩 모델로, 코딩 어시스턴트를 지원합니다.", + "mistral/codestral.description": "Mistral Codestral 25.01은 저지연, 고빈도 사용에 최적화된 최첨단 코딩 모델입니다. 80개 이상의 언어를 지원하며, FIM, 코드 수정, 테스트 생성에 뛰어납니다.", + "mistral/devstral-small.description": "Devstral은 소프트웨어 엔지니어링 작업을 위한 에이전트형 LLM으로, 소프트웨어 엔지니어링 에이전트에 적합한 강력한 선택지입니다.", + "mistral/magistral-medium.description": "깊은 이해를 바탕으로 한 복잡한 사고를 지원하며, 투명한 추론 과정을 따라가고 검증할 수 있습니다. 작업 중에도 언어 간 고정밀 추론을 유지합니다.", + "mistral/magistral-small.description": "깊은 이해를 바탕으로 한 복잡한 사고를 지원하며, 투명한 추론 과정을 따라가고 검증할 수 있습니다. 작업 중에도 언어 간 고정밀 추론을 유지합니다.", + "mistral/ministral-3b.description": "어시스턴트 및 로컬 분석과 같은 온디바이스 작업에 적합한 컴팩트하고 효율적인 모델로, 저지연 성능을 제공합니다.", + "mistral/ministral-8b.description": "더 강력한 성능과 메모리 효율적인 추론을 제공하는 모델로, 복잡한 워크플로우 및 고성능 엣지 애플리케이션에 이상적입니다.", + "mistral/mistral-embed.description": "의미 기반 검색, 유사도 분석, 클러스터링, RAG 워크플로우를 위한 범용 텍스트 임베딩 모델입니다.", + "mistral/mistral-large.description": "Mistral Large는 강력한 추론 또는 전문화가 필요한 복잡한 작업에 이상적입니다. 예: 합성 텍스트 생성, 코드 생성, RAG, 에이전트 등.", + "mistral/mistral-small.description": "Mistral Small은 분류, 고객 지원, 텍스트 생성과 같은 단순하고 일괄 처리 가능한 작업에 적합하며, 합리적인 가격에 뛰어난 성능을 제공합니다.", + "mistral/mixtral-8x22b-instruct.description": "8x22B Instruct 모델. 8x22B는 Mistral이 제공하는 오픈 MoE 모델입니다.", + "mistral/pixtral-12b.description": "이미지 이해 및 텍스트 처리를 지원하는 12B 모델입니다.", + "mistral/pixtral-large.description": "Pixtral Large는 Mistral의 멀티모달 제품군 중 두 번째 모델로, 최첨단 이미지 이해 능력을 갖추고 있습니다. 
문서, 차트, 자연 이미지를 처리하면서도 Mistral Large 2의 뛰어난 텍스트 이해력을 유지합니다.", + "mistralai/Mistral-7B-Instruct-v0.1.description": "Mistral (7B) Instruct는 다양한 언어 작업에서 강력한 성능을 발휘하는 것으로 알려져 있습니다.", + "mistralai/Mistral-7B-Instruct-v0.2.description": "Mistral (7B) Instruct v0.2는 지시 처리 능력과 결과 정확도를 향상시켰습니다.", + "mistralai/Mistral-7B-Instruct-v0.3.description": "Mistral (7B) Instruct v0.3는 효율적인 연산과 강력한 언어 이해력을 제공하여 다양한 사용 사례에 적합합니다.", + "mistralai/Mistral-7B-v0.1.description": "Mistral 7B는 컴팩트하지만 고성능을 자랑하며, 분류 및 텍스트 생성과 같은 단순 작업과 일괄 처리에 강력한 추론 능력을 제공합니다.", + "mistralai/Mixtral-8x22B-Instruct-v0.1.description": "Mixtral-8x22B Instruct (141B)는 대규모 작업 부하를 처리할 수 있는 초대형 LLM입니다.", + "mistralai/Mixtral-8x7B-Instruct-v0.1.description": "Mixtral-8x7B Instruct (46.7B)는 대규모 데이터 처리에 적합한 높은 용량을 제공합니다.", + "mistralai/Mixtral-8x7B-v0.1.description": "Mixtral 8x7B는 추론 속도를 높이는 희소 MoE 모델로, 다국어 및 코드 생성 작업에 적합합니다.", + "mistralai/mistral-nemo.description": "Mistral Nemo는 7.3B 모델로, 다국어 지원과 강력한 코딩 성능을 제공합니다.", "morph/morph-v3-large.description": "Morph는 최첨단 모델(예: Claude 또는 GPT-4o)이 제안한 코드 변경 사항을 기존 파일에 빠르게 적용할 수 있도록 특화된 모델입니다. 초당 2500개 이상의 토큰 처리 속도를 자랑하며, AI 코딩 워크플로우의 마지막 단계로 16K 입력/출력 토큰을 지원합니다.", "nousresearch/hermes-2-pro-llama-3-8b.description": "Hermes 2 Pro Llama 3 8B는 Nous Hermes 2의 최신 버전으로, 내부에서 개발한 최신 데이터셋을 기반으로 업데이트되었습니다.", "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF.description": "Llama 3.1 Nemotron 70B는 NVIDIA가 사용자 응답의 유용성을 향상시키기 위해 맞춤 제작한 LLM입니다. Arena Hard, AlpacaEval 2 LC, GPT-4-Turbo MT-Bench에서 모두 1위를 기록하며, 2024년 10월 1일 기준 자동 정렬 벤치마크에서 최고의 성능을 보입니다. 이 모델은 Llama-3.1-70B-Instruct를 기반으로 RLHF(REINFORCE), Llama-3.1-Nemotron-70B-Reward, HelpSteer2-Preference 프롬프트를 활용해 학습되었습니다.", diff --git a/locales/nl-NL/chat.json b/locales/nl-NL/chat.json index f78b539027..9b9296fe93 100644 --- a/locales/nl-NL/chat.json +++ b/locales/nl-NL/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "{{toolName}} wordt aangeroepen...", "task.activity.toolResult": "{{toolName}} resultaat ontvangen", "task.batchTasks": "{{count}} batch-subtaken", + "task.groupTasks": "{{count}} Parallelle Taken", + "task.groupTasksTitle": "{{agents}} en {{count}} taken van agenten", + "task.groupTasksTitleSimple": "{{agents}} {{count}} taken", "task.instruction": "Taakinstructie", "task.intermediateSteps": "{{count}} tussenstappen", "task.metrics.duration": "(duurde {{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "toolgebruik", "task.status.cancelled": "Taak geannuleerd", "task.status.failed": "Taak mislukt", + "task.status.fetchingDetails": "Details ophalen...", "task.status.initializing": "Taak initialiseren...", "task.subtask": "Subtaak", "thread.divider": "Subonderwerp", diff --git a/locales/nl-NL/models.json b/locales/nl-NL/models.json index 4074ce99b7..c3a13a3b0d 100644 --- a/locales/nl-NL/models.json +++ b/locales/nl-NL/models.json @@ -854,6 +854,58 @@ "meta/llama-3.3-70b.description": "Een perfecte balans tussen prestaties en efficiëntie. Ontworpen voor hoogwaardige conversatie-AI in contentcreatie, zakelijke toepassingen en onderzoek, met sterk taalbegrip voor samenvattingen, classificatie, sentimentanalyse en codegeneratie.", "meta/llama-4-maverick.description": "De Llama 4-familie is een native multimodaal AI-model dat tekst- en multimodale ervaringen ondersteunt, met gebruik van MoE voor toonaangevend tekst- en beeldbegrip. 
Llama 4 Maverick is een 17B-model met 128 experts, aangeboden door DeepInfra.", "meta/llama-4-scout.description": "De Llama 4-familie is een native multimodaal AI-model dat tekst- en multimodale ervaringen ondersteunt, met gebruik van MoE voor toonaangevend tekst- en beeldbegrip. Llama 4 Scout is een 17B-model met 16 experts, aangeboden door DeepInfra.", + "microsoft/Phi-3-medium-128k-instruct.description": "Hetzelfde Phi-3-medium model met een groter contextvenster voor RAG- of few-shot prompts.", + "microsoft/Phi-3-medium-4k-instruct.description": "Een model met 14 miljard parameters van hogere kwaliteit dan Phi-3-mini, gericht op hoogwaardige, redeneerintensieve data.", + "microsoft/Phi-3-mini-128k-instruct.description": "Hetzelfde Phi-3-mini model met een groter contextvenster voor RAG- of few-shot prompts.", + "microsoft/Phi-3-mini-4k-instruct.description": "Het kleinste lid van de Phi-3-familie, geoptimaliseerd voor kwaliteit en lage latentie.", + "microsoft/Phi-3-small-128k-instruct.description": "Hetzelfde Phi-3-small model met een groter contextvenster voor RAG- of few-shot prompts.", + "microsoft/Phi-3-small-8k-instruct.description": "Een model met 7 miljard parameters van hogere kwaliteit dan Phi-3-mini, gericht op hoogwaardige, redeneerintensieve data.", + "microsoft/Phi-3.5-mini-instruct.description": "Een bijgewerkte versie van het Phi-3-mini model.", + "microsoft/Phi-3.5-vision-instruct.description": "Een bijgewerkte versie van het Phi-3-vision model.", + "microsoft/WizardLM-2-8x22B.description": "WizardLM 2 is een taalmodel van Microsoft AI dat uitblinkt in complexe dialogen, meertalige taken, redeneren en assistenttoepassingen.", + "microsoft/wizardlm-2-8x22b.description": "WizardLM-2 8x22B is Microsoft AI’s meest geavanceerde Wizard-model met zeer concurrerende prestaties.", + "mimo-v2-flash.description": "MiMo-V2-Flash: Een efficiënt model voor redeneren, programmeren en agentfundamenten.", + "minicpm-v.description": "MiniCPM-V is het volgende generatie multimodale model van OpenBMB met uitstekende OCR- en multimodale begripscapaciteiten voor uiteenlopende toepassingen.", + "minimax-m2.1.description": "MiniMax-M2.1 is de nieuwste versie in de MiniMax-serie, geoptimaliseerd voor meertalige programmering en complexe real-world taken. Als AI-native model biedt MiniMax-M2.1 aanzienlijke verbeteringen in modelprestaties, ondersteuning voor agentframeworks en aanpassing aan meerdere scenario’s, met als doel bedrijven en individuen sneller te helpen bij het vinden van AI-native werk- en levensstijlen.", + "minimax-m2.description": "MiniMax M2 is een efficiënt groot taalmodel dat speciaal is ontwikkeld voor programmeer- en agentworkflows.", + "minimax/minimax-m2.1.description": "MiniMax-M2.1 is een lichtgewicht, geavanceerd groot taalmodel geoptimaliseerd voor programmeren, agentworkflows en moderne applicatieontwikkeling, met schonere, beknoptere output en snellere reactietijden.", + "minimax/minimax-m2.description": "MiniMax-M2 is een waardevol model dat uitblinkt in programmeer- en agenttaken voor veel technische scenario’s.", + "minimaxai/minimax-m2.description": "MiniMax-M2 is een compact, snel en kosteneffectief MoE-model (230B totaal, 10B actief) gebouwd voor topprestaties in programmeren en agenttaken, met behoud van sterke algemene intelligentie.
Het blinkt uit in bewerkingen over meerdere bestanden, code-uitvoerings- en correctielussen, testvalidatie en complexe toolchains.", + "ministral-3b-latest.description": "Ministral 3B is het topmodel voor edge-toepassingen van Mistral.", + "ministral-8b-latest.description": "Ministral 8B is een zeer kosteneffectief edge-model van Mistral.", + "mistral-ai/Mistral-Large-2411.description": "Het vlaggenschipmodel van Mistral voor complexe taken die grootschalig redeneren of specialisatie vereisen (synthetische tekstgeneratie, codegeneratie, RAG of agents).", + "mistral-ai/Mistral-Nemo.description": "Mistral Nemo is een geavanceerd LLM met toonaangevend redeneervermogen, wereldkennis en programmeercapaciteiten voor zijn formaat.", + "mistral-ai/mistral-small-2503.description": "Mistral Small is geschikt voor elke taaltaak die hoge efficiëntie en lage latentie vereist.", + "mistral-large-instruct.description": "Mistral-Large-Instruct-2407 is een geavanceerd dense LLM met 123 miljard parameters en toonaangevend redeneervermogen, kennis en programmeercapaciteiten.", + "mistral-large-latest.description": "Mistral Large is het vlaggenschipmodel, sterk in meertalige taken, complexe redenering en codegeneratie—ideaal voor hoogwaardige toepassingen.", + "mistral-large.description": "Mistral Large is het vlaggenschipmodel van Mistral, dat codegeneratie, wiskunde en redenering combineert met een contextvenster van 128K.", + "mistral-medium-latest.description": "Mistral Medium 3 levert toonaangevende prestaties tegen 8× lagere kosten en vereenvoudigt implementatie in ondernemingen.", + "mistral-nemo-instruct.description": "Mistral-Nemo-Instruct-2407 is de instructie-afgestemde versie van Mistral-Nemo-Base-2407.", + "mistral-nemo.description": "Mistral Nemo is een efficiënt 12B-model van Mistral AI en NVIDIA.", + "mistral-small-latest.description": "Mistral Small is een kosteneffectieve, snelle en betrouwbare optie voor vertaling, samenvatting en sentimentanalyse.", + "mistral-small.description": "Mistral Small is geschikt voor elke taaltaak die hoge efficiëntie en lage latentie vereist.", + "mistral.description": "Mistral is het 7B-model van Mistral AI, geschikt voor uiteenlopende taaltaken.", + "mistral/codestral-embed.description": "Een code-embeddingmodel voor het embedden van codebases en repositories ter ondersteuning van programmeerassistenten.", + "mistral/codestral.description": "Mistral Codestral 25.01 is een geavanceerd programmeermodel geoptimaliseerd voor lage latentie en veelvuldig gebruik. Het ondersteunt meer dan 80 talen en blinkt uit in FIM, codecorrectie en testgeneratie.", + "mistral/devstral-small.description": "Devstral is een agentisch LLM voor software-engineeringtaken, waardoor het een sterke keuze is voor softwareagents.", + "mistral/magistral-medium.description": "Complex denken ondersteund door diep begrip met transparante redenering die je kunt volgen en verifiëren. Behoudt nauwkeurige redenering over talen heen, zelfs midden in een taak.", + "mistral/magistral-small.description": "Complex denken ondersteund door diep begrip met transparante redenering die je kunt volgen en verifiëren.
Behoudt nauwkeurige redenering over talen heen, zelfs midden in een taak.", + "mistral/ministral-3b.description": "Een compact, efficiënt model voor on-device taken zoals assistenten en lokale analyses, met lage latentieprestaties.", + "mistral/ministral-8b.description": "Een krachtiger model met snellere, geheugenefficiënte inferentie, ideaal voor complexe workflows en veeleisende edge-toepassingen.", + "mistral/mistral-embed.description": "Een algemeen tekst-embeddingmodel voor semantisch zoeken, gelijkenis, clustering en RAG-workflows.", + "mistral/mistral-large.description": "Mistral Large is ideaal voor complexe taken die sterk redeneervermogen of specialisatie vereisen—synthetische tekstgeneratie, codegeneratie, RAG of agents.", + "mistral/mistral-small.description": "Mistral Small is ideaal voor eenvoudige, batchbare taken zoals classificatie, klantenservice of tekstgeneratie, met uitstekende prestaties tegen een betaalbare prijs.", + "mistral/mixtral-8x22b-instruct.description": "8x22B Instruct model. 8x22B is een open MoE-model aangeboden door Mistral.", + "mistral/pixtral-12b.description": "Een 12B-model met beeldbegrip en tekstverwerking.", + "mistral/pixtral-large.description": "Pixtral Large is het tweede model in onze multimodale familie met geavanceerd beeldbegrip. Het verwerkt documenten, grafieken en natuurlijke beelden en behoudt het toonaangevende tekstbegrip van Mistral Large 2.", + "mistralai/Mistral-7B-Instruct-v0.1.description": "Mistral (7B) Instruct staat bekend om sterke prestaties bij veel taaltaken.", + "mistralai/Mistral-7B-Instruct-v0.2.description": "Mistral (7B) Instruct v0.2 verbetert instructieverwerking en nauwkeurigheid van resultaten.", + "mistralai/Mistral-7B-Instruct-v0.3.description": "Mistral (7B) Instruct v0.3 biedt efficiënte verwerking en sterk taalbegrip voor uiteenlopende toepassingen.", + "mistralai/Mistral-7B-v0.1.description": "Mistral 7B is compact maar krachtig, sterk in batchverwerking en eenvoudige taken zoals classificatie en tekstgeneratie, met solide redeneervermogen.", + "mistralai/Mixtral-8x22B-Instruct-v0.1.description": "Mixtral-8x22B Instruct (141B) is een zeer groot LLM voor zware werklasten.", + "mistralai/Mixtral-8x7B-Instruct-v0.1.description": "Mixtral-8x7B Instruct (46.7B) biedt hoge capaciteit voor grootschalige gegevensverwerking.", + "mistralai/Mixtral-8x7B-v0.1.description": "Mixtral 8x7B is een sparse MoE-model dat de inferentiesnelheid verhoogt, geschikt voor meertalige en codegeneratietaken.", + "mistralai/mistral-nemo.description": "Mistral Nemo is een 7.3B-model met meertalige ondersteuning en sterke programmeerprestaties.", "moonshot-v1-128k-vision-preview.description": "Kimi vision-modellen (waaronder moonshot-v1-8k-vision-preview/moonshot-v1-32k-vision-preview/moonshot-v1-128k-vision-preview) begrijpen beeldinhoud zoals tekst, kleuren en objectvormen.", "moonshot-v1-128k.description": "Moonshot V1 128K biedt een ultralange context voor het genereren van zeer lange teksten, met ondersteuning tot 128.000 tokens voor onderzoeks-, academische en grootschalige documenttoepassingen.", "moonshot-v1-32k-vision-preview.description": "Kimi vision-modellen (waaronder moonshot-v1-8k-vision-preview/moonshot-v1-32k-vision-preview/moonshot-v1-128k-vision-preview) begrijpen beeldinhoud zoals tekst, kleuren en objectvormen.", diff --git a/locales/pl-PL/chat.json b/locales/pl-PL/chat.json index a3c12c6a22..0919a1137e 100644 --- a/locales/pl-PL/chat.json +++ b/locales/pl-PL/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling":
"Wywoływanie {{toolName}}...", "task.activity.toolResult": "Otrzymano wynik {{toolName}}", "task.batchTasks": "{{count}} zadań zbiorczych", + "task.groupTasks": "{{count}} równoległych zadań", + "task.groupTasksTitle": "{{agents}} i {{count}} zadań agentów", + "task.groupTasksTitleSimple": "{{agents}} {{count}} zadań", "task.instruction": "Instrukcja zadania", "task.intermediateSteps": "{{count}} kroków pośrednich", "task.metrics.duration": "(zajęło {{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "użycia narzędzi", "task.status.cancelled": "Zadanie anulowane", "task.status.failed": "Zadanie nie powiodło się", + "task.status.fetchingDetails": "Pobieranie szczegółów...", "task.status.initializing": "Inicjalizacja zadania...", "task.subtask": "Podzadanie", "thread.divider": "Podtemat", diff --git a/locales/pl-PL/models.json b/locales/pl-PL/models.json index ab87627928..4da489e0bf 100644 --- a/locales/pl-PL/models.json +++ b/locales/pl-PL/models.json @@ -760,6 +760,49 @@ "learnlm-1.5-pro-experimental.description": "LearnLM to eksperymentalny model zadaniowy, trenowany zgodnie z zasadami nauki o uczeniu się, aby podążać za instrukcjami systemowymi w scenariuszach edukacyjnych, działając jako ekspert-nauczyciel.", "learnlm-2.0-flash-experimental.description": "LearnLM to eksperymentalny model zadaniowy, trenowany zgodnie z zasadami nauki o uczeniu się, aby podążać za instrukcjami systemowymi w scenariuszach edukacyjnych, działając jako ekspert-nauczyciel.", "lite.description": "Spark Lite to lekki LLM o ultraniskim opóźnieniu i wydajnym przetwarzaniu. Jest całkowicie darmowy i obsługuje wyszukiwanie w czasie rzeczywistym. Szybkie odpowiedzi sprawdzają się na urządzeniach o niskiej mocy obliczeniowej i przy dostrajaniu modeli, zapewniając wysoką efektywność kosztową i inteligentne doświadczenie, szczególnie w scenariuszach pytań i odpowiedzi, generowania treści i wyszukiwania.", + "llama-3.1-70b-versatile.description": "Llama 3.1 70B zapewnia zaawansowane wnioskowanie AI dla złożonych zastosowań, oferując wysoką wydajność i precyzję przy dużym obciążeniu obliczeniowym.", + "llama-3.1-8b-instant.description": "Llama 3.1 8B to wydajny model generujący tekst w szybkim tempie, idealny do zastosowań na dużą skalę przy niskich kosztach.", + "llama-3.1-instruct.description": "Model Llama 3.1 dostrojony do instrukcji jest zoptymalizowany pod kątem rozmów i przewyższa wiele otwartych modeli czatu w standardowych branżowych testach.", + "llama-3.2-11b-vision-instruct.description": "Silne wnioskowanie wizualne na obrazach wysokiej rozdzielczości, idealne do aplikacji zrozumienia obrazu.", + "llama-3.2-11b-vision-preview.description": "Llama 3.2 został zaprojektowany do zadań łączących obraz i tekst, doskonale radząc sobie z opisywaniem obrazów i wizualnym Q&A, łącząc generowanie języka z rozumieniem wizualnym.", + "llama-3.2-90b-vision-instruct.description": "Zaawansowane wnioskowanie wizualne dla aplikacji agentów rozumiejących obrazy.", + "llama-3.2-90b-vision-preview.description": "Llama 3.2 został zaprojektowany do zadań łączących obraz i tekst, doskonale radząc sobie z opisywaniem obrazów i wizualnym Q&A, łącząc generowanie języka z rozumieniem wizualnym.", + "llama-3.2-vision-instruct.description": "Model Llama 3.2-Vision dostrojony do instrukcji jest zoptymalizowany pod kątem rozpoznawania obrazów, wnioskowania wizualnego, opisywania oraz ogólnego Q&A opartego na obrazach.", + "llama-3.3-70b-versatile.description": "Meta Llama 3.3 to wielojęzyczny model LLM z 70 miliardami 
parametrów (tekst wejściowy/wyjściowy), dostępny w wersjach wstępnie wytrenowanej i dostrojonej do instrukcji. Wersja dostrojona do instrukcji jest zoptymalizowana pod kątem wielojęzycznych rozmów i przewyższa wiele otwartych i zamkniętych modeli czatu w standardowych testach branżowych.", + "llama-3.3-70b.description": "Llama 3.3 70B: średnio-duży model Llama łączący wnioskowanie i przepustowość.", + "llama-3.3-instruct.description": "Model Llama 3.3 dostrojony do instrukcji jest zoptymalizowany pod kątem rozmów i przewyższa wiele otwartych modeli czatu w standardowych testach branżowych.", + "llama3-70b-8192.description": "Meta Llama 3 70B oferuje wyjątkową zdolność obsługi złożonych zadań w wymagających projektach.", + "llama3-8b-8192.description": "Meta Llama 3 8B zapewnia silne wnioskowanie w różnorodnych scenariuszach.", + "llama3-groq-70b-8192-tool-use-preview.description": "Llama 3 Groq 70B Tool Use oferuje zaawansowane wywoływanie narzędzi do efektywnej obsługi złożonych zadań.", + "llama3-groq-8b-8192-tool-use-preview.description": "Llama 3 Groq 8B Tool Use jest zoptymalizowany pod kątem wydajnego użycia narzędzi i szybkiego przetwarzania równoległego.", + "llama3.1-8b.description": "Llama 3.1 8B: mały, niskolatencyjny wariant Llama do lekkiego wnioskowania online i czatu.", + "llama3.1.description": "Llama 3.1 to flagowy model Meta, skalujący się do 405 miliardów parametrów, przeznaczony do złożonych dialogów, tłumaczeń wielojęzycznych i analizy danych.", + "llama3.1:405b.description": "Llama 3.1 to flagowy model Meta, skalujący się do 405 miliardów parametrów, przeznaczony do złożonych dialogów, tłumaczeń wielojęzycznych i analizy danych.", + "llama3.1:70b.description": "Llama 3.1 to flagowy model Meta, skalujący się do 405 miliardów parametrów, przeznaczony do złożonych dialogów, tłumaczeń wielojęzycznych i analizy danych.", + "llava-v1.5-7b-4096-preview.description": "LLaVA 1.5 7B łączy przetwarzanie wizualne z generowaniem złożonych wyników na podstawie danych wizualnych.", + "llava.description": "LLaVA to model multimodalny łączący enkoder obrazu i Vicunę, zapewniający silne zrozumienie języka i obrazu.", + "llava:13b.description": "LLaVA to model multimodalny łączący enkoder obrazu i Vicunę, zapewniający silne zrozumienie języka i obrazu.", + "llava:34b.description": "LLaVA to model multimodalny łączący enkoder obrazu i Vicunę, zapewniający silne zrozumienie języka i obrazu.", + "magistral-medium-latest.description": "Magistral Medium 1.2 to zaawansowany model wnioskowania od Mistral AI (wrzesień 2025) z obsługą wizji.", + "magistral-small-2509.description": "Magistral Small 1.2 to otwartoźródłowy, mały model wnioskowania od Mistral AI (wrzesień 2025) z obsługą wizji.", + "mathstral.description": "MathΣtral został stworzony do badań naukowych i matematycznego wnioskowania, oferując silne możliwości obliczeniowe i wyjaśniające.", + "max-32k.description": "Spark Max 32K obsługuje przetwarzanie dużego kontekstu z lepszym rozumieniem i wnioskowaniem, wspierając wejścia do 32K tokenów dla długich dokumentów i prywatnych zapytań wiedzy.", + "megrez-3b-instruct.description": "Megrez 3B Instruct to mały, wydajny model od Wuwen Xinqiong.", + "meituan/longcat-flash-chat.description": "Otwartoźródłowy model bazowy bez wnioskowania od Meituan, zoptymalizowany do dialogów i zadań agentowych, silny w użyciu narzędzi i złożonych interakcjach wieloetapowych.", + "meta-llama-3-70b-instruct.description": "Potężny model z 70 miliardami parametrów, doskonały we wnioskowaniu, kodowaniu i
szerokich zadaniach językowych.", + "meta-llama-3-8b-instruct.description": "Wszechstronny model z 8 miliardami parametrów, zoptymalizowany do czatu i generowania tekstu.", + "meta-llama-3.1-405b-instruct.description": "Model tekstowy Llama 3.1 dostrojony do instrukcji, zoptymalizowany do wielojęzycznego czatu, osiągający wysokie wyniki w standardowych testach branżowych wśród otwartych i zamkniętych modeli czatu.", + "meta-llama-3.1-70b-instruct.description": "Model tekstowy Llama 3.1 dostrojony do instrukcji, zoptymalizowany do wielojęzycznego czatu, osiągający wysokie wyniki w standardowych testach branżowych wśród otwartych i zamkniętych modeli czatu.", + "meta-llama-3.1-8b-instruct.description": "Model tekstowy Llama 3.1 dostrojony do instrukcji, zoptymalizowany do wielojęzycznego czatu, osiągający wysokie wyniki w standardowych testach branżowych wśród otwartych i zamkniętych modeli czatu.", + "meta-llama/Llama-2-13b-chat-hf.description": "LLaMA-2 Chat (13B) zapewnia solidne przetwarzanie języka i dobre doświadczenie czatu.", + "meta-llama/Llama-2-70b-hf.description": "LLaMA-2 zapewnia solidne przetwarzanie języka i dobre doświadczenie interakcji.", + "meta-llama/Llama-3-70b-chat-hf.description": "Llama 3 70B Instruct Reference to potężny model czatu do złożonych dialogów.", + "meta-llama/Llama-3-8b-chat-hf.description": "Llama 3 8B Instruct Reference oferuje wsparcie wielojęzyczne i szeroką wiedzę dziedzinową.", + "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo.description": "LLaMA 3.2 został zaprojektowany do zadań łączących obraz i tekst. Doskonale radzi sobie z opisywaniem obrazów i wizualnym Q&A, łącząc generowanie języka z rozumieniem wizualnym.", + "meta-llama/Llama-3.2-3B-Instruct-Turbo.description": "LLaMA 3.2 został zaprojektowany do zadań łączących obraz i tekst. Doskonale radzi sobie z opisywaniem obrazów i wizualnym Q&A, łącząc generowanie języka z rozumieniem wizualnym.", + "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo.description": "LLaMA 3.2 został zaprojektowany do zadań łączących obraz i tekst. Doskonale radzi sobie z opisywaniem obrazów i wizualnym Q&A, łącząc generowanie języka z rozumieniem wizualnym.", + "meta-llama/Llama-3.3-70B-Instruct-Turbo.description": "Meta Llama 3.3 to wielojęzyczny model LLM z 70 miliardami parametrów (tekst wejściowy/wyjściowy), dostępny w wersjach wstępnie wytrenowanej i dostrojonej do instrukcji. Wersja dostrojona do instrukcji jest zoptymalizowana pod kątem wielojęzycznego czatu i przewyższa wiele otwartych i zamkniętych modeli czatu w standardowych testach branżowych.", + "meta-llama/Llama-Vision-Free.description": "LLaMA 3.2 został zaprojektowany do zadań łączących obraz i tekst. Doskonale radzi sobie z opisywaniem obrazów i wizualnym Q&A, łącząc generowanie języka z rozumieniem wizualnym.", "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3 to otwarty model językowy (LLM) stworzony z myślą o programistach, naukowcach i przedsiębiorstwach, zaprojektowany, by wspierać ich w budowaniu, eksperymentowaniu i odpowiedzialnym skalowaniu pomysłów z zakresu generatywnej sztucznej inteligencji. 
Jako fundament globalnej innowacji społecznościowej, doskonale sprawdza się przy ograniczonych zasobach obliczeniowych, na urządzeniach brzegowych oraz przy szybszym czasie trenowania.", "meta/Llama-3.2-11B-Vision-Instruct.description": "Zaawansowane rozumowanie obrazów w wysokiej rozdzielczości, idealne do aplikacji zrozumienia wizualnego.", "meta/Llama-3.2-90B-Vision-Instruct.description": "Zaawansowane rozumowanie obrazów dla aplikacji agentów opartych na zrozumieniu wizualnym.", diff --git a/locales/pt-BR/chat.json b/locales/pt-BR/chat.json index 52a48c9d8b..83e2960910 100644 --- a/locales/pt-BR/chat.json +++ b/locales/pt-BR/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "Chamando {{toolName}}...", "task.activity.toolResult": "Resultado de {{toolName}} recebido", "task.batchTasks": "{{count}} Subtarefas em Lote", + "task.groupTasks": "{{count}} Tarefas Paralelas", + "task.groupTasksTitle": "{{agents}} e {{count}} tarefas de agentes", + "task.groupTasksTitleSimple": "{{agents}} {{count}} tarefas", "task.instruction": "Instruções da Tarefa", "task.intermediateSteps": "{{count}} etapas intermediárias", "task.metrics.duration": "(levou {{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "usos de ferramenta", "task.status.cancelled": "Tarefa Cancelada", "task.status.failed": "Falha na Tarefa", + "task.status.fetchingDetails": "Buscando detalhes...", "task.status.initializing": "Inicializando tarefa...", "task.subtask": "Subtarefa", "thread.divider": "Subtópico", diff --git a/locales/pt-BR/models.json b/locales/pt-BR/models.json index f3447e05b1..95043450ac 100644 --- a/locales/pt-BR/models.json +++ b/locales/pt-BR/models.json @@ -1097,6 +1097,44 @@ "qwen3-32b.description": "Qwen3 32B é adequado para tarefas gerais que exigem maior capacidade de compreensão.", "qwen3-4b.description": "Qwen3 4B é adequado para aplicativos de pequeno a médio porte e inferência local.", "qwen3-8b.description": "Qwen3 8B é um modelo leve com implantação flexível para cargas de trabalho com alta concorrência.", + "qwen3-coder-30b-a3b-instruct.description": "Modelo de código Qwen de código aberto. O qwen3-coder-30b-a3b-instruct mais recente é baseado no Qwen3 e oferece fortes habilidades de agente de codificação, uso de ferramentas e interação com o ambiente para programação autônoma, com excelente desempenho de código e sólida capacidade geral.", + "qwen3-coder-480b-a35b-instruct.description": "Qwen3 Coder 480B A35B Instruct é um modelo de código de ponta para programação multilíngue e compreensão de código complexa.", + "qwen3-coder-flash.description": "Modelo de código Qwen. A série Qwen3-Coder mais recente é baseada no Qwen3 e oferece fortes habilidades de agente de codificação, uso de ferramentas e interação com o ambiente para programação autônoma, com excelente desempenho de código e sólida capacidade geral.", + "qwen3-coder-plus.description": "Modelo de código Qwen. A série Qwen3-Coder mais recente é baseada no Qwen3 e oferece fortes habilidades de agente de codificação, uso de ferramentas e interação com o ambiente para programação autônoma, com excelente desempenho de código e sólida capacidade geral.", + "qwen3-coder:480b.description": "Modelo de alto desempenho da Alibaba com suporte a contexto longo para tarefas de agente e codificação.", + "qwen3-max-preview.description": "Modelo Qwen com melhor desempenho para tarefas complexas e de múltiplas etapas. 
A prévia oferece suporte a raciocínio.", + "qwen3-max.description": "Os modelos Qwen3 Max apresentam grandes avanços em relação à série 2.5 em capacidade geral, compreensão de chinês/inglês, seguimento de instruções complexas, tarefas subjetivas abertas, capacidade multilíngue e uso de ferramentas, com menos alucinações. O qwen3-max mais recente melhora a programação agente e o uso de ferramentas em relação ao qwen3-max-preview. Esta versão atinge o estado da arte e atende a necessidades mais complexas de agentes.", + "qwen3-next-80b-a3b-instruct.description": "Modelo Qwen3 de próxima geração, de código aberto e sem raciocínio. Em comparação com a versão anterior (Qwen3-235B-A22B-Instruct-2507), possui melhor compreensão do chinês, raciocínio lógico mais forte e geração de texto aprimorada.", + "qwen3-next-80b-a3b-thinking.description": "Qwen3 Next 80B A3B Thinking é a versão de raciocínio de ponta para tarefas complexas.", + "qwen3-omni-flash.description": "Qwen-Omni aceita entradas combinadas de texto, imagens, áudio e vídeo, e gera saídas em texto ou fala. Oferece múltiplos estilos de voz natural, suporte a fala multilíngue e dialetal, e se adapta a casos como redação, reconhecimento visual e assistentes de voz.", + "qwen3-vl-235b-a22b-instruct.description": "Qwen3 VL 235B A22B Instruct é um modelo multimodal de ponta para compreensão e criação exigentes.", + "qwen3-vl-235b-a22b-thinking.description": "Qwen3 VL 235B A22B Thinking é a versão de raciocínio de ponta para planejamento e raciocínio multimodal complexos.", + "qwen3-vl-30b-a3b-instruct.description": "Qwen3 VL 30B A3B Instruct é um modelo multimodal de grande porte que equilibra precisão e desempenho de raciocínio.", + "qwen3-vl-30b-a3b-thinking.description": "Qwen3 VL 30B A3B Thinking é uma versão de raciocínio profundo para tarefas multimodais complexas.", + "qwen3-vl-32b-instruct.description": "Qwen3 VL 32B Instruct é um modelo multimodal ajustado por instruções para perguntas e respostas imagem-texto de alta qualidade e criação.", + "qwen3-vl-32b-thinking.description": "Qwen3 VL 32B Thinking é uma versão multimodal de raciocínio profundo para análise complexa e em cadeia.", + "qwen3-vl-8b-instruct.description": "Qwen3 VL 8B Instruct é um modelo multimodal leve para perguntas e respostas visuais do dia a dia e integração com aplicativos.", + "qwen3-vl-8b-thinking.description": "Qwen3 VL 8B Thinking é um modelo multimodal com raciocínio em cadeia para raciocínio visual detalhado.", + "qwen3-vl-flash.description": "Qwen3 VL Flash: versão leve e de raciocínio rápido para solicitações com baixa latência ou alto volume.", + "qwen3-vl-plus.description": "Qwen VL é um modelo de geração de texto com compreensão visual. Pode realizar OCR, além de resumir e raciocinar, como extrair atributos de fotos de produtos ou resolver problemas a partir de imagens.", + "qwen3.description": "Qwen3 é o modelo de linguagem de próxima geração da Alibaba com desempenho robusto em diversos casos de uso.", + "qwq-32b-preview.description": "QwQ é um modelo de pesquisa experimental da Qwen focado em raciocínio aprimorado.", + "qwq-32b.description": "QwQ é um modelo de raciocínio da família Qwen. Em comparação com modelos ajustados por instruções padrão, oferece raciocínio e pensamento que aumentam significativamente o desempenho em tarefas complexas. 
O QwQ-32B é um modelo de raciocínio de porte médio que rivaliza com os principais modelos como DeepSeek-R1 e o1-mini.", + "qwq-plus.description": "O modelo de raciocínio QwQ treinado com base no Qwen2.5 usa aprendizado por reforço (RL) para melhorar significativamente o raciocínio. Métricas principais em matemática/código (AIME 24/25, LiveCodeBench) e benchmarks gerais (IFEval, LiveBench) atingem o nível completo do DeepSeek-R1.", + "qwq.description": "QwQ é um modelo de raciocínio da família Qwen. Em comparação com modelos ajustados por instruções padrão, oferece habilidades de pensamento e raciocínio que melhoram significativamente o desempenho em tarefas difíceis. O QwQ-32B é um modelo de porte médio que compete com os principais modelos como DeepSeek-R1 e o1-mini.", + "qwq_32b.description": "Modelo de raciocínio de porte médio da família Qwen. Em comparação com modelos ajustados por instruções padrão, as habilidades de pensamento e raciocínio do QwQ aumentam significativamente o desempenho em tarefas difíceis.", + "r1-1776.description": "R1-1776 é uma variante pós-treinada do DeepSeek R1 projetada para fornecer informações factuais sem censura e imparciais.", + "solar-mini-ja.description": "Solar Mini (Ja) estende o Solar Mini com foco no japonês, mantendo desempenho eficiente e forte em inglês e coreano.", + "solar-mini.description": "Solar Mini é um LLM compacto que supera o GPT-3.5, com forte capacidade multilíngue suportando inglês e coreano, oferecendo uma solução eficiente e de baixo custo.", + "solar-pro.description": "Solar Pro é um LLM de alta inteligência da Upstage, focado em seguir instruções em uma única GPU, com pontuações IFEval acima de 80. Atualmente suporta inglês; o lançamento completo está previsto para novembro de 2024 com suporte expandido a idiomas e contexto mais longo.", + "sonar-deep-research.description": "Deep Research realiza pesquisas abrangentes em nível de especialista e as sintetiza em relatórios acessíveis e acionáveis.", + "sonar-pro.description": "Produto de busca avançada com fundamentação de pesquisa para consultas complexas e seguimentos.", + "sonar-reasoning-pro.description": "Produto de busca avançada com fundamentação de pesquisa para consultas complexas e seguimentos.", + "sonar-reasoning.description": "Produto de busca avançada com fundamentação de pesquisa para consultas complexas e seguimentos.", + "sonar.description": "Produto leve com fundamentação de busca, mais rápido e barato que o Sonar Pro.", + "spark-x.description": "Atualizações do X1.5: (1) adiciona modo de pensamento dinâmico controlado pelo campo `thinking`; (2) comprimento de contexto maior com entrada de 64K e saída de 64K; (3) suporte a FunctionCall.", + "stable-diffusion-3-medium.description": "O mais recente modelo de texto para imagem da Stability AI. 
Esta versão melhora significativamente a qualidade da imagem, compreensão de texto e diversidade de estilo, interpretando comandos em linguagem natural complexa com mais precisão e gerando imagens mais precisas e diversas.", + "stable-diffusion-3.5-large-turbo.description": "stable-diffusion-3.5-large-turbo aplica difusão adversarial destilada (ADD) ao stable-diffusion-3.5-large para maior velocidade.", "stable-diffusion-3.5-large.description": "stable-diffusion-3.5-large é um modelo MMDiT de texto para imagem com 800 milhões de parâmetros, oferecendo excelente qualidade e alinhamento com prompts, suportando imagens de 1 megapixel e execução eficiente em hardwares de consumo.", "stable-diffusion-v1.5.description": "stable-diffusion-v1.5 é inicializado a partir do checkpoint v1.2 e ajustado por 595 mil etapas no conjunto \"laion-aesthetics v2 5+\" com resolução de 512x512, reduzindo o condicionamento de texto em 10% para melhorar a amostragem com orientação livre de classificadores.", "stable-diffusion-xl-base-1.0.description": "Um modelo de texto para imagem de código aberto da Stability AI com geração criativa de imagens líder na indústria. Possui forte compreensão de instruções e suporta definições de prompt reverso para geração precisa.", @@ -1132,6 +1170,60 @@ "thudm/glm-4-9b-chat.description": "Versão de código aberto do mais recente modelo pré-treinado GLM-4 da Zhipu AI.", "thudm/glm-z1-32b.description": "GLM-Z1-32B-0414 é uma variante de raciocínio aprimorada do GLM-4-32B, projetada para resolução de problemas focados em matemática, lógica e código. Aplica RL expandido (preferência pareada específica e geral) para melhorar tarefas complexas de múltiplas etapas. Em comparação com o GLM-4-32B, o Z1 melhora significativamente o raciocínio estruturado e a capacidade em domínios formais.\n\nSuporta etapas de “pensamento” via engenharia de prompt, melhora a coerência em saídas longas e é otimizado para fluxos de trabalho de agentes com contexto longo (via YaRN), chamadas de ferramentas JSON e amostragem refinada para raciocínio estável. Ideal para casos que exigem derivações formais ou de múltiplas etapas cuidadosas.", "thudm/glm-z1-rumination-32b.description": "GLM Z1 Rumination 32B é um modelo de raciocínio profundo da série GLM-4-Z1, otimizado para tarefas abertas e complexas que exigem pensamento prolongado. Baseado no glm-4-32b-0414, adiciona estágios extras de RL e alinhamento em múltiplas fases, introduzindo uma capacidade de “ruminação” que simula processamento cognitivo estendido. Isso inclui raciocínio iterativo, análise em múltiplos saltos e fluxos de trabalho com ferramentas como busca, recuperação e síntese com consciência de citação.\n\nDestaca-se em redação científica, análise comparativa e perguntas e respostas complexas. Suporta chamadas de função para primitivas de busca/navegação (`search`, `click`, `open`, `finish`) em pipelines de agentes. O comportamento de ruminação é controlado por laços de múltiplas rodadas com modelagem de recompensa baseada em regras e mecanismos de decisão atrasada, testado em frameworks de pesquisa profunda como o stack interno de alinhamento da OpenAI. Esta variante prioriza profundidade em vez de velocidade.", + "tngtech/deepseek-r1t-chimera:free.description": "DeepSeek-R1T-Chimera é criado pela fusão do DeepSeek-R1 e DeepSeek-V3 (0324), combinando o raciocínio do R1 com a eficiência de tokens do V3. 
Baseado no DeepSeek-MoE Transformer e otimizado para geração geral de texto.\n\nFunde pesos pré-treinados para equilibrar raciocínio, eficiência e seguimento de instruções. Lançado sob a licença MIT para uso em pesquisa e comercial.", + "togethercomputer/StripedHyena-Nous-7B.description": "StripedHyena Nous (7B) oferece maior eficiência computacional por meio de sua arquitetura e estratégia.", + "tts-1-hd.description": "O mais recente modelo de texto para fala otimizado para qualidade.", + "tts-1.description": "O mais recente modelo de texto para fala otimizado para velocidade em tempo real.", + "upstage/SOLAR-10.7B-Instruct-v1.0.description": "Upstage SOLAR Instruct v1 (11B) é ajustado para tarefas de instrução precisas com forte desempenho linguístico.", + "us.anthropic.claude-3-5-sonnet-20241022-v2:0.description": "Claude 3.5 Sonnet eleva o padrão da indústria, superando concorrentes e o Claude 3 Opus em avaliações amplas, mantendo velocidade e custo intermediários.", + "us.anthropic.claude-3-7-sonnet-20250219-v1:0.description": "Claude 3.7 Sonnet é o modelo de próxima geração mais rápido da Anthropic. Em comparação com o Claude 3 Haiku, apresenta melhorias em várias habilidades e supera o antigo carro-chefe Claude 3 Opus em muitos benchmarks de inteligência.", + "us.anthropic.claude-haiku-4-5-20251001-v1:0.description": "Claude Haiku 4.5 é o modelo Haiku mais rápido e inteligente da Anthropic, com velocidade relâmpago e raciocínio estendido.", + "us.anthropic.claude-sonnet-4-5-20250929-v1:0.description": "Claude Sonnet 4.5 é o modelo mais inteligente da Anthropic até o momento.", + "v0-1.0-md.description": "v0-1.0-md é um modelo legado disponibilizado via API v0.", + "v0-1.5-lg.description": "v0-1.5-lg é adequado para tarefas avançadas de pensamento ou raciocínio.", + "v0-1.5-md.description": "v0-1.5-md é adequado para tarefas cotidianas e geração de interfaces.", + "vercel/v0-1.0-md.description": "Acesse os modelos por trás do v0 para gerar, corrigir e otimizar aplicativos web modernos com raciocínio específico de framework e conhecimento atualizado.", + "vercel/v0-1.5-md.description": "Acesse os modelos por trás do v0 para gerar, corrigir e otimizar aplicativos web modernos com raciocínio específico de framework e conhecimento atualizado.", + "volcengine/doubao-seed-code.description": "Doubao-Seed-Code é o LLM da Volcano Engine da ByteDance otimizado para programação agente, com forte desempenho em benchmarks de programação e agentes, com suporte a contexto de 256K.", + "wan2.2-t2i-flash.description": "Wanxiang 2.2 Speed é o modelo mais recente com melhorias em criatividade, estabilidade e realismo, oferecendo geração rápida e alto valor.", + "wan2.2-t2i-plus.description": "Wanxiang 2.2 Pro é o modelo mais recente com melhorias em criatividade, estabilidade e realismo, produzindo detalhes mais ricos.", + "wanx-v1.description": "Modelo base de texto para imagem. Corresponde ao Tongyi Wanxiang 1.0 General.", + "wanx2.0-t2i-turbo.description": "Excelente em retratos texturizados com velocidade moderada e menor custo. Corresponde ao Tongyi Wanxiang 2.0 Speed.", + "wanx2.1-t2i-plus.description": "Versão totalmente atualizada com mais detalhes de imagem e velocidade ligeiramente menor. Corresponde ao Tongyi Wanxiang 2.1 Pro.", + "wanx2.1-t2i-turbo.description": "Versão totalmente atualizada com geração rápida, qualidade geral forte e alto valor. 
Corresponde ao Tongyi Wanxiang 2.1 Speed.", + "whisper-1.description": "Modelo geral de reconhecimento de fala com suporte a ASR multilíngue, tradução de fala e identificação de idioma.", + "wizardlm2.description": "WizardLM 2 é um modelo de linguagem da Microsoft AI que se destaca em diálogos complexos, tarefas multilíngues, raciocínio e assistentes.", + "wizardlm2:8x22b.description": "WizardLM 2 é um modelo de linguagem da Microsoft AI que se destaca em diálogos complexos, tarefas multilíngues, raciocínio e assistentes.", + "x-ai/grok-4-fast-non-reasoning.description": "Grok 4 Fast (Sem Raciocínio) é o modelo multimodal de alta vazão e baixo custo da xAI (com suporte a janela de contexto de 2M) para cenários sensíveis à latência e custo que não exigem raciocínio interno. Está ao lado da versão com raciocínio do Grok 4 Fast, e o raciocínio pode ser ativado via parâmetro de API quando necessário. Prompts e respostas podem ser usados pela xAI ou OpenRouter para melhorar modelos futuros.", + "x-ai/grok-4-fast.description": "Grok 4 Fast é o modelo de alta vazão e baixo custo da xAI (com suporte a janela de contexto de 2M), ideal para casos de uso com alta concorrência e contexto longo.", + "x-ai/grok-4.1-fast-non-reasoning.description": "Grok 4 Fast (Sem Raciocínio) é o modelo multimodal de alta vazão e baixo custo da xAI (com suporte a janela de contexto de 2M) para cenários sensíveis à latência e custo que não exigem raciocínio interno. Está ao lado da versão com raciocínio do Grok 4 Fast, e o raciocínio pode ser ativado via parâmetro de API quando necessário. Prompts e respostas podem ser usados pela xAI ou OpenRouter para melhorar modelos futuros.", + "x-ai/grok-4.1-fast.description": "Grok 4 Fast é o modelo de alta vazão e baixo custo da xAI (com suporte a janela de contexto de 2M), ideal para casos de uso com alta concorrência e contexto longo.", + "x-ai/grok-4.description": "Grok 4 é o modelo de raciocínio de ponta da xAI com forte capacidade de raciocínio e multimodalidade.", + "x-ai/grok-code-fast-1.description": "Grok Code Fast 1 é o modelo de código rápido da xAI com saída legível e amigável para engenharia.", + "xai/grok-2-vision.description": "Grok 2 Vision se destaca em tarefas visuais, oferecendo desempenho de ponta em raciocínio visual matemático (MathVista) e perguntas e respostas em documentos (DocVQA). Lida com documentos, gráficos, tabelas, capturas de tela e fotos.", + "xai/grok-2.description": "Grok 2 é um modelo de fronteira com raciocínio de ponta, forte desempenho em chat, codificação e raciocínio, superando Claude 3.5 Sonnet e GPT-4 Turbo no LMSYS.", + "xai/grok-3-fast.description": "Modelo de ponta da xAI que se destaca em casos de uso corporativos como extração de dados, codificação e sumarização, com profundo conhecimento em finanças, saúde, direito e ciência. A variante rápida roda em infraestrutura mais ágil para respostas muito mais rápidas com maior custo por token.", + "xai/grok-3-mini-fast.description": "Modelo leve da xAI que pensa antes de responder, ideal para tarefas simples ou baseadas em lógica sem necessidade de conhecimento profundo. Rastros de raciocínio brutos estão disponíveis. A variante rápida roda em infraestrutura mais ágil para respostas muito mais rápidas com maior custo por token.", + "xai/grok-3-mini.description": "Modelo leve da xAI que pensa antes de responder, ideal para tarefas simples ou baseadas em lógica sem necessidade de conhecimento profundo. 
Rastros de raciocínio brutos estão disponíveis.", + "xai/grok-3.description": "Modelo de ponta da xAI que se destaca em casos de uso corporativos como extração de dados, codificação e sumarização, com profundo conhecimento em finanças, saúde, direito e ciência.", + "xai/grok-4.description": "O mais novo modelo de ponta da xAI com desempenho incomparável em linguagem natural, matemática e raciocínio — um modelo versátil ideal.", + "yi-large-fc.description": "Baseado no yi-large com chamadas de ferramentas aprimoradas, adequado para cenários de agentes e fluxos de trabalho.", + "yi-large-preview.description": "Uma versão inicial; recomenda-se o uso do yi-large (mais recente).", + "yi-large-rag.description": "Serviço avançado baseado no yi-large, combinando recuperação e geração para respostas precisas com busca em tempo real na web.", + "yi-large-turbo.description": "Valor e desempenho excepcionais, ajustado para um forte equilíbrio entre qualidade, velocidade e custo.", + "yi-large.description": "Novo modelo com 100 bilhões de parâmetros com forte desempenho em perguntas e respostas e geração de texto.", + "yi-lightning-lite.description": "Versão leve; recomenda-se o uso do yi-lightning.", + "yi-lightning.description": "Modelo de alto desempenho mais recente com inferência mais rápida e saída de alta qualidade.", + "yi-medium-200k.description": "Modelo de contexto longo com 200K para compreensão e geração profunda de textos longos.", + "yi-medium.description": "Modelo de porte médio ajustado com capacidade e valor equilibrados, otimizado para seguir instruções.", + "yi-spark.description": "Modelo compacto e rápido com capacidades reforçadas em matemática e codificação.", + "yi-vision-v2.description": "Modelo de visão para tarefas complexas com forte compreensão e análise de múltiplas imagens.", + "yi-vision.description": "Modelo de visão para tarefas complexas com forte compreensão e análise de imagens.", + "z-ai/glm-4.5-air.description": "GLM 4.5 Air é uma variante leve do GLM 4.5 para cenários sensíveis a custo, mantendo forte raciocínio.", + "z-ai/glm-4.5.description": "GLM 4.5 é o modelo de ponta da Z.AI com raciocínio híbrido otimizado para engenharia e tarefas com contexto longo.", + "z-ai/glm-4.6.description": "GLM 4.6 é o modelo de ponta da Z.AI com comprimento de contexto estendido e capacidade de codificação.", + "z-ai/glm-4.7.description": "GLM-4.7 é o mais novo modelo de ponta da Zhipu, oferecendo capacidades gerais aprimoradas, respostas mais simples e naturais, e uma experiência de escrita mais imersiva.", + "zai-glm-4.6.description": "Desempenho sólido em tarefas de codificação e raciocínio, com suporte a streaming e chamadas de ferramentas, adequado para codificação agente e raciocínio complexo.", "zai-org/GLM-4.5-Air.description": "GLM-4.5-Air é um modelo base para aplicações com agentes, utilizando uma arquitetura Mixture-of-Experts. Ele é otimizado para uso de ferramentas, navegação na web, engenharia de software e codificação frontend, e integra-se com agentes de código como Claude Code e Roo Code. Utiliza raciocínio híbrido para lidar tanto com cenários complexos quanto com situações do dia a dia.", "zai-org/GLM-4.5.description": "GLM-4.5 é um modelo base desenvolvido para aplicações com agentes, utilizando uma arquitetura Mixture-of-Experts. É profundamente otimizado para uso de ferramentas, navegação na web, engenharia de software e codificação frontend, e integra-se com agentes de código como Claude Code e Roo Code. 
Utiliza raciocínio híbrido para lidar com raciocínios complexos e situações cotidianas.", "zai-org/GLM-4.5V.description": "GLM-4.5V é o mais recente VLM da Zhipu AI, baseado no modelo de texto principal GLM-4.5-Air (106B no total, 12B ativos), com uma arquitetura MoE que oferece alto desempenho a um custo reduzido. Segue a linha de desenvolvimento do GLM-4.1V-Thinking e adiciona 3D-RoPE para melhorar o raciocínio espacial em 3D. Otimizado por meio de pré-treinamento, SFT e RL, lida com imagens, vídeos e documentos longos, e está entre os melhores modelos abertos em 41 benchmarks multimodais públicos. Um modo de alternância de raciocínio permite ao usuário equilibrar velocidade e profundidade.", diff --git a/locales/ru-RU/chat.json b/locales/ru-RU/chat.json index c9b0bd2796..19bb22ab1c 100644 --- a/locales/ru-RU/chat.json +++ b/locales/ru-RU/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "Вызов {{toolName}}...", "task.activity.toolResult": "Результат {{toolName}} получен", "task.batchTasks": "{{count}} пакетных подзадач", + "task.groupTasks": "{{count}} параллельных задач", + "task.groupTasksTitle": "{{agents}} и {{count}} задач агентов", + "task.groupTasksTitleSimple": "{{agents}} {{count}} задач", "task.instruction": "Инструкция к задаче", "task.intermediateSteps": "{{count}} промежуточных этапов", "task.metrics.duration": "(заняло {{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "вызовов", "task.status.cancelled": "Задача отменена", "task.status.failed": "Ошибка выполнения задачи", + "task.status.fetchingDetails": "Получение данных...", "task.status.initializing": "Инициализация задачи...", "task.subtask": "Подзадача", "thread.divider": "Подтема", diff --git a/locales/ru-RU/models.json b/locales/ru-RU/models.json index 625a12a0a8..43320cc0d2 100644 --- a/locales/ru-RU/models.json +++ b/locales/ru-RU/models.json @@ -783,6 +783,40 @@ "llava.description": "LLaVA — мультимодальная модель, объединяющая визуальный энкодер и Vicuna для мощного понимания связки зрение-язык.", "llava:13b.description": "LLaVA — мультимодальная модель, объединяющая визуальный энкодер и Vicuna для мощного понимания связки зрение-язык.", "llava:34b.description": "LLaVA — мультимодальная модель, объединяющая визуальный энкодер и Vicuna для мощного понимания связки зрение-язык.", + "magistral-medium-latest.description": "Magistral Medium 1.2 — это передовая модель рассуждений от Mistral AI (сентябрь 2025 года) с поддержкой обработки изображений.", + "magistral-small-2509.description": "Magistral Small 1.2 — это компактная модель рассуждений с открытым исходным кодом от Mistral AI (сентябрь 2025 года) с поддержкой обработки изображений.", + "mathstral.description": "MathΣtral создана для научных исследований и математических рассуждений, обладает высокой вычислительной мощностью и способностью к объяснению.", + "max-32k.description": "Spark Max 32K обеспечивает обработку больших контекстов с улучшенным пониманием и логическим мышлением, поддерживает ввод до 32K токенов для чтения длинных документов и работы с приватными знаниями.", + "megrez-3b-instruct.description": "Megrez 3B Instruct — это компактная и эффективная модель от Wuwen Xinqiong.", + "meituan/longcat-flash-chat.description": "Открытая базовая модель без рассуждений от Meituan, оптимизированная для диалогов и агентных задач, сильна в использовании инструментов и сложных многоходовых взаимодействиях.", + "meta-llama-3-70b-instruct.description": "Мощная модель с 70 миллиардами параметров, превосходно справляющаяся с рассуждениями, 
программированием и широким спектром языковых задач.", + "meta-llama-3-8b-instruct.description": "Универсальная модель с 8 миллиардами параметров, оптимизированная для чатов и генерации текста.", + "meta-llama-3.1-405b-instruct.description": "Llama 3.1 — модель, обученная на инструкциях, оптимизированная для многоязычных чатов, демонстрирует высокие результаты на отраслевых бенчмарках среди открытых и закрытых моделей.", + "meta-llama-3.1-70b-instruct.description": "Llama 3.1 — модель, обученная на инструкциях, оптимизированная для многоязычных чатов, демонстрирует высокие результаты на отраслевых бенчмарках среди открытых и закрытых моделей.", + "meta-llama-3.1-8b-instruct.description": "Llama 3.1 — модель, обученная на инструкциях, оптимизированная для многоязычных чатов, демонстрирует высокие результаты на отраслевых бенчмарках среди открытых и закрытых моделей.", + "meta-llama/Llama-2-13b-chat-hf.description": "LLaMA-2 Chat (13B) обеспечивает высокое качество обработки языка и стабильный опыт общения.", + "meta-llama/Llama-2-70b-hf.description": "LLaMA-2 обеспечивает высокое качество обработки языка и стабильное взаимодействие.", + "meta-llama/Llama-3-70b-chat-hf.description": "Llama 3 70B Instruct Reference — мощная модель для сложных диалогов.", + "meta-llama/Llama-3-8b-chat-hf.description": "Llama 3 8B Instruct Reference поддерживает многоязычность и обладает широкими знаниями в различных областях.", + "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo.description": "LLaMA 3.2 разработана для задач, сочетающих изображение и текст. Отлично справляется с описанием изображений и визуальными вопросами, объединяя генерацию текста и визуальное мышление.", + "meta-llama/Llama-3.2-3B-Instruct-Turbo.description": "LLaMA 3.2 разработана для задач, сочетающих изображение и текст. Отлично справляется с описанием изображений и визуальными вопросами, объединяя генерацию текста и визуальное мышление.", + "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo.description": "LLaMA 3.2 разработана для задач, сочетающих изображение и текст. Отлично справляется с описанием изображений и визуальными вопросами, объединяя генерацию текста и визуальное мышление.", + "meta-llama/Llama-3.3-70B-Instruct-Turbo.description": "Meta Llama 3.3 — многоязычная LLM с 70 миллиардами параметров (ввод/вывод текста), обученная на инструкциях. Оптимизирована для многоязычных чатов и превосходит многие открытые и закрытые модели на отраслевых бенчмарках.", + "meta-llama/Llama-Vision-Free.description": "LLaMA 3.2 разработана для задач, сочетающих изображение и текст. 
Отлично справляется с описанием изображений и визуальными вопросами, объединяя генерацию текста и визуальное мышление.", + "meta-llama/Meta-Llama-3-70B-Instruct-Lite.description": "Llama 3 70B Instruct Lite создана для высокой производительности с низкой задержкой.", + "meta-llama/Meta-Llama-3-70B-Instruct-Turbo.description": "Llama 3 70B Instruct Turbo обеспечивает глубокое понимание и генерацию текста для самых требовательных задач.", + "meta-llama/Meta-Llama-3-8B-Instruct-Lite.description": "Llama 3 8B Instruct Lite сбалансирована для работы в условиях ограниченных ресурсов.", + "meta-llama/Meta-Llama-3-8B-Instruct-Turbo.description": "Llama 3 8B Instruct Turbo — высокопроизводительная LLM для широкого спектра задач.", + "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo.description": "Модель Llama 3.1 Turbo с 405 миллиардами параметров обладает огромной контекстной емкостью для обработки больших данных и превосходно справляется с задачами ультра-масштабного ИИ.", + "meta-llama/Meta-Llama-3.1-405B-Instruct.description": "Llama 3.1 — флагманская модель Meta, масштабируемая до 405 миллиардов параметров, предназначена для сложных диалогов, многоязычного перевода и анализа данных.", + "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo.description": "Llama 3.1 70B точно настроена для высоконагруженных приложений; квантование FP8 обеспечивает эффективные вычисления и точность в сложных сценариях.", + "meta-llama/Meta-Llama-3.1-70B.description": "Llama 3.1 — флагманская модель Meta, масштабируемая до 405 миллиардов параметров, предназначена для сложных диалогов, многоязычного перевода и анализа данных.", + "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo.description": "Llama 3.1 8B использует квантование FP8, поддерживает до 131 072 токенов контекста и входит в число лучших открытых моделей для сложных задач по многим бенчмаркам.", + "meta-llama/llama-3-70b-instruct.description": "Llama 3 70B Instruct оптимизирована для высококачественных диалогов и демонстрирует отличные результаты в оценках с участием людей.", + "meta-llama/llama-3-8b-instruct.description": "Llama 3 8B Instruct оптимизирована для высококачественных диалогов, превосходя многие закрытые модели.", + "meta-llama/llama-3.1-70b-instruct.description": "Последняя серия Meta Llama 3.1, вариант с 70 миллиардами параметров, обученный на инструкциях и оптимизированный для высококачественных диалогов. В отраслевых оценках показывает отличные результаты по сравнению с ведущими закрытыми моделями. (Доступна только для проверенных корпоративных клиентов.)", + "meta-llama/llama-3.1-8b-instruct.description": "Последняя серия Meta Llama 3.1, вариант с 8 миллиардами параметров, обученный на инструкциях, особенно быстр и эффективен. В отраслевых оценках демонстрирует отличные результаты, превосходя многие ведущие закрытые модели. (Доступна только для проверенных корпоративных клиентов.)", + "meta-llama/llama-3.1-8b-instruct:free.description": "LLaMA 3.1 поддерживает многоязычие и является одной из ведущих генеративных моделей.", "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3 — это открытая LLM для разработчиков, исследователей и предприятий, созданная для поддержки создания, экспериментов и ответственного масштабирования идей генеративного ИИ. 
Являясь частью основы для глобальных инноваций сообщества, она хорошо подходит для ограниченных вычислительных ресурсов, устройств на периферии и ускоренного обучения.", "meta/Llama-3.2-11B-Vision-Instruct.description": "Модель с высокой способностью к визуальному рассуждению на изображениях высокого разрешения, подходящая для приложений визуального понимания.", "meta/Llama-3.2-90B-Vision-Instruct.description": "Продвинутая модель визуального рассуждения для агентов, ориентированных на визуальное понимание.", diff --git a/locales/tr-TR/chat.json b/locales/tr-TR/chat.json index 01a3aea577..a20374cbf3 100644 --- a/locales/tr-TR/chat.json +++ b/locales/tr-TR/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "{{toolName}} çağrılıyor...", "task.activity.toolResult": "{{toolName}} sonucu alındı", "task.batchTasks": "{{count}} Toplu Alt Görev", + "task.groupTasks": "{{count}} Paralel Görev", + "task.groupTasksTitle": "{{agents}} ve {{count}} ajan görevi", + "task.groupTasksTitleSimple": "{{agents}} {{count}} görev", "task.instruction": "Görev Talimatı", "task.intermediateSteps": "{{count}} ara adım", "task.metrics.duration": "({{duration}} sürdü)", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "araç kullanımı", "task.status.cancelled": "Görev İptal Edildi", "task.status.failed": "Görev Başarısız Oldu", + "task.status.fetchingDetails": "Ayrıntılar getiriliyor...", "task.status.initializing": "Görev başlatılıyor...", "task.subtask": "Alt Görev", "thread.divider": "Alt Konu", diff --git a/locales/tr-TR/models.json b/locales/tr-TR/models.json index 88e00009da..f701fb3137 100644 --- a/locales/tr-TR/models.json +++ b/locales/tr-TR/models.json @@ -827,6 +827,61 @@ "meta.llama3-1-8b-instruct-v1:0.description": "128K bağlam penceresi, çok dilli destek ve geliştirilmiş akıl yürütme ile güncellenmiş Meta Llama 3.1 8B Instruct. Llama 3.1 ailesi, çok dilli sohbet ve güçlü test performansı için optimize edilmiş 8B, 70B ve 405B talimatla ayarlanmış metin modellerini içerir. Çok dilli ticari ve araştırma kullanımı için tasarlanmıştır; talimat modelleri asistan tarzı sohbet için, önceden eğitilmiş modeller ise daha geniş üretim görevleri için uygundur. Llama 3.1 çıktıları, diğer modelleri geliştirmek için de kullanılabilir (örneğin, sentetik veri ve iyileştirme). Otoregresif bir Transformer modelidir ve faydalılık ve güvenlik için SFT ve RLHF ile hizalanmıştır.", "meta.llama3-70b-instruct-v1:0.description": "Meta Llama 3, geliştiriciler, araştırmacılar ve işletmeler için açık bir LLM'dir; üretken yapay zeka fikirlerini oluşturmak, denemek ve sorumlu bir şekilde ölçeklendirmek için tasarlanmıştır. Küresel topluluk inovasyonunun temelini oluşturan bu model, içerik üretimi, sohbet yapay zekası, dil anlama, Ar-Ge ve kurumsal uygulamalar için uygundur.", "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3, geliştiriciler, araştırmacılar ve işletmeler için açık bir büyük dil modeli (LLM) olup, üretken yapay zeka fikirlerini oluşturma, deneme ve sorumlu bir şekilde ölçeklendirme süreçlerinde yardımcı olmak üzere tasarlanmıştır. 
Küresel topluluk inovasyonunun temel taşlarından biri olarak, sınırlı bilgi işlem gücü ve kaynaklara sahip ortamlar, uç cihazlar ve daha hızlı eğitim süreleri için uygundur.", + "meta/Llama-3.2-11B-Vision-Instruct.description": "Yüksek çözünürlüklü görsellerde güçlü görsel akıl yürütme yeteneği, görsel anlama uygulamaları için uygundur.", + "meta/Llama-3.2-90B-Vision-Instruct.description": "Görsel anlama ajan uygulamaları için gelişmiş görsel akıl yürütme yeteneği.", + "meta/Llama-3.3-70B-Instruct.description": "Llama 3.3, çok dilli en gelişmiş açık kaynaklı Llama modelidir ve düşük maliyetle neredeyse 405B performansı sunar. Transformer tabanlıdır ve faydalılık ve güvenlik için SFT ve RLHF ile geliştirilmiştir. Talimatlara göre ayarlanmış sürüm, çok dilli sohbet için optimize edilmiştir ve birçok açık ve kapalı sohbet modelini sektör kıyaslamalarında geride bırakır. Bilgi kesimi: Aralık 2023.", + "meta/Meta-Llama-3-70B-Instruct.description": "Muhakeme, kodlama ve geniş dil görevlerinde üstün performans gösteren güçlü bir 70B parametreli model.", + "meta/Meta-Llama-3-8B-Instruct.description": "Sohbet ve metin üretimi için optimize edilmiş çok yönlü bir 8B parametreli model.", + "meta/Meta-Llama-3.1-405B-Instruct.description": "Çok dilli sohbet için optimize edilmiş, açık ve kapalı sohbet modelleri arasında sektör kıyaslamalarında güçlü performans gösteren Llama 3.1 talimat ayarlı metin modeli.", + "meta/Meta-Llama-3.1-70B-Instruct.description": "Çok dilli sohbet için optimize edilmiş, açık ve kapalı sohbet modelleri arasında sektör kıyaslamalarında güçlü performans gösteren Llama 3.1 talimat ayarlı metin modeli.", + "meta/Meta-Llama-3.1-8B-Instruct.description": "Çok dilli sohbet için optimize edilmiş, açık ve kapalı sohbet modelleri arasında sektör kıyaslamalarında güçlü performans gösteren Llama 3.1 talimat ayarlı metin modeli.", + "meta/llama-3-70b.description": "Meta tarafından talimat takibi için ince ayarlanmış 70B açık kaynaklı model, Groq tarafından LPU donanımında hızlı ve verimli çıkarım için sunulmaktadır.", + "meta/llama-3-8b.description": "Meta tarafından talimat takibi için ince ayarlanmış 8B açık kaynaklı model, Groq tarafından LPU donanımında hızlı ve verimli çıkarım için sunulmaktadır.", + "meta/llama-3.1-405b-instruct.description": "Sohbet botları, kodlama ve alan görevleri için sentetik veri üretimi, bilgi damıtımı ve akıl yürütmeyi destekleyen gelişmiş bir büyük dil modeli.", + "meta/llama-3.1-70b-instruct.description": "Karmaşık diyaloglar için geliştirilmiş, bağlam anlama, akıl yürütme ve metin üretiminde mükemmel performans sunar.", + "meta/llama-3.1-70b.description": "128K bağlam desteği, çok dilli yetenekler ve geliştirilmiş akıl yürütme ile güncellenmiş Meta Llama 3 70B Instruct modeli.", + "meta/llama-3.1-8b-instruct.description": "Güçlü dil anlama, akıl yürütme ve metin üretimi yeteneklerine sahip son teknoloji bir model.", + "meta/llama-3.1-8b.description": "Llama 3.1 8B, 128K bağlam penceresini destekler, gerçek zamanlı sohbet ve veri analizi için idealdir ve daha büyük modellere kıyasla önemli maliyet avantajı sunar. 
Groq tarafından LPU donanımında hızlı ve verimli çıkarım için sunulmaktadır.", + "meta/llama-3.2-11b-vision-instruct.description": "Görsellerden yüksek kaliteli akıl yürütme konusunda üstün performans gösteren öncü bir görsel-dil modeli.", + "meta/llama-3.2-11b.description": "Görsel tanıma, görsel akıl yürütme, altyazı oluşturma ve genel görsel soru-cevap için optimize edilmiş, talimat ayarlı bir görsel akıl yürütme modeli (metin+görsel girdi, metin çıktı).", + "meta/llama-3.2-1b-instruct.description": "Güçlü anlama, akıl yürütme ve metin üretimi yeteneklerine sahip son teknoloji küçük dil modeli.", + "meta/llama-3.2-1b.description": "Çok dilli yerel arama, özetleme ve yeniden yazma gibi cihaz içi kullanım senaryoları için metin tabanlı model.", + "meta/llama-3.2-3b-instruct.description": "Güçlü anlama, akıl yürütme ve metin üretimi yeteneklerine sahip son teknoloji küçük dil modeli.", + "meta/llama-3.2-3b.description": "Çok dilli yerel arama, özetleme ve yeniden yazma gibi cihaz içi kullanım senaryoları için ince ayarlanmış metin tabanlı model.", + "meta/llama-3.2-90b-vision-instruct.description": "Görsellerden yüksek kaliteli akıl yürütme konusunda üstün performans gösteren öncü bir görsel-dil modeli.", + "meta/llama-3.2-90b.description": "Görsel tanıma, görsel akıl yürütme, altyazı oluşturma ve genel görsel soru-cevap için optimize edilmiş, talimat ayarlı bir görsel akıl yürütme modeli (metin+görsel girdi, metin çıktı).", + "meta/llama-3.3-70b-instruct.description": "Akıl yürütme, matematik, sağduyu ve fonksiyon çağrılarında güçlü gelişmiş bir büyük dil modeli.", + "meta/llama-3.3-70b.description": "Performans ve verimlilik arasında mükemmel denge. İçerik üretimi, kurumsal uygulamalar ve araştırma için yüksek performanslı sohbet yapay zekası olarak tasarlanmıştır; özetleme, sınıflandırma, duygu analizi ve kod üretimi için güçlü dil anlama yeteneği sunar.", + "meta/llama-4-maverick.description": "Llama 4 ailesi, metin ve çok modlu deneyimleri destekleyen yerel çok modlu bir yapay zeka model setidir. MoE kullanarak metin ve görsel anlama alanında liderlik sunar. Llama 4 Maverick, 128 uzmana sahip 17B parametreli bir modeldir ve DeepInfra tarafından sunulmaktadır.", + "meta/llama-4-scout.description": "Llama 4 ailesi, metin ve çok modlu deneyimleri destekleyen yerel çok modlu bir yapay zeka model setidir. MoE kullanarak metin ve görsel anlama alanında liderlik sunar. 
Llama 4 Scout, 16 uzmana sahip 17B parametreli bir modeldir ve DeepInfra tarafından sunulmaktadır.", + "microsoft/Phi-3-medium-128k-instruct.description": "RAG veya az örnekli istemler için daha büyük bağlam penceresine sahip aynı Phi-3-medium modeli.", + "microsoft/Phi-3-medium-4k-instruct.description": "Phi-3-mini'den daha yüksek kaliteye sahip, 14B parametreli model; yüksek kaliteli ve akıl yürütme yoğun veriye odaklanır.", + "microsoft/Phi-3-mini-128k-instruct.description": "RAG veya az örnekli istemler için daha büyük bağlam penceresine sahip aynı Phi-3-mini modeli.", + "microsoft/Phi-3-mini-4k-instruct.description": "Phi-3 ailesinin en küçük üyesi, kalite ve düşük gecikme için optimize edilmiştir.", + "microsoft/Phi-3-small-128k-instruct.description": "RAG veya az örnekli istemler için daha büyük bağlam penceresine sahip aynı Phi-3-small modeli.", + "microsoft/Phi-3-small-8k-instruct.description": "Phi-3-mini'den daha yüksek kaliteye sahip, 7B parametreli model; yüksek kaliteli ve akıl yürütme yoğun veriye odaklanır.", + "microsoft/Phi-3.5-mini-instruct.description": "Phi-3-mini modelinin güncellenmiş sürümü.", + "microsoft/Phi-3.5-vision-instruct.description": "Phi-3-vision modelinin güncellenmiş sürümü.", + "microsoft/WizardLM-2-8x22B.description": "WizardLM 2, karmaşık diyaloglar, çok dilli görevler, akıl yürütme ve asistanlar konusunda üstün performans gösteren Microsoft AI dil modelidir.", + "microsoft/wizardlm-2-8x22b.description": "WizardLM-2 8x22B, Microsoft AI'nin en gelişmiş Wizard modelidir ve son derece rekabetçi performans sunar.", + "mimo-v2-flash.description": "MiMo-V2-Flash: Akıl yürütme, kodlama ve ajan temelleri için verimli bir model.", + "minicpm-v.description": "MiniCPM-V, geniş kullanım senaryoları için mükemmel OCR ve çok modlu anlama yeteneklerine sahip OpenBMB'nin yeni nesil çok modlu modelidir.", + "minimax-m2.1.description": "MiniMax-M2.1, çok dilli programlama ve gerçek dünya karmaşık görevler için optimize edilmiş MiniMax serisinin en son sürümüdür. Yapay zeka yerel bir model olarak, model performansı, ajan çerçevesi desteği ve çok senaryolu uyumda önemli gelişmeler sağlar; işletmelerin ve bireylerin yapay zeka yerel çalışma ve yaşam tarzını daha hızlı benimsemelerine yardımcı olmayı amaçlar.", + "minimax-m2.description": "MiniMax M2, özellikle kodlama ve ajan iş akışları için oluşturulmuş verimli bir büyük dil modelidir.", + "minimax/minimax-m2.1.description": "MiniMax-M2.1, kodlama, ajan iş akışları ve modern uygulama geliştirme için optimize edilmiş, hafif ve son teknoloji bir büyük dil modelidir. Daha temiz, özlü çıktılar ve daha hızlı algısal yanıt süreleri sunar.", + "minimax/minimax-m2.description": "MiniMax-M2, birçok mühendislik senaryosu için kodlama ve ajan görevlerinde üstün performans gösteren yüksek değerli bir modeldir.", + "minimaxai/minimax-m2.description": "MiniMax-M2, üst düzey kodlama ve ajan performansı için oluşturulmuş, kompakt, hızlı ve maliyet etkin bir MoE modelidir (toplam 230B, aktif 10B).
Çoklu dosya düzenlemeleri, kod çalıştır-düzelt döngüleri, test doğrulama ve karmaşık araç zincirlerinde mükemmeldir.", + "ministral-3b-latest.description": "Ministral 3B, Mistral'in en üst düzey uç modelidir.", + "ministral-8b-latest.description": "Ministral 8B, Mistral'in son derece uygun maliyetli uç modelidir.", + "mistral-ai/Mistral-Large-2411.description": "Mistral'in büyük ölçekli akıl yürütme veya uzmanlaşma gerektiren karmaşık görevler için amiral gemisi modelidir (sentetik metin üretimi, kod üretimi, RAG veya ajanlar).", + "mistral-ai/Mistral-Nemo.description": "Mistral Nemo, boyutuna göre en son akıl yürütme, dünya bilgisi ve kodlama yeteneklerine sahip son teknoloji bir büyük dil modelidir.", + "mistral-ai/mistral-small-2503.description": "Mistral Small, yüksek verimlilik ve düşük gecikme gerektiren her türlü dil tabanlı görev için uygundur.", + "mistral-large-instruct.description": "Mistral-Large-Instruct-2407, 123B parametreli gelişmiş yoğun bir büyük dil modelidir ve en son akıl yürütme, bilgi ve kodlama yeteneklerine sahiptir.", + "mistral-large-latest.description": "Mistral Large, çok dilli görevlerde, karmaşık akıl yürütmede ve kod üretiminde güçlüdür—üst düzey uygulamalar için idealdir.", + "mistral-large.description": "Mistral Large, Mistral'in amiral gemisi modelidir; kod üretimi, matematik ve akıl yürütmeyi 128K bağlam penceresiyle birleştirir.", + "mistral-medium-latest.description": "Mistral Medium 3, 8 kat daha düşük maliyetle en son performansı sunar ve kurumsal dağıtımı kolaylaştırır.", + "mistral-nemo-instruct.description": "Mistral-Nemo-Instruct-2407, Mistral-Nemo-Base-2407'nin talimat ayarlı sürümüdür.", + "mistral-nemo.description": "Mistral Nemo, Mistral AI ve NVIDIA tarafından geliştirilen, yüksek verimliliğe sahip 12B modeldir.", "mistral-small-latest.description": "Mistral Small, çeviri, özetleme ve duygu analizi için uygun maliyetli, hızlı ve güvenilir bir seçenektir.", "mistral-small.description": "Mistral Small, yüksek verimlilik ve düşük gecikme gerektiren her türlü dil tabanlı görev için uygundur.", "mistral.description": "Mistral, Mistral AI'nin 7B modelidir ve çeşitli dil görevleri için uygundur.", diff --git a/locales/vi-VN/chat.json b/locales/vi-VN/chat.json index 88fb5cc37b..a8f67f9d86 100644 --- a/locales/vi-VN/chat.json +++ b/locales/vi-VN/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "Đang gọi {{toolName}}...", "task.activity.toolResult": "Đã nhận kết quả từ {{toolName}}", "task.batchTasks": "{{count}} nhiệm vụ hàng loạt", + "task.groupTasks": "{{count}} Nhiệm vụ song song", + "task.groupTasksTitle": "{{agents}} và {{count}} nhiệm vụ của tác nhân", + "task.groupTasksTitleSimple": "{{agents}} {{count}} nhiệm vụ", "task.instruction": "Hướng dẫn tác vụ", "task.intermediateSteps": "{{count}} bước trung gian", "task.metrics.duration": "(mất {{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "lượt dùng công cụ", "task.status.cancelled": "Nhiệm vụ đã hủy", "task.status.failed": "Nhiệm vụ thất bại", + "task.status.fetchingDetails": "Đang lấy chi tiết...", "task.status.initializing": "Đang khởi tạo nhiệm vụ...", "task.subtask": "Nhiệm vụ phụ", "thread.divider": "Chủ đề phụ", diff --git a/locales/vi-VN/models.json b/locales/vi-VN/models.json index 1e0c8ff318..efa144a63f 100644 --- a/locales/vi-VN/models.json +++ b/locales/vi-VN/models.json @@ -563,6 +563,37 @@ "glm-4.5.description": "Mô hình hàng đầu của Zhipu với chế độ suy nghĩ có thể chuyển đổi, cung cấp SOTA mã nguồn mở tổng thể và hỗ trợ ngữ cảnh lên đến 128K.",
"glm-4.5v.description": "Mô hình suy luận thị giác thế hệ tiếp theo của Zhipu với tổng 106B tham số, 12B hoạt động, đạt SOTA trong các mô hình đa phương thức mã nguồn mở cùng kích thước về hình ảnh, video, hiểu tài liệu và tác vụ GUI.", "glm-4.6.description": "GLM-4.6 là mô hình hàng đầu mới nhất của Zhipu (355B), vượt trội hoàn toàn so với các phiên bản trước về mã hóa nâng cao, xử lý văn bản dài, suy luận và khả năng tác tử. Đặc biệt phù hợp với Claude Sonnet 4 về khả năng lập trình, trở thành mô hình mã hóa hàng đầu tại Trung Quốc.", + "glm-4.7.description": "GLM-4.7 là mô hình chủ lực mới nhất của Zhipu, được nâng cấp cho các tình huống Lập trình Tác nhân (Agentic Coding) với khả năng lập trình vượt trội, lập kế hoạch nhiệm vụ dài hạn và phối hợp công cụ. Mô hình đạt hiệu suất hàng đầu trong số các mô hình mã nguồn mở trên nhiều bộ đánh giá công khai. Năng lực tổng quát được cải thiện với phản hồi ngắn gọn, tự nhiên hơn và khả năng viết lôi cuốn hơn. Đối với các tác vụ tác nhân phức tạp, khả năng tuân thủ hướng dẫn khi gọi công cụ được tăng cường, đồng thời giao diện người dùng và hiệu quả hoàn thành nhiệm vụ dài hạn trong Artifacts và Agentic Coding cũng được cải thiện.", + "glm-4.description": "GLM-4 là mô hình chủ lực cũ được phát hành vào tháng 1 năm 2024, hiện đã được thay thế bởi GLM-4-0520 mạnh mẽ hơn.", + "glm-4v-flash.description": "GLM-4V-Flash tập trung vào khả năng hiểu hình ảnh đơn hiệu quả cho các tình huống phân tích nhanh như xử lý hình ảnh theo thời gian thực hoặc theo lô.", + "glm-4v-plus-0111.description": "GLM-4V-Plus có khả năng hiểu video và nhiều hình ảnh, phù hợp với các tác vụ đa phương thức.", + "glm-4v-plus.description": "GLM-4V-Plus có khả năng hiểu video và nhiều hình ảnh, phù hợp với các tác vụ đa phương thức.", + "glm-4v.description": "GLM-4V cung cấp khả năng hiểu và suy luận hình ảnh mạnh mẽ trong các tác vụ thị giác.", + "glm-z1-air.description": "Mô hình suy luận với khả năng suy luận sâu cho các tác vụ yêu cầu phân tích phức tạp.", + "glm-z1-airx.description": "Suy luận siêu nhanh với chất lượng suy luận cao.", + "glm-z1-flash.description": "Dòng GLM-Z1 cung cấp khả năng suy luận phức tạp mạnh mẽ, vượt trội trong logic, toán học và lập trình.", + "glm-z1-flashx.description": "Nhanh và tiết kiệm chi phí: Được tăng tốc với khả năng suy luận siêu nhanh và hỗ trợ đồng thời cao.", + "glm-zero-preview.description": "GLM-Zero-Preview mang lại khả năng suy luận phức tạp mạnh mẽ, vượt trội trong logic, toán học và lập trình.", + "global.anthropic.claude-opus-4-5-20251101-v1:0.description": "Claude Opus 4.5 là mô hình chủ lực của Anthropic, kết hợp trí thông minh vượt trội và hiệu suất mở rộng cho các tác vụ phức tạp đòi hỏi phản hồi và suy luận chất lượng cao nhất.", + "google/gemini-2.0-flash-001.description": "Gemini 2.0 Flash mang đến các khả năng thế hệ mới, bao gồm tốc độ vượt trội, sử dụng công cụ tích hợp, tạo nội dung đa phương thức và cửa sổ ngữ cảnh lên đến 1 triệu token.", + "google/gemini-2.0-flash-exp:free.description": "Gemini 2.0 Flash Experimental là mô hình AI đa phương thức thử nghiệm mới nhất của Google với các cải tiến về chất lượng so với các phiên bản trước, đặc biệt trong kiến thức thế giới, mã hóa và ngữ cảnh dài.", + "google/gemini-2.0-flash-lite-001.description": "Gemini 2.0 Flash Lite là biến thể nhẹ của Gemini với chế độ suy nghĩ bị tắt mặc định để cải thiện độ trễ và chi phí, nhưng có thể bật thông qua tham số.", + "google/gemini-2.0-flash-lite.description": "Gemini 2.0 Flash Lite cung cấp các tính năng thế hệ mới bao gồm tốc độ 
vượt trội, sử dụng công cụ tích hợp, tạo nội dung đa phương thức và cửa sổ ngữ cảnh lên đến 1 triệu token.", + "google/gemini-2.0-flash.description": "Gemini 2.0 Flash là mô hình suy luận hiệu suất cao của Google dành cho các tác vụ đa phương thức mở rộng.", + "google/gemini-2.5-flash-image-free.description": "Gemini 2.5 Flash Image phiên bản miễn phí với hạn ngạch tạo nội dung đa phương thức giới hạn.", + "google/gemini-2.5-flash-image-preview.description": "Gemini 2.5 Flash là mô hình thử nghiệm hỗ trợ tạo hình ảnh.", + "google/gemini-2.5-flash-image.description": "Gemini 2.5 Flash Image (Nano Banana) là mô hình tạo hình ảnh của Google với hỗ trợ hội thoại đa phương thức.", + "google/gemini-2.5-flash-lite.description": "Gemini 2.5 Flash Lite là biến thể nhẹ của Gemini 2.5 được tối ưu hóa cho độ trễ và chi phí, phù hợp với các tình huống yêu cầu thông lượng cao.", + "google/gemini-2.5-flash-preview.description": "Gemini 2.5 Flash là mô hình chủ lực tiên tiến nhất của Google, được xây dựng cho các tác vụ suy luận, lập trình, toán học và khoa học nâng cao. Mô hình tích hợp khả năng “suy nghĩ” để cung cấp phản hồi chính xác hơn với xử lý ngữ cảnh tinh tế hơn.\n\nLưu ý: Mô hình này có hai biến thể — có suy nghĩ và không suy nghĩ. Giá đầu ra khác nhau đáng kể tùy theo việc suy nghĩ có được bật hay không. Nếu bạn chọn biến thể tiêu chuẩn (không có hậu tố “:thinking”), mô hình sẽ tránh tạo token suy nghĩ.\n\nĐể sử dụng suy nghĩ và nhận token suy nghĩ, bạn phải chọn biến thể “:thinking”, điều này sẽ tính giá cao hơn cho đầu ra suy nghĩ.\n\nGemini 2.5 Flash cũng có thể được cấu hình thông qua tham số “max reasoning tokens” như được tài liệu hóa (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning).", + "google/gemini-2.5-flash-preview:thinking.description": "Gemini 2.5 Flash là mô hình chủ lực tiên tiến nhất của Google, được xây dựng cho các tác vụ suy luận, lập trình, toán học và khoa học nâng cao. Mô hình tích hợp khả năng “suy nghĩ” để cung cấp phản hồi chính xác hơn với xử lý ngữ cảnh tinh tế hơn.\n\nLưu ý: Mô hình này có hai biến thể — có suy nghĩ và không suy nghĩ. Giá đầu ra khác nhau đáng kể tùy theo việc suy nghĩ có được bật hay không. 
Nếu bạn chọn biến thể tiêu chuẩn (không có hậu tố “:thinking”), mô hình sẽ tránh tạo token suy nghĩ.\n\nĐể sử dụng suy nghĩ và nhận token suy nghĩ, bạn phải chọn biến thể “:thinking”, điều này sẽ tính giá cao hơn cho đầu ra suy nghĩ.\n\nGemini 2.5 Flash cũng có thể được cấu hình thông qua tham số “max reasoning tokens” như được tài liệu hóa (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning).", + "google/gemini-2.5-flash.description": "Gemini 2.5 Flash (Lite/Pro/Flash) là dòng sản phẩm của Google trải dài từ độ trễ thấp đến suy luận hiệu suất cao.", + "google/gemini-2.5-pro-free.description": "Gemini 2.5 Pro phiên bản miễn phí cung cấp ngữ cảnh dài đa phương thức với hạn ngạch giới hạn, phù hợp cho thử nghiệm và quy trình nhẹ.", + "google/gemini-2.5-pro-preview.description": "Gemini 2.5 Pro Preview là mô hình suy nghĩ tiên tiến nhất của Google để suy luận các vấn đề phức tạp trong lập trình, toán học và STEM, cũng như phân tích tập dữ liệu lớn, mã nguồn và tài liệu với ngữ cảnh dài.", + "google/gemini-2.5-pro.description": "Gemini 2.5 Pro là mô hình suy luận chủ lực của Google với hỗ trợ ngữ cảnh dài cho các tác vụ phức tạp.", + "google/gemini-3-pro-image-preview-free.description": "Gemini 3 Pro Image phiên bản miễn phí với hạn ngạch tạo nội dung đa phương thức giới hạn.", + "google/gemini-3-pro-image-preview.description": "Gemini 3 Pro Image (Nano Banana Pro) là mô hình tạo hình ảnh của Google với hỗ trợ hội thoại đa phương thức.", + "google/gemini-3-pro-preview-free.description": "Gemini 3 Pro Preview Free cung cấp khả năng hiểu và suy luận đa phương thức giống như phiên bản tiêu chuẩn, nhưng có giới hạn về hạn ngạch và tốc độ, phù hợp hơn cho thử nghiệm và sử dụng tần suất thấp.", + "google/gemini-3-pro-preview.description": "Gemini 3 Pro là mô hình suy luận đa phương thức thế hệ tiếp theo trong dòng Gemini, có khả năng hiểu văn bản, âm thanh, hình ảnh và video, xử lý các tác vụ phức tạp và mã nguồn lớn.", "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3 là một mô hình ngôn ngữ mở dành cho nhà phát triển, nhà nghiên cứu và doanh nghiệp, được thiết kế để hỗ trợ xây dựng, thử nghiệm và mở rộng các ý tưởng AI sinh ngữ một cách có trách nhiệm. 
Là một phần trong nền tảng đổi mới cộng đồng toàn cầu, mô hình này phù hợp với môi trường có tài nguyên hạn chế, thiết bị biên và yêu cầu thời gian huấn luyện nhanh hơn.", "meta/Llama-3.2-11B-Vision-Instruct.description": "Khả năng suy luận hình ảnh mạnh mẽ trên ảnh độ phân giải cao, phù hợp cho các ứng dụng hiểu thị giác.", "meta/Llama-3.2-90B-Vision-Instruct.description": "Khả năng suy luận hình ảnh tiên tiến dành cho các ứng dụng tác tử hiểu thị giác.", diff --git a/locales/zh-CN/chat.json b/locales/zh-CN/chat.json index 9c8cff6725..1593abc18e 100644 --- a/locales/zh-CN/chat.json +++ b/locales/zh-CN/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "正在调用 {{toolName}}…", "task.activity.toolResult": "已获取 {{toolName}} 结果", "task.batchTasks": "{{count}} 个批量子任务", + "task.groupTasks": "{{count}} 个并行任务", + "task.groupTasksTitle": "{{agents}} 和 {{count}} 个代理任务", + "task.groupTasksTitleSimple": "{{agents}} 共 {{count}} 个任务", "task.instruction": "任务说明", "task.intermediateSteps": "{{count}} 个中间步骤", "task.metrics.duration": "(用时 {{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "次技能调用", "task.status.cancelled": "任务已取消", "task.status.failed": "任务失败", + "task.status.fetchingDetails": "正在获取详情…", "task.status.initializing": "任务启动中…", "task.subtask": "子任务", "thread.divider": "子话题", diff --git a/locales/zh-TW/chat.json b/locales/zh-TW/chat.json index 6dba35323b..aa9fb22089 100644 --- a/locales/zh-TW/chat.json +++ b/locales/zh-TW/chat.json @@ -338,6 +338,9 @@ "task.activity.toolCalling": "正在呼叫 {{toolName}}...", "task.activity.toolResult": "已取得 {{toolName}} 結果", "task.batchTasks": "{{count}} 個批次子任務", + "task.groupTasks": "{{count}} 個並行任務", + "task.groupTasksTitle": "{{agents}} 和 {{count}} 個代理任務", + "task.groupTasksTitleSimple": "{{agents}} {{count}} 個任務", "task.instruction": "任務指示", "task.intermediateSteps": "{{count}} 個中間步驟", "task.metrics.duration": "(耗時 {{duration}})", @@ -345,6 +348,7 @@ "task.metrics.toolCallsShort": "次使用工具", "task.status.cancelled": "任務已取消", "task.status.failed": "任務失敗", + "task.status.fetchingDetails": "正在擷取詳細資料...", "task.status.initializing": "任務啟動中...", "task.subtask": "子任務", "thread.divider": "子話題", diff --git a/packages/agent-runtime/src/groupOrchestration/GroupOrchestrationSupervisor.ts b/packages/agent-runtime/src/groupOrchestration/GroupOrchestrationSupervisor.ts index 57b957b2a9..c35c48c499 100644 --- a/packages/agent-runtime/src/groupOrchestration/GroupOrchestrationSupervisor.ts +++ b/packages/agent-runtime/src/groupOrchestration/GroupOrchestrationSupervisor.ts @@ -3,6 +3,7 @@ import type { ExecutorResult, IGroupOrchestrationSupervisor, SupervisorInstruction, + SupervisorInstructionBatchExecAsyncTasks, SupervisorInstructionCallAgent, SupervisorInstructionCallSupervisor, SupervisorInstructionDelegate, @@ -29,6 +30,7 @@ export interface GroupOrchestrationSupervisorConfig { * - supervisor_decided(broadcast) → parallel_call_agents * - supervisor_decided(delegate) → delegate * - supervisor_decided(execute_task) → exec_async_task + * - supervisor_decided(execute_tasks) → batch_exec_async_tasks * - supervisor_decided(finish) → finish * - agent_spoke / agents_broadcasted / task_completed / tasks_completed → call_supervisor OR finish * - delegated → finish @@ -99,7 +101,7 @@ export class GroupOrchestrationSupervisor implements IGroupOrchestrationSupervis case 'execute_task': { const instructionPayload = { agentId: params.agentId as string, - task: params.task as string, + instruction: params.instruction as string, timeout: params.timeout as number |
undefined, title: params.title as string | undefined, toolMessageId: params.toolMessageId as string, @@ -119,6 +121,21 @@ export class GroupOrchestrationSupervisor implements IGroupOrchestrationSupervis } as SupervisorInstructionExecAsyncTask; } + case 'execute_tasks': { + return { + payload: { + tasks: params.tasks as Array<{ + agentId: string; + instruction: string; + timeout?: number; + title?: string; + }>, + toolMessageId: params.toolMessageId as string, + }, + type: 'batch_exec_async_tasks', + } as SupervisorInstructionBatchExecAsyncTasks; + } + case 'finish': { return { reason: (params.reason as string) || 'supervisor_finished', diff --git a/packages/agent-runtime/src/groupOrchestration/__tests__/GroupOrchestrationSupervisor.test.ts b/packages/agent-runtime/src/groupOrchestration/__tests__/GroupOrchestrationSupervisor.test.ts index c7d65593cc..d913d898e0 100644 --- a/packages/agent-runtime/src/groupOrchestration/__tests__/GroupOrchestrationSupervisor.test.ts +++ b/packages/agent-runtime/src/groupOrchestration/__tests__/GroupOrchestrationSupervisor.test.ts @@ -161,7 +161,7 @@ describe('GroupOrchestrationSupervisor', () => { decision: 'execute_task', params: { agentId: 'agent-1', - task: 'Analyze data', + instruction: 'Analyze data', timeout: 30000, toolMessageId: 'tool-msg-1', }, @@ -175,7 +175,7 @@ describe('GroupOrchestrationSupervisor', () => { type: 'exec_async_task', payload: { agentId: 'agent-1', - task: 'Analyze data', + instruction: 'Analyze data', timeout: 30000, title: undefined, toolMessageId: 'tool-msg-1', @@ -193,7 +193,7 @@ describe('GroupOrchestrationSupervisor', () => { decision: 'execute_task', params: { agentId: 'agent-1', - task: 'Analyze data', + instruction: 'Analyze data', timeout: 30000, title: 'Data Analysis Task', toolMessageId: 'tool-msg-1', @@ -208,7 +208,7 @@ describe('GroupOrchestrationSupervisor', () => { type: 'exec_async_task', payload: { agentId: 'agent-1', - task: 'Analyze data', + instruction: 'Analyze data', timeout: 30000, title: 'Data Analysis Task', toolMessageId: 'tool-msg-1', @@ -409,7 +409,7 @@ describe('GroupOrchestrationSupervisor', () => { type: 'supervisor_decided', payload: { decision: 'execute_task', - params: { agentId: 'agent-1', task: 'Do something', toolMessageId: 'tool-1' }, + params: { agentId: 'agent-1', instruction: 'Do something', toolMessageId: 'tool-1' }, skipCallSupervisor: false, }, }, @@ -427,6 +427,77 @@ describe('GroupOrchestrationSupervisor', () => { }); }); + describe('decide - execute_tasks decision', () => { + it('should return batch_exec_async_tasks instruction for execute_tasks decision', async () => { + // Regression test: execute_tasks decision should return batch_exec_async_tasks instruction + // Previously, execute_tasks was not implemented and would fall through to default case, + // returning { type: 'finish', reason: 'unknown_decision: execute_tasks' } + const supervisor = new GroupOrchestrationSupervisor(defaultConfig); + const state = createMockState(); + + const result: ExecutorResult = { + type: 'supervisor_decided', + payload: { + decision: 'execute_tasks', + params: { + tasks: [ + { agentId: 'agent-1', title: 'Task 1', instruction: 'Do task 1' }, + { agentId: 'agent-2', title: 'Task 2', instruction: 'Do task 2' }, + ], + toolMessageId: 'tool-msg-1', + }, + skipCallSupervisor: false, + }, + }; + + const instruction = await supervisor.decide(result, state); + + // Should return batch_exec_async_tasks, NOT finish with unknown_decision + expect(instruction.type).toBe('batch_exec_async_tasks'); + 
expect((instruction as any).payload).toEqual({ + tasks: [ + { agentId: 'agent-1', title: 'Task 1', instruction: 'Do task 1' }, + { agentId: 'agent-2', title: 'Task 2', instruction: 'Do task 2' }, + ], + toolMessageId: 'tool-msg-1', + }); + }); + + it('should handle execute_tasks with skipCallSupervisor flag', async () => { + const supervisor = new GroupOrchestrationSupervisor(defaultConfig); + const state = createMockState(); + + // First, trigger execute_tasks with skipCallSupervisor: true + await supervisor.decide( + { + type: 'supervisor_decided', + payload: { + decision: 'execute_tasks', + params: { + tasks: [{ agentId: 'agent-1', title: 'Task 1', instruction: 'Do task 1' }], + toolMessageId: 'tool-msg-1', + }, + skipCallSupervisor: true, + }, + }, + state, + ); + + // When tasks_completed, should finish (not call supervisor again) + const result: ExecutorResult = { + type: 'tasks_completed', + payload: { results: [{ agentId: 'agent-1', success: true }] }, + }; + + const instruction = await supervisor.decide(result, state); + + expect(instruction).toEqual({ + type: 'finish', + reason: 'skip_call_supervisor', + }); + }); + }); + describe('decide - delegated result', () => { it('should always return finish instruction', async () => { const supervisor = new GroupOrchestrationSupervisor(defaultConfig); diff --git a/packages/agent-runtime/src/groupOrchestration/types.ts b/packages/agent-runtime/src/groupOrchestration/types.ts index 69e4a8b3d0..3181cce9fe 100644 --- a/packages/agent-runtime/src/groupOrchestration/types.ts +++ b/packages/agent-runtime/src/groupOrchestration/types.ts @@ -53,7 +53,7 @@ export interface SupervisorInstructionParallelCallAgents { export interface SupervisorInstructionExecAsyncTask { payload: { agentId: string; - task: string; + instruction: string; timeout?: number; /** Task title (shown in UI, used as thread title) */ title?: string; @@ -69,7 +69,7 @@ export interface SupervisorInstructionExecAsyncTask { export interface SupervisorInstructionExecClientAsyncTask { payload: { agentId: string; - task: string; + instruction: string; timeout?: number; /** Task title (shown in UI, used as thread title) */ title?: string; @@ -85,7 +85,7 @@ export interface SupervisorInstructionBatchExecAsyncTasks { payload: { tasks: Array<{ agentId: string; - task: string; + instruction: string; timeout?: number; /** Task title (shown in UI, used as thread title) */ title?: string; diff --git a/packages/builtin-tool-group-management/src/client/Intervention/ExecuteTask.tsx b/packages/builtin-tool-group-management/src/client/Intervention/ExecuteTask.tsx index 7f8d04d6c8..8ddc6b535e 100644 --- a/packages/builtin-tool-group-management/src/client/Intervention/ExecuteTask.tsx +++ b/packages/builtin-tool-group-management/src/client/Intervention/ExecuteTask.tsx @@ -81,21 +81,21 @@ const ExecuteTaskIntervention = memo ); // Local state - const [task, setTask] = useState(args?.task || ''); + const [instruction, setInstruction] = useState(args?.instruction || ''); const [timeout, setTimeout] = useState(args?.timeout ?? DEFAULT_TIMEOUT); const [hasChanges, setHasChanges] = useState(false); // Sync local state when args change externally useEffect(() => { if (!hasChanges) { - setTask(args?.task || ''); + setInstruction(args?.instruction || ''); setTimeout(args?.timeout ?? 
DEFAULT_TIMEOUT); } - }, [args?.task, args?.timeout, hasChanges]); + }, [args?.instruction, args?.timeout, hasChanges]); - // Handle task change - const handleTaskChange = useCallback((e: ChangeEvent) => { - setTask(e.target.value); + // Handle instruction change + const handleInstructionChange = useCallback((e: ChangeEvent) => { + setInstruction(e.target.value); setHasChanges(true); }, []); @@ -113,12 +113,12 @@ const ExecuteTaskIntervention = memo const cleanup = registerBeforeApprove('executeTask', async () => { if (hasChanges && onArgsChange) { - await onArgsChange({ ...args, task, timeout }); + await onArgsChange({ ...args, instruction, timeout }); } }); return cleanup; - }, [registerBeforeApprove, hasChanges, task, timeout, args, onArgsChange]); + }, [registerBeforeApprove, hasChanges, instruction, timeout, args, onArgsChange]); return ( @@ -154,12 +154,12 @@ const ExecuteTaskIntervention = memo - {/* Task input */} + {/* Instruction input */} ); diff --git a/packages/builtin-tool-group-management/src/client/Intervention/ExecuteTasks.tsx b/packages/builtin-tool-group-management/src/client/Intervention/ExecuteTasks.tsx index 2c5148d4c6..35b82ed07e 100644 --- a/packages/builtin-tool-group-management/src/client/Intervention/ExecuteTasks.tsx +++ b/packages/builtin-tool-group-management/src/client/Intervention/ExecuteTasks.tsx @@ -2,8 +2,8 @@ import { DEFAULT_AVATAR } from '@lobechat/const'; import { BuiltinInterventionProps } from '@lobechat/types'; -import { Avatar, Flexbox, Icon, Tooltip } from '@lobehub/ui'; -import { Collapse, Input, InputNumber } from 'antd'; +import { Accordion, AccordionItem, Avatar, Flexbox, Icon, Tooltip } from '@lobehub/ui'; +import { Input, InputNumber } from 'antd'; import { createStaticStyles, useTheme } from 'antd-style'; import isEqual from 'fast-deep-equal'; import { Clock, Trash2 } from 'lucide-react'; @@ -16,10 +16,14 @@ import { agentGroupSelectors } from '@/store/agentGroup/selectors'; import type { ExecuteTasksParams, TaskItem } from '../../types'; const styles = createStaticStyles(({ css, cssVar }) => ({ - agentTitle: css` - font-size: 14px; - font-weight: 500; - color: ${cssVar.colorText}; + assignee: css` + display: flex; + flex-shrink: 0; + gap: 6px; + align-items: center; + + font-size: 12px; + color: ${cssVar.colorTextSecondary}; `, container: css` padding-block: 12px; @@ -34,20 +38,18 @@ const styles = createStaticStyles(({ css, cssVar }) => ({ color: ${cssVar.colorError}; } `, - taskCard: css` - .ant-collapse-header { - padding-block: 8px !important; - padding-inline: 12px !important; - } - - .ant-collapse-content-box { - padding: 12px !important; - } + index: css` + flex-shrink: 0; + font-size: 12px; + color: ${cssVar.colorTextQuaternary}; `, taskTitle: css` - font-size: 13px; + overflow: hidden; + + font-size: 14px; font-weight: 500; - color: ${cssVar.colorText}; + text-overflow: ellipsis; + white-space: nowrap; `, timeoutInput: css` width: 100px; @@ -102,69 +104,66 @@ const TaskEditor = memo(({ task, index, onChange, onDelete }) = onDelete(index); }, [index, onDelete]); - const header = ( - - - - {task.title || agent?.title || 'Task'} - - e.stopPropagation()}> - - - - +
+ + {agent?.title} +
+
+ } + > + + + + e.stopPropagation()}> + + + + + + + + - -
- ); - - return ( - - - - - ), - key: index, - label: header, - }, - ]} - /> + ); }); @@ -216,7 +215,7 @@ const ExecuteTasksIntervention = memo + {tasks.map((task, index) => ( ))} - + ); }, isEqual, diff --git a/packages/builtin-tool-group-management/src/client/Render/ExecuteTask/index.tsx b/packages/builtin-tool-group-management/src/client/Render/ExecuteTask/index.tsx index 79f5d1a833..282428af8e 100644 --- a/packages/builtin-tool-group-management/src/client/Render/ExecuteTask/index.tsx +++ b/packages/builtin-tool-group-management/src/client/Render/ExecuteTask/index.tsx @@ -54,10 +54,10 @@ const ExecuteTaskRender = memo - {/* Task content (read-only) */} - {args?.task && ( + {/* Instruction content (read-only) */} + {args?.instruction && ( - {args.task} + {args.instruction} )} diff --git a/packages/builtin-tool-group-management/src/client/Render/ExecuteTasks/index.tsx b/packages/builtin-tool-group-management/src/client/Render/ExecuteTasks/index.tsx index 852fff3c93..7e43efc1cd 100644 --- a/packages/builtin-tool-group-management/src/client/Render/ExecuteTasks/index.tsx +++ b/packages/builtin-tool-group-management/src/client/Render/ExecuteTasks/index.tsx @@ -2,11 +2,9 @@ import { DEFAULT_AVATAR } from '@lobechat/const'; import type { AgentGroupMember, BuiltinRenderProps } from '@lobechat/types'; -import { Avatar, Flexbox, Text } from '@lobehub/ui'; +import { Accordion, AccordionItem, Avatar, Block, Flexbox, Text } from '@lobehub/ui'; import { createStaticStyles, useTheme } from 'antd-style'; -import { Clock } from 'lucide-react'; import { memo, useMemo } from 'react'; -import { useTranslation } from 'react-i18next'; import { useAgentGroupStore } from '@/store/agentGroup'; import { agentGroupSelectors } from '@/store/agentGroup/selectors'; @@ -14,40 +12,45 @@ import { agentGroupSelectors } from '@/store/agentGroup/selectors'; import type { ExecuteTasksParams } from '../../../types'; const styles = createStaticStyles(({ css, cssVar }) => ({ - container: css` + assignee: css` display: flex; - flex-direction: column; - gap: 12px; - padding-block: 12px; - `, - taskCard: css` - padding: 12px; - border-radius: 8px; - background: ${cssVar.colorFillQuaternary}; - `, - taskContent: css` - padding-block: 8px; - padding-inline: 12px; - border-radius: ${cssVar.borderRadius}; - background: ${cssVar.colorFillTertiary}; - `, - taskHeader: css` - font-size: 13px; - font-weight: 500; - color: ${cssVar.colorText}; - `, - timeout: css` + flex-shrink: 0; + gap: 6px; + align-items: center; + font-size: 12px; - color: ${cssVar.colorTextTertiary}; + color: ${cssVar.colorTextSecondary}; + `, + container: css` + .accordion-action { + margin-inline-end: 8px; + opacity: 1 !important; + } + `, + index: css` + flex-shrink: 0; + font-size: 12px; + color: ${cssVar.colorTextQuaternary}; + `, + instruction: css` + font-size: 13px; + line-height: 1.6; + color: ${cssVar.colorTextSecondary}; + `, + + taskTitle: css` + overflow: hidden; + font-size: 14px; + text-overflow: ellipsis; + white-space: nowrap; `, })); /** * ExecuteTasks Render component for Group Management tool - * Read-only display of multiple task execution requests + * Accordion-style task list with expandable instruction and assignee on the right */ const ExecuteTasksRender = memo>(({ args }) => { - const { t } = useTranslation('tool'); const theme = useTheme(); const { tasks } = args || {}; @@ -69,46 +72,41 @@ const ExecuteTasksRender = memo>(({ args if (!tasksWithAgents.length) return null; return ( -
- {tasksWithAgents.map((task, index) => { - const timeoutMinutes = task.timeout ? Math.round(task.timeout / 60_000) : 30; - - return ( -
- - {/* Header: Agent info + Timeout */} - - - - - {task.title || task.agent?.title || 'Task'} - - - - - - {timeoutMinutes}{' '} - {t('agentGroupManagement.executeTask.intervention.timeoutUnit')} - - - - - {/* Task content (read-only) */} - {task.instruction && ( - - {task.instruction} - - )} + + {tasksWithAgents.map((task, index) => ( + + + {task.agent?.title} +
+ } + itemKey={task.agentId || String(index)} + key={task.agentId || index} + paddingBlock={8} + paddingInline={4} + title={ + + {index + 1}. + + {task.title || 'Task'} + -
- ); - })} - + } + > + {task.instruction && ( + + {task.instruction} + + )} + + ))} + ); }); diff --git a/packages/builtin-tool-group-management/src/client/Streaming/ExecuteTask/index.tsx b/packages/builtin-tool-group-management/src/client/Streaming/ExecuteTask/index.tsx index 17afba152b..6547658299 100644 --- a/packages/builtin-tool-group-management/src/client/Streaming/ExecuteTask/index.tsx +++ b/packages/builtin-tool-group-management/src/client/Streaming/ExecuteTask/index.tsx @@ -29,7 +29,7 @@ const styles = createStaticStyles(({ css, cssVar }) => ({ })); export const ExecuteTaskStreaming = memo>(({ args }) => { - const { agentId, task } = args || {}; + const { agentId, instruction } = args || {}; const theme = useTheme(); // Get active group ID and agent from store @@ -40,7 +40,7 @@ export const ExecuteTaskStreaming = memo @@ -56,7 +56,7 @@ export const ExecuteTaskStreaming = memo
- {task} + {instruction}
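Taken together, the changes above rename the task-execution payload field from `task` to `instruction` and teach the supervisor to map an `execute_tasks` decision onto a `batch_exec_async_tasks` instruction. The sketch below restates that mapping outside the diff, using only the payload shapes shown in this PR; the `toBatchInstruction` helper name and the sample data are illustrative assumptions, not repository code.

// Minimal sketch of the new execute_tasks -> batch_exec_async_tasks mapping.
// The two interfaces mirror shapes shown in this diff; `toBatchInstruction`
// and the sample data are illustrative assumptions, not repository code.
interface BatchTaskItem {
  agentId: string;
  /** Detailed instruction for the agent to execute (formerly `task`) */
  instruction: string;
  timeout?: number;
  title?: string;
}

interface BatchExecAsyncTasksInstruction {
  payload: { tasks: BatchTaskItem[]; toolMessageId: string };
  type: 'batch_exec_async_tasks';
}

// Pass-through mapping, as in the new `execute_tasks` case of decide():
// the decision params already carry `instruction`-keyed task items.
function toBatchInstruction(params: {
  tasks: BatchTaskItem[];
  toolMessageId: string;
}): BatchExecAsyncTasksInstruction {
  return {
    payload: { tasks: params.tasks, toolMessageId: params.toolMessageId },
    type: 'batch_exec_async_tasks',
  };
}

// The same agentId may appear several times with different instructions
// (the batch-processing pattern described in the prompt changes below).
const example = toBatchInstruction({
  tasks: [
    { agentId: 'agent-1', instruction: 'Do task 1', title: 'Task 1' },
    { agentId: 'agent-2', instruction: 'Do task 2', title: 'Task 2' },
  ],
  toolMessageId: 'tool-msg-1',
});
console.log(example.type); // 'batch_exec_async_tasks'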
diff --git a/packages/builtin-tool-group-management/src/executor.test.ts b/packages/builtin-tool-group-management/src/executor.test.ts index f85a56b177..e0210581d5 100644 --- a/packages/builtin-tool-group-management/src/executor.test.ts +++ b/packages/builtin-tool-group-management/src/executor.test.ts @@ -284,7 +284,7 @@ describe('GroupManagementExecutor', () => { const ctx = createMockContext(); const result = await groupManagementExecutor.executeAgentTask( - { agentId: 'agent-1', task: 'Do something', title: 'Test Task' }, + { agentId: 'agent-1', instruction: 'Do something', title: 'Test Task' }, ctx, ); @@ -293,9 +293,8 @@ describe('GroupManagementExecutor', () => { expect(result.content).toBe('Triggered async task for agent "agent-1".'); expect(result.state).toEqual({ agentId: 'agent-1', - task: 'Do something', + instruction: 'Do something', timeout: undefined, - title: 'Test Task', type: 'executeAgentTask', }); }); @@ -320,7 +319,7 @@ describe('GroupManagementExecutor', () => { ); await groupManagementExecutor.executeAgentTask( - { agentId: 'agent-1', task: 'Do something', timeout: 30000, title: 'Test Task' }, + { agentId: 'agent-1', instruction: 'Do something', timeout: 30000, title: 'Test Task' }, ctx, ); @@ -334,8 +333,8 @@ describe('GroupManagementExecutor', () => { // Now triggerExecuteTask should have been called expect(triggerExecuteTask).toHaveBeenCalledWith({ agentId: 'agent-1', + instruction: 'Do something', supervisorAgentId: 'supervisor-agent', - task: 'Do something', timeout: 30000, toolMessageId: 'test-message-id', }); @@ -345,7 +344,7 @@ describe('GroupManagementExecutor', () => { const ctx = createMockContext(); const result = await groupManagementExecutor.executeAgentTask( - { agentId: 'agent-1', task: 'Do something', title: 'Test Task' }, + { agentId: 'agent-1', instruction: 'Do something', title: 'Test Task' }, ctx, ); @@ -357,16 +356,15 @@ describe('GroupManagementExecutor', () => { const ctx = createMockContext(); const result = await groupManagementExecutor.executeAgentTask( - { agentId: 'agent-1', task: 'Do something', timeout: 60000, title: 'Test Task' }, + { agentId: 'agent-1', instruction: 'Do something', timeout: 60000, title: 'Test Task' }, ctx, ); expect(result.success).toBe(true); expect(result.state).toEqual({ agentId: 'agent-1', - task: 'Do something', + instruction: 'Do something', timeout: 60000, - title: 'Test Task', type: 'executeAgentTask', }); }); diff --git a/packages/builtin-tool-group-management/src/executor.ts b/packages/builtin-tool-group-management/src/executor.ts index e165e333f3..bdaddce572 100644 --- a/packages/builtin-tool-group-management/src/executor.ts +++ b/packages/builtin-tool-group-management/src/executor.ts @@ -124,7 +124,7 @@ class GroupManagementExecutor extends BaseExecutor => { - const { agentId, task, timeout, skipCallSupervisor, runInClient } = params; + const { agentId, instruction, timeout, skipCallSupervisor, runInClient } = params; // Register afterCompletion callback to trigger async task execution after AgentRuntime completes // This follows the same pattern as speak/broadcast - trigger mode, not blocking @@ -132,10 +132,10 @@ class GroupManagementExecutor extends BaseExecutor ctx.groupOrchestration!.triggerExecuteTask({ agentId, + instruction, runInClient, skipCallSupervisor, supervisorAgentId: ctx.agentId!, - task, timeout, toolMessageId: ctx.messageId, }), @@ -147,9 +147,9 @@ class GroupManagementExecutor extends BaseExecutor @@ -61,16 +108,40 @@ Before responding, analyze the user's intent: - "What do you think 
about...", "Any ideas for...", "How should we..." - "Review this...", "Give me feedback on...", "Critique..." - "Explain...", "Compare...", "Summarize..." -- Requests for opinions, perspectives, or expertise-based answers -- Questions that benefit from diverse viewpoints +- Requests for **quick opinions or perspectives based on existing knowledge** +- Questions that benefit from diverse viewpoints **without requiring research or investigation** + +⚠️ **NOT broadcast** (use executeAgentTask/executeAgentTasks instead): +- "Research...", "Investigate...", "Analyze in depth..." - these require actual work, not just opinions +- "Everyone research/investigate..." - this means each agent should do research work, not just share opinions **Signals for Single Agent (speak):** - Explicit request: "Ask [Agent Name] to...", "Let [Agent Name] answer..." - Follow-up to a specific agent's previous response - Task clearly matches only one agent's expertise +**Signals for Single Task Execution (executeAgentTask):** +- Complex multi-step work: "Develop a...", "Design and implement...", "Create a complete..." +- Extended creation: "Write a full...", "Generate a comprehensive...", "Build an entire..." +- Deep research: "Do thorough research on...", "Investigate in depth...", "Analyze extensively..." +- Time-intensive requests: Tasks that clearly need extended processing time + +**Signals for Parallel Task Execution (executeAgentTasks):** +- Distributed work: "Have multiple agents work on...", "Split this into parallel tasks..." +- Multi-aspect implementation: "Build the frontend and backend...", "Create X, Y, and Z components..." +- Speed-critical requests: "Get this done as fast as possible by having agents work in parallel" +- Independent subtasks: When the problem can be decomposed into non-dependent parts +- Batch processing: "Do X for each of these: A, B, C...", "Research these 3 competitors...", "Write posts about these topics..." +- **Parallel research/investigation**: "Everyone investigate...", "Each of you research...", "All of you look into..." 
- when multiple agents need to do actual research work and provide findings + **Default Behavior:** - When in doubt about single vs multiple agents → Lean towards broadcast for diverse perspectives +- When task involves extended, multi-step work → Use executeAgentTask for single agent, executeAgentTasks for parallel work + +**Key Distinction - Opinion vs Research:** +- "Give opinions/thoughts/feedback" → broadcast (quick response from knowledge) +- "Research/investigate/analyze" → executeAgentTask/executeAgentTasks (requires actual work) +- Even if user says "give conclusions", if the task involves research or investigation, use task execution @@ -133,6 +204,10 @@ When a user's request is broad or unclear, ask 1-2 focused questions to understa - **speak**: Single agent responds synchronously in group context - **broadcast**: Multiple agents respond in parallel in group context +**Task Execution:** +- **executeAgentTask**: Assign async task to single agent for extended, multi-step work +- **executeAgentTasks**: Assign multiple async tasks to different agents in parallel + **Flow Control:** - **vote**: Initiate voting among agents @@ -169,6 +244,75 @@ User: "Ask the frontend expert about React performance" Analysis: User explicitly requested specific agent Action: speak to frontend expert with the question \`\`\` + +### Pattern 4: Delegated Task Execution (executeAgentTask) +When a single agent needs extended, multi-step work that benefits from focused execution. + +\`\`\` +User: "Write a complete REST API for user authentication" +Analysis: Complex multi-step task requiring extended work +Action: executeAgentTask to Backend - "Implement REST API for user authentication with JWT tokens, including login, register, and refresh endpoints" +\`\`\` + +\`\`\` +User: "Do thorough research on the latest trends in AI for our product roadmap" +Analysis: Deep research requiring extensive investigation and synthesis +Action: executeAgentTask to Researcher - "Research current AI trends relevant to [product context], compile findings with sources and recommendations" +\`\`\` + +### Pattern 5: Parallel Task Execution (executeAgentTasks) +When multiple tasks can run simultaneously - either by different agents OR the same agent with different instructions. 
+ +**Different agents working on different parts:** +\`\`\` +User: "Build a user dashboard with frontend, backend API, and database schema" +Analysis: Can be split into independent parallel tasks for each agent +Action: executeAgentTasks with: + - Frontend: "Build React dashboard UI with charts and user stats" + - Backend: "Implement REST API endpoints for dashboard data" + - DBA: "Design database schema for user metrics and analytics" +\`\`\` + +**Same agent with different instructions (batch processing):** +\`\`\` +User: "Research these 3 competitors: Company A, Company B, Company C" +Analysis: Same type of task with different inputs - assign to same agent 3 times +Action: executeAgentTasks with: + - Researcher: "Research Company A - analyze their product, pricing, and market position" + - Researcher: "Research Company B - analyze their product, pricing, and market position" + - Researcher: "Research Company C - analyze their product, pricing, and market position" +\`\`\` + +\`\`\` +User: "Write blog posts for each of these 3 topics: AI trends, Cloud computing, DevOps best practices" +Analysis: Same agent can write multiple posts in parallel +Action: executeAgentTasks with: + - Writer: "Write a blog post about AI trends in 2024" + - Writer: "Write a blog post about Cloud computing adoption" + - Writer: "Write a blog post about DevOps best practices" +\`\`\` + +**Multiple agents doing research (NOT broadcast!):** +\`\`\` +User: "Help me research how X is implemented, everyone investigate and give me your conclusions" +Analysis: "research/investigate" means actual work, NOT just opinions. Each agent needs to do research and provide findings. +Action: executeAgentTasks with: + - Developer A: "Research how X implements feature Y, analyze the code structure and patterns" + - Developer B: "Research how X handles Z, document the approach and trade-offs" + - Developer C: "Research X's architecture for W, summarize key design decisions" +⚠️ DO NOT use broadcast - "research/investigate" requires investigation work, not quick opinions! +\`\`\` + +### Pattern 6: Hybrid Workflow (Discuss then Execute) +When you need input before execution. + +\`\`\` +User: "Help me build a dashboard for analytics" +Analysis: Benefits from initial discussion, then requires implementation +Action: +1. broadcast to [Designer, Frontend, Data] - "What key metrics and layout should this analytics dashboard include?" +2. 
After consensus → executeAgentTask to Frontend - "Implement dashboard based on discussed requirements" +\`\`\` @@ -176,6 +320,10 @@ Action: speak to frontend expert with the question - speak: \`agentId\`, \`instruction\` (optional guidance) - broadcast: \`agentIds\` (array), \`instruction\` (optional shared guidance) +**Task Execution:** +- executeAgentTask: \`agentId\`, \`title\` (brief UI label), \`instruction\` (detailed instructions with expected deliverables), \`timeout\` (optional, default 30min) +- executeAgentTasks: \`tasks\` (array of {agentId, title, instruction, timeout?}), \`skipCallSupervisor\` (optional) + **Flow Control:** - vote: \`question\`, \`options\` (array of {id, label, description}), \`voterAgentIds\` (optional), \`requireReasoning\` (default true) diff --git a/packages/builtin-tool-group-management/src/types.ts b/packages/builtin-tool-group-management/src/types.ts index 9d7ba2e368..71bca0b1fc 100644 --- a/packages/builtin-tool-group-management/src/types.ts +++ b/packages/builtin-tool-group-management/src/types.ts @@ -72,6 +72,8 @@ export interface DelegateParams { export interface ExecuteTaskParams { agentId: string; + /** Clear instruction describing the task to perform */ + instruction: string; /** * Whether to run on the desktop client (for local file/shell access). * MUST be true when task requires local-system tools. Default is false (server execution). @@ -83,7 +85,6 @@ export interface ExecuteTaskParams { * Use this when the task is the final action needed. */ skipCallSupervisor?: boolean; - task: string; timeout?: number; /** Brief title describing what this task does (shown in UI) */ title: string; @@ -92,7 +93,7 @@ export interface ExecuteTaskParams { export interface TaskItem { /** The ID of the agent to execute this task */ agentId: string; - /** Detailed instruction/prompt for the task execution */ + /** Detailed instruction for the agent to execute */ instruction: string; /** Optional timeout in milliseconds for this specific task */ timeout?: number; diff --git a/packages/builtin-tool-gtd/src/systemRole.ts b/packages/builtin-tool-gtd/src/systemRole.ts index 9cdfb2ae14..e52f0cfa3b 100644 --- a/packages/builtin-tool-gtd/src/systemRole.ts +++ b/packages/builtin-tool-gtd/src/systemRole.ts @@ -38,8 +38,8 @@ export const systemPrompt = `You have GTD (Getting Things Done) tools to help ma **Planning Tools** - For high-level goal documentation: -- \`createPlan\`: Create a strategic plan document with goal and context -- \`updatePlan\`: Update plan details +- \`createPlan\`: Create a strategic plan document. **Required params: goal, description (brief summary), context** - all three must be provided +- \`updatePlan\`: Update plan details (only planId is required) **Todo Tools** - For actionable execution items: - \`createTodos\`: Create new todo items from text array @@ -49,8 +49,8 @@ export const systemPrompt = `You have GTD (Getting Things Done) tools to help ma **Todo Status Workflow:** todo → processing → completed (use "processing" when actively working on an item) **Async Task Tools** - For long-running background tasks: -- \`execTask\`: Execute a single async task in isolated context -- \`execTasks\`: Execute multiple async tasks in parallel +- \`execTask\`: Execute a single async task. **Required params: description (brief UI label), instruction (detailed prompt)** - both must be provided +- \`execTasks\`: Execute multiple async tasks in parallel.
Each task requires **description** and **instruction** diff --git a/packages/context-engine/src/processors/TasksFlatten.ts b/packages/context-engine/src/processors/TasksFlatten.ts index 9de858a3f3..384496fb9d 100644 --- a/packages/context-engine/src/processors/TasksFlatten.ts +++ b/packages/context-engine/src/processors/TasksFlatten.ts @@ -7,9 +7,11 @@ const log = debug('context-engine:processor:TasksFlattenProcessor'); /** * Tasks Flatten Processor - * Responsible for flattening role=tasks messages into individual task messages + * Responsible for flattening role=tasks and role=groupTasks messages into individual task messages + * + * - tasks: Multiple task messages with same agentId aggregated + * - groupTasks: Multiple task messages with different agentIds aggregated * - * Tasks messages are created when multiple task messages with the same parentId are aggregated. * This processor converts them back to individual task messages that can be processed by TaskMessageProcessor. */ export class TasksFlattenProcessor extends BaseProcessor { @@ -30,8 +32,8 @@ export class TasksFlattenProcessor extends BaseProcessor { // Process each message for (const message of clonedContext.messages) { - // Check if this is a tasks message with tasks field - if (message.role === 'tasks' && message.tasks) { + // Check if this is a tasks or groupTasks message with tasks field + if (['tasks', 'groupTasks'].includes(message.role) && message.tasks) { // If tasks array is empty, skip this message entirely (no content to flatten) if (message.tasks.length === 0) { continue; @@ -40,7 +42,7 @@ export class TasksFlattenProcessor extends BaseProcessor { processedCount++; tasksMessagesFlattened++; - log(`Flattening tasks message ${message.id} with ${message.tasks.length} tasks`); + log(`Flattening ${message.role} message ${message.id} with ${message.tasks.length} tasks`); // Flatten each task for (const task of message.tasks) { diff --git a/packages/context-engine/src/processors/__tests__/TasksFlatten.test.ts b/packages/context-engine/src/processors/__tests__/TasksFlatten.test.ts index 3eaad8cb8f..87987f6758 100644 --- a/packages/context-engine/src/processors/__tests__/TasksFlatten.test.ts +++ b/packages/context-engine/src/processors/__tests__/TasksFlatten.test.ts @@ -188,4 +188,168 @@ describe('TasksFlattenProcessor', () => { expect(result.metadata.tasksMessagesFlattened).toBe(1); expect(result.metadata.taskMessagesCreated).toBe(2); }); + + describe('groupTasks handling', () => { + it('should flatten groupTasks message into individual task messages', async () => { + const processor = new TasksFlattenProcessor(); + + const context = { + messages: [ + { + content: 'Hello', + id: 'msg-1', + role: 'user', + }, + { + id: 'groupTasks-1', + role: 'groupTasks', + tasks: [ + { + agentId: 'agent-1', + content: 'Task 1 result', + id: 'task-1', + metadata: { instruction: 'Do task 1' }, + role: 'task', + }, + { + agentId: 'agent-2', + content: 'Task 2 result', + id: 'task-2', + metadata: { instruction: 'Do task 2' }, + role: 'task', + }, + { + agentId: 'agent-3', + content: 'Task 3 result', + id: 'task-3', + metadata: { instruction: 'Do task 3' }, + role: 'task', + }, + ], + }, + ], + metadata: {}, + stats: { startTime: Date.now() }, + }; + + const result = await processor.process(context as any); + + // Should have 4 messages: 1 user + 3 flattened tasks + expect(result.messages).toHaveLength(4); + expect(result.messages[0].role).toBe('user'); + expect(result.messages[1].role).toBe('task'); + 
expect(result.messages[1].id).toBe('task-1'); + expect(result.messages[1].agentId).toBe('agent-1'); + expect(result.messages[2].role).toBe('task'); + expect(result.messages[2].id).toBe('task-2'); + expect(result.messages[2].agentId).toBe('agent-2'); + expect(result.messages[3].role).toBe('task'); + expect(result.messages[3].id).toBe('task-3'); + expect(result.messages[3].agentId).toBe('agent-3'); + }); + + it('should skip empty groupTasks array', async () => { + const processor = new TasksFlattenProcessor(); + + const context = { + messages: [ + { + content: 'Hello', + id: 'msg-1', + role: 'user', + }, + { + id: 'groupTasks-1', + role: 'groupTasks', + tasks: [], + }, + ], + metadata: {}, + stats: { startTime: Date.now() }, + }; + + const result = await processor.process(context as any); + + // Should only have user message, empty groupTasks should be skipped + expect(result.messages).toHaveLength(1); + expect(result.messages[0].role).toBe('user'); + }); + + it('should preserve parent message references for groupTasks', async () => { + const processor = new TasksFlattenProcessor(); + + const context = { + messages: [ + { + groupId: 'group-1', + id: 'groupTasks-1', + parentId: 'parent-1', + role: 'groupTasks', + tasks: [ + { + agentId: 'agent-1', + content: 'Task result', + id: 'task-1', + role: 'task', + }, + ], + threadId: 'thread-1', + topicId: 'topic-1', + }, + ], + metadata: {}, + stats: { startTime: Date.now() }, + }; + + const result = await processor.process(context as any); + + expect(result.messages).toHaveLength(1); + expect(result.messages[0].parentId).toBe('parent-1'); + expect(result.messages[0].threadId).toBe('thread-1'); + expect(result.messages[0].groupId).toBe('group-1'); + expect(result.messages[0].topicId).toBe('topic-1'); + }); + + it('should handle mixed tasks and groupTasks messages', async () => { + const processor = new TasksFlattenProcessor(); + + const context = { + messages: [ + { + id: 'tasks-1', + role: 'tasks', + tasks: [{ content: 'Task A', id: 'task-a', role: 'task' }], + }, + { + content: 'User message', + id: 'msg-1', + role: 'user', + }, + { + id: 'groupTasks-1', + role: 'groupTasks', + tasks: [ + { agentId: 'agent-1', content: 'Task B', id: 'task-b', role: 'task' }, + { agentId: 'agent-2', content: 'Task C', id: 'task-c', role: 'task' }, + ], + }, + ], + metadata: {}, + stats: { startTime: Date.now() }, + }; + + const result = await processor.process(context as any); + + // Should have 4 messages: 1 from tasks + 1 user + 2 from groupTasks + expect(result.messages).toHaveLength(4); + expect(result.messages[0].id).toBe('task-a'); + expect(result.messages[1].id).toBe('msg-1'); + expect(result.messages[2].id).toBe('task-b'); + expect(result.messages[3].id).toBe('task-c'); + + // Metadata should count both + expect(result.metadata.tasksMessagesFlattened).toBe(2); + expect(result.metadata.taskMessagesCreated).toBe(3); + }); + }); }); diff --git a/packages/conversation-flow/src/__tests__/fixtures/inputs/agentGroup/index.ts b/packages/conversation-flow/src/__tests__/fixtures/inputs/agentGroup/index.ts index f53437750f..84b0b9b36e 100644 --- a/packages/conversation-flow/src/__tests__/fixtures/inputs/agentGroup/index.ts +++ b/packages/conversation-flow/src/__tests__/fixtures/inputs/agentGroup/index.ts @@ -1,6 +1,10 @@ import type { Message } from '../../../../types'; import speakDifferentAgent from './speak-different-agent.json'; +import supervisorAfterMultiTasks from './supervisor-after-multi-tasks.json'; +import supervisorContentOnly from 
'./supervisor-content-only.json'; export const agentGroup = { speakDifferentAgent: speakDifferentAgent as Message[], + supervisorAfterMultiTasks: supervisorAfterMultiTasks as Message[], + supervisorContentOnly: supervisorContentOnly as Message[], }; diff --git a/packages/conversation-flow/src/__tests__/fixtures/inputs/agentGroup/supervisor-after-multi-tasks.json b/packages/conversation-flow/src/__tests__/fixtures/inputs/agentGroup/supervisor-after-multi-tasks.json new file mode 100644 index 0000000000..570b3c9764 --- /dev/null +++ b/packages/conversation-flow/src/__tests__/fixtures/inputs/agentGroup/supervisor-after-multi-tasks.json @@ -0,0 +1,91 @@ +[ + { + "id": "msg-user-1", + "role": "user", + "content": "帮我调研下 clawdbot", + "parentId": null, + "createdAt": 1704067200000, + "updatedAt": 1704067200000 + }, + { + "id": "msg-supervisor-1", + "role": "assistant", + "agentId": "supervisor", + "content": "我会安排团队成员进行调研。", + "parentId": "msg-user-1", + "tools": [ + { + "id": "call_tasks_1", + "type": "builtin", + "apiName": "executeAgentTasks", + "arguments": "{\"tasks\":[{\"agentId\":\"agent-1\",\"title\":\"任务1\"},{\"agentId\":\"agent-2\",\"title\":\"任务2\"}]}", + "identifier": "lobe-group-management" + } + ], + "createdAt": 1704067201000, + "updatedAt": 1704067201000, + "metadata": { + "isSupervisor": true + } + }, + { + "id": "msg-tool-1", + "role": "tool", + "content": "Triggered 2 tasks.", + "parentId": "msg-supervisor-1", + "tool_call_id": "call_tasks_1", + "createdAt": 1704067202000, + "updatedAt": 1704067202000 + }, + { + "id": "msg-task-1", + "role": "task", + "content": "调研结果1...", + "parentId": "msg-tool-1", + "agentId": "agent-1", + "createdAt": 1704067203000, + "updatedAt": 1704067203000, + "metadata": { + "taskTitle": "任务1" + }, + "taskDetail": { + "status": "completed", + "threadId": "thd_xxx1", + "title": "任务1" + } + }, + { + "id": "msg-task-2", + "role": "task", + "content": "调研结果2...", + "parentId": "msg-tool-1", + "agentId": "agent-2", + "createdAt": 1704067204000, + "updatedAt": 1704067204000, + "metadata": { + "taskTitle": "任务2" + }, + "taskDetail": { + "status": "completed", + "threadId": "thd_xxx2", + "title": "任务2" + } + }, + { + "id": "msg-supervisor-summary", + "role": "assistant", + "agentId": "supervisor", + "content": "调研完成!这是综合汇总报告...", + "parentId": "msg-task-2", + "tools": null, + "createdAt": 1704067210000, + "updatedAt": 1704067210000, + "metadata": { + "isSupervisor": true, + "tps": 78.99, + "cost": 0.208 + }, + "model": "claude-sonnet-4", + "provider": "anthropic" + } +] diff --git a/packages/conversation-flow/src/__tests__/fixtures/inputs/agentGroup/supervisor-content-only.json b/packages/conversation-flow/src/__tests__/fixtures/inputs/agentGroup/supervisor-content-only.json new file mode 100644 index 0000000000..c8f84bb653 --- /dev/null +++ b/packages/conversation-flow/src/__tests__/fixtures/inputs/agentGroup/supervisor-content-only.json @@ -0,0 +1,74 @@ +[ + { + "id": "msg-user-1", + "role": "user", + "content": "帮我调研下 clawdbot", + "parentId": null, + "createdAt": 1704067200000, + "updatedAt": 1704067200000 + }, + { + "id": "msg-supervisor-1", + "role": "assistant", + "agentId": "supervisor", + "content": "我会安排团队成员进行调研。", + "parentId": "msg-user-1", + "tools": [ + { + "id": "call_tasks_1", + "type": "builtin", + "apiName": "executeAgentTasks", + "arguments": "{\"tasks\":[{\"agentId\":\"agent-1\",\"title\":\"调研任务\"}]}", + "identifier": "lobe-group-management" + } + ], + "createdAt": 1704067201000, + "updatedAt": 1704067201000, + "metadata": { + 
"isSupervisor": true + } + }, + { + "id": "msg-tool-1", + "role": "tool", + "content": "Triggered 1 task.", + "parentId": "msg-supervisor-1", + "tool_call_id": "call_tasks_1", + "createdAt": 1704067202000, + "updatedAt": 1704067202000 + }, + { + "id": "msg-task-1", + "role": "task", + "content": "调研结果内容...", + "parentId": "msg-tool-1", + "agentId": "agent-1", + "createdAt": 1704067203000, + "updatedAt": 1704067203000, + "metadata": { + "taskTitle": "调研任务" + }, + "taskDetail": { + "status": "completed", + "threadId": "thd_xxx", + "title": "调研任务" + } + }, + { + "id": "msg-supervisor-summary", + "role": "assistant", + "agentId": "supervisor", + "content": "调研完成!这是综合汇总报告...", + "parentId": "msg-task-1", + "tools": null, + "createdAt": 1704067210000, + "updatedAt": 1704067210000, + "metadata": { + "isSupervisor": true, + "tps": 78.99, + "cost": 0.208 + }, + "model": "claude-sonnet-4", + "provider": "anthropic" + } +] diff --git a/packages/conversation-flow/src/__tests__/parse.test.ts b/packages/conversation-flow/src/__tests__/parse.test.ts index edbd96ddd1..caedaf6ad3 100644 --- a/packages/conversation-flow/src/__tests__/parse.test.ts +++ b/packages/conversation-flow/src/__tests__/parse.test.ts @@ -153,6 +153,43 @@ describe('parse', () => { expect(serializeParseResult(result)).toEqual(outputs.agentGroup.speakDifferentAgent); }); + + it('should handle supervisor content-only message (no tools)', () => { + const result = parse(inputs.agentGroup.supervisorContentOnly); + + // The critical assertions: + // 1. The final supervisor message (content-only, no tools) should be transformed to role='supervisor' + // 2. Its content should be moved to children array + const supervisorSummary = result.flatList.find((m) => m.id === 'msg-supervisor-summary'); + expect(supervisorSummary).toBeDefined(); + expect(supervisorSummary?.role).toBe('supervisor'); + expect((supervisorSummary as any)?.children).toHaveLength(1); + expect((supervisorSummary as any)?.children[0].content).toBe('调研完成!这是综合汇总报告...'); + // The top-level content should be empty + expect(supervisorSummary?.content).toBe(''); + }); + + it('should handle supervisor summary after multiple tasks (content folded into children)', () => { + const result = parse(inputs.agentGroup.supervisorAfterMultiTasks); + + // The critical assertions: + // 1. flatList should have: user, supervisor(+tool), groupTasks(2 tasks), supervisor-summary + expect(result.flatList).toHaveLength(4); + expect(result.flatList[0].role).toBe('user'); + expect(result.flatList[1].role).toBe('supervisor'); + expect(result.flatList[2].role).toBe('groupTasks'); + expect(result.flatList[3].role).toBe('supervisor'); + + // 2. groupTasks should have 2 tasks + expect((result.flatList[2] as any).tasks).toHaveLength(2); + + // 3. 
The supervisor summary (no tools) should have content folded into children + const supervisorSummary = result.flatList[3]; + expect(supervisorSummary.id).toBe('msg-supervisor-summary'); + expect(supervisorSummary.content).toBe(''); // content should be empty + expect((supervisorSummary as any).children).toHaveLength(1); + expect((supervisorSummary as any).children[0].content).toBe('调研完成!这是综合汇总报告...'); + }); }); describe('Tasks Aggregation', () => { diff --git a/packages/conversation-flow/src/transformation/FlatListBuilder.ts b/packages/conversation-flow/src/transformation/FlatListBuilder.ts index 0caebdcd41..6d8e36460c 100644 --- a/packages/conversation-flow/src/transformation/FlatListBuilder.ts +++ b/packages/conversation-flow/src/transformation/FlatListBuilder.ts @@ -98,8 +98,16 @@ export class FlatListBuilder { return child?.role !== 'task'; }); - // Create tasks virtual message - const tasksMessage = this.createTasksMessage(parentMessage, taskChildren, processedIds); + // Check if tasks have different agentIds (groupTasks) or same agentId (tasks) + const taskAgentIds = new Set( + taskChildren.map((childId) => this.messageMap.get(childId)?.agentId).filter(Boolean), + ); + const isGroupTasks = taskAgentIds.size > 1; + + // Create appropriate virtual message based on agent diversity + const tasksMessage = isGroupTasks + ? this.createGroupTasksMessage(parentMessage, taskChildren, processedIds) + : this.createTasksMessage(parentMessage, taskChildren, processedIds); flatList.push(tasksMessage); // Continue with non-task children (e.g., final summary from assistant) @@ -136,8 +144,18 @@ export class FlatListBuilder { taskGrandchild.tools && taskGrandchild.tools.length > 0 ) { - this.processAssistantGroup( - taskGrandchild, + this.processAssistantGroup(taskGrandchild, flatList, processedIds, allMessages); + } else if ( + // Check if it's a supervisor message without tools (content-only) + taskGrandchild.role === 'assistant' && + taskGrandchild.metadata?.isSupervisor && + (!taskGrandchild.tools || taskGrandchild.tools.length === 0) + ) { + const supervisorMessage = this.createSupervisorContentMessage(taskGrandchild); + flatList.push(supervisorMessage); + processedIds.add(taskGrandchildId); + this.buildFlatListRecursive( + taskGrandchildId, flatList, processedIds, allMessages, @@ -1037,4 +1055,52 @@ export class FlatListBuilder { updatedAt, } as Message; } + + /** + * Create a virtual groupTasks message for multiple tasks with different agentIds + */ + private createGroupTasksMessage( + parentMessage: Message, + taskChildIds: string[], + processedIds: Set, + ): Message { + const taskMessages: Message[] = []; + + for (const taskId of taskChildIds) { + const taskMessage = this.messageMap.get(taskId); + if (taskMessage) { + taskMessages.push(taskMessage); + processedIds.add(taskId); + } + } + + // Sort by createdAt to maintain order + taskMessages.sort((a, b) => a.createdAt - b.createdAt); + + // Generate ID with parent message id and all task message ids + const taskIdsStr = taskMessages.map((t) => t.id).join('-'); + const groupTasksId = `groupTasks-${parentMessage.id}-${taskIdsStr}`; + + // Calculate timestamps from task messages + const createdAt = + taskMessages.length > 0 + ? Math.min(...taskMessages.map((m) => m.createdAt)) + : parentMessage.createdAt; + const updatedAt = + taskMessages.length > 0 + ? 
Math.max(...taskMessages.map((m) => m.updatedAt)) + : parentMessage.updatedAt; + + return { + content: '', + createdAt, + extra: { + parentMessageId: parentMessage.id, + }, + id: groupTasksId, + role: 'groupTasks' as any, + tasks: taskMessages as any, + updatedAt, + } as Message; + } } diff --git a/packages/conversation-flow/src/transformation/__tests__/FlatListBuilder.test.ts b/packages/conversation-flow/src/transformation/__tests__/FlatListBuilder.test.ts index 97906045ad..af84ef5fd6 100644 --- a/packages/conversation-flow/src/transformation/__tests__/FlatListBuilder.test.ts +++ b/packages/conversation-flow/src/transformation/__tests__/FlatListBuilder.test.ts @@ -557,5 +557,152 @@ describe('FlatListBuilder', () => { expect(result[1].id).toBe('msg-2'); expect(result[2].id).toBe('msg-3'); }); + + it('should create tasks message when multiple tasks have same agentId', () => { + const messages: Message[] = [ + { + content: 'User request', + createdAt: 0, + id: 'msg-1', + role: 'user', + updatedAt: 0, + }, + { + content: 'Tool message', + createdAt: 0, + id: 'tool-1', + parentId: 'msg-1', + role: 'tool', + updatedAt: 0, + }, + { + agentId: 'agent-1', + content: 'Task 1 result', + createdAt: 1, + id: 'task-1', + parentId: 'tool-1', + role: 'task', + updatedAt: 1, + }, + { + agentId: 'agent-1', + content: 'Task 2 result', + createdAt: 2, + id: 'task-2', + parentId: 'tool-1', + role: 'task', + updatedAt: 2, + }, + ]; + + const builder = createBuilder(messages); + const result = builder.flatten(messages); + + // Should create tasks (not groupTasks) since all tasks have same agentId + expect(result).toHaveLength(3); + expect(result[0].id).toBe('msg-1'); + expect(result[1].id).toBe('tool-1'); + expect(result[2].role).toBe('tasks'); + expect((result[2] as any).tasks).toHaveLength(2); + }); + + it('should create groupTasks message when multiple tasks have different agentIds', () => { + const messages: Message[] = [ + { + content: 'User request', + createdAt: 0, + id: 'msg-1', + role: 'user', + updatedAt: 0, + }, + { + content: 'Tool message', + createdAt: 0, + id: 'tool-1', + parentId: 'msg-1', + role: 'tool', + updatedAt: 0, + }, + { + agentId: 'agent-1', + content: 'Task 1 result', + createdAt: 1, + id: 'task-1', + parentId: 'tool-1', + role: 'task', + updatedAt: 1, + }, + { + agentId: 'agent-2', + content: 'Task 2 result', + createdAt: 2, + id: 'task-2', + parentId: 'tool-1', + role: 'task', + updatedAt: 2, + }, + { + agentId: 'agent-3', + content: 'Task 3 result', + createdAt: 3, + id: 'task-3', + parentId: 'tool-1', + role: 'task', + updatedAt: 3, + }, + ]; + + const builder = createBuilder(messages); + const result = builder.flatten(messages); + + // Should create groupTasks since tasks have different agentIds + expect(result).toHaveLength(3); + expect(result[0].id).toBe('msg-1'); + expect(result[1].id).toBe('tool-1'); + expect(result[2].role).toBe('groupTasks'); + expect((result[2] as any).tasks).toHaveLength(3); + // Verify ID format + expect(result[2].id).toContain('groupTasks-'); + }); + + it('should create groupTasks with correct timestamps from task messages', () => { + const messages: Message[] = [ + { + content: 'Tool message', + createdAt: 0, + id: 'tool-1', + role: 'tool', + updatedAt: 0, + }, + { + agentId: 'agent-1', + content: 'Task 1', + createdAt: 100, + id: 'task-1', + parentId: 'tool-1', + role: 'task', + updatedAt: 150, + }, + { + agentId: 'agent-2', + content: 'Task 2', + createdAt: 200, + id: 'task-2', + parentId: 'tool-1', + role: 'task', + updatedAt: 300, + }, + ]; + + 
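+    // Hedged worked example of the aggregation under test (mirrors
+    // createGroupTasksMessage): createdAt = Math.min(100, 200) = 100 and
+    // updatedAt = Math.max(150, 300) = 300; the parent's timestamps are
+    // used only when the task list is empty.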
const builder = createBuilder(messages); + const result = builder.flatten(messages); + + const groupTasksMsg = result.find((m) => m.role === 'groupTasks'); + expect(groupTasksMsg).toBeDefined(); + // createdAt should be min of task createdAt + expect(groupTasksMsg!.createdAt).toBe(100); + // updatedAt should be max of task updatedAt + expect(groupTasksMsg!.updatedAt).toBe(300); + }); }); }); diff --git a/packages/types/src/message/ui/chat.ts b/packages/types/src/message/ui/chat.ts index 30fdfb746b..2e088d1d1a 100644 --- a/packages/types/src/message/ui/chat.ts +++ b/packages/types/src/message/ui/chat.ts @@ -25,6 +25,7 @@ export type UIMessageRoleType = | 'tool' | 'task' | 'tasks' + | 'groupTasks' | 'supervisor' | 'assistantGroup' | 'agentCouncil' @@ -185,6 +186,7 @@ export interface UIChatMessage { /** * Task messages for role='tasks' virtual message * Contains aggregated task messages with same parentId + * Also used to store task execution messages (intermediate steps) from polling */ tasks?: UIChatMessage[]; threadId?: string | null; diff --git a/packages/types/src/tool/builtin.ts b/packages/types/src/tool/builtin.ts index 0614c8e4ca..241f71b17d 100644 --- a/packages/types/src/tool/builtin.ts +++ b/packages/types/src/tool/builtin.ts @@ -433,6 +433,10 @@ export interface TriggerExecuteTaskParams extends GroupOrchestrationBaseParams { * The agent ID to execute the task */ agentId: string; + /** + * The instruction/task description for the agent + */ + instruction: string; /** * Whether to run on the desktop client (for local file/shell access). * MUST be true when task requires local-system tools. Default is false (server execution). @@ -443,10 +447,6 @@ export interface TriggerExecuteTaskParams extends GroupOrchestrationBaseParams { * without calling the supervisor again. */ skipCallSupervisor?: boolean; - /** - * The task description for the agent - */ - task: string; /** * Optional timeout in milliseconds */ @@ -466,7 +466,7 @@ export interface TriggerExecuteTaskItem { */ agentId: string; /** - * Detailed instruction/prompt for the task execution + * Detailed instruction for the agent to execute */ instruction: string; /** diff --git a/src/features/Conversation/ChatItem/components/Title.tsx b/src/features/Conversation/ChatItem/components/Title.tsx index 4b3e0b94dc..a95492932a 100644 --- a/src/features/Conversation/ChatItem/components/Title.tsx +++ b/src/features/Conversation/ChatItem/components/Title.tsx @@ -24,7 +24,7 @@ const Title = memo(({ showTitle, time, avatar, titleAddon }) => { )} {showTitle ? titleAddon : undefined} - {time && ( + {!time ? 
null : ( (({ disableActionsBar, welcome, itemContent diff --git a/src/features/Conversation/Messages/GroupTasks/TaskItem/ClientTaskItem.tsx b/src/features/Conversation/Messages/GroupTasks/TaskItem/ClientTaskItem.tsx new file mode 100644 index 0000000000..e3994aafca --- /dev/null +++ b/src/features/Conversation/Messages/GroupTasks/TaskItem/ClientTaskItem.tsx @@ -0,0 +1,183 @@ +'use client'; + +import { AccordionItem, Block, Text } from '@lobehub/ui'; +import { memo, useMemo, useState } from 'react'; + +import { useAgentGroupStore } from '@/store/agentGroup'; +import { agentGroupSelectors } from '@/store/agentGroup/selectors'; +import { useChatStore } from '@/store/chat'; +import { displayMessageSelectors } from '@/store/chat/selectors'; +import { messageMapKey } from '@/store/chat/utils/messageMapKey'; +import { ThreadStatus } from '@/types/index'; +import type { UIChatMessage } from '@/types/index'; + +import { + ErrorState, + InitializingState, + TaskMessages, + isProcessingStatus, +} from '../../Tasks/shared'; +import TaskTitle, { type TaskMetrics } from './TaskTitle'; + +interface ClientTaskItemProps { + item: UIChatMessage; +} + +const ClientTaskItem = memo(({ item }) => { + const { id, agentId: itemAgentId, groupId: itemGroupId, metadata, taskDetail } = item; + const [expanded, setExpanded] = useState(false); + + const title = taskDetail?.title || metadata?.taskTitle; + const instruction = metadata?.instruction; + const status = taskDetail?.status; + const threadId = taskDetail?.threadId; + + const isProcessing = isProcessingStatus(status); + const isCompleted = status === ThreadStatus.Completed; + const isError = status === ThreadStatus.Failed || status === ThreadStatus.Cancel; + const isInitializing = !taskDetail || !status; + + // Fetch thread messages for client mode + // Use item's agentId (from task message) to query with the correct SubAgent ID that created the thread + // Fall back to activeAgentId if task message doesn't have agentId (shouldn't happen normally) + const [activeAgentId, activeTopicId, useFetchMessages] = useChatStore((s) => [ + s.activeAgentId, + s.activeTopicId, + s.useFetchMessages, + ]); + + // Use task message's agentId (skip 'supervisor' as it's not a valid agent ID for queries) + // Fall back to activeAgentId if not available + const agentId = itemAgentId && itemAgentId !== 'supervisor' ? itemAgentId : activeAgentId; + + // Get agent info from store for displaying + const activeGroupId = useAgentGroupStore(agentGroupSelectors.activeGroupId); + const agent = useAgentGroupStore((s) => + activeGroupId && itemAgentId + ? agentGroupSelectors.getAgentByIdFromGroup(activeGroupId, itemAgentId)(s) + : null, + ); + + const threadContext = useMemo( + () => ({ + agentId, + groupId: itemGroupId, + scope: 'thread' as const, + threadId, + topicId: activeTopicId, + }), + [agentId, itemGroupId, activeTopicId, threadId], + ); + + const threadMessageKey = useMemo( + () => (threadId ? messageMapKey(threadContext) : null), + [threadId, threadContext], + ); + + // Fetch thread messages (skip when executing - messages come from real-time updates) + useFetchMessages(threadContext, isProcessing); + + // Get thread messages from store using selector + const threadMessages = useChatStore((s) => + threadMessageKey + ? 
displayMessageSelectors.getDisplayMessagesByKey(threadMessageKey)(s) + : undefined, + ); + + // Find the assistantGroup message which contains the children blocks + const assistantGroupMessage = threadMessages?.find((item) => item.role === 'assistantGroup'); + const blocks = assistantGroupMessage?.children; + const childrenCount = blocks?.length ?? 0; + + // Get model/provider from assistantGroup message + const model = assistantGroupMessage?.model; + const provider = assistantGroupMessage?.provider; + + // Build metrics for TaskTitle based on blocks data + const metrics: TaskMetrics | undefined = useMemo(() => { + if (isProcessing && blocks) { + const toolCalls = blocks.reduce((sum, block) => sum + (block.tools?.length || 0), 0); + return { + isLoading: false, + startTime: assistantGroupMessage?.createdAt, + steps: blocks.length, + toolCalls, + }; + } + if (isCompleted || isError) { + return { + duration: taskDetail?.duration, + steps: taskDetail?.totalSteps, + toolCalls: taskDetail?.totalToolCalls, + }; + } + return undefined; + }, [ + isProcessing, + isCompleted, + isError, + blocks, + assistantGroupMessage?.createdAt, + taskDetail?.duration, + taskDetail?.totalSteps, + taskDetail?.totalToolCalls, + ]); + + // Check if we have blocks to show (for Processing and Completed states) + const hasBlocks = blocks && childrenCount > 0; + + return ( + + } + > + + {instruction && ( + + + {instruction} + + + )} + + {/* Initializing State - no taskDetail yet or no blocks */} + {(isInitializing || (isProcessing && !hasBlocks)) && } + + {/* Processing or Completed State - show blocks via TaskMessages */} + {!isInitializing && (isProcessing || isCompleted) && hasBlocks && threadMessages && ( + + )} + + {/* Error State */} + {!isInitializing && isError && taskDetail && } + + + ); +}, Object.is); + +ClientTaskItem.displayName = 'ClientTaskItem'; + +export default ClientTaskItem; diff --git a/src/features/Conversation/Messages/GroupTasks/TaskItem/ServerTaskItem.tsx b/src/features/Conversation/Messages/GroupTasks/TaskItem/ServerTaskItem.tsx new file mode 100644 index 0000000000..551cc4a9ae --- /dev/null +++ b/src/features/Conversation/Messages/GroupTasks/TaskItem/ServerTaskItem.tsx @@ -0,0 +1,94 @@ +'use client'; + +import { ThreadStatus } from '@lobechat/types'; +import type { UIChatMessage } from '@lobechat/types'; +import { AccordionItem, Block } from '@lobehub/ui'; +import isEqual from 'fast-deep-equal'; +import { memo, useMemo, useState } from 'react'; + +import { useAgentGroupStore } from '@/store/agentGroup'; +import { agentGroupSelectors } from '@/store/agentGroup/selectors'; + +import { TaskContent } from '../../Tasks/shared'; +import TaskTitle, { type TaskMetrics } from './TaskTitle'; + +interface ServerTaskItemProps { + item: UIChatMessage; +} + +const ServerTaskItem = memo(({ item }) => { + const { id, agentId, metadata, taskDetail, tasks } = item; + const [expanded, setExpanded] = useState(false); + + const title = taskDetail?.title || metadata?.taskTitle; + const status = taskDetail?.status; + const threadId = taskDetail?.threadId; + + const isCompleted = status === ThreadStatus.Completed; + const isError = status === ThreadStatus.Failed || status === ThreadStatus.Cancel; + + // Get agent info from store + const activeGroupId = useAgentGroupStore(agentGroupSelectors.activeGroupId); + const agent = useAgentGroupStore((s) => + activeGroupId && agentId + ? 
agentGroupSelectors.getAgentByIdFromGroup(activeGroupId, agentId)(s) + : null, + ); + + // Build metrics for TaskTitle (only for completed/error states) + const metrics: TaskMetrics | undefined = useMemo(() => { + if (isCompleted || isError) { + return { + duration: taskDetail?.duration, + steps: taskDetail?.totalSteps, + toolCalls: taskDetail?.totalToolCalls, + }; + } + return undefined; + }, [ + isCompleted, + isError, + taskDetail?.duration, + taskDetail?.totalSteps, + taskDetail?.totalToolCalls, + ]); + + return ( + + } + > + + {expanded && ( + + )} + + + ); +}, isEqual); + +ServerTaskItem.displayName = 'ServerTaskItem'; + +export default ServerTaskItem; diff --git a/src/features/Conversation/Messages/GroupTasks/TaskItem/TaskTitle.tsx b/src/features/Conversation/Messages/GroupTasks/TaskItem/TaskTitle.tsx new file mode 100644 index 0000000000..635b356be6 --- /dev/null +++ b/src/features/Conversation/Messages/GroupTasks/TaskItem/TaskTitle.tsx @@ -0,0 +1,177 @@ +'use client'; + +import { ThreadStatus } from '@lobechat/types'; +import { Avatar, Block, Flexbox, Icon, Text } from '@lobehub/ui'; +import { cssVar } from 'antd-style'; +import { Footprints, ListChecksIcon, Wrench, XIcon } from 'lucide-react'; +import { memo, useEffect, useState } from 'react'; +import { useTranslation } from 'react-i18next'; + +import NeuralNetworkLoading from '@/components/NeuralNetworkLoading'; +import { DEFAULT_AVATAR } from '@/const/meta'; + +import { formatDuration, formatElapsedTime, isProcessingStatus } from '../../Tasks/shared'; + +export interface TaskMetrics { + /** Task duration in milliseconds (for completed tasks) */ + duration?: number; + /** Whether metrics are still loading */ + isLoading?: boolean; + /** Start time timestamp for elapsed time calculation */ + startTime?: number; + /** Number of execution steps/blocks */ + steps?: number; + /** Total tool calls count */ + toolCalls?: number; +} + +interface TaskTitleProps { + /** Agent info for avatar display */ + agent?: { + avatar?: string; + backgroundColor?: string | null; + }; + /** Metrics to display (steps, tool calls, elapsed time) */ + metrics?: TaskMetrics; + status?: ThreadStatus; + title?: string; +} + +const TaskStatusIndicator = memo<{ status?: ThreadStatus }>(({ status }) => { + const isCompleted = status === ThreadStatus.Completed; + const isError = status === ThreadStatus.Failed || status === ThreadStatus.Cancel; + const isProcessing = status ? isProcessingStatus(status) : false; + const isInitializing = !status; + + let icon; + + if (isCompleted) { + icon = ; + } else if (isError) { + icon = ; + } else if (isProcessing || isInitializing) { + icon = ; + } else { + return null; + } + + return ( + + {icon} + + ); +}); + +TaskStatusIndicator.displayName = 'TaskStatusIndicator'; + +interface MetricsDisplayProps { + metrics: TaskMetrics; + status?: ThreadStatus; +} + +const MetricsDisplay = memo(({ metrics, status }) => { + const { t } = useTranslation('chat'); + const { steps, toolCalls, startTime, duration, isLoading } = metrics; + const [elapsedTime, setElapsedTime] = useState(0); + + const isProcessing = status ? 
isProcessingStatus(status) : false; + + // Calculate initial elapsed time + useEffect(() => { + if (startTime && isProcessing) { + setElapsedTime(Math.max(0, Date.now() - startTime)); + } + }, [startTime, isProcessing]); + + // Timer for updating elapsed time every second (only when processing) + useEffect(() => { + if (!startTime || !isProcessing) return; + + const timer = setInterval(() => { + setElapsedTime(Math.max(0, Date.now() - startTime)); + }, 1000); + + return () => clearInterval(timer); + }, [startTime, isProcessing]); + + // Don't show metrics if loading or no data + if (isLoading) return null; + + const hasSteps = steps !== undefined && steps > 0; + const hasToolCalls = toolCalls !== undefined && toolCalls > 0; + const hasTime = isProcessing ? startTime !== undefined : duration !== undefined; + + // Don't render if no metrics to show + if (!hasSteps && !hasToolCalls && !hasTime) return null; + + return ( + + {/* Steps */} + {hasSteps && ( + + + + {steps} + + + )} + {/* Tool calls */} + {hasToolCalls && ( + + + + {toolCalls} + + + )} + {/* Time */} + {hasTime && ( + + {isProcessing + ? formatElapsedTime(elapsedTime) + : duration + ? t('task.metrics.duration', { duration: formatDuration(duration) }) + : null} + + )} + + ); +}); + +MetricsDisplay.displayName = 'MetricsDisplay'; + +const TaskTitle = memo(({ title, status, metrics, agent }) => { + return ( + + + {agent && ( + + )} + + {title} + + {metrics && } + + ); +}); + +TaskTitle.displayName = 'TaskTitle'; + +export default TaskTitle; diff --git a/src/features/Conversation/Messages/GroupTasks/TaskItem/index.tsx b/src/features/Conversation/Messages/GroupTasks/TaskItem/index.tsx new file mode 100644 index 0000000000..992b1b3b1d --- /dev/null +++ b/src/features/Conversation/Messages/GroupTasks/TaskItem/index.tsx @@ -0,0 +1,26 @@ +'use client'; + +import type { UIChatMessage } from '@lobechat/types'; +import isEqual from 'fast-deep-equal'; +import { memo } from 'react'; + +import ClientTaskItem from './ClientTaskItem'; +import ServerTaskItem from './ServerTaskItem'; + +interface TaskItemProps { + item: UIChatMessage; +} + +const TaskItem = memo(({ item }) => { + const isClientMode = item.taskDetail?.clientMode; + + if (isClientMode) { + return ; + } + + return ; +}, isEqual); + +TaskItem.displayName = 'TaskItem'; + +export default TaskItem; diff --git a/src/features/Conversation/Messages/GroupTasks/TaskItem/useClientTaskStats.ts b/src/features/Conversation/Messages/GroupTasks/TaskItem/useClientTaskStats.ts new file mode 100644 index 0000000000..271bc431d3 --- /dev/null +++ b/src/features/Conversation/Messages/GroupTasks/TaskItem/useClientTaskStats.ts @@ -0,0 +1,93 @@ +'use client'; + +import { useMemo } from 'react'; + +import { useChatStore } from '@/store/chat'; +import { displayMessageSelectors } from '@/store/chat/selectors'; +import { messageMapKey } from '@/store/chat/utils/messageMapKey'; + +export interface ClientTaskStats { + isLoading: boolean; + startTime?: number; + steps: number; + toolCalls: number; +} + +interface UseClientTaskStatsOptions { + /** Agent ID from the task message (use task's agentId, not activeAgentId) */ + agentId?: string; + enabled?: boolean; + /** Group ID from the task message (use task's groupId, not activeGroupId) */ + groupId?: string; + threadId?: string; +} + +/** + * Hook to fetch thread messages and compute task statistics for client mode tasks. + * Used in TaskItem to display progress metrics (steps, tool calls, elapsed time). 
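+ *
+ * A minimal usage sketch (hedged: `item` and `isProcessing` are assumed to be
+ * the task message and processing flag already available in the caller):
+ * @example
+ * const stats = useClientTaskStats({
+ *   agentId: item.agentId,
+ *   enabled: isProcessing,
+ *   groupId: item.groupId,
+ *   threadId: item.taskDetail?.threadId,
+ * });
+ * // stats.steps, stats.toolCalls and stats.startTime can feed TaskMetrics.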
+ */ +export const useClientTaskStats = ({ + agentId: propAgentId, + groupId, + threadId, + enabled = true, +}: UseClientTaskStatsOptions): ClientTaskStats => { + // Use task message's agentId to query with the correct SubAgent ID that created the thread + // Fall back to activeAgentId if not provided + const [activeAgentId, activeTopicId, useFetchMessages] = useChatStore((s) => [ + s.activeAgentId, + s.activeTopicId, + s.useFetchMessages, + ]); + + const agentId = propAgentId || activeAgentId; + + const threadContext = useMemo( + () => ({ + agentId, + groupId, + scope: 'thread' as const, + threadId, + topicId: activeTopicId, + }), + [agentId, groupId, activeTopicId, threadId], + ); + + const threadMessageKey = useMemo( + () => (threadId ? messageMapKey(threadContext) : null), + [threadId, threadContext], + ); + + // Fetch thread messages (skip when disabled or no threadId) + useFetchMessages(threadContext, !enabled || !threadId); + + // Get thread messages from store using selector + const threadMessages = useChatStore((s) => + threadMessageKey + ? displayMessageSelectors.getDisplayMessagesByKey(threadMessageKey)(s) + : undefined, + ); + + // Compute stats from thread messages + return useMemo(() => { + if (!threadMessages || !enabled) { + return { isLoading: true, steps: 0, toolCalls: 0 }; + } + + // Find the assistantGroup message which contains the children blocks + const assistantGroupMessage = threadMessages.find((item) => item.role === 'assistantGroup'); + const blocks = assistantGroupMessage?.children ?? []; + + // Calculate stats + const steps = blocks.length; + const toolCalls = blocks.reduce((sum, block) => sum + (block.tools?.length || 0), 0); + const startTime = assistantGroupMessage?.createdAt; + + return { + isLoading: false, + startTime, + steps, + toolCalls, + }; + }, [threadMessages, enabled]); +}; diff --git a/src/features/Conversation/Messages/GroupTasks/index.tsx b/src/features/Conversation/Messages/GroupTasks/index.tsx new file mode 100644 index 0000000000..3d98485034 --- /dev/null +++ b/src/features/Conversation/Messages/GroupTasks/index.tsx @@ -0,0 +1,151 @@ +'use client'; + +import { type UIChatMessage } from '@lobechat/types'; +import { Block, Flexbox, GroupAvatar, Icon, Tag } from '@lobehub/ui'; +import { cssVar } from 'antd-style'; +import isEqual from 'fast-deep-equal'; +import { ListTodo } from 'lucide-react'; +import { memo, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; + +import { DEFAULT_AVATAR } from '@/const/meta'; +import { useAgentGroupStore } from '@/store/agentGroup'; +import { agentGroupSelectors } from '@/store/agentGroup/selectors'; + +import { ChatItem } from '../../ChatItem'; +import { dataSelectors, useConversationStore } from '../../store'; +import { AssistantActionsBar } from '../Task/Actions'; +import TaskItem from './TaskItem'; + +interface GroupTasksMessageProps { + id: string; + index: number; +} + +/** + * Custom avatar component for GroupTasks + * Shows GroupAvatar (only task agents, no user) with a ListTodo badge + */ +const GroupTasksAvatar = memo<{ avatars: { avatar?: string; background?: string }[] }>( + ({ avatars }) => { + return ( + + ({ + avatar: a.avatar || DEFAULT_AVATAR, + background: a.background, + }))} + cornerShape={'square'} + size={28} + /> + + + + + ); + }, +); + +GroupTasksAvatar.displayName = 'GroupTasksAvatar'; + +const GroupTasksMessage = memo(({ id, index }) => { + const { t } = useTranslation('chat'); + const item = useConversationStore(dataSelectors.getDisplayMessageById(id), isEqual)!; 
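+  // Hedged example of the resulting header (locale keys from
+  // src/locales/default/chat.ts): two agents ["Research", "Code"] with two
+  // tasks render 'task.groupTasksTitleSimple' → "Research / Code 2 tasks",
+  // while three or more agents render 'task.groupTasksTitle' →
+  // "Research / Code and 3 agents tasks". See the `title` useMemo below.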
+  const actionsConfig = useConversationStore((s) => s.actionsBar?.assistant);
+  const tasks = (item as UIChatMessage)?.tasks?.filter(Boolean) as UIChatMessage[] | undefined;
+
+  // Get unique agent IDs from tasks
+  const taskAgentIds = useMemo(() => {
+    if (!tasks) return [];
+    const ids = tasks.map((task) => task.agentId).filter(Boolean) as string[];
+    return [...new Set(ids)];
+  }, [tasks]);
+
+  // Get active group ID
+  const activeGroupId = useAgentGroupStore(agentGroupSelectors.activeGroupId);
+
+  // Get agent info (avatars and names) for all unique agents in tasks
+  const taskAgents = useAgentGroupStore((s) => {
+    if (!activeGroupId || taskAgentIds.length === 0) return [];
+    return taskAgentIds
+      .map((agentId) => {
+        const agent = agentGroupSelectors.getAgentByIdFromGroup(activeGroupId, agentId)(s);
+        return agent
+          ? { avatar: agent.avatar, background: agent.backgroundColor, title: agent.title }
+          : null;
+      })
+      .filter(Boolean) as { avatar?: string; background?: string; title?: string }[];
+  }, isEqual);
+
+  // Build title: "Agent1 / Agent2 and N agents tasks" (show at most 2 agent names)
+  const title = useMemo(() => {
+    const agentNames = taskAgents.map((a) => a.title).filter(Boolean);
+    if (agentNames.length === 0) return '';
+
+    const totalAgents = agentNames.length;
+    // Show at most 2 agent names
+    const displayedAgents = agentNames.slice(0, 2).join(' / ');
+
+    if (totalAgents <= 2) {
+      // Show all agent names when 2 or fewer
+      return t('task.groupTasksTitleSimple', {
+        agents: displayedAgents,
+        count: tasks?.length || 0,
+      });
+    }
+
+    // Show "Agent1 / Agent2 and X agents tasks" when more than 2
+    return t('task.groupTasksTitle', {
+      agents: displayedAgents,
+      count: totalAgents,
+      taskCount: tasks?.length || 0,
+    });
+  }, [taskAgents, tasks?.length, t]);
+
+  if (!tasks || tasks.length === 0) {
+    return null;
+  }
+
+  const { createdAt } = item;
+
+  return (
+    
+      }
+      avatar={{ title }}
+      customAvatarRender={() => }
+      id={id}
+      message=""
+      placement="left"
+      showTitle
+      time={createdAt}
+      titleAddon={{t('task.groupTasks', { count: tasks.length })}}
+    >
+      
+        {tasks.map((task) => (
+          
+        ))}
+      
+    
+  );
+}, isEqual);
+
+GroupTasksMessage.displayName = 'GroupTasksMessage';
+
+export default GroupTasksMessage;
diff --git a/src/features/Conversation/Messages/Supervisor/index.tsx b/src/features/Conversation/Messages/Supervisor/index.tsx
index 3d436a80a4..7957ea7336 100644
--- a/src/features/Conversation/Messages/Supervisor/index.tsx
+++ b/src/features/Conversation/Messages/Supervisor/index.tsx
@@ -110,7 +110,13 @@ const GroupMessage = memo(({ id, index, disableEditing, isLat
       titleAddon={{t('supervisor.label')}}
     >
       {children && children.length > 0 && (
-        
+        
       )}
       {model && (
diff --git a/src/features/Conversation/Messages/Task/ClientTaskDetail/CompletedState.tsx b/src/features/Conversation/Messages/Task/ClientTaskDetail/CompletedState.tsx
deleted file mode 100644
index 2393894d37..0000000000
--- a/src/features/Conversation/Messages/Task/ClientTaskDetail/CompletedState.tsx
+++ /dev/null
@@ -1,108 +0,0 @@
-'use client';
-
-import { type AssistantContentBlock } from '@lobechat/types';
-import { Accordion, AccordionItem, Block, Flexbox, Icon, Text } from '@lobehub/ui';
-import { cssVar } from 'antd-style';
-import { Workflow } from 'lucide-react';
-import { memo, useMemo } from 'react';
-import { useTranslation } from 'react-i18next';
-
-import ContentBlock from '../../AssistantGroup/components/ContentBlock';
-import { formatDuration } from '../../Tasks/shared/utils';
-import Usage from
'../../components/Extras/Usage'; - -interface CompletedStateProps { - assistantId: string; - blocks: AssistantContentBlock[]; - duration?: number; - model?: string; - provider?: string; - totalCost?: number; - totalTokens?: number; - totalToolCalls?: number; -} - -const CompletedState = memo( - ({ blocks, assistantId, duration, totalToolCalls, model, provider, totalTokens, totalCost }) => { - const { t } = useTranslation('chat'); - - // Split blocks: intermediate steps (all but last) and final result (last) - const { intermediateBlocks, finalBlock } = useMemo(() => { - if (blocks.length === 0) return { finalBlock: null, intermediateBlocks: [] }; - if (blocks.length === 1) return { finalBlock: blocks[0], intermediateBlocks: [] }; - - return { - finalBlock: blocks.at(-1)!, - intermediateBlocks: blocks.slice(0, -1), - }; - }, [blocks]); - - if (!finalBlock) return null; - - const title = ( - - - - - - - {totalToolCalls} - - - {t('task.metrics.toolCallsShort')} - - {/* Duration display */} - {duration && ( - - {t('task.metrics.duration', { duration: formatDuration(duration) })} - - )} - - - ); - - return ( - - {/* Intermediate steps - collapsed by default */} - {intermediateBlocks.length > 0 && ( - - - - {intermediateBlocks.map((block) => ( - - ))} - - - - )} - - {/* Final result - always visible */} - - - {/* Usage display */} - {model && provider && ( - - )} - - ); - }, -); - -CompletedState.displayName = 'ClientCompletedState'; - -export default CompletedState; diff --git a/src/features/Conversation/Messages/Task/ClientTaskDetail/InstructionAccordion.tsx b/src/features/Conversation/Messages/Task/ClientTaskDetail/InstructionAccordion.tsx deleted file mode 100644 index 87269f673a..0000000000 --- a/src/features/Conversation/Messages/Task/ClientTaskDetail/InstructionAccordion.tsx +++ /dev/null @@ -1,63 +0,0 @@ -import { Accordion, AccordionItem, Block, Flexbox, Icon, Markdown, Text } from '@lobehub/ui'; -import { cssVar } from 'antd-style'; -import { ScrollText } from 'lucide-react'; -import { memo, useEffect, useState } from 'react'; -import { useTranslation } from 'react-i18next'; - -interface InstructionAccordionProps { - childrenCount: number; - instruction: string; -} - -const InstructionAccordion = memo(({ instruction, childrenCount }) => { - const { t } = useTranslation('chat'); - - // Auto-collapse instruction when children count exceeds threshold - const [expandedKeys, setExpandedKeys] = useState(['instruction']); - - useEffect(() => { - if (childrenCount > 1) { - setExpandedKeys([]); - } - }, [childrenCount > 1]); - - return ( - setExpandedKeys(keys as string[])} - > - - - - - - {t('task.instruction')} - - - } - > - - {instruction} - - - - ); -}); - -export default InstructionAccordion; diff --git a/src/features/Conversation/Messages/Task/ClientTaskDetail/ProcessingState.tsx b/src/features/Conversation/Messages/Task/ClientTaskDetail/ProcessingState.tsx deleted file mode 100644 index 87e12d149d..0000000000 --- a/src/features/Conversation/Messages/Task/ClientTaskDetail/ProcessingState.tsx +++ /dev/null @@ -1,123 +0,0 @@ -'use client'; - -import { type AssistantContentBlock } from '@lobechat/types'; -import { Block, Flexbox, ScrollShadow, Text } from '@lobehub/ui'; -import { createStaticStyles } from 'antd-style'; -import { type RefObject, memo, useEffect, useMemo, useState } from 'react'; -import { useTranslation } from 'react-i18next'; - -import NeuralNetworkLoading from '@/components/NeuralNetworkLoading'; -import AnimatedNumber from 
'@/features/Conversation/Messages/components/Extras/Usage/UsageDetail/AnimatedNumber'; -import { useAutoScroll } from '@/hooks/useAutoScroll'; - -import ContentBlock from '../../AssistantGroup/components/ContentBlock'; -import { accumulateUsage, formatElapsedTime } from '../../Tasks/shared/utils'; -import Usage from '../../components/Extras/Usage'; - -const styles = createStaticStyles(({ css }) => ({ - contentScroll: css` - max-height: min(50vh, 300px); - `, -})); - -interface ProcessingStateProps { - assistantId: string; - blocks: AssistantContentBlock[]; - model?: string; - provider?: string; - startTime?: number; -} - -const ProcessingState = memo( - ({ blocks, assistantId, startTime, model, provider }) => { - const { t } = useTranslation('chat'); - const [elapsedTime, setElapsedTime] = useState(0); - const { ref, handleScroll } = useAutoScroll({ - deps: [blocks], - enabled: true, - }); - - const totalToolCalls = useMemo( - () => blocks.reduce((sum, block) => sum + (block.tools?.length || 0), 0), - [blocks], - ); - - // Accumulate usage from all blocks - const accumulatedUsage = useMemo(() => accumulateUsage(blocks), [blocks]); - - // Calculate initial elapsed time - useEffect(() => { - if (startTime) { - setElapsedTime(Math.max(0, Date.now() - startTime)); - } - }, [startTime]); - - // Timer for updating elapsed time every second - useEffect(() => { - if (!startTime) return; - - const timer = setInterval(() => { - setElapsedTime(Math.max(0, Date.now() - startTime)); - }, 1000); - - return () => clearInterval(timer); - }, [startTime]); - - return ( - - - - - - - - Math.round(v).toString()} - value={totalToolCalls} - /> - - - {t('task.metrics.toolCallsShort')} - - {startTime && ( - - ({formatElapsedTime(elapsedTime)}) - - )} - - - } - size={8} - > - - {blocks.map((block) => ( - - ))} - - - - {/* Usage display */} - {model && provider && } - - ); - }, -); - -ProcessingState.displayName = 'ClientProcessingState'; - -export default ProcessingState; diff --git a/src/features/Conversation/Messages/Task/ClientTaskDetail/index.tsx b/src/features/Conversation/Messages/Task/ClientTaskDetail/index.tsx index f8801ff4da..804aec01bd 100644 --- a/src/features/Conversation/Messages/Task/ClientTaskDetail/index.tsx +++ b/src/features/Conversation/Messages/Task/ClientTaskDetail/index.tsx @@ -1,7 +1,6 @@ 'use client'; import { type TaskDetail, ThreadStatus } from '@lobechat/types'; -import { Flexbox } from '@lobehub/ui'; import { memo, useMemo } from 'react'; import BubblesLoading from '@/components/BubblesLoading'; @@ -9,10 +8,8 @@ import { useChatStore } from '@/store/chat'; import { displayMessageSelectors } from '@/store/chat/selectors'; import { messageMapKey } from '@/store/chat/utils/messageMapKey'; -import CompletedState from './CompletedState'; +import { TaskMessages } from '../../Tasks/shared'; import InitializingState from './InitializingState'; -import InstructionAccordion from './InstructionAccordion'; -import ProcessingState from './ProcessingState'; interface ClientTaskDetailProps { /** Agent ID from the task message (use task's agentId, not activeAgentId) */ @@ -24,92 +21,76 @@ interface ClientTaskDetailProps { taskDetail?: TaskDetail; } -const ClientTaskDetail = memo(({ agentId: propAgentId, groupId, taskDetail }) => { - const threadId = taskDetail?.threadId; - const isExecuting = taskDetail?.status === ThreadStatus.Processing; +const ClientTaskDetail = memo( + ({ agentId: propAgentId, groupId, taskDetail }) => { + const threadId = taskDetail?.threadId; + const isExecuting = 
taskDetail?.status === ThreadStatus.Processing; - // Use task message's agentId to query with the correct SubAgent ID that created the thread - // Fall back to activeAgentId if task message doesn't have agentId (shouldn't happen normally) - const [activeAgentId, activeTopicId, useFetchMessages] = useChatStore((s) => [ - s.activeAgentId, - s.activeTopicId, - s.useFetchMessages, - ]); + // Use task message's agentId to query with the correct SubAgent ID that created the thread + // Fall back to activeAgentId if task message doesn't have agentId (shouldn't happen normally) + const [activeAgentId, activeTopicId, useFetchMessages] = useChatStore((s) => [ + s.activeAgentId, + s.activeTopicId, + s.useFetchMessages, + ]); - const agentId = propAgentId || activeAgentId; + const agentId = propAgentId || activeAgentId; - const threadContext = useMemo( - () => ({ - agentId, - groupId, - scope: 'thread' as const, - threadId, - topicId: activeTopicId, - }), - [agentId, groupId, activeTopicId, threadId], - ); + const threadContext = useMemo( + () => ({ + agentId, + groupId, + scope: 'thread' as const, + threadId, + topicId: activeTopicId, + }), + [agentId, groupId, activeTopicId, threadId], + ); - const threadMessageKey = useMemo( - () => (threadId ? messageMapKey(threadContext) : null), - [threadId], - ); + const threadMessageKey = useMemo( + () => (threadId ? messageMapKey(threadContext) : null), + [threadId], + ); - // Fetch thread messages (skip when executing - messages come from real-time updates) - useFetchMessages(threadContext, isExecuting); + // Fetch thread messages (skip when executing - messages come from real-time updates) + useFetchMessages(threadContext, isExecuting); - // Get thread messages from store using selector - const threadMessages = useChatStore((s) => - threadMessageKey - ? displayMessageSelectors.getDisplayMessagesByKey(threadMessageKey)(s) - : undefined, - ); + // Get thread messages from store using selector + const threadMessages = useChatStore((s) => + threadMessageKey + ? displayMessageSelectors.getDisplayMessagesByKey(threadMessageKey)(s) + : undefined, + ); - if (!threadMessages) return ; + if (!threadMessages) return ; - // Find the assistantGroup message which contains the children blocks - const assistantGroupMessage = threadMessages.find((item) => item.role === 'assistantGroup'); - const instruction = threadMessages.find((item) => item.role === 'user')?.content; - const childrenCount = assistantGroupMessage?.children?.length ?? 0; + // Find the assistantGroup message which contains the children blocks + const assistantGroupMessage = threadMessages.find((item) => item.role === 'assistantGroup'); + const childrenCount = assistantGroupMessage?.children?.length ?? 0; - // Get model/provider from assistantGroup message - const model = assistantGroupMessage?.model; - const provider = assistantGroupMessage?.provider; + // Get model/provider from assistantGroup message + const model = assistantGroupMessage?.model; + const provider = assistantGroupMessage?.provider; - // Initializing state: no status yet (task just created, waiting for client execution) - if (threadMessages.length === 0 || !assistantGroupMessage?.children || childrenCount === 0) { - return ; - } + // Initializing state: no status yet (task just created, waiting for client execution) + if (threadMessages.length === 0 || !assistantGroupMessage?.children || childrenCount === 0) { + return ; + } - return ( - - {instruction && ( - - )} + return ( + + ); + }, +); - {isExecuting ? 
( - - ) : ( - - )} - - ); -}); - -ClientTaskDetail.displayName = 'ClientClientTaskDetail'; +ClientTaskDetail.displayName = 'ClientTaskDetail'; export default ClientTaskDetail; diff --git a/src/features/Conversation/Messages/Task/TaskDetailPanel/StatusContent.tsx b/src/features/Conversation/Messages/Task/TaskDetailPanel/StatusContent.tsx index b43047946e..c761963f04 100644 --- a/src/features/Conversation/Messages/Task/TaskDetailPanel/StatusContent.tsx +++ b/src/features/Conversation/Messages/Task/TaskDetailPanel/StatusContent.tsx @@ -2,13 +2,13 @@ import { memo } from 'react'; +import { useChatStore } from '@/store/chat'; import { type TaskDetail, ThreadStatus } from '@/types/index'; import { - CompletedState, ErrorState, InitializingState, - ProcessingState, + TaskMessages, isProcessingStatus, } from '../../Tasks/shared'; @@ -18,30 +18,59 @@ interface StatusContentProps { taskDetail?: TaskDetail; } -const StatusContent = memo(({ taskDetail, content, messageId }) => { +const StatusContent = memo(({ taskDetail, messageId }) => { const status = taskDetail?.status; + const threadId = taskDetail?.threadId; + const isProcessing = isProcessingStatus(status); + + // Get polling hook - poll for task status to get messages + const [useEnablePollingTaskStatus, operations] = useChatStore((s) => [ + s.useEnablePollingTaskStatus, + s.operations, + ]); + + // Check if exec_async_task is already polling for this message + const hasActiveOperationPolling = Object.values(operations).some( + (op) => + op.status === 'running' && + op.type === 'execAgentRuntime' && + op.context?.messageId === messageId, + ); + + // Enable polling when task has threadId and no active operation is polling + // For completed tasks, this will fetch messages once (no refreshInterval needed) + const shouldPoll = !!threadId && !hasActiveOperationPolling; + const { data } = useEnablePollingTaskStatus(threadId, messageId, shouldPoll); + + const messages = data?.messages; // Initializing state: no status yet (task just created, waiting for backend) if (!status) { return ; } - // Processing states: Processing, InReview, Pending, Active, Todo - if (isProcessingStatus(status)) { - return ; + // Processing or Completed state with messages + if (messages && messages.length > 0) { + return ( + <> + + { + // Error states: Failed, Cancel + (status === ThreadStatus.Failed || status === ThreadStatus.Cancel) && ( + + ) + } + + ); } - // Completed state - if (status === ThreadStatus.Completed) { - return ; - } - - // Error states: Failed, Cancel - if (status === ThreadStatus.Failed || status === ThreadStatus.Cancel) { - return ; - } - - // Fallback to initializing state for unknown status + // Still loading messages return ; }); diff --git a/src/features/Conversation/Messages/Tasks/TaskItem/ClientTaskItem.tsx b/src/features/Conversation/Messages/Tasks/TaskItem/ClientTaskItem.tsx index 101f1e5b95..dcdd4b9a37 100644 --- a/src/features/Conversation/Messages/Tasks/TaskItem/ClientTaskItem.tsx +++ b/src/features/Conversation/Messages/Tasks/TaskItem/ClientTaskItem.tsx @@ -9,9 +9,7 @@ import { messageMapKey } from '@/store/chat/utils/messageMapKey'; import { ThreadStatus } from '@/types/index'; import type { UIChatMessage } from '@/types/index'; -import ClientTaskDetailCompletedState from '../../Task/ClientTaskDetail/CompletedState'; -import ClientTaskDetailProcessingState from '../../Task/ClientTaskDetail/ProcessingState'; -import { ErrorState, InitializingState, isProcessingStatus } from '../shared'; +import { ErrorState, InitializingState, TaskMessages, 
isProcessingStatus } from '../shared'; import TaskTitle, { type TaskMetrics } from './TaskTitle'; interface ClientTaskItemProps { @@ -43,8 +41,7 @@ const ClientTaskItem = memo(({ item }) => { // Use task message's agentId (skip 'supervisor' as it's not a valid agent ID for queries) // Fall back to activeAgentId if not available - const agentId = - itemAgentId && itemAgentId !== 'supervisor' ? itemAgentId : activeAgentId; + const agentId = itemAgentId && itemAgentId !== 'supervisor' ? itemAgentId : activeAgentId; const threadContext = useMemo( () => ({ @@ -135,33 +132,21 @@ const ClientTaskItem = memo(({ item }) => { {/* Initializing State - no taskDetail yet or no blocks */} {(isInitializing || (isProcessing && !hasBlocks)) && } - {/* Processing State - show streaming blocks */} - {!isInitializing && isProcessing && hasBlocks && ( - )} {/* Error State */} {!isInitializing && isError && taskDetail && } - - {/* Completed State - show blocks with final result */} - {!isInitializing && isCompleted && taskDetail && hasBlocks && ( - - )} ); diff --git a/src/features/Conversation/Messages/Tasks/TaskItem/ServerTaskItem.tsx b/src/features/Conversation/Messages/Tasks/TaskItem/ServerTaskItem.tsx index 6488fc6ddb..cdfc5655bf 100644 --- a/src/features/Conversation/Messages/Tasks/TaskItem/ServerTaskItem.tsx +++ b/src/features/Conversation/Messages/Tasks/TaskItem/ServerTaskItem.tsx @@ -1,18 +1,12 @@ 'use client'; -import { AccordionItem, Block, Text } from '@lobehub/ui'; +import { AccordionItem, Block } from '@lobehub/ui'; import { memo, useMemo, useState } from 'react'; import { ThreadStatus } from '@/types/index'; import type { UIChatMessage } from '@/types/index'; -import { - CompletedState, - ErrorState, - InitializingState, - ProcessingState, - isProcessingStatus, -} from '../shared'; +import { TaskContent } from '../shared'; import TaskTitle, { type TaskMetrics } from './TaskTitle'; interface ServerTaskItemProps { @@ -20,17 +14,15 @@ interface ServerTaskItemProps { } const ServerTaskItem = memo(({ item }) => { - const { id, content, metadata, taskDetail } = item; + const { id, metadata, taskDetail, tasks } = item; const [expanded, setExpanded] = useState(false); const title = taskDetail?.title || metadata?.taskTitle; - const instruction = metadata?.instruction; const status = taskDetail?.status; + const threadId = taskDetail?.threadId; - const isProcessing = isProcessingStatus(status); const isCompleted = status === ThreadStatus.Completed; const isError = status === ThreadStatus.Failed || status === ThreadStatus.Cancel; - const isInitializing = !taskDetail || !status; // Build metrics for TaskTitle (only for completed/error states) const metrics: TaskMetrics | undefined = useMemo(() => { @@ -42,7 +34,13 @@ const ServerTaskItem = memo(({ item }) => { }; } return undefined; - }, [isCompleted, isError, taskDetail?.duration, taskDetail?.totalSteps, taskDetail?.totalToolCalls]); + }, [ + isCompleted, + isError, + taskDetail?.duration, + taskDetail?.totalSteps, + taskDetail?.totalToolCalls, + ]); return ( (({ item }) => { title={} > - {instruction && ( - - - {instruction} - - - )} - - {/* Initializing State - no taskDetail yet */} - {isInitializing && } - - {/* Processing State */} - {!isInitializing && isProcessing && taskDetail && ( - - )} - - {/* Error State */} - {!isInitializing && isError && taskDetail && } - - {/* Completed State */} - {!isInitializing && isCompleted && taskDetail && ( - )} diff --git a/src/features/Conversation/Messages/Tasks/shared/ErrorState.tsx 
b/src/features/Conversation/Messages/Tasks/shared/ErrorState.tsx
index 5c3d755be7..ad3aec79fa 100644
--- a/src/features/Conversation/Messages/Tasks/shared/ErrorState.tsx
+++ b/src/features/Conversation/Messages/Tasks/shared/ErrorState.tsx
@@ -38,6 +38,46 @@ interface ErrorStateProps {
   taskDetail: TaskDetail;
 }

+/**
+ * Extract displayable error content from various error structures
+ */
+const getErrorContent = (error: Record<string, any> | undefined): string | null => {
+  if (!error) return null;
+
+  // Try common error structures
+  // 1. error.error.body (TRPC style)
+  if (error.error?.body) {
+    return JSON.stringify(error.error.body, null, 2);
+  }
+
+  // 2. error.body (direct body)
+  if (error.body && typeof error.body === 'object') {
+    return JSON.stringify(error.body, null, 2);
+  }
+
+  // 3. error.message (if it's not "[object Object]")
+  if (error.message && typeof error.message === 'string' && error.message !== '[object Object]') {
+    return error.message;
+  }
+
+  // 4. If error itself is an object with meaningful content, stringify it
+  // Skip if it only has a useless message field
+  const keys = Object.keys(error);
+  if (keys.length > 0) {
+    // Filter out useless "[object Object]" values
+    const meaningfulEntries = Object.entries(error).filter(([, value]) => {
+      if (typeof value === 'string' && value === '[object Object]') return false;
+      return true;
+    });
+
+    if (meaningfulEntries.length > 0) {
+      return JSON.stringify(Object.fromEntries(meaningfulEntries), null, 2);
+    }
+  }
+
+  return null;
+};
+
 const ErrorState = memo(({ taskDetail }) => {
   const { t } = useTranslation('chat');
@@ -49,6 +89,9 @@ const ErrorState = memo(({ taskDetail }) => {
   const formattedDuration = useMemo(() => formatDuration(duration), [duration]);
   const formattedCost = useMemo(() => formatCost(totalCost), [totalCost]);

+  // Extract error content
+  const errorContent = useMemo(() => getErrorContent(error), [error]);
+
   const hasMetrics = !!(formattedDuration || totalToolCalls || totalMessages || formattedCost);

   return (
@@ -56,14 +99,14 @@ const ErrorState = memo(({ taskDetail }) => {
       {/* Error Content */}
-            {JSON.stringify(error?.error?.body, null, 2)}
+            {errorContent}
           )
         }
diff --git a/src/features/Conversation/Messages/Tasks/shared/InitializingState.tsx b/src/features/Conversation/Messages/Tasks/shared/InitializingState.tsx
index 5dcba8626f..adc72be55a 100644
--- a/src/features/Conversation/Messages/Tasks/shared/InitializingState.tsx
+++ b/src/features/Conversation/Messages/Tasks/shared/InitializingState.tsx
@@ -2,12 +2,14 @@

 import { Flexbox, Text } from '@lobehub/ui';
 import { createStaticStyles, keyframes } from 'antd-style';
-import { memo } from 'react';
+import { memo, useEffect, useState } from 'react';
 import { useTranslation } from 'react-i18next';

 import NeuralNetworkLoading from '@/components/NeuralNetworkLoading';
 import { shinyTextStyles } from '@/styles';

+import { formatElapsedTime } from './utils';
+
 const shimmer = keyframes`
   0% {
     transform: translateX(-100%);
@@ -48,6 +50,18 @@ const styles = createStaticStyles(({ css, cssVar }) => ({

 const InitializingState = memo(() => {
   const { t } = useTranslation('chat');
+  const [elapsedTime, setElapsedTime] = useState(0);
+
+  // Timer for updating elapsed time every second
+  useEffect(() => {
+    const startTime = Date.now();
+
+    const timer = setInterval(() => {
+      setElapsedTime(Date.now() - startTime);
+    }, 1000);
+
+    return () => clearInterval(timer);
+  }, []);

   return (
@@ -56,6 +70,7 @@ const InitializingState = memo(() => {
       {t('task.status.initializing')}
+      
({formatElapsedTime(elapsedTime)}) ); diff --git a/src/features/Conversation/Messages/Tasks/shared/TaskContent.tsx b/src/features/Conversation/Messages/Tasks/shared/TaskContent.tsx new file mode 100644 index 0000000000..05ff732505 --- /dev/null +++ b/src/features/Conversation/Messages/Tasks/shared/TaskContent.tsx @@ -0,0 +1,68 @@ +'use client'; + +import { ThreadStatus } from '@lobechat/types'; +import type { TaskDetail, UIChatMessage } from '@lobechat/types'; +import { Flexbox, Text } from '@lobehub/ui'; +import { memo } from 'react'; +import { useTranslation } from 'react-i18next'; + +import BubblesLoading from '@/components/BubblesLoading'; + +import ErrorState from './ErrorState'; +import InitializingState from './InitializingState'; +import TaskMessages from './TaskMessages'; +import { useTaskPolling } from './useTaskPolling'; + +export interface TaskContentProps { + id: string; + isError: boolean; + messages: UIChatMessage[] | undefined; + status: ThreadStatus | undefined; + taskDetail: TaskDetail | undefined; + threadId: string | undefined; +} + +const TaskContent = memo( + ({ id, threadId, status, messages, taskDetail, isError }) => { + const { t } = useTranslation('chat'); + const { isProcessing } = useTaskPolling({ + messageId: id, + status, + threadId, + }); + + // No messages yet + if (!messages || messages.length === 0) { + // Still processing: show full initializing state + if (isProcessing) { + return ; + } + + // Already completed but loading messages: show simple loading + return ( + + + {t('task.status.fetchingDetails')} + + ); + } + + return ( + <> + + {/* Error states: Failed, Cancel */} + {isError && taskDetail && } + + ); + }, +); + +TaskContent.displayName = 'TaskContent'; + +export default TaskContent; diff --git a/src/features/Conversation/Messages/Tasks/shared/TaskMessages.tsx b/src/features/Conversation/Messages/Tasks/shared/TaskMessages.tsx new file mode 100644 index 0000000000..5340f646be --- /dev/null +++ b/src/features/Conversation/Messages/Tasks/shared/TaskMessages.tsx @@ -0,0 +1,383 @@ +'use client'; + +import { type AssistantContentBlock, type UIChatMessage } from '@lobechat/types'; +import { + Accordion, + AccordionItem, + Block, + Flexbox, + Icon, + Markdown, + ScrollShadow, + Text, +} from '@lobehub/ui'; +import { createStaticStyles, cssVar } from 'antd-style'; +import { ScrollText, Workflow } from 'lucide-react'; +import { type RefObject, memo, useEffect, useMemo, useState } from 'react'; +import { useTranslation } from 'react-i18next'; + +import NeuralNetworkLoading from '@/components/NeuralNetworkLoading'; +import { useAutoScroll } from '@/hooks/useAutoScroll'; + +import ContentBlock from '../../AssistantGroup/components/ContentBlock'; +import Usage from '../../components/Extras/Usage'; +import AnimatedNumber from '../../components/Extras/Usage/UsageDetail/AnimatedNumber'; +import { accumulateUsage, formatDuration, formatElapsedTime } from './utils'; + +const styles = createStaticStyles(({ css }) => ({ + contentScroll: css` + max-height: min(50vh, 300px); + `, + instructionContent: css` + overflow: auto; + max-height: 300px; + `, +})); + +/** + * InstructionAccordion - Shows the task instruction in a collapsible accordion + */ +const InstructionAccordion = memo<{ childrenCount: number; instruction: string }>( + ({ instruction, childrenCount }) => { + const { t } = useTranslation('chat'); + + // Auto-collapse instruction when children count exceeds threshold + const [expandedKeys, setExpandedKeys] = useState(['instruction']); + + useEffect(() => { + 
if (childrenCount > 1) { + setExpandedKeys([]); + } + }, [childrenCount > 1]); + + return ( + setExpandedKeys(keys as string[])} + > + + + + + + {t('task.instruction')} + + + } + > + + {instruction} + + + + ); + }, +); + +InstructionAccordion.displayName = 'InstructionAccordion'; + +interface TaskMessagesProps { + /** + * Task duration in ms (for completed state) + */ + duration?: number; + /** + * Whether the task is currently processing + */ + isProcessing?: boolean; + /** + * Messages from task execution (parsed by conversation-flow) + * Will extract assistantGroup.children as blocks for rendering + */ + messages: UIChatMessage[]; + /** + * Model name for usage display + */ + model?: string; + /** + * Provider name for usage display + */ + provider?: string; + /** + * Task start time for elapsed time calculation + */ + startTime?: number; + /** + * Total cost (for completed state) + */ + totalCost?: number; +} + +/** + * Processing state - shows all blocks with loading indicator + */ +const ProcessingView = memo<{ + accumulatedUsage: { cost?: number; totalTokens?: number }; + assistantId: string; + blocks: AssistantContentBlock[]; + model?: string; + provider?: string; + startTime?: number; + totalToolCalls: number; +}>(({ blocks, assistantId, startTime, model, provider, totalToolCalls, accumulatedUsage }) => { + const { t } = useTranslation('chat'); + const [elapsedTime, setElapsedTime] = useState(0); + const { ref, handleScroll } = useAutoScroll({ + deps: [blocks], + enabled: true, + }); + + // Calculate initial elapsed time + useEffect(() => { + if (startTime) { + setElapsedTime(Math.max(0, Date.now() - startTime)); + } + }, [startTime]); + + // Timer for updating elapsed time every second + useEffect(() => { + if (!startTime) return; + + const timer = setInterval(() => { + setElapsedTime(Math.max(0, Date.now() - startTime)); + }, 1000); + + return () => clearInterval(timer); + }, [startTime]); + + return ( + + + + + + + + Math.round(v).toString()} + value={totalToolCalls} + /> + + + {t('task.metrics.toolCallsShort')} + + {startTime && ( + + ({formatElapsedTime(elapsedTime)}) + + )} + + + } + size={8} + > + + {blocks.map((block) => ( + + ))} + + + + {/* Usage display */} + {model && provider && } + + ); +}); + +ProcessingView.displayName = 'ProcessingView'; + +/** + * Completed state - shows intermediate steps in accordion, final result visible + */ +const CompletedView = memo<{ + assistantId: string; + blocks: AssistantContentBlock[]; + duration?: number; + model?: string; + provider?: string; + totalCost?: number; + totalTokens?: number; + totalToolCalls: number; +}>(({ blocks, assistantId, duration, totalToolCalls, model, provider, totalTokens, totalCost }) => { + const { t } = useTranslation('chat'); + + // Split blocks: intermediate steps (all but last) and final result (last) + const { intermediateBlocks, finalBlock } = useMemo(() => { + if (blocks.length === 0) return { finalBlock: null, intermediateBlocks: [] }; + if (blocks.length === 1) return { finalBlock: blocks[0], intermediateBlocks: [] }; + + return { + finalBlock: blocks.at(-1)!, + intermediateBlocks: blocks.slice(0, -1), + }; + }, [blocks]); + + if (!finalBlock) return null; + + const title = ( + + + + + + + {totalToolCalls} + + + {t('task.metrics.toolCallsShort')} + + {/* Duration display */} + {duration && ( + + {t('task.metrics.duration', { duration: formatDuration(duration) })} + + )} + + + ); + + return ( + + {/* Intermediate steps - collapsed by default */} + {intermediateBlocks.length > 0 && ( + + + + 
{intermediateBlocks.map((block) => ( + + ))} + + + + )} + + {/* Final result - always visible */} + + + {/* Usage display */} + {model && provider && ( + + )} + + ); +}); + +CompletedView.displayName = 'CompletedView'; + +/** + * TaskMessages - Renders task execution messages (blocks) for both processing and completed states + * + * Extracts assistantGroup.children (blocks) from messages and renders them: + * - Processing: Shows all blocks with loading indicator and real-time updates + * - Completed: Shows intermediate steps in accordion, final result always visible + */ +const TaskMessages = memo( + ({ messages, isProcessing = false, startTime, duration, model, provider, totalCost }) => { + // Extract blocks and instruction from messages + const { blocks, assistantId, instruction } = useMemo(() => { + if (!messages || messages.length === 0) + return { assistantId: '', blocks: [], instruction: undefined }; + + const assistantGroupMessage = messages.find((item) => item.role === 'assistantGroup'); + const userMessage = messages.find((item) => item.role === 'user'); + + return { + assistantId: assistantGroupMessage?.id ?? '', + blocks: assistantGroupMessage?.children ?? [], + instruction: userMessage?.content, + }; + }, [messages]); + + // Calculate total tool calls + const totalToolCalls = useMemo( + () => blocks.reduce((sum, block) => sum + (block.tools?.length || 0), 0), + [blocks], + ); + + // Accumulate usage from all blocks + const accumulatedUsage = useMemo(() => accumulateUsage(blocks), [blocks]); + + return ( + + {/* Instruction accordion */} + {instruction && ( + + )} + + {/* Processing or Completed view */} + {isProcessing ? ( + + ) : ( + + )} + + ); + }, +); + +TaskMessages.displayName = 'TaskMessages'; + +export default TaskMessages; diff --git a/src/features/Conversation/Messages/Tasks/shared/index.ts b/src/features/Conversation/Messages/Tasks/shared/index.ts index 120635febb..cdda266c0b 100644 --- a/src/features/Conversation/Messages/Tasks/shared/index.ts +++ b/src/features/Conversation/Messages/Tasks/shared/index.ts @@ -5,4 +5,8 @@ export { default as ErrorState } from './ErrorState'; export { default as InitializingState } from './InitializingState'; export type { ProcessingStateVariant } from './ProcessingState'; export { default as ProcessingState } from './ProcessingState'; +export type { TaskContentProps } from './TaskContent'; +export { default as TaskContent } from './TaskContent'; +export { default as TaskMessages } from './TaskMessages'; +export * from './useTaskPolling'; export * from './utils'; diff --git a/src/features/Conversation/Messages/Tasks/shared/useTaskPolling.ts b/src/features/Conversation/Messages/Tasks/shared/useTaskPolling.ts new file mode 100644 index 0000000000..61c794d309 --- /dev/null +++ b/src/features/Conversation/Messages/Tasks/shared/useTaskPolling.ts @@ -0,0 +1,48 @@ +'use client'; + +import { ThreadStatus } from '@lobechat/types'; +import { useEffect, useState } from 'react'; + +import { useChatStore } from '@/store/chat'; + +import { isProcessingStatus } from './utils'; + +interface UseTaskPollingParams { + messageId: string; + status: ThreadStatus | undefined; + threadId: string | undefined; +} + +export const useTaskPolling = ({ messageId, threadId, status }: UseTaskPollingParams) => { + const isProcessing = isProcessingStatus(status); + const [hasFetched, setHasFetched] = useState(false); + + const [useEnablePollingTaskStatus, operations] = useChatStore((s) => [ + s.useEnablePollingTaskStatus, + s.operations, + ]); + + // Check if 
diff --git a/src/features/Conversation/Messages/index.tsx b/src/features/Conversation/Messages/index.tsx
index 817e0977bb..4a3b3b87a5 100644
--- a/src/features/Conversation/Messages/index.tsx
+++ b/src/features/Conversation/Messages/index.tsx
@@ -15,6 +15,7 @@ import AgentCouncilMessage from './AgentCouncil';
 import AssistantMessage from './Assistant';
 import AssistantGroupMessage from './AssistantGroup';
 import CompressedGroupMessage from './CompressedGroup';
+import GroupTasksMessage from './GroupTasks';
 import SupervisorMessage from './Supervisor';
 import TaskMessage from './Task';
 import TasksMessage from './Tasks';
@@ -159,6 +160,10 @@ const MessageItem = memo(
       return ;
     }
 
+    case 'groupTasks': {
+      return ;
+    }
+
     case 'agentCouncil': {
       return ;
     }
diff --git a/src/locales/default/chat.ts b/src/locales/default/chat.ts
index cd6e5e49b3..183bc108b6 100644
--- a/src/locales/default/chat.ts
+++ b/src/locales/default/chat.ts
@@ -369,6 +369,9 @@ export default {
   'task.activity.toolCalling': 'Calling {{toolName}}...',
   'task.activity.toolResult': '{{toolName}} result received',
   'task.batchTasks': '{{count}} Batch Subtasks',
+  'task.groupTasks': '{{count}} Parallel Tasks',
+  'task.groupTasksTitle': '{{agents}} and {{count}} agent tasks',
+  'task.groupTasksTitleSimple': '{{agents}} {{count}} tasks',
   'task.instruction': 'Task Instruction',
   'task.intermediateSteps': '{{count}} intermediate steps',
   'task.metrics.duration': '(took {{duration}})',
@@ -376,6 +379,7 @@ export default {
   'task.metrics.toolCallsShort': 'skill uses',
   'task.status.cancelled': 'Task Cancelled',
   'task.status.failed': 'Task Failed',
+  'task.status.fetchingDetails': 'Fetching details...',
   'task.status.initializing': 'Initializing task...',
   'task.subtask': 'Subtask',
   'thread.divider': 'Subtopic',
diff --git a/src/server/modules/AgentRuntime/RuntimeExecutors.ts b/src/server/modules/AgentRuntime/RuntimeExecutors.ts
index 20bf557574..b4e36829c8 100644
--- a/src/server/modules/AgentRuntime/RuntimeExecutors.ts
+++ b/src/server/modules/AgentRuntime/RuntimeExecutors.ts
@@ -753,7 +753,11 @@ export const createRuntimeExecutors = (
     const newState = structuredClone(state);
 
     // Query latest messages from database
+    // Must pass agentId to ensure correct query scope, otherwise when topicId is undefined,
+    // the query will use isNull(topicId) condition which won't find messages with actual topicId
     const latestMessages = await ctx.messageModel.query({
+      agentId: state.metadata?.agentId,
+      threadId: state.metadata?.threadId,
       topicId: state.metadata?.topicId,
     });
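The comment added above describes a classic optional-filter pitfall: a query builder that maps an `undefined` field to an `isNull(...)` condition silently narrows the result set to rows where that column really is NULL. A hedged sketch of the failure mode with drizzle-orm-style condition helpers (the `messages` table shape here is illustrative, not the project's actual schema):

    // Illustration of the topicId pitfall; `messages` is a hypothetical drizzle table.
    import { SQL, and, eq, isNull } from 'drizzle-orm';

    declare const messages: { agentId: any; threadId: any; topicId: any };

    const buildWhere = (params: {
      agentId?: string;
      threadId?: string;
      topicId?: string;
    }): SQL | undefined => {
      const conditions: SQL[] = [];

      // Before the fix, only topicId was passed. With topicId === undefined this
      // becomes isNull(messages.topicId) and matches zero rows for topic-scoped messages.
      conditions.push(
        params.topicId === undefined
          ? isNull(messages.topicId)
          : eq(messages.topicId, params.topicId),
      );

      // After the fix, agentId (and threadId) anchor the scope even when topicId is undefined.
      if (params.agentId) conditions.push(eq(messages.agentId, params.agentId));
      if (params.threadId) conditions.push(eq(messages.threadId, params.threadId));

      return and(...conditions);
    };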
diff --git a/src/server/modules/AgentRuntime/__tests__/RuntimeExecutors.test.ts b/src/server/modules/AgentRuntime/__tests__/RuntimeExecutors.test.ts
index 064bd6a843..d280543301 100644
--- a/src/server/modules/AgentRuntime/__tests__/RuntimeExecutors.test.ts
+++ b/src/server/modules/AgentRuntime/__tests__/RuntimeExecutors.test.ts
@@ -655,8 +655,10 @@ describe('RuntimeExecutors', () => {
 
       const result = await executors.call_tools_batch!(instruction, state);
 
-      // Should query messages from database
+      // Should query messages from database with agentId, threadId, and topicId
       expect(mockMessageModel.query).toHaveBeenCalledWith({
+        agentId: 'agent-123',
+        threadId: 'thread-123',
         topicId: 'topic-123',
       });
 
@@ -952,6 +954,109 @@
         }),
       );
     });
+
+    it('should query messages with correct metadata fields when state.metadata is defined', async () => {
+      const executors = createRuntimeExecutors(ctx);
+      const state = createMockState({
+        metadata: {
+          agentId: 'agent-abc',
+          threadId: 'thread-xyz',
+          topicId: 'topic-abc-123',
+        },
+      });
+
+      const instruction = {
+        payload: {
+          parentMessageId: 'assistant-msg-123',
+          toolsCalling: [
+            {
+              apiName: 'search',
+              arguments: '{}',
+              id: 'tool-call-1',
+              identifier: 'web-search',
+              type: 'default' as const,
+            },
+          ],
+        },
+        type: 'call_tools_batch' as const,
+      };
+
+      await executors.call_tools_batch!(instruction, state);
+
+      // Should query messages with agentId, threadId, and topicId from state.metadata
+      expect(mockMessageModel.query).toHaveBeenCalledWith({
+        agentId: 'agent-abc',
+        threadId: 'thread-xyz',
+        topicId: 'topic-abc-123',
+      });
+    });
+
+    it('should preserve messages in newState even when state.metadata.topicId is undefined', async () => {
+      // Regression test: When state.metadata.topicId is undefined, previously the query
+      // only passed topicId, which caused isNull(topicId) condition and returned 0 messages.
+      // This led to "messages: at least one message is required" error in the next call_llm step.
+      //
+      // Fix: Now we also pass agentId and threadId, so even when topicId is undefined,
+      // the query can still find messages by agentId scope.
+
+      // Mock: query returns messages when agentId is provided (regardless of topicId)
+      mockMessageModel.query = vi
+        .fn()
+        .mockImplementation((params: { agentId?: string; topicId?: string }) => {
+          // With the fix, agentId is always passed, so we can find messages
+          if (params.agentId) {
+            return Promise.resolve([
+              { id: 'msg-1', content: 'Hello', role: 'user' },
+              { id: 'msg-2', content: 'Response', role: 'assistant', tool_calls: [] },
+            ]);
+          }
+          // Without agentId (old buggy behavior), return empty
+          return Promise.resolve([]);
+        });
+
+      const executors = createRuntimeExecutors(ctx);
+      // State with undefined topicId but has agentId
+      const state = createMockState({
+        messages: [
+          { content: 'Hello', role: 'user' },
+          { content: 'Response', role: 'assistant', tool_calls: [] },
+        ],
+        metadata: {
+          agentId: 'agent-123',
+          threadId: 'thread-123',
+          topicId: undefined, // topicId is undefined
+        },
+      });
+
+      const instruction = {
+        payload: {
+          parentMessageId: 'assistant-msg-123',
+          toolsCalling: [
+            {
+              apiName: 'search',
+              arguments: '{}',
+              id: 'tool-call-1',
+              identifier: 'web-search',
+              type: 'default' as const,
+            },
+          ],
+        },
+        type: 'call_tools_batch' as const,
+      };
+
+      const result = await executors.call_tools_batch!(instruction, state);
+
+      // Verify agentId is passed in the query
+      expect(mockMessageModel.query).toHaveBeenCalledWith({
+        agentId: 'agent-123',
+        threadId: 'thread-123',
+        topicId: undefined,
+      });
+
+      // Expected: newState.messages should NOT be empty
+      // The next call_llm step needs messages to work properly
+      expect(result.newState.messages.length).toBeGreaterThan(0);
+    });
   });
 
   describe('resolve_aborted_tools executor', () => {
diff --git a/src/server/services/aiAgent/__tests__/execAgent.threadId.test.ts b/src/server/services/aiAgent/__tests__/execAgent.threadId.test.ts
index 4d886956cf..66f6967e91 100644
--- a/src/server/services/aiAgent/__tests__/execAgent.threadId.test.ts
+++ b/src/server/services/aiAgent/__tests__/execAgent.threadId.test.ts
@@ -205,7 +205,7 @@ describe('AiAgentService.execAgent - threadId handling', () => {
 
       expect(assistantMessageCall).toBeDefined();
       expect(assistantMessageCall![0].threadId).toBeUndefined();
-    });
+    }, 10_000);
   });
 
   describe('when appContext is undefined', () => {
@@ -247,6 +247,6 @@ describe('AiAgentService.execAgent - threadId handling', () => {
 
       // Verify groupId is passed to AgentRuntimeService (checked in appContext)
      // This is handled by the createOperation call
-    });
+    }, 10_000);
   });
 });
diff --git a/src/server/utils/truncateToolResult.ts b/src/server/utils/truncateToolResult.ts
index 387809f0c8..7cbc312243 100644
--- a/src/server/utils/truncateToolResult.ts
+++ b/src/server/utils/truncateToolResult.ts
@@ -6,9 +6,8 @@
 /**
  * Default maximum length for tool execution result content (in characters)
  * This prevents context overflow when sending results back to LLM
- * @default 6000 characters (~1,500 tokens for English, ~3,000 tokens for Chinese)
  */
-export const DEFAULT_TOOL_RESULT_MAX_LENGTH = 6000;
+export const DEFAULT_TOOL_RESULT_MAX_LENGTH = 25_000;
 
 /**
  * Truncate tool result content if it exceeds the maximum length
@@ -21,8 +20,6 @@ export function truncateToolResult(content: string, maxLength?: number): string
   const limit = maxLength ?? DEFAULT_TOOL_RESULT_MAX_LENGTH;
-  console.log('content-limit', content, limit);
-
   if (!content || content.length <= limit) {
     return content;
   }
diff --git a/src/store/chat/agents/GroupOrchestration/__tests__/batch-exec-async-tasks.test.ts b/src/store/chat/agents/GroupOrchestration/__tests__/batch-exec-async-tasks.test.ts
index 5f5bc0f25e..0e74fc52fd 100644
--- a/src/store/chat/agents/GroupOrchestration/__tests__/batch-exec-async-tasks.test.ts
+++ b/src/store/chat/agents/GroupOrchestration/__tests__/batch-exec-async-tasks.test.ts
@@ -139,8 +139,8 @@ describe('createGroupOrchestrationExecutors', () => {
         {
           payload: {
             tasks: [
-              { agentId: TEST_IDS.AGENT_1_ID, task: 'Task 1', title: 'Task 1 Title' },
-              { agentId: TEST_IDS.AGENT_2_ID, task: 'Task 2', title: 'Task 2 Title' },
+              { agentId: TEST_IDS.AGENT_1_ID, instruction: 'Task 1', title: 'Task 1 Title' },
+              { agentId: TEST_IDS.AGENT_2_ID, instruction: 'Task 2', title: 'Task 2 Title' },
             ],
             toolMessageId: TEST_IDS.TOOL_MESSAGE_ID,
           },
@@ -187,8 +187,8 @@ describe('createGroupOrchestrationExecutors', () => {
         {
           payload: {
             tasks: [
-              { agentId: TEST_IDS.AGENT_1_ID, task: 'Task 1', title: 'Task 1 Title' },
-              { agentId: TEST_IDS.AGENT_2_ID, task: 'Task 2', title: 'Task 2 Title' },
+              { agentId: TEST_IDS.AGENT_1_ID, instruction: 'Task 1', title: 'Task 1 Title' },
+              { agentId: TEST_IDS.AGENT_2_ID, instruction: 'Task 2', title: 'Task 2 Title' },
             ],
             toolMessageId: TEST_IDS.TOOL_MESSAGE_ID,
           },
@@ -263,8 +263,8 @@ describe('createGroupOrchestrationExecutors', () => {
         {
           payload: {
             tasks: [
-              { agentId: TEST_IDS.AGENT_1_ID, task: 'Task 1', title: 'Task 1 Title' },
-              { agentId: TEST_IDS.AGENT_2_ID, task: 'Task 2', title: 'Task 2 Title' },
+              { agentId: TEST_IDS.AGENT_1_ID, instruction: 'Task 1', title: 'Task 1 Title' },
+              { agentId: TEST_IDS.AGENT_2_ID, instruction: 'Task 2', title: 'Task 2 Title' },
             ],
             toolMessageId: TEST_IDS.TOOL_MESSAGE_ID,
           },
@@ -327,8 +327,8 @@ describe('createGroupOrchestrationExecutors', () => {
         {
           payload: {
             tasks: [
-              { agentId: TEST_IDS.AGENT_1_ID, task: 'Task 1', title: 'Task 1 Title' },
-              { agentId: TEST_IDS.AGENT_2_ID, task: 'Task 2', title: 'Task 2 Title' },
+              { agentId: TEST_IDS.AGENT_1_ID, instruction: 'Task 1', title: 'Task 1 Title' },
+              { agentId: TEST_IDS.AGENT_2_ID, instruction: 'Task 2', title: 'Task 2 Title' },
             ],
             toolMessageId: TEST_IDS.TOOL_MESSAGE_ID,
           },
@@ -386,8 +386,8 @@ describe('createGroupOrchestrationExecutors', () => {
         {
           payload: {
             tasks: [
-              { agentId: TEST_IDS.AGENT_1_ID, task: 'Task 1', title: 'Task 1 Title' },
-              { agentId: TEST_IDS.AGENT_2_ID, task: 'Task 2', title: 'Task 2 Title' },
+              { agentId: TEST_IDS.AGENT_1_ID, instruction: 'Task 1', title: 'Task 1 Title' },
+              { agentId: TEST_IDS.AGENT_2_ID, instruction: 'Task 2', title: 'Task 2 Title' },
             ],
             toolMessageId: TEST_IDS.TOOL_MESSAGE_ID,
           },
@@ -445,8 +445,8 @@ describe('createGroupOrchestrationExecutors', () => {
         {
           payload: {
             tasks: [
-              { agentId: TEST_IDS.AGENT_1_ID, task: 'Task 1', title: 'Task 1 Title' },
-              { agentId: TEST_IDS.AGENT_2_ID, task: 'Task 2', title: 'Task 2 Title' },
+              { agentId: TEST_IDS.AGENT_1_ID, instruction: 'Task 1', title: 'Task 1 Title' },
+              { agentId: TEST_IDS.AGENT_2_ID, instruction: 'Task 2', title: 'Task 2 Title' },
             ],
             toolMessageId: TEST_IDS.TOOL_MESSAGE_ID,
           },
@@ -498,7 +498,7 @@ describe('createGroupOrchestrationExecutors', () => {
       const result = await batchExecTasksExecutor(
         {
           payload: {
-            tasks: [{ agentId: TEST_IDS.AGENT_1_ID, task: 'Task 1', title: 'Task 1 Title' }],
+            tasks: [{ agentId: TEST_IDS.AGENT_1_ID, instruction: 'Task 1', title: 'Task 1 Title' }],
             toolMessageId: TEST_IDS.TOOL_MESSAGE_ID,
           },
           type: 'batch_exec_async_tasks',
@@ -546,7 +546,7 @@ describe('createGroupOrchestrationExecutors', () => {
       const result = await batchExecTasksExecutor(
         {
           payload: {
-            tasks: [{ agentId: TEST_IDS.AGENT_1_ID, task: 'Task 1', title: 'Task 1 Title' }],
+            tasks: [{ agentId: TEST_IDS.AGENT_1_ID, instruction: 'Task 1', title: 'Task 1 Title' }],
             toolMessageId: TEST_IDS.TOOL_MESSAGE_ID,
           },
           type: 'batch_exec_async_tasks',
@@ -604,7 +604,7 @@ describe('createGroupOrchestrationExecutors', () => {
       await batchExecTasksExecutor(
         {
           payload: {
-            tasks: [{ agentId: TEST_IDS.AGENT_1_ID, task: 'Task 1', title: 'Task 1 Title' }],
+            tasks: [{ agentId: TEST_IDS.AGENT_1_ID, instruction: 'Task 1', title: 'Task 1 Title' }],
             toolMessageId: TEST_IDS.TOOL_MESSAGE_ID,
           },
           type: 'batch_exec_async_tasks',
diff --git a/src/store/chat/agents/GroupOrchestration/createGroupOrchestrationExecutors.ts b/src/store/chat/agents/GroupOrchestration/createGroupOrchestrationExecutors.ts
index c6fd378565..5c3991ae2c 100644
--- a/src/store/chat/agents/GroupOrchestration/createGroupOrchestrationExecutors.ts
+++ b/src/store/chat/agents/GroupOrchestration/createGroupOrchestrationExecutors.ts
@@ -369,14 +369,17 @@ export const createGroupOrchestrationExecutors = (
    *
    * Returns: task_completed result
    */
-  exec_async_task: async (instruction, state): Promise => {
-    const { agentId, task, timeout, title, toolMessageId } = (
-      instruction as SupervisorInstructionExecAsyncTask
+  exec_async_task: async (
+    supervisorInstruction,
+    state,
+  ): Promise => {
+    const { agentId, instruction, timeout, title, toolMessageId } = (
+      supervisorInstruction as SupervisorInstructionExecAsyncTask
     ).payload;
 
     const sessionLogId = `${state.operationId}:exec_async_task`;
     log(
-      `[${sessionLogId}] Executing async task for agent: ${agentId}, task: ${task}, timeout: ${timeout}`,
+      `[${sessionLogId}] Executing async task for agent: ${agentId}, instruction: ${instruction}, timeout: ${timeout}`,
     );
 
     const { groupId, topicId } = messageContext;
@@ -400,7 +403,7 @@ export const createGroupOrchestrationExecutors = (
           agentId,
           content: '',
           groupId,
-          metadata: { instruction: task, taskTitle: title },
+          metadata: { instruction, taskTitle: title },
           parentId: toolMessageId,
           role: 'task',
           topicId,
@@ -427,7 +430,7 @@ export const createGroupOrchestrationExecutors = (
       const createResult = await aiAgentService.execSubAgentTask({
         agentId,
         groupId,
-        instruction: task,
+        instruction,
         parentMessageId: taskMessageId,
         title,
         topicId,
@@ -600,9 +603,12 @@ export const createGroupOrchestrationExecutors = (
    *
    * Returns: task_completed result
    */
-  exec_client_async_task: async (instruction, state): Promise => {
-    const { agentId, task, title, toolMessageId } = (
-      instruction as SupervisorInstructionExecClientAsyncTask
+  exec_client_async_task: async (
+    supervisorInstruction,
+    state,
+  ): Promise => {
+    const { agentId, instruction, title, toolMessageId } = (
+      supervisorInstruction as SupervisorInstructionExecClientAsyncTask
     ).payload;
 
     const sessionLogId = `${state.operationId}:exec_client_async_task`;
@@ -629,7 +635,7 @@ export const createGroupOrchestrationExecutors = (
           agentId,
           content: '',
           groupId,
-          metadata: { instruction: task, taskTitle: title },
+          metadata: { instruction, taskTitle: title },
           parentId: toolMessageId,
           role: 'task',
           topicId,
@@ -656,7 +662,7 @@ export const createGroupOrchestrationExecutors = (
       // Use Group-specific API that handles different agentIds in thread context
       const threadResult = await aiAgentService.createClientGroupAgentTaskThread({
         groupId: groupId!,
-        instruction: task,
+        instruction,
         parentMessageId: taskMessageId,
         subAgentId: agentId,
         title,
@@ -861,9 +867,9 @@ export const createGroupOrchestrationExecutors = (
     interface TaskTracker {
       agentId: string;
       error?: string;
+      instruction: string;
       result?: string;
       status: 'pending' | 'running' | 'completed' | 'failed';
-      task: string;
       taskMessageId?: string;
       threadId?: string;
       timeout: number;
@@ -873,7 +879,7 @@ export const createGroupOrchestrationExecutors = (
     const taskTrackers: TaskTracker[] = tasks.map((t) => ({
       agentId: t.agentId,
       status: 'pending',
-      task: t.task,
+      instruction: t.instruction,
      timeout: t.timeout || 1_800_000, // Default 30 minutes
       title: t.title,
     }));
@@ -887,8 +893,9 @@ export const createGroupOrchestrationExecutors = (
         {
           agentId: tracker.agentId,
           content: '',
+          createdAt: Date.now() + index,
           groupId,
-          metadata: { instruction: tracker.task, taskTitle: tracker.title },
+          metadata: { instruction: tracker.instruction, taskTitle: tracker.title },
           parentId: toolMessageId,
           role: 'task',
           topicId,
@@ -922,7 +929,7 @@ export const createGroupOrchestrationExecutors = (
       const createResult = await aiAgentService.execSubAgentTask({
         agentId: tracker.agentId,
         groupId,
-        instruction: tracker.task,
+        instruction: tracker.instruction,
         parentMessageId: tracker.taskMessageId,
         title: tracker.title,
         topicId,
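A note on the executor signature change above: the first parameter was renamed from `instruction` to `supervisorInstruction` so that the payload's own `instruction` field can be destructured without shadowing the parameter. A minimal sketch of the collision the rename avoids, with simplified stand-in types:

    // Sketch of the naming collision; types are simplified stand-ins.
    interface ExecTaskInstruction {
      payload: { agentId: string; instruction: string; title: string };
    }

    // With the parameter itself named `instruction`, destructuring `instruction`
    // out of `payload` would redeclare the same identifier in one scope.
    const execAsyncTask = (supervisorInstruction: ExecTaskInstruction) => {
      const { agentId, instruction, title } = supervisorInstruction.payload;
      return `${agentId}: ${title} -> ${instruction}`;
    };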
diff --git a/src/store/chat/agents/__tests__/createAgentExecutors/exec-tasks.test.ts b/src/store/chat/agents/__tests__/createAgentExecutors/exec-tasks.test.ts
index 976acc7a6b..7e187a9520 100644
--- a/src/store/chat/agents/__tests__/createAgentExecutors/exec-tasks.test.ts
+++ b/src/store/chat/agents/__tests__/createAgentExecutors/exec-tasks.test.ts
@@ -66,7 +66,8 @@ describe('exec_tasks executor', () => {
     expect(result.nextContext).toBeDefined();
     expect((result.nextContext as AgentRuntimeContext).phase).toBe('tasks_batch_result');
 
-    const payload = (result.nextContext as AgentRuntimeContext).payload as TasksBatchResultPayload;
+    const payload = (result.nextContext as AgentRuntimeContext)
+      .payload as TasksBatchResultPayload;
     expect(payload.results).toHaveLength(1);
     expect(payload.results[0].success).toBe(true);
     expect(payload.results[0].threadId).toBe('thread_1');
@@ -131,7 +132,8 @@ describe('exec_tasks executor', () => {
     // Then
     expect(result.nextContext).toBeDefined();
-    const payload = (result.nextContext as AgentRuntimeContext).payload as TasksBatchResultPayload;
+    const payload = (result.nextContext as AgentRuntimeContext)
+      .payload as TasksBatchResultPayload;
     expect(payload.results).toHaveLength(3);
     expect(payload.results.every((r) => r.success)).toBe(true);
   });
@@ -170,7 +172,8 @@ describe('exec_tasks executor', () => {
     // Then
     expect(result.nextContext).toBeDefined();
-    const payload = (result.nextContext as AgentRuntimeContext).payload as TasksBatchResultPayload;
+    const payload = (result.nextContext as AgentRuntimeContext)
+      .payload as TasksBatchResultPayload;
     expect(payload.results).toHaveLength(1);
     expect(payload.results[0].success).toBe(false);
     expect(payload.results[0].error).toBe('No valid context available');
@@ -196,7 +199,8 @@ describe('exec_tasks executor', () => {
     });
 
     // Then
-    const payload = (result.nextContext as AgentRuntimeContext).payload as TasksBatchResultPayload;
+    const payload = (result.nextContext as AgentRuntimeContext)
+      .payload as TasksBatchResultPayload;
     expect(payload.results[0].success).toBe(false);
     expect(payload.results[0].error).toBe('Failed to create task message');
   });
@@ -228,7 +232,8 @@ describe('exec_tasks executor', () => {
     });
 
     // Then
-    const payload = (result.nextContext as AgentRuntimeContext).payload as TasksBatchResultPayload;
+    const payload = (result.nextContext as AgentRuntimeContext)
+      .payload as TasksBatchResultPayload;
     expect(payload.results[0].success).toBe(false);
     expect(payload.results[0].error).toBe('API error');
     expect(mockStore.optimisticUpdateMessageContent).toHaveBeenCalledWith(
@@ -270,7 +275,8 @@ describe('exec_tasks executor', () => {
     });
 
     // Then
-    const payload = (result.nextContext as AgentRuntimeContext).payload as TasksBatchResultPayload;
+    const payload = (result.nextContext as AgentRuntimeContext)
+      .payload as TasksBatchResultPayload;
     expect(payload.results[0].success).toBe(false);
     expect(payload.results[0].error).toBe('Execution error');
   });
@@ -318,7 +324,8 @@ describe('exec_tasks executor', () => {
       },
       { operationId: 'test-op' },
     );
-    const payload = (result.nextContext as AgentRuntimeContext).payload as TasksBatchResultPayload;
+    const payload = (result.nextContext as AgentRuntimeContext)
+      .payload as TasksBatchResultPayload;
     expect(payload.results[0].success).toBe(true);
   });
@@ -353,7 +360,8 @@ describe('exec_tasks executor', () => {
     });
 
     // Then
-    const payload = (result.nextContext as AgentRuntimeContext).payload as TasksBatchResultPayload;
+    const payload = (result.nextContext as AgentRuntimeContext)
+      .payload as TasksBatchResultPayload;
     expect(payload.results[0].success).toBe(false);
     expect(payload.results[0].error).toBe('Task was cancelled');
     expect(mockStore.optimisticUpdateMessageContent).toHaveBeenCalledWith(
@@ -402,7 +410,8 @@ describe('exec_tasks executor', () => {
     });
 
     // Then
-    const payload = (result.nextContext as AgentRuntimeContext).payload as TasksBatchResultPayload;
+    const payload = (result.nextContext as AgentRuntimeContext)
+      .payload as TasksBatchResultPayload;
     expect(payload.results[0].success).toBe(false);
     expect(payload.results[0].error).toBe('Operation cancelled');
     // getSubAgentTaskStatus should not be called since operation was cancelled before poll
@@ -532,6 +541,7 @@ describe('exec_tasks executor', () => {
       {
         agentId: 'agent_1',
         content: '',
+        createdAt: expect.any(Number),
         metadata: { instruction: 'Do something important' },
         parentId: 'msg_parent',
         role: 'task',
@@ -588,7 +598,8 @@ describe('exec_tasks executor', () => {
     });
 
     // Then
-    const payload = (result.nextContext as AgentRuntimeContext).payload as TasksBatchResultPayload;
+    const payload = (result.nextContext as AgentRuntimeContext)
+      .payload as TasksBatchResultPayload;
     expect(payload.results).toHaveLength(2);
     expect(payload.results[0].success).toBe(true);
     expect(payload.results[1].success).toBe(false);
diff --git a/src/store/chat/agents/createAgentExecutors.ts b/src/store/chat/agents/createAgentExecutors.ts
index 2ace6864a2..e955709131 100644
--- a/src/store/chat/agents/createAgentExecutors.ts
+++ b/src/store/chat/agents/createAgentExecutors.ts
@@ -1349,6 +1349,7 @@ export const createAgentExecutors = (context: {
       {
         agentId,
         content: '',
+        createdAt: Date.now() + taskIndex,
         metadata: { instruction: task.instruction },
         parentId: parentMessageId,
         role: 'task',
@@ -2035,6 +2036,7 @@ export const createAgentExecutors = (context: {
       {
         agentId,
         content: '',
+        createdAt: Date.now() + taskIndex,
         metadata: { instruction: task.instruction, taskTitle: task.description },
         parentId: parentMessageId,
         role: 'task',
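The `createdAt: Date.now() + taskIndex` lines added above guard against optimistic task messages landing on the same millisecond timestamp, which would make their sort order unstable. A small sketch of the idea:

    // Sketch: offsetting createdAt by the task index keeps parallel task messages
    // in a deterministic order even when they are created in the same millisecond.
    const now = Date.now();

    const taskMessages = ['Task 1', 'Task 2', 'Task 3'].map((title, taskIndex) => ({
      createdAt: now + taskIndex,
      title,
    }));

    // sorting by createdAt now reproduces the original task order
    taskMessages.sort((a, b) => a.createdAt - b.createdAt);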
diff --git a/src/store/chat/slices/aiAgent/actions/groupOrchestration.ts b/src/store/chat/slices/aiAgent/actions/groupOrchestration.ts
index 35c1130cf0..f0c3d7fdd4 100644
--- a/src/store/chat/slices/aiAgent/actions/groupOrchestration.ts
+++ b/src/store/chat/slices/aiAgent/actions/groupOrchestration.ts
@@ -216,17 +216,17 @@ export const groupOrchestrationSlice: StateCreator<
     const {
       supervisorAgentId,
       agentId,
-      task,
+      instruction,
       timeout,
       toolMessageId,
       skipCallSupervisor,
       runInClient,
     } = params;
     log(
-      '[triggerExecuteTask] Starting orchestration with execute_task: supervisorAgentId=%s, agentId=%s, task=%s, timeout=%s, toolMessageId=%s, skipCallSupervisor=%s, runInClient=%s',
+      '[triggerExecuteTask] Starting orchestration with execute_task: supervisorAgentId=%s, agentId=%s, instruction=%s, timeout=%s, toolMessageId=%s, skipCallSupervisor=%s, runInClient=%s',
       supervisorAgentId,
       agentId,
-      task,
+      instruction,
       timeout,
       toolMessageId,
       skipCallSupervisor,
@@ -248,7 +248,7 @@ export const groupOrchestrationSlice: StateCreator<
           type: 'supervisor_decided',
           payload: {
             decision: 'execute_task',
-            params: { agentId, runInClient, task, timeout, toolMessageId },
+            params: { agentId, instruction, runInClient, timeout, toolMessageId },
             skipCallSupervisor: skipCallSupervisor ?? false,
           },
         },
@@ -446,12 +446,15 @@ export const groupOrchestrationSlice: StateCreator<
         revalidateOnReconnect: false,
         refreshInterval: POLLING_INTERVAL,
         onSuccess: (data) => {
-          if (data?.taskDetail && messageId) {
-            // Update taskDetail
+          if (data && messageId) {
+            // Update taskDetail and tasks (intermediate messages)
             get().internal_dispatchMessage({
               id: messageId,
               type: 'updateMessage',
-              value: { taskDetail: data.taskDetail },
+              value: {
+                taskDetail: data.taskDetail,
+                tasks: data.messages,
+              },
             });
 
             // Update content when task is completed or failed