feat(cli): add generate command for text/image/video/tts/asr (#12799)

*  feat(cli): add generate command for text/image/video/tts/asr

LOBE-5711

- `lh generate text <prompt>` — LLM text completion with SSE streaming
  - Supports --model (provider/model format), --system, --temperature, --pipe
- `lh generate image <prompt>` — Image generation via async task
- `lh generate video <prompt>` — Video generation via async task
- `lh generate tts <text>` — Text-to-speech (openai/microsoft/edge backends)
- `lh generate asr <file>` — Speech-to-text via OpenAI Whisper
- `lh generate status` — Check async generation task status
- `lh generate list` — List generation topics
- Add shared HTTP auth helper (api/http.ts) for webapi endpoints

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* update info

* ♻️ refactor(cli): split generate command into submodules, text defaults non-streaming

- Split monolithic generate.ts into generate/{index,text,image,video,tts,asr}.ts
- Text subcommand now defaults to non-streaming (use --stream to opt in)
- Text subcommand supports --json for full JSON response output
- Video subcommand uses requiredOption for --model and --provider

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* 🐛 fix(cli): read generation data from result.data and add required X-lobe-chat-auth header

Image/video mutations return { success, data: { ... } }, read IDs from data.
WebAPI endpoints require X-lobe-chat-auth (XOR-encrypted) alongside Oidc-Auth.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Arvin Xu
2026-03-08 11:19:01 +08:00
committed by GitHub
parent 2cce103137
commit e67bcb2571
11 changed files with 959 additions and 1 deletions

View File

@@ -1,6 +1,6 @@
{
"name": "@lobehub/cli",
"version": "0.0.1-canary.3",
"version": "0.0.1-canary.5",
"type": "module",
"bin": {
"lh": "./dist/index.js"

49
apps/cli/src/api/http.ts Normal file
View File

@@ -0,0 +1,49 @@
import { getValidToken } from '../auth/refresh';
import { log } from '../utils/logger';
// Must match the server's SECRET_XOR_KEY (src/envs/auth.ts)
const SECRET_XOR_KEY = 'LobeHub · LobeHub';

/**
 * XOR-obfuscate a payload and encode it as Base64.
 *
 * The /webapi/* routes require the `X-lobe-chat-auth` header with this
 * encoding (alongside `Oidc-Auth`).
 *
 * @param payload - JSON-serializable object to obfuscate.
 * @returns Base64 string of the XOR-ed UTF-8 bytes of `JSON.stringify(payload)`.
 */
function obfuscatePayloadWithXOR(payload: Record<string, any>): string {
  const dataBytes = new TextEncoder().encode(JSON.stringify(payload));
  const keyBytes = new TextEncoder().encode(SECRET_XOR_KEY);
  const result = new Uint8Array(dataBytes.length);
  for (let i = 0; i < dataBytes.length; i++) {
    result[i] = dataBytes[i] ^ keyBytes[i % keyBytes.length];
  }
  // Build the binary string incrementally: spreading `result` into
  // String.fromCharCode(...) passes one argument per byte and can blow the
  // engine's argument/stack limit for large payloads.
  let binary = '';
  for (const byte of result) {
    binary += String.fromCharCode(byte);
  }
  return btoa(binary);
}
/** Auth material needed to call the server's /webapi/* endpoints. */
export interface AuthInfo {
  accessToken: string;
  /** Headers required for /webapi/* endpoints (includes both X-lobe-chat-auth and Oidc-Auth) */
  headers: Record<string, string>;
  serverUrl: string;
}

/**
 * Resolve a valid token and assemble the headers the webapi routes expect.
 * Exits the process with code 1 when no stored credentials are available.
 */
export async function getAuthInfo(): Promise<AuthInfo> {
  const tokenResult = await getValidToken();
  if (!tokenResult) {
    log.error("No authentication found. Run 'lh login' first.");
    process.exit(1);
  }

  const credentials = tokenResult!.credentials;
  const token = credentials.accessToken;

  const headers: Record<string, string> = {
    'Content-Type': 'application/json',
    'Oidc-Auth': token,
    'X-lobe-chat-auth': obfuscatePayloadWithXOR({}),
  };

  return {
    accessToken: token,
    headers,
    // Strip a single trailing slash so callers can safely append paths.
    serverUrl: credentials.serverUrl.replace(/\/$/, ''),
  };
}

View File

@@ -0,0 +1,372 @@
import { Command } from 'commander';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { log } from '../utils/logger';
import { registerGenerateCommand } from './generate';
// ── Hoisted test doubles ────────────────────────────────────────────────
// vi.mock factories are hoisted above imports by vitest, so anything they
// reference must be created with vi.hoisted().
const { mockTrpcClient } = vi.hoisted(() => ({
  // Mirrors the subset of tRPC routers the generate command touches.
  mockTrpcClient: {
    generation: {
      getGenerationStatus: { query: vi.fn() },
    },
    generationTopic: {
      createTopic: { mutate: vi.fn() },
      getAllGenerationTopics: { query: vi.fn() },
    },
    image: {
      createImage: { mutate: vi.fn() },
    },
    video: {
      createVideo: { mutate: vi.fn() },
    },
  },
}));
const { getTrpcClient: mockGetTrpcClient } = vi.hoisted(() => ({
  getTrpcClient: vi.fn(),
}));
const { getAuthInfo: mockGetAuthInfo } = vi.hoisted(() => ({
  getAuthInfo: vi.fn(),
}));
const { writeFileSync: mockWriteFileSync } = vi.hoisted(() => ({
  writeFileSync: vi.fn(),
}));
vi.mock('../api/client', () => ({ getTrpcClient: mockGetTrpcClient }));
vi.mock('../api/http', () => ({ getAuthInfo: mockGetAuthInfo }));
// Only writeFileSync is stubbed; the rest of node:fs keeps its real behavior.
vi.mock('node:fs', async (importOriginal) => {
  const actual: Record<string, unknown> = await importOriginal();
  return { ...actual, writeFileSync: mockWriteFileSync };
});
vi.mock('../utils/logger', () => ({
  log: { debug: vi.fn(), error: vi.fn(), info: vi.fn(), warn: vi.fn() },
  setVerbose: vi.fn(),
}));
describe('generate command', () => {
  let exitSpy: ReturnType<typeof vi.spyOn>;
  let consoleSpy: ReturnType<typeof vi.spyOn>;
  let stdoutSpy: ReturnType<typeof vi.spyOn>;

  beforeEach(() => {
    // Neutralize process.exit / console / stdout so command error paths
    // can't kill or pollute the test run, and so output can be asserted.
    exitSpy = vi.spyOn(process, 'exit').mockImplementation((() => {}) as any);
    consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {});
    stdoutSpy = vi.spyOn(process.stdout, 'write').mockImplementation(() => true);
    mockGetTrpcClient.mockResolvedValue(mockTrpcClient);
    mockGetAuthInfo.mockResolvedValue({
      accessToken: 'test-token',
      headers: {
        'Content-Type': 'application/json',
        'Oidc-Auth': 'test-token',
        'X-lobe-chat-auth': 'test-xor-token',
      },
      serverUrl: 'https://app.lobehub.com',
    });
    // Reset every procedure mock on the fake tRPC client between tests.
    for (const router of Object.values(mockTrpcClient)) {
      for (const method of Object.values(router)) {
        for (const fn of Object.values(method)) {
          (fn as ReturnType<typeof vi.fn>).mockReset();
        }
      }
    }
  });

  afterEach(() => {
    exitSpy.mockRestore();
    consoleSpy.mockRestore();
    stdoutSpy.mockRestore();
    vi.restoreAllMocks();
  });

  // Fresh commander program per test; exitOverride turns commander's
  // process.exit calls into throwable errors.
  function createProgram() {
    const program = new Command();
    program.exitOverride();
    registerGenerateCommand(program);
    return program;
  }

  describe('text', () => {
    it('should default to non-streaming and output plain text', async () => {
      vi.stubGlobal(
        'fetch',
        vi.fn().mockResolvedValue({
          json: vi.fn().mockResolvedValue({
            choices: [{ message: { content: 'Response text' } }],
          }),
          ok: true,
        }),
      );
      const program = createProgram();
      await program.parseAsync(['node', 'test', 'generate', 'text', 'Hello']);
      // Should send stream: false by default
      const fetchCall = vi.mocked(fetch).mock.calls[0];
      const body = JSON.parse(fetchCall[1]!.body as string);
      expect(body.stream).toBe(false);
      expect(stdoutSpy).toHaveBeenCalledWith('Response text');
    });

    it('should output JSON when --json is used', async () => {
      const responseBody = {
        choices: [{ message: { content: 'Hello' } }],
        model: 'gpt-4o-mini',
        usage: { completion_tokens: 5, prompt_tokens: 10 },
      };
      vi.stubGlobal(
        'fetch',
        vi.fn().mockResolvedValue({
          json: vi.fn().mockResolvedValue(responseBody),
          ok: true,
        }),
      );
      const program = createProgram();
      await program.parseAsync(['node', 'test', 'generate', 'text', 'Hello', '--json']);
      expect(consoleSpy).toHaveBeenCalledWith(JSON.stringify(responseBody, null, 2));
    });

    it('should stream when --stream is explicitly passed', async () => {
      // Minimal SSE body: one delta chunk followed by the [DONE] sentinel.
      const encoder = new TextEncoder();
      const stream = new ReadableStream({
        start(controller) {
          controller.enqueue(
            encoder.encode('data: {"choices":[{"delta":{"content":"Hello"}}]}\n\n'),
          );
          controller.enqueue(encoder.encode('data: [DONE]\n\n'));
          controller.close();
        },
      });
      vi.stubGlobal('fetch', vi.fn().mockResolvedValue({ body: stream, ok: true }));
      const program = createProgram();
      await program.parseAsync(['node', 'test', 'generate', 'text', 'Hi', '--stream']);
      const fetchCall = vi.mocked(fetch).mock.calls[0];
      const body = JSON.parse(fetchCall[1]!.body as string);
      expect(body.stream).toBe(true);
      expect(stdoutSpy).toHaveBeenCalledWith('Hello');
    });

    it('should parse provider from model string', async () => {
      vi.stubGlobal(
        'fetch',
        vi.fn().mockResolvedValue({
          json: vi.fn().mockResolvedValue({
            choices: [{ message: { content: 'ok' } }],
          }),
          ok: true,
        }),
      );
      const program = createProgram();
      await program.parseAsync([
        'node',
        'test',
        'generate',
        'text',
        'Hi',
        '--model',
        'anthropic/claude-3-haiku',
      ]);
      // "provider/model" format routes the request to that provider's endpoint.
      expect(fetch).toHaveBeenCalledWith(
        'https://app.lobehub.com/webapi/chat/anthropic',
        expect.any(Object),
      );
    });

    it('should exit on error response', async () => {
      vi.stubGlobal(
        'fetch',
        vi.fn().mockResolvedValue({
          ok: false,
          status: 500,
          text: vi.fn().mockResolvedValue('Internal error'),
        }),
      );
      const program = createProgram();
      await program.parseAsync(['node', 'test', 'generate', 'text', 'fail']);
      expect(log.error).toHaveBeenCalledWith(expect.stringContaining('500'));
      expect(exitSpy).toHaveBeenCalledWith(1);
    });
  });

  describe('image', () => {
    it('should create image generation', async () => {
      mockTrpcClient.generationTopic.createTopic.mutate.mockResolvedValue('topic-1');
      // Mutations wrap the payload as { success, data } — the command must
      // read IDs from `data`, not the top level.
      mockTrpcClient.image.createImage.mutate.mockResolvedValue({
        data: {
          batch: { id: 'batch-1' },
          generations: [{ asyncTaskId: 'task-1', id: 'gen-1' }],
        },
        success: true,
      });
      const program = createProgram();
      await program.parseAsync([
        'node',
        'test',
        'generate',
        'image',
        'a cute cat',
        '--model',
        'dall-e-3',
        '--provider',
        'openai',
      ]);
      expect(mockTrpcClient.generationTopic.createTopic.mutate).toHaveBeenCalledWith({
        type: 'image',
      });
      expect(mockTrpcClient.image.createImage.mutate).toHaveBeenCalledWith(
        expect.objectContaining({
          generationTopicId: 'topic-1',
          model: 'dall-e-3',
          params: { prompt: 'a cute cat' },
          provider: 'openai',
        }),
      );
      expect(consoleSpy).toHaveBeenCalledWith(expect.stringContaining('Image generation started'));
    });
  });

  describe('video', () => {
    it('should create video generation', async () => {
      mockTrpcClient.generationTopic.createTopic.mutate.mockResolvedValue('topic-2');
      mockTrpcClient.video.createVideo.mutate.mockResolvedValue({
        data: { generationId: 'gen-v1' },
        success: true,
      });
      const program = createProgram();
      await program.parseAsync([
        'node',
        'test',
        'generate',
        'video',
        'a dancing cat',
        '--model',
        'gen-3',
        '--provider',
        'runway',
      ]);
      expect(mockTrpcClient.video.createVideo.mutate).toHaveBeenCalledWith(
        expect.objectContaining({
          generationTopicId: 'topic-2',
          model: 'gen-3',
          params: { prompt: 'a dancing cat' },
          provider: 'runway',
        }),
      );
      expect(consoleSpy).toHaveBeenCalledWith(expect.stringContaining('Video generation started'));
    });
  });

  describe('tts', () => {
    it('should call TTS endpoint and save file', async () => {
      const audioBuffer = new ArrayBuffer(100);
      vi.stubGlobal(
        'fetch',
        vi.fn().mockResolvedValue({
          arrayBuffer: vi.fn().mockResolvedValue(audioBuffer),
          ok: true,
        }),
      );
      const program = createProgram();
      await program.parseAsync([
        'node',
        'test',
        'generate',
        'tts',
        'Hello world',
        '--output',
        '/tmp/test.mp3',
      ]);
      expect(fetch).toHaveBeenCalledWith(
        'https://app.lobehub.com/webapi/tts/openai',
        expect.objectContaining({ method: 'POST' }),
      );
      // writeFileSync is mocked, so no file is actually written.
      expect(mockWriteFileSync).toHaveBeenCalledWith('/tmp/test.mp3', expect.any(Buffer));
      expect(consoleSpy).toHaveBeenCalledWith(expect.stringContaining('Audio saved'));
    });

    it('should reject invalid backend', async () => {
      const program = createProgram();
      await program.parseAsync([
        'node',
        'test',
        'generate',
        'tts',
        'Hello',
        '--backend',
        'invalid',
      ]);
      expect(log.error).toHaveBeenCalledWith(expect.stringContaining('Invalid backend'));
      expect(exitSpy).toHaveBeenCalledWith(1);
    });
  });

  describe('asr', () => {
    it('should exit when file not found', async () => {
      const program = createProgram();
      await program.parseAsync(['node', 'test', 'generate', 'asr', '/nonexistent/audio.mp3']);
      expect(log.error).toHaveBeenCalledWith(expect.stringContaining('not found'));
      expect(exitSpy).toHaveBeenCalledWith(1);
    });
  });

  describe('status', () => {
    it('should show generation status', async () => {
      mockTrpcClient.generation.getGenerationStatus.query.mockResolvedValue({
        generation: { asset: { url: 'https://example.com/image.png' }, id: 'gen-1' },
        status: 'success',
      });
      const program = createProgram();
      await program.parseAsync(['node', 'test', 'generate', 'status', 'gen-1', 'task-1']);
      expect(mockTrpcClient.generation.getGenerationStatus.query).toHaveBeenCalledWith({
        asyncTaskId: 'task-1',
        generationId: 'gen-1',
      });
      expect(consoleSpy).toHaveBeenCalledWith(expect.stringContaining('success'));
    });
  });

  describe('list', () => {
    it('should list generation topics', async () => {
      mockTrpcClient.generationTopic.getAllGenerationTopics.query.mockResolvedValue([
        { id: 't1', title: 'My Images', type: 'image', updatedAt: new Date().toISOString() },
      ]);
      const program = createProgram();
      await program.parseAsync(['node', 'test', 'generate', 'list']);
      // One call for the header row, one for the data row.
      expect(consoleSpy).toHaveBeenCalledTimes(2);
      expect(consoleSpy.mock.calls[0][0]).toContain('ID');
    });

    it('should show message when empty', async () => {
      mockTrpcClient.generationTopic.getAllGenerationTopics.query.mockResolvedValue([]);
      const program = createProgram();
      await program.parseAsync(['node', 'test', 'generate', 'list']);
      expect(consoleSpy).toHaveBeenCalledWith('No generation topics found.');
    });
  });
});

View File

@@ -0,0 +1,77 @@
import { createReadStream, existsSync } from 'node:fs';
import path from 'node:path';
import type { Command } from 'commander';
import { getAuthInfo } from '../../api/http';
import { log } from '../../utils/logger';
/**
 * Register `generate asr <audio-file>`: upload an audio file to the server's
 * OpenAI Whisper STT endpoint and print the transcript.
 */
export function registerAsrCommand(parent: Command) {
  parent
    .command('asr <audio-file>')
    .description('Convert speech to text (automatic speech recognition)')
    .option('--model <model>', 'STT model', 'whisper-1')
    .option('--language <lang>', 'Language code (e.g. en, zh)')
    .option('--json', 'Output raw JSON')
    .action(
      async (
        audioFile: string,
        options: {
          json?: boolean;
          language?: string;
          model: string;
        },
      ) => {
        // Fail fast before touching auth or the network.
        if (!existsSync(audioFile)) {
          log.error(`File not found: ${audioFile}`);
          process.exit(1);
          return;
        }

        const auth = await getAuthInfo();

        const recognitionOptions: Record<string, any> = { model: options.model };
        if (options.language) recognitionOptions.language = options.language;

        const audioBlob = await readFileAsBlob(audioFile);
        const form = new FormData();
        form.append('speech', audioBlob, path.basename(audioFile));
        form.append('options', JSON.stringify(recognitionOptions));

        // Remove Content-Type for multipart/form-data (let fetch set it with boundary)
        const { 'Content-Type': _, ...uploadHeaders } = auth.headers;

        const response = await fetch(`${auth.serverUrl}/webapi/stt/openai`, {
          body: form,
          headers: uploadHeaders,
          method: 'POST',
        });

        if (!response.ok) {
          const errText = await response.text();
          log.error(`ASR failed: ${response.status} ${errText}`);
          process.exit(1);
          return;
        }

        const payload = await response.json();
        if (options.json) {
          console.log(JSON.stringify(payload, null, 2));
          return;
        }

        // Fall back to dumping the raw JSON if the expected `text` field is absent.
        const transcript = (payload as any).text || JSON.stringify(payload);
        process.stdout.write(transcript);
        process.stdout.write('\n');
      },
    );
}
/** Read a file from disk into a Blob suitable for FormData uploads. */
async function readFileAsBlob(filePath: string): Promise<Blob> {
  const collected: Uint8Array[] = [];
  const source = createReadStream(filePath);
  // Async-iterate the stream; each emitted chunk is a Buffer (a Uint8Array).
  for await (const piece of source) {
    collected.push(piece as Uint8Array);
  }
  return new Blob(collected);
}

View File

@@ -0,0 +1,76 @@
import type { Command } from 'commander';
import pc from 'picocolors';
import { getTrpcClient } from '../../api/client';
/**
 * Register `generate image <prompt>`: create a generation topic, then queue an
 * async image-generation task via tRPC and print the task identifiers.
 */
export function registerImageCommand(parent: Command) {
  parent
    .command('image <prompt>')
    .description('Generate an image from text')
    .option('-m, --model <model>', 'Model ID', 'dall-e-3')
    .option('-p, --provider <provider>', 'Provider name', 'openai')
    .option('-n, --num <n>', 'Number of images', '1')
    .option('--width <px>', 'Width in pixels')
    .option('--height <px>', 'Height in pixels')
    .option('--steps <n>', 'Number of steps')
    .option('--seed <n>', 'Random seed')
    .option('--json', 'Output raw JSON')
    .action(
      async (
        prompt: string,
        options: {
          height?: string;
          json?: boolean;
          model: string;
          num: string;
          provider: string;
          seed?: string;
          steps?: string;
          width?: string;
        },
      ) => {
        const client = await getTrpcClient();

        // Every generation is grouped under a topic; create one per invocation.
        const topicId = await client.generationTopic.createTopic.mutate({ type: 'image' });

        // Numeric CLI flags arrive as strings; copy only the ones provided.
        const params: Record<string, any> = { prompt };
        for (const flag of ['width', 'height', 'steps', 'seed'] as const) {
          const raw = options[flag];
          if (raw) params[flag] = Number.parseInt(raw, 10);
        }

        const result = await client.image.createImage.mutate({
          generationTopicId: topicId as string,
          imageNum: Number.parseInt(options.num, 10),
          model: options.model,
          params,
          provider: options.provider,
        });

        const response = result as any;
        if (options.json) {
          console.log(JSON.stringify(response, null, 2));
          return;
        }

        // Mutations wrap their payload as { success, data }; fall back to the
        // top level for older response shapes.
        const data = response.data || response;
        console.log(`${pc.green('✓')} Image generation started`);
        if (data.batch?.id) console.log(` Batch ID: ${pc.bold(data.batch.id)}`);

        const generations = data.generations || [];
        if (generations.length === 0) return;

        console.log(` ${generations.length} image(s) queued`);
        for (const gen of generations) {
          if (gen.asyncTaskId) {
            console.log(` Generation ${pc.bold(gen.id)} → Task ${pc.dim(gen.asyncTaskId)}`);
          }
        }
        console.log();
        console.log(
          pc.dim('Use "lh generate status <generationId> <taskId>" to check progress.'),
        );
      },
    );
}

View File

@@ -0,0 +1,104 @@
import type { Command } from 'commander';
import pc from 'picocolors';
import { getTrpcClient } from '../../api/client';
import { outputJson, printTable, timeAgo, truncate } from '../../utils/format';
import { registerAsrCommand } from './asr';
import { registerImageCommand } from './image';
import { registerTextCommand } from './text';
import { registerTtsCommand } from './tts';
import { registerVideoCommand } from './video';
/**
 * Register the `generate` (alias `gen`) command group: content subcommands
 * plus the shared `status` and `list` helpers.
 */
export function registerGenerateCommand(program: Command) {
  const generate = program
    .command('generate')
    .alias('gen')
    .description('Generate content (text, image, video, speech)');

  // Content-type subcommands live in their own modules.
  registerTextCommand(generate);
  registerImageCommand(generate);
  registerVideoCommand(generate);
  registerTtsCommand(generate);
  registerAsrCommand(generate);

  // ── status ──────────────────────────────────────────
  generate
    .command('status <generationId> <taskId>')
    .description('Check generation task status')
    .option('--json', 'Output raw JSON')
    .action(async (generationId: string, taskId: string, options: { json?: boolean }) => {
      const trpc = await getTrpcClient();
      const result = await trpc.generation.getGenerationStatus.query({
        asyncTaskId: taskId,
        generationId,
      });

      if (options.json) {
        console.log(JSON.stringify(result, null, 2));
        return;
      }

      const statusInfo = result as any;
      console.log(`Status: ${colorStatus(statusInfo.status)}`);
      if (statusInfo.error) {
        const message = statusInfo.error.message || JSON.stringify(statusInfo.error);
        console.log(`Error: ${pc.red(message)}`);
      }
      const gen = statusInfo.generation;
      if (gen) {
        console.log(` ID: ${gen.id}`);
        if (gen.asset?.url) console.log(` URL: ${gen.asset.url}`);
        if (gen.asset?.thumbnailUrl) console.log(` Thumb: ${gen.asset.thumbnailUrl}`);
      }
    });

  // ── list ────────────────────────────────────────────
  generate
    .command('list')
    .description('List generation topics')
    .option('--json [fields]', 'Output JSON, optionally specify fields (comma-separated)')
    .action(async (options: { json?: string | boolean }) => {
      const trpc = await getTrpcClient();
      const result = await trpc.generationTopic.getAllGenerationTopics.query();
      const topics = Array.isArray(result) ? result : [];

      // `--json` may carry an optional comma-separated field filter.
      if (options.json !== undefined) {
        outputJson(topics, typeof options.json === 'string' ? options.json : undefined);
        return;
      }

      if (topics.length === 0) {
        console.log('No generation topics found.');
        return;
      }

      const tableRows = topics.map((t: any) => [
        t.id || '',
        truncate(t.title || 'Untitled', 40),
        t.type || '',
        t.updatedAt ? timeAgo(t.updatedAt) : '',
      ]);
      printTable(tableRows, ['ID', 'TITLE', 'TYPE', 'UPDATED']);
    });
}
/** Color-code a task status string for terminal output; unknown values pass through. */
export function colorStatus(status: string): string {
  const painter = new Map<string, (value: string) => string>([
    ['success', pc.green],
    ['error', pc.red],
    ['processing', pc.yellow],
    ['pending', pc.cyan],
  ]).get(status);
  return painter ? painter(status) : status;
}

View File

@@ -0,0 +1,146 @@
import type { Command } from 'commander';
import { getAuthInfo } from '../../api/http';
import { log } from '../../utils/logger';
/**
 * Register `generate text <prompt>`: a single LLM completion against the
 * server's /webapi/chat/<provider> endpoint. Non-streaming by default;
 * `--stream` switches to SSE rendering, `--json` dumps the raw response.
 */
export function registerTextCommand(parent: Command) {
  parent
    .command('text <prompt>')
    .description('Generate text with an LLM (single completion, no tools)')
    .option('-m, --model <model>', 'Model ID (provider/model format)', 'openai/gpt-4o-mini')
    .option('-p, --provider <provider>', 'Provider name (derived from model if omitted)')
    .option('-s, --system <prompt>', 'System prompt')
    .option('--temperature <n>', 'Temperature (0-2)')
    .option('--max-tokens <n>', 'Maximum output tokens')
    .option('--stream', 'Enable streaming (SSE, renders incrementally)')
    .option('--json', 'Output full JSON response')
    .option('--pipe', 'Pipe mode: read additional context from stdin')
    .action(
      async (
        prompt: string,
        options: {
          json?: boolean;
          maxTokens?: string;
          model: string;
          pipe?: boolean;
          provider?: string;
          stream?: boolean;
          system?: string;
          temperature?: string;
        },
      ) => {
        // Resolve provider from model if not specified
        // ("anthropic/claude-3-haiku" → provider "anthropic", model "claude-3-haiku";
        // a bare model name defaults the provider to "openai").
        const parts = options.model.split('/');
        const provider = options.provider || (parts.length > 1 ? parts[0] : 'openai');
        const model = parts.length > 1 ? parts.slice(1).join('/') : options.model;
        // Read additional input from stdin if --pipe; stdin is consumed to EOF
        // and appended to the prompt when non-empty.
        let fullPrompt = prompt;
        if (options.pipe) {
          const chunks: Buffer[] = [];
          for await (const chunk of process.stdin) {
            chunks.push(chunk as Buffer);
          }
          const stdinContent = Buffer.concat(chunks).toString('utf8').trim();
          if (stdinContent) {
            fullPrompt = `${prompt}\n\n${stdinContent}`;
          }
        }
        // OpenAI-style message list: optional system message first, then the user turn.
        const messages: Array<{ content: string; role: string }> = [];
        if (options.system) {
          messages.push({ content: options.system, role: 'system' });
        }
        messages.push({ content: fullPrompt, role: 'user' });
        const useStream = options.stream === true;
        const payload: Record<string, any> = {
          messages,
          model,
          stream: useStream,
        };
        // NOTE(review): both checks are string-truthiness, so "--temperature 0"
        // is the string '0' and still passes — only a missing flag is skipped.
        if (options.temperature) payload.temperature = Number.parseFloat(options.temperature);
        if (options.maxTokens) payload.max_tokens = Number.parseInt(options.maxTokens, 10);
        const { serverUrl, headers } = await getAuthInfo();
        const res = await fetch(`${serverUrl}/webapi/chat/${provider}`, {
          body: JSON.stringify(payload),
          headers,
          method: 'POST',
        });
        if (!res.ok) {
          const text = await res.text();
          log.error(`Text generation failed: ${res.status} ${text}`);
          process.exit(1);
          return;
        }
        if (!useStream) {
          const body = await res.json();
          if (options.json) {
            console.log(JSON.stringify(body, null, 2));
          } else {
            // Fall back to the raw JSON when the expected content path is absent.
            const content = (body as any).choices?.[0]?.message?.content || JSON.stringify(body);
            process.stdout.write(content);
            process.stdout.write('\n');
          }
          return;
        }
        // Stream SSE response
        if (!res.body) {
          log.error('No response body received');
          process.exit(1);
          return;
        }
        await streamSSEResponse(res.body, options.json);
      },
    );
}
/**
 * Render an OpenAI-style SSE chat stream to stdout.
 *
 * Non-JSON mode prints each `choices[0].delta.content` fragment as it arrives
 * and finishes with a newline; JSON mode prints one parsed event per line.
 * Stops at the `[DONE]` sentinel or end-of-stream.
 *
 * @param body - The response body stream of `data:`-prefixed SSE lines.
 * @param json - When true, emit each parsed event as a JSON line instead of text.
 */
async function streamSSEResponse(body: ReadableStream<Uint8Array>, json?: boolean): Promise<void> {
  const reader = body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  // Handle one SSE line; returns true when the [DONE] sentinel is seen.
  const handleLine = (line: string): boolean => {
    if (!line.startsWith('data:')) return false;
    const data = line.slice(5).trim();
    if (data === '[DONE]') return true;
    try {
      const parsed = JSON.parse(data);
      if (json) {
        console.log(JSON.stringify(parsed));
      } else {
        const content = parsed.choices?.[0]?.delta?.content;
        if (content) process.stdout.write(content);
      }
    } catch {
      // Not JSON, might be raw text chunk
      if (!json) process.stdout.write(data);
    }
    return false;
  };

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      // Keep the trailing partial line in the buffer until it is terminated.
      buffer = lines.pop() || '';
      for (const line of lines) {
        if (handleLine(line)) {
          if (!json) process.stdout.write('\n');
          return;
        }
      }
    }
    // Flush any bytes held by the decoder and process a final unterminated
    // line — a stream that ends without a trailing '\n' (or without [DONE])
    // would otherwise silently drop its last event.
    buffer += decoder.decode();
    if (buffer && handleLine(buffer)) {
      if (!json) process.stdout.write('\n');
      return;
    }
    // Final newline
    if (!json) process.stdout.write('\n');
  } finally {
    reader.releaseLock();
  }
}

View File

@@ -0,0 +1,69 @@
import { writeFileSync } from 'node:fs';
import type { Command } from 'commander';
import pc from 'picocolors';
import { getAuthInfo } from '../../api/http';
import { log } from '../../utils/logger';
/**
 * Register `generate tts <text>`: synthesize speech via the server's TTS
 * endpoint for the chosen backend and write the audio bytes to disk.
 */
export function registerTtsCommand(parent: Command) {
  parent
    .command('tts <text>')
    .description('Convert text to speech')
    .option('-o, --output <file>', 'Output audio file path', 'output.mp3')
    .option('--voice <voice>', 'Voice name', 'alloy')
    .option('--speed <n>', 'Speed multiplier (0.25-4.0)', '1')
    .option('--model <model>', 'TTS model', 'tts-1')
    .option('--backend <backend>', 'TTS backend: openai, microsoft, edge', 'openai')
    .action(
      async (
        text: string,
        options: {
          backend: string;
          model: string;
          output: string;
          speed: string;
          voice: string;
        },
      ) => {
        // Validate the backend before doing any auth or network work.
        const supportedBackends = ['openai', 'microsoft', 'edge'];
        if (!supportedBackends.includes(options.backend)) {
          log.error(`Invalid backend. Must be one of: ${supportedBackends.join(', ')}`);
          process.exit(1);
          return;
        }

        const auth = await getAuthInfo();

        const requestBody: Record<string, any> = {
          input: text,
          model: options.model,
          // Duplicated at both levels — presumably different backends read
          // different shapes; TODO confirm against the server handlers.
          options: {
            model: options.model,
            voice: options.voice,
          },
          speed: Number.parseFloat(options.speed),
          voice: options.voice,
        };

        const response = await fetch(`${auth.serverUrl}/webapi/tts/${options.backend}`, {
          body: JSON.stringify(requestBody),
          headers: auth.headers,
          method: 'POST',
        });

        if (!response.ok) {
          const errText = await response.text();
          log.error(`TTS failed: ${response.status} ${errText}`);
          process.exit(1);
          return;
        }

        const audio = Buffer.from(await response.arrayBuffer());
        writeFileSync(options.output, audio);
        console.log(
          `${pc.green('✓')} Audio saved to ${pc.bold(options.output)} (${Math.round(audio.length / 1024)}KB)`,
        );
      },
    );
}

View File

@@ -0,0 +1,62 @@
import type { Command } from 'commander';
import pc from 'picocolors';
import { getTrpcClient } from '../../api/client';
/**
 * Register `generate video <prompt>`: create a generation topic, then queue an
 * async video-generation task via tRPC. Model and provider are mandatory.
 */
export function registerVideoCommand(parent: Command) {
  parent
    .command('video <prompt>')
    .description('Generate a video from text')
    .requiredOption('-m, --model <model>', 'Model ID')
    .requiredOption('-p, --provider <provider>', 'Provider name')
    .option('--aspect-ratio <ratio>', 'Aspect ratio (e.g. 16:9)')
    .option('--duration <sec>', 'Duration in seconds')
    .option('--resolution <res>', 'Resolution (e.g. 720p, 1080p)')
    .option('--seed <n>', 'Random seed')
    .option('--json', 'Output raw JSON')
    .action(
      async (
        prompt: string,
        options: {
          aspectRatio?: string;
          duration?: string;
          json?: boolean;
          model: string;
          provider: string;
          resolution?: string;
          seed?: string;
        },
      ) => {
        const client = await getTrpcClient();

        // Generations are grouped under a topic; create one per invocation.
        const topicId = await client.generationTopic.createTopic.mutate({ type: 'video' });

        // Only include the optional flags the caller actually set; numeric
        // flags arrive as strings and are parsed here.
        const params: Record<string, any> = {
          prompt,
          ...(options.aspectRatio && { aspectRatio: options.aspectRatio }),
          ...(options.duration && { duration: Number.parseInt(options.duration, 10) }),
          ...(options.resolution && { resolution: options.resolution }),
          ...(options.seed && { seed: Number.parseInt(options.seed, 10) }),
        };

        const result = await client.video.createVideo.mutate({
          generationTopicId: topicId as string,
          model: options.model,
          params,
          provider: options.provider,
        });

        const response = result as any;
        if (options.json) {
          console.log(JSON.stringify(response, null, 2));
          return;
        }

        // Mutations wrap their payload as { success, data }; fall back to the
        // top level for older response shapes.
        const data = response.data || response;
        console.log(`${pc.green('✓')} Video generation started`);
        if (data.generationId) {
          console.log(` Generation ID: ${pc.bold(data.generationId)}`);
        }
        console.log(
          pc.dim('Video generation runs asynchronously. Check status or wait for notification.'),
        );
      },
    );
}

View File

@@ -5,6 +5,7 @@ import { registerConfigCommand } from './commands/config';
import { registerConnectCommand } from './commands/connect';
import { registerDocCommand } from './commands/doc';
import { registerFileCommand } from './commands/file';
import { registerGenerateCommand } from './commands/generate';
import { registerKbCommand } from './commands/kb';
import { registerLoginCommand } from './commands/login';
import { registerLogoutCommand } from './commands/logout';
@@ -34,6 +35,7 @@ registerSearchCommand(program);
registerKbCommand(program);
registerMemoryCommand(program);
registerAgentCommand(program);
registerGenerateCommand(program);
registerFileCommand(program);
registerSkillCommand(program);
registerTopicCommand(program);

View File

@@ -58,6 +58,7 @@ export const defaultClients: ClientMetadata[] = [
client_id: 'lobehub-cli',
client_name: 'LobeHub CLI',
grant_types: ['urn:ietf:params:oauth:grant-type:device_code', 'refresh_token'],
logo_uri: 'https://hub-apac-1.lobeobjects.space/lobehub-desktop-icon.png',
response_types: [],
token_endpoint_auth_method: 'none',
},