// Text-to-Image tools — 10 models
import { z } from "zod";
import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { submitAndPoll } from "../poller.js";
import { TOOL_ENDPOINTS, SIZE_MAP } from "../types.js";

// ─── Shared schema fragments (identical values/defaults to the originals) ───

/** Aspect-ratio input: same enum and "1:1" default every tool used. */
const aspectRatioParam = () =>
  z.enum(["1:1", "16:9", "3:2", "2:3", "3:4", "4:3", "9:16"]).default("1:1").describe("Aspect ratio");

/** Output-format input shared by the tools that expose it. */
const outputFormatParam = () =>
  z.enum(["png", "jpeg", "webp"]).default("png").describe("Output image format");

/** Seed input: -1 requests a random seed (passed through to the API as-is). */
const seedParam = () =>
  z.number().int().default(-1).describe("Seed for reproducibility (-1 for random)");

/** Resolve an aspect ratio to the API's "width*height" size string, defaulting to square. */
function sizeFor(aspectRatio: string): string {
  return SIZE_MAP[aspectRatio] ?? SIZE_MAP["1:1"];
}

// Common flags for models that return a URL asynchronously rather than base64/sync.
const ASYNC_URL_OUTPUT = { enable_base64_output: false, enable_sync_mode: false } as const;

/** Format a completed generation task as an MCP text response (URL + task id). */
function imageContent(result: Awaited<ReturnType<typeof submitAndPoll>>) {
  return {
    content: [
      { type: "text" as const, text: `✅ Image generated: ${result.output}\nTask ID: ${result.taskId}` },
    ],
  };
}

/**
 * Registers the 10 text-to-image generation tools on the MCP server.
 *
 * Every tool follows the same pattern: validate input with zod, look up the
 * model's endpoint/scenario in TOOL_ENDPOINTS, submit the job and poll to
 * completion via submitAndPoll, then return the output URL and task id.
 * Request payload keys and their order match each model's API expectations.
 */
export function registerTextToImageTools(server: McpServer): void {
  // ─── Z-Image (default homepage model) ───
  server.tool(
    "zimage_generate",
    "Generate an image using Z-Image (default model, 2 credits). Fast general-purpose text-to-image.",
    {
      prompt: z.string().describe("Text prompt describing the image to generate"),
      aspect_ratio: aspectRatioParam(),
      seed: seedParam(),
      output_format: outputFormatParam(),
    },
    async ({ prompt, aspect_ratio, seed, output_format }) => {
      const ep = TOOL_ENDPOINTS.zimage_generate;
      const result = await submitAndPoll(ep.endpoint, ep.scenario, {
        prompt: prompt.trim(),
        size: sizeFor(aspect_ratio),
        seed,
        output_format,
      });
      return imageContent(result);
    },
  );

  // ─── Z-Image Turbo ───
  server.tool(
    "zimage_turbo_t2i",
    "Generate an image using Z-Image Turbo (faster variant). Text-to-image.",
    {
      prompt: z.string().describe("Text prompt"),
      aspect_ratio: aspectRatioParam(),
      seed: seedParam(),
      output_format: outputFormatParam(),
    },
    async ({ prompt, aspect_ratio, seed, output_format }) => {
      const ep = TOOL_ENDPOINTS.zimage_turbo_t2i;
      const result = await submitAndPoll(ep.endpoint, ep.scenario, {
        prompt: prompt.trim(),
        size: sizeFor(aspect_ratio),
        seed,
        output_format,
      });
      return imageContent(result);
    },
  );

  // ─── Nano Banana ───
  server.tool(
    "nano_banana_t2i",
    "Generate an image using Nano Banana model (2 credits). Good quality text-to-image.",
    {
      prompt: z.string().describe("Text prompt"),
      aspect_ratio: aspectRatioParam(),
      output_format: outputFormatParam(),
    },
    async ({ prompt, aspect_ratio, output_format }) => {
      const ep = TOOL_ENDPOINTS.nano_banana_t2i;
      // Nano Banana takes the aspect ratio directly rather than a pixel size.
      const result = await submitAndPoll(ep.endpoint, ep.scenario, {
        prompt: prompt.trim(),
        ...ASYNC_URL_OUTPUT,
        output_format,
        aspect_ratio,
      });
      return imageContent(result);
    },
  );

  // ─── Nano Banana Pro ───
  server.tool(
    "nano_banana_pro_t2i",
    "Generate an image using Nano Banana Pro model (2 credits). Higher quality text-to-image with resolution control.",
    {
      prompt: z.string().describe("Text prompt"),
      aspect_ratio: aspectRatioParam(),
      resolution: z.enum(["720p", "1080p"]).default("1080p").describe("Output resolution"),
      output_format: outputFormatParam(),
    },
    async ({ prompt, aspect_ratio, resolution, output_format }) => {
      const ep = TOOL_ENDPOINTS.nano_banana_pro_t2i;
      const result = await submitAndPoll(ep.endpoint, ep.scenario, {
        prompt: prompt.trim(),
        ...ASYNC_URL_OUTPUT,
        output_format,
        aspect_ratio,
        resolution,
      });
      return imageContent(result);
    },
  );

  // ─── Nano Banana 2 ───
  server.tool(
    "nano_banana_2_t2i",
    "Generate an image using Nano Banana 2 model (2 credits). Latest Nano Banana text-to-image.",
    {
      prompt: z.string().describe("Text prompt"),
      aspect_ratio: aspectRatioParam(),
      resolution: z.enum(["720p", "1080p"]).default("1080p").describe("Output resolution"),
      output_format: outputFormatParam(),
    },
    async ({ prompt, aspect_ratio, resolution, output_format }) => {
      const ep = TOOL_ENDPOINTS.nano_banana_2_t2i;
      const result = await submitAndPoll(ep.endpoint, ep.scenario, {
        prompt: prompt.trim(),
        ...ASYNC_URL_OUTPUT,
        output_format,
        aspect_ratio,
        resolution,
      });
      return imageContent(result);
    },
  );

  // ─── Seedream V4.5 ───
  server.tool(
    "seedream_v45_t2i",
    "Generate an image using Seedream V4.5 model (2 credits). ByteDance's high-quality text-to-image.",
    {
      prompt: z.string().describe("Text prompt"),
      aspect_ratio: aspectRatioParam(),
    },
    async ({ prompt, aspect_ratio }) => {
      const ep = TOOL_ENDPOINTS.seedream_v45_t2i;
      const result = await submitAndPoll(ep.endpoint, ep.scenario, {
        prompt: prompt.trim(),
        ...ASYNC_URL_OUTPUT,
        size: sizeFor(aspect_ratio),
      });
      return imageContent(result);
    },
  );

  // ─── Seedream 5.0 Lite ───
  server.tool(
    "seedream_50_lite_t2i",
    "Generate an image using Seedream 5.0 Lite model (2 credits). Lighter, faster ByteDance text-to-image.",
    {
      prompt: z.string().describe("Text prompt"),
      aspect_ratio: aspectRatioParam(),
    },
    async ({ prompt, aspect_ratio }) => {
      const ep = TOOL_ENDPOINTS.seedream_50_lite_t2i;
      const result = await submitAndPoll(ep.endpoint, ep.scenario, {
        prompt: prompt.trim(),
        ...ASYNC_URL_OUTPUT,
        size: sizeFor(aspect_ratio),
      });
      return imageContent(result);
    },
  );

  // ─── Flux 2 Klein ───
  server.tool(
    "flux_2_klein_t2i",
    "Generate an image using Flux 2 Klein model (2 credits). Black Forest Labs text-to-image.",
    {
      prompt: z.string().describe("Text prompt"),
      aspect_ratio: aspectRatioParam(),
      seed: seedParam(),
    },
    async ({ prompt, aspect_ratio, seed }) => {
      const ep = TOOL_ENDPOINTS.flux_2_klein_t2i;
      // SIZE_MAP already produces the "width*height" string Flux expects —
      // no need to split and re-join it. The seed passes through unchanged
      // (-1 means random on the API side).
      const result = await submitAndPoll(ep.endpoint, ep.scenario, {
        prompt: prompt.trim(),
        ...ASYNC_URL_OUTPUT,
        size: sizeFor(aspect_ratio),
        seed,
      });
      return imageContent(result);
    },
  );

  // ─── GPT Image 1 ───
  server.tool(
    "gpt_image_1_t2i",
    "Generate an image using GPT Image 1 / DALL-E model (10 credits). OpenAI's text-to-image.",
    {
      prompt: z.string().describe("Text prompt"),
      quality: z.enum(["auto", "high", "medium", "low"]).default("auto").describe("Image quality"),
      aspect_ratio: aspectRatioParam(),
    },
    async ({ prompt, quality, aspect_ratio }) => {
      const ep = TOOL_ENDPOINTS.gpt_image_1_t2i;
      const result = await submitAndPoll(ep.endpoint, ep.scenario, {
        prompt: prompt.trim(),
        ...ASYNC_URL_OUTPUT,
        quality,
        size: sizeFor(aspect_ratio),
      });
      return imageContent(result);
    },
  );

  // ─── Qwen Image ───
  server.tool(
    "qwen_t2i",
    "Generate an image using Qwen Image model (2 credits). Alibaba's text-to-image.",
    {
      prompt: z.string().describe("Text prompt"),
      aspect_ratio: aspectRatioParam(),
      seed: seedParam(),
      output_format: outputFormatParam(),
    },
    async ({ prompt, aspect_ratio, seed, output_format }) => {
      const ep = TOOL_ENDPOINTS.qwen_t2i;
      // As with Flux, SIZE_MAP is already in "width*height" form.
      const result = await submitAndPoll(ep.endpoint, ep.scenario, {
        prompt: prompt.trim(),
        ...ASYNC_URL_OUTPUT,
        size: sizeFor(aspect_ratio),
        seed,
        output_format,
      });
      return imageContent(result);
    },
  );
}
