From e5069994ba13afcf64e84c22099686fb38700a10 Mon Sep 17 00:00:00 2001
From: Miguel Medina
Date: Tue, 23 Dec 2025 22:03:53 -0600
Subject: [PATCH 1/3] feat: document fireworks.ai models

---
 .../fireworks/models/deepseek-v3p2.toml    | 23 +++++++++++++++++++
 .../accounts/fireworks/models/glm-4p6.toml | 23 +++++++++++++++++++
 .../accounts/fireworks/models/glm-4p7.toml | 23 +++++++++++++++++++
 3 files changed, 69 insertions(+)
 create mode 100644 providers/fireworks-ai/models/accounts/fireworks/models/deepseek-v3p2.toml
 create mode 100644 providers/fireworks-ai/models/accounts/fireworks/models/glm-4p6.toml
 create mode 100644 providers/fireworks-ai/models/accounts/fireworks/models/glm-4p7.toml

diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/deepseek-v3p2.toml b/providers/fireworks-ai/models/accounts/fireworks/models/deepseek-v3p2.toml
new file mode 100644
index 00000000..6c7cd698
--- /dev/null
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/deepseek-v3p2.toml
@@ -0,0 +1,23 @@
+name = "DeepSeek V3.2"
+family = "deepseek-v3"
+release_date = "2025-12-01"
+last_updated = "2025-12-01"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+knowledge = "2025-09"
+open_weights = true
+
+[cost]
+input = 0.56
+output = 1.68
+cache_read = 0.28
+
+[limit]
+context = 160_000
+output = 160_000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p6.toml b/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p6.toml
new file mode 100644
index 00000000..c2ac8247
--- /dev/null
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p6.toml
@@ -0,0 +1,23 @@
+name = "GLM 4.6"
+family = "glm-4"
+release_date = "2025-10-01"
+last_updated = "2025-10-01"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+knowledge = "2025-04"
+open_weights = true
+
+[cost]
+input = 0.55
+output = 2.19
+cache_read = 0.28
+
+[limit]
+context = 198_000
+output = 198_000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p7.toml b/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p7.toml
new file mode 100644
index 00000000..b0569b24
--- /dev/null
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p7.toml
@@ -0,0 +1,23 @@
+name = "GLM 4.7"
+family = "glm-4"
+release_date = "2025-12-22"
+last_updated = "2025-12-22"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+knowledge = "2025-04"
+open_weights = true
+
+[cost]
+input = 0.60
+output = 2.20
+cache_read = 0.30
+
+[limit]
+context = 198_000
+output = 198_000
+
+[modalities]
+input = ["text"]
+output = ["text"]

From 4eb339327eba3778f21d837dbcce5039037b5f2c Mon Sep 17 00:00:00 2001
From: Miguel Medina
Date: Tue, 23 Dec 2025 22:58:13 -0600
Subject: [PATCH 2/3] fix: document interleaved thinking

---
 .../models/accounts/fireworks/models/deepseek-v3p2.toml        | 3 +++
 .../fireworks-ai/models/accounts/fireworks/models/glm-4p5.toml | 3 +++
 .../fireworks-ai/models/accounts/fireworks/models/glm-4p6.toml | 3 +++
 .../fireworks-ai/models/accounts/fireworks/models/glm-4p7.toml | 3 +++
 .../models/accounts/fireworks/models/kimi-k2-thinking.toml     | 3 +++
 .../models/accounts/fireworks/models/minimax-m2.toml           | 3 +++
 6 files changed, 18 insertions(+)

diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/deepseek-v3p2.toml b/providers/fireworks-ai/models/accounts/fireworks/models/deepseek-v3p2.toml
index 6c7cd698..3f43b818 100644
--- a/providers/fireworks-ai/models/accounts/fireworks/models/deepseek-v3p2.toml
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/deepseek-v3p2.toml
@@ -21,3 +21,6 @@ output = 160_000
 [modalities]
 input = ["text"]
 output = ["text"]
+
+[interleaved]
+field = "reasoning_content"
\ No newline at end of file
diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p5.toml b/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p5.toml
index cb368369..67546aa5 100644
--- a/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p5.toml
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p5.toml
@@ -20,3 +20,6 @@ output = 131_072
 [modalities]
 input = ["text"]
 output = ["text"]
+
+[interleaved]
+field = "reasoning_content"
\ No newline at end of file
diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p6.toml b/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p6.toml
index c2ac8247..4936aecd 100644
--- a/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p6.toml
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p6.toml
@@ -21,3 +21,6 @@ output = 198_000
 [modalities]
 input = ["text"]
 output = ["text"]
+
+[interleaved]
+field = "reasoning_content"
\ No newline at end of file
diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p7.toml b/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p7.toml
index b0569b24..4ff8274f 100644
--- a/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p7.toml
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/glm-4p7.toml
@@ -21,3 +21,6 @@ output = 198_000
 [modalities]
 input = ["text"]
 output = ["text"]
+
+[interleaved]
+field = "reasoning_content"
\ No newline at end of file
diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/kimi-k2-thinking.toml b/providers/fireworks-ai/models/accounts/fireworks/models/kimi-k2-thinking.toml
index 62fbd260..0c969ba8 100644
--- a/providers/fireworks-ai/models/accounts/fireworks/models/kimi-k2-thinking.toml
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/kimi-k2-thinking.toml
@@ -19,3 +19,6 @@ output = 16_384
 [modalities]
 input = ["text"]
 output = ["text"]
+
+[interleaved]
+field = "reasoning_content"
\ No newline at end of file
diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/minimax-m2.toml b/providers/fireworks-ai/models/accounts/fireworks/models/minimax-m2.toml
index f5f5f726..1ad0103c 100644
--- a/providers/fireworks-ai/models/accounts/fireworks/models/minimax-m2.toml
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/minimax-m2.toml
@@ -20,3 +20,6 @@ output = 16_384
 [modalities]
 input = ["text"]
 output = ["text"]
+
+[interleaved]
+field = "reasoning_content"

From bc6981debc59cd217f9336075349f0c6db0f7ac7 Mon Sep 17 00:00:00 2001
From: Miguel Medina
Date: Tue, 23 Dec 2025 23:01:31 -0600
Subject: [PATCH 3/3] fix: increase output token limit

16_384 seems to be the ui limit
---
 .../models/accounts/fireworks/models/kimi-k2-thinking.toml | 2 +-
 .../models/accounts/fireworks/models/minimax-m2.toml       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/kimi-k2-thinking.toml b/providers/fireworks-ai/models/accounts/fireworks/models/kimi-k2-thinking.toml
index 0c969ba8..8f6d6632 100644
--- a/providers/fireworks-ai/models/accounts/fireworks/models/kimi-k2-thinking.toml
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/kimi-k2-thinking.toml
@@ -14,7 +14,7 @@ output = 2.50
 
 [limit]
 context = 256_000
-output = 16_384
+output = 256_000
 
 [modalities]
 input = ["text"]
diff --git a/providers/fireworks-ai/models/accounts/fireworks/models/minimax-m2.toml b/providers/fireworks-ai/models/accounts/fireworks/models/minimax-m2.toml
index 1ad0103c..3382161f 100644
--- a/providers/fireworks-ai/models/accounts/fireworks/models/minimax-m2.toml
+++ b/providers/fireworks-ai/models/accounts/fireworks/models/minimax-m2.toml
@@ -15,7 +15,7 @@ output = 1.20
 
 [limit]
 context = 128_000
-output = 16_384
+output = 128_000
 
 [modalities]
 input = ["text"]
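Reviewer note (not part of the series): the TOML entries added above can be machine-checked. The sketch below is a minimal example that assumes Python 3.11+ (for the standard-library tomllib) and only the field names visible in these diffs; it is not this repository's actual validation tooling, and the check it performs simply mirrors the intent of patch 3, which raises each output limit to match the context window.

    # Sketch only: assumes Python 3.11+ and the field names used in the TOML files above.
    import tomllib

    def check_model(path: str) -> None:
        with open(path, "rb") as f:  # tomllib requires a binary file object
            model = tomllib.load(f)
        limit = model["limit"]
        # Flag entries whose documented output cap exceeds the context window.
        if limit["output"] > limit["context"]:
            raise ValueError(f"{path}: output limit exceeds context window")
        print(model["name"], limit["context"], limit["output"])

    check_model("providers/fireworks-ai/models/accounts/fireworks/models/glm-4p7.toml")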