chore: adjust some openai cached token costs

Created by Andrey Nering

Change summary

internal/providers/configs/openai.json | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

Detailed changes

internal/providers/configs/openai.json 🔗

@@ -26,8 +26,8 @@
       "name": "GPT-5.1",
       "cost_per_1m_in": 1.25,
       "cost_per_1m_out": 10,
-      "cost_per_1m_in_cached": 0.13,
-      "cost_per_1m_out_cached": 0.13,
+      "cost_per_1m_in_cached": 0.125,
+      "cost_per_1m_out_cached": 0.125,
       "context_window": 400000,
       "default_max_tokens": 128000,
       "can_reason": true,
@@ -40,8 +40,8 @@
       "name": "GPT-5.1 Codex",
       "cost_per_1m_in": 1.25,
       "cost_per_1m_out": 10,
-      "cost_per_1m_in_cached": 0.25,
-      "cost_per_1m_out_cached": 0.25,
+      "cost_per_1m_in_cached": 0.125,
+      "cost_per_1m_out_cached": 0.125,
       "context_window": 400000,
       "default_max_tokens": 128000,
       "can_reason": true,
@@ -54,8 +54,8 @@
       "name": "GPT-5.1 Codex Mini",
       "cost_per_1m_in": 0.25,
       "cost_per_1m_out": 2,
-      "cost_per_1m_in_cached": 0.03,
-      "cost_per_1m_out_cached": 0.03,
+      "cost_per_1m_in_cached": 0.025,
+      "cost_per_1m_out_cached": 0.025,
       "context_window": 400000,
       "default_max_tokens": 128000,
       "can_reason": true,
@@ -68,8 +68,8 @@
       "name": "GPT-5 Codex",
       "cost_per_1m_in": 1.25,
       "cost_per_1m_out": 10,
-      "cost_per_1m_in_cached": 0.25,
-      "cost_per_1m_out_cached": 0.25,
+      "cost_per_1m_in_cached": 0.125,
+      "cost_per_1m_out_cached": 0.125,
       "context_window": 400000,
       "default_max_tokens": 128000,
       "can_reason": true,
@@ -82,8 +82,8 @@
       "name": "GPT-5",
       "cost_per_1m_in": 1.25,
       "cost_per_1m_out": 10,
-      "cost_per_1m_in_cached": 0.13,
-      "cost_per_1m_out_cached": 0.13,
+      "cost_per_1m_in_cached": 0.125,
+      "cost_per_1m_out_cached": 0.125,
       "context_window": 400000,
       "default_max_tokens": 128000,
       "can_reason": true,