openai.json

{
  "name": "OpenAI",
  "id": "openai",
  "type": "openai",
  "api_key": "$OPENAI_API_KEY",
  "api_endpoint": "$OPENAI_API_ENDPOINT",
  "default_large_model_id": "o4-mini",
  "default_small_model_id": "gpt-4o",
  "models": [
    {
      "id": "o4-mini",
      "model": "o4 Mini",
      "cost_per_1m_in": 1.1,
      "cost_per_1m_out": 4.4,
      "cost_per_1m_in_cached": 0,
      "cost_per_1m_out_cached": 0.275,
      "context_window": 200000,
      "default_max_tokens": 50000,
      "can_reason": true,
      "has_reasoning_effort": true,
      "default_reasoning_effort": "medium",
      "supports_attachments": true
    },
    {
      "id": "o3",
      "model": "o3",
      "cost_per_1m_in": 2,
      "cost_per_1m_out": 8,
      "cost_per_1m_in_cached": 0,
      "cost_per_1m_out_cached": 0.5,
      "context_window": 200000,
      "default_max_tokens": 50000,
      "can_reason": true,
      "has_reasoning_effort": true,
      "default_reasoning_effort": "medium",
      "supports_attachments": true
    },
    {
      "id": "o3-pro",
      "model": "o3 Pro",
      "cost_per_1m_in": 20,
      "cost_per_1m_out": 80,
      "cost_per_1m_in_cached": 0,
      "cost_per_1m_out_cached": 0,
      "context_window": 200000,
      "default_max_tokens": 50000,
      "can_reason": true,
      "has_reasoning_effort": true,
      "default_reasoning_effort": "medium",
      "supports_attachments": true
    },
    {
      "id": "gpt-4.1",
      "model": "GPT-4.1",
      "cost_per_1m_in": 2,
      "cost_per_1m_out": 8,
      "cost_per_1m_in_cached": 0,
      "cost_per_1m_out_cached": 0.5,
      "context_window": 1047576,
      "default_max_tokens": 50000,
      "can_reason": false,
      "supports_attachments": true
    },
    {
      "id": "gpt-4.1-mini",
      "model": "GPT-4.1 Mini",
      "cost_per_1m_in": 0.4,
      "cost_per_1m_out": 1.6,
      "cost_per_1m_in_cached": 0,
      "cost_per_1m_out_cached": 0.1,
      "context_window": 1047576,
      "default_max_tokens": 50000,
      "can_reason": false,
      "supports_attachments": true
    },
    {
      "id": "gpt-4.1-nano",
      "model": "GPT-4.1 Nano",
      "cost_per_1m_in": 0.1,
      "cost_per_1m_out": 0.4,
      "cost_per_1m_in_cached": 0,
      "cost_per_1m_out_cached": 0.025,
      "context_window": 1047576,
      "default_max_tokens": 50000,
      "can_reason": false,
      "supports_attachments": true
    },
    {
      "id": "gpt-4.5-preview",
      "model": "GPT-4.5 (Preview)",
      "cost_per_1m_in": 75,
      "cost_per_1m_out": 150,
      "cost_per_1m_in_cached": 0,
      "cost_per_1m_out_cached": 37.5,
      "context_window": 128000,
      "default_max_tokens": 50000,
      "can_reason": false,
      "supports_attachments": true
    },
    {
      "id": "o3-mini",
      "model": "o3 Mini",
      "cost_per_1m_in": 1.1,
      "cost_per_1m_out": 4.4,
      "cost_per_1m_in_cached": 0,
      "cost_per_1m_out_cached": 0.55,
      "context_window": 200000,
      "default_max_tokens": 50000,
      "can_reason": true,
      "has_reasoning_effort": true,
      "default_reasoning_effort": "medium",
      "supports_attachments": false
    },
    {
      "id": "gpt-4o",
      "model": "GPT-4o",
      "cost_per_1m_in": 2.5,
      "cost_per_1m_out": 10,
      "cost_per_1m_in_cached": 0,
      "cost_per_1m_out_cached": 1.25,
      "context_window": 128000,
      "default_max_tokens": 15000,
      "can_reason": false,
      "supports_attachments": true
    },
    {
      "id": "gpt-4o-mini",
      "model": "GPT-4o-mini",
      "cost_per_1m_in": 0.15,
      "cost_per_1m_out": 0.6,
      "cost_per_1m_in_cached": 0,
      "cost_per_1m_out_cached": 0.075,
      "context_window": 128000,
      "default_max_tokens": 14000,
      "can_reason": false,
      "supports_attachments": true
    }
  ]
}
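
All cost fields above are quoted in dollars per million tokens, and `$OPENAI_API_KEY` / `$OPENAI_API_ENDPOINT` are placeholders that the loading application is expected to expand from environment variables. As a minimal sketch of how a client might consume this file, the Go program below decodes it and estimates the dollar cost of a single request from the per-million-token rates. The `Provider` and `Model` struct names, and the example token counts, are illustrative assumptions, not the actual types of any particular project.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Model mirrors one entry in the "models" array above.
// Field names are inferred from the JSON keys (assumption).
type Model struct {
	ID                  string  `json:"id"`
	Model               string  `json:"model"`
	CostPer1MIn         float64 `json:"cost_per_1m_in"`
	CostPer1MOut        float64 `json:"cost_per_1m_out"`
	CostPer1MInCached   float64 `json:"cost_per_1m_in_cached"`
	CostPer1MOutCached  float64 `json:"cost_per_1m_out_cached"`
	ContextWindow       int     `json:"context_window"`
	DefaultMaxTokens    int     `json:"default_max_tokens"`
	CanReason           bool    `json:"can_reason"`
	SupportsAttachments bool    `json:"supports_attachments"`
}

// Provider mirrors the top-level object of openai.json.
type Provider struct {
	Name                string  `json:"name"`
	ID                  string  `json:"id"`
	Type                string  `json:"type"`
	APIKey              string  `json:"api_key"`
	APIEndpoint         string  `json:"api_endpoint"`
	DefaultLargeModelID string  `json:"default_large_model_id"`
	DefaultSmallModelID string  `json:"default_small_model_id"`
	Models              []Model `json:"models"`
}

func main() {
	raw, err := os.ReadFile("openai.json")
	if err != nil {
		panic(err)
	}

	var p Provider
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}

	// Rates are per 1M tokens, so a request costs
	// tokens/1e6 * rate, summed over input and output.
	// The token counts below are arbitrary example values.
	const inTokens, outTokens = 12_000, 3_000
	for _, m := range p.Models {
		cost := float64(inTokens)/1e6*m.CostPer1MIn +
			float64(outTokens)/1e6*m.CostPer1MOut
		fmt.Printf("%-18s $%.4f\n", m.ID, cost)
	}
}

Running this against the file prints one estimated price per model, which makes it easy to compare, for example, o4-mini against gpt-4.1 for the same hypothetical request size.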