1{ 2 "chat": { 3 "open-mistral-7b": { 4 "description": "Our very first. A 7B transformer model, fast-deployed and easily customisable. Small, yet very powerful for a variety of use cases. English and code.", 5 "inputTokens": 32000, 6 "inputTokenPrice": 0.25, 7 "outputTokens": 8191, 8 "outputTokenPrice": 0.25 9 }, 10 "open-mixtral-8x7b": { 11 "description": "A 7B sparse Mixture-of-Experts (SMoE). Uses 12B active parameters out of 45B total. Fluent in English, French, Italian, German, Spanish, and strong in code.", 12 "inputTokens": 32000, 13 "inputTokenPrice": 0.7, 14 "outputTokens": 8191, 15 "outputTokenPrice": 0.7 16 }, 17 "open-mixtral-8x22b": { 18 "description": "A 22B sparse Mixture-of-Experts (SMoE). Uses only 39B active parameters out of 141B. Fluent in English, French, Italian, German, Spanish, and strong in code.", 19 "inputTokens": 65536, 20 "inputTokenPrice": 2, 21 "outputTokens": 8191, 22 "outputTokenPrice": 6 23 }, 24 "mistral-small-latest": { 25 "description": "Cost-efficient reasoning for low-latency workloads. Fluent in English, French, Italian, German, Spanish, and strong in code.", 26 "inputTokens": 131072, 27 "inputTokenPrice": 0.06, 28 "outputTokens": 131072, 29 "outputTokenPrice": 0.18 30 }, 31 "mistral-medium-latest": { 32 "description": "Balanced reasoning for a wide range of tasks. Fluent in English, French, Italian, German, Spanish, and strong in code.", 33 "inputTokens": 131072, 34 "inputTokenPrice": 0.4, 35 "outputTokens": 131072, 36 "outputTokenPrice": 2 37 }, 38 "mistral-large-latest": { 39 "description": "Top-tier reasoning for high-complexity tasks. Fluent in English, French, Italian, German, Spanish, and strong in code.",
40 "inputTokens": 262144, 41 "inputTokenPrice": 0.5, 42 "outputTokens": 262144, 43 "outputTokenPrice": 1.5 44 }, 45 "mistral-tiny": { 46 "description": "", 47 "inputTokens": 32000, 48 "inputTokenPrice": 0.25, 49 "outputTokens": 8191, 50 "outputTokenPrice": 0.25 51 }, 52 "mistral-small": { 53 "description": "", 54 "inputTokens": 32000, 55 "inputTokenPrice": 0.1, 56 "outputTokens": 8191, 57 "outputTokenPrice": 0.3 58 }, 59 "mistral-medium": { 60 "description": "", 61 "inputTokens": 32000, 62 "inputTokenPrice": 2.7, 63 "outputTokens": 8191, 64 "outputTokenPrice": 8.1 65 }, 66 "codestral-latest": { 67 "description": "", 68 "inputTokens": 32000, 69 "inputTokenPrice": 1, 70 "outputTokens": 8191, 71 "outputTokenPrice": 3 72 }, 73 "open-mistral-nemo": { 74 "description": "https:\/\/mistral.ai\/technology\/", 75 "inputTokens": 128000, 76 "inputTokenPrice": 0.3, 77 "outputTokens": 128000, 78 "outputTokenPrice": 0.3 79 }, 80 "open-codestral-mamba": { 81 "description": "https:\/\/mistral.ai\/technology\/", 82 "inputTokens": 256000, 83 "inputTokenPrice": 0.25, 84 "outputTokens": 256000, 85 "outputTokenPrice": 0.25 86 }, 87 "codestral-mamba-latest": { 88 "description": "https:\/\/mistral.ai\/technology\/", 89 "inputTokens": 256000, 90 "inputTokenPrice": 0.25, 91 "outputTokens": 256000, 92 "outputTokenPrice": 0.25 93 }, 94 "pixtral-large-latest": { 95 "description": "", 96 "inputTokens": 128000, 97 "inputTokenPrice": 2, 98 "outputTokens": 128000, 99 "outputTokenPrice": 6 100 }, 101 "devstral-small-latest": { 102 "description": "https:\/\/docs.mistral.ai\/models\/devstral-small-2-25-12", 103 "inputTokens": 256000, 104 "inputTokenPrice": 0.1, 105 "outputTokens": 256000, 106 "outputTokenPrice": 0.3 107 }, 108 "devstral-latest": { 109 "description": "https:\/\/mistral.ai\/news\/devstral-2-vibe-cli", 110 "inputTokens": 256000, 111 "inputTokenPrice": 0.4, 112 "outputTokens": 256000, 113 "outputTokenPrice": 2 114 
}, 115 "devstral-medium-latest": { 116 "description": "https:\/\/mistral.ai\/news\/devstral-2-vibe-cli", 117 "inputTokens": 256000, 118 "inputTokenPrice": 0.4, 119 "outputTokens": 256000, 120 "outputTokenPrice": 2 121 }, 122 "magistral-medium-latest": { 123 "description": "https:\/\/mistral.ai\/news\/magistral", 124 "inputTokens": 40000, 125 "inputTokenPrice": 2, 126 "outputTokens": 40000, 127 "outputTokenPrice": 5 128 }, 129 "magistral-small-latest": { 130 "description": "https:\/\/mistral.ai\/pricing#api-pricing", 131 "inputTokens": 40000, 132 "inputTokenPrice": 0.5, 133 "outputTokens": 40000, 134 "outputTokenPrice": 1.5 135 }, 136 "mistral-large-3": { 137 "description": "https:\/\/docs.mistral.ai\/models\/mistral-large-3-25-12", 138 "inputTokens": 262144, 139 "inputTokenPrice": 0.5, 140 "outputTokens": 262144, 141 "outputTokenPrice": 1.5 142 } 143 }, 144 "embedding": { 145 "mistral-embed": { 146 "description": "State-of-the-art semantic for extracting representation of text extracts. English only for now.", 147 "inputTokens": 8192, 148 "inputTokenPrice": 0.1, 149 "dimensions": 1024 150 }, 151 "codestral-embed": { 152 "description": "", 153 "inputTokens": 8192, 154 "inputTokenPrice": 0.15, 155 "dimensions": 1536 156 } 157 } 158}