1---
2id: post-chat-completions
3title: "Create Chat Completion"
4description: "Creates a model response for the given chat conversation."
5sidebar_label: "Create Chat Completion"
6hide_title: true
7hide_table_of_contents: true
8api: eJztfWlzG0eS6F+p7YkNm7MASMoajc2NFxu0ZL3hPttSmOQ4dgkHUOguADVqVPV0dZOCFfzvG5lZVx+4KNLr5+B8GIvoOrOysvLOT0nFFyY5u0nOL5JfBkkmTFrKopJaJWfJ61LwShjG2UpnImelMIVWRrC5Llm1FGwhb4Vi6ZJXLNXqVpSGQ9dRMkh0IUr84yJLzpJCm2oC7SapXhW5gA8mGSSl+GctTPWtztbJ2afe6Rl3E7iOzPZitZFqwTjLuVrUfCFomTB7qlUlVAVj8qLIZYpLOf6HgYE/JSZdihWHf1XrQiRniZ79Q6SVXZAsRQYgWQlj+ELAOnFgAFBRwsYqKQyOXWdSwz+0Eu/mydmNH1DVeZ7cD9pbes9LvhKVKA3CEPszXVdFXY3YT3ZudrcUqvGNSeM2DZ9ltRyr6UpnPJewlDN2M6a1jJNfpiN2873gpWIrXYpfvlxWVWHOjo+LnFdzXa5GuhCKy1GqV8eZTs3xopaZMMfY/wiAtw0mt1qmIhkkMBKvuhCxv3fO8rIQqZxLYRBv7K5oi9RlxH6oTcVmgmklmJ6z6R2/nQ7YlPMU/rMqvoL/zHP6Uxe1mQ6YLtm0SFenr6bRwk1VSrVIBolQ9QoWfcdvk0HCeQonWXwFy8/xDxglGSQ4AuylklUOI7xe8uq1x7dzWOZb2tj9wIKgs8OrpWD4CXdIF6Y2sGFtL042Ypd1UegSDhGbGsZLMVZTnud6jZs1S/jPjOc5z+BfqS55Dv8Q6VIjAPgsF/APpW85QkKtP8J/AVWnA8ZVxqZmKVcrUSJQOqjZAQ/OnvwC2LqxiVlub0Ar3t4G97K9Cexyewvc//YmAJntLQBm21sANHe0IBg3GvVeGV0toVn7ptDPZ+0J7u/vtyPi3xH97u+3Nkruf7kfJHOkGSpdT+DK59U6THeTqHo1E2UyIFL1i7/SZ8k81xw20cTvH7E9m4nqTgjFhi9GJ4hsL0YnI/ZeG1nJW8FueV4Lw3A++atgStyxSn8QyrAZNyJjWo1VtRSyZOKjNBUQcL9MJhVenkp8rJjRbM7LActEWgqOlN5frC/MWOXyg8jlUuuMblgheIUtDF8Jlksl2K0oZ7ySqxFc23mtUtjKJOV5fgjNfiOKUqQcLq1UbM5vdYn0qdI6n6RLOI7paKzG6rVWValzw+6WMl2yL+WccbU+Ym5moOIwucjYbB02MxqrqdJKTNlKcGUi8nEn85wpXWEvxsNAAHepTCV4xhZCwUtLzzS9WDAiryvdHTHlihUy/eCP0famd9R2B6oKM9KPbtLRWBERX9PvBS8rmdY5L8O6biVn00/jRPGVGCdnbJys1hP3dZzcT4HYpyJeUaVpdxW88dFcHiiSWmdizuu8ordRad8USSgrSmGEqkbM7rvVSc43tN9KH7v0nZacaWH6joWIr6X1SPdhDUJlw9qIctSgUGoDddk0Z+fguPIjbzivmHzXle4nVM353m4+HcGMfcKzxhQxpXtrf+8SO/+lh+V61CUA5nWnx1/7SO0WYuuW/BrIBZFTh0IxGeVlydcRFZWVWJldnCWuZ5AUnhvsLrkBlTCdRxQ3XxN65yz6G6jU3dJSRX9HAXsHwJk0qRDCeam1EZb9VBlb6rsG+APQ7wcbYNqHwdAS1tJYRqWB1yNyGHg/Pvx1wM6H/z1gJ8NvkL0DTp5LxWqVidKkugRCpzKWcbOEjQAzDKSLf5SresVyoRbVEmZ79RKXGcG4l2UL3xvrM4ynqSgqM7AQnYmMcaCx/3n57kd2iQIEo8MdsUtBXN8N8tL789zw0A0dCdbq2M0+tNf5CKUE8ZEDUhqiLzhPvIhSzEUpVBrNC3LOkISckS4XxwS8iqtMqsUw+npMM2Q6rVdCVZxel5muLdIQaw7U+N1KVvhMTAPEpkBgpcKnxx8sHYhiYlVU6wBelktTjbbwLW/97bpvI9G2F9jQ23uO48Ov4QQDbq/42r+TdIBSFXWFQtgIcdZNkJ
xVZS3uB0muF7KazCRv3HZ/m/uv3w86k3O6VBF7oucR4bLMEC8KweHGOJ4niLe0H8I+xhmclkU0eiRXvDBumC/DwHSZZUmf2MUbz03B3/JXUR7BpeOKcWN0KhGcsD/i2di81Cs2PD05gVanJyejsfqBV4AklQR0XA9wNOwhDeNZhruhzQKwjAdxi7IUpQR9gQbOrAC0Ho0VXD3xkacVE/M5bA15nVterlkhSuo4YLO6chyl5ztP8RacMrPUdZ459hCZFqnsv1vAF7lAlPj3sbKjQQParC5hs260UhjkFxTjbMYVw9uX5rUB3taP44hZKXJxy1VFIAZE4lkmoQXP3zeouSWSUlVigUy3Z7alqr56gYSKOqx/5KtGH/9YIU4WpZ41MHKmdS642oiSPy8FCBrEI1d1qeCwGAzDZ5LUB247Viq3qKVL4HFG7GLO4EoMxor6G3fi3UEET5eNUeyUdG+h29QqZqZ4fy2/OUVCveIfI+XQhFYR7zQAryOxEBA7j6FidQHYNNO1yrzSigQf3DTtFG8VMFgzEaEw6meiazkA9MproKBjdSuNnOVtkAFi3gAGaoXiCv66/2vge6IWZiUVvGjJ2YkFziNABPlJ+1IGMNzss9CYivTCS5Jg11bWjdjVUjoaYzsh8wGMBQlM7CbVpormj6blhTwuSplKtbBvFQqHYVoQOc7fXyBJ8fNI4M/vWLbh3ehFNas7kWaskLfXq4JXeMj4ot3ooRElIDrSpgcfa/upaR+00zkGAuA4TM9ZBomF53lTdunlNp0ytMNjRlrSLp7Yj546ZOJW5BqJs5UytwhPkajx2cuHA++uHX/tpZL97MVPpD79gdb+mjb3npfVFQy0S4eD37prwB6bVUS0chAyNosZdl1vHHS7C0y2CCnbe3fZ9N0ChGK6oBeM+HZHNUnclwUHKft9qW8Bt6NHXiqiPZa3z+QcudIK+Az/eEeDeLxCdU2pc7GNOdywz53nBuN2zw1/3XJuHtXt4Q1+25tm1qYSq32uWR99R/q4a8jn+3ng/bxEAPZdzp4bhCAF0NtjQLw3TlQlmSljsKoRe6vL1umYAdMqX+N3NoU1oj7LONPBKHnQav9wxKCxySeiBHQy/ztkAFV7j0kEnm//ptu/98nKFV+ISV322ILDpx3Kv97emai4PMgs0bSoUn+Ww8vlzhtXNGLBGOyksJu/SwNX80Bt1S32Os713VCXw6VcLIdzmYlcVushzjVs6JmOtplkUSMNEu1dMkhgqNj8egGDvSGIoOa1CdUNeP+dREGXs+ufvm+AgFmqBQaoVy+ZUKkG3QV9y3jFm8QGZ78u84egK/Z9InyNcG830jZBQyhAer0bsrqTAuwh7gGN+wCjTMgNonsjoo877gScwhavAvy8EwG+bZ4v7dOe72CjZ8KV13M6pOkZgL2uS3jA8rV7hw0bg1/BOEGpcQx+BeNktxMCuB/EmA4gavgX3Pd9fAgq2o5Pg4rxsR9GQeey7x3GX3twpNtq0sSFnTzMVffiwzB4rNYMgvaOghtvY8YGVrWJDM1YoeqfpiJsgqXI7MCFXLwBHOOgl8o192upNCwEzAsIWD/Bgexax9Yic7H3FtG4MVZ+ixHCvZW5eEcn8gA8hN5PhIaES3vx79emX7T+TO49+PMQo2wsr+udHei9RQjvQWtJtXTEZgLOyanJvOWCdM7kgzdir7ny1jHgZQb0oqHVLKbxe8gLXej84aSFaItPJCsAv74PNfxMF8I3vOL2Kefgx3ArdW3sgQcHTTDmxB4mT+gSKLOepz/b/VxfK/lP0NVmcNRzKUqrct9zT7uP/NwYCcyoI0qRZ1QkiO0L9z4RjbsZvGwVnDhrlQtjnJsQz3MzRXfFhh8SSfbOhvcs5f0+pLxSzGvDe6Q092Evtwfb2HtV9VtH98DjLrx+sut4GpD57e/1srZv2WM+r6/JXoQewSWJsIDlQTE2IAstT4Ezt47D9NluYrrH67dhB02nn89xGyS3sC
LnKR1+TBPAPmbZNmjGywV6YuB1jrwq0OZmjdTeb2YATNsmtNpOtK0Hkp+ux7fdf9oL28PC+zyG8Hi3LBc0E+iX4ZzBf9TI71jnpZbfHc/v+DoMBUY/mWF/suGBu8eS53mdSgXfIx8f6O5wbLZma11H7ovWY4b9HcZzDFfYmFTUHqQINhNzQEfnd9cY6JH8o2DsFife8Un7/4pHG3QJ6F5CTZuSWszxry8OHe7UXu55h76AMksCMegRYCMHxx79QuMax+646P72fFf/8Hd1sBdLHHQE5Lil856Bm2+YfbOutM7tTE8iaXvEP1BfDrcmrU2lVxPYTo9JJPrYbxbBBgSMz7k+KAxvUFLudzLY1FPMeF14hzbydw/Fr8bO+1DsNTaAo396BAtzPSGa0Y53MZ4tlDfgyH0gi/dEKgD/Ih2uoofduAdugzkxeuMmfUL3QdbFKprvUeTO5oDPwueBDgZw/k/kXhCfzCM4F3RX2mC/Jn10aOexRIM+0d2k5+ew56s/miS6Zwcxseh9GzlbN3gIAN0g8tKNdfNPxqP0HoVjXJ72OFocxS6kC4vpRLw4x3+H4ei6WUpv6ogj021U4Yi9EYVAK7XV0Y/VwUr6ta5BRT8IwpAXUGDThn0ZIrSPxgrCzvw1G5AH+g1cwAeHixwNxuoGdf4H2HHJiH+EzPdYkTH4cI0weqpW3Nni9teJWMe6oadSfEF+09YxH20ewGLNZV4JDJCIMgZ4BwaI/plpXmbblw6+yMOo/05dtjPuTpzW2Ux4IbvYvaFdwHfnlh/fth8cxFByD8hxSDjZhmv/zvqhI94hlwzYeYfqIsS0RpyXF8aIlw3JBTD05AdtKuvkjLGSKS84+rrP42hRsjhRpGsz2vKMTW/G+PRBNgIYEYjSdFFUw5d6iBg0RB2/uJtG0Y08Nzp2Dr9xqxyrhyEpu9I+U4QVHKT13g4QmOlqSfwMat8aFgczQCjC4moj4m0NWJRvof3eDxLvD+FO/ic74g/hzDuE7GFHOGJ7HddYbTkvDLZtHNlDDmys/ImxRz+wsXrIiTU31jy0e5fUY+fd+gF3fPFmuz3WUnQLNTL16K+mI/auEOr8Yqw0PBLgRXAnM8FKrhZ4RvbkkHkLDwkeoo2sGUBEFOpaVCrGKl3ykqdAG00lUxsUCCESghVaqsqADWpOET/Rw7a3s5d93sYKIjRLfWdIMY3xEKVg/JbLHBGMGhJv8llRKX/Td2zF1bqT54WC602HZGGcEUnj3u4WlFB4bSCabGahtQCRPLbHh9CXILO7EJ601MYwkOedFE6LGLH/J0TBpmoKmrHp6RRRHmM3IM8BRrB0wnZAmZXnIp/0KycPiN0SCmF+40ZsMo5SLfa/be0g0z+lWs3logbkH7rxh51Q1LHKsAmJE7UhhW5Rikx6zee+fMBrOyPxZGT2vXlPQ4mMESncf0eF6zmkeCwDnBGROiAFC8gjlK+ZXBWlvg1XllVyJQz5xuSAJlaQcoZUIjy+NWD/B6XvFONLSLwAzeRK2AAnpFQG0XcFOlQYFDARE7yUopFnAZ1v8MajLLaSCgKdl0ASENlxIId+JGz0iuibVAlL4kDdEB48XmDEK0KBixDFB8p+uw3sBzFQqXd4sxpcH7rX9gGM1BO9QvJWtcQTr/MQrYab1LOgnLVRMpy1nlciSlZCxjo4VnvmrHHkeNaNY96uIfnf14b8cdyier2g3nuyZbceazJ2KSmdd38ze0RnSGLzKdNIKn5nKXju3NOyFGsbjN6bfceGVLey70QB1sgnVDz/YH2SaLZCpsa+D3pVVJOUp0sx+SDWByhNrm2KCmKgSIGRRkdNKdSMXEnIQWMZR6SfgHT4KpMJCHstZcUwTQ6wR2iKNxSvOgW/rSmbS5FnD0ydRpscwkTk946WThv1ORHzuS4PcjV6rcEPk0tlGHVmmPTOxRXPdTlWUZzxZwSkjlXHsVl4WRzI2BT5GUpBlus7zMQmMlmvXKox8N2fAkwzsO
At4K1z67Jrhyc4xNXPuYFsEOEQYZS5uBOlQ1K432PV2DHG4/srPlbA7J0xL6X8ZViU2okmVqpBRPgSBsc31gnsR3bFrL3Mba7bmK7HsnY892ELBAgXvzBIPrbjGH5yc3xHKIAEwW1jEhzS98WLc58IwoRMTCFPR9s8toKkKja5IIhCl4IyeFSaTT+xMU5GWZogx8SELKokJzV+OGOfRqPRPbufWjbUjNVlVdZpVZf+aXRZr4QydSk6WaxWvEqXdB/hKHLIWAFG4LFyhtyeMJWeSQ4MWTF+BM8a7gMKgvI4ibZMGRLyTJRk/Ia9WXl6rBqbdjpAD4CQoYsC1K39e8Suka5OI2BPx0oaYITmoiwtG2IlRPIAIjRm8sEMYZzDE3CG3lT7fJ9Zk8DjGj92pp06ZH3R+TzOMqMBn2i19mhbq40+bWSd4zb7pJR68ixS7f1KfIQ3pJPKwI1jBYn4bCIpmxrN3W/rP/Jw40ZrNU+ZRiocQndJ9M07BrRWtU8CqbEi6mfhNKtlTjTCjm3YzVKUWxI8Ec8BYEurz9Mw0BhuTzxbUm4pEmhj/a9PGDMaqwtIsoNSGqa0aBN/K5nNdZ7DBn3qHzuJvS5j5ZLE2AvjeLJ38IBzZuoZTKLnDQjGxktc5FhNaQ9o2JzCgqaoWsz9AzOA1z/b9MaM1SM8Mg3Ry6l/KcbrP41WtPh9pI2YBLQkjuawxF0YPhfVehKc6g+69qZCHIhc8p2wtRR5gRc6RQG5RC0JPudRfmWXn2oNd/BW6pxQhRh4yNlZ48tY6FymUpiRsw1cvDGRb6uLs6LRaowSAO2NW5Qh/R/mVGQ/w30DnYuAfHXcLC1uSlx6iYkoQewWKwiP5VlWCmNArGG6zAjx+a2WGTPWHAhxB8rPtibu0/s+OgFhrA6TEOhYhjNhqmEB6ltQKf7J/hrAbew9FiLbX6P66mUPoQTCLDggJdwCqdi3ouII8IsoG9mAIUNG6Qcsn/YBTgAW6vh3l7hLeIKOmmdKCWbqdIkHBYw/pGAVWRDFkKh6J9ApbGtK6urg5OcTb5HV23uMosCAC37jZ11RYp+KLWpeclUJdIFWGSpd/EBB982mtLXJXKqFKItSQuYpR5/HKiSnQ6WbklWkh7PEaMbTD0JlG7LEGVHeylRMKnvTHha8XZQ6FTY2DxgLr4HC4S2xDSa6IQv09gsIpP4CKa6KmzkNuBvbpg0nANOiGSyaOeVvyNH1vtQkYxCPbEbsmkJZMGPxnTQi6jRodMFJIZTxCyuFfdFervv9wBVDSDkvyc6BOlRAomAV8W8vpWgTmY97akx+M8/FxwMU5bn4OAxnc/QFMBJf3GA6O1mtN+WrGroGUd/jowN3nOrSckuw3fjEcFM/UxZcPKRBbMxjM7Hkt5BvTxqLHHiJfnZzT2OUnUb5GeEhdYN5FmamszUtk5Kfib4xyKWlodqLMBpOgvG0qoFg+PcERhANvCbVqp+ZBrVvSTCMeccZ+g4na9HW72SPtAMWWJgIXnxMgL3jOWZktUcXS/GXtNkruOL0xla6OOSy/6irmEeBI85BHKxYW4eDJkPSq+iXQ6B5lN3yugCYvWSGEmWT0aIk+J2/v6ATgmXFHNq8Li1vB2oVjDNxOfzgDEDLFxJMO4UpXjZd+JkemuSrrYeO4VnpomEA8lAtxUN4V0pUiEgFQ0ScKckJkHV7QwEJ1KQBvQJ+oC6tRppl8MDlOS5uf3IR9zpiqKMTt/wQxRw2P4LLk9VpRfzRpUs34E3elLfCarUZqcLi35i+FSX7+odvPX3JSl0U6NuHMoLgq0PgvJm595cVvFr8bDRDSA6a5lKoCkPoJUqMwehKBTxukBiUQ8jIzcQtOJUGmPlcXKOV/hXAi/KOUMPrS4Ldz2J2fP7+4vgyDDKhQY5RvzIxnQ9/+g7+M6GFWj0c6IV8Gt9L/EKUl6ysM5Hruz1OEui/T8iL/kfHxg12NFZzF8YW8ZQDxnPtjEQ4ve8R1KQHSSPUee
g7H1mRhvVNDyTbyp1LrrJcWCLglkAgi3BnYm0620ngdh3KO2sWQians1sr8iHWwfX1xlT4ZUrtzxhJdp2AavtQTfRsXhuSSw6kKsoiOk3EooE8jpOwDAU9um14lpmxKrnK9Ip5bw1jk+9Oo6ZWwoUTCEDIRF5xC3X0MVBwUmjJKfga0kYwI38F9QVm/JeVXPiwrFSUSMaNzMQQGFkFiR2qiqcfDCXdhUS50VpxftL0W7ihGse+j5SAl2cZyGQrTHa/0jXcZT1HKoN2cHvNkQbQNkbsv6wXDh5Yz4mQ98Sc50Y0DDaADzOusjuZVUtI2w+nXpW1IfWTEtWdLj9AdYcPPiXwWHXkUJcn25qOID8oRkjYhaAYejgJHGDyZJ/hl6XLWn3okj0bbYSMEgDljN28effjd+BX5b1VrsjwBMlvAw7YpwoGNUt9Z6xbPeTRJcHZQIJu9PohSkLJaipZei4qpAifWr8VN36siAEh2yboBr/Ukq8p5zQ4veCTioswro/RnvvjzVUTgljdGYDOJlgesT//+cd3V9+d/fnPmD/YExQSRitRlnWBbrBwvsDkwQteilRI4grHai4BxLRvC2hy6CBOxQGn4nkDREEJR1zlFhM4XV1LiYgHqcQKy1bVTU7kUDvtz6iqt8muWTSozfQy8CZcZ79lf5MLAHycnvpk9HUkk0dcDVJwoi/k55JDPua7TvcXUXdpu811iiw46j5jcZ6kBBelDqx60Ktw54qLGAqebZUuJsUUTx9ODnzyoiDMpauQdIDBs7dyCroafWbVFLWmYX67milxuDqmqIexna6+Oz6a6XCtfR031VzBPbl6K8F8FYqtDBp/nbEtZVl8XRayiPcUZoHZRrtKsuCS9yzH0m3bEDT2C8+AwlV3IkPvur6o2MbnLQmVt80BwLCRVz1T4Ndu4bpgv3eRUSZynGykXQKuAGDgPeKNILWXBR1uolUPBbERJWG+0lY9ZDfrJlMBySGtU0DyBiraf/WVW2nnXGhPsYeU7cEYCX8YtIf04ZxGA+9aRzm2HdKWwA+8Cgg+WdnCDw07eKRa9jugYBgAx1v7YsDjwF5HsQbn7y9I2Nk4z0oulhXLtf6AJPdsrKZTNPmN1c1YMda0LG+5miy+mwtRTe4ElF4owQLN7gefNxY4KdJAY/ULLnFb9TCC0BWexpa4yrgdnecThdC17vihYZVB0cpdCHELLcCTlV0TH4pUsOcycKcyTzcWHtovNcDhFtynDE2HUhNbcAE+Z+HC/s5Cy12scH9A+W8J7D3jtA+DNvX5LeKs+yC+yQVqL8ruWeonWTs+uTuCBttrsdx984k5MDlI+/WJrpjLTNAqbxEe4KDZ2BF/9lRU5Lma2HM1sT9iNbHf3uelEXczYjs8X3a4vPiySDEUHuT1wvqcXiKXSjqFHodK52vZjic61NOyg2z9LMfOPLI2m8nTcxpdYvdAit58QNuDdtLXOGOvTfbri5hHtS2tX9z2p+IR+J8tCbcf9F68c6nFWs9Gi0kaeHjamB5CUYyr+FjtTNbt0/CAA2JPNh4I7rd6BsNqlTph3Bo9D3GcvW70npdCDGFeP85v6SR7zhYlX614GefEqmydiPZibNONjqXu+6607F7YBRv5WlX8Yx+u+EZ7PeCNbUjPAdjh+z0s8ZtDpZ4B2DtbND7n5QcKvoUArI9by8JDW9zwQnyMOdn/S+Nf0oK6jLz9Tm5374Fy7+PI50DecuILeZVCxcBQa/6zBZJw15xHGxphVcj9ZDbllIoqGPY3aBLup+L3Y1mleyc+g7xuFx96IvSbGqd2fVHiB5zhy1E2gUUpxuomWugBxtxurC4OM8RhrJNB49n6jLGPrAa/mPTVmXxI9UXbp23tOGmHr4R4bIx8xeCyta/OqJ3XCuMVuX6SwafAgDewn+OPjlOMqps2i1SCkWvqtjYltftMOPaNeCZUTYORo+EXBVeoE9uNJpDPsRJBTCUYVhTHsL2oPqp1BQh2o4GzgK
g6zUVtfEu0/zhPIGuu0ArMv1YCIT/KKEkXwpTGhw3E8GErbtDkoNnJ6NTaKShbVOjaSq4DvkKnJ//aGYZSUdiliAyZfra/eSlsvcfIhInWD0m+BM6waASVxkZoNLLxdnymrQtWO3ZxSnrC7u/guU6GJfAj5ZJkTBsO6IzbKA0BePvcrIGNQSu2q+kODa8t5Z5pDCxvhjHCymeiAgyd1ekHgQJKJxgS9uHdt6013HpxY/4fSOsBXjd8hmrQB0U/HuDb3OtDeyvKGVzl9QNDJNEb0I3RCN7/wkQuJd/HFlIUznxQInlzA7qm4N0aZRMh4+qyYZxt9iXeleaPuu4RWbklnHIb09ITehg9ZH/3wESV152YTYzgZbrcz2un5+bg00pjWEPRnZghwvryx47I2Dom7WjNjhiKAieMQsPiFAeoOeCZG96J2ZC6/wf4XMF5/x9wt+pN8dRK3kQAsfLGBLxpDoEKmOptoS1YUcP/mPxjfJQ8uliqTN8xU/DUF3oJ3sqwkIh5bWEEkEIbX+t+bJlSPwNTfhazS5z/Na30EsBA1baMKCe5Dr5Te8enFkWpP8oVSMiuf6wSa+17h4CL3wYJD4P2mHijjz2yzOcuryVZy+qQ+PW3pRBRBoIgqkZkCkA9YGK0GLHpJVfsbcmBBqWaamKngE3lIZNicOKdHub0NNxcXL5jdhTMhRyumVCjO/lBFiKTpNaDv44vLt9Nvjp99Wp4etS3xuvLqY1uXxymGtgEDBqob6rXPJdzXSrJaUowY/4KVojDgHFzcf7jOXOdw/bhF17IkdTHDX3lseSKD1170wuF85UoZcqPv9dmcq4WIhdm2pSi/OX63l2jPSTEFqZ3xoISNm68Kxijd8a4FdznvjaR01Ms2UGeHNGbJwSWbxXHzu0hT84SSnaVxGWrb1zqx6iKblRvJfkvmx+FI08yr/NmGnffG5m8uOPfRJ7rf0Gpzfkwo/8gGqkD3bBMhX+IYbkvTk56nDRaPuHBK9nUKYiQ8zrP10mjVkzkY4hacXxNNobnNuf7SVhPF9Bvdx3SrSe198+frYmHGXQCK0CuzFylsL502+T4B//CA4Xf6MAGbm1dpaHt80AnGakyDKNw+Yt76rspaZYTinc4jAURNkoittxX6M/eUM37IAdpvEvmFBpamc71BQaaMwWyBbcxE5g2jdnaQxa8jSCIAQiOaAoKg1kLURBdrXwU9AyyFezDgRsFJj6D8SxWTSjPJY7rEutASw0GEpGxrBbkIjTP+YJ8fiitPTWl3gYGjOsMNXZsRUdu9UE9NYi+DFz50Ya+sdvDJvqFcTGDhCCVNCoy+Ftk95u0C6tE9O4t4grlziCGhNAr4GDQRezQQZC6OBNedUh47o7GqXNcire2iB/rQfZF2O+bOoeGp73nAHC+3YxqT6LhXflB+zkpQM3E7ydpaXk6t3W2rg7LSrrlQBoA3aRPs/3hglgi6fQL11dvh18zXFD4yGPDAm4OJXN4TKRCJ08eBUlFLviUmMuOYolsnVeyyL1+wxpFZYmTtuY0XmuU6tWMTAqtRFQYuJdWxOs0e/t6RlOAo7ulFKGrdP8mHcrQLpOAlBH0veJph8IJbkNLJeYjpHD8ASyJYnRAQ+TyQekCdHZd1dyIvXMRoYMoJm86/Oabb74ZnUyd6gxVW3KhQPnsXfNIhwcJUUS5hmJpOLJVQQK67qX1DmDp11semFqtdU+eL8bzxYgx7GC0vI/do4rvHXp2Dvx7GRI+9qjAA+hbqxwwlyW3qRIfsQvFSsoGjHosAqeNprXZtpa8nQ3VsioiIwf/oCt3XOm2cIorWIHd4h5p2j0LE6uk24cQZ17YUD7q+UF8vvfPD+Lzg/h8MZ4fxN/zg9hphzJggACWhLDqk92a7I4CaVeR2X1KtnCltMXeA72kt9GQuswnqaRxN/oj7VKNNgZBG0X8ww4qJlQ2cRoqU/Gy8n/RgeBo3TWFbo+hecg52HAdgXG/Xv/0PXPbcFqJqC
hXgyTGS3+MFc1l+blLsgi9FwWApm4OsP2Vwui6TCmNPBzAXsPA6voHie7XdZm/drjR8rZqKbdtWQ5bH9zjP452cGV2G+lMJSl8rC5W/FizUGSFkiaBpwPlcvURzVmo7B7G2VT+/GG2+r0ruoO6v4DQwAk9D1C2ZpBUJVe04x43QFsLaPsZfsuNePWSCQXGKFe+g96zftI1aGaL3KhfHZGJwq951w3pzZom2LWSH6kuQMVXBfsSkimIVKvMkG/7HeVRkp3q9DbqmEEqD3R5Qpd/YyTWYrGZdTATCY5j7eTIUwzRuSkuU9VVRe5T3PEaU+W13Up6FkuvdzjL3TfPt/WxoDjgQ0qaN68c3jF8/B5WVa1dhTAqQPj0pbspe+Fz7e7nesAHBF2G2t2PUwx7S9lZV4OvZxAdHmNeV0tdev4+eus3Gnowq2IycEbaUPaUjD6tStk+Xyks57lS9/PN/F3fzOdK3c+Vup8oAvy5UndH0XMVb26zDIBphrlpRl+2I1e2MZrdli6nwUZV44ZqdF43R2l6QGkEuVLkHGvDQXpD9Lqx+qRT8uOzjjgPFNu3CSUYlW1zu7ZXDI4kduoHCRTnNvt2V6TozDU6oKDilScLPtHxphEtxu41pBWnoWFU6NJXBVvyahRGn9pE2w9L33zVzqLsNxKncu4kt/VBCBhU4RPuWY+brZl5N0q9T53rtpNA+/DIC981qKzJidnm1vbppHkf51XW1jqIMSSvo4BciULzPxqh4haQLrkROF41snyH2HW6Nn4JNuv3kt8CFyIUW/HM8kqU5keuCgjgDknTVhviGXx2wYcmx7xuJfsLN6STwbat2LWhKaQmR68r1yP8hknz3J891fraPR5AtH5se8VZrA3kPb7nLcrUWcEkExWX+UFuWN+Wgn/IoFhjWIRDGt6afbsTFmVuENkklLrsQGav8LjGJlF/8rBxWjQaFTHE8Nh9btYqNJYQKpQ9wjKutk1to0XsdD0L+Qcmbf9sCDeXREnSKf6zXciRLEBd701pi8GHlRAJyGRGMlKjUl6ERuxv+k7cgos4WKriKmg0Ms5ngu0QisFDtmhyzg90HS/nWEU33fbAx6UuCw2hrXrOZhJFpIFVNA9skV4M5RgrG3WSy5XslKWNGaGmscq8sRfNVi6MackjUgEauFssN57vse98c94dN/4J76Z9/3aAAmP9ssdYwmsc6ZDZI/R4j997UKPxhDyEqcW8rZ3bFx+We72/pEWyf4tu29E+GH1NbP8eEQ4kKLRCHIA/ToBpTFdFPjx98VXwnqffI2YycrU/O33117+++suLF19/PWiFSWA+/6SfpUrmxeTly7+efJO9mqezyIkfsN6a/E4aBmIbJhGr3kKsBHBKGC9Bdu4B5l4C/c2FjbagvMo64+v/SBqOzoBWg7a7Pnl2g5jneZsWcfimj9U4O33RRpUXpzve9+6jdDLY+gSfbH8/Tu7xf4Pk5cnLfq1FYK1t3h5O+qy5rlUGwPlLX/SIjW4vIduJNeuIsgQ1aooViTMbOtqSB1oyzgHRJU3sxLkebRkgRyOQVqJaakD8Qhuki7xaJmfJ8e0ppbIPXYCRpAkJR9GAm4AZ8uz4GILr8qU21dnXJ9+cdG4/RCdBjhgMDwIloF36iLmyELY69nQ4hBGnlKvBiLRGeQVVwYWkcrI3qH4AcNlQpW91tv4uQMpacYleDpJ5STEd61CP96RjKXKNc72Q1WQmuUHrGhX/hsirMytsDkJDe38wC1Sy4h8nPffhpBUoFS5sEKRQ/xT+7sRSwbO3fz+rqT+sE2r1UXPYhF0YwQGzH2ydYZ2dI/wUmwBakEU7gANsh8pt2Ikfb4J0u72jyiWV2h8IXo8K6LUSFSf7ttujtesjx7BtA+4JCNMhGoBMmucin8SAINwJRCzM1i0ifdJXWjkGebv6sBuqU37WfeipHBfGaypKfBcshBP9BWnTaRcuRC/+K4Sguy6NfOsnrfThvhGlbdwG5aav44
lPznHSyC7gBuwLinc8ZQIux8jSWJahrBVo3no1TqaQYBYOTXKZCmAlfCrR5LyA44FK3aDYjmEbfaEkCIYGPh2dno5OhrWiLBGYdVCbasVVNCwxMu2cxe1FxvhOPTDhebN2i1MuLOStUO5lCK4IEbsMogV4mEgVuezQE3GT3J4i18KrhtYBFQzwEkCTT58glPG6zO/v4ed/1qJEAj5IbnkpcbNAzgcJlJUAGN18SgivbUX14RWFhKNbLZCF9oMJV5x6nCPLsLXtL9F79/7d5VUySKDcVeLTeiclhwh6+P+zBNi/kEEBf/uU5FwtauSIEhoT/vc/e0lG8A==
9sidebar_class_name: "post api-method"
10info_path: docs/api/HTTP/runtime
11custom_edit_url: null
12proxy: http://localhost:8090
13---
14
15import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint";
16import ParamsDetails from "@theme/ParamsDetails";
17import RequestSchema from "@theme/RequestSchema";
18import StatusCodes from "@theme/StatusCodes";
19import OperationTabs from "@theme/OperationTabs";
20import TabItem from "@theme/TabItem";
21import Heading from "@theme/Heading";
22
{/* Page title, rendered through the OpenAPI theme's Heading component. */}
<Heading as={"h1"} className={"openapi__heading"} children={"Create Chat Completion"} />
29
{/* HTTP method + path banner for this operation. */}
<MethodEndpoint method={"post"} path={"/v1/chat/completions"} context={"endpoint"} />
37
38
39
40Creates a model response for the given chat conversation.
41
{/* "Request" section heading; id anchors the tabs navigation. */}
<Heading id={"request"} as={"h2"} className={"openapi-tabs__heading"} children={"Request"} />
49
{/* This operation declares no path/query/header parameters, hence undefined. */}
<ParamsDetails parameters={undefined} />
55
56<RequestSchema
57 title={"Body"}
58 body={{"description":"Create a chat completion request using a language model.","content":{"application/json":{"schema":{"type":"object","required":["messages","model"],"properties":{"audio":{"oneOf":[{"type":"null"},{"description":"Parameters for audio output. Required when audio output is requested with\n`modalities: [\"audio\"]`. [Learn more](https://platform.openai.com/docs/guides/audio).","type":"object","required":["voice","format"],"properties":{"format":{"description":"Specifies the output audio format. Must be one of `wav`, `aac`, `mp3`, `flac`, `opus`, or `pcm16`.","type":"string","enum":["wav","aac","mp3","flac","opus","pcm16"],"title":"ChatCompletionAudioFormat"},"voice":{"description":"The voice the model uses to respond. Supported voices are\n`alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.","oneOf":[{"type":"string","enum":["alloy"]},{"type":"string","enum":["ash"]},{"type":"string","enum":["ballad"]},{"type":"string","enum":["coral"]},{"type":"string","enum":["echo"]},{"type":"string","enum":["fable"]},{"type":"string","enum":["nova"]},{"type":"string","enum":["onyx"]},{"type":"string","enum":["sage"]},{"type":"string","enum":["shimmer"]},{"type":"object","required":["other"],"properties":{"other":{"type":"string"}}}],"title":"ChatCompletionAudioVoice"}},"title":"ChatCompletionAudio"}]},"frequency_penalty":{"type":["number","null"],"format":"float","description":"Number between -2.0 and 2.0. 
Positive values penalize new tokens based on\ntheir existing frequency in the text so far, decreasing the model's\nlikelihood to repeat the same line verbatim."},"function_call":{"oneOf":[{"type":"null"},{"description":"Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n`none` means the model will not call a function and instead generates a message.\n`auto` means the model can pick between generating a message or calling a function.\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n\n`none` is the default when no functions are present. `auto` is the default if functions are present.","oneOf":[{"type":"string","description":"The model does not call a function, and responds to the end-user.","enum":["none"]},{"type":"string","description":"The model can pick between an end-user or calling a function.","enum":["auto"]},{"type":"object","description":"Forces the model to call the specified function.","required":["Function"],"properties":{"Function":{"type":"object","description":"Forces the model to call the specified function.","required":["name"],"properties":{"name":{"type":"string"}}}}}],"title":"ChatCompletionFunctionCall"}]},"functions":{"type":["array","null"],"items":{"type":"object","required":["name","parameters"],"properties":{"description":{"type":["string","null"],"description":"A description of what the function does, used by the model to choose when and how to call the function."},"name":{"type":"string","description":"The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."},"parameters":{"description":"The parameters the functions accepts, described as a JSON Schema object. 
See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nOmitting `parameters` defines a function with an empty parameter list."}},"title":"ChatCompletionFunctions"},"description":"Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.","deprecated":true},"logit_bias":{"type":["object","null"],"description":"Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.\nMathematically, the bias is added to the logits generated by the model prior to sampling.\nThe exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\nvalues like -100 or 100 should result in a ban or exclusive selection of the relevant token.","additionalProperties":{"type":"integer","format":"int32"},"propertyNames":{"type":"string"}},"logprobs":{"type":["boolean","null"],"description":"Whether to return log probabilities of the output tokens or not. If true,\nreturns the log probabilities of each output token returned in the `content` of `message`."},"max_completion_tokens":{"type":["integer","null"],"format":"int32","description":"An upper bound for the number of tokens that can be generated for a completion, including\nvisible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).","minimum":0},"max_tokens":{"type":["integer","null"],"format":"int32","description":"The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in\nthe chat completion. 
This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.\nThis value is now deprecated in favor of `max_completion_tokens`, and is\nnot compatible with [o-series models](https://platform.openai.com/docs/guides/reasoning).","deprecated":true,"minimum":0},"messages":{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["content"],"properties":{"content":{"description":"The contents of the developer message.","oneOf":[{"type":"string"},{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}}]}],"title":"ChatCompletionRequestDeveloperMessageContentPart"}}],"title":"ChatCompletionRequestDeveloperMessageContent"},"name":{"type":["string","null"],"description":"An optional name for the participant. Provides the model information to differentiate between participants of the same role."}},"title":"ChatCompletionRequestDeveloperMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["developer"]}}}]},{"allOf":[{"type":"object","required":["content"],"properties":{"content":{"description":"The contents of the system message.","oneOf":[{"type":"string","description":"The text contents of the system message."},{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}}]}],"title":"ChatCompletionRequestSystemMessageContentPart"},"description":"An array of content parts with a defined type. 
For system messages, only type `text` is supported."}],"title":"ChatCompletionRequestSystemMessageContent"},"name":{"type":["string","null"],"description":"An optional name for the participant. Provides the model information to differentiate between participants of the same role."}},"title":"ChatCompletionRequestSystemMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["system"]}}}]},{"allOf":[{"type":"object","required":["content"],"properties":{"content":{"description":"The contents of the user message.","oneOf":[{"type":"string","description":"The text contents of the message."},{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}}]},{"allOf":[{"type":"object","required":["image_url"],"properties":{"image_url":{"type":"object","required":["url"],"properties":{"detail":{"oneOf":[{"type":"null"},{"description":"Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).","type":"string","enum":["auto","low","high"],"title":"ImageDetail"}]},"url":{"type":"string","description":"Either a URL of the image or the base64 encoded image data."}},"title":"ImageUrl"}},"title":"ChatCompletionRequestMessageContentPartImage"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["image_url"]}}}]},{"allOf":[{"type":"object","description":"Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).","required":["input_audio"],"properties":{"input_audio":{"type":"object","required":["data","format"],"properties":{"data":{"type":"string","description":"Base64 encoded audio data."},"format":{"description":"The format of the encoded audio data. 
Currently supports \"wav\" and \"mp3\".","type":"string","enum":["wav","mp3"],"title":"InputAudioFormat"}},"title":"InputAudio"}},"title":"ChatCompletionRequestMessageContentPartAudio"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["input_audio"]}}}]},{"allOf":[{"type":"object","required":["file"],"properties":{"file":{"type":"object","properties":{"file_data":{"type":["string","null"],"description":"The base64 encoded file data, used when passing the file to the model\nas a string."},"file_id":{"type":["string","null"],"description":"The ID of an uploaded file to use as input."},"filename":{"type":["string","null"],"description":"The name of the file, used when passing the file to the model as a\nstring."}},"title":"FileObject"}},"title":"ChatCompletionRequestMessageContentPartFile"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["file"]}}}]}],"title":"ChatCompletionRequestUserMessageContentPart"},"description":"An array of content parts with a defined type. Supported options differ based on the [model](https://platform.openai.com/docs/models) being used to generate the response. Can contain text, image, or audio inputs."}],"title":"ChatCompletionRequestUserMessageContent"},"name":{"type":["string","null"],"description":"An optional name for the participant. 
Provides the model information to differentiate between participants of the same role."}},"title":"ChatCompletionRequestUserMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["user"]}}}]},{"allOf":[{"type":"object","properties":{"audio":{"oneOf":[{"type":"null"},{"description":"Data about a previous audio response from the model.\n[Learn more](https://platform.openai.com/docs/guides/audio).","type":"object","required":["id"],"properties":{"id":{"type":"string","description":"Unique identifier for a previous audio response from the model."}},"title":"ChatCompletionRequestAssistantMessageAudio"}]},"content":{"oneOf":[{"type":"null"},{"description":"The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.","oneOf":[{"type":"string","description":"The text contents of the message."},{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}}]},{"allOf":[{"type":"object","required":["refusal"],"properties":{"refusal":{"type":"string","description":"The refusal message generated by the model."}},"title":"ChatCompletionRequestMessageContentPartRefusal"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["refusal"]}}}]}],"title":"ChatCompletionRequestAssistantMessageContentPart"},"description":"An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`."}],"title":"ChatCompletionRequestAssistantMessageContent"}]},"function_call":{"oneOf":[{"type":"null"},{"description":"Deprecated and replaced by `tool_calls`. 
The name and arguments of a function that should be called, as generated by the model.","type":"object","required":["name","arguments"],"properties":{"arguments":{"type":"string","description":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."},"name":{"type":"string","description":"The name of the function to call."}},"title":"FunctionCall"}]},"name":{"type":["string","null"],"description":"An optional name for the participant. Provides the model information to differentiate between participants of the same role."},"refusal":{"type":["string","null"],"description":"The refusal message by the assistant."},"tool_calls":{"type":["array","null"],"items":{"oneOf":[{"allOf":[{"type":"object","required":["id","function"],"properties":{"function":{"description":"The function that the model called.","type":"object","required":["name","arguments"],"properties":{"arguments":{"type":"string","description":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. 
Validate the arguments in your code before calling your function."},"name":{"type":"string","description":"The name of the function to call."}},"title":"FunctionCall"},"id":{"type":"string","description":"The ID of the tool call."}},"title":"ChatCompletionMessageToolCall"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["function"]}}}]},{"allOf":[{"type":"object","required":["id","custom_tool"],"properties":{"custom_tool":{"description":"The custom tool that the model called.","type":"object","required":["name","input"],"properties":{"input":{"type":"string","description":"The input for the custom tool call generated by the model."},"name":{"type":"string","description":"The name of the custom tool to call."}},"title":"CustomTool"},"id":{"type":"string","description":"The ID of the tool call."}},"title":"ChatCompletionMessageCustomToolCall"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["custom"]}}}]}],"title":"ChatCompletionMessageToolCalls"}}},"title":"ChatCompletionRequestAssistantMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["assistant"]}}}]},{"allOf":[{"type":"object","description":"Tool message","required":["content","tool_call_id"],"properties":{"content":{"description":"The contents of the tool message.","oneOf":[{"type":"string","description":"The text contents of the tool message."},{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}}]}],"title":"ChatCompletionRequestToolMessageContentPart"},"description":"An array of content parts with a defined type. 
For tool messages, only type `text` is supported."}],"title":"ChatCompletionRequestToolMessageContent"},"tool_call_id":{"type":"string"}},"title":"ChatCompletionRequestToolMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["tool"]}}}]},{"allOf":[{"type":"object","required":["name"],"properties":{"content":{"type":["string","null"],"description":"The return value from the function call, to return to the model."},"name":{"type":"string","description":"The name of the function to call."}},"title":"ChatCompletionRequestFunctionMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["function"]}}}]}],"title":"ChatCompletionRequestMessage"},"description":"A list of messages comprising the conversation so far. Depending on the\n[model](https://platform.openai.com/docs/models) you use, different message types (modalities)\nare supported, like [text](https://platform.openai.com/docs/guides/text-generation),\n[images](https://platform.openai.com/docs/guides/vision), and\n[audio](https://platform.openai.com/docs/guides/audio)."},"metadata":{"oneOf":[{"type":"null"},{"description":"Developer-defined tags and values used for filtering completions in the [dashboard](https://platform.openai.com/chat-completions).","type":"object","required":["supports_responses_api"],"properties":{"supports_responses_api":{"type":"boolean"}},"title":"Metadata"}]},"modalities":{"type":["array","null"],"items":{"type":"string","description":"Output types that you would like the model to generate for this request.\n\nMost models are capable of generating text, which is the default: `[\"text\"]`\n\nThe `gpt-4o-audio-preview` model can also be used to [generate\naudio](https://platform.openai.com/docs/guides/audio). 
To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]`","enum":["text","audio"],"title":"ResponseModalities"},"description":"Output types that you would like the model to generate. Most models are capable of generating\ntext, which is the default:\n\n`[\"text\"]`\nThe `gpt-4o-audio-preview` model can also be used to\n[generate audio](https://platform.openai.com/docs/guides/audio). To request that this model\ngenerate both text and audio responses, you can use:\n\n`[\"text\", \"audio\"]`"},"model":{"type":"string","description":"Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI\noffers a wide range of models with different capabilities, performance\ncharacteristics, and price points. Refer to the\n[model guide](https://platform.openai.com/docs/models)\nto browse and compare available models."},"n":{"type":["integer","null"],"format":"int32","description":"How many chat completion choices to generate for each input message. Note that you will be\ncharged based on the number of generated tokens across all of the choices. Keep `n` as `1` to\nminimize costs.","minimum":0},"parallel_tool_calls":{"type":["boolean","null"],"description":"Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)\nduring tool use."},"prediction":{"oneOf":[{"type":"null"},{"description":"Configuration for a [Predicted Output](https://platform.openai.com/docs/guides/predicted-outputs),\nwhich can greatly improve response times when large parts of the model\nresponse are known ahead of time. This is most common when you are\nregenerating a file with only minor changes to most of the content.","oneOf":[{"type":"object","description":"The type of the predicted content you want to provide. 
This type is\ncurrently always `content`.","required":["content","type"],"properties":{"content":{"description":"The type of the predicted content you want to provide. This type is\ncurrently always `content`.","oneOf":[{"type":"string","description":"The content used for a Predicted Output. This is often the text of a file you are regenerating with minor changes."},{"type":"array","items":{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},"description":"An array of content parts with a defined type. Supported options differ based on the [model](https://platform.openai.com/docs/models) being used to generate the response. Can contain text inputs."}],"title":"PredictionContentContent"},"type":{"type":"string","enum":["content"]}}}],"title":"PredictionContent"}]},"presence_penalty":{"type":["number","null"],"format":"float","description":"Number between -2.0 and 2.0. Positive values penalize new tokens based on\nwhether they appear in the text so far, increasing the model's likelihood\nto talk about new topics."},"prompt_cache_key":{"type":["string","null"],"description":"Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces\nthe `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching)."},"reasoning_effort":{"oneOf":[{"type":"null"},{"description":"Constrains effort on reasoning for\n[reasoning models](https://platform.openai.com/docs/guides/reasoning).\nCurrently supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing\nreasoning effort can result in faster responses and fewer tokens used\non reasoning in a response.\nNote: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.","type":"string","enum":["none","minimal","low","medium","high","xhigh"],"title":"ReasoningEffort"}]},"response_format":{"oneOf":[{"type":"null"},{"description":"An object specifying the format that the model must output.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. Using `json_schema`\nis preferred for models that support it.","oneOf":[{"type":"object","description":"The type of response format being defined: `text`","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}},{"type":"object","description":"The type of response format being defined: `json_object`","required":["type"],"properties":{"type":{"type":"string","enum":["json_object"]}}},{"type":"object","description":"The type of response format being defined: `json_schema`","required":["json_schema","type"],"properties":{"json_schema":{"type":"object","required":["name"],"properties":{"description":{"type":["string","null"],"description":"A description of what the response format is for, used by the model to determine how to respond in the format."},"name":{"type":"string","description":"The name of the response format. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."},"schema":{"description":"The schema for the response format, described as a JSON Schema object.\nLearn how to build JSON schemas [here](https://json-schema.org/)."},"strict":{"type":["boolean","null"],"description":"Whether to enable strict schema adherence when generating the output.\nIf set to true, the model will always follow the exact schema defined\nin the `schema` field. Only a subset of JSON Schema is supported when\n`strict` is `true`. To learn more, read the [Structured Outputs\nguide](https://platform.openai.com/docs/guides/structured-outputs)."}},"title":"ResponseFormatJsonSchema"},"type":{"type":"string","enum":["json_schema"]}}}],"title":"ResponseFormat"}]},"safety_identifier":{"type":["string","null"],"description":"A stable identifier used to help detect users of your application that may be violating OpenAI's\nusage policies.\n\nThe IDs should be a string that uniquely identifies each user. We recommend hashing their username\nor email address, in order to avoid sending us any identifying information. [Learn\nmore](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers)."},"seed":{"type":["integer","null"],"format":"int64","description":"This feature is in Beta.\n\nIf specified, our system will make a best effort to sample deterministically, such that\nrepeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response\nparameter to monitor changes in the backend.","deprecated":true},"service_tier":{"oneOf":[{"type":"null"},{"description":"Specifies the processing type used for serving the request.\n- If set to 'auto', then the request will be processed with the service tier configured in the Project settings. 
Unless otherwise configured, the Project will use 'default'.\n- If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.\n- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.\n- When not set, the default behavior is 'auto'.\n\nWhen the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.","type":"string","enum":["auto","default","flex","scale","priority"],"title":"ServiceTier"}]},"stop":{"oneOf":[{"type":"null"},{"description":"Not supported with latest reasoning models `o3` and `o4-mini`.\n\nUp to 4 sequences where the API will stop generating further tokens. The\nreturned text will not contain the stop sequence.","oneOf":[{"type":"string"},{"type":"array","items":{"type":"string"}}],"title":"StopConfiguration"}]},"store":{"type":["boolean","null"],"description":"Whether or not to store the output of this chat completion request for\nuse in our [model distillation](https://platform.openai.com/docs/guides/distillation) or\n[evals](https://platform.openai.com/docs/guides/evals) products.\n\nSupports text and image inputs. 
Note: image inputs over 8MB will be dropped."},"stream":{"type":["boolean","null"],"description":"If set to true, the model response data will be streamed to the client\nas it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\nSee the [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)\nfor more information, along with the [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)\nguide for more information on how to handle the streaming events."},"stream_options":{"oneOf":[{"type":"null"},{"type":"object","description":"Options for streaming response. Only set this when you set `stream: true`.","properties":{"include_obfuscation":{"type":["boolean","null"],"description":"When true, stream obfuscation will be enabled. Stream obfuscation adds\nrandom characters to an `obfuscation` field on streaming delta events to\nnormalize payload sizes as a mitigation to certain side-channel attacks.\nThese obfuscation fields are included by default, but add a small amount\nof overhead to the data stream. You can set `include_obfuscation` to\nfalse to optimize for bandwidth if you trust the network links between\nyour application and the OpenAI API."},"include_usage":{"type":["boolean","null"],"description":"If set, an additional chunk will be streamed before the `data: [DONE]`\nmessage. The `usage` field on this chunk shows the token usage statistics\nfor the entire request, and the `choices` field will always be an empty\narray.\n\nAll other chunks will also include a `usage` field, but with a null\nvalue. **NOTE:** If the stream is interrupted, you may not receive the\nfinal usage chunk which contains the total token usage for the request."}},"title":"ChatCompletionStreamOptions"}]},"temperature":{"type":["number","null"],"format":"float","description":"What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random,\nwhile lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both."},"tool_choice":{"oneOf":[{"type":"null"},{"description":"Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces\nthe model to call that tool.\n`none` is the default when no tools are present. `auto` is the default if tools are present.","oneOf":[{"allOf":[{"type":"object","required":["allowed_tools"],"properties":{"allowed_tools":{"type":"array","items":{"type":"object","required":["mode","tools"],"properties":{"mode":{"description":"Constrains the tools available to the model to a pre-defined set.\n\n`auto` allows the model to pick from among the allowed tools and generate a\nmessage.\n\n`required` requires the model to call one or more of the allowed tools.","type":"string","enum":["auto","required"],"title":"ToolChoiceAllowedMode"},"tools":{"type":"array","items":{},"description":"A list of tool definitions that the model should be allowed to call.\n\nFor the Chat Completions API, the list of tool definitions might look like:\n```json\n[\n { \"type\": \"function\", \"function\": { \"name\": \"get_weather\" } },\n { \"type\": \"function\", \"function\": { \"name\": \"get_time\" } }\n]\n```"}},"title":"ChatCompletionAllowedTools"}}},"title":"ChatCompletionAllowedToolsChoice"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["allowed_tools"]}}}]},{"allOf":[{"type":"object","description":"Specifies a tool the model should use. 
Use to force the model to call a specific function.","required":["function"],"properties":{"function":{"type":"object","required":["name"],"properties":{"name":{"type":"string","description":"The name of the function to call."}},"title":"FunctionName"}},"title":"ChatCompletionNamedToolChoice"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["function"]}}}]},{"allOf":[{"type":"object","required":["custom"],"properties":{"custom":{"type":"object","required":["name"],"properties":{"name":{"type":"string","description":"The name of the custom tool to call."}},"title":"CustomName"}},"title":"ChatCompletionNamedToolChoiceCustom"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["custom"]}}}]},{"allOf":[{"type":"string","enum":["none","auto","required"],"title":"ToolChoiceOptions"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["mode"]}}}]}],"title":"ChatCompletionToolChoiceOption"}]},"tools":{"type":["array","null"],"items":{"oneOf":[{"allOf":[{"description":"A function tool that can be used to generate a response.","type":"object","required":["function"],"properties":{"function":{"type":"object","required":["name"],"properties":{"description":{"type":["string","null"],"description":"A description of what the function does, used by the model to choose when and how to call the function."},"name":{"type":"string","description":"The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."},"parameters":{"description":"The parameters the functions accepts, described as a JSON Schema object. 
See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nOmitting `parameters` defines a function with an empty parameter list."},"strict":{"type":["boolean","null"],"description":"Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](https://platform.openai.com/docs/guides/function-calling)."}},"title":"FunctionObject"}},"title":"ChatCompletionTool"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["function"]}}}],"description":"A function tool that can be used to generate a response."},{"allOf":[{"description":"A custom tool that processes input using a specified format.","type":"object","required":["custom"],"properties":{"custom":{"type":"object","required":["name","format"],"properties":{"description":{"type":["string","null"],"description":"Optional description of the custom tool, used to provide more context."},"format":{"description":"The input format for the custom tool. Default is unconstrained text.","oneOf":[{"type":"object","description":"Unconstrained free-form text.","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}},{"type":"object","description":"A grammar defined by the user.","required":["grammar","type"],"properties":{"grammar":{"type":"object","required":["definition","syntax"],"properties":{"definition":{"type":"string","description":"The grammar definition."},"syntax":{"description":"The syntax of the grammar definition. 
One of `lark` or `regex`.","type":"string","enum":["lark","regex"],"title":"GrammarSyntax"}},"title":"CustomGrammarFormatParam"},"type":{"type":"string","enum":["grammar"]}}}],"title":"CustomToolPropertiesFormat"},"name":{"type":"string","description":"The name of the custom tool, used to identify it in tool calls."}},"title":"CustomToolProperties"}},"title":"CustomToolChatCompletions"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["custom"]}}}],"description":"A custom tool that processes input using a specified format."}],"title":"ChatCompletionTools"},"description":"A list of tools the model may call. You can provide either\n[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) or\n[function tools](https://platform.openai.com/docs/guides/function-calling)."},"top_logprobs":{"type":["integer","null"],"format":"int32","description":"An integer between 0 and 20 specifying the number of most likely tokens to\nreturn at each token position, each with an associated log probability.\n`logprobs` must be set to `true` if this parameter is used.","minimum":0},"top_p":{"type":["number","null"],"format":"float","description":"An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability mass.\nSo 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\n We generally recommend altering this or `temperature` but not both."},"user":{"type":["string","null"],"description":"This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use `prompt_cache_key`\ninstead to maintain caching optimizations.\nA stable identifier for your end-users.\nUsed to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and\nprevent abuse. 
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).","deprecated":true},"verbosity":{"oneOf":[{"type":"null"},{"description":"Constrains the verbosity of the model's response. Lower values will result in\nmore concise responses, while higher values will result in more verbose responses.\nCurrently supported values are `low`, `medium`, and `high`.","type":"string","enum":["low","medium","high"],"title":"Verbosity"}]},"web_search_options":{"oneOf":[{"type":"null"},{"description":"This tool searches the web for relevant results to use in a response.\nLearn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).","type":"object","properties":{"search_context_size":{"oneOf":[{"type":"null"},{"description":"High level guidance for the amount of context window space to use for the search. One of `low`, `medium`, or `high`. `medium` is the default.","type":"string","enum":["low","medium","high"],"title":"WebSearchContextSize"}]},"user_location":{"oneOf":[{"type":"null"},{"description":"Approximate location parameters for the search.","type":"object","required":["type","approximate"],"properties":{"approximate":{"type":"object","description":"Approximate location parameters for the search.","properties":{"city":{"type":["string","null"],"description":"Free text input for the city of the user, e.g. `San Francisco`."},"country":{"type":["string","null"],"description":"The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user, e.g. `US`."},"region":{"type":["string","null"],"description":"Free text input for the region of the user, e.g. `California`."},"timezone":{"type":["string","null"],"description":"The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g. 
`America/Los_Angeles`."}},"title":"WebSearchLocation"},"type":{"type":"string","enum":["approximate"],"title":"WebSearchUserLocationType"}},"title":"WebSearchUserLocation"}]}},"title":"WebSearchOptions"}]}},"title":"CreateChatCompletionRequest"},"example":{"model":"gpt-4o","messages":[{"role":"developer","content":"You are a helpful assistant."},{"role":"user","content":"Hello!"}],"stream":false}}},"required":true}}59>
60
61</RequestSchema>
62
63<StatusCodes
64 id={undefined}
65 label={undefined}
66 responses={{"200":{"description":"Chat completion generated successfully","content":{"application/json":{"schema":{"type":"object","description":"Represents a chat completion response returned by model, based on the provided input.","required":["id","choices","created","model","object"],"properties":{"choices":{"type":"array","items":{"type":"object","required":["index","message"],"properties":{"finish_reason":{"oneOf":[{"type":"null"},{"description":"The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.","type":"string","enum":["stop","length","tool_calls","content_filter","function_call"],"title":"FinishReason"}]},"index":{"type":"integer","format":"int32","description":"The index of the choice in the list of choices.","minimum":0},"logprobs":{"oneOf":[{"type":"null"},{"description":"Log probability information for the choice.","type":"object","properties":{"content":{"type":["array","null"],"items":{"type":"object","required":["token","logprob","top_logprobs"],"properties":{"bytes":{"type":["array","null"],"items":{"type":"integer","format":"int32","minimum":0},"description":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token."},"logprob":{"type":"number","format":"float","description":"The log probability of this token, if it is within the top 20 most likely tokens. 
Otherwise, the value `-9999.0` is used to signify that the token is very unlikely."},"token":{"type":"string","description":"The token."},"top_logprobs":{"type":"array","items":{"type":"object","required":["token","logprob"],"properties":{"bytes":{"type":["array","null"],"items":{"type":"integer","format":"int32","minimum":0},"description":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token."},"logprob":{"type":"number","format":"float","description":"The log probability of this token."},"token":{"type":"string","description":"The token."}},"title":"TopLogprobs"},"description":"List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned."}},"title":"ChatCompletionTokenLogprob"},"description":"A list of message content tokens with log probability information."},"refusal":{"type":["array","null"],"items":{"type":"object","required":["token","logprob","top_logprobs"],"properties":{"bytes":{"type":["array","null"],"items":{"type":"integer","format":"int32","minimum":0},"description":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token."},"logprob":{"type":"number","format":"float","description":"The log probability of this token, if it is within the top 20 most likely tokens. 
Otherwise, the value `-9999.0` is used to signify that the token is very unlikely."},"token":{"type":"string","description":"The token."},"top_logprobs":{"type":"array","items":{"type":"object","required":["token","logprob"],"properties":{"bytes":{"type":["array","null"],"items":{"type":"integer","format":"int32","minimum":0},"description":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token."},"logprob":{"type":"number","format":"float","description":"The log probability of this token."},"token":{"type":"string","description":"The token."}},"title":"TopLogprobs"},"description":"List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned."}},"title":"ChatCompletionTokenLogprob"}}},"title":"ChatChoiceLogprobs"}]},"message":{"type":"object","description":"A chat completion message generated by the model.","required":["role"],"properties":{"annotations":{"type":["array","null"],"items":{"oneOf":[{"type":"object","required":["url_citation","type"],"properties":{"type":{"type":"string","enum":["url_citation"]},"url_citation":{"type":"object","required":["end_index","start_index","title","url"],"properties":{"end_index":{"type":"integer","format":"int32","description":"The index of the last character of the URL citation in the message.","minimum":0},"start_index":{"type":"integer","format":"int32","description":"The index of the first character of the URL citation in the message.","minimum":0},"title":{"type":"string","description":"The title of the web resource."},"url":{"type":"string","description":"The URL of the web 
resource."}},"title":"UrlCitation"}}}],"title":"ChatCompletionResponseMessageAnnotation"}},"audio":{"oneOf":[{"type":"null"},{"description":"If the audio output modality is requested, this object contains data about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio).","type":"object","required":["id","expires_at","data","transcript"],"properties":{"data":{"type":"string","description":"Base64 encoded audio bytes generated by the model, in the format specified in the request."},"expires_at":{"type":"integer","format":"int64","description":"The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations.","minimum":0},"id":{"type":"string","description":"Unique identifier for this audio response."},"transcript":{"type":"string","description":"Transcript of the audio generated by the model."}},"title":"ChatCompletionResponseMessageAudio"}]},"content":{"type":["string","null"],"description":"The contents of the message."},"function_call":{"oneOf":[{"type":"null"},{"description":"Deprecated and replaced by `tool_calls`.\nThe name and arguments of a function that should be called, as generated by the model.","type":"object","required":["name","arguments"],"properties":{"arguments":{"type":"string","description":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. 
Validate the arguments in your code before calling your function."},"name":{"type":"string","description":"The name of the function to call."}},"title":"FunctionCall"}]},"refusal":{"type":["string","null"],"description":"The refusal message generated by the model."},"role":{"description":"The role of the author of this message.","type":"string","enum":["system","user","assistant","tool","function"],"title":"Role"},"tool_calls":{"type":["array","null"],"items":{"oneOf":[{"allOf":[{"type":"object","required":["id","function"],"properties":{"function":{"description":"The function that the model called.","type":"object","required":["name","arguments"],"properties":{"arguments":{"type":"string","description":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."},"name":{"type":"string","description":"The name of the function to call."}},"title":"FunctionCall"},"id":{"type":"string","description":"The ID of the tool call."}},"title":"ChatCompletionMessageToolCall"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["function"]}}}]},{"allOf":[{"type":"object","required":["id","custom_tool"],"properties":{"custom_tool":{"description":"The custom tool that the model called.","type":"object","required":["name","input"],"properties":{"input":{"type":"string","description":"The input for the custom tool call generated by the model."},"name":{"type":"string","description":"The name of the custom tool to call."}},"title":"CustomTool"},"id":{"type":"string","description":"The ID of the tool call."}},"title":"ChatCompletionMessageCustomToolCall"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["custom"]}}}]}],"title":"ChatCompletionMessageToolCalls"},"description":"The tool calls generated by the 
model, such as function calls."}},"title":"ChatCompletionResponseMessage"}},"title":"ChatChoice"},"description":"A list of chat completion choices. Can be more than one if `n` is greater than 1."},"created":{"type":"integer","format":"int32","description":"The Unix timestamp (in seconds) of when the chat completion was created.","minimum":0},"id":{"type":"string","description":"A unique identifier for the chat completion."},"model":{"type":"string","description":"The model used for the chat completion."},"object":{"type":"string","description":"The object type, which is always `chat.completion`."},"service_tier":{"oneOf":[{"type":"null"},{"description":"The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request.","type":"string","enum":["auto","default","flex","scale","priority"],"title":"ServiceTier"}]},"system_fingerprint":{"type":["string","null"],"description":"This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.","deprecated":true},"usage":{"oneOf":[{"type":"null"},{"type":"object","description":"Usage statistics for the completion request.","required":["prompt_tokens","completion_tokens","total_tokens"],"properties":{"completion_tokens":{"type":"integer","format":"int32","description":"Number of tokens in the generated completion.","minimum":0},"completion_tokens_details":{"oneOf":[{"type":"null"},{"description":"Breakdown of tokens used in a completion.","type":"object","properties":{"accepted_prediction_tokens":{"type":["integer","null"],"format":"int32","minimum":0},"audio_tokens":{"type":["integer","null"],"format":"int32","description":"Audio input tokens generated by the model.","minimum":0},"reasoning_tokens":{"type":["integer","null"],"format":"int32","description":"Tokens generated by the model for 
reasoning.","minimum":0},"rejected_prediction_tokens":{"type":["integer","null"],"format":"int32","description":" When using Predicted Outputs, the number of tokens in the\nprediction that did not appear in the completion. However, like\nreasoning tokens, these tokens are still counted in the total\ncompletion tokens for purposes of billing, output, and context\nwindow limits.","minimum":0}},"title":"CompletionTokensDetails"}]},"prompt_tokens":{"type":"integer","format":"int32","description":"Number of tokens in the prompt.","minimum":0},"prompt_tokens_details":{"oneOf":[{"type":"null"},{"description":"Breakdown of tokens used in the prompt.","type":"object","properties":{"audio_tokens":{"type":["integer","null"],"format":"int32","description":"Audio input tokens present in the prompt.","minimum":0},"cached_tokens":{"type":["integer","null"],"format":"int32","description":"Cached tokens present in the prompt.","minimum":0}},"title":"PromptTokensDetails"}]},"total_tokens":{"type":"integer","format":"int32","description":"Total number of tokens used in the request (prompt + completion).","minimum":0}},"title":"CompletionUsage"}]}},"title":"CreateChatCompletionResponse"},"example":{"id":"chatcmpl-123","object":"chat.completion","created":1677652288,"model":"gpt-4o-mini","system_fingerprint":"fp_44709d6fcb","choices":[{"index":0,"message":{"role":"assistant","content":"\n\nHello there, how may I assist you today?"},"logprobs":null,"finish_reason":"stop"}],"usage":{"prompt_tokens":9,"completion_tokens":12,"total_tokens":21,"completion_tokens_details":{"reasoning_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}}}}}},"404":{"description":"The specified model was not found"},"500":{"description":"An internal server error occurred while processing the chat completion","content":{"application/json":{"schema":{},"example":{"error":"An internal server error occurred while processing the chat completion."}}}}}}67>
68
69</StatusCodes>
70
71
72
1---
2id: post-chat-completions
3title: "Create Chat Completion"
4description: "Creates a model response for the given chat conversation."
5sidebar_label: "Create Chat Completion"
6hide_title: true
7hide_table_of_contents: true
8api: eJztfWlzG0eS6F+p7YkNm7MASMoajc2NFxu0ZL3hPttSmOQ4dgkHUOguADVqVPV0dZOCFfzvG5lZVx+4KNLr5+B8GIvoOrOysvLOT0nFFyY5u0nOL5JfBkkmTFrKopJaJWfJ61LwShjG2UpnImelMIVWRrC5Llm1FGwhb4Vi6ZJXLNXqVpSGQ9dRMkh0IUr84yJLzpJCm2oC7SapXhW5gA8mGSSl+GctTPWtztbJ2afe6Rl3E7iOzPZitZFqwTjLuVrUfCFomTB7qlUlVAVj8qLIZYpLOf6HgYE/JSZdihWHf1XrQiRniZ79Q6SVXZAsRQYgWQlj+ELAOnFgAFBRwsYqKQyOXWdSwz+0Eu/mydmNH1DVeZ7cD9pbes9LvhKVKA3CEPszXVdFXY3YT3ZudrcUqvGNSeM2DZ9ltRyr6UpnPJewlDN2M6a1jJNfpiN2873gpWIrXYpfvlxWVWHOjo+LnFdzXa5GuhCKy1GqV8eZTs3xopaZMMfY/wiAtw0mt1qmIhkkMBKvuhCxv3fO8rIQqZxLYRBv7K5oi9RlxH6oTcVmgmklmJ6z6R2/nQ7YlPMU/rMqvoL/zHP6Uxe1mQ6YLtm0SFenr6bRwk1VSrVIBolQ9QoWfcdvk0HCeQonWXwFy8/xDxglGSQ4AuylklUOI7xe8uq1x7dzWOZb2tj9wIKgs8OrpWD4CXdIF6Y2sGFtL042Ypd1UegSDhGbGsZLMVZTnud6jZs1S/jPjOc5z+BfqS55Dv8Q6VIjAPgsF/APpW85QkKtP8J/AVWnA8ZVxqZmKVcrUSJQOqjZAQ/OnvwC2LqxiVlub0Ar3t4G97K9Cexyewvc//YmAJntLQBm21sANHe0IBg3GvVeGV0toVn7ptDPZ+0J7u/vtyPi3xH97u+3Nkruf7kfJHOkGSpdT+DK59U6THeTqHo1E2UyIFL1i7/SZ8k81xw20cTvH7E9m4nqTgjFhi9GJ4hsL0YnI/ZeG1nJW8FueV4Lw3A++atgStyxSn8QyrAZNyJjWo1VtRSyZOKjNBUQcL9MJhVenkp8rJjRbM7LActEWgqOlN5frC/MWOXyg8jlUuuMblgheIUtDF8Jlksl2K0oZ7ySqxFc23mtUtjKJOV5fgjNfiOKUqQcLq1UbM5vdYn0qdI6n6RLOI7paKzG6rVWValzw+6WMl2yL+WccbU+Ym5moOIwucjYbB02MxqrqdJKTNlKcGUi8nEn85wpXWEvxsNAAHepTCV4xhZCwUtLzzS9WDAiryvdHTHlihUy/eCP0famd9R2B6oKM9KPbtLRWBERX9PvBS8rmdY5L8O6biVn00/jRPGVGCdnbJys1hP3dZzcT4HYpyJeUaVpdxW88dFcHiiSWmdizuu8ordRad8USSgrSmGEqkbM7rvVSc43tN9KH7v0nZacaWH6joWIr6X1SPdhDUJlw9qIctSgUGoDddk0Z+fguPIjbzivmHzXle4nVM353m4+HcGMfcKzxhQxpXtrf+8SO/+lh+V61CUA5nWnx1/7SO0WYuuW/BrIBZFTh0IxGeVlydcRFZWVWJldnCWuZ5AUnhvsLrkBlTCdRxQ3XxN65yz6G6jU3dJSRX9HAXsHwJk0qRDCeam1EZb9VBlb6rsG+APQ7wcbYNqHwdAS1tJYRqWB1yNyGHg/Pvx1wM6H/z1gJ8NvkL0DTp5LxWqVidKkugRCpzKWcbOEjQAzDKSLf5SresVyoRbVEmZ79RKXGcG4l2UL3xvrM4ynqSgqM7AQnYmMcaCx/3n57kd2iQIEo8MdsUtBXN8N8tL789zw0A0dCdbq2M0+tNf5CKUE8ZEDUhqiLzhPvIhSzEUpVBrNC3LOkISckS4XxwS8iqtMqsUw+npMM2Q6rVdCVZxel5muLdIQaw7U+N1KVvhMTAPEpkBgpcKnxx8sHYhiYlVU6wBelktTjbbwLW/97bpvI9G2F9jQ23uO48Ov4QQDbq/42r+TdIBSFXWFQtgIcdZNkJ
xVZS3uB0muF7KazCRv3HZ/m/uv3w86k3O6VBF7oucR4bLMEC8KweHGOJ4niLe0H8I+xhmclkU0eiRXvDBumC/DwHSZZUmf2MUbz03B3/JXUR7BpeOKcWN0KhGcsD/i2di81Cs2PD05gVanJyejsfqBV4AklQR0XA9wNOwhDeNZhruhzQKwjAdxi7IUpQR9gQbOrAC0Ho0VXD3xkacVE/M5bA15nVterlkhSuo4YLO6chyl5ztP8RacMrPUdZ459hCZFqnsv1vAF7lAlPj3sbKjQQParC5hs260UhjkFxTjbMYVw9uX5rUB3taP44hZKXJxy1VFIAZE4lkmoQXP3zeouSWSUlVigUy3Z7alqr56gYSKOqx/5KtGH/9YIU4WpZ41MHKmdS642oiSPy8FCBrEI1d1qeCwGAzDZ5LUB247Viq3qKVL4HFG7GLO4EoMxor6G3fi3UEET5eNUeyUdG+h29QqZqZ4fy2/OUVCveIfI+XQhFYR7zQAryOxEBA7j6FidQHYNNO1yrzSigQf3DTtFG8VMFgzEaEw6meiazkA9MproKBjdSuNnOVtkAFi3gAGaoXiCv66/2vge6IWZiUVvGjJ2YkFziNABPlJ+1IGMNzss9CYivTCS5Jg11bWjdjVUjoaYzsh8wGMBQlM7CbVpormj6blhTwuSplKtbBvFQqHYVoQOc7fXyBJ8fNI4M/vWLbh3ehFNas7kWaskLfXq4JXeMj4ot3ooRElIDrSpgcfa/upaR+00zkGAuA4TM9ZBomF53lTdunlNp0ytMNjRlrSLp7Yj546ZOJW5BqJs5UytwhPkajx2cuHA++uHX/tpZL97MVPpD79gdb+mjb3npfVFQy0S4eD37prwB6bVUS0chAyNosZdl1vHHS7C0y2CCnbe3fZ9N0ChGK6oBeM+HZHNUnclwUHKft9qW8Bt6NHXiqiPZa3z+QcudIK+Az/eEeDeLxCdU2pc7GNOdywz53nBuN2zw1/3XJuHtXt4Q1+25tm1qYSq32uWR99R/q4a8jn+3ng/bxEAPZdzp4bhCAF0NtjQLw3TlQlmSljsKoRe6vL1umYAdMqX+N3NoU1oj7LONPBKHnQav9wxKCxySeiBHQy/ztkAFV7j0kEnm//ptu/98nKFV+ISV322ILDpx3Kv97emai4PMgs0bSoUn+Ww8vlzhtXNGLBGOyksJu/SwNX80Bt1S32Os713VCXw6VcLIdzmYlcVushzjVs6JmOtplkUSMNEu1dMkhgqNj8egGDvSGIoOa1CdUNeP+dREGXs+ufvm+AgFmqBQaoVy+ZUKkG3QV9y3jFm8QGZ78u84egK/Z9InyNcG830jZBQyhAer0bsrqTAuwh7gGN+wCjTMgNonsjoo877gScwhavAvy8EwG+bZ4v7dOe72CjZ8KV13M6pOkZgL2uS3jA8rV7hw0bg1/BOEGpcQx+BeNktxMCuB/EmA4gavgX3Pd9fAgq2o5Pg4rxsR9GQeey7x3GX3twpNtq0sSFnTzMVffiwzB4rNYMgvaOghtvY8YGVrWJDM1YoeqfpiJsgqXI7MCFXLwBHOOgl8o192upNCwEzAsIWD/Bgexax9Yic7H3FtG4MVZ+ixHCvZW5eEcn8gA8hN5PhIaES3vx79emX7T+TO49+PMQo2wsr+udHei9RQjvQWtJtXTEZgLOyanJvOWCdM7kgzdir7ny1jHgZQb0oqHVLKbxe8gLXej84aSFaItPJCsAv74PNfxMF8I3vOL2Kefgx3ArdW3sgQcHTTDmxB4mT+gSKLOepz/b/VxfK/lP0NVmcNRzKUqrct9zT7uP/NwYCcyoI0qRZ1QkiO0L9z4RjbsZvGwVnDhrlQtjnJsQz3MzRXfFhh8SSfbOhvcs5f0+pLxSzGvDe6Q092Evtwfb2HtV9VtH98DjLrx+sut4GpD57e/1srZv2WM+r6/JXoQewSWJsIDlQTE2IAstT4Ezt47D9NluYrrH67dhB02nn89xGyS3sC
LnKR1+TBPAPmbZNmjGywV6YuB1jrwq0OZmjdTeb2YATNsmtNpOtK0Hkp+ux7fdf9oL28PC+zyG8Hi3LBc0E+iX4ZzBf9TI71jnpZbfHc/v+DoMBUY/mWF/suGBu8eS53mdSgXfIx8f6O5wbLZma11H7ovWY4b9HcZzDFfYmFTUHqQINhNzQEfnd9cY6JH8o2DsFife8Un7/4pHG3QJ6F5CTZuSWszxry8OHe7UXu55h76AMksCMegRYCMHxx79QuMax+646P72fFf/8Hd1sBdLHHQE5Lil856Bm2+YfbOutM7tTE8iaXvEP1BfDrcmrU2lVxPYTo9JJPrYbxbBBgSMz7k+KAxvUFLudzLY1FPMeF14hzbydw/Fr8bO+1DsNTaAo396BAtzPSGa0Y53MZ4tlDfgyH0gi/dEKgD/Ih2uoofduAdugzkxeuMmfUL3QdbFKprvUeTO5oDPwueBDgZw/k/kXhCfzCM4F3RX2mC/Jn10aOexRIM+0d2k5+ew56s/miS6Zwcxseh9GzlbN3gIAN0g8tKNdfNPxqP0HoVjXJ72OFocxS6kC4vpRLw4x3+H4ei6WUpv6ogj021U4Yi9EYVAK7XV0Y/VwUr6ta5BRT8IwpAXUGDThn0ZIrSPxgrCzvw1G5AH+g1cwAeHixwNxuoGdf4H2HHJiH+EzPdYkTH4cI0weqpW3Nni9teJWMe6oadSfEF+09YxH20ewGLNZV4JDJCIMgZ4BwaI/plpXmbblw6+yMOo/05dtjPuTpzW2Ux4IbvYvaFdwHfnlh/fth8cxFByD8hxSDjZhmv/zvqhI94hlwzYeYfqIsS0RpyXF8aIlw3JBTD05AdtKuvkjLGSKS84+rrP42hRsjhRpGsz2vKMTW/G+PRBNgIYEYjSdFFUw5d6iBg0RB2/uJtG0Y08Nzp2Dr9xqxyrhyEpu9I+U4QVHKT13g4QmOlqSfwMat8aFgczQCjC4moj4m0NWJRvof3eDxLvD+FO/ic74g/hzDuE7GFHOGJ7HddYbTkvDLZtHNlDDmys/ImxRz+wsXrIiTU31jy0e5fUY+fd+gF3fPFmuz3WUnQLNTL16K+mI/auEOr8Yqw0PBLgRXAnM8FKrhZ4RvbkkHkLDwkeoo2sGUBEFOpaVCrGKl3ykqdAG00lUxsUCCESghVaqsqADWpOET/Rw7a3s5d93sYKIjRLfWdIMY3xEKVg/JbLHBGMGhJv8llRKX/Td2zF1bqT54WC602HZGGcEUnj3u4WlFB4bSCabGahtQCRPLbHh9CXILO7EJ601MYwkOedFE6LGLH/J0TBpmoKmrHp6RRRHmM3IM8BRrB0wnZAmZXnIp/0KycPiN0SCmF+40ZsMo5SLfa/be0g0z+lWs3logbkH7rxh51Q1LHKsAmJE7UhhW5Rikx6zee+fMBrOyPxZGT2vXlPQ4mMESncf0eF6zmkeCwDnBGROiAFC8gjlK+ZXBWlvg1XllVyJQz5xuSAJlaQcoZUIjy+NWD/B6XvFONLSLwAzeRK2AAnpFQG0XcFOlQYFDARE7yUopFnAZ1v8MajLLaSCgKdl0ASENlxIId+JGz0iuibVAlL4kDdEB48XmDEK0KBixDFB8p+uw3sBzFQqXd4sxpcH7rX9gGM1BO9QvJWtcQTr/MQrYab1LOgnLVRMpy1nlciSlZCxjo4VnvmrHHkeNaNY96uIfnf14b8cdyier2g3nuyZbceazJ2KSmdd38ze0RnSGLzKdNIKn5nKXju3NOyFGsbjN6bfceGVLey70QB1sgnVDz/YH2SaLZCpsa+D3pVVJOUp0sx+SDWByhNrm2KCmKgSIGRRkdNKdSMXEnIQWMZR6SfgHT4KpMJCHstZcUwTQ6wR2iKNxSvOgW/rSmbS5FnD0ydRpscwkTk946WThv1ORHzuS4PcjV6rcEPk0tlGHVmmPTOxRXPdTlWUZzxZwSkjlXHsVl4WRzI2BT5GUpBlus7zMQmMlmvXKox8N2fAkwzsO
At4K1z67Jrhyc4xNXPuYFsEOEQYZS5uBOlQ1K432PV2DHG4/srPlbA7J0xL6X8ZViU2okmVqpBRPgSBsc31gnsR3bFrL3Mba7bmK7HsnY892ELBAgXvzBIPrbjGH5yc3xHKIAEwW1jEhzS98WLc58IwoRMTCFPR9s8toKkKja5IIhCl4IyeFSaTT+xMU5GWZogx8SELKokJzV+OGOfRqPRPbufWjbUjNVlVdZpVZf+aXRZr4QydSk6WaxWvEqXdB/hKHLIWAFG4LFyhtyeMJWeSQ4MWTF+BM8a7gMKgvI4ibZMGRLyTJRk/Ia9WXl6rBqbdjpAD4CQoYsC1K39e8Suka5OI2BPx0oaYITmoiwtG2IlRPIAIjRm8sEMYZzDE3CG3lT7fJ9Zk8DjGj92pp06ZH3R+TzOMqMBn2i19mhbq40+bWSd4zb7pJR68ixS7f1KfIQ3pJPKwI1jBYn4bCIpmxrN3W/rP/Jw40ZrNU+ZRiocQndJ9M07BrRWtU8CqbEi6mfhNKtlTjTCjm3YzVKUWxI8Ec8BYEurz9Mw0BhuTzxbUm4pEmhj/a9PGDMaqwtIsoNSGqa0aBN/K5nNdZ7DBn3qHzuJvS5j5ZLE2AvjeLJ38IBzZuoZTKLnDQjGxktc5FhNaQ9o2JzCgqaoWsz9AzOA1z/b9MaM1SM8Mg3Ry6l/KcbrP41WtPh9pI2YBLQkjuawxF0YPhfVehKc6g+69qZCHIhc8p2wtRR5gRc6RQG5RC0JPudRfmWXn2oNd/BW6pxQhRh4yNlZ48tY6FymUpiRsw1cvDGRb6uLs6LRaowSAO2NW5Qh/R/mVGQ/w30DnYuAfHXcLC1uSlx6iYkoQewWKwiP5VlWCmNArGG6zAjx+a2WGTPWHAhxB8rPtibu0/s+OgFhrA6TEOhYhjNhqmEB6ltQKf7J/hrAbew9FiLbX6P66mUPoQTCLDggJdwCqdi3ouII8IsoG9mAIUNG6Qcsn/YBTgAW6vh3l7hLeIKOmmdKCWbqdIkHBYw/pGAVWRDFkKh6J9ApbGtK6urg5OcTb5HV23uMosCAC37jZ11RYp+KLWpeclUJdIFWGSpd/EBB982mtLXJXKqFKItSQuYpR5/HKiSnQ6WbklWkh7PEaMbTD0JlG7LEGVHeylRMKnvTHha8XZQ6FTY2DxgLr4HC4S2xDSa6IQv09gsIpP4CKa6KmzkNuBvbpg0nANOiGSyaOeVvyNH1vtQkYxCPbEbsmkJZMGPxnTQi6jRodMFJIZTxCyuFfdFervv9wBVDSDkvyc6BOlRAomAV8W8vpWgTmY97akx+M8/FxwMU5bn4OAxnc/QFMBJf3GA6O1mtN+WrGroGUd/jowN3nOrSckuw3fjEcFM/UxZcPKRBbMxjM7Hkt5BvTxqLHHiJfnZzT2OUnUb5GeEhdYN5FmamszUtk5Kfib4xyKWlodqLMBpOgvG0qoFg+PcERhANvCbVqp+ZBrVvSTCMeccZ+g4na9HW72SPtAMWWJgIXnxMgL3jOWZktUcXS/GXtNkruOL0xla6OOSy/6irmEeBI85BHKxYW4eDJkPSq+iXQ6B5lN3yugCYvWSGEmWT0aIk+J2/v6ATgmXFHNq8Li1vB2oVjDNxOfzgDEDLFxJMO4UpXjZd+JkemuSrrYeO4VnpomEA8lAtxUN4V0pUiEgFQ0ScKckJkHV7QwEJ1KQBvQJ+oC6tRppl8MDlOS5uf3IR9zpiqKMTt/wQxRw2P4LLk9VpRfzRpUs34E3elLfCarUZqcLi35i+FSX7+odvPX3JSl0U6NuHMoLgq0PgvJm595cVvFr8bDRDSA6a5lKoCkPoJUqMwehKBTxukBiUQ8jIzcQtOJUGmPlcXKOV/hXAi/KOUMPrS4Ldz2J2fP7+4vgyDDKhQY5RvzIxnQ9/+g7+M6GFWj0c6IV8Gt9L/EKUl6ysM5Hruz1OEui/T8iL/kfHxg12NFZzF8YW8ZQDxnPtjEQ4ve8R1KQHSSPUee
g7H1mRhvVNDyTbyp1LrrJcWCLglkAgi3BnYm0620ngdh3KO2sWQians1sr8iHWwfX1xlT4ZUrtzxhJdp2AavtQTfRsXhuSSw6kKsoiOk3EooE8jpOwDAU9um14lpmxKrnK9Ip5bw1jk+9Oo6ZWwoUTCEDIRF5xC3X0MVBwUmjJKfga0kYwI38F9QVm/JeVXPiwrFSUSMaNzMQQGFkFiR2qiqcfDCXdhUS50VpxftL0W7ihGse+j5SAl2cZyGQrTHa/0jXcZT1HKoN2cHvNkQbQNkbsv6wXDh5Yz4mQ98Sc50Y0DDaADzOusjuZVUtI2w+nXpW1IfWTEtWdLj9AdYcPPiXwWHXkUJcn25qOID8oRkjYhaAYejgJHGDyZJ/hl6XLWn3okj0bbYSMEgDljN28effjd+BX5b1VrsjwBMlvAw7YpwoGNUt9Z6xbPeTRJcHZQIJu9PohSkLJaipZei4qpAifWr8VN36siAEh2yboBr/Ukq8p5zQ4veCTioswro/RnvvjzVUTgljdGYDOJlgesT//+cd3V9+d/fnPmD/YExQSRitRlnWBbrBwvsDkwQteilRI4grHai4BxLRvC2hy6CBOxQGn4nkDREEJR1zlFhM4XV1LiYgHqcQKy1bVTU7kUDvtz6iqt8muWTSozfQy8CZcZ79lf5MLAHycnvpk9HUkk0dcDVJwoi/k55JDPua7TvcXUXdpu811iiw46j5jcZ6kBBelDqx60Ktw54qLGAqebZUuJsUUTx9ODnzyoiDMpauQdIDBs7dyCroafWbVFLWmYX67milxuDqmqIexna6+Oz6a6XCtfR031VzBPbl6K8F8FYqtDBp/nbEtZVl8XRayiPcUZoHZRrtKsuCS9yzH0m3bEDT2C8+AwlV3IkPvur6o2MbnLQmVt80BwLCRVz1T4Ndu4bpgv3eRUSZynGykXQKuAGDgPeKNILWXBR1uolUPBbERJWG+0lY9ZDfrJlMBySGtU0DyBiraf/WVW2nnXGhPsYeU7cEYCX8YtIf04ZxGA+9aRzm2HdKWwA+8Cgg+WdnCDw07eKRa9jugYBgAx1v7YsDjwF5HsQbn7y9I2Nk4z0oulhXLtf6AJPdsrKZTNPmN1c1YMda0LG+5miy+mwtRTe4ElF4owQLN7gefNxY4KdJAY/ULLnFb9TCC0BWexpa4yrgdnecThdC17vihYZVB0cpdCHELLcCTlV0TH4pUsOcycKcyTzcWHtovNcDhFtynDE2HUhNbcAE+Z+HC/s5Cy12scH9A+W8J7D3jtA+DNvX5LeKs+yC+yQVqL8ruWeonWTs+uTuCBttrsdx984k5MDlI+/WJrpjLTNAqbxEe4KDZ2BF/9lRU5Lma2HM1sT9iNbHf3uelEXczYjs8X3a4vPiySDEUHuT1wvqcXiKXSjqFHodK52vZjic61NOyg2z9LMfOPLI2m8nTcxpdYvdAit58QNuDdtLXOGOvTfbri5hHtS2tX9z2p+IR+J8tCbcf9F68c6nFWs9Gi0kaeHjamB5CUYyr+FjtTNbt0/CAA2JPNh4I7rd6BsNqlTph3Bo9D3GcvW70npdCDGFeP85v6SR7zhYlX614GefEqmydiPZibNONjqXu+6607F7YBRv5WlX8Yx+u+EZ7PeCNbUjPAdjh+z0s8ZtDpZ4B2DtbND7n5QcKvoUArI9by8JDW9zwQnyMOdn/S+Nf0oK6jLz9Tm5374Fy7+PI50DecuILeZVCxcBQa/6zBZJw15xHGxphVcj9ZDbllIoqGPY3aBLup+L3Y1mleyc+g7xuFx96IvSbGqd2fVHiB5zhy1E2gUUpxuomWugBxtxurC4OM8RhrJNB49n6jLGPrAa/mPTVmXxI9UXbp23tOGmHr4R4bIx8xeCyta/OqJ3XCuMVuX6SwafAgDewn+OPjlOMqps2i1SCkWvqtjYltftMOPaNeCZUTYORo+EXBVeoE9uNJpDPsRJBTCUYVhTHsL2oPqp1BQh2o4GzgK
g6zUVtfEu0/zhPIGuu0ArMv1YCIT/KKEkXwpTGhw3E8GErbtDkoNnJ6NTaKShbVOjaSq4DvkKnJ//aGYZSUdiliAyZfra/eSlsvcfIhInWD0m+BM6waASVxkZoNLLxdnymrQtWO3ZxSnrC7u/guU6GJfAj5ZJkTBsO6IzbKA0BePvcrIGNQSu2q+kODa8t5Z5pDCxvhjHCymeiAgyd1ekHgQJKJxgS9uHdt6013HpxY/4fSOsBXjd8hmrQB0U/HuDb3OtDeyvKGVzl9QNDJNEb0I3RCN7/wkQuJd/HFlIUznxQInlzA7qm4N0aZRMh4+qyYZxt9iXeleaPuu4RWbklnHIb09ITehg9ZH/3wESV152YTYzgZbrcz2un5+bg00pjWEPRnZghwvryx47I2Dom7WjNjhiKAieMQsPiFAeoOeCZG96J2ZC6/wf4XMF5/x9wt+pN8dRK3kQAsfLGBLxpDoEKmOptoS1YUcP/mPxjfJQ8uliqTN8xU/DUF3oJ3sqwkIh5bWEEkEIbX+t+bJlSPwNTfhazS5z/Na30EsBA1baMKCe5Dr5Te8enFkWpP8oVSMiuf6wSa+17h4CL3wYJD4P2mHijjz2yzOcuryVZy+qQ+PW3pRBRBoIgqkZkCkA9YGK0GLHpJVfsbcmBBqWaamKngE3lIZNicOKdHub0NNxcXL5jdhTMhRyumVCjO/lBFiKTpNaDv44vLt9Nvjp99Wp4etS3xuvLqY1uXxymGtgEDBqob6rXPJdzXSrJaUowY/4KVojDgHFzcf7jOXOdw/bhF17IkdTHDX3lseSKD1170wuF85UoZcqPv9dmcq4WIhdm2pSi/OX63l2jPSTEFqZ3xoISNm68Kxijd8a4FdznvjaR01Ms2UGeHNGbJwSWbxXHzu0hT84SSnaVxGWrb1zqx6iKblRvJfkvmx+FI08yr/NmGnffG5m8uOPfRJ7rf0Gpzfkwo/8gGqkD3bBMhX+IYbkvTk56nDRaPuHBK9nUKYiQ8zrP10mjVkzkY4hacXxNNobnNuf7SVhPF9Bvdx3SrSe198+frYmHGXQCK0CuzFylsL502+T4B//CA4Xf6MAGbm1dpaHt80AnGakyDKNw+Yt76rspaZYTinc4jAURNkoittxX6M/eUM37IAdpvEvmFBpamc71BQaaMwWyBbcxE5g2jdnaQxa8jSCIAQiOaAoKg1kLURBdrXwU9AyyFezDgRsFJj6D8SxWTSjPJY7rEutASw0GEpGxrBbkIjTP+YJ8fiitPTWl3gYGjOsMNXZsRUdu9UE9NYi+DFz50Ya+sdvDJvqFcTGDhCCVNCoy+Ftk95u0C6tE9O4t4grlziCGhNAr4GDQRezQQZC6OBNedUh47o7GqXNcire2iB/rQfZF2O+bOoeGp73nAHC+3YxqT6LhXflB+zkpQM3E7ydpaXk6t3W2rg7LSrrlQBoA3aRPs/3hglgi6fQL11dvh18zXFD4yGPDAm4OJXN4TKRCJ08eBUlFLviUmMuOYolsnVeyyL1+wxpFZYmTtuY0XmuU6tWMTAqtRFQYuJdWxOs0e/t6RlOAo7ulFKGrdP8mHcrQLpOAlBH0veJph8IJbkNLJeYjpHD8ASyJYnRAQ+TyQekCdHZd1dyIvXMRoYMoJm86/Oabb74ZnUyd6gxVW3KhQPnsXfNIhwcJUUS5hmJpOLJVQQK67qX1DmDp11semFqtdU+eL8bzxYgx7GC0vI/do4rvHXp2Dvx7GRI+9qjAA+hbqxwwlyW3qRIfsQvFSsoGjHosAqeNprXZtpa8nQ3VsioiIwf/oCt3XOm2cIorWIHd4h5p2j0LE6uk24cQZ17YUD7q+UF8vvfPD+Lzg/h8MZ4fxN/zg9hphzJggACWhLDqk92a7I4CaVeR2X1KtnCltMXeA72kt9GQuswnqaRxN/oj7VKNNgZBG0X8ww4qJlQ2cRoqU/Gy8n/RgeBo3TWFbo+hecg52HAdgXG/Xv/0PXPbcFqJqC
hXgyTGS3+MFc1l+blLsgi9FwWApm4OsP2Vwui6TCmNPBzAXsPA6voHie7XdZm/drjR8rZqKbdtWQ5bH9zjP452cGV2G+lMJSl8rC5W/FizUGSFkiaBpwPlcvURzVmo7B7G2VT+/GG2+r0ruoO6v4DQwAk9D1C2ZpBUJVe04x43QFsLaPsZfsuNePWSCQXGKFe+g96zftI1aGaL3KhfHZGJwq951w3pzZom2LWSH6kuQMVXBfsSkimIVKvMkG/7HeVRkp3q9DbqmEEqD3R5Qpd/YyTWYrGZdTATCY5j7eTIUwzRuSkuU9VVRe5T3PEaU+W13Up6FkuvdzjL3TfPt/WxoDjgQ0qaN68c3jF8/B5WVa1dhTAqQPj0pbspe+Fz7e7nesAHBF2G2t2PUwx7S9lZV4OvZxAdHmNeV0tdev4+eus3Gnowq2IycEbaUPaUjD6tStk+Xyks57lS9/PN/F3fzOdK3c+Vup8oAvy5UndH0XMVb26zDIBphrlpRl+2I1e2MZrdli6nwUZV44ZqdF43R2l6QGkEuVLkHGvDQXpD9Lqx+qRT8uOzjjgPFNu3CSUYlW1zu7ZXDI4kduoHCRTnNvt2V6TozDU6oKDilScLPtHxphEtxu41pBWnoWFU6NJXBVvyahRGn9pE2w9L33zVzqLsNxKncu4kt/VBCBhU4RPuWY+brZl5N0q9T53rtpNA+/DIC981qKzJidnm1vbppHkf51XW1jqIMSSvo4BciULzPxqh4haQLrkROF41snyH2HW6Nn4JNuv3kt8CFyIUW/HM8kqU5keuCgjgDknTVhviGXx2wYcmx7xuJfsLN6STwbat2LWhKaQmR68r1yP8hknz3J891fraPR5AtH5se8VZrA3kPb7nLcrUWcEkExWX+UFuWN+Wgn/IoFhjWIRDGt6afbsTFmVuENkklLrsQGav8LjGJlF/8rBxWjQaFTHE8Nh9btYqNJYQKpQ9wjKutk1to0XsdD0L+Qcmbf9sCDeXREnSKf6zXciRLEBd701pi8GHlRAJyGRGMlKjUl6ERuxv+k7cgos4WKriKmg0Ms5ngu0QisFDtmhyzg90HS/nWEU33fbAx6UuCw2hrXrOZhJFpIFVNA9skV4M5RgrG3WSy5XslKWNGaGmscq8sRfNVi6MackjUgEauFssN57vse98c94dN/4J76Z9/3aAAmP9ssdYwmsc6ZDZI/R4j997UKPxhDyEqcW8rZ3bFx+We72/pEWyf4tu29E+GH1NbP8eEQ4kKLRCHIA/ToBpTFdFPjx98VXwnqffI2YycrU/O33117+++suLF19/PWiFSWA+/6SfpUrmxeTly7+efJO9mqezyIkfsN6a/E4aBmIbJhGr3kKsBHBKGC9Bdu4B5l4C/c2FjbagvMo64+v/SBqOzoBWg7a7Pnl2g5jneZsWcfimj9U4O33RRpUXpzve9+6jdDLY+gSfbH8/Tu7xf4Pk5cnLfq1FYK1t3h5O+qy5rlUGwPlLX/SIjW4vIduJNeuIsgQ1aooViTMbOtqSB1oyzgHRJU3sxLkebRkgRyOQVqJaakD8Qhuki7xaJmfJ8e0ppbIPXYCRpAkJR9GAm4AZ8uz4GILr8qU21dnXJ9+cdG4/RCdBjhgMDwIloF36iLmyELY69nQ4hBGnlKvBiLRGeQVVwYWkcrI3qH4AcNlQpW91tv4uQMpacYleDpJ5STEd61CP96RjKXKNc72Q1WQmuUHrGhX/hsirMytsDkJDe38wC1Sy4h8nPffhpBUoFS5sEKRQ/xT+7sRSwbO3fz+rqT+sE2r1UXPYhF0YwQGzH2ydYZ2dI/wUmwBakEU7gANsh8pt2Ikfb4J0u72jyiWV2h8IXo8K6LUSFSf7ttujtesjx7BtA+4JCNMhGoBMmucin8SAINwJRCzM1i0ifdJXWjkGebv6sBuqU37WfeipHBfGaypKfBcshBP9BWnTaRcuRC/+K4Sguy6NfOsnrfThvhGlbdwG5aav44
lPznHSyC7gBuwLinc8ZQIux8jSWJahrBVo3no1TqaQYBYOTXKZCmAlfCrR5LyA44FK3aDYjmEbfaEkCIYGPh2dno5OhrWiLBGYdVCbasVVNCwxMu2cxe1FxvhOPTDhebN2i1MuLOStUO5lCK4IEbsMogV4mEgVuezQE3GT3J4i18KrhtYBFQzwEkCTT58glPG6zO/v4ed/1qJEAj5IbnkpcbNAzgcJlJUAGN18SgivbUX14RWFhKNbLZCF9oMJV5x6nCPLsLXtL9F79/7d5VUySKDcVeLTeiclhwh6+P+zBNi/kEEBf/uU5FwtauSIEhoT/vc/e0lG8A==
9sidebar_class_name: "post api-method"
10info_path: docs/api/HTTP/runtime
11custom_edit_url: null
12proxy: http://localhost:8090
13---
14
15import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint";
16import ParamsDetails from "@theme/ParamsDetails";
17import RequestSchema from "@theme/RequestSchema";
18import StatusCodes from "@theme/StatusCodes";
19import OperationTabs from "@theme/OperationTabs";
20import TabItem from "@theme/TabItem";
21import Heading from "@theme/Heading";
22
{/* Page title (h1), taken from the OpenAPI operation summary */}
<Heading as="h1" className="openapi__heading" children="Create Chat Completion" />
29
{/* Method + path banner for this endpoint: POST /v1/chat/completions */}
<MethodEndpoint method="post" path="/v1/chat/completions" context="endpoint" />
37
38
39
40Creates a model response for the given chat conversation.
41
{/* Section heading anchoring the request documentation below (#request) */}
<Heading id="request" as="h2" className="openapi-tabs__heading" children="Request" />
49
{/* This endpoint declares no path/query/header/cookie parameters */}
<ParamsDetails parameters={undefined} />
55
56<RequestSchema
57 title={"Body"}
58 body={{"description":"Create a chat completion request using a language model.","content":{"application/json":{"schema":{"type":"object","required":["messages","model"],"properties":{"audio":{"oneOf":[{"type":"null"},{"description":"Parameters for audio output. Required when audio output is requested with\n`modalities: [\"audio\"]`. [Learn more](https://platform.openai.com/docs/guides/audio).","type":"object","required":["voice","format"],"properties":{"format":{"description":"Specifies the output audio format. Must be one of `wav`, `aac`, `mp3`, `flac`, `opus`, or `pcm16`.","type":"string","enum":["wav","aac","mp3","flac","opus","pcm16"],"title":"ChatCompletionAudioFormat"},"voice":{"description":"The voice the model uses to respond. Supported voices are\n`alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.","oneOf":[{"type":"string","enum":["alloy"]},{"type":"string","enum":["ash"]},{"type":"string","enum":["ballad"]},{"type":"string","enum":["coral"]},{"type":"string","enum":["echo"]},{"type":"string","enum":["fable"]},{"type":"string","enum":["nova"]},{"type":"string","enum":["onyx"]},{"type":"string","enum":["sage"]},{"type":"string","enum":["shimmer"]},{"type":"object","required":["other"],"properties":{"other":{"type":"string"}}}],"title":"ChatCompletionAudioVoice"}},"title":"ChatCompletionAudio"}]},"frequency_penalty":{"type":["number","null"],"format":"float","description":"Number between -2.0 and 2.0. 
Positive values penalize new tokens based on\ntheir existing frequency in the text so far, decreasing the model's\nlikelihood to repeat the same line verbatim."},"function_call":{"oneOf":[{"type":"null"},{"description":"Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n`none` means the model will not call a function and instead generates a message.\n`auto` means the model can pick between generating a message or calling a function.\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n\n`none` is the default when no functions are present. `auto` is the default if functions are present.","oneOf":[{"type":"string","description":"The model does not call a function, and responds to the end-user.","enum":["none"]},{"type":"string","description":"The model can pick between an end-user or calling a function.","enum":["auto"]},{"type":"object","description":"Forces the model to call the specified function.","required":["Function"],"properties":{"Function":{"type":"object","description":"Forces the model to call the specified function.","required":["name"],"properties":{"name":{"type":"string"}}}}}],"title":"ChatCompletionFunctionCall"}]},"functions":{"type":["array","null"],"items":{"type":"object","required":["name","parameters"],"properties":{"description":{"type":["string","null"],"description":"A description of what the function does, used by the model to choose when and how to call the function."},"name":{"type":"string","description":"The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."},"parameters":{"description":"The parameters the functions accepts, described as a JSON Schema object. 
See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nOmitting `parameters` defines a function with an empty parameter list."}},"title":"ChatCompletionFunctions"},"description":"Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.","deprecated":true},"logit_bias":{"type":["object","null"],"description":"Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.\nMathematically, the bias is added to the logits generated by the model prior to sampling.\nThe exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\nvalues like -100 or 100 should result in a ban or exclusive selection of the relevant token.","additionalProperties":{"type":"integer","format":"int32"},"propertyNames":{"type":"string"}},"logprobs":{"type":["boolean","null"],"description":"Whether to return log probabilities of the output tokens or not. If true,\nreturns the log probabilities of each output token returned in the `content` of `message`."},"max_completion_tokens":{"type":["integer","null"],"format":"int32","description":"An upper bound for the number of tokens that can be generated for a completion, including\nvisible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).","minimum":0},"max_tokens":{"type":["integer","null"],"format":"int32","description":"The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in\nthe chat completion. 
This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.\nThis value is now deprecated in favor of `max_completion_tokens`, and is\nnot compatible with [o-series models](https://platform.openai.com/docs/guides/reasoning).","deprecated":true,"minimum":0},"messages":{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["content"],"properties":{"content":{"description":"The contents of the developer message.","oneOf":[{"type":"string"},{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}}]}],"title":"ChatCompletionRequestDeveloperMessageContentPart"}}],"title":"ChatCompletionRequestDeveloperMessageContent"},"name":{"type":["string","null"],"description":"An optional name for the participant. Provides the model information to differentiate between participants of the same role."}},"title":"ChatCompletionRequestDeveloperMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["developer"]}}}]},{"allOf":[{"type":"object","required":["content"],"properties":{"content":{"description":"The contents of the system message.","oneOf":[{"type":"string","description":"The text contents of the system message."},{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}}]}],"title":"ChatCompletionRequestSystemMessageContentPart"},"description":"An array of content parts with a defined type. 
For system messages, only type `text` is supported."}],"title":"ChatCompletionRequestSystemMessageContent"},"name":{"type":["string","null"],"description":"An optional name for the participant. Provides the model information to differentiate between participants of the same role."}},"title":"ChatCompletionRequestSystemMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["system"]}}}]},{"allOf":[{"type":"object","required":["content"],"properties":{"content":{"description":"The contents of the user message.","oneOf":[{"type":"string","description":"The text contents of the message."},{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}}]},{"allOf":[{"type":"object","required":["image_url"],"properties":{"image_url":{"type":"object","required":["url"],"properties":{"detail":{"oneOf":[{"type":"null"},{"description":"Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).","type":"string","enum":["auto","low","high"],"title":"ImageDetail"}]},"url":{"type":"string","description":"Either a URL of the image or the base64 encoded image data."}},"title":"ImageUrl"}},"title":"ChatCompletionRequestMessageContentPartImage"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["image_url"]}}}]},{"allOf":[{"type":"object","description":"Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).","required":["input_audio"],"properties":{"input_audio":{"type":"object","required":["data","format"],"properties":{"data":{"type":"string","description":"Base64 encoded audio data."},"format":{"description":"The format of the encoded audio data. 
Currently supports \"wav\" and \"mp3\".","type":"string","enum":["wav","mp3"],"title":"InputAudioFormat"}},"title":"InputAudio"}},"title":"ChatCompletionRequestMessageContentPartAudio"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["input_audio"]}}}]},{"allOf":[{"type":"object","required":["file"],"properties":{"file":{"type":"object","properties":{"file_data":{"type":["string","null"],"description":"The base64 encoded file data, used when passing the file to the model\nas a string."},"file_id":{"type":["string","null"],"description":"The ID of an uploaded file to use as input."},"filename":{"type":["string","null"],"description":"The name of the file, used when passing the file to the model as a\nstring."}},"title":"FileObject"}},"title":"ChatCompletionRequestMessageContentPartFile"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["file"]}}}]}],"title":"ChatCompletionRequestUserMessageContentPart"},"description":"An array of content parts with a defined type. Supported options differ based on the [model](https://platform.openai.com/docs/models) being used to generate the response. Can contain text, image, or audio inputs."}],"title":"ChatCompletionRequestUserMessageContent"},"name":{"type":["string","null"],"description":"An optional name for the participant. 
Provides the model information to differentiate between participants of the same role."}},"title":"ChatCompletionRequestUserMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["user"]}}}]},{"allOf":[{"type":"object","properties":{"audio":{"oneOf":[{"type":"null"},{"description":"Data about a previous audio response from the model.\n[Learn more](https://platform.openai.com/docs/guides/audio).","type":"object","required":["id"],"properties":{"id":{"type":"string","description":"Unique identifier for a previous audio response from the model."}},"title":"ChatCompletionRequestAssistantMessageAudio"}]},"content":{"oneOf":[{"type":"null"},{"description":"The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.","oneOf":[{"type":"string","description":"The text contents of the message."},{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}}]},{"allOf":[{"type":"object","required":["refusal"],"properties":{"refusal":{"type":"string","description":"The refusal message generated by the model."}},"title":"ChatCompletionRequestMessageContentPartRefusal"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["refusal"]}}}]}],"title":"ChatCompletionRequestAssistantMessageContentPart"},"description":"An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`."}],"title":"ChatCompletionRequestAssistantMessageContent"}]},"function_call":{"oneOf":[{"type":"null"},{"description":"Deprecated and replaced by `tool_calls`. 
The name and arguments of a function that should be called, as generated by the model.","type":"object","required":["name","arguments"],"properties":{"arguments":{"type":"string","description":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."},"name":{"type":"string","description":"The name of the function to call."}},"title":"FunctionCall"}]},"name":{"type":["string","null"],"description":"An optional name for the participant. Provides the model information to differentiate between participants of the same role."},"refusal":{"type":["string","null"],"description":"The refusal message by the assistant."},"tool_calls":{"type":["array","null"],"items":{"oneOf":[{"allOf":[{"type":"object","required":["id","function"],"properties":{"function":{"description":"The function that the model called.","type":"object","required":["name","arguments"],"properties":{"arguments":{"type":"string","description":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. 
Validate the arguments in your code before calling your function."},"name":{"type":"string","description":"The name of the function to call."}},"title":"FunctionCall"},"id":{"type":"string","description":"The ID of the tool call."}},"title":"ChatCompletionMessageToolCall"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["function"]}}}]},{"allOf":[{"type":"object","required":["id","custom_tool"],"properties":{"custom_tool":{"description":"The custom tool that the model called.","type":"object","required":["name","input"],"properties":{"input":{"type":"string","description":"The input for the custom tool call generated by the model."},"name":{"type":"string","description":"The name of the custom tool to call."}},"title":"CustomTool"},"id":{"type":"string","description":"The ID of the tool call."}},"title":"ChatCompletionMessageCustomToolCall"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["custom"]}}}]}],"title":"ChatCompletionMessageToolCalls"}}},"title":"ChatCompletionRequestAssistantMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["assistant"]}}}]},{"allOf":[{"type":"object","description":"Tool message","required":["content","tool_call_id"],"properties":{"content":{"description":"The contents of the tool message.","oneOf":[{"type":"string","description":"The text contents of the tool message."},{"type":"array","items":{"oneOf":[{"allOf":[{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}}]}],"title":"ChatCompletionRequestToolMessageContentPart"},"description":"An array of content parts with a defined type. 
For tool messages, only type `text` is supported."}],"title":"ChatCompletionRequestToolMessageContent"},"tool_call_id":{"type":"string"}},"title":"ChatCompletionRequestToolMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["tool"]}}}]},{"allOf":[{"type":"object","required":["name"],"properties":{"content":{"type":["string","null"],"description":"The return value from the function call, to return to the model."},"name":{"type":"string","description":"The name of the function to call."}},"title":"ChatCompletionRequestFunctionMessage"},{"type":"object","required":["role"],"properties":{"role":{"type":"string","enum":["function"]}}}]}],"title":"ChatCompletionRequestMessage"},"description":"A list of messages comprising the conversation so far. Depending on the\n[model](https://platform.openai.com/docs/models) you use, different message types (modalities)\nare supported, like [text](https://platform.openai.com/docs/guides/text-generation),\n[images](https://platform.openai.com/docs/guides/vision), and\n[audio](https://platform.openai.com/docs/guides/audio)."},"metadata":{"oneOf":[{"type":"null"},{"description":"Developer-defined tags and values used for filtering completions in the [dashboard](https://platform.openai.com/chat-completions).","type":"object","required":["supports_responses_api"],"properties":{"supports_responses_api":{"type":"boolean"}},"title":"Metadata"}]},"modalities":{"type":["array","null"],"items":{"type":"string","description":"Output types that you would like the model to generate for this request.\n\nMost models are capable of generating text, which is the default: `[\"text\"]`\n\nThe `gpt-4o-audio-preview` model can also be used to [generate\naudio](https://platform.openai.com/docs/guides/audio). 
To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]`","enum":["text","audio"],"title":"ResponseModalities"},"description":"Output types that you would like the model to generate. Most models are capable of generating\ntext, which is the default:\n\n`[\"text\"]`\nThe `gpt-4o-audio-preview` model can also be used to\n[generate audio](https://platform.openai.com/docs/guides/audio). To request that this model\ngenerate both text and audio responses, you can use:\n\n`[\"text\", \"audio\"]`"},"model":{"type":"string","description":"Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI\noffers a wide range of models with different capabilities, performance\ncharacteristics, and price points. Refer to the\n[model guide](https://platform.openai.com/docs/models)\nto browse and compare available models."},"n":{"type":["integer","null"],"format":"int32","description":"How many chat completion choices to generate for each input message. Note that you will be\ncharged based on the number of generated tokens across all of the choices. Keep `n` as `1` to\nminimize costs.","minimum":0},"parallel_tool_calls":{"type":["boolean","null"],"description":"Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)\nduring tool use."},"prediction":{"oneOf":[{"type":"null"},{"description":"Configuration for a [Predicted Output](https://platform.openai.com/docs/guides/predicted-outputs),\nwhich can greatly improve response times when large parts of the model\nresponse are known ahead of time. This is most common when you are\nregenerating a file with only minor changes to most of the content.","oneOf":[{"type":"object","description":"The type of the predicted content you want to provide. 
This type is\ncurrently always `content`.","required":["content","type"],"properties":{"content":{"description":"The type of the predicted content you want to provide. This type is\ncurrently always `content`.","oneOf":[{"type":"string","description":"The content used for a Predicted Output. This is often the text of a file you are regenerating with minor changes."},{"type":"array","items":{"type":"object","required":["text"],"properties":{"text":{"type":"string"}},"title":"ChatCompletionRequestMessageContentPartText"},"description":"An array of content parts with a defined type. Supported options differ based on the [model](https://platform.openai.com/docs/models) being used to generate the response. Can contain text inputs."}],"title":"PredictionContentContent"},"type":{"type":"string","enum":["content"]}}}],"title":"PredictionContent"}]},"presence_penalty":{"type":["number","null"],"format":"float","description":"Number between -2.0 and 2.0. Positive values penalize new tokens based on\nwhether they appear in the text so far, increasing the model's likelihood\nto talk about new topics."},"prompt_cache_key":{"type":["string","null"],"description":"Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces\nthe `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching)."},"reasoning_effort":{"oneOf":[{"type":"null"},{"description":"Constrains effort on reasoning for\n[reasoning models](https://platform.openai.com/docs/guides/reasoning).\nCurrently supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing\nreasoning effort can result in faster responses and fewer tokens used\non reasoning in a response.\nNote: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.","type":"string","enum":["none","minimal","low","medium","high","xhigh"],"title":"ReasoningEffort"}]},"response_format":{"oneOf":[{"type":"null"},{"description":"An object specifying the format that the model must output.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. Using `json_schema`\nis preferred for models that support it.","oneOf":[{"type":"object","description":"The type of response format being defined: `text`","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}},{"type":"object","description":"The type of response format being defined: `json_object`","required":["type"],"properties":{"type":{"type":"string","enum":["json_object"]}}},{"type":"object","description":"The type of response format being defined: `json_schema`","required":["json_schema","type"],"properties":{"json_schema":{"type":"object","required":["name"],"properties":{"description":{"type":["string","null"],"description":"A description of what the response format is for, used by the model to determine how to respond in the format."},"name":{"type":"string","description":"The name of the response format. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."},"schema":{"description":"The schema for the response format, described as a JSON Schema object.\nLearn how to build JSON schemas [here](https://json-schema.org/)."},"strict":{"type":["boolean","null"],"description":"Whether to enable strict schema adherence when generating the output.\nIf set to true, the model will always follow the exact schema defined\nin the `schema` field. Only a subset of JSON Schema is supported when\n`strict` is `true`. To learn more, read the [Structured Outputs\nguide](https://platform.openai.com/docs/guides/structured-outputs)."}},"title":"ResponseFormatJsonSchema"},"type":{"type":"string","enum":["json_schema"]}}}],"title":"ResponseFormat"}]},"safety_identifier":{"type":["string","null"],"description":"A stable identifier used to help detect users of your application that may be violating OpenAI's\nusage policies.\n\nThe IDs should be a string that uniquely identifies each user. We recommend hashing their username\nor email address, in order to avoid sending us any identifying information. [Learn\nmore](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers)."},"seed":{"type":["integer","null"],"format":"int64","description":"This feature is in Beta.\n\nIf specified, our system will make a best effort to sample deterministically, such that\nrepeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response\nparameter to monitor changes in the backend.","deprecated":true},"service_tier":{"oneOf":[{"type":"null"},{"description":"Specifies the processing type used for serving the request.\n- If set to 'auto', then the request will be processed with the service tier configured in the Project settings. 
Unless otherwise configured, the Project will use 'default'.\n- If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.\n- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.\n- When not set, the default behavior is 'auto'.\n\nWhen the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.","type":"string","enum":["auto","default","flex","scale","priority"],"title":"ServiceTier"}]},"stop":{"oneOf":[{"type":"null"},{"description":"Not supported with latest reasoning models `o3` and `o4-mini`.\n\nUp to 4 sequences where the API will stop generating further tokens. The\nreturned text will not contain the stop sequence.","oneOf":[{"type":"string"},{"type":"array","items":{"type":"string"}}],"title":"StopConfiguration"}]},"store":{"type":["boolean","null"],"description":"Whether or not to store the output of this chat completion request for\nuse in our [model distillation](https://platform.openai.com/docs/guides/distillation) or\n[evals](https://platform.openai.com/docs/guides/evals) products.\n\nSupports text and image inputs. 
Note: image inputs over 8MB will be dropped."},"stream":{"type":["boolean","null"],"description":"If set to true, the model response data will be streamed to the client\nas it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\nSee the [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)\nfor more information, along with the [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)\nguide for more information on how to handle the streaming events."},"stream_options":{"oneOf":[{"type":"null"},{"type":"object","description":"Options for streaming response. Only set this when you set `stream: true`.","properties":{"include_obfuscation":{"type":["boolean","null"],"description":"When true, stream obfuscation will be enabled. Stream obfuscation adds\nrandom characters to an `obfuscation` field on streaming delta events to\nnormalize payload sizes as a mitigation to certain side-channel attacks.\nThese obfuscation fields are included by default, but add a small amount\nof overhead to the data stream. You can set `include_obfuscation` to\nfalse to optimize for bandwidth if you trust the network links between\nyour application and the OpenAI API."},"include_usage":{"type":["boolean","null"],"description":"If set, an additional chunk will be streamed before the `data: [DONE]`\nmessage. The `usage` field on this chunk shows the token usage statistics\nfor the entire request, and the `choices` field will always be an empty\narray.\n\nAll other chunks will also include a `usage` field, but with a null\nvalue. **NOTE:** If the stream is interrupted, you may not receive the\nfinal usage chunk which contains the total token usage for the request."}},"title":"ChatCompletionStreamOptions"}]},"temperature":{"type":["number","null"],"format":"float","description":"What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random,\nwhile lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both."},"tool_choice":{"oneOf":[{"type":"null"},{"description":"Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces\nthe model to call that tool.\n`none` is the default when no tools are present. `auto` is the default if tools are present.","oneOf":[{"allOf":[{"type":"object","required":["allowed_tools"],"properties":{"allowed_tools":{"type":"array","items":{"type":"object","required":["mode","tools"],"properties":{"mode":{"description":"Constrains the tools available to the model to a pre-defined set.\n\n`auto` allows the model to pick from among the allowed tools and generate a\nmessage.\n\n`required` requires the model to call one or more of the allowed tools.","type":"string","enum":["auto","required"],"title":"ToolChoiceAllowedMode"},"tools":{"type":"array","items":{},"description":"A list of tool definitions that the model should be allowed to call.\n\nFor the Chat Completions API, the list of tool definitions might look like:\n```json\n[\n { \"type\": \"function\", \"function\": { \"name\": \"get_weather\" } },\n { \"type\": \"function\", \"function\": { \"name\": \"get_time\" } }\n]\n```"}},"title":"ChatCompletionAllowedTools"}}},"title":"ChatCompletionAllowedToolsChoice"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["allowed_tools"]}}}]},{"allOf":[{"type":"object","description":"Specifies a tool the model should use. 
Use to force the model to call a specific function.","required":["function"],"properties":{"function":{"type":"object","required":["name"],"properties":{"name":{"type":"string","description":"The name of the function to call."}},"title":"FunctionName"}},"title":"ChatCompletionNamedToolChoice"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["function"]}}}]},{"allOf":[{"type":"object","required":["custom"],"properties":{"custom":{"type":"object","required":["name"],"properties":{"name":{"type":"string","description":"The name of the custom tool to call."}},"title":"CustomName"}},"title":"ChatCompletionNamedToolChoiceCustom"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["custom"]}}}]},{"allOf":[{"type":"string","enum":["none","auto","required"],"title":"ToolChoiceOptions"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["mode"]}}}]}],"title":"ChatCompletionToolChoiceOption"}]},"tools":{"type":["array","null"],"items":{"oneOf":[{"allOf":[{"description":"A function tool that can be used to generate a response.","type":"object","required":["function"],"properties":{"function":{"type":"object","required":["name"],"properties":{"description":{"type":["string","null"],"description":"A description of what the function does, used by the model to choose when and how to call the function."},"name":{"type":"string","description":"The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64."},"parameters":{"description":"The parameters the functions accepts, described as a JSON Schema object. 
See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nOmitting `parameters` defines a function with an empty parameter list."},"strict":{"type":["boolean","null"],"description":"Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](https://platform.openai.com/docs/guides/function-calling)."}},"title":"FunctionObject"}},"title":"ChatCompletionTool"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["function"]}}}],"description":"A function tool that can be used to generate a response."},{"allOf":[{"description":"A custom tool that processes input using a specified format.","type":"object","required":["custom"],"properties":{"custom":{"type":"object","required":["name","format"],"properties":{"description":{"type":["string","null"],"description":"Optional description of the custom tool, used to provide more context."},"format":{"description":"The input format for the custom tool. Default is unconstrained text.","oneOf":[{"type":"object","description":"Unconstrained free-form text.","required":["type"],"properties":{"type":{"type":"string","enum":["text"]}}},{"type":"object","description":"A grammar defined by the user.","required":["grammar","type"],"properties":{"grammar":{"type":"object","required":["definition","syntax"],"properties":{"definition":{"type":"string","description":"The grammar definition."},"syntax":{"description":"The syntax of the grammar definition. 
One of `lark` or `regex`.","type":"string","enum":["lark","regex"],"title":"GrammarSyntax"}},"title":"CustomGrammarFormatParam"},"type":{"type":"string","enum":["grammar"]}}}],"title":"CustomToolPropertiesFormat"},"name":{"type":"string","description":"The name of the custom tool, used to identify it in tool calls."}},"title":"CustomToolProperties"}},"title":"CustomToolChatCompletions"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["custom"]}}}],"description":"A custom tool that processes input using a specified format."}],"title":"ChatCompletionTools"},"description":"A list of tools the model may call. You can provide either\n[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) or\n[function tools](https://platform.openai.com/docs/guides/function-calling)."},"top_logprobs":{"type":["integer","null"],"format":"int32","description":"An integer between 0 and 20 specifying the number of most likely tokens to\nreturn at each token position, each with an associated log probability.\n`logprobs` must be set to `true` if this parameter is used.","minimum":0},"top_p":{"type":["number","null"],"format":"float","description":"An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability mass.\nSo 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\n We generally recommend altering this or `temperature` but not both."},"user":{"type":["string","null"],"description":"This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use `prompt_cache_key`\ninstead to maintain caching optimizations.\nA stable identifier for your end-users.\nUsed to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and\nprevent abuse. 
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).","deprecated":true},"verbosity":{"oneOf":[{"type":"null"},{"description":"Constrains the verbosity of the model's response. Lower values will result in\nmore concise responses, while higher values will result in more verbose responses.\nCurrently supported values are `low`, `medium`, and `high`.","type":"string","enum":["low","medium","high"],"title":"Verbosity"}]},"web_search_options":{"oneOf":[{"type":"null"},{"description":"This tool searches the web for relevant results to use in a response.\nLearn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).","type":"object","properties":{"search_context_size":{"oneOf":[{"type":"null"},{"description":"High level guidance for the amount of context window space to use for the search. One of `low`, `medium`, or `high`. `medium` is the default.","type":"string","enum":["low","medium","high"],"title":"WebSearchContextSize"}]},"user_location":{"oneOf":[{"type":"null"},{"description":"Approximate location parameters for the search.","type":"object","required":["type","approximate"],"properties":{"approximate":{"type":"object","description":"Approximate location parameters for the search.","properties":{"city":{"type":["string","null"],"description":"Free text input for the city of the user, e.g. `San Francisco`."},"country":{"type":["string","null"],"description":"The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user, e.g. `US`."},"region":{"type":["string","null"],"description":"Free text input for the region of the user, e.g. `California`."},"timezone":{"type":["string","null"],"description":"The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g. 
`America/Los_Angeles`."}},"title":"WebSearchLocation"},"type":{"type":"string","enum":["approximate"],"title":"WebSearchUserLocationType"}},"title":"WebSearchUserLocation"}]}},"title":"WebSearchOptions"}]}},"title":"CreateChatCompletionRequest"},"example":{"model":"gpt-4o","messages":[{"role":"developer","content":"You are a helpful assistant."},{"role":"user","content":"Hello!"}],"stream":false}}},"required":true}}59>

</RequestSchema>

<StatusCodes
  id={undefined}
  label={undefined}
66 responses={{"200":{"description":"Chat completion generated successfully","content":{"application/json":{"schema":{"type":"object","description":"Represents a chat completion response returned by model, based on the provided input.","required":["id","choices","created","model","object"],"properties":{"choices":{"type":"array","items":{"type":"object","required":["index","message"],"properties":{"finish_reason":{"oneOf":[{"type":"null"},{"description":"The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.","type":"string","enum":["stop","length","tool_calls","content_filter","function_call"],"title":"FinishReason"}]},"index":{"type":"integer","format":"int32","description":"The index of the choice in the list of choices.","minimum":0},"logprobs":{"oneOf":[{"type":"null"},{"description":"Log probability information for the choice.","type":"object","properties":{"content":{"type":["array","null"],"items":{"type":"object","required":["token","logprob","top_logprobs"],"properties":{"bytes":{"type":["array","null"],"items":{"type":"integer","format":"int32","minimum":0},"description":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token."},"logprob":{"type":"number","format":"float","description":"The log probability of this token, if it is within the top 20 most likely tokens. 
Otherwise, the value `-9999.0` is used to signify that the token is very unlikely."},"token":{"type":"string","description":"The token."},"top_logprobs":{"type":"array","items":{"type":"object","required":["token","logprob"],"properties":{"bytes":{"type":["array","null"],"items":{"type":"integer","format":"int32","minimum":0},"description":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token."},"logprob":{"type":"number","format":"float","description":"The log probability of this token."},"token":{"type":"string","description":"The token."}},"title":"TopLogprobs"},"description":"List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned."}},"title":"ChatCompletionTokenLogprob"},"description":"A list of message content tokens with log probability information."},"refusal":{"type":["array","null"],"items":{"type":"object","required":["token","logprob","top_logprobs"],"properties":{"bytes":{"type":["array","null"],"items":{"type":"integer","format":"int32","minimum":0},"description":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token."},"logprob":{"type":"number","format":"float","description":"The log probability of this token, if it is within the top 20 most likely tokens. 
Otherwise, the value `-9999.0` is used to signify that the token is very unlikely."},"token":{"type":"string","description":"The token."},"top_logprobs":{"type":"array","items":{"type":"object","required":["token","logprob"],"properties":{"bytes":{"type":["array","null"],"items":{"type":"integer","format":"int32","minimum":0},"description":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token."},"logprob":{"type":"number","format":"float","description":"The log probability of this token."},"token":{"type":"string","description":"The token."}},"title":"TopLogprobs"},"description":"List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned."}},"title":"ChatCompletionTokenLogprob"}}},"title":"ChatChoiceLogprobs"}]},"message":{"type":"object","description":"A chat completion message generated by the model.","required":["role"],"properties":{"annotations":{"type":["array","null"],"items":{"oneOf":[{"type":"object","required":["url_citation","type"],"properties":{"type":{"type":"string","enum":["url_citation"]},"url_citation":{"type":"object","required":["end_index","start_index","title","url"],"properties":{"end_index":{"type":"integer","format":"int32","description":"The index of the last character of the URL citation in the message.","minimum":0},"start_index":{"type":"integer","format":"int32","description":"The index of the first character of the URL citation in the message.","minimum":0},"title":{"type":"string","description":"The title of the web resource."},"url":{"type":"string","description":"The URL of the web 
resource."}},"title":"UrlCitation"}}}],"title":"ChatCompletionResponseMessageAnnotation"}},"audio":{"oneOf":[{"type":"null"},{"description":"If the audio output modality is requested, this object contains data about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio).","type":"object","required":["id","expires_at","data","transcript"],"properties":{"data":{"type":"string","description":"Base64 encoded audio bytes generated by the model, in the format specified in the request."},"expires_at":{"type":"integer","format":"int64","description":"The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations.","minimum":0},"id":{"type":"string","description":"Unique identifier for this audio response."},"transcript":{"type":"string","description":"Transcript of the audio generated by the model."}},"title":"ChatCompletionResponseMessageAudio"}]},"content":{"type":["string","null"],"description":"The contents of the message."},"function_call":{"oneOf":[{"type":"null"},{"description":"Deprecated and replaced by `tool_calls`.\nThe name and arguments of a function that should be called, as generated by the model.","type":"object","required":["name","arguments"],"properties":{"arguments":{"type":"string","description":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. 
Validate the arguments in your code before calling your function."},"name":{"type":"string","description":"The name of the function to call."}},"title":"FunctionCall"}]},"refusal":{"type":["string","null"],"description":"The refusal message generated by the model."},"role":{"description":"The role of the author of this message.","type":"string","enum":["system","user","assistant","tool","function"],"title":"Role"},"tool_calls":{"type":["array","null"],"items":{"oneOf":[{"allOf":[{"type":"object","required":["id","function"],"properties":{"function":{"description":"The function that the model called.","type":"object","required":["name","arguments"],"properties":{"arguments":{"type":"string","description":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."},"name":{"type":"string","description":"The name of the function to call."}},"title":"FunctionCall"},"id":{"type":"string","description":"The ID of the tool call."}},"title":"ChatCompletionMessageToolCall"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["function"]}}}]},{"allOf":[{"type":"object","required":["id","custom_tool"],"properties":{"custom_tool":{"description":"The custom tool that the model called.","type":"object","required":["name","input"],"properties":{"input":{"type":"string","description":"The input for the custom tool call generated by the model."},"name":{"type":"string","description":"The name of the custom tool to call."}},"title":"CustomTool"},"id":{"type":"string","description":"The ID of the tool call."}},"title":"ChatCompletionMessageCustomToolCall"},{"type":"object","required":["type"],"properties":{"type":{"type":"string","enum":["custom"]}}}]}],"title":"ChatCompletionMessageToolCalls"},"description":"The tool calls generated by the 
model, such as function calls."}},"title":"ChatCompletionResponseMessage"}},"title":"ChatChoice"},"description":"A list of chat completion choices. Can be more than one if `n` is greater than 1."},"created":{"type":"integer","format":"int32","description":"The Unix timestamp (in seconds) of when the chat completion was created.","minimum":0},"id":{"type":"string","description":"A unique identifier for the chat completion."},"model":{"type":"string","description":"The model used for the chat completion."},"object":{"type":"string","description":"The object type, which is always `chat.completion`."},"service_tier":{"oneOf":[{"type":"null"},{"description":"The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request.","type":"string","enum":["auto","default","flex","scale","priority"],"title":"ServiceTier"}]},"system_fingerprint":{"type":["string","null"],"description":"This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.","deprecated":true},"usage":{"oneOf":[{"type":"null"},{"type":"object","description":"Usage statistics for the completion request.","required":["prompt_tokens","completion_tokens","total_tokens"],"properties":{"completion_tokens":{"type":"integer","format":"int32","description":"Number of tokens in the generated completion.","minimum":0},"completion_tokens_details":{"oneOf":[{"type":"null"},{"description":"Breakdown of tokens used in a completion.","type":"object","properties":{"accepted_prediction_tokens":{"type":["integer","null"],"format":"int32","minimum":0},"audio_tokens":{"type":["integer","null"],"format":"int32","description":"Audio input tokens generated by the model.","minimum":0},"reasoning_tokens":{"type":["integer","null"],"format":"int32","description":"Tokens generated by the model for 
reasoning.","minimum":0},"rejected_prediction_tokens":{"type":["integer","null"],"format":"int32","description":" When using Predicted Outputs, the number of tokens in the\nprediction that did not appear in the completion. However, like\nreasoning tokens, these tokens are still counted in the total\ncompletion tokens for purposes of billing, output, and context\nwindow limits.","minimum":0}},"title":"CompletionTokensDetails"}]},"prompt_tokens":{"type":"integer","format":"int32","description":"Number of tokens in the prompt.","minimum":0},"prompt_tokens_details":{"oneOf":[{"type":"null"},{"description":"Breakdown of tokens used in the prompt.","type":"object","properties":{"audio_tokens":{"type":["integer","null"],"format":"int32","description":"Audio input tokens present in the prompt.","minimum":0},"cached_tokens":{"type":["integer","null"],"format":"int32","description":"Cached tokens present in the prompt.","minimum":0}},"title":"PromptTokensDetails"}]},"total_tokens":{"type":"integer","format":"int32","description":"Total number of tokens used in the request (prompt + completion).","minimum":0}},"title":"CompletionUsage"}]}},"title":"CreateChatCompletionResponse"},"example":{"id":"chatcmpl-123","object":"chat.completion","created":1677652288,"model":"gpt-4o-mini","system_fingerprint":"fp_44709d6fcb","choices":[{"index":0,"message":{"role":"assistant","content":"\n\nHello there, how may I assist you today?"},"logprobs":null,"finish_reason":"stop"}],"usage":{"prompt_tokens":9,"completion_tokens":12,"total_tokens":21,"completion_tokens_details":{"reasoning_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}}}}}},"404":{"description":"The specified model was not found"},"500":{"description":"An internal server error occurred while processing the chat completion","content":{"application/json":{"schema":{},"example":{"error":"An internal server error occurred while processing the chat completion."}}}}}}67>

</StatusCodes>

