Spaces:
Runtime error
Runtime error
| openapi: 3.0.0 | |
| info: | |
| title: OpenAI API | |
| description: APIs for sampling from and fine-tuning language models | |
| version: '1.1.0' | |
| servers: | |
| - url: https://api.openai.com/v1 | |
| tags: | |
| - name: OpenAI | |
| description: The OpenAI REST API | |
| paths: | |
| /engines: | |
| get: | |
| operationId: listEngines | |
| deprecated: true | |
| tags: | |
| - OpenAI | |
| summary: Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/ListEnginesResponse' | |
| x-oaiMeta: | |
| name: List engines | |
| group: engines | |
| path: list | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/engines \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Engine.list() | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.listEngines(); | |
| response: | | |
| { | |
| "data": [ | |
| { | |
| "id": "engine-id-0", | |
| "object": "engine", | |
| "owner": "organization-owner", | |
| "ready": true | |
| }, | |
| { | |
| "id": "engine-id-2", | |
| "object": "engine", | |
| "owner": "organization-owner", | |
| "ready": true | |
| }, | |
| { | |
| "id": "engine-id-3", | |
| "object": "engine", | |
| "owner": "openai", | |
| "ready": false | |
| } | |
| ], | |
| "object": "list" | |
| } | |
| /engines/{engine_id}: | |
| get: | |
| operationId: retrieveEngine | |
| deprecated: true | |
| tags: | |
| - OpenAI | |
| summary: Retrieves a model instance, providing basic information about it such as the owner and availability. | |
| parameters: | |
| - in: path | |
| name: engine_id | |
| required: true | |
| schema: | |
| type: string | |
| # ideally this will be an actual ID, so this will always work from browser | |
| example: | |
| davinci | |
| description: &engine_id_description > | |
| The ID of the engine to use for this request | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/Engine' | |
| x-oaiMeta: | |
| name: Retrieve engine | |
| group: engines | |
| path: retrieve | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/engines/VAR_model_id \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Engine.retrieve("VAR_model_id") | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.retrieveEngine("VAR_model_id"); | |
| response: | | |
| { | |
| "id": "VAR_model_id", | |
| "object": "engine", | |
| "owner": "openai", | |
| "ready": true | |
| } | |
| /completions: | |
| post: | |
| operationId: createCompletion | |
| tags: | |
| - OpenAI | |
| summary: Creates a completion for the provided prompt and parameters | |
| requestBody: | |
| required: true | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateCompletionRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateCompletionResponse' | |
| x-oaiMeta: | |
| name: Create completion | |
| group: completions | |
| path: create | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/completions \ | |
| -H 'Content-Type: application/json' \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' \ | |
| -d '{ | |
| "model": "VAR_model_id", | |
| "prompt": "Say this is a test", | |
| "max_tokens": 7, | |
| "temperature": 0 | |
| }' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Completion.create( | |
| model="VAR_model_id", | |
| prompt="Say this is a test", | |
| max_tokens=7, | |
| temperature=0 | |
| ) | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createCompletion({ | |
| model: "VAR_model_id", | |
| prompt: "Say this is a test", | |
| max_tokens: 7, | |
| temperature: 0, | |
| }); | |
| parameters: | | |
| { | |
| "model": "VAR_model_id", | |
| "prompt": "Say this is a test", | |
| "max_tokens": 7, | |
| "temperature": 0, | |
| "top_p": 1, | |
| "n": 1, | |
| "stream": false, | |
| "logprobs": null, | |
| "stop": "\n" | |
| } | |
| response: | | |
| { | |
| "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", | |
| "object": "text_completion", | |
| "created": 1589478378, | |
| "model": "VAR_model_id", | |
| "choices": [ | |
| { | |
| "text": "\n\nThis is indeed a test", | |
| "index": 0, | |
| "logprobs": null, | |
| "finish_reason": "length" | |
| } | |
| ], | |
| "usage": { | |
| "prompt_tokens": 5, | |
| "completion_tokens": 7, | |
| "total_tokens": 12 | |
| } | |
| } | |
| /edits: | |
| post: | |
| operationId: createEdit | |
| tags: | |
| - OpenAI | |
| summary: Creates a new edit for the provided input, instruction, and parameters | |
| requestBody: | |
| required: true | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateEditRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateEditResponse' | |
| x-oaiMeta: | |
| name: Create edit | |
| group: edits | |
| path: create | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/edits \ | |
| -H 'Content-Type: application/json' \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' \ | |
| -d '{ | |
| "model": "VAR_model_id", | |
| "input": "What day of the wek is it?", | |
| "instruction": "Fix the spelling mistakes" | |
| }' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Edit.create( | |
| model="VAR_model_id", | |
| input="What day of the wek is it?", | |
| instruction="Fix the spelling mistakes" | |
| ) | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createEdit({ | |
| model: "VAR_model_id", | |
| input: "What day of the wek is it?", | |
| instruction: "Fix the spelling mistakes", | |
| }); | |
| parameters: | | |
| { | |
| "model": "VAR_model_id", | |
| "input": "What day of the wek is it?", | |
| "instruction": "Fix the spelling mistakes", | |
| } | |
| response: | | |
| { | |
| "object": "edit", | |
| "created": 1589478378, | |
| "choices": [ | |
| { | |
| "text": "What day of the week is it?", | |
| "index": 0, | |
| } | |
| ], | |
| "usage": { | |
| "prompt_tokens": 25, | |
| "completion_tokens": 32, | |
| "total_tokens": 57 | |
| } | |
| } | |
| /images/generations: | |
| post: | |
| operationId: createImage | |
| tags: | |
| - OpenAI | |
| summary: Creates an image given a prompt. | |
| requestBody: | |
| required: true | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateImageRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/ImagesResponse' | |
| x-oaiMeta: | |
| name: Create image | |
| group: images | |
| path: create | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/images/generations \ | |
| -H 'Content-Type: application/json' \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' \ | |
| -d '{ | |
| "prompt": "A cute baby sea otter", | |
| "n": 2, | |
| "size": "1024x1024" | |
| }' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Image.create( | |
| prompt="A cute baby sea otter", | |
| n=2, | |
| size="1024x1024" | |
| ) | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createImage({ | |
| prompt: "A cute baby sea otter", | |
| n: 2, | |
| size: "1024x1024", | |
| }); | |
| parameters: | | |
| { | |
| "prompt": "A cute baby sea otter", | |
| "n": 2, | |
| "size": "1024x1024" | |
| } | |
| response: | | |
| { | |
| "created": 1589478378, | |
| "data": [ | |
| { | |
| "url": "https://..." | |
| }, | |
| { | |
| "url": "https://..." | |
| } | |
| ] | |
| } | |
| /images/edits: | |
| post: | |
| operationId: createImageEdit | |
| tags: | |
| - OpenAI | |
| summary: Creates an edited or extended image given an original image and a prompt. | |
| requestBody: | |
| required: true | |
| content: | |
| multipart/form-data: | |
| schema: | |
| $ref: '#/components/schemas/CreateImageEditRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/ImagesResponse' | |
| x-oaiMeta: | |
| name: Create image edit | |
| group: images | |
| path: create-edit | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/images/edits \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' \ | |
| -F image='@otter.png' \ | |
| -F mask='@mask.png' \ | |
| -F prompt="A cute baby sea otter wearing a beret" \ | |
| -F n=2 \ | |
| -F size="1024x1024" | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Image.create_edit( | |
| image=open("otter.png", "rb"), | |
| mask=open("mask.png", "rb"), | |
| prompt="A cute baby sea otter wearing a beret", | |
| n=2, | |
| size="1024x1024" | |
| ) | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createImageEdit( | |
| fs.createReadStream("otter.png"), | |
| fs.createReadStream("mask.png"), | |
| "A cute baby sea otter wearing a beret", | |
| 2, | |
| "1024x1024" | |
| ); | |
| response: | | |
| { | |
| "created": 1589478378, | |
| "data": [ | |
| { | |
| "url": "https://..." | |
| }, | |
| { | |
| "url": "https://..." | |
| } | |
| ] | |
| } | |
| /images/variations: | |
| post: | |
| operationId: createImageVariation | |
| tags: | |
| - OpenAI | |
| summary: Creates a variation of a given image. | |
| requestBody: | |
| required: true | |
| content: | |
| multipart/form-data: | |
| schema: | |
| $ref: '#/components/schemas/CreateImageVariationRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/ImagesResponse' | |
| x-oaiMeta: | |
| name: Create image variation | |
| group: images | |
| path: create-variation | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/images/variations \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' \ | |
| -F image='@otter.png' \ | |
| -F n=2 \ | |
| -F size="1024x1024" | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Image.create_variation( | |
| image=open("otter.png", "rb"), | |
| n=2, | |
| size="1024x1024" | |
| ) | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createImageVariation( | |
| fs.createReadStream("otter.png"), | |
| 2, | |
| "1024x1024" | |
| ); | |
| response: | | |
| { | |
| "created": 1589478378, | |
| "data": [ | |
| { | |
| "url": "https://..." | |
| }, | |
| { | |
| "url": "https://..." | |
| } | |
| ] | |
| } | |
| /embeddings: | |
| post: | |
| operationId: createEmbedding | |
| tags: | |
| - OpenAI | |
| summary: Creates an embedding vector representing the input text. | |
| requestBody: | |
| required: true | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateEmbeddingRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateEmbeddingResponse' | |
| x-oaiMeta: | |
| name: Create embeddings | |
| group: embeddings | |
| path: create | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/embeddings \ | |
| -X POST \ | |
| -H "Authorization: Bearer YOUR_API_KEY" \ | |
| -H "Content-Type: application/json" \ | |
| -d '{"input": "The food was delicious and the waiter...", | |
| "model": "text-embedding-ada-002"}' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Embedding.create( | |
| model="text-embedding-ada-002", | |
| input="The food was delicious and the waiter..." | |
| ) | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createEmbedding({ | |
| model: "text-embedding-ada-002", | |
| input: "The food was delicious and the waiter...", | |
| }); | |
| parameters: | | |
| { | |
| "model": "text-embedding-ada-002", | |
| "input": "The food was delicious and the waiter..." | |
| } | |
| response: | | |
| { | |
| "object": "list", | |
| "data": [ | |
| { | |
| "object": "embedding", | |
| "embedding": [ | |
| 0.0023064255, | |
| -0.009327292, | |
| .... (1536 floats total for ada-002) | |
| -0.0028842222, | |
| ], | |
| "index": 0 | |
| } | |
| ], | |
| "model": "text-embedding-ada-002", | |
| "usage": { | |
| "prompt_tokens": 8, | |
| "total_tokens": 8 | |
| } | |
| } | |
| /engines/{engine_id}/search: | |
| post: | |
| operationId: createSearch | |
| deprecated: true | |
| tags: | |
| - OpenAI | |
| summary: | | |
| The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. | |
| To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. | |
| The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. | |
| parameters: | |
| - in: path | |
| name: engine_id | |
| required: true | |
| schema: | |
| type: string | |
| example: davinci | |
| description: The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`. | |
| requestBody: | |
| required: true | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateSearchRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateSearchResponse' | |
| x-oaiMeta: | |
| name: Create search | |
| group: searches | |
| path: create | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/engines/davinci/search \ | |
| -H "Content-Type: application/json" \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' \ | |
| -d '{ | |
| "documents": ["White House", "hospital", "school"], | |
| "query": "the president" | |
| }' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Engine("davinci").search( | |
| documents=["White House", "hospital", "school"], | |
| query="the president" | |
| ) | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createSearch("davinci", { | |
| documents: ["White House", "hospital", "school"], | |
| query: "the president", | |
| }); | |
| parameters: | | |
| { | |
| "documents": [ | |
| "White House", | |
| "hospital", | |
| "school" | |
| ], | |
| "query": "the president" | |
| } | |
| response: | | |
| { | |
| "data": [ | |
| { | |
| "document": 0, | |
| "object": "search_result", | |
| "score": 215.412 | |
| }, | |
| { | |
| "document": 1, | |
| "object": "search_result", | |
| "score": 40.316 | |
| }, | |
| { | |
| "document": 2, | |
| "object": "search_result", | |
| "score": 55.226 | |
| } | |
| ], | |
| "object": "list" | |
| } | |
| /files: | |
| get: | |
| operationId: listFiles | |
| tags: | |
| - OpenAI | |
| summary: Returns a list of files that belong to the user's organization. | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/ListFilesResponse' | |
| x-oaiMeta: | |
| name: List files | |
| group: files | |
| path: list | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/files \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.File.list() | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.listFiles(); | |
| response: | | |
| { | |
| "data": [ | |
| { | |
| "id": "file-ccdDZrC3iZVNiQVeEA6Z66wf", | |
| "object": "file", | |
| "bytes": 175, | |
| "created_at": 1613677385, | |
| "filename": "train.jsonl", | |
| "purpose": "search" | |
| }, | |
| { | |
| "id": "file-XjGxS3KTG0uNmNOK362iJua3", | |
| "object": "file", | |
| "bytes": 140, | |
| "created_at": 1613779121, | |
| "filename": "puppy.jsonl", | |
| "purpose": "search" | |
| } | |
| ], | |
| "object": "list" | |
| } | |
| post: | |
| operationId: createFile | |
| tags: | |
| - OpenAI | |
| summary: | | |
| Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. | |
| requestBody: | |
| required: true | |
| content: | |
| multipart/form-data: | |
| schema: | |
| $ref: '#/components/schemas/CreateFileRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/OpenAIFile' | |
| x-oaiMeta: | |
| name: Upload file | |
| group: files | |
| path: upload | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/files \ | |
| -H "Authorization: Bearer YOUR_API_KEY" \ | |
| -F purpose="fine-tune" \ | |
| -F file='@mydata.jsonl' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.File.create( | |
| file=open("mydata.jsonl", "rb"), | |
| purpose='fine-tune' | |
| ) | |
| node.js: | | |
| const fs = require("fs"); | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createFile( | |
| fs.createReadStream("mydata.jsonl"), | |
| "fine-tune" | |
| ); | |
| response: | | |
| { | |
| "id": "file-XjGxS3KTG0uNmNOK362iJua3", | |
| "object": "file", | |
| "bytes": 140, | |
| "created_at": 1613779121, | |
| "filename": "mydata.jsonl", | |
| "purpose": "fine-tune" | |
| } | |
| /files/{file_id}: | |
| delete: | |
| operationId: deleteFile | |
| tags: | |
| - OpenAI | |
| summary: Delete a file. | |
| parameters: | |
| - in: path | |
| name: file_id | |
| required: true | |
| schema: | |
| type: string | |
| description: The ID of the file to use for this request | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/DeleteFileResponse' | |
| x-oaiMeta: | |
| name: Delete file | |
| group: files | |
| path: delete | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3 \ | |
| -X DELETE \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.File.delete("file-XjGxS3KTG0uNmNOK362iJua3") | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.deleteFile("file-XjGxS3KTG0uNmNOK362iJua3"); | |
| response: | | |
| { | |
| "id": "file-XjGxS3KTG0uNmNOK362iJua3", | |
| "object": "file", | |
| "deleted": true | |
| } | |
| get: | |
| operationId: retrieveFile | |
| tags: | |
| - OpenAI | |
| summary: Returns information about a specific file. | |
| parameters: | |
| - in: path | |
| name: file_id | |
| required: true | |
| schema: | |
| type: string | |
| description: The ID of the file to use for this request | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/OpenAIFile' | |
| x-oaiMeta: | |
| name: Retrieve file | |
| group: files | |
| path: retrieve | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3 \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.File.retrieve("file-XjGxS3KTG0uNmNOK362iJua3") | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.retrieveFile("file-XjGxS3KTG0uNmNOK362iJua3"); | |
| response: | | |
| { | |
| "id": "file-XjGxS3KTG0uNmNOK362iJua3", | |
| "object": "file", | |
| "bytes": 140, | |
| "created_at": 1613779657, | |
| "filename": "mydata.jsonl", | |
| "purpose": "fine-tune" | |
| } | |
| /files/{file_id}/content: | |
| get: | |
| operationId: downloadFile | |
| tags: | |
| - OpenAI | |
| summary: Returns the contents of the specified file | |
| parameters: | |
| - in: path | |
| name: file_id | |
| required: true | |
| schema: | |
| type: string | |
| description: The ID of the file to use for this request | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| type: string | |
| x-oaiMeta: | |
| name: Retrieve file content | |
| group: files | |
| path: retrieve-content | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3/content \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' > file.jsonl | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| content = openai.File.download("file-XjGxS3KTG0uNmNOK362iJua3") | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.downloadFile("file-XjGxS3KTG0uNmNOK362iJua3"); | |
| /answers: | |
| post: | |
| operationId: createAnswer | |
| deprecated: true | |
| tags: | |
| - OpenAI | |
| summary: | | |
| Answers the specified question using the provided documents and examples. | |
| The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). | |
| requestBody: | |
| required: true | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateAnswerRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateAnswerResponse' | |
| x-oaiMeta: | |
| name: Create answer | |
| group: answers | |
| path: create | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/answers \ | |
| -X POST \ | |
| -H "Authorization: Bearer YOUR_API_KEY" \ | |
| -H 'Content-Type: application/json' \ | |
| -d '{ | |
| "documents": ["Puppy A is happy.", "Puppy B is sad."], | |
| "question": "which puppy is happy?", | |
| "search_model": "ada", | |
| "model": "curie", | |
| "examples_context": "In 2017, U.S. life expectancy was 78.6 years.", | |
| "examples": [["What is human life expectancy in the United States?","78 years."]], | |
| "max_tokens": 5, | |
| "stop": ["\n", "<|endoftext|>"] | |
| }' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Answer.create( | |
| search_model="ada", | |
| model="curie", | |
| question="which puppy is happy?", | |
| documents=["Puppy A is happy.", "Puppy B is sad."], | |
| examples_context="In 2017, U.S. life expectancy was 78.6 years.", | |
| examples=[["What is human life expectancy in the United States?","78 years."]], | |
| max_tokens=5, | |
| stop=["\n", "<|endoftext|>"], | |
| ) | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createAnswer({ | |
| search_model: "ada", | |
| model: "curie", | |
| question: "which puppy is happy?", | |
| documents: ["Puppy A is happy.", "Puppy B is sad."], | |
| examples_context: "In 2017, U.S. life expectancy was 78.6 years.", | |
| examples: [["What is human life expectancy in the United States?","78 years."]], | |
| max_tokens: 5, | |
| stop: ["\n", "<|endoftext|>"], | |
| }); | |
| parameters: | | |
| { | |
| "documents": ["Puppy A is happy.", "Puppy B is sad."], | |
| "question": "which puppy is happy?", | |
| "search_model": "ada", | |
| "model": "curie", | |
| "examples_context": "In 2017, U.S. life expectancy was 78.6 years.", | |
| "examples": [["What is human life expectancy in the United States?","78 years."]], | |
| "max_tokens": 5, | |
| "stop": ["\n", "<|endoftext|>"] | |
| } | |
| response: | | |
| { | |
| "answers": [ | |
| "puppy A." | |
| ], | |
| "completion": "cmpl-2euVa1kmKUuLpSX600M41125Mo9NI", | |
| "model": "curie:2020-05-03", | |
| "object": "answer", | |
| "search_model": "ada", | |
| "selected_documents": [ | |
| { | |
| "document": 0, | |
| "text": "Puppy A is happy. " | |
| }, | |
| { | |
| "document": 1, | |
| "text": "Puppy B is sad. " | |
| } | |
| ] | |
| } | |
| /classifications: | |
| post: | |
| operationId: createClassification | |
| deprecated: true | |
| tags: | |
| - OpenAI | |
| summary: | | |
| Classifies the specified `query` using provided examples. | |
| The endpoint first [searches](/docs/api-reference/searches) over the labeled examples | |
| to select the ones most relevant for the particular query. Then, the relevant examples | |
| are combined with the query to construct a prompt to produce the final label via the | |
| [completions](/docs/api-reference/completions) endpoint. | |
| Labeled examples can be provided via an uploaded `file`, or explicitly listed in the | |
| request using the `examples` parameter for quick tests and small scale use cases. | |
| requestBody: | |
| required: true | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateClassificationRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateClassificationResponse' | |
| x-oaiMeta: | |
| name: Create classification | |
| group: classifications | |
| path: create | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/classifications \ | |
| -X POST \ | |
| -H "Authorization: Bearer YOUR_API_KEY" \ | |
| -H 'Content-Type: application/json' \ | |
| -d '{ | |
| "examples": [ | |
| ["A happy moment", "Positive"], | |
| ["I am sad.", "Negative"], | |
| ["I am feeling awesome", "Positive"]], | |
| "query": "It is a raining day :(", | |
| "search_model": "ada", | |
| "model": "curie", | |
| "labels":["Positive", "Negative", "Neutral"] | |
| }' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Classification.create( | |
| search_model="ada", | |
| model="curie", | |
| examples=[ | |
| ["A happy moment", "Positive"], | |
| ["I am sad.", "Negative"], | |
| ["I am feeling awesome", "Positive"] | |
| ], | |
| query="It is a raining day :(", | |
| labels=["Positive", "Negative", "Neutral"], | |
| ) | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createClassification({ | |
| search_model: "ada", | |
| model: "curie", | |
| examples: [ | |
| ["A happy moment", "Positive"], | |
| ["I am sad.", "Negative"], | |
| ["I am feeling awesome", "Positive"] | |
| ], | |
| query:"It is a raining day :(", | |
| labels: ["Positive", "Negative", "Neutral"], | |
| }); | |
| parameters: | | |
| { | |
| "examples": [ | |
| ["A happy moment", "Positive"], | |
| ["I am sad.", "Negative"], | |
| ["I am feeling awesome", "Positive"] | |
| ], | |
| "labels": ["Positive", "Negative", "Neutral"], | |
| "query": "It is a raining day :(", | |
| "search_model": "ada", | |
| "model": "curie" | |
| } | |
| response: | | |
| { | |
| "completion": "cmpl-2euN7lUVZ0d4RKbQqRV79IiiE6M1f", | |
| "label": "Negative", | |
| "model": "curie:2020-05-03", | |
| "object": "classification", | |
| "search_model": "ada", | |
| "selected_examples": [ | |
| { | |
| "document": 1, | |
| "label": "Negative", | |
| "text": "I am sad." | |
| }, | |
| { | |
| "document": 0, | |
| "label": "Positive", | |
| "text": "A happy moment" | |
| }, | |
| { | |
| "document": 2, | |
| "label": "Positive", | |
| "text": "I am feeling awesome" | |
| } | |
| ] | |
| } | |
| /fine-tunes: | |
| post: | |
| operationId: createFineTune | |
| tags: | |
| - OpenAI | |
| summary: | | |
| Creates a job that fine-tunes a specified model from a given dataset. | |
| Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. | |
| [Learn more about Fine-tuning](/docs/guides/fine-tuning) | |
| requestBody: | |
| required: true | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateFineTuneRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/FineTune' | |
| x-oaiMeta: | |
| name: Create fine-tune | |
| group: fine-tunes | |
| path: create | |
| beta: true | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/fine-tunes \ | |
| -X POST \ | |
| -H "Content-Type: application/json" \ | |
| -H "Authorization: Bearer YOUR_API_KEY" \ | |
| -d '{ | |
| "training_file": "file-XGinujblHPwGLSztz8cPS8XY" | |
| }' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.FineTune.create(training_file="file-XGinujblHPwGLSztz8cPS8XY") | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createFineTune({ | |
| training_file: "file-XGinujblHPwGLSztz8cPS8XY", | |
| }); | |
| response: | | |
| { | |
| "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", | |
| "object": "fine-tune", | |
| "model": "curie", | |
| "created_at": 1614807352, | |
| "events": [ | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807352, | |
| "level": "info", | |
| "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." | |
| } | |
| ], | |
| "fine_tuned_model": null, | |
| "hyperparams": { | |
| "batch_size": 4, | |
| "learning_rate_multiplier": 0.1, | |
| "n_epochs": 4, | |
| "prompt_loss_weight": 0.1 | |
| }, | |
| "organization_id": "org-...", | |
| "result_files": [], | |
| "status": "pending", | |
| "validation_files": [], | |
| "training_files": [ | |
| { | |
| "id": "file-XGinujblHPwGLSztz8cPS8XY", | |
| "object": "file", | |
| "bytes": 1547276, | |
| "created_at": 1610062281, | |
| "filename": "my-data-train.jsonl", | |
| "purpose": "fine-tune-train" | |
| } | |
| ], | |
| "updated_at": 1614807352, | |
| } | |
| get: | |
| operationId: listFineTunes | |
| tags: | |
| - OpenAI | |
| summary: | | |
| List your organization's fine-tuning jobs | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/ListFineTunesResponse' | |
| x-oaiMeta: | |
| name: List fine-tunes | |
| group: fine-tunes | |
| path: list | |
| beta: true | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/fine-tunes \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.FineTune.list() | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.listFineTunes(); | |
| response: | | |
| { | |
| "object": "list", | |
| "data": [ | |
| { | |
| "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", | |
| "object": "fine-tune", | |
| "model": "curie", | |
| "created_at": 1614807352, | |
| "fine_tuned_model": null, | |
| "hyperparams": { ... }, | |
| "organization_id": "org-...", | |
| "result_files": [], | |
| "status": "pending", | |
| "validation_files": [], | |
| "training_files": [ { ... } ], | |
| "updated_at": 1614807352 | |
| }, | |
| { ... }, | |
| { ... } | |
| ] | |
| } | |
| /fine-tunes/{fine_tune_id}: | |
| get: | |
| operationId: retrieveFineTune | |
| tags: | |
| - OpenAI | |
| summary: | | |
| Gets info about the fine-tune job. | |
| [Learn more about Fine-tuning](/docs/guides/fine-tuning) | |
| parameters: | |
| - in: path | |
| name: fine_tune_id | |
| required: true | |
| schema: | |
| type: string | |
| example: | |
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | |
| description: | | |
| The ID of the fine-tune job | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/FineTune' | |
| x-oaiMeta: | |
| name: Retrieve fine-tune | |
| group: fine-tunes | |
| path: retrieve | |
| beta: true | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ | |
| -H "Authorization: Bearer YOUR_API_KEY" | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.FineTune.retrieve(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.retrieveFineTune("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); | |
| response: | | |
| { | |
| "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", | |
| "object": "fine-tune", | |
| "model": "curie", | |
| "created_at": 1614807352, | |
| "events": [ | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807352, | |
| "level": "info", | |
| "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." | |
| }, | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807356, | |
| "level": "info", | |
| "message": "Job started." | |
| }, | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807861, | |
| "level": "info", | |
| "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." | |
| }, | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807864, | |
| "level": "info", | |
| "message": "Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT." | |
| }, | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807864, | |
| "level": "info", | |
| "message": "Job succeeded." | |
| } | |
| ], | |
| "fine_tuned_model": "curie:ft-acmeco-2021-03-03-21-44-20", | |
| "hyperparams": { | |
| "batch_size": 4, | |
| "learning_rate_multiplier": 0.1, | |
| "n_epochs": 4, | |
| "prompt_loss_weight": 0.1 | |
| }, | |
| "organization_id": "org-...", | |
| "result_files": [ | |
| { | |
| "id": "file-QQm6ZpqdNwAaVC3aSz5sWwLT", | |
| "object": "file", | |
| "bytes": 81509, | |
| "created_at": 1614807863, | |
| "filename": "compiled_results.csv", | |
| "purpose": "fine-tune-results" | |
| } | |
| ], | |
| "status": "succeeded", | |
| "validation_files": [], | |
| "training_files": [ | |
| { | |
| "id": "file-XGinujblHPwGLSztz8cPS8XY", | |
| "object": "file", | |
| "bytes": 1547276, | |
| "created_at": 1610062281, | |
| "filename": "my-data-train.jsonl", | |
| "purpose": "fine-tune-train" | |
| } | |
| ], | |
| "updated_at": 1614807865 | |
| } | |
| /fine-tunes/{fine_tune_id}/cancel: | |
| post: | |
| operationId: cancelFineTune | |
| tags: | |
| - OpenAI | |
| summary: | | |
| Immediately cancel a fine-tune job. | |
| parameters: | |
| - in: path | |
| name: fine_tune_id | |
| required: true | |
| schema: | |
| type: string | |
| example: | |
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | |
| description: | | |
| The ID of the fine-tune job to cancel | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/FineTune' | |
| x-oaiMeta: | |
| name: Cancel fine-tune | |
| group: fine-tunes | |
| path: cancel | |
| beta: true | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/cancel \ | |
| -X POST \ | |
| -H "Authorization: Bearer YOUR_API_KEY" | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.FineTune.cancel(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.cancelFineTune("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); | |
| response: | | |
| { | |
| "id": "ft-xhrpBbvVUzYGo8oUO1FY4nI7", | |
| "object": "fine-tune", | |
| "model": "curie", | |
| "created_at": 1614807770, | |
| "events": [ { ... } ], | |
| "fine_tuned_model": null, | |
| "hyperparams": { ... }, | |
| "organization_id": "org-...", | |
| "result_files": [], | |
| "status": "cancelled", | |
| "validation_files": [], | |
| "training_files": [ | |
| { | |
| "id": "file-XGinujblHPwGLSztz8cPS8XY", | |
| "object": "file", | |
| "bytes": 1547276, | |
| "created_at": 1610062281, | |
| "filename": "my-data-train.jsonl", | |
| "purpose": "fine-tune-train" | |
| } | |
| ], | |
| "updated_at": 1614807789 | |
| } | |
| /fine-tunes/{fine_tune_id}/events: | |
| get: | |
| operationId: listFineTuneEvents | |
| tags: | |
| - OpenAI | |
| summary: | | |
| Get fine-grained status updates for a fine-tune job. | |
| parameters: | |
| - in: path | |
| name: fine_tune_id | |
| required: true | |
| schema: | |
| type: string | |
| example: | |
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | |
| description: | | |
| The ID of the fine-tune job to get events for. | |
| - in: query | |
| name: stream | |
| required: false | |
| schema: | |
| type: boolean | |
| default: false | |
| description: | | |
| Whether to stream events for the fine-tune job. If set to true, | |
| events will be sent as data-only | |
| [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | |
| as they become available. The stream will terminate with a | |
| `data: [DONE]` message when the job is finished (succeeded, cancelled, | |
| or failed). | |
| If set to false, only events generated so far will be returned. | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/ListFineTuneEventsResponse' | |
| x-oaiMeta: | |
| name: List fine-tune events | |
| group: fine-tunes | |
| path: events | |
| beta: true | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/events \ | |
| -H "Authorization: Bearer YOUR_API_KEY" | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.FineTune.list_events(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.listFineTuneEvents("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); | |
| response: | | |
| { | |
| "object": "list", | |
| "data": [ | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807352, | |
| "level": "info", | |
| "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." | |
| }, | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807356, | |
| "level": "info", | |
| "message": "Job started." | |
| }, | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807861, | |
| "level": "info", | |
| "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." | |
| }, | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807864, | |
| "level": "info", | |
| "message": "Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT." | |
| }, | |
| { | |
| "object": "fine-tune-event", | |
| "created_at": 1614807864, | |
| "level": "info", | |
| "message": "Job succeeded." | |
| } | |
| ] | |
| } | |
| /models: | |
| get: | |
| operationId: listModels | |
| tags: | |
| - OpenAI | |
| summary: Lists the currently available models, and provides basic information about each one such as the owner and availability. | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/ListModelsResponse' | |
| x-oaiMeta: | |
| name: List models | |
| group: models | |
| path: list | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/models \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Model.list() | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.listModels(); | |
| response: | | |
| { | |
| "data": [ | |
| { | |
| "id": "model-id-0", | |
| "object": "model", | |
| "owned_by": "organization-owner", | |
| "permission": [...] | |
| }, | |
| { | |
| "id": "model-id-1", | |
| "object": "model", | |
| "owned_by": "organization-owner", | |
| "permission": [...] | |
| }, | |
| { | |
| "id": "model-id-2", | |
| "object": "model", | |
| "owned_by": "openai", | |
| "permission": [...] | |
| } | |
| ], | |
| "object": "list" | |
| } | |
| /models/{model}: | |
| get: | |
| operationId: retrieveModel | |
| tags: | |
| - OpenAI | |
| summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning. | |
| parameters: | |
| - in: path | |
| name: model | |
| required: true | |
| schema: | |
| type: string | |
| # ideally this will be an actual ID, so this will always work from browser | |
| example: | |
| text-davinci-001 | |
| description: | |
| The ID of the model to use for this request | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/Model' | |
| x-oaiMeta: | |
| name: Retrieve model | |
| group: models | |
| path: retrieve | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/models/VAR_model_id \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Model.retrieve("VAR_model_id") | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.retrieveModel("VAR_model_id"); | |
| response: | | |
| { | |
| "id": "VAR_model_id", | |
| "object": "model", | |
| "owned_by": "openai", | |
| "permission": [...] | |
| } | |
| delete: | |
| operationId: deleteModel | |
| tags: | |
| - OpenAI | |
| summary: Delete a fine-tuned model. You must have the Owner role in your organization. | |
| parameters: | |
| - in: path | |
| name: model | |
| required: true | |
| schema: | |
| type: string | |
| example: curie:ft-acmeco-2021-03-03-21-44-20 | |
| description: The model to delete | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/DeleteModelResponse' | |
| x-oaiMeta: | |
| name: Delete fine-tune model | |
| group: fine-tunes | |
| path: delete-model | |
| beta: true | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/models/curie:ft-acmeco-2021-03-03-21-44-20 \ | |
| -X DELETE \ | |
| -H "Authorization: Bearer YOUR_API_KEY" | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Model.delete("curie:ft-acmeco-2021-03-03-21-44-20") | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.deleteModel('curie:ft-acmeco-2021-03-03-21-44-20'); | |
| response: | | |
| { | |
| "id": "curie:ft-acmeco-2021-03-03-21-44-20", | |
| "object": "model", | |
| "deleted": true | |
| } | |
| /moderations: | |
| post: | |
| operationId: createModeration | |
| tags: | |
| - OpenAI | |
| summary: Classifies if text violates OpenAI's Content Policy | |
| requestBody: | |
| required: true | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateModerationRequest' | |
| responses: | |
| "200": | |
| description: OK | |
| content: | |
| application/json: | |
| schema: | |
| $ref: '#/components/schemas/CreateModerationResponse' | |
| x-oaiMeta: | |
| name: Create moderation | |
| group: moderations | |
| path: create | |
| examples: | |
| curl: | | |
| curl https://api.openai.com/v1/moderations \ | |
| -H 'Content-Type: application/json' \ | |
| -H 'Authorization: Bearer YOUR_API_KEY' \ | |
| -d '{ | |
| "input": "I want to kill them." | |
| }' | |
| python: | | |
| import os | |
| import openai | |
| openai.api_key = os.getenv("OPENAI_API_KEY") | |
| openai.Moderation.create( | |
| input="I want to kill them.", | |
| ) | |
| node.js: | | |
| const { Configuration, OpenAIApi } = require("openai"); | |
| const configuration = new Configuration({ | |
| apiKey: process.env.OPENAI_API_KEY, | |
| }); | |
| const openai = new OpenAIApi(configuration); | |
| const response = await openai.createModeration({ | |
| input: "I want to kill them.", | |
| }); | |
| parameters: | | |
| { | |
| "input": "I want to kill them." | |
| } | |
| response: | | |
| { | |
| "id": "modr-5MWoLO", | |
| "model": "text-moderation-001", | |
| "results": [ | |
| { | |
| "categories": { | |
| "hate": false, | |
| "hate/threatening": true, | |
| "self-harm": false, | |
| "sexual": false, | |
| "sexual/minors": false, | |
| "violence": true, | |
| "violence/graphic": false | |
| }, | |
| "category_scores": { | |
| "hate": 0.22714105248451233, | |
| "hate/threatening": 0.4132447838783264, | |
| "self-harm": 0.005232391878962517, | |
| "sexual": 0.01407341007143259, | |
| "sexual/minors": 0.0038522258400917053, | |
| "violence": 0.9223177433013916, | |
| "violence/graphic": 0.036865197122097015 | |
| }, | |
| "flagged": true | |
| } | |
| ] | |
| } | |
| components: | |
| schemas: | |
| ListEnginesResponse: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| data: | |
| type: array | |
| items: | |
| $ref: '#/components/schemas/Engine' | |
| required: | |
| - object | |
| - data | |
| ListModelsResponse: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| data: | |
| type: array | |
| items: | |
| $ref: '#/components/schemas/Model' | |
| required: | |
| - object | |
| - data | |
| DeleteModelResponse: | |
| type: object | |
| properties: | |
| id: | |
| type: string | |
| object: | |
| type: string | |
| deleted: | |
| type: boolean | |
| required: | |
| - id | |
| - object | |
| - deleted | |
| CreateCompletionRequest: | |
| type: object | |
| properties: | |
| model: | |
| description: ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. | |
| type: string | |
| prompt: | |
| description: &completions_prompt_description | | |
| The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. | |
| Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. | |
| default: '<|endoftext|>' | |
| nullable: true | |
| oneOf: | |
| - type: string | |
| default: '' | |
| example: "This is a test." | |
| - type: array | |
| items: | |
| type: string | |
| default: '' | |
| example: "This is a test." | |
| - type: array | |
| minItems: 1 | |
| items: | |
| type: integer | |
| example: "[1212, 318, 257, 1332, 13]" | |
| - type: array | |
| minItems: 1 | |
| items: | |
| type: array | |
| minItems: 1 | |
| items: | |
| type: integer | |
| example: "[[1212, 318, 257, 1332, 13]]" | |
| suffix: | |
| description: | |
| The suffix that comes after a completion of inserted text. | |
| default: null | |
| nullable: true | |
| type: string | |
| example: "test." | |
| max_tokens: | |
| type: integer | |
| minimum: 0 | |
| default: 16 | |
| example: 16 | |
| nullable: true | |
| description: &completions_max_tokens_description | | |
| The maximum number of [tokens](/tokenizer) to generate in the completion. | |
| The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096). | |
| temperature: | |
| type: number | |
| minimum: 0 | |
| maximum: 2 | |
| default: 1 | |
| example: 1 | |
| nullable: true | |
| description: &completions_temperature_description | | |
| What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. | |
| We generally recommend altering this or `top_p` but not both. | |
| top_p: | |
| type: number | |
| minimum: 0 | |
| maximum: 1 | |
| default: 1 | |
| example: 1 | |
| nullable: true | |
| description: &completions_top_p_description | | |
| An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. | |
| We generally recommend altering this or `temperature` but not both. | |
| n: | |
| type: integer | |
| minimum: 1 | |
| maximum: 128 | |
| default: 1 | |
| example: 1 | |
| nullable: true | |
| description: &completions_completions_description | | |
| How many completions to generate for each prompt. | |
| **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. | |
| stream: | |
| description: > | |
| Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | |
| as they become available, with the stream terminated by a `data: [DONE]` message. | |
| type: boolean | |
| nullable: true | |
| default: false | |
| logprobs: | |
| type: integer | |
| minimum: 0 | |
| maximum: 5 | |
| default: null | |
| nullable: true | |
| description: &completions_logprobs_description | | |
| Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. | |
| The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. | |
| echo: | |
| type: boolean | |
| default: false | |
| nullable: true | |
| description: &completions_echo_description > | |
| Echo back the prompt in addition to the completion | |
| stop: | |
| description: &completions_stop_description > | |
| Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. | |
| default: null | |
| nullable: true | |
| oneOf: | |
| - type: string | |
| default: <|endoftext|> | |
| example: "\n" | |
| nullable: true | |
| - type: array | |
| minItems: 1 | |
| maxItems: 4 | |
| items: | |
| type: string | |
| example: '["\n"]' | |
| presence_penalty: | |
| type: number | |
| default: 0 | |
| minimum: -2 | |
| maximum: 2 | |
| nullable: true | |
| description: &completions_presence_penalty_description | | |
| Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. | |
| [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | |
| frequency_penalty: | |
| type: number | |
| default: 0 | |
| minimum: -2 | |
| maximum: 2 | |
| nullable: true | |
| description: &completions_frequency_penalty_description | | |
| Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. | |
| [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | |
| best_of: | |
| type: integer | |
| default: 1 | |
| minimum: 0 | |
| maximum: 20 | |
| nullable: true | |
| description: &completions_best_of_description | | |
| Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. | |
| When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. | |
| **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. | |
| logit_bias: | |
| type: object | |
| x-oaiTypeLabel: map | |
| default: null | |
| nullable: true | |
| description: &completions_logit_bias_description | | |
| Modify the likelihood of specified tokens appearing in the completion. | |
| Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. | |
| As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. | |
| user: | |
| type: string | |
| example: user-1234 | |
| description: | | |
| A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | |
| required: | |
| - model | |
| CreateCompletionResponse: | |
| type: object | |
| properties: | |
| id: | |
| type: string | |
| object: | |
| type: string | |
| created: | |
| type: integer | |
| model: | |
| type: string | |
| choices: | |
| type: array | |
| items: | |
| type: object | |
| properties: | |
| text: | |
| type: string | |
| index: | |
| type: integer | |
| logprobs: | |
| type: object | |
| nullable: true | |
| properties: | |
| tokens: | |
| type: array | |
| items: | |
| type: string | |
| token_logprobs: | |
| type: array | |
| items: | |
| type: number | |
| top_logprobs: | |
| type: array | |
| items: | |
| type: object | |
| text_offset: | |
| type: array | |
| items: | |
| type: integer | |
| finish_reason: | |
| type: string | |
| usage: | |
| type: object | |
| properties: | |
| prompt_tokens: | |
| type: integer | |
| completion_tokens: | |
| type: integer | |
| total_tokens: | |
| type: integer | |
| required: | |
| - prompt_tokens | |
| - completion_tokens | |
| - total_tokens | |
| required: | |
| - id | |
| - object | |
| - created | |
| - model | |
| - choices | |
| CreateEditRequest: | |
| type: object | |
| properties: | |
| model: | |
| description: ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. | |
| type: string | |
| input: | |
| description: | |
| The input text to use as a starting point for the edit. | |
| type: string | |
| default: '' | |
| nullable: true | |
| example: "What day of the wek is it?" | |
| instruction: | |
| description: | |
| The instruction that tells the model how to edit the prompt. | |
| type: string | |
| example: "Fix the spelling mistakes." | |
| n: | |
| type: integer | |
| minimum: 1 | |
| maximum: 20 | |
| default: 1 | |
| example: 1 | |
| nullable: true | |
| description: | |
| How many edits to generate for the input and instruction. | |
| temperature: | |
| type: number | |
| minimum: 0 | |
| maximum: 2 | |
| default: 1 | |
| example: 1 | |
| nullable: true | |
| description: *completions_temperature_description | |
| top_p: | |
| type: number | |
| minimum: 0 | |
| maximum: 1 | |
| default: 1 | |
| example: 1 | |
| nullable: true | |
| description: *completions_top_p_description | |
| required: | |
| - model | |
| - instruction | |
| CreateEditResponse: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| created: | |
| type: integer | |
| choices: | |
| type: array | |
| items: | |
| type: object | |
| properties: | |
| text: | |
| type: string | |
| index: | |
| type: integer | |
| logprobs: | |
| type: object | |
| nullable: true | |
| properties: | |
| tokens: | |
| type: array | |
| items: | |
| type: string | |
| token_logprobs: | |
| type: array | |
| items: | |
| type: number | |
| top_logprobs: | |
| type: array | |
| items: | |
| type: object | |
| text_offset: | |
| type: array | |
| items: | |
| type: integer | |
| finish_reason: | |
| type: string | |
| usage: | |
| type: object | |
| properties: | |
| prompt_tokens: | |
| type: integer | |
| completion_tokens: | |
| type: integer | |
| total_tokens: | |
| type: integer | |
| required: | |
| - prompt_tokens | |
| - completion_tokens | |
| - total_tokens | |
| required: | |
| - object | |
| - created | |
| - choices | |
| - usage | |
| CreateImageRequest: | |
| type: object | |
| properties: | |
| prompt: | |
| description: A text description of the desired image(s). The maximum length is 1000 characters. | |
| type: string | |
| example: "A cute baby sea otter" | |
| n: | |
| type: integer | |
| minimum: 1 | |
| maximum: 10 | |
| default: 1 | |
| example: 1 | |
| nullable: true | |
| description: The number of images to generate. Must be between 1 and 10. | |
| size: | |
| type: string | |
| enum: ["256x256", "512x512", "1024x1024"] | |
| default: "1024x1024" | |
| example: "1024x1024" | |
| nullable: true | |
| description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. | |
| response_format: | |
| type: string | |
| enum: ["url", "b64_json"] | |
| default: "url" | |
| example: "url" | |
| nullable: true | |
| description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. | |
| user: | |
| required: | |
| - prompt | |
| ImagesResponse: | |
| properties: | |
| created: | |
| type: integer | |
| data: | |
| type: array | |
| items: | |
| type: object | |
| properties: | |
| url: | |
| type: string | |
| b64_json: | |
| type: string | |
| required: | |
| - created | |
| - data | |
| CreateImageEditRequest: | |
| type: object | |
| properties: | |
| image: | |
| description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. | |
| type: string | |
| format: binary | |
| mask: | |
| description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. | |
| type: string | |
| format: binary | |
| prompt: | |
| description: A text description of the desired image(s). The maximum length is 1000 characters. | |
| type: string | |
| example: "A cute baby sea otter wearing a beret" | |
| n: | |
| size: | |
| response_format: | |
| user: | |
| required: | |
| - prompt | |
| - image | |
| CreateImageVariationRequest: | |
| type: object | |
| properties: | |
| image: | |
| description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. | |
| type: string | |
| format: binary | |
| n: | |
| size: | |
| response_format: | |
| user: | |
| required: | |
| - image | |
| CreateModerationRequest: | |
| type: object | |
| properties: | |
| input: | |
| description: The input text to classify | |
| oneOf: | |
| - type: string | |
| default: '' | |
| example: "I want to kill them." | |
| - type: array | |
| items: | |
| type: string | |
| default: '' | |
| example: "I want to kill them." | |
| model: | |
| description: | | |
| Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. | |
| The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. | |
| type: string | |
| nullable: false | |
| default: "text-moderation-latest" | |
| example: "text-moderation-stable" | |
| required: | |
| - input | |
| CreateModerationResponse: | |
| type: object | |
| properties: | |
| id: | |
| type: string | |
| model: | |
| type: string | |
| results: | |
| type: array | |
| items: | |
| type: object | |
| properties: | |
| flagged: | |
| type: boolean | |
| categories: | |
| type: object | |
| properties: | |
| hate: | |
| type: boolean | |
| hate/threatening: | |
| type: boolean | |
| self-harm: | |
| type: boolean | |
| sexual: | |
| type: boolean | |
| sexual/minors: | |
| type: boolean | |
| violence: | |
| type: boolean | |
| violence/graphic: | |
| type: boolean | |
| required: | |
| - hate | |
| - hate/threatening | |
| - self-harm | |
| - sexual | |
| - sexual/minors | |
| - violence | |
| - violence/graphic | |
| category_scores: | |
| type: object | |
| properties: | |
| hate: | |
| type: number | |
| hate/threatening: | |
| type: number | |
| self-harm: | |
| type: number | |
| sexual: | |
| type: number | |
| sexual/minors: | |
| type: number | |
| violence: | |
| type: number | |
| violence/graphic: | |
| type: number | |
| required: | |
| - hate | |
| - hate/threatening | |
| - self-harm | |
| - sexual | |
| - sexual/minors | |
| - violence | |
| - violence/graphic | |
| required: | |
| - flagged | |
| - categories | |
| - category_scores | |
| required: | |
| - id | |
| - model | |
| - results | |
| CreateSearchRequest: | |
| type: object | |
| properties: | |
| query: | |
| description: Query to search against the documents. | |
| type: string | |
| example: "the president" | |
| minLength: 1 | |
| documents: | |
| description: | | |
| Up to 200 documents to search over, provided as a list of strings. | |
| The maximum document length (in tokens) is 2034 minus the number of tokens in the query. | |
| You should specify either `documents` or a `file`, but not both. | |
| type: array | |
| minItems: 1 | |
| maxItems: 200 | |
| items: | |
| type: string | |
| nullable: true | |
| example: "['White House', 'hospital', 'school']" | |
| file: | |
| description: | | |
| The ID of an uploaded file that contains documents to search over. | |
| You should specify either `documents` or a `file`, but not both. | |
| type: string | |
| nullable: true | |
| max_rerank: | |
| description: | | |
| The maximum number of documents to be re-ranked and returned by search. | |
| This flag only takes effect when `file` is set. | |
| type: integer | |
| minimum: 1 | |
| default: 200 | |
| nullable: true | |
| return_metadata: | |
| description: | | |
| A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a "metadata" field. | |
| This flag only takes effect when `file` is set. | |
| type: boolean | |
| default: false | |
| nullable: true | |
| user: | |
| required: | |
| - query | |
| CreateSearchResponse: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| model: | |
| type: string | |
| data: | |
| type: array | |
| items: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| document: | |
| type: integer | |
| score: | |
| type: number | |
| ListFilesResponse: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| data: | |
| type: array | |
| items: | |
| $ref: '#/components/schemas/OpenAIFile' | |
| required: | |
| - object | |
| - data | |
| CreateFileRequest: | |
| type: object | |
| additionalProperties: false | |
| properties: | |
| file: | |
| description: | | |
| Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. | |
| If the `purpose` is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data). | |
| type: string | |
| format: binary | |
| purpose: | |
| description: | | |
| The intended purpose of the uploaded documents. | |
| Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file. | |
| type: string | |
| required: | |
| - file | |
| - purpose | |
| DeleteFileResponse: | |
| type: object | |
| properties: | |
| id: | |
| type: string | |
| object: | |
| type: string | |
| deleted: | |
| type: boolean | |
| required: | |
| - id | |
| - object | |
| - deleted | |
| CreateAnswerRequest: | |
| type: object | |
| additionalProperties: false | |
| properties: | |
| model: | |
| description: ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. | |
| type: string | |
| question: | |
| description: Question to get answered. | |
| type: string | |
| minLength: 1 | |
| example: "What is the capital of Japan?" | |
| examples: | |
| description: List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. | |
| type: array | |
| minItems: 1 | |
| maxItems: 200 | |
| items: | |
| type: array | |
| minItems: 2 | |
| maxItems: 2 | |
| items: | |
| type: string | |
| minLength: 1 | |
| example: "[['What is the capital of Canada?', 'Ottawa'], ['Which province is Ottawa in?', 'Ontario']]" | |
| examples_context: | |
| description: A text snippet containing the contextual information used to generate the answers for the `examples` you provide. | |
| type: string | |
| example: "Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border." | |
| documents: | |
| description: | | |
| List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. | |
| You should specify either `documents` or a `file`, but not both. | |
| type: array | |
| maxItems: 200 | |
| items: | |
| type: string | |
| example: "['Japan is an island country in East Asia, located in the northwest Pacific Ocean.', 'Tokyo is the capital and most populous prefecture of Japan.']" | |
| nullable: true | |
| file: | |
| description: | | |
| The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. | |
| You should specify either `documents` or a `file`, but not both. | |
| type: string | |
| nullable: true | |
| search_model: | |
| description: ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. | |
| type: string | |
| default: ada | |
| nullable: true | |
| max_rerank: | |
| description: The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. | |
| type: integer | |
| default: 200 | |
| nullable: true | |
| temperature: | |
| description: What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values mean the model will take more risks and value 0 (argmax sampling) works better for scenarios with a well-defined answer. | |
| type: number | |
| default: 0 | |
| nullable: true | |
| logprobs: | |
| type: integer | |
| minimum: 0 | |
| maximum: 5 | |
| default: null | |
| nullable: true | |
| description: | | |
| Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. | |
| The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. | |
| When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs. | |
| max_tokens: | |
| description: The maximum number of tokens allowed for the generated answer | |
| type: integer | |
| default: 16 | |
| nullable: true | |
| stop: | |
| description: | |
| default: null | |
| oneOf: | |
| - type: string | |
| default: <|endoftext|> | |
| example: "\n" | |
| - type: array | |
| minItems: 1 | |
| maxItems: 4 | |
| items: | |
| type: string | |
| example: '["\n"]' | |
| nullable: true | |
| n: | |
| description: How many answers to generate for each question. | |
| type: integer | |
| minimum: 1 | |
| maximum: 10 | |
| default: 1 | |
| nullable: true | |
| logit_bias: | |
| return_metadata: | |
| return_prompt: | |
| description: If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. | |
| type: boolean | |
| default: false | |
| nullable: true | |
| expand: | |
| description: If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. | |
| type: array | |
| items: {} | |
| nullable: true | |
| default: [] | |
| user: | |
| required: | |
| - model | |
| - question | |
| - examples | |
| - examples_context | |
| CreateAnswerResponse: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| model: | |
| type: string | |
| search_model: | |
| type: string | |
| completion: | |
| type: string | |
| answers: | |
| type: array | |
| items: | |
| type: string | |
| selected_documents: | |
| type: array | |
| items: | |
| type: object | |
| properties: | |
| document: | |
| type: integer | |
| text: | |
| type: string | |
| CreateClassificationRequest: | |
| type: object | |
| additionalProperties: false | |
| properties: | |
| model: | |
| query: | |
| description: Query to be classified. | |
| type: string | |
| minLength: 1 | |
| example: "The plot is not very attractive." | |
| examples: | |
| description: | | |
| A list of examples with labels, in the following format: | |
| `[["The movie is so interesting.", "Positive"], ["It is quite boring.", "Negative"], ...]` | |
| All the label strings will be normalized to be capitalized. | |
| You should specify either `examples` or `file`, but not both. | |
| type: array | |
| minItems: 2 | |
| maxItems: 200 | |
| items: | |
| type: array | |
| minItems: 2 | |
| maxItems: 2 | |
| items: | |
| type: string | |
| minLength: 1 | |
| example: "[['Do not see this film.', 'Negative'], ['Smart, provocative and blisteringly funny.', 'Positive']]" | |
| nullable: true | |
| file: | |
| description: | | |
| The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. | |
| You should specify either `examples` or `file`, but not both. | |
| type: string | |
| nullable: true | |
| labels: | |
| description: The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized. | |
| type: array | |
| minItems: 2 | |
| maxItems: 200 | |
| default: null | |
| items: | |
| type: string | |
| example: ["Positive", "Negative"] | |
| nullable: true | |
| search_model: | |
| temperature: | |
| description: | |
| What sampling `temperature` to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. | |
| type: number | |
| minimum: 0 | |
| maximum: 2 | |
| default: 0 | |
| nullable: true | |
| example: 0 | |
| logprobs: | |
| max_examples: | |
| description: The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. | |
| type: integer | |
| default: 200 | |
| nullable: true | |
| logit_bias: | |
| return_prompt: | |
| return_metadata: | |
| expand: | |
| user: | |
| required: | |
| - model | |
| - query | |
| CreateClassificationResponse: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| model: | |
| type: string | |
| search_model: | |
| type: string | |
| completion: | |
| type: string | |
| label: | |
| type: string | |
| selected_examples: | |
| type: array | |
| items: | |
| type: object | |
| properties: | |
| document: | |
| type: integer | |
| text: | |
| type: string | |
| label: | |
| type: string | |
| CreateFineTuneRequest: | |
| type: object | |
| properties: | |
| training_file: | |
| description: | | |
| The ID of an uploaded file that contains training data. | |
| See [upload file](/docs/api-reference/files/upload) for how to upload a file. | |
| Your dataset must be formatted as a JSONL file, where each training | |
| example is a JSON object with the keys "prompt" and "completion". | |
| Additionally, you must upload your file with the purpose `fine-tune`. | |
| See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. | |
| type: string | |
| example: "file-ajSREls59WBbvgSzJSVWxMCB" | |
| validation_file: | |
| description: | | |
| The ID of an uploaded file that contains validation data. | |
| If you provide this file, the data is used to generate validation | |
| metrics periodically during fine-tuning. These metrics can be viewed in | |
| the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). | |
| Your train and validation data should be mutually exclusive. | |
| Your dataset must be formatted as a JSONL file, where each validation | |
| example is a JSON object with the keys "prompt" and "completion". | |
| Additionally, you must upload your file with the purpose `fine-tune`. | |
| See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. | |
| type: string | |
| nullable: true | |
| example: "file-XjSREls59WBbvgSzJSVWxMCa" | |
| model: | |
| description: | | |
| The name of the base model to fine-tune. You can select one of "ada", | |
| "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. | |
| To learn more about these models, see the | |
| [Models](https://beta.openai.com/docs/models) documentation. | |
| default: "curie" | |
| type: string | |
| nullable: true | |
| n_epochs: | |
| description: | | |
| The number of epochs to train the model for. An epoch refers to one | |
| full cycle through the training dataset. | |
| default: 4 | |
| type: integer | |
| nullable: true | |
| batch_size: | |
| description: | | |
| The batch size to use for training. The batch size is the number of | |
| training examples used to train a single forward and backward pass. | |
| By default, the batch size will be dynamically configured to be | |
| ~0.2% of the number of examples in the training set, capped at 256 - | |
| in general, we've found that larger batch sizes tend to work better | |
| for larger datasets. | |
| default: null | |
| type: integer | |
| nullable: true | |
| learning_rate_multiplier: | |
| description: | | |
| The learning rate multiplier to use for training. | |
| The fine-tuning learning rate is the original learning rate used for | |
| pretraining multiplied by this value. | |
| By default, the learning rate multiplier is 0.05, 0.1, or 0.2 | |
| depending on final `batch_size` (larger learning rates tend to | |
| perform better with larger batch sizes). We recommend experimenting | |
| with values in the range 0.02 to 0.2 to see what produces the best | |
| results. | |
| default: null | |
| type: number | |
| nullable: true | |
| prompt_loss_weight: | |
| description: | | |
| The weight to use for loss on the prompt tokens. This controls how | |
| much the model tries to learn to generate the prompt (as compared | |
| to the completion which always has a weight of 1.0), and can add | |
| a stabilizing effect to training when completions are short. | |
| If prompts are extremely long (relative to completions), it may make | |
| sense to reduce this weight so as to avoid over-prioritizing | |
| learning the prompt. | |
| default: 0.01 | |
| type: number | |
| nullable: true | |
| compute_classification_metrics: | |
| description: | | |
| If set, we calculate classification-specific metrics such as accuracy | |
| and F-1 score using the validation set at the end of every epoch. | |
| These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). | |
| In order to compute classification metrics, you must provide a | |
| `validation_file`. Additionally, you must | |
| specify `classification_n_classes` for multiclass classification or | |
| `classification_positive_class` for binary classification. | |
| type: boolean | |
| default: false | |
| nullable: true | |
| classification_n_classes: | |
| description: | | |
| The number of classes in a classification task. | |
| This parameter is required for multiclass classification. | |
| type: integer | |
| default: null | |
| nullable: true | |
| classification_positive_class: | |
| description: | | |
| The positive class in binary classification. | |
| This parameter is needed to generate precision, recall, and F1 | |
| metrics when doing binary classification. | |
| type: string | |
| default: null | |
| nullable: true | |
| classification_betas: | |
| description: | | |
| If this is provided, we calculate F-beta scores at the specified | |
| beta values. The F-beta score is a generalization of F-1 score. | |
| This is only used for binary classification. | |
| With a beta of 1 (i.e. the F-1 score), precision and recall are | |
| given the same weight. A larger beta score puts more weight on | |
| recall and less on precision. A smaller beta score puts more weight | |
| on precision and less on recall. | |
| type: array | |
| items: | |
| type: number | |
| example: [0.6, 1, 1.5, 2] | |
| default: null | |
| nullable: true | |
| suffix: | |
| description: | | |
| A string of up to 40 characters that will be added to your fine-tuned model name. | |
| For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. | |
| type: string | |
| minLength: 1 | |
| maxLength: 40 | |
| default: null | |
| nullable: true | |
| required: | |
| - training_file | |
| ListFineTunesResponse: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| data: | |
| type: array | |
| items: | |
| $ref: '#/components/schemas/FineTune' | |
| required: | |
| - object | |
| - data | |
| ListFineTuneEventsResponse: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| data: | |
| type: array | |
| items: | |
| $ref: '#/components/schemas/FineTuneEvent' | |
| required: | |
| - object | |
| - data | |
| CreateEmbeddingRequest: | |
| type: object | |
| additionalProperties: false | |
| properties: | |
| model: | |
| input: | |
| description: | | |
| Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length. | |
| example: "The quick brown fox jumped over the lazy dog" | |
| oneOf: | |
| - type: string | |
| default: '' | |
| example: "This is a test." | |
| - type: array | |
| items: | |
| type: string | |
| default: '' | |
| example: "This is a test." | |
| - type: array | |
| minItems: 1 | |
| items: | |
| type: integer | |
| example: "[1212, 318, 257, 1332, 13]" | |
| - type: array | |
| minItems: 1 | |
| items: | |
| type: array | |
| minItems: 1 | |
| items: | |
| type: integer | |
| example: "[[1212, 318, 257, 1332, 13]]" | |
| user: | |
| required: | |
| - model | |
| - input | |
| CreateEmbeddingResponse: | |
| type: object | |
| properties: | |
| object: | |
| type: string | |
| model: | |
| type: string | |
| data: | |
| type: array | |
| items: | |
| type: object | |
| properties: | |
| index: | |
| type: integer | |
| object: | |
| type: string | |
| embedding: | |
| type: array | |
| items: | |
| type: number | |
| required: | |
| - index | |
| - object | |
| - embedding | |
| usage: | |
| type: object | |
| properties: | |
| prompt_tokens: | |
| type: integer | |
| total_tokens: | |
| type: integer | |
| required: | |
| - prompt_tokens | |
| - total_tokens | |
| required: | |
| - object | |
| - model | |
| - data | |
| - usage | |
| Engine: | |
| title: Engine | |
| properties: | |
| id: | |
| type: string | |
| object: | |
| type: string | |
| created: | |
| type: integer | |
| nullable: true | |
| ready: | |
| type: boolean | |
| required: | |
| - id | |
| - object | |
| - created | |
| - ready | |
| Model: | |
| title: Model | |
| properties: | |
| id: | |
| type: string | |
| object: | |
| type: string | |
| created: | |
| type: integer | |
| owned_by: | |
| type: string | |
| required: | |
| - id | |
| - object | |
| - created | |
| - owned_by | |
| OpenAIFile: | |
| title: OpenAIFile | |
| properties: | |
| id: | |
| type: string | |
| object: | |
| type: string | |
| bytes: | |
| type: integer | |
| created_at: | |
| type: integer | |
| filename: | |
| type: string | |
| purpose: | |
| type: string | |
| status: | |
| type: string | |
| status_details: | |
| type: object | |
| nullable: true | |
| required: | |
| - id | |
| - object | |
| - bytes | |
| - created_at | |
| - filename | |
| - purpose | |
| FineTune: | |
| title: FineTune | |
| properties: | |
| id: | |
| type: string | |
| object: | |
| type: string | |
| created_at: | |
| type: integer | |
| updated_at: | |
| type: integer | |
| model: | |
| type: string | |
| fine_tuned_model: | |
| type: string | |
| nullable: true | |
| organization_id: | |
| type: string | |
| status: | |
| type: string | |
| hyperparams: | |
| type: object | |
| training_files: | |
| type: array | |
| items: | |
| $ref: '#/components/schemas/OpenAIFile' | |
| validation_files: | |
| type: array | |
| items: | |
| $ref: '#/components/schemas/OpenAIFile' | |
| result_files: | |
| type: array | |
| items: | |
| $ref: '#/components/schemas/OpenAIFile' | |
| events: | |
| type: array | |
| items: | |
| $ref: '#/components/schemas/FineTuneEvent' | |
| required: | |
| - id | |
| - object | |
| - created_at | |
| - updated_at | |
| - model | |
| - fine_tuned_model | |
| - organization_id | |
| - status | |
| - hyperparams | |
| - training_files | |
| - validation_files | |
| - result_files | |
| FineTuneEvent: | |
| title: FineTuneEvent | |
| properties: | |
| object: | |
| type: string | |
| created_at: | |
| type: integer | |
| level: | |
| type: string | |
| message: | |
| type: string | |
| required: | |
| - object | |
| - created_at | |
| - level | |
| - message | |
| x-oaiMeta: | |
| groups: | |
| - id: models | |
| title: Models | |
| description: | | |
| List and describe the various models available in the API. You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them. | |
| - id: completions | |
| title: Completions | |
| description: | | |
| Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. | |
| - id: edits | |
| title: Edits | |
| description: | | |
| Given a prompt and an instruction, the model will return an edited version of the prompt. | |
| - id: images | |
| title: Images | |
| description: | | |
| Given a prompt and/or an input image, the model will generate a new image. | |
| Related guide: [Image generation](/docs/guides/images) | |
| - id: embeddings | |
| title: Embeddings | |
| description: | | |
| Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. | |
| Related guide: [Embeddings](/docs/guides/embeddings) | |
| - id: files | |
| title: Files | |
| description: | | |
| Files are used to upload documents that can be used with features like [Fine-tuning](/docs/api-reference/fine-tunes). | |
| - id: fine-tunes | |
| title: Fine-tunes | |
| description: | | |
| Manage fine-tuning jobs to tailor a model to your specific training data. | |
| Related guide: [Fine-tune models](/docs/guides/fine-tuning) | |
| - id: moderations | |
| title: Moderations | |
| description: | | |
| Given an input text, outputs if the model classifies it as violating OpenAI's content policy. | |
| Related guide: [Moderations](/docs/guides/moderation) | |
| - id: searches | |
| title: Searches | |
| warning: | |
| title: This endpoint is deprecated and will be removed on December 3rd, 2022 | |
| message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6272952-search-transition-guide). | |
| description: | | |
| Given a query and a set of documents or labels, the model ranks each document based on its semantic similarity to the provided query. | |
| Related guide: [Search](/docs/guides/search) | |
| - id: classifications | |
| title: Classifications | |
| warning: | |
| title: This endpoint is deprecated and will be removed on December 3rd, 2022 | |
| message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6272941-classifications-transition-guide). | |
| description: | | |
| Given a query and a set of labeled examples, the model will predict the most likely label for the query. Useful as a drop-in replacement for any ML classification or text-to-label task. | |
| Related guide: [Classification](/docs/guides/classifications) | |
| - id: answers | |
| title: Answers | |
| warning: | |
| title: This endpoint is deprecated and will be removed on December 3rd, 2022 | |
| message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6233728-answers-transition-guide). | |
| description: | | |
| Given a question, a set of documents, and some examples, the API generates an answer to the question based on the information in the set of documents. This is useful for question-answering applications on sources of truth, like company documentation or a knowledge base. | |
| Related guide: [Question answering](/docs/guides/answers) | |
| - id: engines | |
| title: Engines | |
| description: These endpoints describe and provide access to the various engines available in the API. | |
| warning: | |
| title: The Engines endpoints are deprecated. | |
| message: Please use their replacement, [Models](/docs/api-reference/models), instead. [Learn more](https://help.openai.com/TODO). |