diff --git a/openapi.yaml b/openapi.yaml
index e614890a..07bbd753 100644
--- a/openapi.yaml
+++ b/openapi.yaml
@@ -36,7 +36,7 @@ tags:
   - name: Models
     description: List and describe the various models available in the API.
   - name: Moderations
-    description: Given a input text, outputs if the model classifies it as potentially harmful.
+    description: Given text and/or image inputs, classifies if those inputs are potentially harmful.
   - name: Audit Logs
     description: List user actions and configuration changes within this organization.
 paths:
@@ -135,7 +135,10 @@ paths:
             "usage": {
               "prompt_tokens": 9,
               "completion_tokens": 12,
-              "total_tokens": 21
+              "total_tokens": 21,
+              "completion_tokens_details": {
+                "reasoning_tokens": 0
+              }
             }
           }
       - title: Image input
@@ -235,7 +238,10 @@ paths:
             "usage": {
               "prompt_tokens": 9,
               "completion_tokens": 12,
-              "total_tokens": 21
+              "total_tokens": 21,
+              "completion_tokens_details": {
+                "reasoning_tokens": 0
+              }
             }
           }
       - title: Streaming
@@ -444,7 +450,10 @@ paths:
             "usage": {
               "prompt_tokens": 82,
               "completion_tokens": 17,
-              "total_tokens": 99
+              "total_tokens": 99,
+              "completion_tokens_details": {
+                "reasoning_tokens": 0
+              }
             }
           }
       - title: Logprobs
@@ -677,7 +686,10 @@ paths:
             "usage": {
               "prompt_tokens": 9,
               "completion_tokens": 9,
-              "total_tokens": 18
+              "total_tokens": 18,
+              "completion_tokens_details": {
+                "reasoning_tokens": 0
+              }
             },
             "system_fingerprint": null
           }
@@ -2724,7 +2736,9 @@ paths:
       operationId: createModeration
       tags:
         - Moderations
-      summary: Classifies if text is potentially harmful.
+      summary: |
+        Classifies if text and/or image inputs are potentially harmful. Learn
+        more in the [moderation guide](/docs/guides/moderation).
       requestBody:
         required: true
         content:
@@ -2743,34 +2757,35 @@ paths:
         group: moderations
         returns: A [moderation](/docs/api-reference/moderations/object) object.
         examples:
-          request:
-            curl: |
-              curl https://api.openai.com/v1/moderations \
-                -H "Content-Type: application/json" \
-                -H "Authorization: Bearer $OPENAI_API_KEY" \
-                -d '{
-                  "input": "I want to kill them."
-                }'
-            python: |
-              from openai import OpenAI
-              client = OpenAI()
+          - title: Single string
+            request:
+              curl: |
+                curl https://api.openai.com/v1/moderations \
+                  -H "Content-Type: application/json" \
+                  -H "Authorization: Bearer $OPENAI_API_KEY" \
+                  -d '{
+                    "input": "I want to kill them."
+                  }'
+              python: |
+                from openai import OpenAI
+                client = OpenAI()
 
-              moderation = client.moderations.create(input="I want to kill them.")
-              print(moderation)
-            node.js: |
-              import OpenAI from "openai";
+                moderation = client.moderations.create(input="I want to kill them.")
+                print(moderation)
+              node.js: |
+                import OpenAI from "openai";
 
-              const openai = new OpenAI();
+                const openai = new OpenAI();
 
-              async function main() {
-                const moderation = await openai.moderations.create({ input: "I want to kill them." });
+                async function main() {
+                  const moderation = await openai.moderations.create({ input: "I want to kill them." });
 
-                console.log(moderation);
-              }
-              main();
-          response: &moderation_example |
+                  console.log(moderation);
+                }
+                main();
+            response: &moderation_example |
               {
-                "id": "modr-XXXXX",
+                "id": "modr-AB8CjOTu2jiq12hp1AQPfeqFWaORR",
                 "model": "text-moderation-007",
                 "results": [
                   {
@@ -2778,7 +2793,7 @@ paths:
                     "categories": {
                       "sexual": false,
                       "hate": false,
-                      "harassment": false,
+                      "harassment": true,
                       "self-harm": false,
                       "sexual/minors": false,
                       "hate/threatening": false,
@@ -2786,20 +2801,166 @@ paths:
                       "self-harm/intent": false,
                       "self-harm/instructions": false,
                       "harassment/threatening": true,
+                      "violence": true
+                    },
+                    "category_scores": {
+                      "sexual": 0.000011726012417057063,
+                      "hate": 0.22706663608551025,
+                      "harassment": 0.5215635299682617,
+                      "self-harm": 2.227119921371923e-6,
+                      "sexual/minors": 7.107352217872176e-8,
+                      "hate/threatening": 0.023547329008579254,
+                      "violence/graphic": 0.00003391829886822961,
+                      "self-harm/intent": 1.646940972932498e-6,
+                      "self-harm/instructions": 1.1198755256458526e-9,
+                      "harassment/threatening": 0.5694745779037476,
+                      "violence": 0.9971134662628174
+                    }
+                  }
+                ]
+              }
+          - title: Image and text
+            request:
+              curl: |
+                curl https://api.openai.com/v1/moderations \
+                  -X POST \
+                  -H "Content-Type: application/json" \
+                  -H "Authorization: Bearer $OPENAI_API_KEY" \
+                  -d '{
+                    "model": "omni-moderation-latest",
+                    "input": [
+                      { "type": "text", "text": "...text to classify goes here..." },
+                      {
+                        "type": "image_url",
+                        "image_url": {
+                          "url": "https://example.com/image.png"
+                        }
+                      }
+                    ]
+                  }'
+              python: |
+                from openai import OpenAI
+                client = OpenAI()
+
+                response = client.moderations.create(
+                    model="omni-moderation-latest",
+                    input=[
+                        {"type": "text", "text": "...text to classify goes here..."},
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": "https://example.com/image.png",
+                                # can also use base64 encoded image URLs
+                                # "url": "data:image/jpeg;base64,abcdefg..."
+                            }
+                        },
+                    ],
+                )
+
+                print(response)
+              node.js: |
+                import OpenAI from "openai";
+                const openai = new OpenAI();
+
+                const moderation = await openai.moderations.create({
+                  model: "omni-moderation-latest",
+                  input: [
+                    { type: "text", text: "...text to classify goes here..." },
+                    {
+                      type: "image_url",
+                      image_url: {
+                        url: "https://example.com/image.png"
+                        // can also use base64 encoded image URLs
+                        // url: "data:image/jpeg;base64,abcdefg..."
+                      }
+                    }
+                  ],
+                });
+
+                console.log(moderation);
+            response: &moderation_example |
+              {
+                "id": "modr-0d9740456c391e43c445bf0f010940c7",
+                "model": "omni-moderation-latest",
+                "results": [
+                  {
+                    "flagged": true,
+                    "categories": {
+                      "harassment": true,
+                      "harassment/threatening": true,
+                      "sexual": false,
+                      "hate": false,
+                      "hate/threatening": false,
+                      "illicit": false,
+                      "illicit/violent": false,
+                      "self-harm/intent": false,
+                      "self-harm/instructions": false,
+                      "self-harm": false,
+                      "sexual/minors": false,
                       "violence": true,
+                      "violence/graphic": true
                     },
                     "category_scores": {
-                      "sexual": 1.2282071e-06,
-                      "hate": 0.010696256,
-                      "harassment": 0.29842457,
-                      "self-harm": 1.5236925e-08,
-                      "sexual/minors": 5.7246268e-08,
-                      "hate/threatening": 0.0060676364,
-                      "violence/graphic": 4.435014e-06,
-                      "self-harm/intent": 8.098441e-10,
-                      "self-harm/instructions": 2.8498655e-11,
-                      "harassment/threatening": 0.63055265,
-                      "violence": 0.99011886,
+                      "harassment": 0.8189693396524255,
+                      "harassment/threatening": 0.804985420696006,
+                      "sexual": 1.573112165348997e-6,
+                      "hate": 0.007562942636942845,
+                      "hate/threatening": 0.004208854591835476,
+                      "illicit": 0.030535955153511665,
+                      "illicit/violent": 0.008925306722380033,
+                      "self-harm/intent": 0.00023023930975076432,
+                      "self-harm/instructions": 0.0002293869201073356,
+                      "self-harm": 0.012598046106750154,
+                      "sexual/minors": 2.212566909570261e-8,
+                      "violence": 0.9999992735124786,
+                      "violence/graphic": 0.843064871157054
+                    },
+                    "category_applied_input_types": {
+                      "harassment": [
+                        "text"
+                      ],
+                      "harassment/threatening": [
+                        "text"
+                      ],
+                      "sexual": [
+                        "text",
+                        "image"
+                      ],
+                      "hate": [
+                        "text"
+                      ],
+                      "hate/threatening": [
+                        "text"
+                      ],
+                      "illicit": [
+                        "text"
+                      ],
+                      "illicit/violent": [
+                        "text"
+                      ],
+                      "self-harm/intent": [
+                        "text",
+                        "image"
+                      ],
+                      "self-harm/instructions": [
+                        "text",
+                        "image"
+                      ],
+                      "self-harm": [
+                        "text",
+                        "image"
+                      ],
+                      "sexual/minors": [
+                        "text"
+                      ],
+                      "violence": [
+                        "text",
+                        "image"
+                      ],
+                      "violence/graphic": [
+                        "text",
+                        "image"
+                      ]
                     }
                   }
                 ]
               }
@@ -10310,28 +10471,73 @@ components:
       type: object
       properties:
         input:
-          description: The input text to classify
+          description: |
+            Input (or inputs) to classify. Can be a single string, an array of strings, or
+            an array of multi-modal input objects similar to other models.
           oneOf:
             - type: string
+              description: A string of text to classify for moderation.
               default: ""
              example: "I want to kill them."
             - type: array
+              description: An array of strings to classify for moderation.
               items:
                 type: string
                 default: ""
                 example: "I want to kill them."
+            - type: array
+              description: An array of multi-modal inputs to the moderation model.
+              items:
+                x-oaiExpandable: true
+                oneOf:
+                  - type: object
+                    description: An object describing an image to classify.
+                    properties:
+                      type:
+                        description: Always `image_url`.
+                        type: string
+                        enum: [ "image_url" ]
+                      image_url:
+                        type: object
+                        description: Contains either an image URL or a data URL for a base64 encoded image.
+                        properties:
+                          url:
+                            type: string
+                            description: Either a URL of the image or the base64 encoded image data.
+                            format: uri
+                            example: "https://example.com/image.jpg"
+                        required:
+                          - url
+                    required:
+                      - type
+                      - image_url
+                  - type: object
+                    description: An object describing text to classify.
+                    properties:
+                      type:
+                        description: Always `text`.
+                        type: string
+                        enum: [ "text" ]
+                      text:
+                        description: A string of text to classify.
+                        type: string
+                        example: "I want to kill them"
+                    required:
+                      - type
+                      - text
+          x-oaiExpandable: true
         model:
           description: |
-            Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
-
-            The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
+            The content moderation model you would like to use. Learn more in
+            [the moderation guide](/docs/guides/moderation), and learn about
+            available models [here](/docs/models/moderation).
           nullable: false
-          default: "text-moderation-latest"
-          example: "text-moderation-stable"
+          default: "omni-moderation-latest"
+          example: "omni-moderation-2024-09-26"
           anyOf:
             - type: string
             - type: string
-              enum: ["text-moderation-latest", "text-moderation-stable"]
+              enum: ["omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable"]
           x-oaiTypeLabel: string
       required:
         - input
@@ -10371,6 +10577,12 @@ components:
             harassment/threatening:
               type: boolean
               description: Harassment content that also includes violence or serious harm towards any target.
+            illicit:
+              type: boolean
+              description: Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category.
+            illicit/violent:
+              type: boolean
+              description: Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or instruction on the procurement of any weapon.
             self-harm:
               type: boolean
               description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
@@ -10397,6 +10609,8 @@ components:
             - hate/threatening
             - harassment
             - harassment/threatening
+            - illicit
+            - illicit/violent
             - self-harm
             - self-harm/intent
             - self-harm/instructions
@@ -10420,6 +10634,12 @@ components:
             harassment/threatening:
               type: number
               description: The score for the category 'harassment/threatening'.
+            illicit:
+              type: number
+              description: The score for the category 'illicit'.
+            illicit/violent:
+              type: number
+              description: The score for the category 'illicit/violent'.
             self-harm:
               type: number
               description: The score for the category 'self-harm'.
@@ -10446,6 +10666,104 @@ components:
             - hate/threatening
             - harassment
             - harassment/threatening
+            - illicit
+            - illicit/violent
+            - self-harm
+            - self-harm/intent
+            - self-harm/instructions
+            - sexual
+            - sexual/minors
+            - violence
+            - violence/graphic
+        category_applied_input_types:
+          type: object
+          description: A list of the categories along with the input type(s) that the score applies to.
+          properties:
+            hate:
+              type: array
+              description: The applied input type(s) for the category 'hate'.
+              items:
+                type: string
+                enum: [ "text" ]
+            hate/threatening:
+              type: array
+              description: The applied input type(s) for the category 'hate/threatening'.
+              items:
+                type: string
+                enum: [ "text" ]
+            harassment:
+              type: array
+              description: The applied input type(s) for the category 'harassment'.
+              items:
+                type: string
+                enum: [ "text" ]
+            harassment/threatening:
+              type: array
+              description: The applied input type(s) for the category 'harassment/threatening'.
+              items:
+                type: string
+                enum: [ "text" ]
+            illicit:
+              type: array
+              description: The applied input type(s) for the category 'illicit'.
+              items:
+                type: string
+                enum: [ "text" ]
+            illicit/violent:
+              type: array
+              description: The applied input type(s) for the category 'illicit/violent'.
+              items:
+                type: string
+                enum: [ "text" ]
+            self-harm:
+              type: array
+              description: The applied input type(s) for the category 'self-harm'.
+              items:
+                type: string
+                enum: [ "text", "image" ]
+            self-harm/intent:
+              type: array
+              description: The applied input type(s) for the category 'self-harm/intent'.
+              items:
+                type: string
+                enum: [ "text", "image" ]
+            self-harm/instructions:
+              type: array
+              description: The applied input type(s) for the category 'self-harm/instructions'.
+              items:
+                type: string
+                enum: [ "text", "image" ]
+            sexual:
+              type: array
+              description: The applied input type(s) for the category 'sexual'.
+              items:
+                type: string
+                enum: [ "text", "image" ]
+            sexual/minors:
+              type: array
+              description: The applied input type(s) for the category 'sexual/minors'.
+              items:
+                type: string
+                enum: [ "text" ]
+            violence:
+              type: array
+              description: The applied input type(s) for the category 'violence'.
+              items:
+                type: string
+                enum: [ "text", "image" ]
+            violence/graphic:
+              type: array
+              description: The applied input type(s) for the category 'violence/graphic'.
+              items:
+                type: string
+                enum: [ "text", "image" ]
+          required:
+            - hate
+            - hate/threatening
+            - harassment
+            - harassment/threatening
+            - illicit
+            - illicit/violent
             - self-harm
             - self-harm/intent
             - self-harm/instructions
             - sexual
             - sexual/minors
             - violence
             - violence/graphic
@@ -10457,6 +10775,7 @@ components:
           - flagged
           - categories
           - category_scores
+          - category_applied_input_types
       required:
         - id
         - model
@@ -10900,7 +11219,16 @@ components:
             An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
           type: string
         response_format:
-          $ref: "#/components/schemas/AudioResponseFormat"
+          description: |
+            The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+          type: string
+          enum:
+            - json
+            - text
+            - srt
+            - verbose_json
+            - vtt
+          default: json
         temperature:
           description: |
             The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
@@ -11033,18 +11361,6 @@ components:
           group: audio
           example: *verbose_transcription_response_example
 
-    AudioResponseFormat:
-      description: |
-        The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
-      type: string
-      enum:
-        - json
-        - text
-        - srt
-        - verbose_json
-        - vtt
-      default: json
-
     CreateTranslationRequest:
       type: object
       additionalProperties: false
@@ -11069,7 +11385,10 @@ components:
             An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
           type: string
         response_format:
-          $ref: "#/components/schemas/AudioResponseFormat"
+          description: |
+            The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+          type: string
+          default: json
         temperature:
           description: |
             The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
@@ -16808,7 +17127,7 @@ x-oaiMeta:
       - id: moderations
         title: Moderations
        description: |
-          Given some input text, outputs if the model classifies it as potentially harmful across several categories.
+          Given text and/or image inputs, classifies if those inputs are potentially harmful across several categories.
 
           Related guide: [Moderations](/docs/guides/moderation)
         navigationGroup: endpoints
@@ -17229,4 +17548,4 @@ x-oaiMeta:
             path: create
           - type: object
             key: CreateCompletionResponse
-            path: object
+            path: object
\ No newline at end of file
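Usage sketch (not part of the diff): the snippet below exercises the two moderation changes this diff introduces, the multi-modal `CreateModerationRequest` input (an array of `text` and `image_url` parts) and the new `category_applied_input_types` field on the Moderation object. It is a minimal illustration, not the reference client: it assumes an `OPENAI_API_KEY` environment variable and the third-party `requests` library, and the text and image URL are placeholders. The endpoint, payload shape, and field names come directly from the schema above.

```python
# Minimal sketch of the new moderation request/response shape.
# Assumes OPENAI_API_KEY is set and `requests` is installed.
import os

import requests

resp = requests.post(
    "https://api.openai.com/v1/moderations",
    headers={
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
        "Content-Type": "application/json",
    },
    json={
        "model": "omni-moderation-latest",
        "input": [
            # Multi-modal input per the new oneOf branch: one text part
            # and one image_url part (the URL here is a placeholder).
            {"type": "text", "text": "...text to classify goes here..."},
            {"type": "image_url", "image_url": {"url": "https://example.com/image.png"}},
        ],
    },
    timeout=30,
)
resp.raise_for_status()
result = resp.json()["results"][0]

# category_applied_input_types maps each category to the input type(s)
# ("text" and/or "image") that its score applies to, so a caller can see
# which modality produced each flag.
for category, flagged in result["categories"].items():
    if flagged:
        applied = result["category_applied_input_types"][category]
        print(f"{category}: flagged (applies to {', '.join(applied)})")
```

Note the asymmetry the schema encodes: categories such as `illicit` and `sexual/minors` list only `"text"` in their `enum`, so an image-only request cannot trip them, which is why the response reports applied input types per category rather than globally.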