Creates and executes a batch from an uploaded file of requests
Parameters
completion_window
The time frame within which the batch should be processed. Currently only 24h is supported.

endpoint
The endpoint to be used for all requests in the batch.

input_file_id
The ID of an uploaded file that contains requests for the new batch.
See upload file for how to upload a file.
Your input file must be formatted as a JSONL file, and must be uploaded with the purpose batch. The file can contain up to 50,000 requests, and can be up to 200 MB in size. A sketch of preparing and uploading such a file follows this parameter list.

metadata
Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.
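Since the input file must be a JSONL file uploaded with the purpose batch, a typical flow is to write one request per line, upload the file, and pass its ID to batches.create. The sketch below assumes the /v1/responses request shape; the model name, prompts, filename, and metadata values are illustrative placeholders.

import json
from openai import OpenAI

client = OpenAI()

# One request per line; custom_id lets each output be matched back to its input.
requests = [
    {
        "custom_id": f"request-{i}",
        "method": "POST",
        "url": "/v1/responses",
        "body": {"model": "gpt-4o-mini", "input": prompt},  # model name is a placeholder
    }
    for i, prompt in enumerate(["Hello, world", "Summarize the Batch API"], start=1)
]

with open("batchinput.jsonl", "w") as f:
    for request in requests:
        f.write(json.dumps(request) + "\n")

# Upload the JSONL file with purpose "batch", then reference its ID in batches.create.
batch_input_file = client.files.create(
    file=open("batchinput.jsonl", "rb"),
    purpose="batch",
)

batch = client.batches.create(
    input_file_id=batch_input_file.id,
    endpoint="/v1/responses",
    completion_window="24h",
    metadata={"job": "nightly-eval"},  # optional; keys <= 64 chars, values <= 512 chars
)
print(batch.id)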
Returns
The created Batch object.
Create batch
import os
from openai import OpenAI

client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),  # This is the default and can be omitted
)
batch = client.batches.create(
    completion_window="24h",
    endpoint="/v1/responses",
    input_file_id="input_file_id",
)
print(batch.id)

Returns Examples
{
  "id": "id",
  "completion_window": "completion_window",
  "created_at": 0,
  "endpoint": "endpoint",
  "input_file_id": "input_file_id",
  "object": "batch",
  "status": "validating",
  "cancelled_at": 0,
  "cancelling_at": 0,
  "completed_at": 0,
  "error_file_id": "error_file_id",
  "errors": {
    "data": [
      {
        "code": "code",
        "line": 0,
        "message": "message",
        "param": "param"
      }
    ],
    "object": "object"
  },
  "expired_at": 0,
  "expires_at": 0,
  "failed_at": 0,
  "finalizing_at": 0,
  "in_progress_at": 0,
  "metadata": {
    "foo": "string"
  },
  "model": "model",
  "output_file_id": "output_file_id",
  "request_counts": {
    "completed": 0,
    "failed": 0,
    "total": 0
  },
  "usage": {
    "input_tokens": 0,
    "input_tokens_details": {
      "cached_tokens": 0
    },
    "output_tokens": 0,
    "output_tokens_details": {
      "reasoning_tokens": 0
    },
    "total_tokens": 0
  }
}
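The batch returned by the create call starts in the validating status shown above and is processed asynchronously within the completion window. One common pattern is to poll the batch ID until it reaches a terminal status and then download the output or error file. A minimal sketch, assuming a placeholder batch ID and arbitrary local filenames:

import time
from openai import OpenAI

client = OpenAI()

batch_id = "batch_abc123"  # placeholder: use the id returned by batches.create

# Poll until the batch reaches a terminal status.
while True:
    batch = client.batches.retrieve(batch_id)
    if batch.status in ("completed", "failed", "expired", "cancelled"):
        break
    time.sleep(60)  # batches can take minutes to hours, so poll sparingly

if batch.status == "completed" and batch.output_file_id:
    # Each output line is a JSON object keyed by the request's custom_id.
    output = client.files.content(batch.output_file_id)
    with open("batch_output.jsonl", "w") as f:
        f.write(output.text)
elif batch.error_file_id:
    errors = client.files.content(batch.error_file_id)
    with open("batch_errors.jsonl", "w") as f:
        f.write(errors.text)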