Cancel batch
batches.cancel(batch_id) -> Batch { id, completion_window, created_at, 19 more }
POST /batches/{batch_id}/cancel
Cancels an in-progress batch. The batch will be in status cancelling for up to 10 minutes before changing to cancelled, at which point partial results (if any) are available in the output file.
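Because cancellation is asynchronous, a caller that wants the partial results typically polls the batch until it leaves the cancelling status and then downloads the output file. The sketch below illustrates that flow; it assumes the SDK's batches.retrieve and files.content methods, and the 30-second polling interval is an arbitrary choice, not part of the API.
require "openai"

openai = OpenAI::Client.new(api_key: "My API Key")

batch = openai.batches.cancel("batch_id")

# Poll while the batch sits in the transient cancelling status;
# the transition to cancelled can take up to 10 minutes.
while batch.status.to_s == "cancelling"
  sleep(30)
  batch = openai.batches.retrieve("batch_id")
end

# Once cancelled, partial results (if any) are in the output file.
if batch.status.to_s == "cancelled" && batch.output_file_id
  content = openai.files.content(batch.output_file_id)
  puts(content.read) # assumed IO-like object, one JSONL line per completed request
end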
Parameters
batch_id: String
Returns
Batch
Cancel batch
require "openai"
openai = OpenAI::Client.new(api_key: "My API Key") # defaults to ENV["OPENAI_API_KEY"] if omitted
batch = openai.batches.cancel("batch_id")
puts(batch)
"id": "id",
"completion_window": "completion_window",
"created_at": 0,
"endpoint": "endpoint",
"input_file_id": "input_file_id",
"object": "batch",
"status": "validating",
"cancelled_at": 0,
"cancelling_at": 0,
"completed_at": 0,
"error_file_id": "error_file_id",
"errors": {
"data": [
{
"code": "code",
"line": 0,
"message": "message",
"param": "param"
}
],
"object": "object"
},
"expired_at": 0,
"expires_at": 0,
"failed_at": 0,
"finalizing_at": 0,
"in_progress_at": 0,
"metadata": {
"foo": "string"
},
"model": "model",
"output_file_id": "output_file_id",
"request_counts": {
"completed": 0,
"failed": 0,
"total": 0
},
"usage": {
"input_tokens": 0,
"input_tokens_details": {
"cached_tokens": 0
},
"output_tokens": 0,
"output_tokens_details": {
"reasoning_tokens": 0
},
"total_tokens": 0
}
Returns Examples
{
"id": "id",
"completion_window": "completion_window",
"created_at": 0,
"endpoint": "endpoint",
"input_file_id": "input_file_id",
"object": "batch",
"status": "validating",
"cancelled_at": 0,
"cancelling_at": 0,
"completed_at": 0,
"error_file_id": "error_file_id",
"errors": {
"data": [
{
"code": "code",
"line": 0,
"message": "message",
"param": "param"
}
],
"object": "object"
},
"expired_at": 0,
"expires_at": 0,
"failed_at": 0,
"finalizing_at": 0,
"in_progress_at": 0,
"metadata": {
"foo": "string"
},
"model": "model",
"output_file_id": "output_file_id",
"request_counts": {
"completed": 0,
"failed": 0,
"total": 0
},
"usage": {
"input_tokens": 0,
"input_tokens_details": {
"cached_tokens": 0
},
"output_tokens": 0,
"output_tokens_details": {
"reasoning_tokens": 0
},
"total_tokens": 0
}
}