Cancel batch
client.batches.cancel(batchID: string, options?: RequestOptions): Batch { id, completion_window, created_at, 19 more }
POST /batches/{batch_id}/cancel
Cancels an in-progress batch. The batch will be in status cancelling for up to 10 minutes, before changing to cancelled, where it will have partial results (if any) available in the output file.
Parameters
batchID: string
Returns
Cancel batch
// Example: cancel an in-progress batch with the official OpenAI Node SDK.
import OpenAI from 'openai';
const client = new OpenAI({
apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted
});
// Replace 'batch_id' with the ID of the batch to cancel (returned by batches.create).
const batch = await client.batches.cancel('batch_id');
console.log(batch.id);

{
"id": "id",
"completion_window": "completion_window",
"created_at": 0,
"endpoint": "endpoint",
"input_file_id": "input_file_id",
"object": "batch",
"status": "validating",
"cancelled_at": 0,
"cancelling_at": 0,
"completed_at": 0,
"error_file_id": "error_file_id",
"errors": {
"data": [
{
"code": "code",
"line": 0,
"message": "message",
"param": "param"
}
],
"object": "object"
},
"expired_at": 0,
"expires_at": 0,
"failed_at": 0,
"finalizing_at": 0,
"in_progress_at": 0,
"metadata": {
"foo": "string"
},
"model": "model",
"output_file_id": "output_file_id",
"request_counts": {
"completed": 0,
"failed": 0,
"total": 0
},
"usage": {
"input_tokens": 0,
"input_tokens_details": {
"cached_tokens": 0
},
"output_tokens": 0,
"output_tokens_details": {
"reasoning_tokens": 0
},
"total_tokens": 0
}
}

Returns Examples
{
"id": "id",
"completion_window": "completion_window",
"created_at": 0,
"endpoint": "endpoint",
"input_file_id": "input_file_id",
"object": "batch",
"status": "validating",
"cancelled_at": 0,
"cancelling_at": 0,
"completed_at": 0,
"error_file_id": "error_file_id",
"errors": {
"data": [
{
"code": "code",
"line": 0,
"message": "message",
"param": "param"
}
],
"object": "object"
},
"expired_at": 0,
"expires_at": 0,
"failed_at": 0,
"finalizing_at": 0,
"in_progress_at": 0,
"metadata": {
"foo": "string"
},
"model": "model",
"output_file_id": "output_file_id",
"request_counts": {
"completed": 0,
"failed": 0,
"total": 0
},
"usage": {
"input_tokens": 0,
"input_tokens_details": {
"cached_tokens": 0
},
"output_tokens": 0,
"output_tokens_details": {
"reasoning_tokens": 0
},
"total_tokens": 0
}
}