Perform batch inference with OpenAI
With Portkey, you can perform OpenAI Batch Inference operations. This is the most efficient way to process large volumes of requests asynchronously: you submit all requests as a single job and collect the results once the job completes.
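A batch job reads its requests from an uploaded JSONL file, one request per line in the OpenAI batch format. A minimal sketch of preparing and uploading such a file is below; the file name requests.jsonl is illustrative, and the files.create call assumes the Portkey SDK mirrors the OpenAI Files API, as it does for batches.

from portkey_ai import Portkey

# Initialize the Portkey client
portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # Replace with your Portkey API key
    provider="@PROVIDER"
)

# Each line of the input file is one request in the OpenAI batch format, e.g.:
# {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions",
#  "body": {"model": "gpt-4o-mini", "messages": [{"role": "user", "content": "Hello"}]}}

# Upload the file with purpose "batch"; the returned id becomes input_file_id below.
upload = portkey.files.create(
    file=open("requests.jsonl", "rb"),  # illustrative file name
    purpose="batch"
)
print(upload.id)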
Start a batch job by pointing it at the uploaded input file:

from portkey_ai import Portkey

# Initialize the Portkey client
portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # Replace with your Portkey API key
    provider="@PROVIDER"
)

start_batch_response = portkey.batches.create(
    input_file_id="file_id",                # file id of the uploaded input file
    endpoint="endpoint",                    # e.g. /v1/chat/completions
    completion_window="completion_window",  # e.g. 24h
    metadata={}                             # optional metadata for the batch
)

print(start_batch_response)
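The response includes the new batch's id. Keep it: the retrieve and cancel calls below take it as batch_id, and the output endpoint uses it in the URL.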
List all batch jobs for the connected provider:

from portkey_ai import Portkey

# Initialize the Portkey client
portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # Replace with your Portkey API key
    provider="@PROVIDER"
)

batches = portkey.batches.list()
print(batches)
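If you have accumulated many jobs, the list call should accept OpenAI-style pagination parameters; the limit argument and the .data attribute on the returned page are assumptions based on the OpenAI SDK shape that Portkey mirrors.

from portkey_ai import Portkey

portkey = Portkey(
    api_key="PORTKEY_API_KEY",
    provider="@PROVIDER"
)

# Fetch up to 10 batch jobs; `limit` and `page.data` follow the OpenAI SDK shape.
page = portkey.batches.list(limit=10)
for b in page.data:
    print(b.id, b.status)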
Retrieve the status and details of a single batch job:

from portkey_ai import Portkey

# Initialize the Portkey client
portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # Replace with your Portkey API key
    provider="@PROVIDER"
)

batch = portkey.batches.retrieve(batch_id="batch_id")
print(batch)
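Because batch jobs complete asynchronously, a common pattern is to poll retrieve until the job reaches a terminal state. A minimal sketch, assuming the returned object exposes a status field with the OpenAI Batch API statuses:

import time

from portkey_ai import Portkey

portkey = Portkey(
    api_key="PORTKEY_API_KEY",
    provider="@PROVIDER"
)

# Terminal statuses in the OpenAI Batch API; assumed to pass through unchanged.
TERMINAL = {"completed", "failed", "expired", "cancelled"}

while True:
    batch = portkey.batches.retrieve(batch_id="batch_id")
    if batch.status in TERMINAL:
        break
    time.sleep(60)  # polling interval is illustrative

print(batch.status)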
Once the job has completed, fetch the batch output:

curl --location 'https://api.portkey.ai/v1/batches/<batch_id>/output' \
--header 'x-portkey-api-key: <portkey_api_key>' \
--header 'x-portkey-provider: @provider'
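The same endpoint can be called from Python with any HTTP client. A minimal sketch using requests, carrying over the placeholders from the curl example:

import requests

batch_id = "batch_id"  # placeholder, as in the curl example

response = requests.get(
    f"https://api.portkey.ai/v1/batches/{batch_id}/output",
    headers={
        "x-portkey-api-key": "PORTKEY_API_KEY",  # Replace with your Portkey API key
        "x-portkey-provider": "@PROVIDER",
    },
)
print(response.text)  # output is expected to be JSONL, one result per request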
Cancel a running batch job:

from portkey_ai import Portkey

# Initialize the Portkey client
portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # Replace with your Portkey API key
    provider="@PROVIDER"
)

cancel_batch_response = portkey.batches.cancel(batch_id="batch_id")
print(cancel_batch_response)
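Note that cancellation may not be immediate: in the OpenAI Batch API the job first moves to a cancelling status before settling at cancelled, so a follow-up retrieve call may still show the job winding down.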