
Realtime Endpoints

Use this to load balance realtime requests across Azure and OpenAI deployments (see the load-balancing config sketch below).

Proxy Usage

Add model to config

model_list:
  - model_name: openai-gpt-4o-realtime-audio
    litellm_params:
      model: openai/gpt-4o-realtime-preview-2024-10-01
      api_key: os.environ/OPENAI_API_KEY

Start proxy

litellm --config /path/to/config.yaml 

# RUNNING on http://0.0.0.0:4000

Test

Run this script using Node.js: node test.js

// test.js
const WebSocket = require("ws");

const url = "ws://0.0.0.0:4000/v1/realtime?model=openai-gpt-4o-realtime-audio";
// const url = "wss://my-endpoint-sweden-berri992.openai.azure.com/openai/realtime?api-version=2024-10-01-preview&deployment=gpt-4o-realtime-preview";
const ws = new WebSocket(url, {
  headers: {
    "api-key": "YOUR_LITELLM_API_KEY", // replace with your proxy API key
    "OpenAI-Beta": "realtime=v1",
  },
});

ws.on("open", function open() {
console.log("Connected to server.");
ws.send(JSON.stringify({
type: "response.create",
response: {
modalities: ["text"],
instructions: "Please assist the user.",
}
}));
});

ws.on("message", function incoming(message) {
console.log(JSON.parse(message.toString()));
});

ws.on("error", function handleError(error) {
console.error("Error: ", error);
});