
openai

LLMeter targets for testing OpenAI ChatCompletions-compatible endpoints (wherever they're hosted)

OpenAICompletionEndpoint

OpenAICompletionEndpoint(model_id, endpoint_name='openai', api_key=None, provider='openai', **kwargs)

Bases: OpenAIEndpoint[ChatCompletion]

Endpoint for OpenAI-compatible Chat Completion APIs (non-streaming mode)
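
A minimal usage sketch (the model ID and prompt are illustrative; invoke is assumed to return an LLMeter InvocationResponse via the llmeter_invoke wrapper):

from llmeter.endpoints.openai import OpenAICompletionEndpoint

# If api_key is None, the OpenAI client falls back to the OPENAI_API_KEY environment variable.
endpoint = OpenAICompletionEndpoint(model_id="gpt-4o-mini")

# Build a Chat Completions payload and invoke the endpoint synchronously.
payload = OpenAICompletionEndpoint.create_payload("Hello!", max_tokens=64)
response = endpoint.invoke(payload)
print(response.response_text)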

Source code in llmeter/endpoints/openai.py
def __init__(
    self,
    model_id: str,
    endpoint_name: str = "openai",
    api_key: str | None = None,
    provider: str = "openai",
    **kwargs: Any,
):
    """Initialize OpenAI endpoint.

    Args:
        model_id: ID of the OpenAI model to use
        endpoint_name: Name of the endpoint. Defaults to "openai".
        api_key: OpenAI API key. Defaults to None.
        provider: Provider name. Defaults to "openai".
        **kwargs: Additional arguments passed to OpenAI client
    """
    super().__init__(endpoint_name, model_id, provider=provider)
    self._client = OpenAI(api_key=api_key, **kwargs)

invoke

invoke(payload)

Invoke the OpenAI chat completion API.

Source code in llmeter/endpoints/openai.py
@OpenAIEndpoint.llmeter_invoke
def invoke(self, payload: CompletionCreateParamsNonStreaming) -> ChatCompletion:
    """Invoke the OpenAI chat completion API."""
    client_response: ChatCompletion = self._client.chat.completions.create(
        **payload
    )
    return client_response

prepare_payload

prepare_payload(payload)

Ensure payload specifies correct model ID and streaming disabled

Source code in llmeter/endpoints/openai.py
def prepare_payload(self, payload):
    """Ensure payload specifies correct model ID and streaming disabled"""
    return {
        **payload,
        "model": self.model_id,
        "stream": False,
    }
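
For illustration, prepare_payload simply merges the caller's payload with the endpoint's model ID and forces non-streaming mode (any caller-supplied model or stream value is overwritten). Assuming an endpoint constructed with model_id="gpt-4o-mini":

payload = {"messages": [{"role": "user", "content": "Hello"}], "max_tokens": 64}
endpoint.prepare_payload(payload)
# -> {"messages": [...], "max_tokens": 64, "model": "gpt-4o-mini", "stream": False}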

OpenAICompletionStreamEndpoint

OpenAICompletionStreamEndpoint(model_id, endpoint_name='openai', api_key=None, provider='openai', **kwargs)

Bases: OpenAIEndpoint[Iterable[ChatCompletionChunk]]

Endpoint for OpenAI-compatible Chat Completion APIs (streaming mode)
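
A minimal streaming sketch, analogous to the non-streaming example above (model ID and prompt are illustrative):

from llmeter.endpoints.openai import OpenAICompletionStreamEndpoint

stream_endpoint = OpenAICompletionStreamEndpoint(model_id="gpt-4o-mini")
payload = OpenAICompletionStreamEndpoint.create_payload("Tell me a joke", max_tokens=64)

# invoke consumes the chunk stream internally (see process_raw_response below) and is
# assumed to return an InvocationResponse with streaming metrics populated.
response = stream_endpoint.invoke(payload)
print(response.time_to_first_token, response.time_to_last_token)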

Source code in llmeter/endpoints/openai.py
def __init__(
    self,
    model_id: str,
    endpoint_name: str = "openai",
    api_key: str | None = None,
    provider: str = "openai",
    **kwargs: Any,
):
    """Initialize OpenAI endpoint.

    Args:
        model_id: ID of the OpenAI model to use
        endpoint_name: Name of the endpoint. Defaults to "openai".
        api_key: OpenAI API key. Defaults to None.
        provider: Provider name. Defaults to "openai".
        **kwargs: Additional arguments passed to OpenAI client
    """
    super().__init__(endpoint_name, model_id, provider=provider)
    self._client = OpenAI(api_key=api_key, **kwargs)

invoke

invoke(payload)

Invoke the OpenAI streaming chat completion API.

Source code in llmeter/endpoints/openai.py
@OpenAIEndpoint.llmeter_invoke
def invoke(self, payload: CompletionCreateParamsStreaming):
    """Invoke the OpenAI streaming chat completion API."""
    client_response = self._client.chat.completions.create(**payload)
    return client_response

prepare_payload

prepare_payload(payload)

Ensure payload specifies correct model ID and streaming settings

Source code in llmeter/endpoints/openai.py
def prepare_payload(self, payload):
    """Ensure payload specifies correct model ID and streaming settings"""
    payload = {
        **payload,
        "model": self.model_id,
        "stream": True,
    }
    if not payload.get("stream_options"):
        payload["stream_options"] = {"include_usage": True}
    return payload
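
As a sketch of the transformation: stream is forced to True and, unless the caller already set stream_options, include_usage is enabled so the final chunk reports token counts. Assuming an endpoint constructed with model_id="gpt-4o-mini":

payload = {"messages": [{"role": "user", "content": "Hi"}], "max_tokens": 32}
stream_endpoint.prepare_payload(payload)
# -> {"messages": [...], "max_tokens": 32, "model": "gpt-4o-mini", "stream": True,
#     "stream_options": {"include_usage": True}}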

process_raw_response

process_raw_response(raw_response, start_t, response)

Parse the streaming API response from OpenAI chat completion API.

Source code in llmeter/endpoints/openai.py
def process_raw_response(
    self,
    raw_response: Iterable[ChatCompletionChunk],
    start_t: float,
    response: InvocationResponse,
) -> None:
    """Parse the streaming API response from OpenAI chat completion API."""
    got_chunk_id = False
    for chunk in raw_response:
        now = time.perf_counter()

        if not got_chunk_id and chunk.id is not None:
            response.id = chunk.id
            got_chunk_id = True

        if chunk.choices:
            content = chunk.choices[0].delta.content
            if content:
                if response.response_text is None:
                    response.time_to_first_token = now - start_t
                    response.response_text = content
                else:
                    response.response_text += content
                response.time_to_last_token = now - start_t

        if chunk.usage is not None:
            response.num_tokens_input = chunk.usage.prompt_tokens
            response.num_tokens_output = chunk.usage.completion_tokens
            if chunk.usage.prompt_tokens_details:
                response.num_tokens_input_cached = getattr(
                    chunk.usage.prompt_tokens_details, "cached_tokens", None
                )
            if chunk.usage.completion_tokens_details:
                response.num_tokens_output_reasoning = getattr(
                    chunk.usage.completion_tokens_details,
                    "reasoning_tokens",
                    None,
                )
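
Based on the parsing above, a streaming invocation populates the following InvocationResponse fields (exact values depend on the run):

response.response_text                 # concatenated delta content from all chunks
response.time_to_first_token           # seconds from start_t to the first content chunk
response.time_to_last_token            # seconds from start_t to the last content chunk
response.num_tokens_input              # prompt_tokens from the usage chunk
response.num_tokens_output             # completion_tokens from the usage chunk
response.num_tokens_input_cached       # cached_tokens, when prompt_tokens_details is present
response.num_tokens_output_reasoning   # reasoning_tokens, when completion_tokens_details is present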

OpenAIEndpoint

OpenAIEndpoint(model_id, endpoint_name='openai', api_key=None, provider='openai', **kwargs)

Bases: Endpoint[TOpenAICompletionBase], Generic[TOpenAICompletionBase]

Base class for OpenAI API endpoints.

Parameters:

Name           Type         Description                                    Default
model_id       str          ID of the OpenAI model to use                  required
endpoint_name  str          Name of the endpoint. Defaults to "openai".    'openai'
api_key        str | None   OpenAI API key. Defaults to None.              None
provider       str          Provider name. Defaults to "openai".           'openai'
**kwargs       Any          Additional arguments passed to OpenAI client   {}
Source code in llmeter/endpoints/openai.py
def __init__(
    self,
    model_id: str,
    endpoint_name: str = "openai",
    api_key: str | None = None,
    provider: str = "openai",
    **kwargs: Any,
):
    """Initialize OpenAI endpoint.

    Args:
        model_id: ID of the OpenAI model to use
        endpoint_name: Name of the endpoint. Defaults to "openai".
        api_key: OpenAI API key. Defaults to None.
        provider: Provider name. Defaults to "openai".
        **kwargs: Additional arguments passed to OpenAI client
    """
    super().__init__(endpoint_name, model_id, provider=provider)
    self._client = OpenAI(api_key=api_key, **kwargs)
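
Because **kwargs is forwarded to the OpenAI client constructor, any OpenAI-compatible server can be targeted by passing base_url (a standard OpenAI client argument). A sketch with placeholder values:

endpoint = OpenAICompletionEndpoint(
    model_id="my-local-model",
    endpoint_name="local-server",
    api_key="not-needed",                 # many self-hosted servers ignore the key
    base_url="http://localhost:8000/v1",  # forwarded to OpenAI(base_url=...)
)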

create_payload staticmethod

create_payload(user_message, max_tokens=256, **kwargs)

Create a payload for the OpenAI Chat Completions API request.

This is a convenience helper. You can also build the payload directly using openai.types.chat.CompletionCreateParams (though the model field is optional).

Parameters:

Name          Type                      Description                                                                                           Default
user_message  str | list[ContentItem]   A single text string, or an ordered list mixing strings and llmeter.prompt_utils.MediaContent objects.  required
max_tokens    int                       Maximum tokens in response. Defaults to 256.                                                          256
**kwargs      Any                       Additional payload parameters.                                                                        {}

Returns:

Name   Type                     Description
dict   CompletionCreateParams   Formatted OpenAI CompletionCreateParams input payload

Examples:

Text only:

create_payload("Hello")

Image with text:

create_payload([
    ImageContent.from_path("photo.jpg"),
    "What's in this image?",
])
Source code in llmeter/endpoints/openai.py
@staticmethod
def create_payload(
    user_message: str | list[ContentItem], max_tokens: int = 256, **kwargs: Any
) -> CompletionCreateParams:
    """Create a payload for the OpenAI Chat Completions API request.

    This is a convenience helper. You can also build the payload directly
    using ``openai.types.chat.CompletionCreateParams`` (though the `model`
    field is optional).

    Args:
        user_message: A single text string, or an ordered list mixing strings
            and :class:`~llmeter.prompt_utils.MediaContent` objects.
        max_tokens: Maximum tokens in response. Defaults to 256.
        **kwargs: Additional payload parameters.

    Returns:
        dict: Formatted OpenAI CompletionCreateParams input payload

    Examples:
        Text only::

            create_payload("Hello")

        Image with text::

            create_payload([
                ImageContent.from_path("photo.jpg"),
                "What's in this image?",
            ])

    """
    if not isinstance(max_tokens, int) or max_tokens <= 0:
        raise ValueError("max_tokens must be a positive integer")

    if isinstance(user_message, str):
        items: list[ContentItem] = [user_message]
    elif isinstance(user_message, list):
        items = user_message
    else:
        raise TypeError(
            "user_message must be a str or list of str/MediaContent, "
            f"got {type(user_message).__name__}"
        )

    if not items:
        raise ValueError("user_message must not be empty")

    # Text-only shortcut: single string → simple content field
    if len(items) == 1 and isinstance(items[0], str):
        payload = {
            "messages": [{"role": "user", "content": items[0]}],
            "max_tokens": max_tokens,
        }
    else:
        content_blocks = _build_content_blocks_openai(items)
        payload = {
            "messages": [{"role": "user", "content": content_blocks}],
            "max_tokens": max_tokens,
        }
    payload.update(kwargs)
    return cast(CompletionCreateParams, payload)
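
A quick illustration of the returned structure (extra kwargs such as temperature pass through unchanged; the model field is added later by prepare_payload):

OpenAIEndpoint.create_payload("Hello", max_tokens=128, temperature=0.2)
# -> {"messages": [{"role": "user", "content": "Hello"}],
#     "max_tokens": 128, "temperature": 0.2}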