Skip to content

openai

openai

LLMeter targets for testing OpenAI ChatCompletions-compatible endpoints (wherever they're hosted)

OpenAICompletionEndpoint

OpenAICompletionEndpoint(model_id, endpoint_name='openai', api_key=None, provider='openai', **kwargs)

Bases: OpenAIEndpoint

Endpoint for OpenAI-compatible Chat Completion APIs (non-streaming mode)

Source code in llmeter/endpoints/openai.py
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
def __init__(
    self,
    model_id: str,
    endpoint_name: str = "openai",
    api_key: str | None = None,
    provider: str = "openai",
    **kwargs: Any,
):
    """Initialize the OpenAI endpoint.

    Args:
        model_id (str): ID of the OpenAI model to use.
        endpoint_name (str, optional): Name of the endpoint. Defaults to "openai".
        api_key (str | None, optional): OpenAI API key. Defaults to None.
        provider (str, optional): Provider name. Defaults to "openai".
        **kwargs (Any): Additional arguments forwarded to the OpenAI client.
    """
    super().__init__(endpoint_name, model_id, provider=provider)

    # Extra keyword arguments (e.g. base_url, timeout) are passed
    # straight through to the underlying SDK client.
    self._client = OpenAI(api_key=api_key, **kwargs)

invoke

invoke(payload, **kwargs)

Invoke the OpenAI chat completion API.

Parameters:

Name Type Description Default
payload Dict

Request payload

required
**kwargs Any

Additional parameters for the request

{}

Returns:

Name Type Description
InvocationResponse InvocationResponse

Response from the API

Source code in llmeter/endpoints/openai.py
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
def invoke(self, payload: Dict, **kwargs: Any) -> InvocationResponse:
    """Invoke the OpenAI chat completion API.

    Args:
        payload (Dict): Request payload (e.g. "messages"). Keys present in
            `payload` take precedence over duplicates supplied via **kwargs.
        **kwargs (Any): Additional parameters for the request.

    Returns:
        InvocationResponse: Parsed response from the API, or an error
            response if the request raised.
    """
    # Merge extras first so explicit payload keys win; the model is always
    # pinned to this endpoint's configured model_id.
    payload = {**kwargs, **payload}
    payload["model"] = self.model_id

    start_t = time.perf_counter()
    try:
        client_response: ChatCompletion = self._client.chat.completions.create(
            **payload
        )
    # Deliberately broad: any failure of a single request should be reported
    # as an error result rather than abort the whole test run. The previous
    # `except (APIConnectionError, Exception)` tuple was redundant —
    # Exception already subsumes APIConnectionError.
    except Exception as e:
        logger.error(e)
        return InvocationResponse.error_output(
            input_payload=payload, id=uuid4().hex, error=str(e)
        )

    response = self._parse_converse_response(client_response, start_t)
    response.input_payload = payload
    response.input_prompt = self._parse_payload(payload)
    return response

OpenAICompletionStreamEndpoint

OpenAICompletionStreamEndpoint(model_id, endpoint_name='openai', api_key=None, provider='openai', **kwargs)

Bases: OpenAIEndpoint

Endpoint for OpenAI-compatible Chat Completion APIs (streaming mode)

Source code in llmeter/endpoints/openai.py
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
def __init__(
    self,
    model_id: str,
    endpoint_name: str = "openai",
    api_key: str | None = None,
    provider: str = "openai",
    **kwargs: Any,
):
    """Initialize the OpenAI endpoint.

    Args:
        model_id (str): ID of the OpenAI model to use.
        endpoint_name (str, optional): Name of the endpoint. Defaults to "openai".
        api_key (str | None, optional): OpenAI API key. Defaults to None.
        provider (str, optional): Provider name. Defaults to "openai".
        **kwargs (Any): Additional arguments forwarded to the OpenAI client.
    """
    super().__init__(endpoint_name, model_id, provider=provider)

    # Extra keyword arguments (e.g. base_url, timeout) are passed
    # straight through to the underlying SDK client.
    self._client = OpenAI(api_key=api_key, **kwargs)

invoke

invoke(payload, **kwargs)

Invoke the OpenAI streaming chat completion API.

Parameters:

Name Type Description Default
payload Dict

Request payload

required
**kwargs Any

Additional parameters for the request

{}

Returns:

Name Type Description
InvocationResponse InvocationResponse

Response from the API

Source code in llmeter/endpoints/openai.py
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
def invoke(self, payload: Dict, **kwargs: Any) -> InvocationResponse:
    """Invoke the OpenAI streaming chat completion API.

    Args:
        payload (Dict): Request payload (e.g. "messages"). Keys present in
            `payload` take precedence over duplicates supplied via **kwargs.
        **kwargs (Any): Additional parameters for the request.

    Returns:
        InvocationResponse: Parsed response from the API, or an error
            response if the request raised.
    """
    # Merge extras first so explicit payload keys win; the model is always
    # pinned to this endpoint's configured model_id.
    payload = {**kwargs, **payload}
    payload["model"] = self.model_id

    # Force streaming mode and ask the server to append token usage to the
    # final chunk. Caller-supplied truthy "stream" settings are left alone
    # (including any stream_options they chose).
    if not payload.get("stream"):
        payload["stream"] = True
        payload["stream_options"] = {"include_usage": True}

    # Timer starts before the request, matching the non-streaming invoke.
    start_t = time.perf_counter()
    try:
        client_response: ChatCompletion = self._client.chat.completions.create(
            **payload
        )
    # Deliberately broad: any failure of a single request should be reported
    # as an error result rather than abort the whole test run. The previous
    # `except (APIConnectionError, Exception)` tuple was redundant —
    # Exception already subsumes APIConnectionError.
    except Exception as e:
        logger.error(e)
        return InvocationResponse.error_output(
            input_payload=payload, id=uuid4().hex, error=str(e)
        )

    response = self._parse_converse_stream_response(client_response, start_t)
    response.input_payload = payload
    response.input_prompt = self._parse_payload(payload)
    return response

OpenAIEndpoint

OpenAIEndpoint(model_id, endpoint_name='openai', api_key=None, provider='openai', **kwargs)

Bases: Endpoint

Base class for OpenAI API endpoints.

Provides common functionality for interacting with OpenAI's API endpoints.

Parameters:

Name Type Description Default
model_id str

ID of the OpenAI model to use

required
endpoint_name str

Name of the endpoint. Defaults to "openai".

'openai'
api_key str | None

OpenAI API key. Defaults to None.

None
provider str

Provider name. Defaults to "openai".

'openai'
**kwargs Any

Additional arguments passed to OpenAI client

{}
Source code in llmeter/endpoints/openai.py
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
def __init__(
    self,
    model_id: str,
    endpoint_name: str = "openai",
    api_key: str | None = None,
    provider: str = "openai",
    **kwargs: Any,
):
    """Initialize the OpenAI endpoint.

    Args:
        model_id (str): ID of the OpenAI model to use.
        endpoint_name (str, optional): Name of the endpoint. Defaults to "openai".
        api_key (str | None, optional): OpenAI API key. Defaults to None.
        provider (str, optional): Provider name. Defaults to "openai".
        **kwargs (Any): Additional arguments forwarded to the OpenAI client.
    """
    super().__init__(endpoint_name, model_id, provider=provider)

    # Extra keyword arguments (e.g. base_url, timeout) are passed
    # straight through to the underlying SDK client.
    self._client = OpenAI(api_key=api_key, **kwargs)

create_payload staticmethod

create_payload(user_message, max_tokens=256, **kwargs)

Create a payload for the OpenAI API request.

Parameters:

Name Type Description Default
user_message str | Sequence[str]

User message(s) to send

required
max_tokens int

Maximum tokens in response. Defaults to 256.

256
**kwargs Any

Additional payload parameters

{}

Returns:

Name Type Description
dict dict

Formatted payload for API request

Source code in llmeter/endpoints/openai.py
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
@staticmethod
def create_payload(
    user_message: str | Sequence[str], max_tokens: int = 256, **kwargs: Any
) -> dict:
    """Create a payload for the OpenAI API request.

    Args:
        user_message (str | Sequence[str]): User message(s) to send
        max_tokens (int, optional): Maximum tokens in response. Defaults to 256.
        **kwargs: Additional payload parameters

    Returns:
        dict: Formatted payload for API request
    """
    if isinstance(user_message, str):
        user_message = [user_message]
    payload = {
        "messages": [{"role": "user", "content": k} for k in user_message],
        "max_tokens": max_tokens,
    }
    payload.update(kwargs)
    return payload