first commit

This commit is contained in:
2026-02-08 14:42:58 +08:00
commit 20e1deae21
8197 changed files with 2264639 additions and 0 deletions

View File

@@ -0,0 +1,229 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .beta import (
Beta,
AsyncBeta,
BetaWithRawResponse,
AsyncBetaWithRawResponse,
BetaWithStreamingResponse,
AsyncBetaWithStreamingResponse,
)
from .chat import (
Chat,
AsyncChat,
ChatWithRawResponse,
AsyncChatWithRawResponse,
ChatWithStreamingResponse,
AsyncChatWithStreamingResponse,
)
from .audio import (
Audio,
AsyncAudio,
AudioWithRawResponse,
AsyncAudioWithRawResponse,
AudioWithStreamingResponse,
AsyncAudioWithStreamingResponse,
)
from .evals import (
Evals,
AsyncEvals,
EvalsWithRawResponse,
AsyncEvalsWithRawResponse,
EvalsWithStreamingResponse,
AsyncEvalsWithStreamingResponse,
)
from .files import (
Files,
AsyncFiles,
FilesWithRawResponse,
AsyncFilesWithRawResponse,
FilesWithStreamingResponse,
AsyncFilesWithStreamingResponse,
)
from .images import (
Images,
AsyncImages,
ImagesWithRawResponse,
AsyncImagesWithRawResponse,
ImagesWithStreamingResponse,
AsyncImagesWithStreamingResponse,
)
from .models import (
Models,
AsyncModels,
ModelsWithRawResponse,
AsyncModelsWithRawResponse,
ModelsWithStreamingResponse,
AsyncModelsWithStreamingResponse,
)
from .videos import (
Videos,
AsyncVideos,
VideosWithRawResponse,
AsyncVideosWithRawResponse,
VideosWithStreamingResponse,
AsyncVideosWithStreamingResponse,
)
from .batches import (
Batches,
AsyncBatches,
BatchesWithRawResponse,
AsyncBatchesWithRawResponse,
BatchesWithStreamingResponse,
AsyncBatchesWithStreamingResponse,
)
from .uploads import (
Uploads,
AsyncUploads,
UploadsWithRawResponse,
AsyncUploadsWithRawResponse,
UploadsWithStreamingResponse,
AsyncUploadsWithStreamingResponse,
)
from .containers import (
Containers,
AsyncContainers,
ContainersWithRawResponse,
AsyncContainersWithRawResponse,
ContainersWithStreamingResponse,
AsyncContainersWithStreamingResponse,
)
from .embeddings import (
Embeddings,
AsyncEmbeddings,
EmbeddingsWithRawResponse,
AsyncEmbeddingsWithRawResponse,
EmbeddingsWithStreamingResponse,
AsyncEmbeddingsWithStreamingResponse,
)
from .completions import (
Completions,
AsyncCompletions,
CompletionsWithRawResponse,
AsyncCompletionsWithRawResponse,
CompletionsWithStreamingResponse,
AsyncCompletionsWithStreamingResponse,
)
from .fine_tuning import (
FineTuning,
AsyncFineTuning,
FineTuningWithRawResponse,
AsyncFineTuningWithRawResponse,
FineTuningWithStreamingResponse,
AsyncFineTuningWithStreamingResponse,
)
from .moderations import (
Moderations,
AsyncModerations,
ModerationsWithRawResponse,
AsyncModerationsWithRawResponse,
ModerationsWithStreamingResponse,
AsyncModerationsWithStreamingResponse,
)
from .vector_stores import (
VectorStores,
AsyncVectorStores,
VectorStoresWithRawResponse,
AsyncVectorStoresWithRawResponse,
VectorStoresWithStreamingResponse,
AsyncVectorStoresWithStreamingResponse,
)
# Public re-export list for the resources package.  Entries are grouped six
# per resource (sync, async, raw-response and streaming-response variants),
# in the order the resources are exposed on the OpenAI client.
__all__ = [
    "Completions",
    "AsyncCompletions",
    "CompletionsWithRawResponse",
    "AsyncCompletionsWithRawResponse",
    "CompletionsWithStreamingResponse",
    "AsyncCompletionsWithStreamingResponse",
    "Chat",
    "AsyncChat",
    "ChatWithRawResponse",
    "AsyncChatWithRawResponse",
    "ChatWithStreamingResponse",
    "AsyncChatWithStreamingResponse",
    "Embeddings",
    "AsyncEmbeddings",
    "EmbeddingsWithRawResponse",
    "AsyncEmbeddingsWithRawResponse",
    "EmbeddingsWithStreamingResponse",
    "AsyncEmbeddingsWithStreamingResponse",
    "Files",
    "AsyncFiles",
    "FilesWithRawResponse",
    "AsyncFilesWithRawResponse",
    "FilesWithStreamingResponse",
    "AsyncFilesWithStreamingResponse",
    "Images",
    "AsyncImages",
    "ImagesWithRawResponse",
    "AsyncImagesWithRawResponse",
    "ImagesWithStreamingResponse",
    "AsyncImagesWithStreamingResponse",
    "Audio",
    "AsyncAudio",
    "AudioWithRawResponse",
    "AsyncAudioWithRawResponse",
    "AudioWithStreamingResponse",
    "AsyncAudioWithStreamingResponse",
    "Moderations",
    "AsyncModerations",
    "ModerationsWithRawResponse",
    "AsyncModerationsWithRawResponse",
    "ModerationsWithStreamingResponse",
    "AsyncModerationsWithStreamingResponse",
    "Models",
    "AsyncModels",
    "ModelsWithRawResponse",
    "AsyncModelsWithRawResponse",
    "ModelsWithStreamingResponse",
    "AsyncModelsWithStreamingResponse",
    "FineTuning",
    "AsyncFineTuning",
    "FineTuningWithRawResponse",
    "AsyncFineTuningWithRawResponse",
    "FineTuningWithStreamingResponse",
    "AsyncFineTuningWithStreamingResponse",
    "VectorStores",
    "AsyncVectorStores",
    "VectorStoresWithRawResponse",
    "AsyncVectorStoresWithRawResponse",
    "VectorStoresWithStreamingResponse",
    "AsyncVectorStoresWithStreamingResponse",
    "Beta",
    "AsyncBeta",
    "BetaWithRawResponse",
    "AsyncBetaWithRawResponse",
    "BetaWithStreamingResponse",
    "AsyncBetaWithStreamingResponse",
    "Batches",
    "AsyncBatches",
    "BatchesWithRawResponse",
    "AsyncBatchesWithRawResponse",
    "BatchesWithStreamingResponse",
    "AsyncBatchesWithStreamingResponse",
    "Uploads",
    "AsyncUploads",
    "UploadsWithRawResponse",
    "AsyncUploadsWithRawResponse",
    "UploadsWithStreamingResponse",
    "AsyncUploadsWithStreamingResponse",
    "Evals",
    "AsyncEvals",
    "EvalsWithRawResponse",
    "AsyncEvalsWithRawResponse",
    "EvalsWithStreamingResponse",
    "AsyncEvalsWithStreamingResponse",
    "Containers",
    "AsyncContainers",
    "ContainersWithRawResponse",
    "AsyncContainersWithRawResponse",
    "ContainersWithStreamingResponse",
    "AsyncContainersWithStreamingResponse",
    "Videos",
    "AsyncVideos",
    "VideosWithRawResponse",
    "AsyncVideosWithRawResponse",
    "VideosWithStreamingResponse",
    "AsyncVideosWithStreamingResponse",
]

View File

@@ -0,0 +1,61 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .audio import (
Audio,
AsyncAudio,
AudioWithRawResponse,
AsyncAudioWithRawResponse,
AudioWithStreamingResponse,
AsyncAudioWithStreamingResponse,
)
from .speech import (
Speech,
AsyncSpeech,
SpeechWithRawResponse,
AsyncSpeechWithRawResponse,
SpeechWithStreamingResponse,
AsyncSpeechWithStreamingResponse,
)
from .translations import (
Translations,
AsyncTranslations,
TranslationsWithRawResponse,
AsyncTranslationsWithRawResponse,
TranslationsWithStreamingResponse,
AsyncTranslationsWithStreamingResponse,
)
from .transcriptions import (
Transcriptions,
AsyncTranscriptions,
TranscriptionsWithRawResponse,
AsyncTranscriptionsWithRawResponse,
TranscriptionsWithStreamingResponse,
AsyncTranscriptionsWithStreamingResponse,
)
# Public re-export list for the audio sub-package: sync/async resources plus
# their raw-response and streaming-response wrappers.
__all__ = [
    "Transcriptions",
    "AsyncTranscriptions",
    "TranscriptionsWithRawResponse",
    "AsyncTranscriptionsWithRawResponse",
    "TranscriptionsWithStreamingResponse",
    "AsyncTranscriptionsWithStreamingResponse",
    "Translations",
    "AsyncTranslations",
    "TranslationsWithRawResponse",
    "AsyncTranslationsWithRawResponse",
    "TranslationsWithStreamingResponse",
    "AsyncTranslationsWithStreamingResponse",
    "Speech",
    "AsyncSpeech",
    "SpeechWithRawResponse",
    "AsyncSpeechWithRawResponse",
    "SpeechWithStreamingResponse",
    "AsyncSpeechWithStreamingResponse",
    "Audio",
    "AsyncAudio",
    "AudioWithRawResponse",
    "AsyncAudioWithRawResponse",
    "AudioWithStreamingResponse",
    "AsyncAudioWithStreamingResponse",
]

View File

@@ -0,0 +1,166 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from .speech import (
Speech,
AsyncSpeech,
SpeechWithRawResponse,
AsyncSpeechWithRawResponse,
SpeechWithStreamingResponse,
AsyncSpeechWithStreamingResponse,
)
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from .translations import (
Translations,
AsyncTranslations,
TranslationsWithRawResponse,
AsyncTranslationsWithRawResponse,
TranslationsWithStreamingResponse,
AsyncTranslationsWithStreamingResponse,
)
from .transcriptions import (
Transcriptions,
AsyncTranscriptions,
TranscriptionsWithRawResponse,
AsyncTranscriptionsWithRawResponse,
TranscriptionsWithStreamingResponse,
AsyncTranscriptionsWithStreamingResponse,
)
__all__ = ["Audio", "AsyncAudio"]
class Audio(SyncAPIResource):
    """Synchronous entry point for the audio API surface.

    Exposes the ``transcriptions``, ``translations`` and ``speech``
    sub-resources; each is built lazily on first access and cached per
    instance via ``cached_property``.
    """

    @cached_property
    def transcriptions(self) -> Transcriptions:
        # Sub-resource shares this resource's client.
        return Transcriptions(self._client)

    @cached_property
    def translations(self) -> Translations:
        return Translations(self._client)

    @cached_property
    def speech(self) -> Speech:
        return Speech(self._client)

    @cached_property
    def with_raw_response(self) -> AudioWithRawResponse:
        """
        Prefix for any HTTP method call that should return the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AudioWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AudioWithStreamingResponse:
        """
        Like `.with_raw_response`, but does not eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AudioWithStreamingResponse(self)
class AsyncAudio(AsyncAPIResource):
    """Asynchronous entry point for the audio API surface.

    Mirrors :class:`Audio`, exposing async sub-resources that are created
    lazily and cached per instance.
    """

    @cached_property
    def transcriptions(self) -> AsyncTranscriptions:
        # Sub-resource shares this resource's client.
        return AsyncTranscriptions(self._client)

    @cached_property
    def translations(self) -> AsyncTranslations:
        return AsyncTranslations(self._client)

    @cached_property
    def speech(self) -> AsyncSpeech:
        return AsyncSpeech(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncAudioWithRawResponse:
        """
        Prefix for any HTTP method call that should return the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncAudioWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncAudioWithStreamingResponse:
        """
        Like `.with_raw_response`, but does not eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncAudioWithStreamingResponse(self)
class AudioWithRawResponse:
    """Raw-response view over :class:`Audio` and its sub-resources."""

    def __init__(self, audio: Audio) -> None:
        self._audio = audio

    @cached_property
    def transcriptions(self) -> TranscriptionsWithRawResponse:
        return TranscriptionsWithRawResponse(self._audio.transcriptions)

    @cached_property
    def translations(self) -> TranslationsWithRawResponse:
        return TranslationsWithRawResponse(self._audio.translations)

    @cached_property
    def speech(self) -> SpeechWithRawResponse:
        return SpeechWithRawResponse(self._audio.speech)
class AsyncAudioWithRawResponse:
    """Raw-response view over :class:`AsyncAudio` and its sub-resources."""

    def __init__(self, audio: AsyncAudio) -> None:
        self._audio = audio

    @cached_property
    def transcriptions(self) -> AsyncTranscriptionsWithRawResponse:
        return AsyncTranscriptionsWithRawResponse(self._audio.transcriptions)

    @cached_property
    def translations(self) -> AsyncTranslationsWithRawResponse:
        return AsyncTranslationsWithRawResponse(self._audio.translations)

    @cached_property
    def speech(self) -> AsyncSpeechWithRawResponse:
        return AsyncSpeechWithRawResponse(self._audio.speech)
class AudioWithStreamingResponse:
    """Streaming-response view over :class:`Audio` and its sub-resources."""

    def __init__(self, audio: Audio) -> None:
        self._audio = audio

    @cached_property
    def transcriptions(self) -> TranscriptionsWithStreamingResponse:
        return TranscriptionsWithStreamingResponse(self._audio.transcriptions)

    @cached_property
    def translations(self) -> TranslationsWithStreamingResponse:
        return TranslationsWithStreamingResponse(self._audio.translations)

    @cached_property
    def speech(self) -> SpeechWithStreamingResponse:
        return SpeechWithStreamingResponse(self._audio.speech)
class AsyncAudioWithStreamingResponse:
    """Streaming-response view over :class:`AsyncAudio` and its sub-resources."""

    def __init__(self, audio: AsyncAudio) -> None:
        self._audio = audio

    @cached_property
    def transcriptions(self) -> AsyncTranscriptionsWithStreamingResponse:
        return AsyncTranscriptionsWithStreamingResponse(self._audio.transcriptions)

    @cached_property
    def translations(self) -> AsyncTranslationsWithStreamingResponse:
        return AsyncTranslationsWithStreamingResponse(self._audio.translations)

    @cached_property
    def speech(self) -> AsyncSpeechWithStreamingResponse:
        return AsyncSpeechWithStreamingResponse(self._audio.speech)

View File

@@ -0,0 +1,257 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ...types.audio import speech_create_params
from ..._base_client import make_request_options
from ...types.audio.speech_model import SpeechModel
__all__ = ["Speech", "AsyncSpeech"]
class Speech(SyncAPIResource):
    """Synchronous client for the `/audio/speech` text-to-speech endpoint."""

    @cached_property
    def with_raw_response(self) -> SpeechWithRawResponse:
        """
        Prefix for any HTTP method call that should return the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return SpeechWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SpeechWithStreamingResponse:
        """
        Like `.with_raw_response`, but does not eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return SpeechWithStreamingResponse(self)

    def create(
        self,
        *,
        input: str,
        model: Union[str, SpeechModel],
        voice: Union[
            str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]
        ],
        instructions: str | Omit = omit,
        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | Omit = omit,
        speed: float | Omit = omit,
        stream_format: Literal["sse", "audio"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Generates audio from the input text.

        Args:
          input: The text to generate audio for. The maximum length is 4096 characters.

          model:
              One of the available [TTS models](https://platform.openai.com/docs/models#tts):
              `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.

          voice: The voice to use when generating the audio. Supported built-in voices are
              `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
              `shimmer`, `verse`, `marin`, and `cedar`. Previews of the voices are available
              in the
              [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).

          instructions: Control the voice of your generated audio with additional instructions. Does not
              work with `tts-1` or `tts-1-hd`.

          response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
              `wav`, and `pcm`.

          speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
              the default.

          stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`.
              `sse` is not supported for `tts-1` or `tts-1-hd`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The endpoint responds with raw audio bytes, so force a binary Accept
        # header while still letting caller-supplied headers win.
        extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
        payload = {
            "input": input,
            "model": model,
            "voice": voice,
            "instructions": instructions,
            "response_format": response_format,
            "speed": speed,
            "stream_format": stream_format,
        }
        return self._post(
            "/audio/speech",
            body=maybe_transform(payload, speech_create_params.SpeechCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
class AsyncSpeech(AsyncAPIResource):
    """Asynchronous client for the `/audio/speech` text-to-speech endpoint."""

    @cached_property
    def with_raw_response(self) -> AsyncSpeechWithRawResponse:
        """
        Prefix for any HTTP method call that should return the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSpeechWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSpeechWithStreamingResponse:
        """
        Like `.with_raw_response`, but does not eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncSpeechWithStreamingResponse(self)

    async def create(
        self,
        *,
        input: str,
        model: Union[str, SpeechModel],
        voice: Union[
            str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]
        ],
        instructions: str | Omit = omit,
        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | Omit = omit,
        speed: float | Omit = omit,
        stream_format: Literal["sse", "audio"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Generates audio from the input text.

        Args:
          input: The text to generate audio for. The maximum length is 4096 characters.

          model:
              One of the available [TTS models](https://platform.openai.com/docs/models#tts):
              `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.

          voice: The voice to use when generating the audio. Supported built-in voices are
              `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
              `shimmer`, `verse`, `marin`, and `cedar`. Previews of the voices are available
              in the
              [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).

          instructions: Control the voice of your generated audio with additional instructions. Does not
              work with `tts-1` or `tts-1-hd`.

          response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
              `wav`, and `pcm`.

          speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
              the default.

          stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`.
              `sse` is not supported for `tts-1` or `tts-1-hd`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The endpoint responds with raw audio bytes, so force a binary Accept
        # header while still letting caller-supplied headers win.
        extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
        payload = {
            "input": input,
            "model": model,
            "voice": voice,
            "instructions": instructions,
            "response_format": response_format,
            "speed": speed,
            "stream_format": stream_format,
        }
        return await self._post(
            "/audio/speech",
            body=await async_maybe_transform(payload, speech_create_params.SpeechCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
class SpeechWithRawResponse:
    """Wraps :class:`Speech` so `create` returns the raw HTTP response."""

    def __init__(self, speech: Speech) -> None:
        self._speech = speech

        self.create = _legacy_response.to_raw_response_wrapper(speech.create)
class AsyncSpeechWithRawResponse:
    """Wraps :class:`AsyncSpeech` so `create` returns the raw HTTP response."""

    def __init__(self, speech: AsyncSpeech) -> None:
        self._speech = speech

        self.create = _legacy_response.async_to_raw_response_wrapper(speech.create)
class SpeechWithStreamingResponse:
    """Wraps :class:`Speech` so `create` yields a streamed binary response."""

    def __init__(self, speech: Speech) -> None:
        self._speech = speech

        self.create = to_custom_streamed_response_wrapper(speech.create, StreamedBinaryAPIResponse)
class AsyncSpeechWithStreamingResponse:
    """Wraps :class:`AsyncSpeech` so `create` yields a streamed binary response."""

    def __init__(self, speech: AsyncSpeech) -> None:
        self._speech = speech

        self.create = async_to_custom_streamed_response_wrapper(speech.create, AsyncStreamedBinaryAPIResponse)

View File

@@ -0,0 +1,987 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, List, Union, Mapping, Optional, cast
from typing_extensions import Literal, overload, assert_never
import httpx
from ... import _legacy_response
from ..._types import (
Body,
Omit,
Query,
Headers,
NotGiven,
FileTypes,
SequenceNotStr,
omit,
not_given,
)
from ..._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._streaming import Stream, AsyncStream
from ...types.audio import transcription_create_params
from ..._base_client import make_request_options
from ...types.audio_model import AudioModel
from ...types.audio.transcription import Transcription
from ...types.audio_response_format import AudioResponseFormat
from ...types.audio.transcription_include import TranscriptionInclude
from ...types.audio.transcription_verbose import TranscriptionVerbose
from ...types.audio.transcription_diarized import TranscriptionDiarized
from ...types.audio.transcription_stream_event import TranscriptionStreamEvent
from ...types.audio.transcription_create_response import TranscriptionCreateResponse
__all__ = ["Transcriptions", "AsyncTranscriptions"]
# Module-level logger, namespaced under "openai" so users can configure audio
# transcription logging independently of the rest of the SDK.
log: logging.Logger = logging.getLogger("openai.audio.transcriptions")
class Transcriptions(SyncAPIResource):
@cached_property
def with_raw_response(self) -> TranscriptionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return TranscriptionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> TranscriptionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return TranscriptionsWithStreamingResponse(self)
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
include: List[TranscriptionInclude] | Omit = omit,
language: str | Omit = omit,
prompt: str | Omit = omit,
response_format: Union[Literal["json"], Omit] = omit,
stream: Optional[Literal[False]] | Omit = omit,
temperature: float | Omit = omit,
timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Transcription:
"""
Transcribes audio into the input language.
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. The options are `gpt-4o-transcribe`,
`gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
(which is powered by our open source Whisper V2 model), and
`gpt-4o-transcribe-diarize`.
chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server
first normalizes loudness and then uses voice activity detection (VAD) to choose
boundaries. `server_vad` object can be provided to tweak VAD detection
parameters manually. If unset, the audio is transcribed as a single block.
include: Additional information to include in the transcription response. `logprobs` will
return the log probabilities of the tokens in the response to understand the
model's confidence in the transcription. `logprobs` only works with
response_format set to `json` and only with the models `gpt-4o-transcribe`,
`gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
not supported when using `gpt-4o-transcribe-diarize`.
language: The language of the input audio. Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
[prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should match the audio language.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
the only supported format is `json`.
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
for more information.
Note: Streaming is not supported for the `whisper-1` model and will be ignored.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
timestamp_granularities: The timestamp granularities to populate for this transcription.
`response_format` must be set `verbose_json` to use timestamp granularities.
Either or both of these options are supported: `word`, or `segment`. Note: There
is no additional latency for segment timestamps, but generating word timestamps
incurs additional latency.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
"""
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
include: List[TranscriptionInclude] | Omit = omit,
response_format: Literal["verbose_json"],
language: str | Omit = omit,
prompt: str | Omit = omit,
temperature: float | Omit = omit,
timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> TranscriptionVerbose: ...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
response_format: Literal["text", "srt", "vtt"],
include: List[TranscriptionInclude] | Omit = omit,
language: str | Omit = omit,
prompt: str | Omit = omit,
temperature: float | Omit = omit,
timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> str: ...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
response_format: Literal["diarized_json"],
known_speaker_names: SequenceNotStr[str] | Omit = omit,
known_speaker_references: SequenceNotStr[str] | Omit = omit,
language: str | Omit = omit,
temperature: float | Omit = omit,
timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> TranscriptionDiarized: ...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
stream: Literal[True],
chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
include: List[TranscriptionInclude] | Omit = omit,
known_speaker_names: SequenceNotStr[str] | Omit = omit,
known_speaker_references: SequenceNotStr[str] | Omit = omit,
language: str | Omit = omit,
prompt: str | Omit = omit,
response_format: Union[AudioResponseFormat, Omit] = omit,
temperature: float | Omit = omit,
timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Stream[TranscriptionStreamEvent]:
"""
Transcribes audio into the input language.
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. The options are `gpt-4o-transcribe`,
`gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
(which is powered by our open source Whisper V2 model), and
`gpt-4o-transcribe-diarize`.
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
for more information.
Note: Streaming is not supported for the `whisper-1` model and will be ignored.
chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server
first normalizes loudness and then uses voice activity detection (VAD) to choose
boundaries. `server_vad` object can be provided to tweak VAD detection
parameters manually. If unset, the audio is transcribed as a single block.
Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30
seconds.
include: Additional information to include in the transcription response. `logprobs` will
return the log probabilities of the tokens in the response to understand the
model's confidence in the transcription. `logprobs` only works with
response_format set to `json` and only with the models `gpt-4o-transcribe`,
`gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
not supported when using `gpt-4o-transcribe-diarize`.
known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in
`known_speaker_references[]`. Each entry should be a short identifier (for
example `customer` or `agent`). Up to 4 speakers are supported.
known_speaker_references: Optional list of audio samples (as
[data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs))
that contain known speaker references matching `known_speaker_names[]`. Each
sample must be between 2 and 10 seconds, and can use any of the same input audio
formats supported by `file`.
language: The language of the input audio. Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
[prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should match the audio language. This field is not supported when using
`gpt-4o-transcribe-diarize`.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and
`gpt-4o-mini-transcribe`, the only supported format is `json`. For
`gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and
`diarized_json`, with `diarized_json` required to receive speaker annotations.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
timestamp_granularities: The timestamp granularities to populate for this transcription.
`response_format` must be set `verbose_json` to use timestamp granularities.
Either or both of these options are supported: `word`, or `segment`. Note: There
is no additional latency for segment timestamps, but generating word timestamps
incurs additional latency. This option is not available for
`gpt-4o-transcribe-diarize`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
def create(
    self,
    *,
    file: FileTypes,
    model: Union[str, AudioModel],
    stream: bool,
    chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
    include: List[TranscriptionInclude] | Omit = omit,
    known_speaker_names: SequenceNotStr[str] | Omit = omit,
    known_speaker_references: SequenceNotStr[str] | Omit = omit,
    language: str | Omit = omit,
    prompt: str | Omit = omit,
    response_format: Union[AudioResponseFormat, Omit] = omit,
    temperature: float | Omit = omit,
    timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> TranscriptionCreateResponse | Stream[TranscriptionStreamEvent]:
    """
    Transcribes audio into the input language.

    Args:
      file:
          The audio file object (not file name) to transcribe, in one of these formats:
          flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

      model: ID of the model to use. The options are `gpt-4o-transcribe`,
          `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
          (which is powered by our open source Whisper V2 model), and
          `gpt-4o-transcribe-diarize`.

      stream: If set to true, the model response data will be streamed to the client as it is
          generated using
          [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
          See the
          [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
          for more information.

          Note: Streaming is not supported for the `whisper-1` model and will be ignored.

      chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server
          first normalizes loudness and then uses voice activity detection (VAD) to choose
          boundaries. `server_vad` object can be provided to tweak VAD detection
          parameters manually. If unset, the audio is transcribed as a single block.
          Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30
          seconds.

      include: Additional information to include in the transcription response. `logprobs` will
          return the log probabilities of the tokens in the response to understand the
          model's confidence in the transcription. `logprobs` only works with
          response_format set to `json` and only with the models `gpt-4o-transcribe`,
          `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
          not supported when using `gpt-4o-transcribe-diarize`.

      known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in
          `known_speaker_references[]`. Each entry should be a short identifier (for
          example `customer` or `agent`). Up to 4 speakers are supported.

      known_speaker_references: Optional list of audio samples (as
          [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs))
          that contain known speaker references matching `known_speaker_names[]`. Each
          sample must be between 2 and 10 seconds, and can use any of the same input audio
          formats supported by `file`.

      language: The language of the input audio. Supplying the input language in
          [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
          format will improve accuracy and latency.

      prompt: An optional text to guide the model's style or continue a previous audio
          segment. The
          [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
          should match the audio language. This field is not supported when using
          `gpt-4o-transcribe-diarize`.

      response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
          `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and
          `gpt-4o-mini-transcribe`, the only supported format is `json`. For
          `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and
          `diarized_json`, with `diarized_json` required to receive speaker annotations.

      temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
          output more random, while lower values like 0.2 will make it more focused and
          deterministic. If set to 0, the model will use
          [log probability](https://en.wikipedia.org/wiki/Log_probability) to
          automatically increase the temperature until certain thresholds are hit.

      timestamp_granularities: The timestamp granularities to populate for this transcription.
          `response_format` must be set to `verbose_json` to use timestamp granularities.
          Either or both of these options are supported: `word`, or `segment`. Note: There
          is no additional latency for segment timestamps, but generating word timestamps
          incurs additional latency. This option is not available for
          `gpt-4o-transcribe-diarize`.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
@required_args(["file", "model"], ["file", "model", "stream"])
def create(
    self,
    *,
    file: FileTypes,
    model: Union[str, AudioModel],
    chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
    include: List[TranscriptionInclude] | Omit = omit,
    known_speaker_names: SequenceNotStr[str] | Omit = omit,
    known_speaker_references: SequenceNotStr[str] | Omit = omit,
    language: str | Omit = omit,
    prompt: str | Omit = omit,
    response_format: Union[AudioResponseFormat, Omit] = omit,
    stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
    temperature: float | Omit = omit,
    timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> str | Transcription | TranscriptionDiarized | TranscriptionVerbose | Stream[TranscriptionStreamEvent]:
    """
    Create a transcription of the given audio file.

    This is the shared runtime implementation behind the typed `@overload`
    declarations above; see those overloads for full parameter documentation.
    The parsed return type is selected from `response_format` via
    `_get_response_format_type` (`json` -> Transcription, `verbose_json` ->
    TranscriptionVerbose, `diarized_json` -> TranscriptionDiarized,
    `text`/`srt`/`vtt` -> str); when `stream` is truthy a
    `Stream[TranscriptionStreamEvent]` is returned instead.
    """
    body = deepcopy_minimal(
        {
            "file": file,
            "model": model,
            "chunking_strategy": chunking_strategy,
            "include": include,
            "known_speaker_names": known_speaker_names,
            "known_speaker_references": known_speaker_references,
            "language": language,
            "prompt": prompt,
            "response_format": response_format,
            "stream": stream,
            "temperature": temperature,
            "timestamp_granularities": timestamp_granularities,
        }
    )
    # The `file` entry is pulled out of the body so it is sent as a multipart
    # file part rather than being serialized with the other form fields.
    files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
    # It should be noted that the actual Content-Type header that will be
    # sent to the server will contain a `boundary` parameter, e.g.
    # multipart/form-data; boundary=---abc--
    extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
    return self._post(  # type: ignore[return-value]
        "/audio/transcriptions",
        # Validate/serialize against the streaming or non-streaming param schema.
        body=maybe_transform(
            body,
            transcription_create_params.TranscriptionCreateParamsStreaming
            if stream
            else transcription_create_params.TranscriptionCreateParamsNonStreaming,
        ),
        files=files,
        options=make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=_get_response_format_type(response_format),
        stream=stream or False,
        stream_cls=Stream[TranscriptionStreamEvent],
    )
class AsyncTranscriptions(AsyncAPIResource):
    """Async counterpart of `Transcriptions` for the `/audio/transcriptions` endpoint."""

    @cached_property
    def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncTranscriptionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncTranscriptionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncTranscriptionsWithStreamingResponse(self)

    @overload
    async def create(
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
        include: List[TranscriptionInclude] | Omit = omit,
        known_speaker_names: SequenceNotStr[str] | Omit = omit,
        known_speaker_references: SequenceNotStr[str] | Omit = omit,
        language: str | Omit = omit,
        prompt: str | Omit = omit,
        response_format: Union[Literal["json"], Omit] = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        temperature: float | Omit = omit,
        timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> TranscriptionCreateResponse:
        """
        Transcribes audio into the input language.

        Args:
          file:
              The audio file object (not file name) to transcribe, in one of these formats:
              flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

          model: ID of the model to use. The options are `gpt-4o-transcribe`,
              `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
              (which is powered by our open source Whisper V2 model), and
              `gpt-4o-transcribe-diarize`.

          chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server
              first normalizes loudness and then uses voice activity detection (VAD) to choose
              boundaries. `server_vad` object can be provided to tweak VAD detection
              parameters manually. If unset, the audio is transcribed as a single block.
              Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30
              seconds.

          include: Additional information to include in the transcription response. `logprobs` will
              return the log probabilities of the tokens in the response to understand the
              model's confidence in the transcription. `logprobs` only works with
              response_format set to `json` and only with the models `gpt-4o-transcribe`,
              `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
              not supported when using `gpt-4o-transcribe-diarize`.

          known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in
              `known_speaker_references[]`. Each entry should be a short identifier (for
              example `customer` or `agent`). Up to 4 speakers are supported.

          known_speaker_references: Optional list of audio samples (as
              [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs))
              that contain known speaker references matching `known_speaker_names[]`. Each
              sample must be between 2 and 10 seconds, and can use any of the same input audio
              formats supported by `file`.

          language: The language of the input audio. Supplying the input language in
              [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
              format will improve accuracy and latency.

          prompt: An optional text to guide the model's style or continue a previous audio
              segment. The
              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
              should match the audio language. This field is not supported when using
              `gpt-4o-transcribe-diarize`.

          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
              `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and
              `gpt-4o-mini-transcribe`, the only supported format is `json`. For
              `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and
              `diarized_json`, with `diarized_json` required to receive speaker annotations.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
              for more information.

              Note: Streaming is not supported for the `whisper-1` model and will be ignored.

          temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
              output more random, while lower values like 0.2 will make it more focused and
              deterministic. If set to 0, the model will use
              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
              automatically increase the temperature until certain thresholds are hit.

          timestamp_granularities: The timestamp granularities to populate for this transcription.
              `response_format` must be set to `verbose_json` to use timestamp granularities.
              Either or both of these options are supported: `word`, or `segment`. Note: There
              is no additional latency for segment timestamps, but generating word timestamps
              incurs additional latency. This option is not available for
              `gpt-4o-transcribe-diarize`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    async def create(
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
        include: List[TranscriptionInclude] | Omit = omit,
        response_format: Literal["verbose_json"],
        language: str | Omit = omit,
        prompt: str | Omit = omit,
        temperature: float | Omit = omit,
        timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> TranscriptionVerbose:
        """Overload: `response_format="verbose_json"` parses into `TranscriptionVerbose`."""
        ...

    @overload
    async def create(
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
        include: List[TranscriptionInclude] | Omit = omit,
        response_format: Literal["text", "srt", "vtt"],
        language: str | Omit = omit,
        prompt: str | Omit = omit,
        temperature: float | Omit = omit,
        timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> str:
        """Overload: the plain-text formats (`text`, `srt`, `vtt`) return `str`."""
        ...

    @overload
    async def create(
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        stream: Literal[True],
        chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
        include: List[TranscriptionInclude] | Omit = omit,
        known_speaker_names: SequenceNotStr[str] | Omit = omit,
        known_speaker_references: SequenceNotStr[str] | Omit = omit,
        language: str | Omit = omit,
        prompt: str | Omit = omit,
        response_format: Union[AudioResponseFormat, Omit] = omit,
        temperature: float | Omit = omit,
        timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncStream[TranscriptionStreamEvent]:
        """
        Transcribes audio into the input language.

        Args:
          file:
              The audio file object (not file name) to transcribe, in one of these formats:
              flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

          model: ID of the model to use. The options are `gpt-4o-transcribe`,
              `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
              (which is powered by our open source Whisper V2 model), and
              `gpt-4o-transcribe-diarize`.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
              for more information.

              Note: Streaming is not supported for the `whisper-1` model and will be ignored.

          chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server
              first normalizes loudness and then uses voice activity detection (VAD) to choose
              boundaries. `server_vad` object can be provided to tweak VAD detection
              parameters manually. If unset, the audio is transcribed as a single block.
              Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30
              seconds.

          include: Additional information to include in the transcription response. `logprobs` will
              return the log probabilities of the tokens in the response to understand the
              model's confidence in the transcription. `logprobs` only works with
              response_format set to `json` and only with the models `gpt-4o-transcribe`,
              `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
              not supported when using `gpt-4o-transcribe-diarize`.

          known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in
              `known_speaker_references[]`. Each entry should be a short identifier (for
              example `customer` or `agent`). Up to 4 speakers are supported.

          known_speaker_references: Optional list of audio samples (as
              [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs))
              that contain known speaker references matching `known_speaker_names[]`. Each
              sample must be between 2 and 10 seconds, and can use any of the same input audio
              formats supported by `file`.

          language: The language of the input audio. Supplying the input language in
              [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
              format will improve accuracy and latency.

          prompt: An optional text to guide the model's style or continue a previous audio
              segment. The
              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
              should match the audio language. This field is not supported when using
              `gpt-4o-transcribe-diarize`.

          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
              `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and
              `gpt-4o-mini-transcribe`, the only supported format is `json`. For
              `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and
              `diarized_json`, with `diarized_json` required to receive speaker annotations.

          temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
              output more random, while lower values like 0.2 will make it more focused and
              deterministic. If set to 0, the model will use
              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
              automatically increase the temperature until certain thresholds are hit.

          timestamp_granularities: The timestamp granularities to populate for this transcription.
              `response_format` must be set to `verbose_json` to use timestamp granularities.
              Either or both of these options are supported: `word`, or `segment`. Note: There
              is no additional latency for segment timestamps, but generating word timestamps
              incurs additional latency. This option is not available for
              `gpt-4o-transcribe-diarize`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    async def create(
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        stream: bool,
        chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
        include: List[TranscriptionInclude] | Omit = omit,
        known_speaker_names: SequenceNotStr[str] | Omit = omit,
        known_speaker_references: SequenceNotStr[str] | Omit = omit,
        language: str | Omit = omit,
        prompt: str | Omit = omit,
        response_format: Union[AudioResponseFormat, Omit] = omit,
        temperature: float | Omit = omit,
        timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> TranscriptionCreateResponse | AsyncStream[TranscriptionStreamEvent]:
        """
        Transcribes audio into the input language.

        Args:
          file:
              The audio file object (not file name) to transcribe, in one of these formats:
              flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

          model: ID of the model to use. The options are `gpt-4o-transcribe`,
              `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
              (which is powered by our open source Whisper V2 model), and
              `gpt-4o-transcribe-diarize`.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
              for more information.

              Note: Streaming is not supported for the `whisper-1` model and will be ignored.

          chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server
              first normalizes loudness and then uses voice activity detection (VAD) to choose
              boundaries. `server_vad` object can be provided to tweak VAD detection
              parameters manually. If unset, the audio is transcribed as a single block.
              Required when using `gpt-4o-transcribe-diarize` for inputs longer than 30
              seconds.

          include: Additional information to include in the transcription response. `logprobs` will
              return the log probabilities of the tokens in the response to understand the
              model's confidence in the transcription. `logprobs` only works with
              response_format set to `json` and only with the models `gpt-4o-transcribe`,
              `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
              not supported when using `gpt-4o-transcribe-diarize`.

          known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in
              `known_speaker_references[]`. Each entry should be a short identifier (for
              example `customer` or `agent`). Up to 4 speakers are supported.

          known_speaker_references: Optional list of audio samples (as
              [data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs))
              that contain known speaker references matching `known_speaker_names[]`. Each
              sample must be between 2 and 10 seconds, and can use any of the same input audio
              formats supported by `file`.

          language: The language of the input audio. Supplying the input language in
              [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
              format will improve accuracy and latency.

          prompt: An optional text to guide the model's style or continue a previous audio
              segment. The
              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
              should match the audio language. This field is not supported when using
              `gpt-4o-transcribe-diarize`.

          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
              `verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and
              `gpt-4o-mini-transcribe`, the only supported format is `json`. For
              `gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and
              `diarized_json`, with `diarized_json` required to receive speaker annotations.

          temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
              output more random, while lower values like 0.2 will make it more focused and
              deterministic. If set to 0, the model will use
              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
              automatically increase the temperature until certain thresholds are hit.

          timestamp_granularities: The timestamp granularities to populate for this transcription.
              `response_format` must be set to `verbose_json` to use timestamp granularities.
              Either or both of these options are supported: `word`, or `segment`. Note: There
              is no additional latency for segment timestamps, but generating word timestamps
              incurs additional latency. This option is not available for
              `gpt-4o-transcribe-diarize`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @required_args(["file", "model"], ["file", "model", "stream"])
    async def create(
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | Omit = omit,
        include: List[TranscriptionInclude] | Omit = omit,
        known_speaker_names: SequenceNotStr[str] | Omit = omit,
        known_speaker_references: SequenceNotStr[str] | Omit = omit,
        language: str | Omit = omit,
        prompt: str | Omit = omit,
        response_format: Union[AudioResponseFormat, Omit] = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        temperature: float | Omit = omit,
        timestamp_granularities: List[Literal["word", "segment"]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Transcription | TranscriptionVerbose | TranscriptionDiarized | str | AsyncStream[TranscriptionStreamEvent]:
        """
        Create a transcription of the given audio file (async).

        Shared implementation behind the typed `@overload` declarations above; see
        those overloads for full parameter documentation.
        """
        body = deepcopy_minimal(
            {
                "file": file,
                "model": model,
                "chunking_strategy": chunking_strategy,
                "include": include,
                "known_speaker_names": known_speaker_names,
                "known_speaker_references": known_speaker_references,
                "language": language,
                "prompt": prompt,
                "response_format": response_format,
                "stream": stream,
                "temperature": temperature,
                "timestamp_granularities": timestamp_granularities,
            }
        )
        # The `file` entry is pulled out of the body so it is sent as a multipart
        # file part rather than being serialized with the other form fields.
        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
        # It should be noted that the actual Content-Type header that will be
        # sent to the server will contain a `boundary` parameter, e.g.
        # multipart/form-data; boundary=---abc--
        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return await self._post(
            "/audio/transcriptions",
            # Validate/serialize against the streaming or non-streaming param schema.
            body=await async_maybe_transform(
                body,
                transcription_create_params.TranscriptionCreateParamsStreaming
                if stream
                else transcription_create_params.TranscriptionCreateParamsNonStreaming,
            ),
            files=files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_get_response_format_type(response_format),
            stream=stream or False,
            stream_cls=AsyncStream[TranscriptionStreamEvent],
        )
class TranscriptionsWithRawResponse:
    """Mirror of `Transcriptions` whose methods return the raw HTTP response."""

    def __init__(self, transcriptions: Transcriptions) -> None:
        # Keep a reference to the wrapped resource, then expose each method
        # wrapped so it yields the raw response instead of the parsed model.
        self._transcriptions = transcriptions
        self.create = _legacy_response.to_raw_response_wrapper(transcriptions.create)
class AsyncTranscriptionsWithRawResponse:
    """Mirror of `AsyncTranscriptions` whose methods return the raw HTTP response."""

    def __init__(self, transcriptions: AsyncTranscriptions) -> None:
        # Keep a reference to the wrapped resource, then expose each method
        # wrapped so it yields the raw response instead of the parsed model.
        self._transcriptions = transcriptions
        self.create = _legacy_response.async_to_raw_response_wrapper(transcriptions.create)
class TranscriptionsWithStreamingResponse:
    """Mirror of `Transcriptions` whose methods stream the response body lazily."""

    def __init__(self, transcriptions: Transcriptions) -> None:
        # Keep a reference to the wrapped resource, then expose each method
        # wrapped so the response body is not read eagerly.
        self._transcriptions = transcriptions
        self.create = to_streamed_response_wrapper(transcriptions.create)
class AsyncTranscriptionsWithStreamingResponse:
    """Mirror of `AsyncTranscriptions` whose methods stream the response body lazily."""

    def __init__(self, transcriptions: AsyncTranscriptions) -> None:
        # Keep a reference to the wrapped resource, then expose each method
        # wrapped so the response body is not read eagerly.
        self._transcriptions = transcriptions
        self.create = async_to_streamed_response_wrapper(transcriptions.create)
def _get_response_format_type(
    response_format: AudioResponseFormat | Omit,
) -> type[Transcription | TranscriptionVerbose | TranscriptionDiarized | str]:
    """Map the requested `response_format` to the type the HTTP response is parsed into.

    `json` (or an omitted/`None` format) parses into `Transcription`;
    `verbose_json` into `TranscriptionVerbose`; `diarized_json` into
    `TranscriptionDiarized`; the plain-text formats (`text`, `srt`, `vtt`)
    are returned as `str`. Unknown values log a warning and fall back to
    `Transcription`.
    """
    if isinstance(response_format, Omit) or response_format is None:  # pyright: ignore[reportUnnecessaryComparison]
        return Transcription

    if response_format == "json":
        return Transcription
    elif response_format == "verbose_json":
        return TranscriptionVerbose
    elif response_format == "diarized_json":
        return TranscriptionDiarized
    elif response_format == "srt" or response_format == "text" or response_format == "vtt":
        return str
    elif TYPE_CHECKING:  # type: ignore[unreachable]
        assert_never(response_format)
    else:
        # `Logger.warn` is a deprecated alias of `Logger.warning`; use the supported name.
        log.warning("Unexpected audio response format: %s", response_format)
        return Transcription

View File

@@ -0,0 +1,367 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Union, Mapping, cast
from typing_extensions import Literal, overload, assert_never
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...types.audio import translation_create_params
from ..._base_client import make_request_options
from ...types.audio_model import AudioModel
from ...types.audio.translation import Translation
from ...types.audio_response_format import AudioResponseFormat
from ...types.audio.translation_verbose import TranslationVerbose
# Public API of this module: the sync and async translation resources.
__all__ = ["Translations", "AsyncTranslations"]
# Module logger. Named after the *translations* resource; the previous name
# ("openai.audio.transcriptions") was a copy/paste from transcriptions.py and
# routed this module's records into the transcriptions logger hierarchy.
log: logging.Logger = logging.getLogger("openai.audio.translations")
class Translations(SyncAPIResource):
@cached_property
def with_raw_response(self) -> TranslationsWithRawResponse:
    """
    This property can be used as a prefix for any HTTP method call to return
    the raw response object instead of the parsed content.

    For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
    """
    return TranslationsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> TranslationsWithStreamingResponse:
    """
    An alternative to `.with_raw_response` that doesn't eagerly read the response body.

    For more information, see https://www.github.com/openai/openai-python#with_streaming_response
    """
    return TranslationsWithStreamingResponse(self)
@overload
def create(
    self,
    *,
    file: FileTypes,
    model: Union[str, AudioModel],
    response_format: Union[Literal["json"], Omit] = omit,
    prompt: str | Omit = omit,
    temperature: float | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Translation:
    """Overload: the default `json` response format parses into a `Translation` model."""
    ...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
response_format: Literal["verbose_json"],
prompt: str | Omit = omit,
temperature: float | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> TranslationVerbose: ...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
response_format: Literal["text", "srt", "vtt"],
prompt: str | Omit = omit,
temperature: float | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> str: ...
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
prompt: str | Omit = omit,
response_format: Union[Literal["json", "text", "srt", "verbose_json", "vtt"], Omit] = omit,
temperature: float | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Translation | TranslationVerbose | str:
"""
Translates audio into English.
Args:
file: The audio file object (not file name) translate, in one of these formats: flac,
mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. Only `whisper-1` (which is powered by our open source
Whisper V2 model) is currently available.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
[prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should be in English.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, or `vtt`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"file": file,
"model": model,
"prompt": prompt,
"response_format": response_format,
"temperature": temperature,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post( # type: ignore[return-value]
"/audio/translations",
body=maybe_transform(body, translation_create_params.TranslationCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_get_response_format_type(response_format),
)
class AsyncTranslations(AsyncAPIResource):
    """Async resource for the `/audio/translations` endpoint (audio -> English text)."""

    @cached_property
    def with_raw_response(self) -> AsyncTranslationsWithRawResponse:
        """
        Prefix any method call with this property to receive the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncTranslationsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncTranslationsWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncTranslationsWithStreamingResponse(self)

    @overload
    async def create(
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        response_format: Union[Literal["json"], Omit] = omit,
        prompt: str | Omit = omit,
        temperature: float | Omit = omit,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Translation: ...

    @overload
    async def create(
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        response_format: Literal["verbose_json"],
        prompt: str | Omit = omit,
        temperature: float | Omit = omit,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> TranslationVerbose: ...

    @overload
    async def create(
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        response_format: Literal["text", "srt", "vtt"],
        prompt: str | Omit = omit,
        temperature: float | Omit = omit,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> str: ...

    async def create(
        self,
        *,
        file: FileTypes,
        model: Union[str, AudioModel],
        prompt: str | Omit = omit,
        response_format: Union[AudioResponseFormat, Omit] = omit,
        temperature: float | Omit = omit,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Translation | TranslationVerbose | str:
        """
        Translates audio into English.

        Args:
          file: The audio file object (not file name) to translate, in one of these formats:
              flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

          model: ID of the model to use. Only `whisper-1` (which is powered by our open source
              Whisper V2 model) is currently available.

          prompt: An optional text to guide the model's style or continue a previous audio
              segment. The
              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
              should be in English.

          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
              `verbose_json`, or `vtt`.

          temperature: The sampling temperature, between 0 and 1. Higher values make the output
              more random, lower values more deterministic. If set to 0, the model uses
              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
              automatically increase the temperature until certain thresholds are hit.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = deepcopy_minimal(
            {
                "file": file,
                "model": model,
                "prompt": prompt,
                "response_format": response_format,
                "temperature": temperature,
            }
        )
        multipart_files = extract_files(cast(Mapping[str, object], request_body), paths=[["file"]])
        # The Content-Type actually sent on the wire also carries a `boundary`
        # parameter, e.g. multipart/form-data; boundary=---abc--
        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._post(
            "/audio/translations",
            body=await async_maybe_transform(request_body, translation_create_params.TranslationCreateParams),
            files=multipart_files,
            options=request_options,
            cast_to=_get_response_format_type(response_format),
        )
class TranslationsWithRawResponse:
    """Raw-response view of `Translations`: wrapped methods return the raw HTTP response."""

    def __init__(self, translations: Translations) -> None:
        self._translations = translations
        self.create = _legacy_response.to_raw_response_wrapper(
            translations.create,
        )
class AsyncTranslationsWithRawResponse:
    """Raw-response view of `AsyncTranslations`: wrapped methods return the raw HTTP response."""

    def __init__(self, translations: AsyncTranslations) -> None:
        self._translations = translations
        self.create = _legacy_response.async_to_raw_response_wrapper(
            translations.create,
        )
class TranslationsWithStreamingResponse:
    """Streaming-response view of `Translations`: the response body is not read eagerly."""

    def __init__(self, translations: Translations) -> None:
        self._translations = translations
        self.create = to_streamed_response_wrapper(
            translations.create,
        )
class AsyncTranslationsWithStreamingResponse:
    """Streaming-response view of `AsyncTranslations`: the response body is not read eagerly."""

    def __init__(self, translations: AsyncTranslations) -> None:
        self._translations = translations
        self.create = async_to_streamed_response_wrapper(
            translations.create,
        )
def _get_response_format_type(
    response_format: AudioResponseFormat | Omit,
) -> type[Translation | TranslationVerbose | str]:
    """Map the requested output format to the type the HTTP response is parsed into."""
    # An omitted format defaults to JSON parsing.
    if isinstance(response_format, Omit) or response_format is None:  # pyright: ignore[reportUnnecessaryComparison]
        return Translation

    parse_targets: dict[str, type[Translation | TranslationVerbose | str]] = {
        "json": Translation,
        "verbose_json": TranslationVerbose,
        "text": str,
        "srt": str,
        "vtt": str,
    }
    target = parse_targets.get(response_format)
    if target is not None:
        return target

    # Static exhaustiveness check only; at runtime we fall through to the warning.
    if TYPE_CHECKING and response_format != "diarized_json":  # type: ignore[unreachable]
        assert_never(response_format)
    log.warning("Unexpected audio response format: %s", response_format)
    return Translation

View File

@@ -0,0 +1,530 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal
import httpx
from .. import _legacy_response
from ..types import batch_list_params, batch_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..pagination import SyncCursorPage, AsyncCursorPage
from ..types.batch import Batch
from .._base_client import AsyncPaginator, make_request_options
from ..types.shared_params.metadata import Metadata
__all__ = ["Batches", "AsyncBatches"]
class Batches(SyncAPIResource):
    """Sync resource for the `/batches` API."""

    @cached_property
    def with_raw_response(self) -> BatchesWithRawResponse:
        """
        Prefix any method call with this property to receive the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return BatchesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> BatchesWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return BatchesWithStreamingResponse(self)

    def create(
        self,
        *,
        completion_window: Literal["24h"],
        endpoint: Literal[
            "/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"
        ],
        input_file_id: str,
        metadata: Optional[Metadata] | Omit = omit,
        output_expires_after: batch_create_params.OutputExpiresAfter | Omit = omit,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """
        Creates and executes a batch from an uploaded file of requests.

        Args:
          completion_window: The time frame within which the batch should be processed;
              currently only `24h` is supported.

          endpoint: The endpoint used for all requests in the batch (`/v1/responses`,
              `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, or
              `/v1/moderations`). Note that `/v1/embeddings` batches are restricted to a
              maximum of 50,000 embedding inputs across all requests in the batch.

          input_file_id: ID of an uploaded file containing requests for the new batch. See
              [upload file](https://platform.openai.com/docs/api-reference/files/create) for
              how to upload a file. The input must be a
              [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input)
              uploaded with purpose `batch`, with up to 50,000 requests and up to 200 MB.

          metadata: Set of up to 16 key-value pairs attached to the object, useful for
              storing structured information and querying via API or dashboard. Keys are
              strings up to 64 characters; values are strings up to 512 characters.

          output_expires_after: Expiration policy for the output and/or error file generated
              for the batch.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = maybe_transform(
            {
                "completion_window": completion_window,
                "endpoint": endpoint,
                "input_file_id": input_file_id,
                "metadata": metadata,
                "output_expires_after": output_expires_after,
            },
            batch_create_params.BatchCreateParams,
        )
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._post("/batches", body=request_body, options=opts, cast_to=Batch)

    def retrieve(
        self,
        batch_id: str,
        *,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """
        Retrieves a batch.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._get(f"/batches/{batch_id}", options=opts, cast_to=Batch)

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[Batch]:
        """
        List your organization's batches.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your
              place in the list; e.g. after receiving 100 objects ending with obj_foo, pass
              after=obj_foo to fetch the next page.

          limit: A limit on the number of objects returned, between 1 and 100 (default 20).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        page_query = maybe_transform(
            {
                "after": after,
                "limit": limit,
            },
            batch_list_params.BatchListParams,
        )
        return self._get_api_list(
            "/batches",
            page=SyncCursorPage[Batch],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=page_query,
            ),
            model=Batch,
        )

    def cancel(
        self,
        batch_id: str,
        *,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """
        Cancels an in-progress batch. The batch stays in status `cancelling` for up
        to 10 minutes before becoming `cancelled`, with any partial results available
        in the output file.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._post(f"/batches/{batch_id}/cancel", options=opts, cast_to=Batch)
class AsyncBatches(AsyncAPIResource):
    """Async resource for the `/batches` API."""

    @cached_property
    def with_raw_response(self) -> AsyncBatchesWithRawResponse:
        """
        Prefix any method call with this property to receive the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncBatchesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncBatchesWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncBatchesWithStreamingResponse(self)

    async def create(
        self,
        *,
        completion_window: Literal["24h"],
        endpoint: Literal[
            "/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"
        ],
        input_file_id: str,
        metadata: Optional[Metadata] | Omit = omit,
        output_expires_after: batch_create_params.OutputExpiresAfter | Omit = omit,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """
        Creates and executes a batch from an uploaded file of requests.

        Args:
          completion_window: The time frame within which the batch should be processed;
              currently only `24h` is supported.

          endpoint: The endpoint used for all requests in the batch (`/v1/responses`,
              `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, or
              `/v1/moderations`). Note that `/v1/embeddings` batches are restricted to a
              maximum of 50,000 embedding inputs across all requests in the batch.

          input_file_id: ID of an uploaded file containing requests for the new batch. See
              [upload file](https://platform.openai.com/docs/api-reference/files/create) for
              how to upload a file. The input must be a
              [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input)
              uploaded with purpose `batch`, with up to 50,000 requests and up to 200 MB.

          metadata: Set of up to 16 key-value pairs attached to the object, useful for
              storing structured information and querying via API or dashboard. Keys are
              strings up to 64 characters; values are strings up to 512 characters.

          output_expires_after: Expiration policy for the output and/or error file generated
              for the batch.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = await async_maybe_transform(
            {
                "completion_window": completion_window,
                "endpoint": endpoint,
                "input_file_id": input_file_id,
                "metadata": metadata,
                "output_expires_after": output_expires_after,
            },
            batch_create_params.BatchCreateParams,
        )
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._post("/batches", body=request_body, options=opts, cast_to=Batch)

    async def retrieve(
        self,
        batch_id: str,
        *,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """
        Retrieves a batch.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._get(f"/batches/{batch_id}", options=opts, cast_to=Batch)

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[Batch, AsyncCursorPage[Batch]]:
        """
        List your organization's batches.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your
              place in the list; e.g. after receiving 100 objects ending with obj_foo, pass
              after=obj_foo to fetch the next page.

          limit: A limit on the number of objects returned, between 1 and 100 (default 20).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Note: not a coroutine — the paginator drives the request lazily, so the
        # query transform stays synchronous here.
        page_query = maybe_transform(
            {
                "after": after,
                "limit": limit,
            },
            batch_list_params.BatchListParams,
        )
        return self._get_api_list(
            "/batches",
            page=AsyncCursorPage[Batch],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=page_query,
            ),
            model=Batch,
        )

    async def cancel(
        self,
        batch_id: str,
        *,
        # Extra request parameters not exposed above; these take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """
        Cancels an in-progress batch. The batch stays in status `cancelling` for up
        to 10 minutes before becoming `cancelled`, with any partial results available
        in the output file.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._post(f"/batches/{batch_id}/cancel", options=opts, cast_to=Batch)
class BatchesWithRawResponse:
    """Raw-response view of `Batches`: wrapped methods return the raw HTTP response."""

    def __init__(self, batches: Batches) -> None:
        self._batches = batches
        self.create = _legacy_response.to_raw_response_wrapper(
            batches.create,
        )
        self.retrieve = _legacy_response.to_raw_response_wrapper(
            batches.retrieve,
        )
        self.list = _legacy_response.to_raw_response_wrapper(
            batches.list,
        )
        self.cancel = _legacy_response.to_raw_response_wrapper(
            batches.cancel,
        )
class AsyncBatchesWithRawResponse:
    """Raw-response view of `AsyncBatches`: wrapped methods return the raw HTTP response."""

    def __init__(self, batches: AsyncBatches) -> None:
        self._batches = batches
        self.create = _legacy_response.async_to_raw_response_wrapper(
            batches.create,
        )
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
            batches.retrieve,
        )
        self.list = _legacy_response.async_to_raw_response_wrapper(
            batches.list,
        )
        self.cancel = _legacy_response.async_to_raw_response_wrapper(
            batches.cancel,
        )
class BatchesWithStreamingResponse:
    """Streaming-response view of `Batches`: the response body is not read eagerly."""

    def __init__(self, batches: Batches) -> None:
        self._batches = batches
        self.create = to_streamed_response_wrapper(
            batches.create,
        )
        self.retrieve = to_streamed_response_wrapper(
            batches.retrieve,
        )
        self.list = to_streamed_response_wrapper(
            batches.list,
        )
        self.cancel = to_streamed_response_wrapper(
            batches.cancel,
        )
class AsyncBatchesWithStreamingResponse:
    """Streaming-response view of `AsyncBatches`: the response body is not read eagerly."""

    def __init__(self, batches: AsyncBatches) -> None:
        self._batches = batches
        self.create = async_to_streamed_response_wrapper(
            batches.create,
        )
        self.retrieve = async_to_streamed_response_wrapper(
            batches.retrieve,
        )
        self.list = async_to_streamed_response_wrapper(
            batches.list,
        )
        self.cancel = async_to_streamed_response_wrapper(
            batches.cancel,
        )

View File

@@ -0,0 +1,61 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .beta import (
Beta,
AsyncBeta,
BetaWithRawResponse,
AsyncBetaWithRawResponse,
BetaWithStreamingResponse,
AsyncBetaWithStreamingResponse,
)
from .chatkit import (
ChatKit,
AsyncChatKit,
ChatKitWithRawResponse,
AsyncChatKitWithRawResponse,
ChatKitWithStreamingResponse,
AsyncChatKitWithStreamingResponse,
)
from .threads import (
Threads,
AsyncThreads,
ThreadsWithRawResponse,
AsyncThreadsWithRawResponse,
ThreadsWithStreamingResponse,
AsyncThreadsWithStreamingResponse,
)
from .assistants import (
Assistants,
AsyncAssistants,
AssistantsWithRawResponse,
AsyncAssistantsWithRawResponse,
AssistantsWithStreamingResponse,
AsyncAssistantsWithStreamingResponse,
)
# Public names re-exported by this package (sub-resources plus their raw/streaming
# response wrappers).
__all__ = [
    "ChatKit",
    "AsyncChatKit",
    "ChatKitWithRawResponse",
    "AsyncChatKitWithRawResponse",
    "ChatKitWithStreamingResponse",
    "AsyncChatKitWithStreamingResponse",
    "Assistants",
    "AsyncAssistants",
    "AssistantsWithRawResponse",
    "AsyncAssistantsWithRawResponse",
    "AssistantsWithStreamingResponse",
    "AsyncAssistantsWithStreamingResponse",
    "Threads",
    "AsyncThreads",
    "ThreadsWithRawResponse",
    "AsyncThreadsWithRawResponse",
    "ThreadsWithStreamingResponse",
    "AsyncThreadsWithStreamingResponse",
    "Beta",
    "AsyncBeta",
    "BetaWithRawResponse",
    "AsyncBetaWithRawResponse",
    "BetaWithStreamingResponse",
    "AsyncBetaWithStreamingResponse",
]

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,187 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from ..._compat import cached_property
from .assistants import (
Assistants,
AsyncAssistants,
AssistantsWithRawResponse,
AsyncAssistantsWithRawResponse,
AssistantsWithStreamingResponse,
AsyncAssistantsWithStreamingResponse,
)
from ..._resource import SyncAPIResource, AsyncAPIResource
from .chatkit.chatkit import (
ChatKit,
AsyncChatKit,
ChatKitWithRawResponse,
AsyncChatKitWithRawResponse,
ChatKitWithStreamingResponse,
AsyncChatKitWithStreamingResponse,
)
from .threads.threads import (
Threads,
AsyncThreads,
ThreadsWithRawResponse,
AsyncThreadsWithRawResponse,
ThreadsWithStreamingResponse,
AsyncThreadsWithStreamingResponse,
)
from ...resources.chat import Chat, AsyncChat
from .realtime.realtime import (
Realtime,
AsyncRealtime,
)
__all__ = ["Beta", "AsyncBeta"]
class Beta(SyncAPIResource):
    """Umbrella sync resource grouping the beta sub-resources under `client.beta`."""

    @cached_property
    def chat(self) -> Chat:
        # Delegates to the top-level (non-beta) Chat resource.
        return Chat(self._client)

    @cached_property
    def realtime(self) -> Realtime:
        return Realtime(self._client)

    @cached_property
    def chatkit(self) -> ChatKit:
        return ChatKit(self._client)

    @cached_property
    def assistants(self) -> Assistants:
        return Assistants(self._client)

    @cached_property
    def threads(self) -> Threads:
        return Threads(self._client)

    @cached_property
    def with_raw_response(self) -> BetaWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return BetaWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> BetaWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return BetaWithStreamingResponse(self)
class AsyncBeta(AsyncAPIResource):
    """Umbrella async resource grouping the beta sub-resources under `client.beta`."""

    @cached_property
    def chat(self) -> AsyncChat:
        # Delegates to the top-level (non-beta) AsyncChat resource.
        return AsyncChat(self._client)

    @cached_property
    def realtime(self) -> AsyncRealtime:
        return AsyncRealtime(self._client)

    @cached_property
    def chatkit(self) -> AsyncChatKit:
        return AsyncChatKit(self._client)

    @cached_property
    def assistants(self) -> AsyncAssistants:
        return AsyncAssistants(self._client)

    @cached_property
    def threads(self) -> AsyncThreads:
        return AsyncThreads(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncBetaWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncBetaWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncBetaWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncBetaWithStreamingResponse(self)
class BetaWithRawResponse:
    """Raw-response view of `Beta`'s sub-resources."""

    # NOTE(review): no `chat`/`realtime` accessors here even though `Beta` exposes
    # both — confirm this is intentional.

    def __init__(self, beta: Beta) -> None:
        self._beta = beta

    @cached_property
    def chatkit(self) -> ChatKitWithRawResponse:
        return ChatKitWithRawResponse(self._beta.chatkit)

    @cached_property
    def assistants(self) -> AssistantsWithRawResponse:
        return AssistantsWithRawResponse(self._beta.assistants)

    @cached_property
    def threads(self) -> ThreadsWithRawResponse:
        return ThreadsWithRawResponse(self._beta.threads)
class AsyncBetaWithRawResponse:
    """Raw-response view over the `AsyncBeta` resource's sub-resources."""

    def __init__(self, beta: AsyncBeta) -> None:
        self._beta = beta

    @cached_property
    def chatkit(self) -> AsyncChatKitWithRawResponse:
        """Raw-response wrapper around the chatkit sub-resource."""
        source = self._beta.chatkit
        return AsyncChatKitWithRawResponse(source)

    @cached_property
    def assistants(self) -> AsyncAssistantsWithRawResponse:
        """Raw-response wrapper around the assistants sub-resource."""
        source = self._beta.assistants
        return AsyncAssistantsWithRawResponse(source)

    @cached_property
    def threads(self) -> AsyncThreadsWithRawResponse:
        """Raw-response wrapper around the threads sub-resource."""
        source = self._beta.threads
        return AsyncThreadsWithRawResponse(source)
class BetaWithStreamingResponse:
    """Streaming-response view over the `Beta` resource's sub-resources."""

    def __init__(self, beta: Beta) -> None:
        self._beta = beta

    @cached_property
    def chatkit(self) -> ChatKitWithStreamingResponse:
        """Streaming-response wrapper around the chatkit sub-resource."""
        source = self._beta.chatkit
        return ChatKitWithStreamingResponse(source)

    @cached_property
    def assistants(self) -> AssistantsWithStreamingResponse:
        """Streaming-response wrapper around the assistants sub-resource."""
        source = self._beta.assistants
        return AssistantsWithStreamingResponse(source)

    @cached_property
    def threads(self) -> ThreadsWithStreamingResponse:
        """Streaming-response wrapper around the threads sub-resource."""
        source = self._beta.threads
        return ThreadsWithStreamingResponse(source)
class AsyncBetaWithStreamingResponse:
    """Streaming-response view over the `AsyncBeta` resource's sub-resources."""

    def __init__(self, beta: AsyncBeta) -> None:
        self._beta = beta

    @cached_property
    def chatkit(self) -> AsyncChatKitWithStreamingResponse:
        """Streaming-response wrapper around the chatkit sub-resource."""
        source = self._beta.chatkit
        return AsyncChatKitWithStreamingResponse(source)

    @cached_property
    def assistants(self) -> AsyncAssistantsWithStreamingResponse:
        """Streaming-response wrapper around the assistants sub-resource."""
        source = self._beta.assistants
        return AsyncAssistantsWithStreamingResponse(source)

    @cached_property
    def threads(self) -> AsyncThreadsWithStreamingResponse:
        """Streaming-response wrapper around the threads sub-resource."""
        source = self._beta.threads
        return AsyncThreadsWithStreamingResponse(source)

View File

@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .chatkit import (
ChatKit,
AsyncChatKit,
ChatKitWithRawResponse,
AsyncChatKitWithRawResponse,
ChatKitWithStreamingResponse,
AsyncChatKitWithStreamingResponse,
)
from .threads import (
Threads,
AsyncThreads,
ThreadsWithRawResponse,
AsyncThreadsWithRawResponse,
ThreadsWithStreamingResponse,
AsyncThreadsWithStreamingResponse,
)
from .sessions import (
Sessions,
AsyncSessions,
SessionsWithRawResponse,
AsyncSessionsWithRawResponse,
SessionsWithStreamingResponse,
AsyncSessionsWithStreamingResponse,
)
# Public re-exports for the beta ChatKit resource package.
__all__ = [
    "Sessions",
    "AsyncSessions",
    "SessionsWithRawResponse",
    "AsyncSessionsWithRawResponse",
    "SessionsWithStreamingResponse",
    "AsyncSessionsWithStreamingResponse",
    "Threads",
    "AsyncThreads",
    "ThreadsWithRawResponse",
    "AsyncThreadsWithRawResponse",
    "ThreadsWithStreamingResponse",
    "AsyncThreadsWithStreamingResponse",
    "ChatKit",
    "AsyncChatKit",
    "ChatKitWithRawResponse",
    "AsyncChatKitWithRawResponse",
    "ChatKitWithStreamingResponse",
    "AsyncChatKitWithStreamingResponse",
]

View File

@@ -0,0 +1,134 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from .threads import (
Threads,
AsyncThreads,
ThreadsWithRawResponse,
AsyncThreadsWithRawResponse,
ThreadsWithStreamingResponse,
AsyncThreadsWithStreamingResponse,
)
from .sessions import (
Sessions,
AsyncSessions,
SessionsWithRawResponse,
AsyncSessionsWithRawResponse,
SessionsWithStreamingResponse,
AsyncSessionsWithStreamingResponse,
)
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
__all__ = ["ChatKit", "AsyncChatKit"]
class ChatKit(SyncAPIResource):
    """Synchronous ChatKit resource grouping the sessions and threads sub-resources."""

    @cached_property
    def sessions(self) -> Sessions:
        """Sub-resource for ChatKit sessions."""
        client = self._client
        return Sessions(client)

    @cached_property
    def threads(self) -> Threads:
        """Sub-resource for ChatKit threads."""
        client = self._client
        return Threads(client)

    @cached_property
    def with_raw_response(self) -> ChatKitWithRawResponse:
        """
        Return a view of this resource whose method calls yield the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ChatKitWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ChatKitWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ChatKitWithStreamingResponse(self)
class AsyncChatKit(AsyncAPIResource):
    """Asynchronous ChatKit resource grouping the sessions and threads sub-resources."""

    @cached_property
    def sessions(self) -> AsyncSessions:
        """Sub-resource for ChatKit sessions."""
        client = self._client
        return AsyncSessions(client)

    @cached_property
    def threads(self) -> AsyncThreads:
        """Sub-resource for ChatKit threads."""
        client = self._client
        return AsyncThreads(client)

    @cached_property
    def with_raw_response(self) -> AsyncChatKitWithRawResponse:
        """
        Return a view of this resource whose method calls yield the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncChatKitWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncChatKitWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncChatKitWithStreamingResponse(self)
class ChatKitWithRawResponse:
    """Raw-response view over `ChatKit` and its sub-resources."""

    def __init__(self, chatkit: ChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> SessionsWithRawResponse:
        """Raw-response wrapper around the sessions sub-resource."""
        source = self._chatkit.sessions
        return SessionsWithRawResponse(source)

    @cached_property
    def threads(self) -> ThreadsWithRawResponse:
        """Raw-response wrapper around the threads sub-resource."""
        source = self._chatkit.threads
        return ThreadsWithRawResponse(source)
class AsyncChatKitWithRawResponse:
    """Raw-response view over `AsyncChatKit` and its sub-resources."""

    def __init__(self, chatkit: AsyncChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> AsyncSessionsWithRawResponse:
        """Raw-response wrapper around the sessions sub-resource."""
        source = self._chatkit.sessions
        return AsyncSessionsWithRawResponse(source)

    @cached_property
    def threads(self) -> AsyncThreadsWithRawResponse:
        """Raw-response wrapper around the threads sub-resource."""
        source = self._chatkit.threads
        return AsyncThreadsWithRawResponse(source)
class ChatKitWithStreamingResponse:
    """Streaming-response view over `ChatKit` and its sub-resources."""

    def __init__(self, chatkit: ChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> SessionsWithStreamingResponse:
        """Streaming-response wrapper around the sessions sub-resource."""
        source = self._chatkit.sessions
        return SessionsWithStreamingResponse(source)

    @cached_property
    def threads(self) -> ThreadsWithStreamingResponse:
        """Streaming-response wrapper around the threads sub-resource."""
        source = self._chatkit.threads
        return ThreadsWithStreamingResponse(source)
class AsyncChatKitWithStreamingResponse:
    """Streaming-response view over `AsyncChatKit` and its sub-resources."""

    def __init__(self, chatkit: AsyncChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> AsyncSessionsWithStreamingResponse:
        """Streaming-response wrapper around the sessions sub-resource."""
        source = self._chatkit.sessions
        return AsyncSessionsWithStreamingResponse(source)

    @cached_property
    def threads(self) -> AsyncThreadsWithStreamingResponse:
        """Streaming-response wrapper around the threads sub-resource."""
        source = self._chatkit.threads
        return AsyncThreadsWithStreamingResponse(source)

View File

@@ -0,0 +1,301 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._base_client import make_request_options
from ....types.beta.chatkit import (
ChatSessionWorkflowParam,
ChatSessionRateLimitsParam,
ChatSessionExpiresAfterParam,
ChatSessionChatKitConfigurationParam,
session_create_params,
)
from ....types.beta.chatkit.chat_session import ChatSession
from ....types.beta.chatkit.chat_session_workflow_param import ChatSessionWorkflowParam
from ....types.beta.chatkit.chat_session_rate_limits_param import ChatSessionRateLimitsParam
from ....types.beta.chatkit.chat_session_expires_after_param import ChatSessionExpiresAfterParam
from ....types.beta.chatkit.chat_session_chatkit_configuration_param import ChatSessionChatKitConfigurationParam
__all__ = ["Sessions", "AsyncSessions"]
class Sessions(SyncAPIResource):
    """Synchronous resource for the ChatKit sessions API (beta): create and cancel."""

    @cached_property
    def with_raw_response(self) -> SessionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return SessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SessionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return SessionsWithStreamingResponse(self)

    def create(
        self,
        *,
        user: str,
        workflow: ChatSessionWorkflowParam,
        chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit,
        expires_after: ChatSessionExpiresAfterParam | Omit = omit,
        rate_limits: ChatSessionRateLimitsParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Create a ChatKit session

        Args:
          user: A free-form string that identifies your end user; ensures this Session can
              access other objects that have the same `user` scope.

          workflow: Workflow that powers the session.

          chatkit_configuration: Optional overrides for ChatKit runtime configuration features

          expires_after: Optional override for session expiration timing in seconds from creation.
              Defaults to 10 minutes.

          rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # ChatKit is a beta surface: opt in via the OpenAI-Beta header. Caller-supplied
        # extra_headers are merged afterwards, so they take precedence on key clashes.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._post(
            "/chatkit/sessions",
            body=maybe_transform(
                {
                    "user": user,
                    "workflow": workflow,
                    "chatkit_configuration": chatkit_configuration,
                    "expires_after": expires_after,
                    "rate_limits": rate_limits,
                },
                session_create_params.SessionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )

    def cancel(
        self,
        session_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Cancel a ChatKit session

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would produce a malformed URL.
        if not session_id:
            raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._post(
            f"/chatkit/sessions/{session_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )
class AsyncSessions(AsyncAPIResource):
    """Asynchronous resource for the ChatKit sessions API (beta): create and cancel."""

    @cached_property
    def with_raw_response(self) -> AsyncSessionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncSessionsWithStreamingResponse(self)

    async def create(
        self,
        *,
        user: str,
        workflow: ChatSessionWorkflowParam,
        chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit,
        expires_after: ChatSessionExpiresAfterParam | Omit = omit,
        rate_limits: ChatSessionRateLimitsParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Create a ChatKit session

        Args:
          user: A free-form string that identifies your end user; ensures this Session can
              access other objects that have the same `user` scope.

          workflow: Workflow that powers the session.

          chatkit_configuration: Optional overrides for ChatKit runtime configuration features

          expires_after: Optional override for session expiration timing in seconds from creation.
              Defaults to 10 minutes.

          rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # ChatKit is a beta surface: opt in via the OpenAI-Beta header. Caller-supplied
        # extra_headers are merged afterwards, so they take precedence on key clashes.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return await self._post(
            "/chatkit/sessions",
            body=await async_maybe_transform(
                {
                    "user": user,
                    "workflow": workflow,
                    "chatkit_configuration": chatkit_configuration,
                    "expires_after": expires_after,
                    "rate_limits": rate_limits,
                },
                session_create_params.SessionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )

    async def cancel(
        self,
        session_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Cancel a ChatKit session

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would produce a malformed URL.
        if not session_id:
            raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return await self._post(
            f"/chatkit/sessions/{session_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )
class SessionsWithRawResponse:
    """Wraps each `Sessions` method so calls return the raw HTTP response."""

    def __init__(self, sessions: Sessions) -> None:
        self._sessions = sessions

        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(sessions.create)
        self.cancel = wrap(sessions.cancel)
class AsyncSessionsWithRawResponse:
    """Wraps each `AsyncSessions` method so calls return the raw HTTP response."""

    def __init__(self, sessions: AsyncSessions) -> None:
        self._sessions = sessions

        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(sessions.create)
        self.cancel = wrap(sessions.cancel)
class SessionsWithStreamingResponse:
    """Wraps each `Sessions` method so calls return a streamed response."""

    def __init__(self, sessions: Sessions) -> None:
        self._sessions = sessions

        wrap = to_streamed_response_wrapper
        self.create = wrap(sessions.create)
        self.cancel = wrap(sessions.cancel)
class AsyncSessionsWithStreamingResponse:
    """Wraps each `AsyncSessions` method so calls return a streamed response."""

    def __init__(self, sessions: AsyncSessions) -> None:
        self._sessions = sessions

        wrap = async_to_streamed_response_wrapper
        self.create = wrap(sessions.create)
        self.cancel = wrap(sessions.cancel)

View File

@@ -0,0 +1,521 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Any, cast
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.beta.chatkit import thread_list_params, thread_list_items_params
from ....types.beta.chatkit.chatkit_thread import ChatKitThread
from ....types.beta.chatkit.thread_delete_response import ThreadDeleteResponse
from ....types.beta.chatkit.chatkit_thread_item_list import Data
__all__ = ["Threads", "AsyncThreads"]
class Threads(SyncAPIResource):
    """Synchronous resource for ChatKit threads (beta): retrieve, list, delete, list_items."""

    @cached_property
    def with_raw_response(self) -> ThreadsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ThreadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ThreadsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ThreadsWithStreamingResponse(self)

    def retrieve(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatKitThread:
        """
        Retrieve a ChatKit thread

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would produce a malformed URL.
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        # ChatKit is a beta surface; caller-supplied headers are merged last and win.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatKitThread,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[ChatKitThread]:
        """
        List ChatKit threads

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          user: Filter threads that belong to this user identifier. Defaults to null to return
              all users.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        # Returns a cursor page that lazily fetches subsequent pages on iteration.
        return self._get_api_list(
            "/chatkit/threads",
            page=SyncConversationCursorPage[ChatKitThread],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                        "user": user,
                    },
                    thread_list_params.ThreadListParams,
                ),
            ),
            model=ChatKitThread,
        )

    def delete(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ThreadDeleteResponse:
        """
        Delete a ChatKit thread

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would produce a malformed URL.
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._delete(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ThreadDeleteResponse,
        )

    def list_items(
        self,
        thread_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[Data]:
        """
        List ChatKit thread items

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            f"/chatkit/threads/{thread_id}/items",
            page=SyncConversationCursorPage[Data],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                    },
                    thread_list_items_params.ThreadListItemsParams,
                ),
            ),
            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
        )
class AsyncThreads(AsyncAPIResource):
    """Asynchronous resource for ChatKit threads (beta): retrieve, list, delete, list_items."""

    @cached_property
    def with_raw_response(self) -> AsyncThreadsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncThreadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncThreadsWithStreamingResponse(self)

    async def retrieve(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatKitThread:
        """
        Retrieve a ChatKit thread

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would produce a malformed URL.
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        # ChatKit is a beta surface; caller-supplied headers are merged last and win.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return await self._get(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatKitThread,
        )

    # NOTE: not async — returns an AsyncPaginator that performs the request on await/iteration.
    def list(
        self,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[ChatKitThread, AsyncConversationCursorPage[ChatKitThread]]:
        """
        List ChatKit threads

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          user: Filter threads that belong to this user identifier. Defaults to null to return
              all users.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            "/chatkit/threads",
            page=AsyncConversationCursorPage[ChatKitThread],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                        "user": user,
                    },
                    thread_list_params.ThreadListParams,
                ),
            ),
            model=ChatKitThread,
        )

    async def delete(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ThreadDeleteResponse:
        """
        Delete a ChatKit thread

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would produce a malformed URL.
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return await self._delete(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ThreadDeleteResponse,
        )

    # NOTE: not async — returns an AsyncPaginator that performs the request on await/iteration.
    def list_items(
        self,
        thread_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[Data, AsyncConversationCursorPage[Data]]:
        """
        List ChatKit thread items

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            f"/chatkit/threads/{thread_id}/items",
            page=AsyncConversationCursorPage[Data],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                    },
                    thread_list_items_params.ThreadListItemsParams,
                ),
            ),
            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
        )
class ThreadsWithRawResponse:
    """Wraps each `Threads` method so calls return the raw HTTP response."""

    def __init__(self, threads: Threads) -> None:
        self._threads = threads

        wrap = _legacy_response.to_raw_response_wrapper
        self.retrieve = wrap(threads.retrieve)
        self.list = wrap(threads.list)
        self.delete = wrap(threads.delete)
        self.list_items = wrap(threads.list_items)
class AsyncThreadsWithRawResponse:
    """Wraps each `AsyncThreads` method so calls return the raw HTTP response."""

    def __init__(self, threads: AsyncThreads) -> None:
        self._threads = threads

        wrap = _legacy_response.async_to_raw_response_wrapper
        self.retrieve = wrap(threads.retrieve)
        self.list = wrap(threads.list)
        self.delete = wrap(threads.delete)
        self.list_items = wrap(threads.list_items)
class ThreadsWithStreamingResponse:
    """Wraps each `Threads` method so calls return a streamed response."""

    def __init__(self, threads: Threads) -> None:
        self._threads = threads

        wrap = to_streamed_response_wrapper
        self.retrieve = wrap(threads.retrieve)
        self.list = wrap(threads.list)
        self.delete = wrap(threads.delete)
        self.list_items = wrap(threads.list_items)
class AsyncThreadsWithStreamingResponse:
    """Wraps each `AsyncThreads` method so calls return a streamed response."""

    def __init__(self, threads: AsyncThreads) -> None:
        self._threads = threads

        wrap = async_to_streamed_response_wrapper
        self.retrieve = wrap(threads.retrieve)
        self.list = wrap(threads.list)
        self.delete = wrap(threads.delete)
        self.list_items = wrap(threads.list_items)

View File (new file, 47 lines — presumably `beta/realtime/__init__.py`, judging by the re-exports that follow; original filename lost in extraction)

@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .realtime import (
Realtime,
AsyncRealtime,
RealtimeWithRawResponse,
AsyncRealtimeWithRawResponse,
RealtimeWithStreamingResponse,
AsyncRealtimeWithStreamingResponse,
)
from .sessions import (
Sessions,
AsyncSessions,
SessionsWithRawResponse,
AsyncSessionsWithRawResponse,
SessionsWithStreamingResponse,
AsyncSessionsWithStreamingResponse,
)
from .transcription_sessions import (
TranscriptionSessions,
AsyncTranscriptionSessions,
TranscriptionSessionsWithRawResponse,
AsyncTranscriptionSessionsWithRawResponse,
TranscriptionSessionsWithStreamingResponse,
AsyncTranscriptionSessionsWithStreamingResponse,
)
# Explicit public API of this package; controls what `from ... import *` exposes.
__all__ = [
    "Sessions",
    "AsyncSessions",
    "SessionsWithRawResponse",
    "AsyncSessionsWithRawResponse",
    "SessionsWithStreamingResponse",
    "AsyncSessionsWithStreamingResponse",
    "TranscriptionSessions",
    "AsyncTranscriptionSessions",
    "TranscriptionSessionsWithRawResponse",
    "AsyncTranscriptionSessionsWithRawResponse",
    "TranscriptionSessionsWithStreamingResponse",
    "AsyncTranscriptionSessionsWithStreamingResponse",
    "Realtime",
    "AsyncRealtime",
    "RealtimeWithRawResponse",
    "AsyncRealtimeWithRawResponse",
    "RealtimeWithStreamingResponse",
    "AsyncRealtimeWithStreamingResponse",
]

View File (new file, 424 lines — presumably `beta/realtime/sessions.py`, judging by the `Sessions`/`AsyncSessions` classes that follow; original filename lost in extraction)

@@ -0,0 +1,424 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Iterable
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._base_client import make_request_options
from ....types.beta.realtime import session_create_params
from ....types.beta.realtime.session_create_response import SessionCreateResponse
__all__ = ["Sessions", "AsyncSessions"]  # public API of this module for star-imports
class Sessions(SyncAPIResource):
    """Synchronous resource for `/realtime/sessions`."""

    @cached_property
    def with_raw_response(self) -> SessionsWithRawResponse:
        """Return a view of this resource whose methods yield the raw HTTP
        response object instead of the parsed content.

        See https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return SessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SessionsWithStreamingResponse:
        """Like ``with_raw_response``, but the response body is not read eagerly.

        See https://www.github.com/openai/openai-python#with_streaming_response
        """
        return SessionsWithStreamingResponse(self)

    def create(
        self,
        *,
        client_secret: session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
        instructions: str | NotGiven = NOT_GIVEN,
        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
        model: Literal[
            "gpt-realtime",
            "gpt-realtime-2025-08-28",
            "gpt-4o-realtime-preview",
            "gpt-4o-realtime-preview-2024-10-01",
            "gpt-4o-realtime-preview-2024-12-17",
            "gpt-4o-realtime-preview-2025-06-03",
            "gpt-4o-mini-realtime-preview",
            "gpt-4o-mini-realtime-preview-2024-12-17",
        ]
        | NotGiven = NOT_GIVEN,
        output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        speed: float | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        tool_choice: str | NotGiven = NOT_GIVEN,
        tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
        tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN,
        turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
        voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
        | NotGiven = NOT_GIVEN,
        # Extra-parameter escape hatches; values here take precedence over
        # client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> SessionCreateResponse:
        """Create an ephemeral API token for use in client-side applications with
        the Realtime API.

        Accepts the same session configuration as the ``session.update`` client
        event. The response is a session object plus a ``client_secret`` key
        containing a usable ephemeral API token that can authenticate browser
        clients against the Realtime API.

        Args:
          client_secret: Configuration options for the generated client secret.

          input_audio_format: Input audio format: `pcm16`, `g711_ulaw`, or `g711_alaw`.
              For `pcm16`, audio must be 16-bit PCM, 24kHz, mono, little-endian.

          input_audio_noise_reduction: Noise reduction applied to the input audio
              buffer before it reaches VAD and the model; set to `null` to turn off.

          input_audio_transcription: Optional asynchronous transcription of input
              audio (via the /audio/transcriptions endpoint); treat the result as
              guidance, not exactly what the model heard. Defaults to off; set to
              `null` to turn off once on.

          instructions: Default system instructions prepended to model calls. The
              server sets its own defaults when unset, visible in `session.created`.

          max_response_output_tokens: Cap on output tokens per assistant response,
              inclusive of tool calls: an integer between 1 and 4096, or `inf` for
              the model maximum (the default).

          modalities: Modalities the model may respond with; set to ["text"] to
              disable audio.

          model: The Realtime model used for this session.

          output_audio_format: Output audio format: `pcm16` (sampled at 24kHz),
              `g711_ulaw`, or `g711_alaw`.

          speed: Spoken-response speed between 0.25 and 1.5 (default 1.0); can only
              be changed between model turns.

          temperature: Sampling temperature, limited to [0.6, 1.2]; 0.8 is highly
              recommended for audio models.

          tool_choice: How the model chooses tools: `auto`, `none`, `required`, or
              a specific function.

          tools: Tools (functions) available to the model.

          tracing: Tracing configuration; `auto` creates a trace with default
              workflow name, group id and metadata. Set to null to disable; once
              enabled for a session it cannot be modified.

          turn_detection: Turn detection, either Server VAD or Semantic VAD; set to
              `null` to turn off, in which case the client must trigger model
              responses manually.

          voice: Voice used for responses; cannot change after the model first
              responds with audio. Current options include `alloy`, `ash`,
              `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        body_params = {
            "client_secret": client_secret,
            "input_audio_format": input_audio_format,
            "input_audio_noise_reduction": input_audio_noise_reduction,
            "input_audio_transcription": input_audio_transcription,
            "instructions": instructions,
            "max_response_output_tokens": max_response_output_tokens,
            "modalities": modalities,
            "model": model,
            "output_audio_format": output_audio_format,
            "speed": speed,
            "temperature": temperature,
            "tool_choice": tool_choice,
            "tools": tools,
            "tracing": tracing,
            "turn_detection": turn_detection,
            "voice": voice,
        }
        return self._post(
            "/realtime/sessions",
            body=maybe_transform(body_params, session_create_params.SessionCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SessionCreateResponse,
        )
class AsyncSessions(AsyncAPIResource):
    """Asynchronous resource for `/realtime/sessions`."""

    @cached_property
    def with_raw_response(self) -> AsyncSessionsWithRawResponse:
        """Return a view of this resource whose methods yield the raw HTTP
        response object instead of the parsed content.

        See https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
        """Like ``with_raw_response``, but the response body is not read eagerly.

        See https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncSessionsWithStreamingResponse(self)

    async def create(
        self,
        *,
        client_secret: session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
        instructions: str | NotGiven = NOT_GIVEN,
        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
        model: Literal[
            "gpt-realtime",
            "gpt-realtime-2025-08-28",
            "gpt-4o-realtime-preview",
            "gpt-4o-realtime-preview-2024-10-01",
            "gpt-4o-realtime-preview-2024-12-17",
            "gpt-4o-realtime-preview-2025-06-03",
            "gpt-4o-mini-realtime-preview",
            "gpt-4o-mini-realtime-preview-2024-12-17",
        ]
        | NotGiven = NOT_GIVEN,
        output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        speed: float | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        tool_choice: str | NotGiven = NOT_GIVEN,
        tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
        tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN,
        turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
        voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
        | NotGiven = NOT_GIVEN,
        # Extra-parameter escape hatches; values here take precedence over
        # client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> SessionCreateResponse:
        """Create an ephemeral API token for use in client-side applications with
        the Realtime API.

        Accepts the same session configuration as the ``session.update`` client
        event. The response is a session object plus a ``client_secret`` key
        containing a usable ephemeral API token that can authenticate browser
        clients against the Realtime API.

        Args:
          client_secret: Configuration options for the generated client secret.

          input_audio_format: Input audio format: `pcm16`, `g711_ulaw`, or `g711_alaw`.
              For `pcm16`, audio must be 16-bit PCM, 24kHz, mono, little-endian.

          input_audio_noise_reduction: Noise reduction applied to the input audio
              buffer before it reaches VAD and the model; set to `null` to turn off.

          input_audio_transcription: Optional asynchronous transcription of input
              audio (via the /audio/transcriptions endpoint); treat the result as
              guidance, not exactly what the model heard. Defaults to off; set to
              `null` to turn off once on.

          instructions: Default system instructions prepended to model calls. The
              server sets its own defaults when unset, visible in `session.created`.

          max_response_output_tokens: Cap on output tokens per assistant response,
              inclusive of tool calls: an integer between 1 and 4096, or `inf` for
              the model maximum (the default).

          modalities: Modalities the model may respond with; set to ["text"] to
              disable audio.

          model: The Realtime model used for this session.

          output_audio_format: Output audio format: `pcm16` (sampled at 24kHz),
              `g711_ulaw`, or `g711_alaw`.

          speed: Spoken-response speed between 0.25 and 1.5 (default 1.0); can only
              be changed between model turns.

          temperature: Sampling temperature, limited to [0.6, 1.2]; 0.8 is highly
              recommended for audio models.

          tool_choice: How the model chooses tools: `auto`, `none`, `required`, or
              a specific function.

          tools: Tools (functions) available to the model.

          tracing: Tracing configuration; `auto` creates a trace with default
              workflow name, group id and metadata. Set to null to disable; once
              enabled for a session it cannot be modified.

          turn_detection: Turn detection, either Server VAD or Semantic VAD; set to
              `null` to turn off, in which case the client must trigger model
              responses manually.

          voice: Voice used for responses; cannot change after the model first
              responds with audio. Current options include `alloy`, `ash`,
              `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        body_params = {
            "client_secret": client_secret,
            "input_audio_format": input_audio_format,
            "input_audio_noise_reduction": input_audio_noise_reduction,
            "input_audio_transcription": input_audio_transcription,
            "instructions": instructions,
            "max_response_output_tokens": max_response_output_tokens,
            "modalities": modalities,
            "model": model,
            "output_audio_format": output_audio_format,
            "speed": speed,
            "temperature": temperature,
            "tool_choice": tool_choice,
            "tools": tools,
            "tracing": tracing,
            "turn_detection": turn_detection,
            "voice": voice,
        }
        return await self._post(
            "/realtime/sessions",
            body=await async_maybe_transform(body_params, session_create_params.SessionCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SessionCreateResponse,
        )
class SessionsWithRawResponse:
    """Raw-response view over a ``Sessions`` resource."""

    def __init__(self, sessions: Sessions) -> None:
        self._sessions = sessions
        self.create = _legacy_response.to_raw_response_wrapper(sessions.create)
class AsyncSessionsWithRawResponse:
    """Raw-response view over an ``AsyncSessions`` resource."""

    def __init__(self, sessions: AsyncSessions) -> None:
        self._sessions = sessions
        self.create = _legacy_response.async_to_raw_response_wrapper(sessions.create)
class SessionsWithStreamingResponse:
    """Streaming-response view over a ``Sessions`` resource."""

    def __init__(self, sessions: Sessions) -> None:
        self._sessions = sessions
        self.create = to_streamed_response_wrapper(sessions.create)
class AsyncSessionsWithStreamingResponse:
    """Streaming-response view over an ``AsyncSessions`` resource."""

    def __init__(self, sessions: AsyncSessions) -> None:
        self._sessions = sessions
        self.create = async_to_streamed_response_wrapper(sessions.create)

View File (new file, 282 lines — presumably `beta/realtime/transcription_sessions.py`, judging by the classes that follow; original filename lost in extraction)

@@ -0,0 +1,282 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._base_client import make_request_options
from ....types.beta.realtime import transcription_session_create_params
from ....types.beta.realtime.transcription_session import TranscriptionSession
__all__ = ["TranscriptionSessions", "AsyncTranscriptionSessions"]  # public API of this module for star-imports
class TranscriptionSessions(SyncAPIResource):
    """Synchronous resource for `/realtime/transcription_sessions`."""

    @cached_property
    def with_raw_response(self) -> TranscriptionSessionsWithRawResponse:
        """Return a view of this resource whose methods yield the raw HTTP
        response object instead of the parsed content.

        See https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return TranscriptionSessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> TranscriptionSessionsWithStreamingResponse:
        """Like ``with_raw_response``, but the response body is not read eagerly.

        See https://www.github.com/openai/openai-python#with_streaming_response
        """
        return TranscriptionSessionsWithStreamingResponse(self)

    def create(
        self,
        *,
        client_secret: transcription_session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
        include: List[str] | NotGiven = NOT_GIVEN,
        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
        | NotGiven = NOT_GIVEN,
        input_audio_transcription: transcription_session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
        turn_detection: transcription_session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
        # Extra-parameter escape hatches; values here take precedence over
        # client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> TranscriptionSession:
        """Create an ephemeral API token for client-side use with the Realtime
        API, specifically for realtime transcriptions.

        Accepts the same session configuration as the
        ``transcription_session.update`` client event. The response is a session
        object plus a ``client_secret`` key containing a usable ephemeral API
        token that can authenticate browser clients against the Realtime API.

        Args:
          client_secret: Configuration options for the generated client secret.

          include: Items to include in the transcription; currently
              `item.input_audio_transcription.logprobs` is available.

          input_audio_format: Input audio format: `pcm16`, `g711_ulaw`, or `g711_alaw`.
              For `pcm16`, audio must be 16-bit PCM, 24kHz, mono, little-endian.

          input_audio_noise_reduction: Noise reduction applied to the input audio
              buffer before it reaches VAD and the model; set to `null` to turn off.

          input_audio_transcription: Transcription configuration; optional language
              and prompt give extra guidance to the transcription service.

          modalities: Modalities the model may respond with; set to ["text"] to
              disable audio.

          turn_detection: Turn detection, either Server VAD or Semantic VAD; set to
              `null` to turn off, in which case the client must trigger model
              responses manually.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        body_params = {
            "client_secret": client_secret,
            "include": include,
            "input_audio_format": input_audio_format,
            "input_audio_noise_reduction": input_audio_noise_reduction,
            "input_audio_transcription": input_audio_transcription,
            "modalities": modalities,
            "turn_detection": turn_detection,
        }
        return self._post(
            "/realtime/transcription_sessions",
            body=maybe_transform(body_params, transcription_session_create_params.TranscriptionSessionCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=TranscriptionSession,
        )
class AsyncTranscriptionSessions(AsyncAPIResource):
    """Asynchronous resource for `/realtime/transcription_sessions`."""

    @cached_property
    def with_raw_response(self) -> AsyncTranscriptionSessionsWithRawResponse:
        """Return a view of this resource whose methods yield the raw HTTP
        response object instead of the parsed content.

        See https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncTranscriptionSessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncTranscriptionSessionsWithStreamingResponse:
        """Like ``with_raw_response``, but the response body is not read eagerly.

        See https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncTranscriptionSessionsWithStreamingResponse(self)

    async def create(
        self,
        *,
        client_secret: transcription_session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
        include: List[str] | NotGiven = NOT_GIVEN,
        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
        | NotGiven = NOT_GIVEN,
        input_audio_transcription: transcription_session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
        turn_detection: transcription_session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
        # Extra-parameter escape hatches; values here take precedence over
        # client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> TranscriptionSession:
        """Create an ephemeral API token for client-side use with the Realtime
        API, specifically for realtime transcriptions.

        Accepts the same session configuration as the
        ``transcription_session.update`` client event. The response is a session
        object plus a ``client_secret`` key containing a usable ephemeral API
        token that can authenticate browser clients against the Realtime API.

        Args:
          client_secret: Configuration options for the generated client secret.

          include: Items to include in the transcription; currently
              `item.input_audio_transcription.logprobs` is available.

          input_audio_format: Input audio format: `pcm16`, `g711_ulaw`, or `g711_alaw`.
              For `pcm16`, audio must be 16-bit PCM, 24kHz, mono, little-endian.

          input_audio_noise_reduction: Noise reduction applied to the input audio
              buffer before it reaches VAD and the model; set to `null` to turn off.

          input_audio_transcription: Transcription configuration; optional language
              and prompt give extra guidance to the transcription service.

          modalities: Modalities the model may respond with; set to ["text"] to
              disable audio.

          turn_detection: Turn detection, either Server VAD or Semantic VAD; set to
              `null` to turn off, in which case the client must trigger model
              responses manually.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        body_params = {
            "client_secret": client_secret,
            "include": include,
            "input_audio_format": input_audio_format,
            "input_audio_noise_reduction": input_audio_noise_reduction,
            "input_audio_transcription": input_audio_transcription,
            "modalities": modalities,
            "turn_detection": turn_detection,
        }
        return await self._post(
            "/realtime/transcription_sessions",
            body=await async_maybe_transform(
                body_params, transcription_session_create_params.TranscriptionSessionCreateParams
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=TranscriptionSession,
        )
class TranscriptionSessionsWithRawResponse:
    """Raw-response view over a ``TranscriptionSessions`` resource."""

    def __init__(self, transcription_sessions: TranscriptionSessions) -> None:
        self._transcription_sessions = transcription_sessions
        self.create = _legacy_response.to_raw_response_wrapper(transcription_sessions.create)
class AsyncTranscriptionSessionsWithRawResponse:
    """Raw-response view over an ``AsyncTranscriptionSessions`` resource."""

    def __init__(self, transcription_sessions: AsyncTranscriptionSessions) -> None:
        self._transcription_sessions = transcription_sessions
        self.create = _legacy_response.async_to_raw_response_wrapper(transcription_sessions.create)
class TranscriptionSessionsWithStreamingResponse:
    """Streaming-response view over a ``TranscriptionSessions`` resource."""

    def __init__(self, transcription_sessions: TranscriptionSessions) -> None:
        self._transcription_sessions = transcription_sessions
        self.create = to_streamed_response_wrapper(transcription_sessions.create)
class AsyncTranscriptionSessionsWithStreamingResponse:
    """Streaming-response view over an ``AsyncTranscriptionSessions`` resource."""

    def __init__(self, transcription_sessions: AsyncTranscriptionSessions) -> None:
        self._transcription_sessions = transcription_sessions
        self.create = async_to_streamed_response_wrapper(transcription_sessions.create)

View File (new file, 47 lines — presumably `beta/threads/__init__.py`, judging by the `.runs`/`.threads`/`.messages` re-exports that follow; original filename lost in extraction)

@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .runs import (
Runs,
AsyncRuns,
RunsWithRawResponse,
AsyncRunsWithRawResponse,
RunsWithStreamingResponse,
AsyncRunsWithStreamingResponse,
)
from .threads import (
Threads,
AsyncThreads,
ThreadsWithRawResponse,
AsyncThreadsWithRawResponse,
ThreadsWithStreamingResponse,
AsyncThreadsWithStreamingResponse,
)
from .messages import (
Messages,
AsyncMessages,
MessagesWithRawResponse,
AsyncMessagesWithRawResponse,
MessagesWithStreamingResponse,
AsyncMessagesWithStreamingResponse,
)
# Explicit public API of this package: each resource class together with its
# raw-response and streaming-response wrapper variants (sync and async).
__all__ = [
    "Runs",
    "AsyncRuns",
    "RunsWithRawResponse",
    "AsyncRunsWithRawResponse",
    "RunsWithStreamingResponse",
    "AsyncRunsWithStreamingResponse",
    "Messages",
    "AsyncMessages",
    "MessagesWithRawResponse",
    "AsyncMessagesWithRawResponse",
    "MessagesWithStreamingResponse",
    "AsyncMessagesWithStreamingResponse",
    "Threads",
    "AsyncThreads",
    "ThreadsWithRawResponse",
    "AsyncThreadsWithRawResponse",
    "ThreadsWithStreamingResponse",
    "AsyncThreadsWithStreamingResponse",
]

View File

@@ -0,0 +1,718 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import typing_extensions
from typing import Union, Iterable, Optional
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import (
AsyncPaginator,
make_request_options,
)
from ....types.beta.threads import message_list_params, message_create_params, message_update_params
from ....types.beta.threads.message import Message
from ....types.shared_params.metadata import Metadata
from ....types.beta.threads.message_deleted import MessageDeleted
from ....types.beta.threads.message_content_part_param import MessageContentPartParam
__all__ = ["Messages", "AsyncMessages"]
class Messages(SyncAPIResource):
    """Synchronous resource for messages on a thread (``/threads/{thread_id}/messages``).

    Every endpoint here is part of the Assistants API, which is marked deprecated
    in favor of the Responses API; all requests send the ``OpenAI-Beta: assistants=v2``
    header.
    """

    @cached_property
    def with_raw_response(self) -> MessagesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return MessagesWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> MessagesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return MessagesWithStreamingResponse(self)
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def create(
        self,
        thread_id: str,
        *,
        content: Union[str, Iterable[MessageContentPartParam]],
        role: Literal["user", "assistant"],
        attachments: Optional[Iterable[message_create_params.Attachment]] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Message:
        """
        Create a message.
        Args:
          content: The text contents of the message.
          role:
              The role of the entity that is creating the message. Allowed values include:
              - `user`: Indicates the message is sent by an actual user and should be used in
                most cases to represent user-generated messages.
              - `assistant`: Indicates the message is generated by the assistant. Use this
                value to insert messages from the assistant into the conversation.
          attachments: A list of files attached to the message, and the tools they should be added to.
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        # Opt in to the Assistants v2 beta API; caller-supplied headers still win on conflict.
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._post(
            f"/threads/{thread_id}/messages",
            body=maybe_transform(
                {
                    "content": content,
                    "role": role,
                    "attachments": attachments,
                    "metadata": metadata,
                },
                message_create_params.MessageCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Message,
        )
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def retrieve(
        self,
        message_id: str,
        *,
        thread_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Message:
        """
        Retrieve a message.
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not message_id:
            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get(
            f"/threads/{thread_id}/messages/{message_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Message,
        )
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def update(
        self,
        message_id: str,
        *,
        thread_id: str,
        metadata: Optional[Metadata] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Message:
        """
        Modifies a message.
        Args:
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not message_id:
            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._post(
            f"/threads/{thread_id}/messages/{message_id}",
            body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Message,
        )
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def list(
        self,
        thread_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        run_id: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[Message]:
        """
        Returns a list of messages for a given thread.
        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.
          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.
          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.
          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.
          run_id: Filter messages by the run ID that generated them.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get_api_list(
            f"/threads/{thread_id}/messages",
            page=SyncCursorPage[Message],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                        "run_id": run_id,
                    },
                    message_list_params.MessageListParams,
                ),
            ),
            model=Message,
        )
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def delete(
        self,
        message_id: str,
        *,
        thread_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> MessageDeleted:
        """
        Deletes a message.
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not message_id:
            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._delete(
            f"/threads/{thread_id}/messages/{message_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=MessageDeleted,
        )
class AsyncMessages(AsyncAPIResource):
    """Asynchronous resource for messages on a thread (``/threads/{thread_id}/messages``).

    Async counterpart of :class:`Messages`; every endpoint is part of the deprecated
    Assistants API and sends the ``OpenAI-Beta: assistants=v2`` header.
    """

    @cached_property
    def with_raw_response(self) -> AsyncMessagesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncMessagesWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncMessagesWithStreamingResponse(self)
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    async def create(
        self,
        thread_id: str,
        *,
        content: Union[str, Iterable[MessageContentPartParam]],
        role: Literal["user", "assistant"],
        attachments: Optional[Iterable[message_create_params.Attachment]] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Message:
        """
        Create a message.
        Args:
          content: The text contents of the message.
          role:
              The role of the entity that is creating the message. Allowed values include:
              - `user`: Indicates the message is sent by an actual user and should be used in
                most cases to represent user-generated messages.
              - `assistant`: Indicates the message is generated by the assistant. Use this
                value to insert messages from the assistant into the conversation.
          attachments: A list of files attached to the message, and the tools they should be added to.
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        # Opt in to the Assistants v2 beta API; caller-supplied headers still win on conflict.
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            f"/threads/{thread_id}/messages",
            body=await async_maybe_transform(
                {
                    "content": content,
                    "role": role,
                    "attachments": attachments,
                    "metadata": metadata,
                },
                message_create_params.MessageCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Message,
        )
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    async def retrieve(
        self,
        message_id: str,
        *,
        thread_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Message:
        """
        Retrieve a message.
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not message_id:
            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._get(
            f"/threads/{thread_id}/messages/{message_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Message,
        )
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    async def update(
        self,
        message_id: str,
        *,
        thread_id: str,
        metadata: Optional[Metadata] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Message:
        """
        Modifies a message.
        Args:
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not message_id:
            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            f"/threads/{thread_id}/messages/{message_id}",
            body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Message,
        )
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def list(
        self,
        thread_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        run_id: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[Message, AsyncCursorPage[Message]]:
        """
        Returns a list of messages for a given thread.
        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.
          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.
          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.
          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.
          run_id: Filter messages by the run ID that generated them.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        # Note: not an ``async def`` — the paginator is awaited (or async-iterated) by the caller.
        return self._get_api_list(
            f"/threads/{thread_id}/messages",
            page=AsyncCursorPage[Message],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                        "run_id": run_id,
                    },
                    message_list_params.MessageListParams,
                ),
            ),
            model=Message,
        )
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    async def delete(
        self,
        message_id: str,
        *,
        thread_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> MessageDeleted:
        """
        Deletes a message.
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not message_id:
            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._delete(
            f"/threads/{thread_id}/messages/{message_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=MessageDeleted,
        )
class MessagesWithRawResponse:
    """Variant of :class:`Messages` whose methods return the raw response object.

    The pyright pragmas suppress the deprecation diagnostics triggered by
    referencing the deprecated resource methods.
    """

    def __init__(self, messages: Messages) -> None:
        self._messages = messages
        self.create = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                messages.create,  # pyright: ignore[reportDeprecated],
            )
        )
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                messages.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.update = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                messages.update,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                messages.list,  # pyright: ignore[reportDeprecated],
            )
        )
        self.delete = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                messages.delete,  # pyright: ignore[reportDeprecated],
            )
        )
class AsyncMessagesWithRawResponse:
    """Variant of :class:`AsyncMessages` whose methods return the raw response object.

    The pyright pragmas suppress the deprecation diagnostics triggered by
    referencing the deprecated resource methods.
    """

    def __init__(self, messages: AsyncMessages) -> None:
        self._messages = messages
        self.create = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                messages.create,  # pyright: ignore[reportDeprecated],
            )
        )
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                messages.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.update = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                messages.update,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                messages.list,  # pyright: ignore[reportDeprecated],
            )
        )
        self.delete = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                messages.delete,  # pyright: ignore[reportDeprecated],
            )
        )
class MessagesWithStreamingResponse:
    """Variant of :class:`Messages` whose methods return streamed-response wrappers
    (the response body is not eagerly read).

    The pyright pragmas suppress the deprecation diagnostics triggered by
    referencing the deprecated resource methods.
    """

    def __init__(self, messages: Messages) -> None:
        self._messages = messages
        self.create = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                messages.create,  # pyright: ignore[reportDeprecated],
            )
        )
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                messages.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.update = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                messages.update,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                messages.list,  # pyright: ignore[reportDeprecated],
            )
        )
        self.delete = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                messages.delete,  # pyright: ignore[reportDeprecated],
            )
        )
class AsyncMessagesWithStreamingResponse:
    """Variant of :class:`AsyncMessages` whose methods return streamed-response wrappers
    (the response body is not eagerly read).

    The pyright pragmas suppress the deprecation diagnostics triggered by
    referencing the deprecated resource methods.
    """

    def __init__(self, messages: AsyncMessages) -> None:
        self._messages = messages
        self.create = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                messages.create,  # pyright: ignore[reportDeprecated],
            )
        )
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                messages.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.update = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                messages.update,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                messages.list,  # pyright: ignore[reportDeprecated],
            )
        )
        self.delete = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                messages.delete,  # pyright: ignore[reportDeprecated],
            )
        )

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .runs import (
Runs,
AsyncRuns,
RunsWithRawResponse,
AsyncRunsWithRawResponse,
RunsWithStreamingResponse,
AsyncRunsWithStreamingResponse,
)
from .steps import (
Steps,
AsyncSteps,
StepsWithRawResponse,
AsyncStepsWithRawResponse,
StepsWithStreamingResponse,
AsyncStepsWithStreamingResponse,
)
# Explicit public API of this package: each resource class together with its
# raw-response and streaming-response wrapper variants (sync and async).
__all__ = [
    "Steps",
    "AsyncSteps",
    "StepsWithRawResponse",
    "AsyncStepsWithRawResponse",
    "StepsWithStreamingResponse",
    "AsyncStepsWithStreamingResponse",
    "Runs",
    "AsyncRuns",
    "RunsWithRawResponse",
    "AsyncRunsWithRawResponse",
    "RunsWithStreamingResponse",
    "AsyncRunsWithStreamingResponse",
]

View File

@@ -0,0 +1,399 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import typing_extensions
from typing import List
from typing_extensions import Literal
import httpx
from ..... import _legacy_response
from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ....._utils import maybe_transform, async_maybe_transform
from ....._compat import cached_property
from ....._resource import SyncAPIResource, AsyncAPIResource
from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .....pagination import SyncCursorPage, AsyncCursorPage
from ....._base_client import AsyncPaginator, make_request_options
from .....types.beta.threads.runs import step_list_params, step_retrieve_params
from .....types.beta.threads.runs.run_step import RunStep
from .....types.beta.threads.runs.run_step_include import RunStepInclude
__all__ = ["Steps", "AsyncSteps"]
class Steps(SyncAPIResource):
    """Synchronous resource for run steps (``/threads/{thread_id}/runs/{run_id}/steps``).

    Supports retrieving a single step and listing the steps of a run. Both
    endpoints are part of the deprecated Assistants API and send the
    ``OpenAI-Beta: assistants=v2`` header.
    """

    @cached_property
    def with_raw_response(self) -> StepsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return StepsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> StepsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return StepsWithStreamingResponse(self)
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def retrieve(
        self,
        step_id: str,
        *,
        thread_id: str,
        run_id: str,
        include: List[RunStepInclude] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunStep:
        """
        Retrieves a run step.
        Args:
          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.
              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        if not step_id:
            raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
        # Opt in to the Assistants v2 beta API; caller-supplied headers still win on conflict.
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get(
            f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
            ),
            cast_to=RunStep,
        )
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def list(
        self,
        run_id: str,
        *,
        thread_id: str,
        after: str | Omit = omit,
        before: str | Omit = omit,
        include: List[RunStepInclude] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[RunStep]:
        """
        Returns a list of run steps belonging to a run.
        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.
          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.
          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.
              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.
          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.
          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get_api_list(
            f"/threads/{thread_id}/runs/{run_id}/steps",
            page=SyncCursorPage[RunStep],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    step_list_params.StepListParams,
                ),
            ),
            model=RunStep,
        )
class AsyncSteps(AsyncAPIResource):
    """Async resource for run steps, served under `/threads/{thread_id}/runs/{run_id}/steps`."""

    @cached_property
    def with_raw_response(self) -> AsyncStepsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncStepsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncStepsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncStepsWithStreamingResponse(self)

    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    async def retrieve(
        self,
        step_id: str,
        *,
        thread_id: str,
        run_id: str,
        include: List[RunStepInclude] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunStep:
        """
        Retrieves a run step.

        Args:
          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.

              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # All three path parameters must be non-empty or the request URL would be malformed.
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        if not step_id:
            raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
        # Assistants v2 beta header; caller-supplied headers take precedence on conflict
        # because they are unpacked after the default entry.
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._get(
            f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
            ),
            cast_to=RunStep,
        )

    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def list(
        self,
        run_id: str,
        *,
        thread_id: str,
        after: str | Omit = omit,
        before: str | Omit = omit,
        include: List[RunStepInclude] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[RunStep, AsyncCursorPage[RunStep]]:
        """
        Returns a list of run steps belonging to a run.

        Note: this method itself is not a coroutine; it returns a paginator object.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.

          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.

              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Path parameters must be non-empty or the request URL would be malformed.
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        # Assistants v2 beta header; caller-supplied headers take precedence on conflict.
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get_api_list(
            f"/threads/{thread_id}/runs/{run_id}/steps",
            page=AsyncCursorPage[RunStep],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    step_list_params.StepListParams,
                ),
            ),
            model=RunStep,
        )
class StepsWithRawResponse:
    """Wrapper exposing `Steps` methods that return raw (unparsed) responses."""

    def __init__(self, steps: Steps) -> None:
        self._steps = steps
        # The wrapped methods are deprecated along with the Assistants API; the
        # pyright ignores silence the resulting deprecation diagnostics.
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                steps.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                steps.list,  # pyright: ignore[reportDeprecated],
            )
        )
class AsyncStepsWithRawResponse:
    """Wrapper exposing `AsyncSteps` methods that return raw (unparsed) responses."""

    def __init__(self, steps: AsyncSteps) -> None:
        self._steps = steps
        # pyright ignores silence deprecation diagnostics for the wrapped methods.
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                steps.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                steps.list,  # pyright: ignore[reportDeprecated],
            )
        )
class StepsWithStreamingResponse:
    """Wrapper exposing `Steps` methods as streamed responses (body not eagerly read)."""

    def __init__(self, steps: Steps) -> None:
        self._steps = steps
        # pyright ignores silence deprecation diagnostics for the wrapped methods.
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                steps.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                steps.list,  # pyright: ignore[reportDeprecated],
            )
        )
class AsyncStepsWithStreamingResponse:
    """Wrapper exposing `AsyncSteps` methods as streamed responses (body not eagerly read)."""

    def __init__(self, steps: AsyncSteps) -> None:
        self._steps = steps
        # pyright ignores silence deprecation diagnostics for the wrapped methods.
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                steps.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                steps.list,  # pyright: ignore[reportDeprecated],
            )
        )

File diff suppressed because it is too large. [Load Diff]

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .chat import (
Chat,
AsyncChat,
ChatWithRawResponse,
AsyncChatWithRawResponse,
ChatWithStreamingResponse,
AsyncChatWithStreamingResponse,
)
from .completions import (
Completions,
AsyncCompletions,
CompletionsWithRawResponse,
AsyncCompletionsWithRawResponse,
CompletionsWithStreamingResponse,
AsyncCompletionsWithStreamingResponse,
)
# Explicit public API of this subpackage: resource classes and their
# raw/streaming response wrappers.
__all__ = [
    "Completions",
    "AsyncCompletions",
    "CompletionsWithRawResponse",
    "AsyncCompletionsWithRawResponse",
    "CompletionsWithStreamingResponse",
    "AsyncCompletionsWithStreamingResponse",
    "Chat",
    "AsyncChat",
    "ChatWithRawResponse",
    "AsyncChatWithRawResponse",
    "ChatWithStreamingResponse",
    "AsyncChatWithStreamingResponse",
]

View File

@@ -0,0 +1,102 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from .completions.completions import (
Completions,
AsyncCompletions,
CompletionsWithRawResponse,
AsyncCompletionsWithRawResponse,
CompletionsWithStreamingResponse,
AsyncCompletionsWithStreamingResponse,
)
# Only the resource entry points are exported; wrapper classes are reached via them.
__all__ = ["Chat", "AsyncChat"]
class Chat(SyncAPIResource):
    """Synchronous `chat` resource; exposes the nested `completions` sub-resource."""

    @cached_property
    def completions(self) -> Completions:
        # Built lazily on first access; shares this resource's client.
        return Completions(self._client)

    @cached_property
    def with_raw_response(self) -> ChatWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ChatWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ChatWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ChatWithStreamingResponse(self)
class AsyncChat(AsyncAPIResource):
    """Async `chat` resource; exposes the nested `completions` sub-resource."""

    @cached_property
    def completions(self) -> AsyncCompletions:
        # Built lazily on first access; shares this resource's client.
        return AsyncCompletions(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncChatWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncChatWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncChatWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncChatWithStreamingResponse(self)
class ChatWithRawResponse:
    """View of a `Chat` resource whose calls return raw (unparsed) responses."""

    def __init__(self, chat: Chat) -> None:
        self._chat = chat

    @cached_property
    def completions(self) -> CompletionsWithRawResponse:
        """Raw-response view of the nested completions resource, built on first access."""
        inner = self._chat.completions
        return CompletionsWithRawResponse(inner)
class AsyncChatWithRawResponse:
    """View of an `AsyncChat` resource whose calls return raw (unparsed) responses."""

    def __init__(self, chat: AsyncChat) -> None:
        self._chat = chat

    @cached_property
    def completions(self) -> AsyncCompletionsWithRawResponse:
        """Raw-response view of the nested completions resource, built on first access."""
        inner = self._chat.completions
        return AsyncCompletionsWithRawResponse(inner)
class ChatWithStreamingResponse:
    """View of a `Chat` resource whose calls stream responses (body not eagerly read)."""

    def __init__(self, chat: Chat) -> None:
        self._chat = chat

    @cached_property
    def completions(self) -> CompletionsWithStreamingResponse:
        # Lazily wrap the nested completions resource with streaming semantics.
        return CompletionsWithStreamingResponse(self._chat.completions)
class AsyncChatWithStreamingResponse:
    """View of an `AsyncChat` resource whose calls stream responses (body not eagerly read)."""

    def __init__(self, chat: AsyncChat) -> None:
        self._chat = chat

    @cached_property
    def completions(self) -> AsyncCompletionsWithStreamingResponse:
        # Lazily wrap the nested completions resource with streaming semantics.
        return AsyncCompletionsWithStreamingResponse(self._chat.completions)

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .messages import (
Messages,
AsyncMessages,
MessagesWithRawResponse,
AsyncMessagesWithRawResponse,
MessagesWithStreamingResponse,
AsyncMessagesWithStreamingResponse,
)
from .completions import (
Completions,
AsyncCompletions,
CompletionsWithRawResponse,
AsyncCompletionsWithRawResponse,
CompletionsWithStreamingResponse,
AsyncCompletionsWithStreamingResponse,
)
# Explicit public API of this subpackage: resource classes and their
# raw/streaming response wrappers.
__all__ = [
    "Messages",
    "AsyncMessages",
    "MessagesWithRawResponse",
    "AsyncMessagesWithRawResponse",
    "MessagesWithStreamingResponse",
    "AsyncMessagesWithStreamingResponse",
    "Completions",
    "AsyncCompletions",
    "CompletionsWithRawResponse",
    "AsyncCompletionsWithRawResponse",
    "CompletionsWithStreamingResponse",
    "AsyncCompletionsWithStreamingResponse",
]

View File

@@ -0,0 +1,212 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.chat.completions import message_list_params
from ....types.chat.chat_completion_store_message import ChatCompletionStoreMessage
__all__ = ["Messages", "AsyncMessages"]
class Messages(SyncAPIResource):
    """Synchronous resource for listing the messages stored with a chat completion."""

    @cached_property
    def with_raw_response(self) -> MessagesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return MessagesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> MessagesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return MessagesWithStreamingResponse(self)

    def list(
        self,
        completion_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[ChatCompletionStoreMessage]:
        """Get the messages in a stored chat completion.

        Only Chat Completions that have
        been created with the `store` parameter set to `true` will be returned.

        Args:
          after: Identifier for the last message from the previous pagination request.

          limit: Number of messages to retrieve.

          order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
              for descending order. Defaults to `asc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # A blank ID would produce a malformed request path.
        if not completion_id:
            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
        return self._get_api_list(
            f"/chat/completions/{completion_id}/messages",
            page=SyncCursorPage[ChatCompletionStoreMessage],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                    },
                    message_list_params.MessageListParams,
                ),
            ),
            model=ChatCompletionStoreMessage,
        )
class AsyncMessages(AsyncAPIResource):
    """Async resource for listing the messages stored with a chat completion."""

    @cached_property
    def with_raw_response(self) -> AsyncMessagesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncMessagesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncMessagesWithStreamingResponse(self)

    def list(
        self,
        completion_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[ChatCompletionStoreMessage, AsyncCursorPage[ChatCompletionStoreMessage]]:
        """Get the messages in a stored chat completion.

        Only Chat Completions that have
        been created with the `store` parameter set to `true` will be returned.

        Note: this method itself is not a coroutine; it returns a paginator object.

        Args:
          after: Identifier for the last message from the previous pagination request.

          limit: Number of messages to retrieve.

          order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
              for descending order. Defaults to `asc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # A blank ID would produce a malformed request path.
        if not completion_id:
            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
        return self._get_api_list(
            f"/chat/completions/{completion_id}/messages",
            page=AsyncCursorPage[ChatCompletionStoreMessage],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                    },
                    message_list_params.MessageListParams,
                ),
            ),
            model=ChatCompletionStoreMessage,
        )
class MessagesWithRawResponse:
    """Wrapper exposing `Messages` methods that return raw (unparsed) responses."""

    def __init__(self, messages: Messages) -> None:
        self._messages = messages
        self.list = _legacy_response.to_raw_response_wrapper(
            messages.list,
        )
class AsyncMessagesWithRawResponse:
    """Wrapper exposing `AsyncMessages` methods that return raw (unparsed) responses."""

    def __init__(self, messages: AsyncMessages) -> None:
        self._messages = messages
        self.list = _legacy_response.async_to_raw_response_wrapper(
            messages.list,
        )
class MessagesWithStreamingResponse:
    """Wrapper exposing `Messages` methods as streamed responses (body not eagerly read)."""

    def __init__(self, messages: Messages) -> None:
        self._messages = messages
        self.list = to_streamed_response_wrapper(
            messages.list,
        )
class AsyncMessagesWithStreamingResponse:
    """Wrapper exposing `AsyncMessages` methods as streamed responses (body not eagerly read)."""

    def __init__(self, messages: AsyncMessages) -> None:
        self._messages = messages
        self.list = async_to_streamed_response_wrapper(
            messages.list,
        )

File diff suppressed because it is too large. [Load Diff]

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .files import (
Files,
AsyncFiles,
FilesWithRawResponse,
AsyncFilesWithRawResponse,
FilesWithStreamingResponse,
AsyncFilesWithStreamingResponse,
)
from .containers import (
Containers,
AsyncContainers,
ContainersWithRawResponse,
AsyncContainersWithRawResponse,
ContainersWithStreamingResponse,
AsyncContainersWithStreamingResponse,
)
# Explicit public API of this subpackage: resource classes and their
# raw/streaming response wrappers.
__all__ = [
    "Files",
    "AsyncFiles",
    "FilesWithRawResponse",
    "AsyncFilesWithRawResponse",
    "FilesWithStreamingResponse",
    "AsyncFilesWithStreamingResponse",
    "Containers",
    "AsyncContainers",
    "ContainersWithRawResponse",
    "AsyncContainersWithRawResponse",
    "ContainersWithStreamingResponse",
    "AsyncContainersWithStreamingResponse",
]

View File

@@ -0,0 +1,518 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ...types import container_list_params, container_create_params
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .files.files import (
Files,
AsyncFiles,
FilesWithRawResponse,
AsyncFilesWithRawResponse,
FilesWithStreamingResponse,
AsyncFilesWithStreamingResponse,
)
from ...pagination import SyncCursorPage, AsyncCursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.container_list_response import ContainerListResponse
from ...types.container_create_response import ContainerCreateResponse
from ...types.container_retrieve_response import ContainerRetrieveResponse
__all__ = ["Containers", "AsyncContainers"]
class Containers(SyncAPIResource):
    """Synchronous resource for the `/containers` endpoints (create/retrieve/list/delete)."""

    @cached_property
    def files(self) -> Files:
        # Nested resource for a container's files; shares this resource's client.
        return Files(self._client)

    @cached_property
    def with_raw_response(self) -> ContainersWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ContainersWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ContainersWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ContainersWithStreamingResponse(self)

    def create(
        self,
        *,
        name: str,
        expires_after: container_create_params.ExpiresAfter | Omit = omit,
        file_ids: SequenceNotStr[str] | Omit = omit,
        memory_limit: Literal["1g", "4g", "16g", "64g"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ContainerCreateResponse:
        """
        Create Container

        Args:
          name: Name of the container to create.

          expires_after: Container expiration time in seconds relative to the 'anchor' time.

          file_ids: IDs of files to copy to the container.

          memory_limit: Optional memory limit for the container. Defaults to "1g".

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/containers",
            # NOTE(review): presumably `maybe_transform` drops omitted values from the
            # payload — confirm against `_utils.maybe_transform`.
            body=maybe_transform(
                {
                    "name": name,
                    "expires_after": expires_after,
                    "file_ids": file_ids,
                    "memory_limit": memory_limit,
                },
                container_create_params.ContainerCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ContainerCreateResponse,
        )

    def retrieve(
        self,
        container_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ContainerRetrieveResponse:
        """
        Retrieve Container

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # A blank ID would produce a malformed request path.
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        return self._get(
            f"/containers/{container_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ContainerRetrieveResponse,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[ContainerListResponse]:
        """List Containers

        Args:
          after: A cursor for use in pagination.
              `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get_api_list(
            "/containers",
            page=SyncCursorPage[ContainerListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                    },
                    container_list_params.ContainerListParams,
                ),
            ),
            model=ContainerListResponse,
        )

    def delete(
        self,
        container_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Delete Container

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # A blank ID would produce a malformed request path.
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        # Accept any content type; the response body is discarded (cast_to=NoneType).
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._delete(
            f"/containers/{container_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
class AsyncContainers(AsyncAPIResource):
@cached_property
def files(self) -> AsyncFiles:
return AsyncFiles(self._client)
@cached_property
def with_raw_response(self) -> AsyncContainersWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncContainersWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncContainersWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncContainersWithStreamingResponse(self)
async def create(
self,
*,
name: str,
expires_after: container_create_params.ExpiresAfter | Omit = omit,
file_ids: SequenceNotStr[str] | Omit = omit,
memory_limit: Literal["1g", "4g", "16g", "64g"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ContainerCreateResponse:
"""
Create Container
Args:
name: Name of the container to create.
expires_after: Container expiration time in seconds relative to the 'anchor' time.
file_ids: IDs of files to copy to the container.
memory_limit: Optional memory limit for the container. Defaults to "1g".
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
"/containers",
body=await async_maybe_transform(
{
"name": name,
"expires_after": expires_after,
"file_ids": file_ids,
"memory_limit": memory_limit,
},
container_create_params.ContainerCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ContainerCreateResponse,
)
async def retrieve(
self,
container_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ContainerRetrieveResponse:
"""
Retrieve Container
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
return await self._get(
f"/containers/{container_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ContainerRetrieveResponse,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[ContainerListResponse, AsyncCursorPage[ContainerListResponse]]:
"""List Containers
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/containers",
page=AsyncCursorPage[ContainerListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
},
container_list_params.ContainerListParams,
),
),
model=ContainerListResponse,
)
async def delete(
    self,
    container_id: str,
    *,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
    """Delete Container.

    Args:
      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not container_id:
        raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
    # The endpoint returns no body; accept anything.
    merged_headers = {"Accept": "*/*", **(extra_headers or {})}
    request_options = make_request_options(
        extra_headers=merged_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return await self._delete(
        f"/containers/{container_id}",
        options=request_options,
        cast_to=NoneType,
    )
class ContainersWithRawResponse:
    """Raw-response view of the Containers resource: each method returns the
    unparsed HTTP response instead of a typed model."""

    def __init__(self, containers: Containers) -> None:
        self._containers = containers

        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(containers.create)
        self.retrieve = wrap(containers.retrieve)
        self.list = wrap(containers.list)
        self.delete = wrap(containers.delete)

    @cached_property
    def files(self) -> FilesWithRawResponse:
        # Lazily build the nested raw-response wrapper for the files sub-resource.
        return FilesWithRawResponse(self._containers.files)
class AsyncContainersWithRawResponse:
    """Raw-response view of the AsyncContainers resource: each method returns the
    unparsed HTTP response instead of a typed model."""

    def __init__(self, containers: AsyncContainers) -> None:
        self._containers = containers

        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(containers.create)
        self.retrieve = wrap(containers.retrieve)
        self.list = wrap(containers.list)
        self.delete = wrap(containers.delete)

    @cached_property
    def files(self) -> AsyncFilesWithRawResponse:
        # Lazily build the nested raw-response wrapper for the files sub-resource.
        return AsyncFilesWithRawResponse(self._containers.files)
class ContainersWithStreamingResponse:
    """Streaming-response view of the Containers resource: response bodies are
    not read eagerly."""

    def __init__(self, containers: Containers) -> None:
        self._containers = containers

        wrap = to_streamed_response_wrapper
        self.create = wrap(containers.create)
        self.retrieve = wrap(containers.retrieve)
        self.list = wrap(containers.list)
        self.delete = wrap(containers.delete)

    @cached_property
    def files(self) -> FilesWithStreamingResponse:
        # Lazily build the nested streaming wrapper for the files sub-resource.
        return FilesWithStreamingResponse(self._containers.files)
class AsyncContainersWithStreamingResponse:
    """Streaming-response view of the AsyncContainers resource: response bodies
    are not read eagerly."""

    def __init__(self, containers: AsyncContainers) -> None:
        self._containers = containers

        wrap = async_to_streamed_response_wrapper
        self.create = wrap(containers.create)
        self.retrieve = wrap(containers.retrieve)
        self.list = wrap(containers.list)
        self.delete = wrap(containers.delete)

    @cached_property
    def files(self) -> AsyncFilesWithStreamingResponse:
        # Lazily build the nested streaming wrapper for the files sub-resource.
        return AsyncFilesWithStreamingResponse(self._containers.files)

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .files import (
Files,
AsyncFiles,
FilesWithRawResponse,
AsyncFilesWithRawResponse,
FilesWithStreamingResponse,
AsyncFilesWithStreamingResponse,
)
from .content import (
Content,
AsyncContent,
ContentWithRawResponse,
AsyncContentWithRawResponse,
ContentWithStreamingResponse,
AsyncContentWithStreamingResponse,
)
# Explicit public surface of this sub-package; keeps `import *` and static
# analysis in sync with the re-exports above.
__all__ = [
    "Content",
    "AsyncContent",
    "ContentWithRawResponse",
    "AsyncContentWithRawResponse",
    "ContentWithStreamingResponse",
    "AsyncContentWithStreamingResponse",
    "Files",
    "AsyncFiles",
    "FilesWithRawResponse",
    "AsyncFilesWithRawResponse",
    "FilesWithStreamingResponse",
    "AsyncFilesWithStreamingResponse",
]

View File

@@ -0,0 +1,173 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .... import _legacy_response
from ...._types import Body, Query, Headers, NotGiven, not_given
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ...._base_client import make_request_options
__all__ = ["Content", "AsyncContent"]
class Content(SyncAPIResource):
    """Synchronous access to the binary content of a container file."""

    @cached_property
    def with_raw_response(self) -> ContentWithRawResponse:
        """
        Prefix any HTTP method call with this property to receive the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ContentWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ContentWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ContentWithStreamingResponse(self)

    def retrieve(
        self,
        file_id: str,
        *,
        container_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """Retrieve Container File Content.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        # The content endpoint serves raw bytes.
        merged_headers = {"Accept": "application/binary", **(extra_headers or {})}
        request_options = make_request_options(
            extra_headers=merged_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._get(
            f"/containers/{container_id}/files/{file_id}/content",
            options=request_options,
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
class AsyncContent(AsyncAPIResource):
    """Asynchronous access to the binary content of a container file."""

    @cached_property
    def with_raw_response(self) -> AsyncContentWithRawResponse:
        """
        Prefix any HTTP method call with this property to receive the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncContentWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncContentWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncContentWithStreamingResponse(self)

    async def retrieve(
        self,
        file_id: str,
        *,
        container_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """Retrieve Container File Content.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        # The content endpoint serves raw bytes.
        merged_headers = {"Accept": "application/binary", **(extra_headers or {})}
        request_options = make_request_options(
            extra_headers=merged_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._get(
            f"/containers/{container_id}/files/{file_id}/content",
            options=request_options,
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
class ContentWithRawResponse:
    """Raw-response view of the Content resource."""

    def __init__(self, content: Content) -> None:
        self._content = content
        self.retrieve = _legacy_response.to_raw_response_wrapper(content.retrieve)
class AsyncContentWithRawResponse:
    """Raw-response view of the AsyncContent resource."""

    def __init__(self, content: AsyncContent) -> None:
        self._content = content
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(content.retrieve)
class ContentWithStreamingResponse:
    """Streaming-response view of the Content resource; binary bodies are
    streamed rather than read eagerly."""

    def __init__(self, content: Content) -> None:
        self._content = content
        self.retrieve = to_custom_streamed_response_wrapper(
            content.retrieve,
            StreamedBinaryAPIResponse,
        )
class AsyncContentWithStreamingResponse:
    """Streaming-response view of the AsyncContent resource; binary bodies are
    streamed rather than read eagerly."""

    def __init__(self, content: AsyncContent) -> None:
        self._content = content
        self.retrieve = async_to_custom_streamed_response_wrapper(
            content.retrieve,
            AsyncStreamedBinaryAPIResponse,
        )

View File

@@ -0,0 +1,545 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Mapping, cast
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from .content import (
Content,
AsyncContent,
ContentWithRawResponse,
AsyncContentWithRawResponse,
ContentWithStreamingResponse,
AsyncContentWithStreamingResponse,
)
from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, FileTypes, omit, not_given
from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.containers import file_list_params, file_create_params
from ....types.containers.file_list_response import FileListResponse
from ....types.containers.file_create_response import FileCreateResponse
from ....types.containers.file_retrieve_response import FileRetrieveResponse
__all__ = ["Files", "AsyncFiles"]
class Files(SyncAPIResource):
    """Synchronous CRUD operations on files inside a container."""

    @cached_property
    def content(self) -> Content:
        """Sub-resource exposing the raw binary content of container files."""
        return Content(self._client)

    @cached_property
    def with_raw_response(self) -> FilesWithRawResponse:
        """
        Prefix any HTTP method call with this property to receive the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return FilesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> FilesWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return FilesWithStreamingResponse(self)

    def create(
        self,
        container_id: str,
        *,
        file: FileTypes | Omit = omit,
        file_id: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FileCreateResponse:
        """Create a Container File.

        Either send a multipart/form-data request with the raw file content, or
        a JSON request referencing an existing file by ID.

        Args:
          file: The File object (not file name) to be uploaded.

          file_id: Name of the file to create.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        payload = deepcopy_minimal(
            {
                "file": file,
                "file_id": file_id,
            }
        )
        uploaded_files = extract_files(cast(Mapping[str, object], payload), paths=[["file"]])
        # The Content-Type actually sent on the wire also carries a `boundary`
        # parameter, e.g. multipart/form-data; boundary=---abc--
        merged_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return self._post(
            f"/containers/{container_id}/files",
            body=maybe_transform(payload, file_create_params.FileCreateParams),
            files=uploaded_files,
            options=make_request_options(
                extra_headers=merged_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FileCreateResponse,
        )

    def retrieve(
        self,
        file_id: str,
        *,
        container_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FileRetrieveResponse:
        """Retrieve Container File.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._get(
            f"/containers/{container_id}/files/{file_id}",
            options=request_options,
            cast_to=FileRetrieveResponse,
        )

    def list(
        self,
        container_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[FileListResponse]:
        """List Container files.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your
              place in the list; e.g. after a page ending with obj_foo, pass
              after=obj_foo to fetch the next page.

          limit: A limit on the number of objects to be returned, between 1 and 100
              (default 20).

          order: Sort order by the `created_at` timestamp of the objects — `asc` or `desc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        query_params = maybe_transform(
            {
                "after": after,
                "limit": limit,
                "order": order,
            },
            file_list_params.FileListParams,
        )
        return self._get_api_list(
            f"/containers/{container_id}/files",
            page=SyncCursorPage[FileListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=query_params,
            ),
            model=FileListResponse,
        )

    def delete(
        self,
        file_id: str,
        *,
        container_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """Delete Container File.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        # The endpoint returns no body; accept anything.
        merged_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._delete(
            f"/containers/{container_id}/files/{file_id}",
            options=make_request_options(
                extra_headers=merged_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
class AsyncFiles(AsyncAPIResource):
    """Asynchronous CRUD operations on files inside a container."""

    @cached_property
    def content(self) -> AsyncContent:
        """Sub-resource exposing the raw binary content of container files."""
        return AsyncContent(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncFilesWithRawResponse:
        """
        Prefix any HTTP method call with this property to receive the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncFilesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncFilesWithStreamingResponse(self)

    async def create(
        self,
        container_id: str,
        *,
        file: FileTypes | Omit = omit,
        file_id: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FileCreateResponse:
        """Create a Container File.

        Either send a multipart/form-data request with the raw file content, or
        a JSON request referencing an existing file by ID.

        Args:
          file: The File object (not file name) to be uploaded.

          file_id: Name of the file to create.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        payload = deepcopy_minimal(
            {
                "file": file,
                "file_id": file_id,
            }
        )
        uploaded_files = extract_files(cast(Mapping[str, object], payload), paths=[["file"]])
        # The Content-Type actually sent on the wire also carries a `boundary`
        # parameter, e.g. multipart/form-data; boundary=---abc--
        merged_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return await self._post(
            f"/containers/{container_id}/files",
            body=await async_maybe_transform(payload, file_create_params.FileCreateParams),
            files=uploaded_files,
            options=make_request_options(
                extra_headers=merged_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FileCreateResponse,
        )

    async def retrieve(
        self,
        file_id: str,
        *,
        container_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FileRetrieveResponse:
        """Retrieve Container File.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._get(
            f"/containers/{container_id}/files/{file_id}",
            options=request_options,
            cast_to=FileRetrieveResponse,
        )

    def list(
        self,
        container_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[FileListResponse, AsyncCursorPage[FileListResponse]]:
        """List Container files.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your
              place in the list; e.g. after a page ending with obj_foo, pass
              after=obj_foo to fetch the next page.

          limit: A limit on the number of objects to be returned, between 1 and 100
              (default 20).

          order: Sort order by the `created_at` timestamp of the objects — `asc` or `desc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        # Note: building the paginator itself is synchronous; iteration is async.
        query_params = maybe_transform(
            {
                "after": after,
                "limit": limit,
                "order": order,
            },
            file_list_params.FileListParams,
        )
        return self._get_api_list(
            f"/containers/{container_id}/files",
            page=AsyncCursorPage[FileListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=query_params,
            ),
            model=FileListResponse,
        )

    async def delete(
        self,
        file_id: str,
        *,
        container_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """Delete Container File.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        # The endpoint returns no body; accept anything.
        merged_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._delete(
            f"/containers/{container_id}/files/{file_id}",
            options=make_request_options(
                extra_headers=merged_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
class FilesWithRawResponse:
    """Raw-response view of the Files resource: each method returns the
    unparsed HTTP response instead of a typed model."""

    def __init__(self, files: Files) -> None:
        self._files = files

        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(files.create)
        self.retrieve = wrap(files.retrieve)
        self.list = wrap(files.list)
        self.delete = wrap(files.delete)

    @cached_property
    def content(self) -> ContentWithRawResponse:
        # Lazily build the nested raw-response wrapper for the content sub-resource.
        return ContentWithRawResponse(self._files.content)
class AsyncFilesWithRawResponse:
    """Raw-response view of the AsyncFiles resource: each method returns the
    unparsed HTTP response instead of a typed model."""

    def __init__(self, files: AsyncFiles) -> None:
        self._files = files

        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(files.create)
        self.retrieve = wrap(files.retrieve)
        self.list = wrap(files.list)
        self.delete = wrap(files.delete)

    @cached_property
    def content(self) -> AsyncContentWithRawResponse:
        # Lazily build the nested raw-response wrapper for the content sub-resource.
        return AsyncContentWithRawResponse(self._files.content)
class FilesWithStreamingResponse:
    """Streaming-response view of the Files resource: response bodies are not
    read eagerly."""

    def __init__(self, files: Files) -> None:
        self._files = files

        wrap = to_streamed_response_wrapper
        self.create = wrap(files.create)
        self.retrieve = wrap(files.retrieve)
        self.list = wrap(files.list)
        self.delete = wrap(files.delete)

    @cached_property
    def content(self) -> ContentWithStreamingResponse:
        # Lazily build the nested streaming wrapper for the content sub-resource.
        return ContentWithStreamingResponse(self._files.content)
class AsyncFilesWithStreamingResponse:
    """Streaming-response view of the AsyncFiles resource: response bodies are
    not read eagerly."""

    def __init__(self, files: AsyncFiles) -> None:
        self._files = files

        wrap = async_to_streamed_response_wrapper
        self.create = wrap(files.create)
        self.retrieve = wrap(files.retrieve)
        self.list = wrap(files.list)
        self.delete = wrap(files.delete)

    @cached_property
    def content(self) -> AsyncContentWithStreamingResponse:
        # Lazily build the nested streaming wrapper for the content sub-resource.
        return AsyncContentWithStreamingResponse(self._files.content)

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .items import (
Items,
AsyncItems,
ItemsWithRawResponse,
AsyncItemsWithRawResponse,
ItemsWithStreamingResponse,
AsyncItemsWithStreamingResponse,
)
from .conversations import (
Conversations,
AsyncConversations,
ConversationsWithRawResponse,
AsyncConversationsWithRawResponse,
ConversationsWithStreamingResponse,
AsyncConversationsWithStreamingResponse,
)
# Explicit public surface of this sub-package; keeps `import *` and static
# analysis in sync with the re-exports above.
__all__ = [
    "Items",
    "AsyncItems",
    "ItemsWithRawResponse",
    "AsyncItemsWithRawResponse",
    "ItemsWithStreamingResponse",
    "AsyncItemsWithStreamingResponse",
    "Conversations",
    "AsyncConversations",
    "ConversationsWithRawResponse",
    "AsyncConversationsWithRawResponse",
    "ConversationsWithStreamingResponse",
    "AsyncConversationsWithStreamingResponse",
]

View File

@@ -0,0 +1,486 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable, Optional
import httpx
from ... import _legacy_response
from .items import (
Items,
AsyncItems,
ItemsWithRawResponse,
AsyncItemsWithRawResponse,
ItemsWithStreamingResponse,
AsyncItemsWithStreamingResponse,
)
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.conversations import conversation_create_params, conversation_update_params
from ...types.shared_params.metadata import Metadata
from ...types.conversations.conversation import Conversation
from ...types.responses.response_input_item_param import ResponseInputItemParam
from ...types.conversations.conversation_deleted_resource import ConversationDeletedResource
__all__ = ["Conversations", "AsyncConversations"]
class Conversations(SyncAPIResource):
@cached_property
def items(self) -> Items:
    """Sub-resource for managing the items within a conversation."""
    return Items(self._client)
@cached_property
def with_raw_response(self) -> ConversationsWithRawResponse:
    """
    Prefix any HTTP method call with this property to receive the raw
    response object instead of the parsed content.

    For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
    """
    return ConversationsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ConversationsWithStreamingResponse:
    """
    Like `.with_raw_response`, but the response body is not read eagerly.

    For more information, see https://www.github.com/openai/openai-python#with_streaming_response
    """
    return ConversationsWithStreamingResponse(self)
def create(
    self,
    *,
    items: Optional[Iterable[ResponseInputItemParam]] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Conversation:
    """Create a conversation.

    Args:
      items: Initial items to include in the conversation context. You may add up to 20
          items at a time.

      metadata: Set of 16 key-value pairs that can be attached to an object, useful for
          storing additional structured information and for querying via API or
          dashboard. Keys are strings up to 64 characters; values are strings up
          to 512 characters.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    request_body = maybe_transform(
        {
            "items": items,
            "metadata": metadata,
        },
        conversation_create_params.ConversationCreateParams,
    )
    request_options = make_request_options(
        extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return self._post(
        "/conversations",
        body=request_body,
        options=request_options,
        cast_to=Conversation,
    )
def retrieve(
    self,
    conversation_id: str,
    *,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Conversation:
    """Get a conversation.

    Args:
      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not conversation_id:
        raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
    request_options = make_request_options(
        extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return self._get(
        f"/conversations/{conversation_id}",
        options=request_options,
        cast_to=Conversation,
    )
def update(
self,
conversation_id: str,
*,
metadata: Optional[Metadata],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Conversation:
"""
Update a conversation
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._post(
f"/conversations/{conversation_id}",
body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Conversation,
)
def delete(
self,
conversation_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ConversationDeletedResource:
"""Delete a conversation.
Items in the conversation will not be deleted.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._delete(
f"/conversations/{conversation_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ConversationDeletedResource,
)
class AsyncConversations(AsyncAPIResource):
    """Async resource for the `/conversations` endpoints.

    Mirrors the synchronous `Conversations` resource; every request method
    is a coroutine and must be awaited.
    """

    @cached_property
    def items(self) -> AsyncItems:
        # Sub-resource for the items contained in a conversation.
        return AsyncItems(self._client)
    @cached_property
    def with_raw_response(self) -> AsyncConversationsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncConversationsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> AsyncConversationsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncConversationsWithStreamingResponse(self)
    async def create(
        self,
        *,
        items: Optional[Iterable[ResponseInputItemParam]] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Create a conversation.
        Args:
          items: Initial items to include in the conversation context. You may add up to 20 items
              at a time.
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/conversations",
            body=await async_maybe_transform(
                {
                    "items": items,
                    "metadata": metadata,
                },
                conversation_create_params.ConversationCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )
    async def retrieve(
        self,
        conversation_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Get a conversation
        Args:
          conversation_id: The ID of the conversation to retrieve.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return await self._get(
            f"/conversations/{conversation_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )
    async def update(
        self,
        conversation_id: str,
        *,
        metadata: Optional[Metadata],
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Update a conversation
        Args:
          conversation_id: The ID of the conversation to update.
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return await self._post(
            f"/conversations/{conversation_id}",
            body=await async_maybe_transform(
                {"metadata": metadata}, conversation_update_params.ConversationUpdateParams
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )
    async def delete(
        self,
        conversation_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationDeletedResource:
        """Delete a conversation.
        Items in the conversation will not be deleted.
        Args:
          conversation_id: The ID of the conversation to delete.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return await self._delete(
            f"/conversations/{conversation_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ConversationDeletedResource,
        )
class ConversationsWithRawResponse:
    """View over a `Conversations` resource whose method calls return raw responses."""

    def __init__(self, conversations: Conversations) -> None:
        self._conversations = conversations
        # Wrap every endpoint method so invoking it yields the raw HTTP
        # response object rather than the parsed model.
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(conversations.create)
        self.retrieve = wrap(conversations.retrieve)
        self.update = wrap(conversations.update)
        self.delete = wrap(conversations.delete)

    @cached_property
    def items(self) -> ItemsWithRawResponse:
        return ItemsWithRawResponse(self._conversations.items)
class AsyncConversationsWithRawResponse:
    """View over an `AsyncConversations` resource whose method calls return raw responses."""

    def __init__(self, conversations: AsyncConversations) -> None:
        self._conversations = conversations
        # Wrap every endpoint coroutine so awaiting it yields the raw HTTP
        # response object rather than the parsed model.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(conversations.create)
        self.retrieve = wrap(conversations.retrieve)
        self.update = wrap(conversations.update)
        self.delete = wrap(conversations.delete)

    @cached_property
    def items(self) -> AsyncItemsWithRawResponse:
        return AsyncItemsWithRawResponse(self._conversations.items)
class ConversationsWithStreamingResponse:
    """View over a `Conversations` resource that streams responses instead of eagerly reading them."""

    def __init__(self, conversations: Conversations) -> None:
        self._conversations = conversations
        # Wrap every endpoint method so the response body is not read eagerly.
        wrap = to_streamed_response_wrapper
        self.create = wrap(conversations.create)
        self.retrieve = wrap(conversations.retrieve)
        self.update = wrap(conversations.update)
        self.delete = wrap(conversations.delete)

    @cached_property
    def items(self) -> ItemsWithStreamingResponse:
        return ItemsWithStreamingResponse(self._conversations.items)
class AsyncConversationsWithStreamingResponse:
    """View over an `AsyncConversations` resource that streams responses instead of eagerly reading them."""

    def __init__(self, conversations: AsyncConversations) -> None:
        self._conversations = conversations
        # Wrap every endpoint coroutine so the response body is not read eagerly.
        wrap = async_to_streamed_response_wrapper
        self.create = wrap(conversations.create)
        self.retrieve = wrap(conversations.retrieve)
        self.update = wrap(conversations.update)
        self.delete = wrap(conversations.delete)

    @cached_property
    def items(self) -> AsyncItemsWithStreamingResponse:
        return AsyncItemsWithStreamingResponse(self._conversations.items)

View File

@@ -0,0 +1,557 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Any, List, Iterable, cast
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncConversationCursorPage, AsyncConversationCursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.conversations import item_list_params, item_create_params, item_retrieve_params
from ...types.conversations.conversation import Conversation
from ...types.responses.response_includable import ResponseIncludable
from ...types.conversations.conversation_item import ConversationItem
from ...types.responses.response_input_item_param import ResponseInputItemParam
from ...types.conversations.conversation_item_list import ConversationItemList
__all__ = ["Items", "AsyncItems"]
class Items(SyncAPIResource):
    """Sync resource for the `/conversations/{conversation_id}/items` endpoints."""

    @cached_property
    def with_raw_response(self) -> ItemsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ItemsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> ItemsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ItemsWithStreamingResponse(self)
    def create(
        self,
        conversation_id: str,
        *,
        items: Iterable[ResponseInputItemParam],
        include: List[ResponseIncludable] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationItemList:
        """
        Create items in a conversation with the given ID.
        Args:
          conversation_id: The ID of the conversation to add items to.
          items: The items to add to the conversation. You may add up to 20 items at a time.
          include: Additional fields to include in the response. See the `include` parameter for
              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
              for more information.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return self._post(
            f"/conversations/{conversation_id}/items",
            body=maybe_transform({"items": items}, item_create_params.ItemCreateParams),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # `include` travels as a query parameter, not in the JSON body.
                query=maybe_transform({"include": include}, item_create_params.ItemCreateParams),
            ),
            cast_to=ConversationItemList,
        )
    def retrieve(
        self,
        item_id: str,
        *,
        conversation_id: str,
        include: List[ResponseIncludable] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationItem:
        """
        Get a single item from a conversation with the given IDs.
        Args:
          item_id: The ID of the item to retrieve.
          conversation_id: The ID of the conversation that contains the item.
          include: Additional fields to include in the response. See the `include` parameter for
              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
              for more information.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        if not item_id:
            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
        return cast(
            ConversationItem,
            self._get(
                f"/conversations/{conversation_id}/items/{item_id}",
                options=make_request_options(
                    extra_headers=extra_headers,
                    extra_query=extra_query,
                    extra_body=extra_body,
                    timeout=timeout,
                    query=maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
                ),
                cast_to=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
            ),
        )
    def list(
        self,
        conversation_id: str,
        *,
        after: str | Omit = omit,
        include: List[ResponseIncludable] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[ConversationItem]:
        """
        List all items for a conversation with the given ID.
        Args:
          conversation_id: The ID of the conversation whose items are listed.
          after: An item ID to list items after, used in pagination.
          include: Specify additional output data to include in the model response. Currently
              supported values are:
              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).
          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.
          order: The order to return the input items in. Default is `desc`.
              - `asc`: Return the input items in ascending order.
              - `desc`: Return the input items in descending order.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return self._get_api_list(
            f"/conversations/{conversation_id}/items",
            page=SyncConversationCursorPage[ConversationItem],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    item_list_params.ItemListParams,
                ),
            ),
            model=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
        )
    def delete(
        self,
        item_id: str,
        *,
        conversation_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Delete an item from a conversation with the given IDs.
        Args:
          item_id: The ID of the item to delete.
          conversation_id: The ID of the conversation that contains the item.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        if not item_id:
            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
        return self._delete(
            f"/conversations/{conversation_id}/items/{item_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )
class AsyncItems(AsyncAPIResource):
    """Async resource for the `/conversations/{conversation_id}/items` endpoints.

    Mirrors the synchronous `Items` resource; request methods are coroutines,
    except `list`, which returns an `AsyncPaginator` to be awaited/iterated.
    """

    @cached_property
    def with_raw_response(self) -> AsyncItemsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncItemsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> AsyncItemsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncItemsWithStreamingResponse(self)
    async def create(
        self,
        conversation_id: str,
        *,
        items: Iterable[ResponseInputItemParam],
        include: List[ResponseIncludable] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationItemList:
        """
        Create items in a conversation with the given ID.
        Args:
          conversation_id: The ID of the conversation to add items to.
          items: The items to add to the conversation. You may add up to 20 items at a time.
          include: Additional fields to include in the response. See the `include` parameter for
              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
              for more information.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return await self._post(
            f"/conversations/{conversation_id}/items",
            body=await async_maybe_transform({"items": items}, item_create_params.ItemCreateParams),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # `include` travels as a query parameter, not in the JSON body.
                query=await async_maybe_transform({"include": include}, item_create_params.ItemCreateParams),
            ),
            cast_to=ConversationItemList,
        )
    async def retrieve(
        self,
        item_id: str,
        *,
        conversation_id: str,
        include: List[ResponseIncludable] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationItem:
        """
        Get a single item from a conversation with the given IDs.
        Args:
          item_id: The ID of the item to retrieve.
          conversation_id: The ID of the conversation that contains the item.
          include: Additional fields to include in the response. See the `include` parameter for
              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
              for more information.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        if not item_id:
            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
        return cast(
            ConversationItem,
            await self._get(
                f"/conversations/{conversation_id}/items/{item_id}",
                options=make_request_options(
                    extra_headers=extra_headers,
                    extra_query=extra_query,
                    extra_body=extra_body,
                    timeout=timeout,
                    query=await async_maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
                ),
                cast_to=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
            ),
        )
    def list(
        self,
        conversation_id: str,
        *,
        after: str | Omit = omit,
        include: List[ResponseIncludable] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[ConversationItem, AsyncConversationCursorPage[ConversationItem]]:
        """
        List all items for a conversation with the given ID.
        Args:
          conversation_id: The ID of the conversation whose items are listed.
          after: An item ID to list items after, used in pagination.
          include: Specify additional output data to include in the model response. Currently
              supported values are:
              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).
          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.
          order: The order to return the input items in. Default is `desc`.
              - `asc`: Return the input items in ascending order.
              - `desc`: Return the input items in descending order.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return self._get_api_list(
            f"/conversations/{conversation_id}/items",
            page=AsyncConversationCursorPage[ConversationItem],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    item_list_params.ItemListParams,
                ),
            ),
            model=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
        )
    async def delete(
        self,
        item_id: str,
        *,
        conversation_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Delete an item from a conversation with the given IDs.
        Args:
          item_id: The ID of the item to delete.
          conversation_id: The ID of the conversation that contains the item.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        if not item_id:
            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
        return await self._delete(
            f"/conversations/{conversation_id}/items/{item_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )
class ItemsWithRawResponse:
    """View over an `Items` resource whose method calls return raw responses."""

    def __init__(self, items: Items) -> None:
        self._items = items
        # Wrap every endpoint method so invoking it yields the raw HTTP
        # response object rather than the parsed model.
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(items.create)
        self.retrieve = wrap(items.retrieve)
        self.list = wrap(items.list)
        self.delete = wrap(items.delete)
class AsyncItemsWithRawResponse:
    """View over an `AsyncItems` resource whose method calls return raw responses."""

    def __init__(self, items: AsyncItems) -> None:
        self._items = items
        # Wrap every endpoint coroutine so awaiting it yields the raw HTTP
        # response object rather than the parsed model.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(items.create)
        self.retrieve = wrap(items.retrieve)
        self.list = wrap(items.list)
        self.delete = wrap(items.delete)
class ItemsWithStreamingResponse:
    """View over an `Items` resource that streams responses instead of eagerly reading them."""

    def __init__(self, items: Items) -> None:
        self._items = items
        # Wrap every endpoint method so the response body is not read eagerly.
        wrap = to_streamed_response_wrapper
        self.create = wrap(items.create)
        self.retrieve = wrap(items.retrieve)
        self.list = wrap(items.list)
        self.delete = wrap(items.delete)
class AsyncItemsWithStreamingResponse:
    """View over an `AsyncItems` resource that streams responses instead of eagerly reading them."""

    def __init__(self, items: AsyncItems) -> None:
        self._items = items
        # Wrap every endpoint coroutine so the response body is not read eagerly.
        wrap = async_to_streamed_response_wrapper
        self.create = wrap(items.create)
        self.retrieve = wrap(items.retrieve)
        self.list = wrap(items.list)
        self.delete = wrap(items.delete)

View File

@@ -0,0 +1,298 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import array
import base64
from typing import Union, Iterable, cast
from typing_extensions import Literal
import httpx
from .. import _legacy_response
from ..types import embedding_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from .._utils import is_given, maybe_transform
from .._compat import cached_property
from .._extras import numpy as np, has_numpy
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._base_client import make_request_options
from ..types.embedding_model import EmbeddingModel
from ..types.create_embedding_response import CreateEmbeddingResponse
__all__ = ["Embeddings", "AsyncEmbeddings"]
class Embeddings(SyncAPIResource):
    # Synchronous API resource for the `/embeddings` endpoint.

    @cached_property
    def with_raw_response(self) -> EmbeddingsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return EmbeddingsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> EmbeddingsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return EmbeddingsWithStreamingResponse(self)

    def create(
        self,
        *,
        input: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]]],
        model: Union[str, EmbeddingModel],
        dimensions: int | Omit = omit,
        encoding_format: Literal["float", "base64"] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> CreateEmbeddingResponse:
        """
        Creates an embedding vector representing the input text.

        Args:
          input: Input text to embed, encoded as a string or array of tokens. To embed multiple
              inputs in a single request, pass an array of strings or array of token arrays.
              The input must not exceed the max input tokens for the model (8192 tokens for
              all embedding models), cannot be an empty string, and any array must be 2048
              dimensions or less.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens. In addition to the per-input token limit, all embedding
              models enforce a maximum of 300,000 tokens summed across all inputs in a single
              request.

          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models) for descriptions of
              them.

          dimensions: The number of dimensions the resulting output embeddings should have. Only
              supported in `text-embedding-3` and later models.

          encoding_format: The format to return the embeddings in. Can be either `float` or
              [`base64`](https://pypi.org/project/pybase64/).

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        params = {
            "input": input,
            "model": model,
            "user": user,
            "dimensions": dimensions,
            "encoding_format": encoding_format,
        }
        if not is_given(encoding_format):
            # When the caller did not pick a format, request base64 on the wire
            # (more compact) and decode it back to floats in `parser` below, so
            # the caller still receives plain float embeddings.
            params["encoding_format"] = "base64"

        def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
            # Post-parser applied to the API response before it is returned.
            if is_given(encoding_format):
                # don't modify the response object if a user explicitly asked for a format
                return obj

            if not obj.data:
                raise ValueError("No embedding data received")

            for embedding in obj.data:
                data = cast(object, embedding.embedding)
                if not isinstance(data, str):
                    # Already a list of floats — nothing to decode.
                    continue
                if not has_numpy():
                    # use array for base64 optimisation
                    embedding.embedding = array.array("f", base64.b64decode(data)).tolist()
                else:
                    # numpy decodes the packed float32 buffer faster than array.
                    embedding.embedding = np.frombuffer(  # type: ignore[no-untyped-call]
                        base64.b64decode(data), dtype="float32"
                    ).tolist()

            return obj

        return self._post(
            "/embeddings",
            body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            cast_to=CreateEmbeddingResponse,
        )
class AsyncEmbeddings(AsyncAPIResource):
    # Asynchronous API resource for the `/embeddings` endpoint.

    @cached_property
    def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncEmbeddingsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncEmbeddingsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncEmbeddingsWithStreamingResponse(self)

    async def create(
        self,
        *,
        input: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]]],
        model: Union[str, EmbeddingModel],
        dimensions: int | Omit = omit,
        encoding_format: Literal["float", "base64"] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> CreateEmbeddingResponse:
        """
        Creates an embedding vector representing the input text.

        Args:
          input: Input text to embed, encoded as a string or array of tokens. To embed multiple
              inputs in a single request, pass an array of strings or array of token arrays.
              The input must not exceed the max input tokens for the model (8192 tokens for
              all embedding models), cannot be an empty string, and any array must be 2048
              dimensions or less.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens. In addition to the per-input token limit, all embedding
              models enforce a maximum of 300,000 tokens summed across all inputs in a single
              request.

          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models) for descriptions of
              them.

          dimensions: The number of dimensions the resulting output embeddings should have. Only
              supported in `text-embedding-3` and later models.

          encoding_format: The format to return the embeddings in. Can be either `float` or
              [`base64`](https://pypi.org/project/pybase64/).

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        params = {
            "input": input,
            "model": model,
            "user": user,
            "dimensions": dimensions,
            "encoding_format": encoding_format,
        }
        if not is_given(encoding_format):
            # When the caller did not pick a format, request base64 on the wire
            # (more compact) and decode it back to floats in `parser` below, so
            # the caller still receives plain float embeddings.
            params["encoding_format"] = "base64"

        def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
            # Post-parser applied to the API response before it is returned.
            if is_given(encoding_format):
                # don't modify the response object if a user explicitly asked for a format
                return obj

            if not obj.data:
                raise ValueError("No embedding data received")

            for embedding in obj.data:
                data = cast(object, embedding.embedding)
                if not isinstance(data, str):
                    # Already a list of floats — nothing to decode.
                    continue
                if not has_numpy():
                    # use array for base64 optimisation
                    embedding.embedding = array.array("f", base64.b64decode(data)).tolist()
                else:
                    # numpy decodes the packed float32 buffer faster than array.
                    embedding.embedding = np.frombuffer(  # type: ignore[no-untyped-call]
                        base64.b64decode(data), dtype="float32"
                    ).tolist()

            return obj

        return await self._post(
            "/embeddings",
            body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            cast_to=CreateEmbeddingResponse,
        )
class EmbeddingsWithRawResponse:
    """Raw-response view over an ``Embeddings`` resource."""

    def __init__(self, embeddings: Embeddings) -> None:
        self._embeddings = embeddings
        # Re-expose `create` so it returns the raw HTTP response.
        self.create = _legacy_response.to_raw_response_wrapper(embeddings.create)
class AsyncEmbeddingsWithRawResponse:
    """Raw-response view over an ``AsyncEmbeddings`` resource."""

    def __init__(self, embeddings: AsyncEmbeddings) -> None:
        self._embeddings = embeddings
        # Re-expose `create` so awaiting it yields the raw HTTP response.
        self.create = _legacy_response.async_to_raw_response_wrapper(embeddings.create)
class EmbeddingsWithStreamingResponse:
    """Streaming-response view over an ``Embeddings`` resource."""

    def __init__(self, embeddings: Embeddings) -> None:
        self._embeddings = embeddings
        # Re-expose `create` so it returns a streamed response.
        self.create = to_streamed_response_wrapper(embeddings.create)
class AsyncEmbeddingsWithStreamingResponse:
    """Streaming-response view over an ``AsyncEmbeddings`` resource."""

    def __init__(self, embeddings: AsyncEmbeddings) -> None:
        self._embeddings = embeddings
        # Re-expose `create` so it returns a streamed response.
        self.create = async_to_streamed_response_wrapper(embeddings.create)

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .runs import (
Runs,
AsyncRuns,
RunsWithRawResponse,
AsyncRunsWithRawResponse,
RunsWithStreamingResponse,
AsyncRunsWithStreamingResponse,
)
from .evals import (
Evals,
AsyncEvals,
EvalsWithRawResponse,
AsyncEvalsWithRawResponse,
EvalsWithStreamingResponse,
AsyncEvalsWithStreamingResponse,
)
__all__ = [
"Runs",
"AsyncRuns",
"RunsWithRawResponse",
"AsyncRunsWithRawResponse",
"RunsWithStreamingResponse",
"AsyncRunsWithStreamingResponse",
"Evals",
"AsyncEvals",
"EvalsWithRawResponse",
"AsyncEvalsWithRawResponse",
"EvalsWithStreamingResponse",
"AsyncEvalsWithStreamingResponse",
]

View File

@@ -0,0 +1,662 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable, Optional
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ...types import eval_list_params, eval_create_params, eval_update_params
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from .runs.runs import (
Runs,
AsyncRuns,
RunsWithRawResponse,
AsyncRunsWithRawResponse,
RunsWithStreamingResponse,
AsyncRunsWithStreamingResponse,
)
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncCursorPage, AsyncCursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.eval_list_response import EvalListResponse
from ...types.eval_create_response import EvalCreateResponse
from ...types.eval_delete_response import EvalDeleteResponse
from ...types.eval_update_response import EvalUpdateResponse
from ...types.eval_retrieve_response import EvalRetrieveResponse
from ...types.shared_params.metadata import Metadata
__all__ = ["Evals", "AsyncEvals"]
class Evals(SyncAPIResource):
    # Synchronous API resource for the `/evals` endpoints: CRUD operations on
    # evaluations plus access to the nested `runs` sub-resource.

    @cached_property
    def runs(self) -> Runs:
        # Nested sub-resource: /evals/{eval_id}/runs
        return Runs(self._client)

    @cached_property
    def with_raw_response(self) -> EvalsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return EvalsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> EvalsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return EvalsWithStreamingResponse(self)

    def create(
        self,
        *,
        data_source_config: eval_create_params.DataSourceConfig,
        testing_criteria: Iterable[eval_create_params.TestingCriterion],
        metadata: Optional[Metadata] | Omit = omit,
        name: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> EvalCreateResponse:
        """
        Create the structure of an evaluation that can be used to test a model's
        performance. An evaluation is a set of testing criteria and the config for a
        data source, which dictates the schema of the data used in the evaluation. After
        creating an evaluation, you can run it on different models and model parameters.
        We support several types of graders and datasources. For more information, see
        the [Evals guide](https://platform.openai.com/docs/guides/evals).

        Args:
          data_source_config: The configuration for the data source used for the evaluation runs. Dictates the
              schema of the data used in the evaluation.

          testing_criteria: A list of graders for all eval runs in this group. Graders can reference
              variables in the data source using double curly braces notation, like
              `{{item.variable_name}}`. To reference the model's output, use the `sample`
              namespace (ie, `{{sample.output_text}}`).

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          name: The name of the evaluation.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/evals",
            body=maybe_transform(
                {
                    "data_source_config": data_source_config,
                    "testing_criteria": testing_criteria,
                    "metadata": metadata,
                    "name": name,
                },
                eval_create_params.EvalCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=EvalCreateResponse,
        )

    def retrieve(
        self,
        eval_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> EvalRetrieveResponse:
        """
        Get an evaluation by ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # An empty path parameter would produce a malformed URL; fail fast instead.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return self._get(
            f"/evals/{eval_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=EvalRetrieveResponse,
        )

    def update(
        self,
        eval_id: str,
        *,
        metadata: Optional[Metadata] | Omit = omit,
        name: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> EvalUpdateResponse:
        """
        Update certain properties of an evaluation.

        Args:
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          name: Rename the evaluation.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # An empty path parameter would produce a malformed URL; fail fast instead.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return self._post(
            f"/evals/{eval_id}",
            body=maybe_transform(
                {
                    "metadata": metadata,
                    "name": name,
                },
                eval_update_params.EvalUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=EvalUpdateResponse,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        order_by: Literal["created_at", "updated_at"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[EvalListResponse]:
        """
        List evaluations for a project.

        Args:
          after: Identifier for the last eval from the previous pagination request.

          limit: Number of evals to retrieve.

          order: Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for
              descending order.

          order_by: Evals can be ordered by creation time or last updated time. Use `created_at` for
              creation time or `updated_at` for last updated time.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Returns a cursor page that lazily fetches subsequent pages on iteration.
        return self._get_api_list(
            "/evals",
            page=SyncCursorPage[EvalListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                        "order_by": order_by,
                    },
                    eval_list_params.EvalListParams,
                ),
            ),
            model=EvalListResponse,
        )

    def delete(
        self,
        eval_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> EvalDeleteResponse:
        """
        Delete an evaluation.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # An empty path parameter would produce a malformed URL; fail fast instead.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return self._delete(
            f"/evals/{eval_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=EvalDeleteResponse,
        )
class AsyncEvals(AsyncAPIResource):
    # Asynchronous API resource for the `/evals` endpoints: CRUD operations on
    # evaluations plus access to the nested `runs` sub-resource.

    @cached_property
    def runs(self) -> AsyncRuns:
        # Nested sub-resource: /evals/{eval_id}/runs
        return AsyncRuns(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncEvalsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncEvalsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncEvalsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncEvalsWithStreamingResponse(self)

    async def create(
        self,
        *,
        data_source_config: eval_create_params.DataSourceConfig,
        testing_criteria: Iterable[eval_create_params.TestingCriterion],
        metadata: Optional[Metadata] | Omit = omit,
        name: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> EvalCreateResponse:
        """
        Create the structure of an evaluation that can be used to test a model's
        performance. An evaluation is a set of testing criteria and the config for a
        data source, which dictates the schema of the data used in the evaluation. After
        creating an evaluation, you can run it on different models and model parameters.
        We support several types of graders and datasources. For more information, see
        the [Evals guide](https://platform.openai.com/docs/guides/evals).

        Args:
          data_source_config: The configuration for the data source used for the evaluation runs. Dictates the
              schema of the data used in the evaluation.

          testing_criteria: A list of graders for all eval runs in this group. Graders can reference
              variables in the data source using double curly braces notation, like
              `{{item.variable_name}}`. To reference the model's output, use the `sample`
              namespace (ie, `{{sample.output_text}}`).

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          name: The name of the evaluation.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/evals",
            body=await async_maybe_transform(
                {
                    "data_source_config": data_source_config,
                    "testing_criteria": testing_criteria,
                    "metadata": metadata,
                    "name": name,
                },
                eval_create_params.EvalCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=EvalCreateResponse,
        )

    async def retrieve(
        self,
        eval_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> EvalRetrieveResponse:
        """
        Get an evaluation by ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # An empty path parameter would produce a malformed URL; fail fast instead.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return await self._get(
            f"/evals/{eval_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=EvalRetrieveResponse,
        )

    async def update(
        self,
        eval_id: str,
        *,
        metadata: Optional[Metadata] | Omit = omit,
        name: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> EvalUpdateResponse:
        """
        Update certain properties of an evaluation.

        Args:
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          name: Rename the evaluation.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # An empty path parameter would produce a malformed URL; fail fast instead.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return await self._post(
            f"/evals/{eval_id}",
            body=await async_maybe_transform(
                {
                    "metadata": metadata,
                    "name": name,
                },
                eval_update_params.EvalUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=EvalUpdateResponse,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        order_by: Literal["created_at", "updated_at"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[EvalListResponse, AsyncCursorPage[EvalListResponse]]:
        """
        List evaluations for a project.

        Args:
          after: Identifier for the last eval from the previous pagination request.

          limit: Number of evals to retrieve.

          order: Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for
              descending order.

          order_by: Evals can be ordered by creation time or last updated time. Use `created_at` for
              creation time or `updated_at` for last updated time.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # NOTE: not declared `async` — returns an AsyncPaginator that is awaited
        # (or async-iterated) by the caller.
        return self._get_api_list(
            "/evals",
            page=AsyncCursorPage[EvalListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                        "order_by": order_by,
                    },
                    eval_list_params.EvalListParams,
                ),
            ),
            model=EvalListResponse,
        )

    async def delete(
        self,
        eval_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> EvalDeleteResponse:
        """
        Delete an evaluation.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # An empty path parameter would produce a malformed URL; fail fast instead.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return await self._delete(
            f"/evals/{eval_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=EvalDeleteResponse,
        )
class EvalsWithRawResponse:
    """Raw-response view over an ``Evals`` resource."""

    def __init__(self, evals: Evals) -> None:
        self._evals = evals
        # Wrap every resource method so calls return the raw HTTP response.
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(evals.create)
        self.retrieve = wrap(evals.retrieve)
        self.update = wrap(evals.update)
        self.list = wrap(evals.list)
        self.delete = wrap(evals.delete)

    @cached_property
    def runs(self) -> RunsWithRawResponse:
        return RunsWithRawResponse(self._evals.runs)
class AsyncEvalsWithRawResponse:
    """Raw-response view over an ``AsyncEvals`` resource."""

    def __init__(self, evals: AsyncEvals) -> None:
        self._evals = evals
        # Wrap every resource method so awaiting yields the raw HTTP response.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(evals.create)
        self.retrieve = wrap(evals.retrieve)
        self.update = wrap(evals.update)
        self.list = wrap(evals.list)
        self.delete = wrap(evals.delete)

    @cached_property
    def runs(self) -> AsyncRunsWithRawResponse:
        return AsyncRunsWithRawResponse(self._evals.runs)
class EvalsWithStreamingResponse:
    """Streaming-response view over an ``Evals`` resource."""

    def __init__(self, evals: Evals) -> None:
        self._evals = evals
        # Wrap every resource method so calls return streamed responses.
        self.create = to_streamed_response_wrapper(evals.create)
        self.retrieve = to_streamed_response_wrapper(evals.retrieve)
        self.update = to_streamed_response_wrapper(evals.update)
        self.list = to_streamed_response_wrapper(evals.list)
        self.delete = to_streamed_response_wrapper(evals.delete)

    @cached_property
    def runs(self) -> RunsWithStreamingResponse:
        return RunsWithStreamingResponse(self._evals.runs)
class AsyncEvalsWithStreamingResponse:
    """Streaming-response view over an ``AsyncEvals`` resource."""

    def __init__(self, evals: AsyncEvals) -> None:
        self._evals = evals
        # Wrap every resource method so calls return streamed responses.
        self.create = async_to_streamed_response_wrapper(evals.create)
        self.retrieve = async_to_streamed_response_wrapper(evals.retrieve)
        self.update = async_to_streamed_response_wrapper(evals.update)
        self.list = async_to_streamed_response_wrapper(evals.list)
        self.delete = async_to_streamed_response_wrapper(evals.delete)

    @cached_property
    def runs(self) -> AsyncRunsWithStreamingResponse:
        return AsyncRunsWithStreamingResponse(self._evals.runs)

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .runs import (
Runs,
AsyncRuns,
RunsWithRawResponse,
AsyncRunsWithRawResponse,
RunsWithStreamingResponse,
AsyncRunsWithStreamingResponse,
)
from .output_items import (
OutputItems,
AsyncOutputItems,
OutputItemsWithRawResponse,
AsyncOutputItemsWithRawResponse,
OutputItemsWithStreamingResponse,
AsyncOutputItemsWithStreamingResponse,
)
__all__ = [
"OutputItems",
"AsyncOutputItems",
"OutputItemsWithRawResponse",
"AsyncOutputItemsWithRawResponse",
"OutputItemsWithStreamingResponse",
"AsyncOutputItemsWithStreamingResponse",
"Runs",
"AsyncRuns",
"RunsWithRawResponse",
"AsyncRunsWithRawResponse",
"RunsWithStreamingResponse",
"AsyncRunsWithStreamingResponse",
]

View File

@@ -0,0 +1,315 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.evals.runs import output_item_list_params
from ....types.evals.runs.output_item_list_response import OutputItemListResponse
from ....types.evals.runs.output_item_retrieve_response import OutputItemRetrieveResponse
__all__ = ["OutputItems", "AsyncOutputItems"]
class OutputItems(SyncAPIResource):
    """Synchronous resource for the output items produced by an evaluation run.

    All endpoints live under ``/evals/{eval_id}/runs/{run_id}/output_items``.
    """

    @cached_property
    def with_raw_response(self) -> OutputItemsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return OutputItemsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> OutputItemsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return OutputItemsWithStreamingResponse(self)

    def retrieve(
        self,
        output_item_id: str,
        *,
        eval_id: str,
        run_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> OutputItemRetrieveResponse:
        """
        Get an evaluation run output item by ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Reject empty path parameters up front: an empty segment would produce a
        # malformed URL (e.g. `/evals//runs/...`) that targets the wrong route.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        if not output_item_id:
            raise ValueError(f"Expected a non-empty value for `output_item_id` but received {output_item_id!r}")
        return self._get(
            f"/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=OutputItemRetrieveResponse,
        )

    def list(
        self,
        run_id: str,
        *,
        eval_id: str,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        status: Literal["fail", "pass"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[OutputItemListResponse]:
        """
        Get a list of output items for an evaluation run.

        Args:
          after: Identifier for the last output item from the previous pagination request.

          limit: Number of output items to retrieve.

          order: Sort order for output items by timestamp. Use `asc` for ascending order or
              `desc` for descending order. Defaults to `asc`.

          status: Filter output items by status. Use `fail` to filter by failed output items or
              `pass` to filter by passed output items.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        return self._get_api_list(
            f"/evals/{eval_id}/runs/{run_id}/output_items",
            page=SyncCursorPage[OutputItemListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # `maybe_transform` drops omitted values and serializes the rest
                # according to the generated query-params schema.
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                        "status": status,
                    },
                    output_item_list_params.OutputItemListParams,
                ),
            ),
            model=OutputItemListResponse,
        )
class AsyncOutputItems(AsyncAPIResource):
    """Asynchronous counterpart of :class:`OutputItems` for evaluation run output items.

    All endpoints live under ``/evals/{eval_id}/runs/{run_id}/output_items``.
    """

    @cached_property
    def with_raw_response(self) -> AsyncOutputItemsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncOutputItemsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncOutputItemsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncOutputItemsWithStreamingResponse(self)

    async def retrieve(
        self,
        output_item_id: str,
        *,
        eval_id: str,
        run_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> OutputItemRetrieveResponse:
        """
        Get an evaluation run output item by ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Reject empty path parameters up front: an empty segment would produce a
        # malformed URL (e.g. `/evals//runs/...`) that targets the wrong route.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        if not output_item_id:
            raise ValueError(f"Expected a non-empty value for `output_item_id` but received {output_item_id!r}")
        return await self._get(
            f"/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=OutputItemRetrieveResponse,
        )

    def list(
        self,
        run_id: str,
        *,
        eval_id: str,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        status: Literal["fail", "pass"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[OutputItemListResponse, AsyncCursorPage[OutputItemListResponse]]:
        """
        Get a list of output items for an evaluation run.

        Args:
          after: Identifier for the last output item from the previous pagination request.

          limit: Number of output items to retrieve.

          order: Sort order for output items by timestamp. Use `asc` for ascending order or
              `desc` for descending order. Defaults to `asc`.

          status: Filter output items by status. Use `fail` to filter by failed output items or
              `pass` to filter by passed output items.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        # Note: not `async def` — this returns a paginator that is awaited/iterated lazily.
        return self._get_api_list(
            f"/evals/{eval_id}/runs/{run_id}/output_items",
            page=AsyncCursorPage[OutputItemListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # `maybe_transform` drops omitted values and serializes the rest
                # according to the generated query-params schema.
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                        "status": status,
                    },
                    output_item_list_params.OutputItemListParams,
                ),
            ),
            model=OutputItemListResponse,
        )
class OutputItemsWithRawResponse:
    """Mirror of :class:`OutputItems` whose methods return the raw HTTP response."""

    def __init__(self, output_items: OutputItems) -> None:
        self._output_items = output_items

        _wrap = _legacy_response.to_raw_response_wrapper
        self.retrieve = _wrap(output_items.retrieve)
        self.list = _wrap(output_items.list)
class AsyncOutputItemsWithRawResponse:
    """Mirror of :class:`AsyncOutputItems` whose methods return the raw HTTP response."""

    def __init__(self, output_items: AsyncOutputItems) -> None:
        self._output_items = output_items

        _wrap = _legacy_response.async_to_raw_response_wrapper
        self.retrieve = _wrap(output_items.retrieve)
        self.list = _wrap(output_items.list)
class OutputItemsWithStreamingResponse:
    """Mirror of :class:`OutputItems` whose methods stream the response body lazily."""

    def __init__(self, output_items: OutputItems) -> None:
        self._output_items = output_items

        _wrap = to_streamed_response_wrapper
        self.retrieve = _wrap(output_items.retrieve)
        self.list = _wrap(output_items.list)
class AsyncOutputItemsWithStreamingResponse:
    """Mirror of :class:`AsyncOutputItems` whose methods stream the response body lazily."""

    def __init__(self, output_items: AsyncOutputItems) -> None:
        self._output_items = output_items

        _wrap = async_to_streamed_response_wrapper
        self.retrieve = _wrap(output_items.retrieve)
        self.list = _wrap(output_items.list)

View File

@@ -0,0 +1,634 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .output_items import (
OutputItems,
AsyncOutputItems,
OutputItemsWithRawResponse,
AsyncOutputItemsWithRawResponse,
OutputItemsWithStreamingResponse,
AsyncOutputItemsWithStreamingResponse,
)
from ....pagination import SyncCursorPage, AsyncCursorPage
from ....types.evals import run_list_params, run_create_params
from ...._base_client import AsyncPaginator, make_request_options
from ....types.shared_params.metadata import Metadata
from ....types.evals.run_list_response import RunListResponse
from ....types.evals.run_cancel_response import RunCancelResponse
from ....types.evals.run_create_response import RunCreateResponse
from ....types.evals.run_delete_response import RunDeleteResponse
from ....types.evals.run_retrieve_response import RunRetrieveResponse
__all__ = ["Runs", "AsyncRuns"]
class Runs(SyncAPIResource):
    """Synchronous resource for evaluation runs (``/evals/{eval_id}/runs``).

    Exposes create/retrieve/list/delete/cancel for runs plus the nested
    ``output_items`` sub-resource.
    """

    @cached_property
    def output_items(self) -> OutputItems:
        # Nested sub-resource for `/evals/{eval_id}/runs/{run_id}/output_items`.
        return OutputItems(self._client)

    @cached_property
    def with_raw_response(self) -> RunsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return RunsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> RunsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return RunsWithStreamingResponse(self)

    def create(
        self,
        eval_id: str,
        *,
        data_source: run_create_params.DataSource,
        metadata: Optional[Metadata] | Omit = omit,
        name: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunCreateResponse:
        """
        Kicks off a new run for a given evaluation, specifying the data source, and what
        model configuration to use to test. The datasource will be validated against the
        schema specified in the config of the evaluation.

        Args:
          data_source: Details about the run's data source.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          name: The name of the run.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would produce a malformed URL.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return self._post(
            f"/evals/{eval_id}/runs",
            # `maybe_transform` drops omitted values and serializes the rest
            # according to the generated request-body schema.
            body=maybe_transform(
                {
                    "data_source": data_source,
                    "metadata": metadata,
                    "name": name,
                },
                run_create_params.RunCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=RunCreateResponse,
        )

    def retrieve(
        self,
        run_id: str,
        *,
        eval_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunRetrieveResponse:
        """
        Get an evaluation run by ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        return self._get(
            f"/evals/{eval_id}/runs/{run_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=RunRetrieveResponse,
        )

    def list(
        self,
        eval_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        status: Literal["queued", "in_progress", "completed", "canceled", "failed"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[RunListResponse]:
        """
        Get a list of runs for an evaluation.

        Args:
          after: Identifier for the last run from the previous pagination request.

          limit: Number of runs to retrieve.

          order: Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
              descending order. Defaults to `asc`.

          status: Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
              | `canceled`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return self._get_api_list(
            f"/evals/{eval_id}/runs",
            page=SyncCursorPage[RunListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                        "status": status,
                    },
                    run_list_params.RunListParams,
                ),
            ),
            model=RunListResponse,
        )

    def delete(
        self,
        run_id: str,
        *,
        eval_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunDeleteResponse:
        """
        Delete an eval run.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        return self._delete(
            f"/evals/{eval_id}/runs/{run_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=RunDeleteResponse,
        )

    def cancel(
        self,
        run_id: str,
        *,
        eval_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunCancelResponse:
        """
        Cancel an ongoing evaluation run.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        # Cancellation is a POST to the run resource itself (no `/cancel` suffix).
        return self._post(
            f"/evals/{eval_id}/runs/{run_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=RunCancelResponse,
        )
class AsyncRuns(AsyncAPIResource):
    """Asynchronous counterpart of :class:`Runs` (``/evals/{eval_id}/runs``).

    Exposes create/retrieve/list/delete/cancel for runs plus the nested
    ``output_items`` sub-resource.
    """

    @cached_property
    def output_items(self) -> AsyncOutputItems:
        # Nested sub-resource for `/evals/{eval_id}/runs/{run_id}/output_items`.
        return AsyncOutputItems(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncRunsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncRunsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncRunsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncRunsWithStreamingResponse(self)

    async def create(
        self,
        eval_id: str,
        *,
        data_source: run_create_params.DataSource,
        metadata: Optional[Metadata] | Omit = omit,
        name: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunCreateResponse:
        """
        Kicks off a new run for a given evaluation, specifying the data source, and what
        model configuration to use to test. The datasource will be validated against the
        schema specified in the config of the evaluation.

        Args:
          data_source: Details about the run's data source.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          name: The name of the run.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would produce a malformed URL.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return await self._post(
            f"/evals/{eval_id}/runs",
            # Async transform variant: may await custom serialization hooks.
            body=await async_maybe_transform(
                {
                    "data_source": data_source,
                    "metadata": metadata,
                    "name": name,
                },
                run_create_params.RunCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=RunCreateResponse,
        )

    async def retrieve(
        self,
        run_id: str,
        *,
        eval_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunRetrieveResponse:
        """
        Get an evaluation run by ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        return await self._get(
            f"/evals/{eval_id}/runs/{run_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=RunRetrieveResponse,
        )

    def list(
        self,
        eval_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        status: Literal["queued", "in_progress", "completed", "canceled", "failed"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[RunListResponse, AsyncCursorPage[RunListResponse]]:
        """
        Get a list of runs for an evaluation.

        Args:
          after: Identifier for the last run from the previous pagination request.

          limit: Number of runs to retrieve.

          order: Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
              descending order. Defaults to `asc`.

          status: Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
              | `canceled`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        # Note: not `async def` — this returns a paginator that is awaited/iterated lazily.
        return self._get_api_list(
            f"/evals/{eval_id}/runs",
            page=AsyncCursorPage[RunListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                        "status": status,
                    },
                    run_list_params.RunListParams,
                ),
            ),
            model=RunListResponse,
        )

    async def delete(
        self,
        run_id: str,
        *,
        eval_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunDeleteResponse:
        """
        Delete an eval run.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        return await self._delete(
            f"/evals/{eval_id}/runs/{run_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=RunDeleteResponse,
        )

    async def cancel(
        self,
        run_id: str,
        *,
        eval_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunCancelResponse:
        """
        Cancel an ongoing evaluation run.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        # Cancellation is a POST to the run resource itself (no `/cancel` suffix).
        return await self._post(
            f"/evals/{eval_id}/runs/{run_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=RunCancelResponse,
        )
class RunsWithRawResponse:
    """Mirror of :class:`Runs` whose methods return the raw HTTP response."""

    def __init__(self, runs: Runs) -> None:
        self._runs = runs

        _wrap = _legacy_response.to_raw_response_wrapper
        self.create = _wrap(runs.create)
        self.retrieve = _wrap(runs.retrieve)
        self.list = _wrap(runs.list)
        self.delete = _wrap(runs.delete)
        self.cancel = _wrap(runs.cancel)

    @cached_property
    def output_items(self) -> OutputItemsWithRawResponse:
        return OutputItemsWithRawResponse(self._runs.output_items)
class AsyncRunsWithRawResponse:
    """Mirror of :class:`AsyncRuns` whose methods return the raw HTTP response."""

    def __init__(self, runs: AsyncRuns) -> None:
        self._runs = runs

        _wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = _wrap(runs.create)
        self.retrieve = _wrap(runs.retrieve)
        self.list = _wrap(runs.list)
        self.delete = _wrap(runs.delete)
        self.cancel = _wrap(runs.cancel)

    @cached_property
    def output_items(self) -> AsyncOutputItemsWithRawResponse:
        return AsyncOutputItemsWithRawResponse(self._runs.output_items)
class RunsWithStreamingResponse:
    """Mirror of :class:`Runs` whose methods stream the response body lazily."""

    def __init__(self, runs: Runs) -> None:
        self._runs = runs

        _wrap = to_streamed_response_wrapper
        self.create = _wrap(runs.create)
        self.retrieve = _wrap(runs.retrieve)
        self.list = _wrap(runs.list)
        self.delete = _wrap(runs.delete)
        self.cancel = _wrap(runs.cancel)

    @cached_property
    def output_items(self) -> OutputItemsWithStreamingResponse:
        return OutputItemsWithStreamingResponse(self._runs.output_items)
class AsyncRunsWithStreamingResponse:
    """Mirror of :class:`AsyncRuns` whose methods stream the response body lazily."""

    def __init__(self, runs: AsyncRuns) -> None:
        self._runs = runs

        _wrap = async_to_streamed_response_wrapper
        self.create = _wrap(runs.create)
        self.retrieve = _wrap(runs.retrieve)
        self.list = _wrap(runs.list)
        self.delete = _wrap(runs.delete)
        self.cancel = _wrap(runs.cancel)

    @cached_property
    def output_items(self) -> AsyncOutputItemsWithStreamingResponse:
        return AsyncOutputItemsWithStreamingResponse(self._runs.output_items)

View File

@@ -0,0 +1,770 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import time
import typing_extensions
from typing import Mapping, cast
from typing_extensions import Literal
import httpx
from .. import _legacy_response
from ..types import FilePurpose, file_list_params, file_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_streamed_response_wrapper,
async_to_streamed_response_wrapper,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ..pagination import SyncCursorPage, AsyncCursorPage
from .._base_client import AsyncPaginator, make_request_options
from ..types.file_object import FileObject
from ..types.file_deleted import FileDeleted
from ..types.file_purpose import FilePurpose
__all__ = ["Files", "AsyncFiles"]
class Files(SyncAPIResource):
@cached_property
def with_raw_response(self) -> FilesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return FilesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> FilesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return FilesWithStreamingResponse(self)
def create(
self,
*,
file: FileTypes,
purpose: FilePurpose,
expires_after: file_create_params.ExpiresAfter | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileObject:
"""Upload a file that can be used across various endpoints.
Individual files can be
up to 512 MB, and the size of all files uploaded by one organization can be up
to 1 TB.
- The Assistants API supports files up to 2 million tokens and of specific file
types. See the
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools)
for details.
- The Fine-tuning API only supports `.jsonl` files. The input also has certain
required formats for fine-tuning
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input)
or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
models.
- The Batch API only supports `.jsonl` files up to 200 MB in size. The input
also has a specific required
[format](https://platform.openai.com/docs/api-reference/batch/request-input).
Please [contact us](https://help.openai.com/) if you need to increase these
storage limits.
Args:
file: The File object (not file name) to be uploaded.
purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
Flexible file type for any purpose - `evals`: Used for eval data sets
expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
after 30 days and all other files are persisted until they are manually deleted.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"file": file,
"purpose": purpose,
"expires_after": expires_after,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
"/files",
body=maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileObject,
)
def retrieve(
    self,
    file_id: str,
    *,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileObject:
    """Fetch the metadata for a single file.

    Args:
      file_id: Identifier of the file to look up.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not file_id:
        raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
    request_options = make_request_options(
        extra_headers=extra_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    return self._get(f"/files/{file_id}", options=request_options, cast_to=FileObject)
def list(
    self,
    *,
    after: str | Omit = omit,
    limit: int | Omit = omit,
    order: Literal["asc", "desc"] | Omit = omit,
    purpose: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[FileObject]:
    """Return a cursor page over the organization's files.

    Args:
      after: A cursor for use in pagination. `after` is an object ID that defines your place
          in the list. For instance, if you make a list request and receive 100 objects,
          ending with obj_foo, your subsequent call can include after=obj_foo in order to
          fetch the next page of the list.

      limit: A limit on the number of objects to be returned. Limit can range between 1 and
          10,000, and the default is 10,000.

      order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
          order and `desc` for descending order.

      purpose: Only return files with the given purpose.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    query_params = {
        "after": after,
        "limit": limit,
        "order": order,
        "purpose": purpose,
    }
    return self._get_api_list(
        "/files",
        page=SyncCursorPage[FileObject],
        options=make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=maybe_transform(query_params, file_list_params.FileListParams),
        ),
        model=FileObject,
    )
def delete(
    self,
    file_id: str,
    *,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileDeleted:
    """Delete a file and remove it from all vector stores.

    Args:
      file_id: Identifier of the file to delete.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not file_id:
        raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
    request_options = make_request_options(
        extra_headers=extra_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    return self._delete(f"/files/{file_id}", options=request_options, cast_to=FileDeleted)
def content(
    self,
    file_id: str,
    *,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
    """Return the raw contents of the specified file.

    Args:
      file_id: Identifier of the file whose contents to fetch.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not file_id:
        raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
    # Request the raw binary payload rather than a JSON document; caller-supplied
    # headers still win on conflict.
    merged_headers = {"Accept": "application/binary", **(extra_headers or {})}
    request_options = make_request_options(
        extra_headers=merged_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    return self._get(
        f"/files/{file_id}/content",
        options=request_options,
        cast_to=_legacy_response.HttpxBinaryResponseContent,
    )
@typing_extensions.deprecated("The `.content()` method should be used instead")
def retrieve_content(
    self,
    file_id: str,
    *,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> str:
    """Return the contents of the specified file as a `str`.

    Deprecated: prefer `.content()`, which returns the binary payload.

    Args:
      file_id: Identifier of the file whose contents to fetch.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not file_id:
        raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
    request_options = make_request_options(
        extra_headers=extra_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    return self._get(f"/files/{file_id}/content", options=request_options, cast_to=str)
def wait_for_processing(
    self,
    id: str,
    *,
    poll_interval: float = 5.0,
    max_wait_seconds: float = 30 * 60,
) -> FileObject:
    """Poll the given file until it reaches a terminal state.

    Args:
      id: Identifier of the file to poll.
      poll_interval: Seconds slept between polls (default 5 s).
      max_wait_seconds: Total elapsed time budget before giving up (default 30 min).

    Returns:
      The final ``FileObject`` once its status is terminal.

    Raises:
      RuntimeError: If the file has not reached a terminal state within
        ``max_wait_seconds``.
    """
    TERMINAL_STATES = {"processed", "error", "deleted"}

    # Use a monotonic clock for the elapsed-time budget: time.time() is
    # wall-clock and can jump (NTP sync, DST), which could make the timeout
    # fire far too early or never.
    start = time.monotonic()
    file = self.retrieve(id)
    while file.status not in TERMINAL_STATES:
        self._sleep(poll_interval)

        file = self.retrieve(id)
        if time.monotonic() - start > max_wait_seconds:
            raise RuntimeError(
                f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds."
            )

    return file
class AsyncFiles(AsyncAPIResource):
    """Asynchronous counterpart of `Files`: the same `/files` endpoints with awaitable methods."""

    @cached_property
    def with_raw_response(self) -> AsyncFilesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncFilesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncFilesWithStreamingResponse(self)

    async def create(
        self,
        *,
        file: FileTypes,
        purpose: FilePurpose,
        expires_after: file_create_params.ExpiresAfter | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FileObject:
        """Upload a file that can be used across various endpoints.

        Individual files can be up to 512 MB, and the size of all files uploaded by
        one organization can be up to 1 TB.

        - The Assistants API supports files up to 2 million tokens and of specific file
          types. See the
          [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools)
          for details.
        - The Fine-tuning API only supports `.jsonl` files. The input also has certain
          required formats for fine-tuning
          [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input)
          or
          [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
          models.
        - The Batch API only supports `.jsonl` files up to 200 MB in size. The input
          also has a specific required
          [format](https://platform.openai.com/docs/api-reference/batch/request-input).

        Please [contact us](https://help.openai.com/) if you need to increase these
        storage limits.

        Args:
          file: The File object (not file name) to be uploaded.

          purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
              Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
              fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
              Flexible file type for any purpose - `evals`: Used for eval data sets

          expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
              after 30 days and all other files are persisted until they are manually deleted.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        body = deepcopy_minimal(
            {
                "file": file,
                "purpose": purpose,
                "expires_after": expires_after,
            }
        )
        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
        # It should be noted that the actual Content-Type header that will be
        # sent to the server will contain a `boundary` parameter, e.g.
        # multipart/form-data; boundary=---abc--
        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return await self._post(
            "/files",
            body=await async_maybe_transform(body, file_create_params.FileCreateParams),
            files=files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FileObject,
        )

    async def retrieve(
        self,
        file_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FileObject:
        """
        Returns information about a specific file.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        return await self._get(
            f"/files/{file_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FileObject,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        purpose: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[FileObject, AsyncCursorPage[FileObject]]:
        """Returns a list of files.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              10,000, and the default is 10,000.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          purpose: Only return files with the given purpose.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get_api_list(
            "/files",
            page=AsyncCursorPage[FileObject],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                        "purpose": purpose,
                    },
                    file_list_params.FileListParams,
                ),
            ),
            model=FileObject,
        )

    async def delete(
        self,
        file_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FileDeleted:
        """
        Delete a file and remove it from all vector stores.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        return await self._delete(
            f"/files/{file_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FileDeleted,
        )

    async def content(
        self,
        file_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Returns the contents of the specified file.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        # Request the raw binary payload rather than a JSON document.
        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
        return await self._get(
            f"/files/{file_id}/content",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )

    @typing_extensions.deprecated("The `.content()` method should be used instead")
    async def retrieve_content(
        self,
        file_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> str:
        """
        Returns the contents of the specified file.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        return await self._get(
            f"/files/{file_id}/content",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=str,
        )

    async def wait_for_processing(
        self,
        id: str,
        *,
        poll_interval: float = 5.0,
        max_wait_seconds: float = 30 * 60,
    ) -> FileObject:
        """Waits for the given file to be processed, default timeout is 30 mins."""
        TERMINAL_STATES = {"processed", "error", "deleted"}

        # Use a monotonic clock for the elapsed-time budget: time.time() is
        # wall-clock and can jump (NTP sync, DST), which could make the timeout
        # fire far too early or never.
        start = time.monotonic()
        file = await self.retrieve(id)
        while file.status not in TERMINAL_STATES:
            await self._sleep(poll_interval)

            file = await self.retrieve(id)
            if time.monotonic() - start > max_wait_seconds:
                raise RuntimeError(
                    f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds."
                )

        return file
class FilesWithRawResponse:
    """Exposes each `Files` method wrapped to return the raw HTTP response."""

    def __init__(self, files: Files) -> None:
        self._files = files

        _wrap = _legacy_response.to_raw_response_wrapper
        self.create = _wrap(files.create)
        self.retrieve = _wrap(files.retrieve)
        self.list = _wrap(files.list)
        self.delete = _wrap(files.delete)
        self.content = _wrap(files.content)
        self.retrieve_content = _wrap(files.retrieve_content)  # pyright: ignore[reportDeprecated]
class AsyncFilesWithRawResponse:
    """Exposes each `AsyncFiles` method wrapped to return the raw HTTP response."""

    def __init__(self, files: AsyncFiles) -> None:
        self._files = files

        _wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = _wrap(files.create)
        self.retrieve = _wrap(files.retrieve)
        self.list = _wrap(files.list)
        self.delete = _wrap(files.delete)
        self.content = _wrap(files.content)
        self.retrieve_content = _wrap(files.retrieve_content)  # pyright: ignore[reportDeprecated]
class FilesWithStreamingResponse:
    """Exposes each `Files` method wrapped for streaming-response access."""

    def __init__(self, files: Files) -> None:
        self._files = files

        _wrap = to_streamed_response_wrapper
        self.create = _wrap(files.create)
        self.retrieve = _wrap(files.retrieve)
        self.list = _wrap(files.list)
        self.delete = _wrap(files.delete)
        # `.content` returns binary data, so it uses the binary-aware wrapper.
        self.content = to_custom_streamed_response_wrapper(
            files.content,
            StreamedBinaryAPIResponse,
        )
        self.retrieve_content = _wrap(files.retrieve_content)  # pyright: ignore[reportDeprecated]
class AsyncFilesWithStreamingResponse:
    """Exposes each `AsyncFiles` method wrapped for streaming-response access."""

    def __init__(self, files: AsyncFiles) -> None:
        self._files = files

        _wrap = async_to_streamed_response_wrapper
        self.create = _wrap(files.create)
        self.retrieve = _wrap(files.retrieve)
        self.list = _wrap(files.list)
        self.delete = _wrap(files.delete)
        # `.content` returns binary data, so it uses the binary-aware wrapper.
        self.content = async_to_custom_streamed_response_wrapper(
            files.content,
            AsyncStreamedBinaryAPIResponse,
        )
        self.retrieve_content = _wrap(files.retrieve_content)  # pyright: ignore[reportDeprecated]

View File

@@ -0,0 +1,61 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .jobs import (
Jobs,
AsyncJobs,
JobsWithRawResponse,
AsyncJobsWithRawResponse,
JobsWithStreamingResponse,
AsyncJobsWithStreamingResponse,
)
from .alpha import (
Alpha,
AsyncAlpha,
AlphaWithRawResponse,
AsyncAlphaWithRawResponse,
AlphaWithStreamingResponse,
AsyncAlphaWithStreamingResponse,
)
from .checkpoints import (
Checkpoints,
AsyncCheckpoints,
CheckpointsWithRawResponse,
AsyncCheckpointsWithRawResponse,
CheckpointsWithStreamingResponse,
AsyncCheckpointsWithStreamingResponse,
)
from .fine_tuning import (
FineTuning,
AsyncFineTuning,
FineTuningWithRawResponse,
AsyncFineTuningWithRawResponse,
FineTuningWithStreamingResponse,
AsyncFineTuningWithStreamingResponse,
)
# Public surface of the `fine_tuning` resource package, re-exported from the
# submodules imported above.
__all__ = [
    "Jobs",
    "AsyncJobs",
    "JobsWithRawResponse",
    "AsyncJobsWithRawResponse",
    "JobsWithStreamingResponse",
    "AsyncJobsWithStreamingResponse",
    "Checkpoints",
    "AsyncCheckpoints",
    "CheckpointsWithRawResponse",
    "AsyncCheckpointsWithRawResponse",
    "CheckpointsWithStreamingResponse",
    "AsyncCheckpointsWithStreamingResponse",
    "Alpha",
    "AsyncAlpha",
    "AlphaWithRawResponse",
    "AsyncAlphaWithRawResponse",
    "AlphaWithStreamingResponse",
    "AsyncAlphaWithStreamingResponse",
    "FineTuning",
    "AsyncFineTuning",
    "FineTuningWithRawResponse",
    "AsyncFineTuningWithRawResponse",
    "FineTuningWithStreamingResponse",
    "AsyncFineTuningWithStreamingResponse",
]

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .alpha import (
Alpha,
AsyncAlpha,
AlphaWithRawResponse,
AsyncAlphaWithRawResponse,
AlphaWithStreamingResponse,
AsyncAlphaWithStreamingResponse,
)
from .graders import (
Graders,
AsyncGraders,
GradersWithRawResponse,
AsyncGradersWithRawResponse,
GradersWithStreamingResponse,
AsyncGradersWithStreamingResponse,
)
# Public surface of the `fine_tuning.alpha` resource package, re-exported from
# the submodules imported above.
__all__ = [
    "Graders",
    "AsyncGraders",
    "GradersWithRawResponse",
    "AsyncGradersWithRawResponse",
    "GradersWithStreamingResponse",
    "AsyncGradersWithStreamingResponse",
    "Alpha",
    "AsyncAlpha",
    "AlphaWithRawResponse",
    "AsyncAlphaWithRawResponse",
    "AlphaWithStreamingResponse",
    "AsyncAlphaWithStreamingResponse",
]

View File

@@ -0,0 +1,102 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from .graders import (
Graders,
AsyncGraders,
GradersWithRawResponse,
AsyncGradersWithRawResponse,
GradersWithStreamingResponse,
AsyncGradersWithStreamingResponse,
)
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
__all__ = ["Alpha", "AsyncAlpha"]
class Alpha(SyncAPIResource):
    """Synchronous entry point for the experimental fine-tuning alpha endpoints."""

    @cached_property
    def graders(self) -> Graders:
        return Graders(self._client)

    @cached_property
    def with_raw_response(self) -> AlphaWithRawResponse:
        """Prefix accessor that makes method calls return the raw response object
        instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        wrapped = AlphaWithRawResponse(self)
        return wrapped

    @cached_property
    def with_streaming_response(self) -> AlphaWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not eagerly read.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        wrapped = AlphaWithStreamingResponse(self)
        return wrapped
class AsyncAlpha(AsyncAPIResource):
    """Asynchronous entry point for the experimental fine-tuning alpha endpoints."""

    @cached_property
    def graders(self) -> AsyncGraders:
        return AsyncGraders(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncAlphaWithRawResponse:
        """Prefix accessor that makes method calls return the raw response object
        instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        wrapped = AsyncAlphaWithRawResponse(self)
        return wrapped

    @cached_property
    def with_streaming_response(self) -> AsyncAlphaWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not eagerly read.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        wrapped = AsyncAlphaWithStreamingResponse(self)
        return wrapped
class AlphaWithRawResponse:
    """Raw-response view over `Alpha`; delegates to the wrapped resource."""

    def __init__(self, alpha: Alpha) -> None:
        self._alpha = alpha

    @cached_property
    def graders(self) -> GradersWithRawResponse:
        return GradersWithRawResponse(self._alpha.graders)
class AsyncAlphaWithRawResponse:
    """Raw-response view over `AsyncAlpha`; delegates to the wrapped resource."""

    def __init__(self, alpha: AsyncAlpha) -> None:
        self._alpha = alpha

    @cached_property
    def graders(self) -> AsyncGradersWithRawResponse:
        return AsyncGradersWithRawResponse(self._alpha.graders)
class AlphaWithStreamingResponse:
    """Streaming-response view over `Alpha`; delegates to the wrapped resource."""

    def __init__(self, alpha: Alpha) -> None:
        self._alpha = alpha

    @cached_property
    def graders(self) -> GradersWithStreamingResponse:
        return GradersWithStreamingResponse(self._alpha.graders)
class AsyncAlphaWithStreamingResponse:
    """Streaming-response view over `AsyncAlpha`; delegates to the wrapped resource."""

    def __init__(self, alpha: AsyncAlpha) -> None:
        self._alpha = alpha

    @cached_property
    def graders(self) -> AsyncGradersWithStreamingResponse:
        return AsyncGradersWithStreamingResponse(self._alpha.graders)

View File

@@ -0,0 +1,282 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._base_client import make_request_options
from ....types.fine_tuning.alpha import grader_run_params, grader_validate_params
from ....types.fine_tuning.alpha.grader_run_response import GraderRunResponse
from ....types.fine_tuning.alpha.grader_validate_response import GraderValidateResponse
__all__ = ["Graders", "AsyncGraders"]
class Graders(SyncAPIResource):
    """Synchronous access to the alpha grader endpoints."""

    @cached_property
    def with_raw_response(self) -> GradersWithRawResponse:
        """Prefix accessor that makes method calls return the raw response object
        instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return GradersWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> GradersWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not eagerly read.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return GradersWithStreamingResponse(self)

    def run(
        self,
        *,
        grader: grader_run_params.Grader,
        model_sample: str,
        item: object | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> GraderRunResponse:
        """Run a grader against a model sample.

        Args:
          grader: The grader used for the fine-tuning job.

          model_sample: The model sample to be evaluated. This value will be used to populate the
              `sample` namespace; the `output_json` variable will be populated if the sample
              is a valid JSON string. See
              [the guide](https://platform.openai.com/docs/guides/graders) for more details.

          item: The dataset item provided to the grader. This will be used to populate the
              `item` namespace. See
              [the guide](https://platform.openai.com/docs/guides/graders) for more details.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = maybe_transform(
            {
                "grader": grader,
                "model_sample": model_sample,
                "item": item,
            },
            grader_run_params.GraderRunParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            "/fine_tuning/alpha/graders/run",
            body=request_body,
            options=request_options,
            cast_to=GraderRunResponse,
        )

    def validate(
        self,
        *,
        grader: grader_validate_params.Grader,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> GraderValidateResponse:
        """Validate a grader configuration.

        Args:
          grader: The grader used for the fine-tuning job.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = maybe_transform({"grader": grader}, grader_validate_params.GraderValidateParams)
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            "/fine_tuning/alpha/graders/validate",
            body=request_body,
            options=request_options,
            cast_to=GraderValidateResponse,
        )
class AsyncGraders(AsyncAPIResource):
    """Asynchronous access to the alpha grader endpoints."""

    @cached_property
    def with_raw_response(self) -> AsyncGradersWithRawResponse:
        """Prefix accessor that makes method calls return the raw response object
        instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncGradersWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncGradersWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not eagerly read.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncGradersWithStreamingResponse(self)

    async def run(
        self,
        *,
        grader: grader_run_params.Grader,
        model_sample: str,
        item: object | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> GraderRunResponse:
        """Run a grader against a model sample.

        Args:
          grader: The grader used for the fine-tuning job.

          model_sample: The model sample to be evaluated. This value will be used to populate the
              `sample` namespace; the `output_json` variable will be populated if the sample
              is a valid JSON string. See
              [the guide](https://platform.openai.com/docs/guides/graders) for more details.

          item: The dataset item provided to the grader. This will be used to populate the
              `item` namespace. See
              [the guide](https://platform.openai.com/docs/guides/graders) for more details.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = await async_maybe_transform(
            {
                "grader": grader,
                "model_sample": model_sample,
                "item": item,
            },
            grader_run_params.GraderRunParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._post(
            "/fine_tuning/alpha/graders/run",
            body=request_body,
            options=request_options,
            cast_to=GraderRunResponse,
        )

    async def validate(
        self,
        *,
        grader: grader_validate_params.Grader,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> GraderValidateResponse:
        """Validate a grader configuration.

        Args:
          grader: The grader used for the fine-tuning job.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = await async_maybe_transform({"grader": grader}, grader_validate_params.GraderValidateParams)
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._post(
            "/fine_tuning/alpha/graders/validate",
            body=request_body,
            options=request_options,
            cast_to=GraderValidateResponse,
        )
class GradersWithRawResponse:
    """View of :class:`Graders` whose methods return the raw response object."""

    def __init__(self, graders: Graders) -> None:
        self._graders = graders
        # Each bound method is wrapped so calls yield the unparsed response.
        self.run = _legacy_response.to_raw_response_wrapper(graders.run)
        self.validate = _legacy_response.to_raw_response_wrapper(graders.validate)
class AsyncGradersWithRawResponse:
    """Async counterpart of :class:`GradersWithRawResponse`."""

    def __init__(self, graders: AsyncGraders) -> None:
        self._graders = graders
        # Wrap each coroutine method with the async raw-response wrapper.
        self.run = _legacy_response.async_to_raw_response_wrapper(graders.run)
        self.validate = _legacy_response.async_to_raw_response_wrapper(graders.validate)
class GradersWithStreamingResponse:
    """View of :class:`Graders` whose methods return streamed responses (body not read eagerly)."""

    def __init__(self, graders: Graders) -> None:
        self._graders = graders
        self.run = to_streamed_response_wrapper(graders.run)
        self.validate = to_streamed_response_wrapper(graders.validate)
class AsyncGradersWithStreamingResponse:
    """Async counterpart of :class:`GradersWithStreamingResponse`."""

    def __init__(self, graders: AsyncGraders) -> None:
        self._graders = graders
        self.run = async_to_streamed_response_wrapper(graders.run)
        self.validate = async_to_streamed_response_wrapper(graders.validate)

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .checkpoints import (
Checkpoints,
AsyncCheckpoints,
CheckpointsWithRawResponse,
AsyncCheckpointsWithRawResponse,
CheckpointsWithStreamingResponse,
AsyncCheckpointsWithStreamingResponse,
)
from .permissions import (
Permissions,
AsyncPermissions,
PermissionsWithRawResponse,
AsyncPermissionsWithRawResponse,
PermissionsWithStreamingResponse,
AsyncPermissionsWithStreamingResponse,
)
# Public re-export surface of this sub-package: the permissions and checkpoints
# resources plus their raw/streaming response wrapper classes.
__all__ = [
    "Permissions",
    "AsyncPermissions",
    "PermissionsWithRawResponse",
    "AsyncPermissionsWithRawResponse",
    "PermissionsWithStreamingResponse",
    "AsyncPermissionsWithStreamingResponse",
    "Checkpoints",
    "AsyncCheckpoints",
    "CheckpointsWithRawResponse",
    "AsyncCheckpointsWithRawResponse",
    "CheckpointsWithStreamingResponse",
    "AsyncCheckpointsWithStreamingResponse",
]

View File

@@ -0,0 +1,102 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from ...._compat import cached_property
from .permissions import (
Permissions,
AsyncPermissions,
PermissionsWithRawResponse,
AsyncPermissionsWithRawResponse,
PermissionsWithStreamingResponse,
AsyncPermissionsWithStreamingResponse,
)
from ...._resource import SyncAPIResource, AsyncAPIResource
__all__ = ["Checkpoints", "AsyncCheckpoints"]
class Checkpoints(SyncAPIResource):
    """Synchronous entry point for fine-tuned-checkpoint sub-resources."""

    @cached_property
    def permissions(self) -> Permissions:
        """Checkpoint-permission operations, bound lazily to the shared client."""
        return Permissions(self._client)

    @cached_property
    def with_raw_response(self) -> CheckpointsWithRawResponse:
        """
        Prefix variant of this resource whose HTTP method calls return the raw
        response object instead of the parsed content.

        See https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return CheckpointsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CheckpointsWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        See https://www.github.com/openai/openai-python#with_streaming_response
        """
        return CheckpointsWithStreamingResponse(self)
class AsyncCheckpoints(AsyncAPIResource):
    """Async entry point for fine-tuned-checkpoint sub-resources."""

    @cached_property
    def permissions(self) -> AsyncPermissions:
        """Checkpoint-permission operations, bound lazily to the shared async client."""
        return AsyncPermissions(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncCheckpointsWithRawResponse:
        """
        Prefix variant of this resource whose HTTP method calls return the raw
        response object instead of the parsed content.

        See https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCheckpointsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCheckpointsWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        See https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCheckpointsWithStreamingResponse(self)
class CheckpointsWithRawResponse:
    """Raw-response companion to :class:`Checkpoints`."""

    def __init__(self, checkpoints: Checkpoints) -> None:
        self._checkpoints = checkpoints

    @cached_property
    def permissions(self) -> PermissionsWithRawResponse:
        """Raw-response wrapper around the nested permissions resource."""
        return PermissionsWithRawResponse(self._checkpoints.permissions)
class AsyncCheckpointsWithRawResponse:
    """Raw-response companion to :class:`AsyncCheckpoints`."""

    def __init__(self, checkpoints: AsyncCheckpoints) -> None:
        self._checkpoints = checkpoints

    @cached_property
    def permissions(self) -> AsyncPermissionsWithRawResponse:
        """Raw-response wrapper around the nested permissions resource."""
        return AsyncPermissionsWithRawResponse(self._checkpoints.permissions)
class CheckpointsWithStreamingResponse:
    """Streaming-response companion to :class:`Checkpoints`."""

    def __init__(self, checkpoints: Checkpoints) -> None:
        self._checkpoints = checkpoints

    @cached_property
    def permissions(self) -> PermissionsWithStreamingResponse:
        """Streaming-response wrapper around the nested permissions resource."""
        return PermissionsWithStreamingResponse(self._checkpoints.permissions)
class AsyncCheckpointsWithStreamingResponse:
    """Streaming-response companion to :class:`AsyncCheckpoints`."""

    def __init__(self, checkpoints: AsyncCheckpoints) -> None:
        self._checkpoints = checkpoints

    @cached_property
    def permissions(self) -> AsyncPermissionsWithStreamingResponse:
        """Streaming-response wrapper around the nested permissions resource."""
        return AsyncPermissionsWithStreamingResponse(self._checkpoints.permissions)

View File

@@ -0,0 +1,418 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncPage, AsyncPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.fine_tuning.checkpoints import permission_create_params, permission_retrieve_params
from ....types.fine_tuning.checkpoints.permission_create_response import PermissionCreateResponse
from ....types.fine_tuning.checkpoints.permission_delete_response import PermissionDeleteResponse
from ....types.fine_tuning.checkpoints.permission_retrieve_response import PermissionRetrieveResponse
__all__ = ["Permissions", "AsyncPermissions"]
class Permissions(SyncAPIResource):
    """Synchronous client surface for the checkpoint-permission endpoints."""
    @cached_property
    def with_raw_response(self) -> PermissionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return PermissionsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> PermissionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return PermissionsWithStreamingResponse(self)
    def create(
        self,
        fine_tuned_model_checkpoint: str,
        *,
        project_ids: SequenceNotStr[str],
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncPage[PermissionCreateResponse]:
        """
        **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
        This enables organization owners to share fine-tuned models with other projects
        in their organization.
        Args:
          project_ids: The project identifiers to grant access to.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The checkpoint ID is interpolated into the URL path, so it must be non-empty.
        if not fine_tuned_model_checkpoint:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
            )
        # NOTE: `_get_api_list` is used for its pagination machinery, but `method="post"`
        # below makes this an actual POST request carrying the project IDs as a JSON body.
        return self._get_api_list(
            f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
            page=SyncPage[PermissionCreateResponse],
            body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            model=PermissionCreateResponse,
            method="post",
        )
    def retrieve(
        self,
        fine_tuned_model_checkpoint: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["ascending", "descending"] | Omit = omit,
        project_id: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> PermissionRetrieveResponse:
        """
        **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
        Organization owners can use this endpoint to view all permissions for a
        fine-tuned model checkpoint.
        Args:
          after: Identifier for the last permission ID from the previous pagination request.
          limit: Number of permissions to retrieve.
          order: The order in which to retrieve permissions.
          project_id: The ID of the project to get permissions for.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuned_model_checkpoint:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
            )
        # Cursor parameters travel in the query string (presumably `omit` values are
        # dropped by `maybe_transform` — confirm against its implementation). Unlike
        # `create`, this returns a single response object rather than a page iterator.
        return self._get(
            f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                        "project_id": project_id,
                    },
                    permission_retrieve_params.PermissionRetrieveParams,
                ),
            ),
            cast_to=PermissionRetrieveResponse,
        )
    def delete(
        self,
        permission_id: str,
        *,
        fine_tuned_model_checkpoint: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> PermissionDeleteResponse:
        """
        **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
        Organization owners can use this endpoint to delete a permission for a
        fine-tuned model checkpoint.
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Both IDs are interpolated into the URL path, so each must be non-empty.
        if not fine_tuned_model_checkpoint:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
            )
        if not permission_id:
            raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
        return self._delete(
            f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=PermissionDeleteResponse,
        )
class AsyncPermissions(AsyncAPIResource):
    """Async client surface for the checkpoint-permission endpoints."""
    @cached_property
    def with_raw_response(self) -> AsyncPermissionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncPermissionsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> AsyncPermissionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncPermissionsWithStreamingResponse(self)
    def create(
        self,
        fine_tuned_model_checkpoint: str,
        *,
        project_ids: SequenceNotStr[str],
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[PermissionCreateResponse, AsyncPage[PermissionCreateResponse]]:
        """
        **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
        This enables organization owners to share fine-tuned models with other projects
        in their organization.
        Args:
          project_ids: The project identifiers to grant access to.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The checkpoint ID is interpolated into the URL path, so it must be non-empty.
        if not fine_tuned_model_checkpoint:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
            )
        # Plain `def` (not `async def`): this returns an AsyncPaginator and nothing is
        # awaited here, which is also why the synchronous `maybe_transform` is used.
        # `method="post"` makes the paginated call an actual POST with a JSON body.
        return self._get_api_list(
            f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
            page=AsyncPage[PermissionCreateResponse],
            body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            model=PermissionCreateResponse,
            method="post",
        )
    async def retrieve(
        self,
        fine_tuned_model_checkpoint: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["ascending", "descending"] | Omit = omit,
        project_id: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> PermissionRetrieveResponse:
        """
        **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
        Organization owners can use this endpoint to view all permissions for a
        fine-tuned model checkpoint.
        Args:
          after: Identifier for the last permission ID from the previous pagination request.
          limit: Number of permissions to retrieve.
          order: The order in which to retrieve permissions.
          project_id: The ID of the project to get permissions for.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuned_model_checkpoint:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
            )
        # Cursor parameters travel in the query string; the async transform variant is
        # awaited here because this method actually performs the request.
        return await self._get(
            f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=await async_maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                        "project_id": project_id,
                    },
                    permission_retrieve_params.PermissionRetrieveParams,
                ),
            ),
            cast_to=PermissionRetrieveResponse,
        )
    async def delete(
        self,
        permission_id: str,
        *,
        fine_tuned_model_checkpoint: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> PermissionDeleteResponse:
        """
        **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
        Organization owners can use this endpoint to delete a permission for a
        fine-tuned model checkpoint.
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Both IDs are interpolated into the URL path, so each must be non-empty.
        if not fine_tuned_model_checkpoint:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
            )
        if not permission_id:
            raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
        return await self._delete(
            f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=PermissionDeleteResponse,
        )
class PermissionsWithRawResponse:
    """View of :class:`Permissions` whose methods return the raw response object."""

    def __init__(self, permissions: Permissions) -> None:
        self._permissions = permissions
        # Each bound method is wrapped so calls yield the unparsed response.
        self.create = _legacy_response.to_raw_response_wrapper(permissions.create)
        self.retrieve = _legacy_response.to_raw_response_wrapper(permissions.retrieve)
        self.delete = _legacy_response.to_raw_response_wrapper(permissions.delete)
class AsyncPermissionsWithRawResponse:
    """Async counterpart of :class:`PermissionsWithRawResponse`."""

    def __init__(self, permissions: AsyncPermissions) -> None:
        self._permissions = permissions
        self.create = _legacy_response.async_to_raw_response_wrapper(permissions.create)
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(permissions.retrieve)
        self.delete = _legacy_response.async_to_raw_response_wrapper(permissions.delete)
class PermissionsWithStreamingResponse:
    """View of :class:`Permissions` whose methods return streamed responses."""

    def __init__(self, permissions: Permissions) -> None:
        self._permissions = permissions
        self.create = to_streamed_response_wrapper(permissions.create)
        self.retrieve = to_streamed_response_wrapper(permissions.retrieve)
        self.delete = to_streamed_response_wrapper(permissions.delete)
class AsyncPermissionsWithStreamingResponse:
    """Async counterpart of :class:`PermissionsWithStreamingResponse`."""

    def __init__(self, permissions: AsyncPermissions) -> None:
        self._permissions = permissions
        self.create = async_to_streamed_response_wrapper(permissions.create)
        self.retrieve = async_to_streamed_response_wrapper(permissions.retrieve)
        self.delete = async_to_streamed_response_wrapper(permissions.delete)

View File

@@ -0,0 +1,166 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from ..._compat import cached_property
from .jobs.jobs import (
Jobs,
AsyncJobs,
JobsWithRawResponse,
AsyncJobsWithRawResponse,
JobsWithStreamingResponse,
AsyncJobsWithStreamingResponse,
)
from ..._resource import SyncAPIResource, AsyncAPIResource
from .alpha.alpha import (
Alpha,
AsyncAlpha,
AlphaWithRawResponse,
AsyncAlphaWithRawResponse,
AlphaWithStreamingResponse,
AsyncAlphaWithStreamingResponse,
)
from .checkpoints.checkpoints import (
Checkpoints,
AsyncCheckpoints,
CheckpointsWithRawResponse,
AsyncCheckpointsWithRawResponse,
CheckpointsWithStreamingResponse,
AsyncCheckpointsWithStreamingResponse,
)
__all__ = ["FineTuning", "AsyncFineTuning"]
class FineTuning(SyncAPIResource):
    """Top-level synchronous resource grouping the fine-tuning API surfaces."""

    @cached_property
    def jobs(self) -> Jobs:
        """Fine-tuning job operations, constructed lazily on first access."""
        return Jobs(self._client)

    @cached_property
    def checkpoints(self) -> Checkpoints:
        """Operations on fine-tuned model checkpoints."""
        return Checkpoints(self._client)

    @cached_property
    def alpha(self) -> Alpha:
        """Alpha fine-tuning endpoints (served under `/fine_tuning/alpha/...`)."""
        return Alpha(self._client)

    @cached_property
    def with_raw_response(self) -> FineTuningWithRawResponse:
        """
        Prefix variant of this resource whose HTTP method calls return the raw
        response object instead of the parsed content.

        See https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return FineTuningWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> FineTuningWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        See https://www.github.com/openai/openai-python#with_streaming_response
        """
        return FineTuningWithStreamingResponse(self)
class AsyncFineTuning(AsyncAPIResource):
    """Top-level async resource grouping the fine-tuning API surfaces."""

    @cached_property
    def jobs(self) -> AsyncJobs:
        """Fine-tuning job operations, constructed lazily on first access."""
        return AsyncJobs(self._client)

    @cached_property
    def checkpoints(self) -> AsyncCheckpoints:
        """Operations on fine-tuned model checkpoints."""
        return AsyncCheckpoints(self._client)

    @cached_property
    def alpha(self) -> AsyncAlpha:
        """Alpha fine-tuning endpoints (served under `/fine_tuning/alpha/...`)."""
        return AsyncAlpha(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncFineTuningWithRawResponse:
        """
        Prefix variant of this resource whose HTTP method calls return the raw
        response object instead of the parsed content.

        See https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncFineTuningWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncFineTuningWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        See https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncFineTuningWithStreamingResponse(self)
class FineTuningWithRawResponse:
    """Raw-response companion to :class:`FineTuning`, mirroring its sub-resources."""

    def __init__(self, fine_tuning: FineTuning) -> None:
        self._fine_tuning = fine_tuning

    @cached_property
    def jobs(self) -> JobsWithRawResponse:
        return JobsWithRawResponse(self._fine_tuning.jobs)

    @cached_property
    def checkpoints(self) -> CheckpointsWithRawResponse:
        return CheckpointsWithRawResponse(self._fine_tuning.checkpoints)

    @cached_property
    def alpha(self) -> AlphaWithRawResponse:
        return AlphaWithRawResponse(self._fine_tuning.alpha)
class AsyncFineTuningWithRawResponse:
    """Raw-response companion to :class:`AsyncFineTuning`, mirroring its sub-resources."""

    def __init__(self, fine_tuning: AsyncFineTuning) -> None:
        self._fine_tuning = fine_tuning

    @cached_property
    def jobs(self) -> AsyncJobsWithRawResponse:
        return AsyncJobsWithRawResponse(self._fine_tuning.jobs)

    @cached_property
    def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
        return AsyncCheckpointsWithRawResponse(self._fine_tuning.checkpoints)

    @cached_property
    def alpha(self) -> AsyncAlphaWithRawResponse:
        return AsyncAlphaWithRawResponse(self._fine_tuning.alpha)
class FineTuningWithStreamingResponse:
    """Streaming-response companion to :class:`FineTuning`, mirroring its sub-resources."""

    def __init__(self, fine_tuning: FineTuning) -> None:
        self._fine_tuning = fine_tuning

    @cached_property
    def jobs(self) -> JobsWithStreamingResponse:
        return JobsWithStreamingResponse(self._fine_tuning.jobs)

    @cached_property
    def checkpoints(self) -> CheckpointsWithStreamingResponse:
        return CheckpointsWithStreamingResponse(self._fine_tuning.checkpoints)

    @cached_property
    def alpha(self) -> AlphaWithStreamingResponse:
        return AlphaWithStreamingResponse(self._fine_tuning.alpha)
class AsyncFineTuningWithStreamingResponse:
    """Streaming-response companion to :class:`AsyncFineTuning`, mirroring its sub-resources."""

    def __init__(self, fine_tuning: AsyncFineTuning) -> None:
        self._fine_tuning = fine_tuning

    @cached_property
    def jobs(self) -> AsyncJobsWithStreamingResponse:
        return AsyncJobsWithStreamingResponse(self._fine_tuning.jobs)

    @cached_property
    def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
        return AsyncCheckpointsWithStreamingResponse(self._fine_tuning.checkpoints)

    @cached_property
    def alpha(self) -> AsyncAlphaWithStreamingResponse:
        return AsyncAlphaWithStreamingResponse(self._fine_tuning.alpha)

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .jobs import (
Jobs,
AsyncJobs,
JobsWithRawResponse,
AsyncJobsWithRawResponse,
JobsWithStreamingResponse,
AsyncJobsWithStreamingResponse,
)
from .checkpoints import (
Checkpoints,
AsyncCheckpoints,
CheckpointsWithRawResponse,
AsyncCheckpointsWithRawResponse,
CheckpointsWithStreamingResponse,
AsyncCheckpointsWithStreamingResponse,
)
# Public re-export surface of this sub-package: the jobs and job-checkpoints
# resources plus their raw/streaming response wrapper classes.
__all__ = [
    "Checkpoints",
    "AsyncCheckpoints",
    "CheckpointsWithRawResponse",
    "AsyncCheckpointsWithRawResponse",
    "CheckpointsWithStreamingResponse",
    "AsyncCheckpointsWithStreamingResponse",
    "Jobs",
    "AsyncJobs",
    "JobsWithRawResponse",
    "AsyncJobsWithRawResponse",
    "JobsWithStreamingResponse",
    "AsyncJobsWithStreamingResponse",
]

View File

@@ -0,0 +1,199 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import (
AsyncPaginator,
make_request_options,
)
from ....types.fine_tuning.jobs import checkpoint_list_params
from ....types.fine_tuning.jobs.fine_tuning_job_checkpoint import FineTuningJobCheckpoint
__all__ = ["Checkpoints", "AsyncCheckpoints"]
class Checkpoints(SyncAPIResource):
    """Synchronous access to the checkpoints produced by a fine-tuning job."""
    @cached_property
    def with_raw_response(self) -> CheckpointsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return CheckpointsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> CheckpointsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return CheckpointsWithStreamingResponse(self)
    def list(
        self,
        fine_tuning_job_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[FineTuningJobCheckpoint]:
        """
        List checkpoints for a fine-tuning job.
        Args:
          after: Identifier for the last checkpoint ID from the previous pagination request.
          limit: Number of checkpoints to retrieve.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The job ID is interpolated into the URL path, so it must be non-empty.
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        # Cursor-paginated GET; `after`/`limit` travel in the query string.
        return self._get_api_list(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
            page=SyncCursorPage[FineTuningJobCheckpoint],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                    },
                    checkpoint_list_params.CheckpointListParams,
                ),
            ),
            model=FineTuningJobCheckpoint,
        )
class AsyncCheckpoints(AsyncAPIResource):
    """Async access to the checkpoints produced by a fine-tuning job."""
    @cached_property
    def with_raw_response(self) -> AsyncCheckpointsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCheckpointsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> AsyncCheckpointsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCheckpointsWithStreamingResponse(self)
    # Plain `def` (not `async def`): it returns an AsyncPaginator and awaits nothing
    # itself, which is also why the synchronous `maybe_transform` is used below.
    def list(
        self,
        fine_tuning_job_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[FineTuningJobCheckpoint, AsyncCursorPage[FineTuningJobCheckpoint]]:
        """
        List checkpoints for a fine-tuning job.
        Args:
          after: Identifier for the last checkpoint ID from the previous pagination request.
          limit: Number of checkpoints to retrieve.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The job ID is interpolated into the URL path, so it must be non-empty.
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        # Cursor-paginated GET; `after`/`limit` travel in the query string.
        return self._get_api_list(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
            page=AsyncCursorPage[FineTuningJobCheckpoint],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                    },
                    checkpoint_list_params.CheckpointListParams,
                ),
            ),
            model=FineTuningJobCheckpoint,
        )
class CheckpointsWithRawResponse:
    """View of :class:`Checkpoints` whose `list` returns the raw response object."""

    def __init__(self, checkpoints: Checkpoints) -> None:
        self._checkpoints = checkpoints
        self.list = _legacy_response.to_raw_response_wrapper(checkpoints.list)
class AsyncCheckpointsWithRawResponse:
    """Async counterpart of :class:`CheckpointsWithRawResponse`."""

    def __init__(self, checkpoints: AsyncCheckpoints) -> None:
        self._checkpoints = checkpoints
        self.list = _legacy_response.async_to_raw_response_wrapper(checkpoints.list)
class CheckpointsWithStreamingResponse:
    """View of :class:`Checkpoints` whose `list` returns a streamed response."""

    def __init__(self, checkpoints: Checkpoints) -> None:
        self._checkpoints = checkpoints
        self.list = to_streamed_response_wrapper(checkpoints.list)
class AsyncCheckpointsWithStreamingResponse:
    """Async counterpart of :class:`CheckpointsWithStreamingResponse`."""

    def __init__(self, checkpoints: AsyncCheckpoints) -> None:
        self._checkpoints = checkpoints
        self.list = async_to_streamed_response_wrapper(checkpoints.list)

View File

@@ -0,0 +1,918 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from .checkpoints import (
Checkpoints,
AsyncCheckpoints,
CheckpointsWithRawResponse,
AsyncCheckpointsWithRawResponse,
CheckpointsWithStreamingResponse,
AsyncCheckpointsWithStreamingResponse,
)
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import (
AsyncPaginator,
make_request_options,
)
from ....types.fine_tuning import job_list_params, job_create_params, job_list_events_params
from ....types.shared_params.metadata import Metadata
from ....types.fine_tuning.fine_tuning_job import FineTuningJob
from ....types.fine_tuning.fine_tuning_job_event import FineTuningJobEvent
__all__ = ["Jobs", "AsyncJobs"]
class Jobs(SyncAPIResource):
    """Synchronous resource for the `/fine_tuning/jobs` API endpoints.

    Provides create/retrieve/list/cancel/pause/resume operations plus event
    listing, and exposes the `checkpoints` sub-resource.
    """

    @cached_property
    def checkpoints(self) -> Checkpoints:
        # Sub-resource for listing checkpoints of a fine-tuning job.
        return Checkpoints(self._client)

    @cached_property
    def with_raw_response(self) -> JobsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return JobsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> JobsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return JobsWithStreamingResponse(self)

    def create(
        self,
        *,
        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
        training_file: str,
        hyperparameters: job_create_params.Hyperparameters | Omit = omit,
        integrations: Optional[Iterable[job_create_params.Integration]] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        method: job_create_params.Method | Omit = omit,
        seed: Optional[int] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        validation_file: Optional[str] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FineTuningJob:
        """
        Creates a fine-tuning job which begins the process of creating a new model from
        a given dataset.

        Response includes details of the enqueued job including job status and the name
        of the fine-tuned models once complete.

        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)

        Args:
          model: The name of the model to fine-tune. You can select one of the
              [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).

          training_file: The ID of an uploaded file that contains training data.
              See [upload file](https://platform.openai.com/docs/api-reference/files/create)
              for how to upload a file.
              Your dataset must be formatted as a JSONL file. Additionally, you must upload
              your file with the purpose `fine-tune`.
              The contents of the file should differ depending on if the model uses the
              [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input),
              [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
              format, or if the fine-tuning method uses the
              [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
              format.
              See the
              [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
              for more details.

          hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
              in favor of `method`, and should be passed in under the `method` parameter.

          integrations: A list of integrations to enable for your fine-tuning job.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          method: The method used for fine-tuning.

          seed: The seed controls the reproducibility of the job. Passing in the same seed and
              job parameters should produce the same results, but may differ in rare cases. If
              a seed is not specified, one will be generated for you.

          suffix: A string of up to 64 characters that will be added to your fine-tuned model
              name.
              For example, a `suffix` of "custom-model-name" would produce a model name like
              `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.

          validation_file: The ID of an uploaded file that contains validation data.
              If you provide this file, the data is used to generate validation metrics
              periodically during fine-tuning. These metrics can be viewed in the fine-tuning
              results file. The same data should not be present in both train and validation
              files.
              Your dataset must be formatted as a JSONL file. You must upload your file with
              the purpose `fine-tune`.
              See the
              [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
              for more details.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Omitted values are dropped by maybe_transform before serialization.
        return self._post(
            "/fine_tuning/jobs",
            body=maybe_transform(
                {
                    "model": model,
                    "training_file": training_file,
                    "hyperparameters": hyperparameters,
                    "integrations": integrations,
                    "metadata": metadata,
                    "method": method,
                    "seed": seed,
                    "suffix": suffix,
                    "validation_file": validation_file,
                },
                job_create_params.JobCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def retrieve(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FineTuningJob:
        """
        Get info about a fine-tuning job.

        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would hit the wrong URL.
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        return self._get(
            f"/fine_tuning/jobs/{fine_tuning_job_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        metadata: Optional[Dict[str, str]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[FineTuningJob]:
        """
        List your organization's fine-tuning jobs

        Args:
          after: Identifier for the last job from the previous pagination request.

          limit: Number of fine-tuning jobs to retrieve.

          metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
              Alternatively, set `metadata=null` to indicate no metadata.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Returns a lazily-fetched cursor page; iteration drives pagination.
        return self._get_api_list(
            "/fine_tuning/jobs",
            page=SyncCursorPage[FineTuningJob],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "metadata": metadata,
                    },
                    job_list_params.JobListParams,
                ),
            ),
            model=FineTuningJob,
        )

    def cancel(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FineTuningJob:
        """
        Immediately cancel a fine-tune job.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        return self._post(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def list_events(
        self,
        fine_tuning_job_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[FineTuningJobEvent]:
        """
        Get status updates for a fine-tuning job.

        Args:
          after: Identifier for the last event from the previous pagination request.

          limit: Number of events to retrieve.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        return self._get_api_list(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
            page=SyncCursorPage[FineTuningJobEvent],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                    },
                    job_list_events_params.JobListEventsParams,
                ),
            ),
            model=FineTuningJobEvent,
        )

    def pause(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FineTuningJob:
        """
        Pause a fine-tune job.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        return self._post(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/pause",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def resume(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FineTuningJob:
        """
        Resume a fine-tune job.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        return self._post(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/resume",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )
class AsyncJobs(AsyncAPIResource):
    """Asynchronous resource for the `/fine_tuning/jobs` API endpoints.

    Mirrors `Jobs` but every HTTP-issuing method is a coroutine.
    """

    @cached_property
    def checkpoints(self) -> AsyncCheckpoints:
        # Sub-resource for listing checkpoints of a fine-tuning job.
        return AsyncCheckpoints(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncJobsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncJobsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncJobsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncJobsWithStreamingResponse(self)

    async def create(
        self,
        *,
        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
        training_file: str,
        hyperparameters: job_create_params.Hyperparameters | Omit = omit,
        integrations: Optional[Iterable[job_create_params.Integration]] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        method: job_create_params.Method | Omit = omit,
        seed: Optional[int] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        validation_file: Optional[str] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FineTuningJob:
        """
        Creates a fine-tuning job which begins the process of creating a new model from
        a given dataset.

        Response includes details of the enqueued job including job status and the name
        of the fine-tuned models once complete.

        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)

        Args:
          model: The name of the model to fine-tune. You can select one of the
              [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).

          training_file: The ID of an uploaded file that contains training data.
              See [upload file](https://platform.openai.com/docs/api-reference/files/create)
              for how to upload a file.
              Your dataset must be formatted as a JSONL file. Additionally, you must upload
              your file with the purpose `fine-tune`.
              The contents of the file should differ depending on if the model uses the
              [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input),
              [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
              format, or if the fine-tuning method uses the
              [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
              format.
              See the
              [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
              for more details.

          hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
              in favor of `method`, and should be passed in under the `method` parameter.

          integrations: A list of integrations to enable for your fine-tuning job.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          method: The method used for fine-tuning.

          seed: The seed controls the reproducibility of the job. Passing in the same seed and
              job parameters should produce the same results, but may differ in rare cases. If
              a seed is not specified, one will be generated for you.

          suffix: A string of up to 64 characters that will be added to your fine-tuned model
              name.
              For example, a `suffix` of "custom-model-name" would produce a model name like
              `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.

          validation_file: The ID of an uploaded file that contains validation data.
              If you provide this file, the data is used to generate validation metrics
              periodically during fine-tuning. These metrics can be viewed in the fine-tuning
              results file. The same data should not be present in both train and validation
              files.
              Your dataset must be formatted as a JSONL file. You must upload your file with
              the purpose `fine-tune`.
              See the
              [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
              for more details.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Omitted values are dropped by async_maybe_transform before serialization.
        return await self._post(
            "/fine_tuning/jobs",
            body=await async_maybe_transform(
                {
                    "model": model,
                    "training_file": training_file,
                    "hyperparameters": hyperparameters,
                    "integrations": integrations,
                    "metadata": metadata,
                    "method": method,
                    "seed": seed,
                    "suffix": suffix,
                    "validation_file": validation_file,
                },
                job_create_params.JobCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    async def retrieve(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FineTuningJob:
        """
        Get info about a fine-tuning job.

        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would hit the wrong URL.
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        return await self._get(
            f"/fine_tuning/jobs/{fine_tuning_job_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        metadata: Optional[Dict[str, str]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[FineTuningJob, AsyncCursorPage[FineTuningJob]]:
        """
        List your organization's fine-tuning jobs

        Args:
          after: Identifier for the last job from the previous pagination request.

          limit: Number of fine-tuning jobs to retrieve.

          metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
              Alternatively, set `metadata=null` to indicate no metadata.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Not a coroutine: returns an AsyncPaginator that is awaited/iterated by the caller.
        return self._get_api_list(
            "/fine_tuning/jobs",
            page=AsyncCursorPage[FineTuningJob],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "metadata": metadata,
                    },
                    job_list_params.JobListParams,
                ),
            ),
            model=FineTuningJob,
        )

    async def cancel(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FineTuningJob:
        """
        Immediately cancel a fine-tune job.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        return await self._post(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def list_events(
        self,
        fine_tuning_job_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[FineTuningJobEvent, AsyncCursorPage[FineTuningJobEvent]]:
        """
        Get status updates for a fine-tuning job.

        Args:
          after: Identifier for the last event from the previous pagination request.

          limit: Number of events to retrieve.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        return self._get_api_list(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
            page=AsyncCursorPage[FineTuningJobEvent],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                    },
                    job_list_events_params.JobListEventsParams,
                ),
            ),
            model=FineTuningJobEvent,
        )

    async def pause(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FineTuningJob:
        """
        Pause a fine-tune job.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        return await self._post(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/pause",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    async def resume(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FineTuningJob:
        """
        Resume a fine-tune job.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
        return await self._post(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/resume",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )
class JobsWithRawResponse:
    """Raw-response view over `Jobs`: each method returns the unparsed HTTP response."""

    def __init__(self, jobs: Jobs) -> None:
        self._jobs = jobs
        # Bind the wrapper once; every method below is wrapped identically.
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(jobs.create)
        self.retrieve = wrap(jobs.retrieve)
        self.list = wrap(jobs.list)
        self.cancel = wrap(jobs.cancel)
        self.list_events = wrap(jobs.list_events)
        self.pause = wrap(jobs.pause)
        self.resume = wrap(jobs.resume)

    @cached_property
    def checkpoints(self) -> CheckpointsWithRawResponse:
        # Lazily build the wrapped sub-resource view.
        return CheckpointsWithRawResponse(self._jobs.checkpoints)
class AsyncJobsWithRawResponse:
    """Raw-response view over `AsyncJobs`: each method returns the unparsed HTTP response."""

    def __init__(self, jobs: AsyncJobs) -> None:
        self._jobs = jobs
        # Bind the wrapper once; every method below is wrapped identically.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(jobs.create)
        self.retrieve = wrap(jobs.retrieve)
        self.list = wrap(jobs.list)
        self.cancel = wrap(jobs.cancel)
        self.list_events = wrap(jobs.list_events)
        self.pause = wrap(jobs.pause)
        self.resume = wrap(jobs.resume)

    @cached_property
    def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
        # Lazily build the wrapped sub-resource view.
        return AsyncCheckpointsWithRawResponse(self._jobs.checkpoints)
class JobsWithStreamingResponse:
    """Streaming-response view over `Jobs`: response bodies are not eagerly read."""

    def __init__(self, jobs: Jobs) -> None:
        self._jobs = jobs
        # Bind the wrapper once; every method below is wrapped identically.
        wrap = to_streamed_response_wrapper
        self.create = wrap(jobs.create)
        self.retrieve = wrap(jobs.retrieve)
        self.list = wrap(jobs.list)
        self.cancel = wrap(jobs.cancel)
        self.list_events = wrap(jobs.list_events)
        self.pause = wrap(jobs.pause)
        self.resume = wrap(jobs.resume)

    @cached_property
    def checkpoints(self) -> CheckpointsWithStreamingResponse:
        # Lazily build the wrapped sub-resource view.
        return CheckpointsWithStreamingResponse(self._jobs.checkpoints)
class AsyncJobsWithStreamingResponse:
    """Async streaming-response view over `AsyncJobs`: response bodies are not eagerly read."""

    def __init__(self, jobs: AsyncJobs) -> None:
        self._jobs = jobs
        # Bind the wrapper once; every method below is wrapped identically.
        wrap = async_to_streamed_response_wrapper
        self.create = wrap(jobs.create)
        self.retrieve = wrap(jobs.retrieve)
        self.list = wrap(jobs.list)
        self.cancel = wrap(jobs.cancel)
        self.list_events = wrap(jobs.list_events)
        self.pause = wrap(jobs.pause)
        self.resume = wrap(jobs.resume)

    @cached_property
    def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
        # Lazily build the wrapped sub-resource view.
        return AsyncCheckpointsWithStreamingResponse(self._jobs.checkpoints)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,306 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .. import _legacy_response
from .._types import Body, Query, Headers, NotGiven, not_given
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..pagination import SyncPage, AsyncPage
from ..types.model import Model
from .._base_client import (
AsyncPaginator,
make_request_options,
)
from ..types.model_deleted import ModelDeleted
__all__ = ["Models", "AsyncModels"]
class Models(SyncAPIResource):
    """Synchronous resource for the `/models` API endpoints."""

    @cached_property
    def with_raw_response(self) -> ModelsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ModelsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ModelsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ModelsWithStreamingResponse(self)

    def retrieve(
        self,
        model: str,
        *,
        # Extra request parameters not exposed as first-class kwargs; these take
        # precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Model:
        """
        Retrieves a model instance, providing basic information about the model such as
        the owner and permissioning.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Reject an empty id early; it would otherwise request the wrong URL.
        if not model:
            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._get(f"/models/{model}", options=opts, cast_to=Model)

    def list(
        self,
        *,
        # Extra request parameters not exposed as first-class kwargs; these take
        # precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncPage[Model]:
        """
        Lists the currently available models, and provides basic information about each
        one such as the owner and availability.
        """
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._get_api_list("/models", page=SyncPage[Model], options=opts, model=Model)

    def delete(
        self,
        model: str,
        *,
        # Extra request parameters not exposed as first-class kwargs; these take
        # precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ModelDeleted:
        """Delete a fine-tuned model.

        You must have the Owner role in your organization to
        delete a model.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Reject an empty id early; it would otherwise request the wrong URL.
        if not model:
            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._delete(f"/models/{model}", options=opts, cast_to=ModelDeleted)
class AsyncModels(AsyncAPIResource):
    """Asynchronous access to the `/models` endpoints."""

    @cached_property
    def with_raw_response(self) -> AsyncModelsWithRawResponse:
        """Prefix for any method call to receive the raw response object
        instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncModelsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncModelsWithStreamingResponse:
        """Variant of `.with_raw_response` that does not eagerly read the
        response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncModelsWithStreamingResponse(self)

    async def retrieve(
        self,
        model: str,
        *,
        # Additional request parameters forwarded verbatim to the HTTP layer;
        # they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Model:
        """Fetch a single model instance.

        Provides basic information about the model such as the owner and
        permissioning.

        Args:
          model: Identifier of the model to retrieve.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty id would produce a malformed URL path.
        if not model:
            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._get(
            f"/models/{model}",
            options=request_options,
            cast_to=Model,
        )

    def list(
        self,
        *,
        # Additional request parameters forwarded verbatim to the HTTP layer;
        # they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[Model, AsyncPage[Model]]:
        """List every model the caller can access.

        Each page entry is a ``Model`` carrying basic metadata such as the
        owner and availability.
        """
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._get_api_list(
            "/models",
            page=AsyncPage[Model],
            options=request_options,
            model=Model,
        )

    async def delete(
        self,
        model: str,
        *,
        # Additional request parameters forwarded verbatim to the HTTP layer;
        # they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ModelDeleted:
        """Delete a fine-tuned model.

        The caller must hold the Owner role in the organization.

        Args:
          model: Identifier of the fine-tuned model to remove.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty id would produce a malformed URL path.
        if not model:
            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._delete(
            f"/models/{model}",
            options=request_options,
            cast_to=ModelDeleted,
        )
class ModelsWithRawResponse:
    """Wraps each `Models` method so calls return the raw HTTP response."""

    def __init__(self, models: Models) -> None:
        self._models = models
        # Re-expose every endpoint method through the raw-response wrapper.
        self.retrieve = _legacy_response.to_raw_response_wrapper(models.retrieve)
        self.list = _legacy_response.to_raw_response_wrapper(models.list)
        self.delete = _legacy_response.to_raw_response_wrapper(models.delete)
class AsyncModelsWithRawResponse:
    """Wraps each `AsyncModels` method so calls return the raw HTTP response."""

    def __init__(self, models: AsyncModels) -> None:
        self._models = models
        # Re-expose every endpoint method through the async raw-response wrapper.
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(models.retrieve)
        self.list = _legacy_response.async_to_raw_response_wrapper(models.list)
        self.delete = _legacy_response.async_to_raw_response_wrapper(models.delete)
class ModelsWithStreamingResponse:
    """Wraps each `Models` method so responses stream instead of being eagerly read."""

    def __init__(self, models: Models) -> None:
        self._models = models
        # Re-expose every endpoint method through the streaming-response wrapper.
        self.retrieve = to_streamed_response_wrapper(models.retrieve)
        self.list = to_streamed_response_wrapper(models.list)
        self.delete = to_streamed_response_wrapper(models.delete)
class AsyncModelsWithStreamingResponse:
    """Wraps each `AsyncModels` method so responses stream instead of being eagerly read."""

    def __init__(self, models: AsyncModels) -> None:
        self._models = models
        # Re-expose every endpoint method through the async streaming-response wrapper.
        self.retrieve = async_to_streamed_response_wrapper(models.retrieve)
        self.list = async_to_streamed_response_wrapper(models.list)
        self.delete = async_to_streamed_response_wrapper(models.delete)

View File

@@ -0,0 +1,197 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable
import httpx
from .. import _legacy_response
from ..types import moderation_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._base_client import make_request_options
from ..types.moderation_model import ModerationModel
from ..types.moderation_create_response import ModerationCreateResponse
from ..types.moderation_multi_modal_input_param import ModerationMultiModalInputParam
__all__ = ["Moderations", "AsyncModerations"]
class Moderations(SyncAPIResource):
    """Synchronous access to the `/moderations` endpoint."""

    @cached_property
    def with_raw_response(self) -> ModerationsWithRawResponse:
        """Prefix for any method call to receive the raw response object
        instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ModerationsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ModerationsWithStreamingResponse:
        """Variant of `.with_raw_response` that does not eagerly read the
        response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ModerationsWithStreamingResponse(self)

    def create(
        self,
        *,
        input: Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]],
        model: Union[str, ModerationModel] | Omit = omit,
        # Additional request parameters forwarded verbatim to the HTTP layer;
        # they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ModerationCreateResponse:
        """Classify whether text and/or image inputs are potentially harmful.

        Learn more in the
        [moderation guide](https://platform.openai.com/docs/guides/moderation).

        Args:
          input: A single string, an array of strings, or an array of
              multi-modal input objects to classify.

          model: The content moderation model to use; available models are
              listed [here](https://platform.openai.com/docs/models#moderation).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        payload = maybe_transform(
            {"input": input, "model": model},
            moderation_create_params.ModerationCreateParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            "/moderations",
            body=payload,
            options=request_options,
            cast_to=ModerationCreateResponse,
        )
class AsyncModerations(AsyncAPIResource):
    """Asynchronous access to the `/moderations` endpoint."""

    @cached_property
    def with_raw_response(self) -> AsyncModerationsWithRawResponse:
        """Prefix for any method call to receive the raw response object
        instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncModerationsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncModerationsWithStreamingResponse:
        """Variant of `.with_raw_response` that does not eagerly read the
        response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncModerationsWithStreamingResponse(self)

    async def create(
        self,
        *,
        input: Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]],
        model: Union[str, ModerationModel] | Omit = omit,
        # Additional request parameters forwarded verbatim to the HTTP layer;
        # they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ModerationCreateResponse:
        """Classify whether text and/or image inputs are potentially harmful.

        Learn more in the
        [moderation guide](https://platform.openai.com/docs/guides/moderation).

        Args:
          input: A single string, an array of strings, or an array of
              multi-modal input objects to classify.

          model: The content moderation model to use; available models are
              listed [here](https://platform.openai.com/docs/models#moderation).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        payload = await async_maybe_transform(
            {"input": input, "model": model},
            moderation_create_params.ModerationCreateParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._post(
            "/moderations",
            body=payload,
            options=request_options,
            cast_to=ModerationCreateResponse,
        )
class ModerationsWithRawResponse:
    """Wraps each `Moderations` method so calls return the raw HTTP response."""

    def __init__(self, moderations: Moderations) -> None:
        self._moderations = moderations
        # Re-expose the endpoint method through the raw-response wrapper.
        self.create = _legacy_response.to_raw_response_wrapper(moderations.create)
class AsyncModerationsWithRawResponse:
    """Wraps each `AsyncModerations` method so calls return the raw HTTP response."""

    def __init__(self, moderations: AsyncModerations) -> None:
        self._moderations = moderations
        # Re-expose the endpoint method through the async raw-response wrapper.
        self.create = _legacy_response.async_to_raw_response_wrapper(moderations.create)
class ModerationsWithStreamingResponse:
    """Wraps each `Moderations` method so responses stream instead of being eagerly read."""

    def __init__(self, moderations: Moderations) -> None:
        self._moderations = moderations
        # Re-expose the endpoint method through the streaming-response wrapper.
        self.create = to_streamed_response_wrapper(moderations.create)
class AsyncModerationsWithStreamingResponse:
    """Wraps each `AsyncModerations` method so responses stream instead of being eagerly read."""

    def __init__(self, moderations: AsyncModerations) -> None:
        self._moderations = moderations
        # Re-expose the endpoint method through the async streaming-response wrapper.
        self.create = async_to_streamed_response_wrapper(moderations.create)

View File

@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .calls import (
Calls,
AsyncCalls,
CallsWithRawResponse,
AsyncCallsWithRawResponse,
CallsWithStreamingResponse,
AsyncCallsWithStreamingResponse,
)
from .realtime import (
Realtime,
AsyncRealtime,
RealtimeWithRawResponse,
AsyncRealtimeWithRawResponse,
RealtimeWithStreamingResponse,
AsyncRealtimeWithStreamingResponse,
)
from .client_secrets import (
ClientSecrets,
AsyncClientSecrets,
ClientSecretsWithRawResponse,
AsyncClientSecretsWithRawResponse,
ClientSecretsWithStreamingResponse,
AsyncClientSecretsWithStreamingResponse,
)
# Public API of the realtime resource package: the sync/async resource classes
# (client secrets, calls, realtime) plus their raw- and streaming-response
# wrapper variants re-exported above.
__all__ = [
    "ClientSecrets",
    "AsyncClientSecrets",
    "ClientSecretsWithRawResponse",
    "AsyncClientSecretsWithRawResponse",
    "ClientSecretsWithStreamingResponse",
    "AsyncClientSecretsWithStreamingResponse",
    "Calls",
    "AsyncCalls",
    "CallsWithRawResponse",
    "AsyncCallsWithRawResponse",
    "CallsWithStreamingResponse",
    "AsyncCallsWithStreamingResponse",
    "Realtime",
    "AsyncRealtime",
    "RealtimeWithRawResponse",
    "AsyncRealtimeWithRawResponse",
    "RealtimeWithStreamingResponse",
    "AsyncRealtimeWithStreamingResponse",
]

View File

@@ -0,0 +1,778 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Optional
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_streamed_response_wrapper,
async_to_streamed_response_wrapper,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ..._base_client import make_request_options
from ...types.realtime import (
call_refer_params,
call_accept_params,
call_create_params,
call_reject_params,
)
from ...types.responses.response_prompt_param import ResponsePromptParam
from ...types.realtime.realtime_truncation_param import RealtimeTruncationParam
from ...types.realtime.realtime_audio_config_param import RealtimeAudioConfigParam
from ...types.realtime.realtime_tools_config_param import RealtimeToolsConfigParam
from ...types.realtime.realtime_tracing_config_param import RealtimeTracingConfigParam
from ...types.realtime.realtime_tool_choice_config_param import RealtimeToolChoiceConfigParam
from ...types.realtime.realtime_session_create_request_param import RealtimeSessionCreateRequestParam
__all__ = ["Calls", "AsyncCalls"]
class Calls(SyncAPIResource):
    """Synchronous access to the `/realtime/calls` endpoints."""

    @cached_property
    def with_raw_response(self) -> CallsWithRawResponse:
        """Prefix for any method call to receive the raw response object
        instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return CallsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CallsWithStreamingResponse:
        """Variant of `.with_raw_response` that does not eagerly read the
        response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return CallsWithStreamingResponse(self)

    def create(
        self,
        *,
        sdp: str,
        session: RealtimeSessionCreateRequestParam | Omit = omit,
        # Additional request parameters forwarded verbatim to the HTTP layer;
        # they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """Open a new Realtime API call over WebRTC.

        Returns the SDP answer needed to complete the peer connection.

        Args:
          sdp: WebRTC Session Description Protocol (SDP) offer generated by the caller.

          session: Realtime session object configuration.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The endpoint answers with raw SDP, not JSON.
        extra_headers = {"Accept": "application/sdp", **(extra_headers or {})}
        payload = maybe_transform(
            {"sdp": sdp, "session": session},
            call_create_params.CallCreateParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            "/realtime/calls",
            body=payload,
            options=request_options,
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )

    def accept(
        self,
        call_id: str,
        *,
        type: Literal["realtime"],
        audio: RealtimeAudioConfigParam | Omit = omit,
        include: List[Literal["item.input_audio_transcription.logprobs"]] | Omit = omit,
        instructions: str | Omit = omit,
        max_output_tokens: Union[int, Literal["inf"]] | Omit = omit,
        model: Union[
            str,
            Literal[
                "gpt-realtime",
                "gpt-realtime-2025-08-28",
                "gpt-4o-realtime-preview",
                "gpt-4o-realtime-preview-2024-10-01",
                "gpt-4o-realtime-preview-2024-12-17",
                "gpt-4o-realtime-preview-2025-06-03",
                "gpt-4o-mini-realtime-preview",
                "gpt-4o-mini-realtime-preview-2024-12-17",
                "gpt-realtime-mini",
                "gpt-realtime-mini-2025-10-06",
                "gpt-realtime-mini-2025-12-15",
                "gpt-audio-mini",
                "gpt-audio-mini-2025-10-06",
                "gpt-audio-mini-2025-12-15",
            ],
        ]
        | Omit = omit,
        output_modalities: List[Literal["text", "audio"]] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        tool_choice: RealtimeToolChoiceConfigParam | Omit = omit,
        tools: RealtimeToolsConfigParam | Omit = omit,
        tracing: Optional[RealtimeTracingConfigParam] | Omit = omit,
        truncation: RealtimeTruncationParam | Omit = omit,
        # Additional request parameters forwarded verbatim to the HTTP layer;
        # they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """Accept an incoming SIP call and configure its realtime session.

        Args:
          type: The type of session to create. Always `realtime` for the Realtime API.

          audio: Configuration for input and output audio.

          include: Additional fields to include in server outputs.
              `item.input_audio_transcription.logprobs`: Include logprobs for
              input audio transcription.

          instructions: Default system instructions (system message) prepended
              to model calls, guiding response content, format and audio
              behavior. Guidance only — not guaranteed to be followed. When
              unset, server defaults apply (visible in the `session.created`
              event at the start of the session).

          max_output_tokens: Maximum output tokens for a single assistant
              response, inclusive of tool calls: an integer between 1 and
              4096, or `inf` (the default) for the model maximum.

          model: The Realtime model used for this session.

          output_modalities: Modalities the model can respond with. Defaults
              to `["audio"]` (audio plus transcript); `["text"]` restricts
              responses to text. `text` and `audio` cannot both be requested.

          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).

          tool_choice: How the model chooses tools: one of the string modes,
              or force a specific function/MCP tool.

          tools: Tools available to the model.

          tracing: Session trace configuration for the
              [Traces Dashboard](/logs?api=traces). `auto` creates a trace
              with default workflow name, group id and metadata; null
              disables tracing. Cannot be modified once enabled for a session.

          truncation: Behavior when the conversation exceeds the model's input
              token limit: by default the oldest messages are dropped from
              context. A lower max-token limit controls usage and cost;
              retaining only a fraction of the context improves cache rate;
              disabling truncation makes the server error instead.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty id would produce a malformed URL path.
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        payload = maybe_transform(
            {
                "type": type,
                "audio": audio,
                "include": include,
                "instructions": instructions,
                "max_output_tokens": max_output_tokens,
                "model": model,
                "output_modalities": output_modalities,
                "prompt": prompt,
                "tool_choice": tool_choice,
                "tools": tools,
                "tracing": tracing,
                "truncation": truncation,
            },
            call_accept_params.CallAcceptParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            f"/realtime/calls/{call_id}/accept",
            body=payload,
            options=request_options,
            cast_to=NoneType,
        )

    def hangup(
        self,
        call_id: str,
        *,
        # Additional request parameters forwarded verbatim to the HTTP layer;
        # they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """End an active Realtime API call (SIP- or WebRTC-initiated).

        Args:
          call_id: Identifier of the call to terminate.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty id would produce a malformed URL path.
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            f"/realtime/calls/{call_id}/hangup",
            options=request_options,
            cast_to=NoneType,
        )

    def refer(
        self,
        call_id: str,
        *,
        target_uri: str,
        # Additional request parameters forwarded verbatim to the HTTP layer;
        # they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """Transfer an active SIP call to a new destination (SIP REFER verb).

        Args:
          target_uri: URI that should appear in the SIP Refer-To header.
              Supports values like `tel:+14155550123` or `sip:agent@example.com`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty id would produce a malformed URL path.
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        payload = maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams)
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            f"/realtime/calls/{call_id}/refer",
            body=payload,
            options=request_options,
            cast_to=NoneType,
        )

    def reject(
        self,
        call_id: str,
        *,
        status_code: int | Omit = omit,
        # Additional request parameters forwarded verbatim to the HTTP layer;
        # they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """Decline an incoming SIP call with a SIP status code.

        Args:
          status_code: SIP response code to send back to the caller. Defaults
              to `603` (Decline) when omitted.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty id would produce a malformed URL path.
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        payload = maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams)
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            f"/realtime/calls/{call_id}/reject",
            body=payload,
            options=request_options,
            cast_to=NoneType,
        )
class AsyncCalls(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncCallsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncCallsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncCallsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncCallsWithStreamingResponse(self)
async def create(
self,
*,
sdp: str,
session: RealtimeSessionCreateRequestParam | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Create a new Realtime API call over WebRTC and receive the SDP answer needed to
complete the peer connection.
Args:
sdp: WebRTC Session Description Protocol (SDP) offer generated by the caller.
session: Realtime session object configuration.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"Accept": "application/sdp", **(extra_headers or {})}
return await self._post(
"/realtime/calls",
body=await async_maybe_transform(
{
"sdp": sdp,
"session": session,
},
call_create_params.CallCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
async def accept(
self,
call_id: str,
*,
type: Literal["realtime"],
audio: RealtimeAudioConfigParam | Omit = omit,
include: List[Literal["item.input_audio_transcription.logprobs"]] | Omit = omit,
instructions: str | Omit = omit,
max_output_tokens: Union[int, Literal["inf"]] | Omit = omit,
model: Union[
str,
Literal[
"gpt-realtime",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-realtime-mini-2025-12-15",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-audio-mini-2025-12-15",
],
]
| Omit = omit,
output_modalities: List[Literal["text", "audio"]] | Omit = omit,
prompt: Optional[ResponsePromptParam] | Omit = omit,
tool_choice: RealtimeToolChoiceConfigParam | Omit = omit,
tools: RealtimeToolsConfigParam | Omit = omit,
tracing: Optional[RealtimeTracingConfigParam] | Omit = omit,
truncation: RealtimeTruncationParam | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Accept an incoming SIP call and configure the realtime session that will handle
it.
Args:
type: The type of session to create. Always `realtime` for the Realtime API.
audio: Configuration for input and output audio.
include: Additional fields to include in server outputs.
`item.input_audio_transcription.logprobs`: Include logprobs for input audio
transcription.
instructions: The default system instructions (i.e. system message) prepended to model calls.
This field allows the client to guide the model on desired responses. The model
can be instructed on response content and format, (e.g. "be extremely succinct",
"act friendly", "here are examples of good responses") and on audio behavior
(e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
instructions are not guaranteed to be followed by the model, but they provide
guidance to the model on the desired behavior.
Note that the server sets default instructions which will be used if this field
is not set and are visible in the `session.created` event at the start of the
session.
max_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
model: The Realtime model used for this session.
output_modalities: The set of modalities the model can respond with. It defaults to `["audio"]`,
indicating that the model will respond with audio plus a transcript. `["text"]`
can be used to make the model respond with text only. It is not possible to
request both `text` and `audio` at the same time.
prompt: Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
tool_choice: How the model chooses tools. Provide one of the string modes or force a specific
function/MCP tool.
tools: Tools available to the model.
tracing: Realtime API can write session traces to the
[Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
tracing is enabled for a session, the configuration cannot be modified.
`auto` will create a trace for the session with default values for the workflow
name, group id, and metadata.
truncation: When the number of tokens in a conversation exceeds the model's input token
limit, the conversation be truncated, meaning messages (starting from the
oldest) will not be included in the model's context. A 32k context model with
4,096 max output tokens can only include 28,224 tokens in the context before
truncation occurs.
Clients can configure truncation behavior to truncate with a lower max token
limit, which is an effective way to control token usage and cost.
Truncation will reduce the number of cached tokens on the next turn (busting the
cache), since messages are dropped from the beginning of the context. However,
clients can also configure truncation to retain messages up to a fraction of the
maximum context size, which will reduce the need for future truncations and thus
improve the cache rate.
Truncation can be disabled entirely, which means the server will never truncate
but would instead return an error if the conversation exceeds the model's input
token limit.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not call_id:
raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
f"/realtime/calls/{call_id}/accept",
body=await async_maybe_transform(
{
"type": type,
"audio": audio,
"include": include,
"instructions": instructions,
"max_output_tokens": max_output_tokens,
"model": model,
"output_modalities": output_modalities,
"prompt": prompt,
"tool_choice": tool_choice,
"tools": tools,
"tracing": tracing,
"truncation": truncation,
},
call_accept_params.CallAcceptParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=NoneType,
)
async def hangup(
    self,
    call_id: str,
    *,
    # The keyword arguments below carry request options that are not modeled
    # explicitly; values given here override client-level defaults.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
    """Terminate an active Realtime API call, whether it was initiated over SIP or WebRTC.

    Args:
      call_id: Identifier of the call to end; must be non-empty.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not call_id:
        raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
    # The endpoint returns an empty body, so accept any content type.
    merged_headers = {"Accept": "*/*", **(extra_headers or {})}
    request_options = make_request_options(
        extra_headers=merged_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    return await self._post(
        f"/realtime/calls/{call_id}/hangup",
        options=request_options,
        cast_to=NoneType,
    )
async def refer(
    self,
    call_id: str,
    *,
    target_uri: str,
    # The keyword arguments below carry request options that are not modeled
    # explicitly; values given here override client-level defaults.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
    """Transfer an active SIP call to a new destination using the SIP REFER verb.

    Args:
      call_id: Identifier of the call to transfer; must be non-empty.

      target_uri: URI that should appear in the SIP Refer-To header. Supports values like
          `tel:+14155550123` or `sip:agent@example.com`.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not call_id:
        raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
    # The endpoint returns an empty body, so accept any content type.
    merged_headers = {"Accept": "*/*", **(extra_headers or {})}
    request_body = await async_maybe_transform(
        {"target_uri": target_uri}, call_refer_params.CallReferParams
    )
    return await self._post(
        f"/realtime/calls/{call_id}/refer",
        body=request_body,
        options=make_request_options(
            extra_headers=merged_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        ),
        cast_to=NoneType,
    )
async def reject(
    self,
    call_id: str,
    *,
    status_code: int | Omit = omit,
    # The keyword arguments below carry request options that are not modeled
    # explicitly; values given here override client-level defaults.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
    """Decline an incoming SIP call by returning a SIP status code to the caller.

    Args:
      call_id: Identifier of the call to decline; must be non-empty.

      status_code: SIP response code to send back to the caller. Defaults to `603` (Decline) when
          omitted.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not call_id:
        raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
    # The endpoint returns an empty body, so accept any content type.
    merged_headers = {"Accept": "*/*", **(extra_headers or {})}
    request_body = await async_maybe_transform(
        {"status_code": status_code}, call_reject_params.CallRejectParams
    )
    return await self._post(
        f"/realtime/calls/{call_id}/reject",
        body=request_body,
        options=make_request_options(
            extra_headers=merged_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        ),
        cast_to=NoneType,
    )
class CallsWithRawResponse:
    """View over :class:`Calls` whose methods return raw response objects."""

    def __init__(self, calls: Calls) -> None:
        self._calls = calls
        # Every method is wrapped with the same raw-response adapter.
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(calls.create)
        self.accept = wrap(calls.accept)
        self.hangup = wrap(calls.hangup)
        self.refer = wrap(calls.refer)
        self.reject = wrap(calls.reject)
class AsyncCallsWithRawResponse:
    """View over :class:`AsyncCalls` whose methods return raw response objects."""

    def __init__(self, calls: AsyncCalls) -> None:
        self._calls = calls
        # Every method is wrapped with the same async raw-response adapter.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(calls.create)
        self.accept = wrap(calls.accept)
        self.hangup = wrap(calls.hangup)
        self.refer = wrap(calls.refer)
        self.reject = wrap(calls.reject)
class CallsWithStreamingResponse:
    """View over :class:`Calls` that defers reading response bodies."""

    def __init__(self, calls: Calls) -> None:
        self._calls = calls
        # `create` yields a binary payload, so it needs the custom streamed wrapper.
        self.create = to_custom_streamed_response_wrapper(
            calls.create,
            StreamedBinaryAPIResponse,
        )
        wrap = to_streamed_response_wrapper
        self.accept = wrap(calls.accept)
        self.hangup = wrap(calls.hangup)
        self.refer = wrap(calls.refer)
        self.reject = wrap(calls.reject)
class AsyncCallsWithStreamingResponse:
    """View over :class:`AsyncCalls` that defers reading response bodies."""

    def __init__(self, calls: AsyncCalls) -> None:
        self._calls = calls
        # `create` yields a binary payload, so it needs the custom streamed wrapper.
        self.create = async_to_custom_streamed_response_wrapper(
            calls.create,
            AsyncStreamedBinaryAPIResponse,
        )
        wrap = async_to_streamed_response_wrapper
        self.accept = wrap(calls.accept)
        self.hangup = wrap(calls.hangup)
        self.refer = wrap(calls.refer)
        self.reject = wrap(calls.reject)

View File

@@ -0,0 +1,189 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.realtime import client_secret_create_params
from ...types.realtime.client_secret_create_response import ClientSecretCreateResponse
__all__ = ["ClientSecrets", "AsyncClientSecrets"]
class ClientSecrets(SyncAPIResource):
    """Synchronous resource for minting Realtime client secrets."""

    @cached_property
    def with_raw_response(self) -> ClientSecretsWithRawResponse:
        """Return a view of this resource whose methods return raw response objects.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ClientSecretsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ClientSecretsWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ClientSecretsWithStreamingResponse(self)

    def create(
        self,
        *,
        expires_after: client_secret_create_params.ExpiresAfter | Omit = omit,
        session: client_secret_create_params.Session | Omit = omit,
        # The keyword arguments below carry request options that are not modeled
        # explicitly; values given here override client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ClientSecretCreateResponse:
        """Create a Realtime client secret with an associated session configuration.

        Args:
          expires_after: Configuration for the client secret expiration — the time after
              which the secret can no longer be used to create sessions. Sessions already
              started may continue past that time, and a secret may create multiple
              sessions until it expires.

          session: Session configuration to use for the client secret. Choose either a
              realtime session or a transcription session.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = maybe_transform(
            {"expires_after": expires_after, "session": session},
            client_secret_create_params.ClientSecretCreateParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._post(
            "/realtime/client_secrets",
            body=request_body,
            options=request_options,
            cast_to=ClientSecretCreateResponse,
        )
class AsyncClientSecrets(AsyncAPIResource):
    """Asynchronous resource for minting Realtime client secrets."""

    @cached_property
    def with_raw_response(self) -> AsyncClientSecretsWithRawResponse:
        """Return a view of this resource whose methods return raw response objects.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncClientSecretsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncClientSecretsWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncClientSecretsWithStreamingResponse(self)

    async def create(
        self,
        *,
        expires_after: client_secret_create_params.ExpiresAfter | Omit = omit,
        session: client_secret_create_params.Session | Omit = omit,
        # The keyword arguments below carry request options that are not modeled
        # explicitly; values given here override client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ClientSecretCreateResponse:
        """Create a Realtime client secret with an associated session configuration.

        Args:
          expires_after: Configuration for the client secret expiration — the time after
              which the secret can no longer be used to create sessions. Sessions already
              started may continue past that time, and a secret may create multiple
              sessions until it expires.

          session: Session configuration to use for the client secret. Choose either a
              realtime session or a transcription session.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = await async_maybe_transform(
            {"expires_after": expires_after, "session": session},
            client_secret_create_params.ClientSecretCreateParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._post(
            "/realtime/client_secrets",
            body=request_body,
            options=request_options,
            cast_to=ClientSecretCreateResponse,
        )
class ClientSecretsWithRawResponse:
    """View over :class:`ClientSecrets` whose methods return raw response objects."""

    def __init__(self, client_secrets: ClientSecrets) -> None:
        self._client_secrets = client_secrets
        self.create = _legacy_response.to_raw_response_wrapper(client_secrets.create)
class AsyncClientSecretsWithRawResponse:
    """View over :class:`AsyncClientSecrets` whose methods return raw response objects."""

    def __init__(self, client_secrets: AsyncClientSecrets) -> None:
        self._client_secrets = client_secrets
        self.create = _legacy_response.async_to_raw_response_wrapper(client_secrets.create)
class ClientSecretsWithStreamingResponse:
    """View over :class:`ClientSecrets` that defers reading response bodies."""

    def __init__(self, client_secrets: ClientSecrets) -> None:
        self._client_secrets = client_secrets
        self.create = to_streamed_response_wrapper(client_secrets.create)
class AsyncClientSecretsWithStreamingResponse:
    """View over :class:`AsyncClientSecrets` that defers reading response bodies."""

    def __init__(self, client_secrets: AsyncClientSecrets) -> None:
        self._client_secrets = client_secrets
        self.create = async_to_streamed_response_wrapper(client_secrets.create)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .responses import (
Responses,
AsyncResponses,
ResponsesWithRawResponse,
AsyncResponsesWithRawResponse,
ResponsesWithStreamingResponse,
AsyncResponsesWithStreamingResponse,
)
from .input_items import (
InputItems,
AsyncInputItems,
InputItemsWithRawResponse,
AsyncInputItemsWithRawResponse,
InputItemsWithStreamingResponse,
AsyncInputItemsWithStreamingResponse,
)
from .input_tokens import (
InputTokens,
AsyncInputTokens,
InputTokensWithRawResponse,
AsyncInputTokensWithRawResponse,
InputTokensWithStreamingResponse,
AsyncInputTokensWithStreamingResponse,
)
# Public names re-exported by this package (order preserved for documentation).
__all__ = [
    "InputItems",
    "AsyncInputItems",
    "InputItemsWithRawResponse",
    "AsyncInputItemsWithRawResponse",
    "InputItemsWithStreamingResponse",
    "AsyncInputItemsWithStreamingResponse",
    "InputTokens",
    "AsyncInputTokens",
    "InputTokensWithRawResponse",
    "AsyncInputTokensWithRawResponse",
    "InputTokensWithStreamingResponse",
    "AsyncInputTokensWithStreamingResponse",
    "Responses",
    "AsyncResponses",
    "ResponsesWithRawResponse",
    "AsyncResponsesWithRawResponse",
    "ResponsesWithStreamingResponse",
    "AsyncResponsesWithStreamingResponse",
]

View File

@@ -0,0 +1,226 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Any, List, cast
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncCursorPage, AsyncCursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.responses import input_item_list_params
from ...types.responses.response_item import ResponseItem
from ...types.responses.response_includable import ResponseIncludable
__all__ = ["InputItems", "AsyncInputItems"]
class InputItems(SyncAPIResource):
    """Synchronous resource for listing the input items of a response."""

    @cached_property
    def with_raw_response(self) -> InputItemsWithRawResponse:
        """Return a view of this resource whose methods return raw response objects.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return InputItemsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> InputItemsWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return InputItemsWithStreamingResponse(self)

    def list(
        self,
        response_id: str,
        *,
        after: str | Omit = omit,
        include: List[ResponseIncludable] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # The keyword arguments below carry request options that are not modeled
        # explicitly; values given here override client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[ResponseItem]:
        """Return a paginated list of input items for a given response.

        Args:
          response_id: Identifier of the response whose input items are listed; must be
              non-empty.

          after: An item ID to list items after, used in pagination.

          include: Additional fields to include in the response. See the `include`
              parameter for Response creation for more information.

          limit: A limit on the number of objects to be returned, between 1 and 100
              (default 20).

          order: The order to return the input items in (`asc` or `desc`; default `desc`).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        query_params = maybe_transform(
            {
                "after": after,
                "include": include,
                "limit": limit,
                "order": order,
            },
            input_item_list_params.InputItemListParams,
        )
        return self._get_api_list(
            f"/responses/{response_id}/input_items",
            page=SyncCursorPage[ResponseItem],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=query_params,
            ),
            # `ResponseItem` is a union type, which cannot be passed directly in the type system.
            model=cast(Any, ResponseItem),
        )
class AsyncInputItems(AsyncAPIResource):
    """Asynchronous resource for listing the input items of a response."""

    @cached_property
    def with_raw_response(self) -> AsyncInputItemsWithRawResponse:
        """Return a view of this resource whose methods return raw response objects.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncInputItemsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncInputItemsWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncInputItemsWithStreamingResponse(self)

    def list(
        self,
        response_id: str,
        *,
        after: str | Omit = omit,
        include: List[ResponseIncludable] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # The keyword arguments below carry request options that are not modeled
        # explicitly; values given here override client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[ResponseItem, AsyncCursorPage[ResponseItem]]:
        """Return an async paginator over the input items for a given response.

        Args:
          response_id: Identifier of the response whose input items are listed; must be
              non-empty.

          after: An item ID to list items after, used in pagination.

          include: Additional fields to include in the response. See the `include`
              parameter for Response creation for more information.

          limit: A limit on the number of objects to be returned, between 1 and 100
              (default 20).

          order: The order to return the input items in (`asc` or `desc`; default `desc`).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        query_params = maybe_transform(
            {
                "after": after,
                "include": include,
                "limit": limit,
                "order": order,
            },
            input_item_list_params.InputItemListParams,
        )
        return self._get_api_list(
            f"/responses/{response_id}/input_items",
            page=AsyncCursorPage[ResponseItem],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=query_params,
            ),
            # `ResponseItem` is a union type, which cannot be passed directly in the type system.
            model=cast(Any, ResponseItem),
        )
class InputItemsWithRawResponse:
    """View over :class:`InputItems` whose methods return raw response objects."""

    def __init__(self, input_items: InputItems) -> None:
        self._input_items = input_items
        self.list = _legacy_response.to_raw_response_wrapper(input_items.list)
class AsyncInputItemsWithRawResponse:
    """View over :class:`AsyncInputItems` whose methods return raw response objects."""

    def __init__(self, input_items: AsyncInputItems) -> None:
        self._input_items = input_items
        self.list = _legacy_response.async_to_raw_response_wrapper(input_items.list)
class InputItemsWithStreamingResponse:
    """View over :class:`InputItems` that defers reading response bodies."""

    def __init__(self, input_items: InputItems) -> None:
        self._input_items = input_items
        self.list = to_streamed_response_wrapper(input_items.list)
class AsyncInputItemsWithStreamingResponse:
    """View over :class:`AsyncInputItems` that defers reading response bodies."""

    def __init__(self, input_items: AsyncInputItems) -> None:
        self._input_items = input_items
        self.list = async_to_streamed_response_wrapper(input_items.list)

View File

@@ -0,0 +1,305 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable, Optional
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.responses import input_token_count_params
from ...types.responses.tool_param import ToolParam
from ...types.shared_params.reasoning import Reasoning
from ...types.responses.response_input_item_param import ResponseInputItemParam
from ...types.responses.input_token_count_response import InputTokenCountResponse
__all__ = ["InputTokens", "AsyncInputTokens"]
class InputTokens(SyncAPIResource):
    """Synchronous resource for counting the input tokens of a prospective response."""

    @cached_property
    def with_raw_response(self) -> InputTokensWithRawResponse:
        """Return a view of this resource whose methods return raw response objects.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return InputTokensWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> InputTokensWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return InputTokensWithStreamingResponse(self)

    def count(
        self,
        *,
        conversation: Optional[input_token_count_params.Conversation] | Omit = omit,
        input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        model: Optional[str] | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        reasoning: Optional[Reasoning] | Omit = omit,
        text: Optional[input_token_count_params.Text] | Omit = omit,
        tool_choice: Optional[input_token_count_params.ToolChoice] | Omit = omit,
        tools: Optional[Iterable[ToolParam]] | Omit = omit,
        truncation: Literal["auto", "disabled"] | Omit = omit,
        # The keyword arguments below carry request options that are not modeled
        # explicitly; values given here override client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> InputTokenCountResponse:
        """Count the input tokens a response request would consume.

        Args:
          conversation: The conversation that this response belongs to. Items from this
              conversation are prepended to `input_items` for this response request.

          input: Text, image, or file inputs to the model, used to generate a response.

          instructions: A system (or developer) message inserted into the model's context.
              When used along with `previous_response_id`, instructions from a previous
              response are not carried over.

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. See the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model, for
              multi-turn conversations. Cannot be used in conjunction with `conversation`.

          reasoning: **gpt-5 and o-series models only** Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).

          text: Configuration options for a text response from the model (plain text or
              structured JSON data).

          tool_choice: Controls which tool the model should use, if any.

          tools: An array of tools the model may call while generating a response. Use
              `tool_choice` to select a specific tool.

          truncation: The truncation strategy: `auto` drops items from the beginning of
              the conversation to fit the context window; `disabled` (default) makes the
              request fail with a 400 error when the context window would be exceeded.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = maybe_transform(
            {
                "conversation": conversation,
                "input": input,
                "instructions": instructions,
                "model": model,
                "parallel_tool_calls": parallel_tool_calls,
                "previous_response_id": previous_response_id,
                "reasoning": reasoning,
                "text": text,
                "tool_choice": tool_choice,
                "tools": tools,
                "truncation": truncation,
            },
            input_token_count_params.InputTokenCountParams,
        )
        return self._post(
            "/responses/input_tokens",
            body=request_body,
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            ),
            cast_to=InputTokenCountResponse,
        )
class AsyncInputTokens(AsyncAPIResource):
    """Asynchronous resource for counting the input tokens of a prospective response."""

    @cached_property
    def with_raw_response(self) -> AsyncInputTokensWithRawResponse:
        """Return a view of this resource whose methods return raw response objects.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncInputTokensWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncInputTokensWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncInputTokensWithStreamingResponse(self)

    async def count(
        self,
        *,
        conversation: Optional[input_token_count_params.Conversation] | Omit = omit,
        input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        model: Optional[str] | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        reasoning: Optional[Reasoning] | Omit = omit,
        text: Optional[input_token_count_params.Text] | Omit = omit,
        tool_choice: Optional[input_token_count_params.ToolChoice] | Omit = omit,
        tools: Optional[Iterable[ToolParam]] | Omit = omit,
        truncation: Literal["auto", "disabled"] | Omit = omit,
        # The keyword arguments below carry request options that are not modeled
        # explicitly; values given here override client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> InputTokenCountResponse:
        """Count the input tokens a response request would consume.

        Args:
          conversation: The conversation that this response belongs to. Items from this
              conversation are prepended to `input_items` for this response request.

          input: Text, image, or file inputs to the model, used to generate a response.

          instructions: A system (or developer) message inserted into the model's context.
              When used along with `previous_response_id`, instructions from a previous
              response are not carried over.

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. See the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model, for
              multi-turn conversations. Cannot be used in conjunction with `conversation`.

          reasoning: **gpt-5 and o-series models only** Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).

          text: Configuration options for a text response from the model (plain text or
              structured JSON data).

          tool_choice: Controls which tool the model should use, if any.

          tools: An array of tools the model may call while generating a response. Use
              `tool_choice` to select a specific tool.

          truncation: The truncation strategy: `auto` drops items from the beginning of
              the conversation to fit the context window; `disabled` (default) makes the
              request fail with a 400 error when the context window would be exceeded.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        request_body = await async_maybe_transform(
            {
                "conversation": conversation,
                "input": input,
                "instructions": instructions,
                "model": model,
                "parallel_tool_calls": parallel_tool_calls,
                "previous_response_id": previous_response_id,
                "reasoning": reasoning,
                "text": text,
                "tool_choice": tool_choice,
                "tools": tools,
                "truncation": truncation,
            },
            input_token_count_params.InputTokenCountParams,
        )
        return await self._post(
            "/responses/input_tokens",
            body=request_body,
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            ),
            cast_to=InputTokenCountResponse,
        )
class InputTokensWithRawResponse:
    """View over :class:`InputTokens` whose methods return raw response objects."""

    def __init__(self, input_tokens: InputTokens) -> None:
        self._input_tokens = input_tokens
        self.count = _legacy_response.to_raw_response_wrapper(input_tokens.count)
class AsyncInputTokensWithRawResponse:
    """View over :class:`AsyncInputTokens` whose methods return raw response objects."""

    def __init__(self, input_tokens: AsyncInputTokens) -> None:
        self._input_tokens = input_tokens
        self.count = _legacy_response.async_to_raw_response_wrapper(input_tokens.count)
class InputTokensWithStreamingResponse:
    """View over :class:`InputTokens` that defers reading response bodies."""

    def __init__(self, input_tokens: InputTokens) -> None:
        self._input_tokens = input_tokens
        self.count = to_streamed_response_wrapper(input_tokens.count)
class AsyncInputTokensWithStreamingResponse:
    """Wraps each `AsyncInputTokens` method so calls return a streamed response object."""

    def __init__(self, input_tokens: AsyncInputTokens) -> None:
        self._input_tokens = input_tokens
        self.count = async_to_streamed_response_wrapper(input_tokens.count)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .parts import (
Parts,
AsyncParts,
PartsWithRawResponse,
AsyncPartsWithRawResponse,
PartsWithStreamingResponse,
AsyncPartsWithStreamingResponse,
)
from .uploads import (
Uploads,
AsyncUploads,
UploadsWithRawResponse,
AsyncUploadsWithRawResponse,
UploadsWithStreamingResponse,
AsyncUploadsWithStreamingResponse,
)
# Explicit public API of this package (consumed by `from <package> import *`
# and by static analyzers); mirrors the re-exports above.
__all__ = [
    "Parts",
    "AsyncParts",
    "PartsWithRawResponse",
    "AsyncPartsWithRawResponse",
    "PartsWithStreamingResponse",
    "AsyncPartsWithStreamingResponse",
    "Uploads",
    "AsyncUploads",
    "UploadsWithRawResponse",
    "AsyncUploadsWithRawResponse",
    "UploadsWithStreamingResponse",
    "AsyncUploadsWithStreamingResponse",
]

View File

@@ -0,0 +1,205 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Mapping, cast
import httpx
from ... import _legacy_response
from ..._types import Body, Query, Headers, NotGiven, FileTypes, not_given
from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.uploads import part_create_params
from ...types.uploads.upload_part import UploadPart
__all__ = ["Parts", "AsyncParts"]
class Parts(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> PartsWithRawResponse:
        """Prefix for any method call to get the raw `httpx` response instead of the parsed content.

        See https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return PartsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> PartsWithStreamingResponse:
        """Like `.with_raw_response`, but does not eagerly read the response body.

        See https://www.github.com/openai/openai-python#with_streaming_response
        """
        return PartsWithStreamingResponse(self)

    def create(
        self,
        upload_id: str,
        *,
        data: FileTypes,
        # Extra per-request settings; values given here take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> UploadPart:
        """Add a [Part](https://platform.openai.com/docs/api-reference/uploads/part-object)
        to an [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.

        A Part is a chunk of bytes of the file being uploaded. Each Part can be
        at most 64 MB, and Parts can be added until the 8 GB Upload maximum is
        reached. Parts may be uploaded in parallel; their intended order is
        decided when you
        [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete).

        Args:
          data: The chunk of bytes for this Part.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        payload = deepcopy_minimal({"data": data})
        file_parts = extract_files(cast(Mapping[str, object], payload), paths=[["data"]])
        # The Content-Type actually sent to the server will additionally carry a
        # `boundary` parameter, e.g. multipart/form-data; boundary=---abc--
        headers_with_content_type = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return self._post(
            f"/uploads/{upload_id}/parts",
            body=maybe_transform(payload, part_create_params.PartCreateParams),
            files=file_parts,
            options=make_request_options(
                extra_headers=headers_with_content_type,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            ),
            cast_to=UploadPart,
        )
class AsyncParts(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncPartsWithRawResponse:
        """Prefix for any method call to get the raw `httpx` response instead of the parsed content.

        See https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncPartsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncPartsWithStreamingResponse:
        """Like `.with_raw_response`, but does not eagerly read the response body.

        See https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncPartsWithStreamingResponse(self)

    async def create(
        self,
        upload_id: str,
        *,
        data: FileTypes,
        # Extra per-request settings; values given here take precedence over
        # values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> UploadPart:
        """Add a [Part](https://platform.openai.com/docs/api-reference/uploads/part-object)
        to an [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.

        A Part is a chunk of bytes of the file being uploaded. Each Part can be
        at most 64 MB, and Parts can be added until the 8 GB Upload maximum is
        reached. Parts may be uploaded in parallel; their intended order is
        decided when you
        [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete).

        Args:
          data: The chunk of bytes for this Part.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        payload = deepcopy_minimal({"data": data})
        file_parts = extract_files(cast(Mapping[str, object], payload), paths=[["data"]])
        # The Content-Type actually sent to the server will additionally carry a
        # `boundary` parameter, e.g. multipart/form-data; boundary=---abc--
        headers_with_content_type = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return await self._post(
            f"/uploads/{upload_id}/parts",
            body=await async_maybe_transform(payload, part_create_params.PartCreateParams),
            files=file_parts,
            options=make_request_options(
                extra_headers=headers_with_content_type,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            ),
            cast_to=UploadPart,
        )
class PartsWithRawResponse:
    """Wraps each `Parts` method so calls return the raw HTTP response."""

    def __init__(self, parts: Parts) -> None:
        self._parts = parts
        self.create = _legacy_response.to_raw_response_wrapper(parts.create)
class AsyncPartsWithRawResponse:
    """Wraps each `AsyncParts` method so calls return the raw HTTP response."""

    def __init__(self, parts: AsyncParts) -> None:
        self._parts = parts
        self.create = _legacy_response.async_to_raw_response_wrapper(parts.create)
class PartsWithStreamingResponse:
    """Wraps each `Parts` method so calls return a streamed response object."""

    def __init__(self, parts: Parts) -> None:
        self._parts = parts
        self.create = to_streamed_response_wrapper(parts.create)
class AsyncPartsWithStreamingResponse:
    """Wraps each `AsyncParts` method so calls return a streamed response object."""

    def __init__(self, parts: AsyncParts) -> None:
        self._parts = parts
        self.create = async_to_streamed_response_wrapper(parts.create)

View File

@@ -0,0 +1,719 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import io
import os
import logging
import builtins
from typing import overload
from pathlib import Path
import anyio
import httpx
from ... import _legacy_response
from .parts import (
Parts,
AsyncParts,
PartsWithRawResponse,
AsyncPartsWithRawResponse,
PartsWithStreamingResponse,
AsyncPartsWithStreamingResponse,
)
from ...types import FilePurpose, upload_create_params, upload_complete_params
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.upload import Upload
from ...types.file_purpose import FilePurpose
__all__ = ["Uploads", "AsyncUploads"]
# Default chunk size used by `upload_file_chunked`: 64 MiB, the maximum
# size the API accepts for a single upload Part.
DEFAULT_PART_SIZE = 64 * 1024 * 1024

# Module-level logger; records per-part upload progress.
log: logging.Logger = logging.getLogger(__name__)
class Uploads(SyncAPIResource):
    @cached_property
    def parts(self) -> Parts:
        # Sub-resource for uploading the individual byte chunks (Parts) of an Upload.
        return Parts(self._client)

    @cached_property
    def with_raw_response(self) -> UploadsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return UploadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> UploadsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return UploadsWithStreamingResponse(self)

    @overload
    def upload_file_chunked(
        self,
        *,
        file: os.PathLike[str],
        mime_type: str,
        purpose: FilePurpose,
        bytes: int | None = None,
        part_size: int | None = None,
        md5: str | Omit = omit,
    ) -> Upload:
        """Splits a file on disk into multiple 64MB parts and uploads them sequentially."""

    @overload
    def upload_file_chunked(
        self,
        *,
        file: bytes,
        filename: str,
        bytes: int,
        mime_type: str,
        purpose: FilePurpose,
        part_size: int | None = None,
        md5: str | Omit = omit,
    ) -> Upload:
        """Splits an in-memory file into multiple 64MB parts and uploads them sequentially."""

    def upload_file_chunked(
        self,
        *,
        file: os.PathLike[str] | bytes,
        mime_type: str,
        purpose: FilePurpose,
        filename: str | None = None,
        bytes: int | None = None,
        part_size: int | None = None,
        md5: str | Omit = omit,
    ) -> Upload:
        """Splits the given file into multiple parts and uploads them sequentially.

        ```py
        from pathlib import Path

        client.uploads.upload_file_chunked(
            file=Path("my-paper.pdf"),
            mime_type="pdf",
            purpose="assistants",
        )
        ```
        """
        # NOTE: the `bytes` parameter intentionally shadows the builtin, which
        # is why `builtins.bytes` is used for isinstance checks below.
        if isinstance(file, builtins.bytes):
            # In-memory payloads carry no intrinsic name or size, so both must
            # be supplied explicitly by the caller.
            if filename is None:
                raise TypeError("The `filename` argument must be given for in-memory files")

            if bytes is None:
                raise TypeError("The `bytes` argument must be given for in-memory files")
        else:
            # On-disk file: derive missing name/size from the filesystem.
            if not isinstance(file, Path):
                file = Path(file)

            if not filename:
                filename = file.name

            if bytes is None:
                bytes = file.stat().st_size

        # Create the intermediate Upload object that the Parts attach to.
        upload = self.create(
            bytes=bytes,
            filename=filename,
            mime_type=mime_type,
            purpose=purpose,
        )

        part_ids: list[str] = []

        if part_size is None:
            part_size = DEFAULT_PART_SIZE

        # Wrap both input forms in a file-like object so one read loop serves both.
        if isinstance(file, builtins.bytes):
            buf: io.FileIO | io.BytesIO = io.BytesIO(file)
        else:
            buf = io.FileIO(file)

        try:
            while True:
                data = buf.read(part_size)
                if not data:
                    # EOF
                    break

                part = self.parts.create(upload_id=upload.id, data=data)
                log.info("Uploaded part %s for upload %s", part.id, upload.id)
                part_ids.append(part.id)
        finally:
            buf.close()

        # Completing the Upload stitches the Parts together in the given order.
        return self.complete(upload_id=upload.id, part_ids=part_ids, md5=md5)

    def create(
        self,
        *,
        bytes: int,
        filename: str,
        mime_type: str,
        purpose: FilePurpose,
        expires_after: upload_create_params.ExpiresAfter | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Upload:
        """
        Creates an intermediate
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object
        that you can add
        [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to.

        Currently, an Upload can accept at most 8 GB in total and expires after an hour
        after you create it.

        Once you complete the Upload, we will create a
        [File](https://platform.openai.com/docs/api-reference/files/object) object that
        contains all the parts you uploaded. This File is usable in the rest of our
        platform as a regular File object.

        For certain `purpose` values, the correct `mime_type` must be specified. Please
        refer to documentation for the
        [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files).

        For guidance on the proper filename extensions for each purpose, please follow
        the documentation on
        [creating a File](https://platform.openai.com/docs/api-reference/files/create).

        Args:
          bytes: The number of bytes in the file you are uploading.

          filename: The name of the file to upload.

          mime_type: The MIME type of the file.

              This must fall within the supported MIME types for your file purpose. See the
              supported MIME types for assistants and vision.

          purpose: The intended purpose of the uploaded file.

              See the
              [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).

          expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
              after 30 days and all other files are persisted until they are manually deleted.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/uploads",
            body=maybe_transform(
                {
                    "bytes": bytes,
                    "filename": filename,
                    "mime_type": mime_type,
                    "purpose": purpose,
                    "expires_after": expires_after,
                },
                upload_create_params.UploadCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )

    def cancel(
        self,
        upload_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Upload:
        """Cancels the Upload.

        No Parts may be added after an Upload is cancelled.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        return self._post(
            f"/uploads/{upload_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )

    def complete(
        self,
        upload_id: str,
        *,
        part_ids: SequenceNotStr[str],
        md5: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Upload:
        """
        Completes the
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object).

        Within the returned Upload object, there is a nested
        [File](https://platform.openai.com/docs/api-reference/files/object) object that
        is ready to use in the rest of the platform.

        You can specify the order of the Parts by passing in an ordered list of the Part
        IDs.

        The number of bytes uploaded upon completion must match the number of bytes
        initially specified when creating the Upload object. No Parts may be added after
        an Upload is completed.

        Args:
          part_ids: The ordered list of Part IDs.

          md5: The optional md5 checksum for the file contents to verify if the bytes uploaded
              matches what you expect.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        return self._post(
            f"/uploads/{upload_id}/complete",
            body=maybe_transform(
                {
                    "part_ids": part_ids,
                    "md5": md5,
                },
                upload_complete_params.UploadCompleteParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )
class AsyncUploads(AsyncAPIResource):
    @cached_property
    def parts(self) -> AsyncParts:
        # Sub-resource for uploading the individual byte chunks (Parts) of an Upload.
        return AsyncParts(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncUploadsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncUploadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncUploadsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncUploadsWithStreamingResponse(self)

    @overload
    async def upload_file_chunked(
        self,
        *,
        file: os.PathLike[str],
        mime_type: str,
        purpose: FilePurpose,
        bytes: int | None = None,
        part_size: int | None = None,
        md5: str | Omit = omit,
    ) -> Upload:
        """Splits a file on disk into multiple 64MB parts and uploads them sequentially."""

    @overload
    async def upload_file_chunked(
        self,
        *,
        file: bytes,
        filename: str,
        bytes: int,
        mime_type: str,
        purpose: FilePurpose,
        part_size: int | None = None,
        md5: str | Omit = omit,
    ) -> Upload:
        """Splits an in-memory file into multiple 64MB parts and uploads them sequentially."""

    async def upload_file_chunked(
        self,
        *,
        file: os.PathLike[str] | bytes,
        mime_type: str,
        purpose: FilePurpose,
        filename: str | None = None,
        bytes: int | None = None,
        part_size: int | None = None,
        md5: str | Omit = omit,
    ) -> Upload:
        """Splits the given file into multiple parts and uploads them sequentially.

        ```py
        from pathlib import Path

        await client.uploads.upload_file_chunked(
            file=Path("my-paper.pdf"),
            mime_type="pdf",
            purpose="assistants",
        )
        ```
        """
        # NOTE: the `bytes` parameter intentionally shadows the builtin, which
        # is why `builtins.bytes` is used for isinstance checks below.
        if isinstance(file, builtins.bytes):
            # In-memory payloads carry no intrinsic name or size, so both must
            # be supplied explicitly by the caller.
            if filename is None:
                raise TypeError("The `filename` argument must be given for in-memory files")

            if bytes is None:
                raise TypeError("The `bytes` argument must be given for in-memory files")
        else:
            # On-disk file: use anyio.Path so stat/open do not block the event loop.
            if not isinstance(file, anyio.Path):
                file = anyio.Path(file)

            if not filename:
                filename = file.name

            if bytes is None:
                stat = await file.stat()
                bytes = stat.st_size

        # Create the intermediate Upload object that the Parts attach to.
        upload = await self.create(
            bytes=bytes,
            filename=filename,
            mime_type=mime_type,
            purpose=purpose,
        )

        part_ids: list[str] = []

        if part_size is None:
            part_size = DEFAULT_PART_SIZE

        if isinstance(file, anyio.Path):
            # Async file handle: reads are awaited so other tasks can run
            # between parts.
            fd = await file.open("rb")
            async with fd:
                while True:
                    data = await fd.read(part_size)
                    if not data:
                        # EOF
                        break

                    part = await self.parts.create(upload_id=upload.id, data=data)
                    log.info("Uploaded part %s for upload %s", part.id, upload.id)
                    part_ids.append(part.id)
        else:
            # In-memory bytes: synchronous BytesIO reads are cheap; only the
            # per-part HTTP calls are awaited.
            buf = io.BytesIO(file)
            try:
                while True:
                    data = buf.read(part_size)
                    if not data:
                        # EOF
                        break

                    part = await self.parts.create(upload_id=upload.id, data=data)
                    log.info("Uploaded part %s for upload %s", part.id, upload.id)
                    part_ids.append(part.id)
            finally:
                buf.close()

        # Completing the Upload stitches the Parts together in the given order.
        return await self.complete(upload_id=upload.id, part_ids=part_ids, md5=md5)

    async def create(
        self,
        *,
        bytes: int,
        filename: str,
        mime_type: str,
        purpose: FilePurpose,
        expires_after: upload_create_params.ExpiresAfter | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Upload:
        """
        Creates an intermediate
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object
        that you can add
        [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to.

        Currently, an Upload can accept at most 8 GB in total and expires after an hour
        after you create it.

        Once you complete the Upload, we will create a
        [File](https://platform.openai.com/docs/api-reference/files/object) object that
        contains all the parts you uploaded. This File is usable in the rest of our
        platform as a regular File object.

        For certain `purpose` values, the correct `mime_type` must be specified. Please
        refer to documentation for the
        [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files).

        For guidance on the proper filename extensions for each purpose, please follow
        the documentation on
        [creating a File](https://platform.openai.com/docs/api-reference/files/create).

        Args:
          bytes: The number of bytes in the file you are uploading.

          filename: The name of the file to upload.

          mime_type: The MIME type of the file.

              This must fall within the supported MIME types for your file purpose. See the
              supported MIME types for assistants and vision.

          purpose: The intended purpose of the uploaded file.

              See the
              [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).

          expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
              after 30 days and all other files are persisted until they are manually deleted.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/uploads",
            body=await async_maybe_transform(
                {
                    "bytes": bytes,
                    "filename": filename,
                    "mime_type": mime_type,
                    "purpose": purpose,
                    "expires_after": expires_after,
                },
                upload_create_params.UploadCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )

    async def cancel(
        self,
        upload_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Upload:
        """Cancels the Upload.

        No Parts may be added after an Upload is cancelled.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        return await self._post(
            f"/uploads/{upload_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )

    async def complete(
        self,
        upload_id: str,
        *,
        part_ids: SequenceNotStr[str],
        md5: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Upload:
        """
        Completes the
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object).

        Within the returned Upload object, there is a nested
        [File](https://platform.openai.com/docs/api-reference/files/object) object that
        is ready to use in the rest of the platform.

        You can specify the order of the Parts by passing in an ordered list of the Part
        IDs.

        The number of bytes uploaded upon completion must match the number of bytes
        initially specified when creating the Upload object. No Parts may be added after
        an Upload is completed.

        Args:
          part_ids: The ordered list of Part IDs.

          md5: The optional md5 checksum for the file contents to verify if the bytes uploaded
              matches what you expect.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        return await self._post(
            f"/uploads/{upload_id}/complete",
            body=await async_maybe_transform(
                {
                    "part_ids": part_ids,
                    "md5": md5,
                },
                upload_complete_params.UploadCompleteParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Upload,
        )
class UploadsWithRawResponse:
    """Wraps each `Uploads` method so calls return the raw HTTP response."""

    def __init__(self, uploads: Uploads) -> None:
        self._uploads = uploads
        self.create = _legacy_response.to_raw_response_wrapper(uploads.create)
        self.cancel = _legacy_response.to_raw_response_wrapper(uploads.cancel)
        self.complete = _legacy_response.to_raw_response_wrapper(uploads.complete)

    @cached_property
    def parts(self) -> PartsWithRawResponse:
        """Raw-response view of the nested `parts` sub-resource."""
        return PartsWithRawResponse(self._uploads.parts)
class AsyncUploadsWithRawResponse:
    """Wraps each `AsyncUploads` method so calls return the raw HTTP response."""

    def __init__(self, uploads: AsyncUploads) -> None:
        self._uploads = uploads
        self.create = _legacy_response.async_to_raw_response_wrapper(uploads.create)
        self.cancel = _legacy_response.async_to_raw_response_wrapper(uploads.cancel)
        self.complete = _legacy_response.async_to_raw_response_wrapper(uploads.complete)

    @cached_property
    def parts(self) -> AsyncPartsWithRawResponse:
        """Raw-response view of the nested `parts` sub-resource."""
        return AsyncPartsWithRawResponse(self._uploads.parts)
class UploadsWithStreamingResponse:
    """Wraps each `Uploads` method so calls return a streamed response object."""

    def __init__(self, uploads: Uploads) -> None:
        self._uploads = uploads
        self.create = to_streamed_response_wrapper(uploads.create)
        self.cancel = to_streamed_response_wrapper(uploads.cancel)
        self.complete = to_streamed_response_wrapper(uploads.complete)

    @cached_property
    def parts(self) -> PartsWithStreamingResponse:
        """Streaming-response view of the nested `parts` sub-resource."""
        return PartsWithStreamingResponse(self._uploads.parts)
class AsyncUploadsWithStreamingResponse:
    """Wraps each `AsyncUploads` method so calls return a streamed response object."""

    def __init__(self, uploads: AsyncUploads) -> None:
        self._uploads = uploads
        self.create = async_to_streamed_response_wrapper(uploads.create)
        self.cancel = async_to_streamed_response_wrapper(uploads.cancel)
        self.complete = async_to_streamed_response_wrapper(uploads.complete)

    @cached_property
    def parts(self) -> AsyncPartsWithStreamingResponse:
        """Streaming-response view of the nested `parts` sub-resource."""
        return AsyncPartsWithStreamingResponse(self._uploads.parts)

View File

@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .files import (
Files,
AsyncFiles,
FilesWithRawResponse,
AsyncFilesWithRawResponse,
FilesWithStreamingResponse,
AsyncFilesWithStreamingResponse,
)
from .file_batches import (
FileBatches,
AsyncFileBatches,
FileBatchesWithRawResponse,
AsyncFileBatchesWithRawResponse,
FileBatchesWithStreamingResponse,
AsyncFileBatchesWithStreamingResponse,
)
from .vector_stores import (
VectorStores,
AsyncVectorStores,
VectorStoresWithRawResponse,
AsyncVectorStoresWithRawResponse,
VectorStoresWithStreamingResponse,
AsyncVectorStoresWithStreamingResponse,
)
# Explicit public API of this package (consumed by `from <package> import *`
# and by static analyzers); mirrors the re-exports above.
__all__ = [
    "Files",
    "AsyncFiles",
    "FilesWithRawResponse",
    "AsyncFilesWithRawResponse",
    "FilesWithStreamingResponse",
    "AsyncFilesWithStreamingResponse",
    "FileBatches",
    "AsyncFileBatches",
    "FileBatchesWithRawResponse",
    "AsyncFileBatchesWithRawResponse",
    "FileBatchesWithStreamingResponse",
    "AsyncFileBatchesWithStreamingResponse",
    "VectorStores",
    "AsyncVectorStores",
    "VectorStoresWithRawResponse",
    "AsyncVectorStoresWithRawResponse",
    "VectorStoresWithStreamingResponse",
    "AsyncVectorStoresWithStreamingResponse",
]

View File

@@ -0,0 +1,813 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import asyncio
from typing import Dict, Iterable, Optional
from typing_extensions import Union, Literal
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
import httpx
import sniffio
from ... import _legacy_response
from ...types import FileChunkingStrategyParam
from ..._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given
from ..._utils import is_given, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncCursorPage, AsyncCursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.file_object import FileObject
from ...types.vector_stores import file_batch_create_params, file_batch_list_files_params
from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
from ...types.vector_stores.vector_store_file import VectorStoreFile
from ...types.vector_stores.vector_store_file_batch import VectorStoreFileBatch
__all__ = ["FileBatches", "AsyncFileBatches"]
class FileBatches(SyncAPIResource):
    """Synchronous resource for `/vector_stores/{id}/file_batches` endpoints."""

    @cached_property
    def with_raw_response(self) -> FileBatchesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return FileBatchesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> FileBatchesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return FileBatchesWithStreamingResponse(self)

    def create(
        self,
        vector_store_id: str,
        *,
        attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
        file_ids: SequenceNotStr[str] | Omit = omit,
        files: Iterable[file_batch_create_params.File] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFileBatch:
        """
        Create a vector store file batch.

        Args:
          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard. Keys are strings with a maximum
              length of 64 characters. Values are strings with a maximum length of 512
              characters, booleans, or numbers.

          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
              strategy. Only applicable if `file_ids` is non-empty.

          file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
              the vector store should use. Useful for tools like `file_search` that can access
              files. If `attributes` or `chunking_strategy` are provided, they will be applied
              to all files in the batch. Mutually exclusive with `files`.

          files: A list of objects that each include a `file_id` plus optional `attributes` or
              `chunking_strategy`. Use this when you need to override metadata for specific
              files. The global `attributes` or `chunking_strategy` will be ignored and must
              be specified for each file. Mutually exclusive with `file_ids`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._post(
            f"/vector_stores/{vector_store_id}/file_batches",
            body=maybe_transform(
                {
                    "attributes": attributes,
                    "chunking_strategy": chunking_strategy,
                    "file_ids": file_ids,
                    "files": files,
                },
                file_batch_create_params.FileBatchCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFileBatch,
        )

    def retrieve(
        self,
        batch_id: str,
        *,
        vector_store_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFileBatch:
        """
        Retrieves a vector store file batch.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get(
            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFileBatch,
        )

    def cancel(
        self,
        batch_id: str,
        *,
        vector_store_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFileBatch:
        """Cancel a vector store file batch.

        This attempts to cancel the processing of
        files in this batch as soon as possible.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._post(
            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFileBatch,
        )

    def create_and_poll(
        self,
        vector_store_id: str,
        *,
        file_ids: SequenceNotStr[str],
        poll_interval_ms: int | Omit = omit,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
    ) -> VectorStoreFileBatch:
        """Create a vector store batch and poll until all files have been processed."""
        batch = self.create(
            vector_store_id=vector_store_id,
            file_ids=file_ids,
            chunking_strategy=chunking_strategy,
        )
        # TODO: don't poll unless necessary??
        return self.poll(
            batch.id,
            vector_store_id=vector_store_id,
            poll_interval_ms=poll_interval_ms,
        )

    def list_files(
        self,
        batch_id: str,
        *,
        vector_store_id: str,
        after: str | Omit = omit,
        before: str | Omit = omit,
        filter: Literal["in_progress", "completed", "failed", "cancelled"] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[VectorStoreFile]:
        """
        Returns a list of vector store files in a batch.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.

          filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get_api_list(
            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
            page=SyncCursorPage[VectorStoreFile],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "filter": filter,
                        "limit": limit,
                        "order": order,
                    },
                    file_batch_list_files_params.FileBatchListFilesParams,
                ),
            ),
            model=VectorStoreFile,
        )

    def poll(
        self,
        batch_id: str,
        *,
        vector_store_id: str,
        poll_interval_ms: int | Omit = omit,
    ) -> VectorStoreFileBatch:
        """Wait for the given file batch to be processed.

        Note: this will return even if one of the files failed to process, you need to
        check batch.file_counts.failed_count to handle this case.
        """
        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
        if is_given(poll_interval_ms):
            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)

        while True:
            response = self.with_raw_response.retrieve(
                batch_id,
                vector_store_id=vector_store_id,
                extra_headers=headers,
            )

            batch = response.parse()
            if batch.file_counts.in_progress > 0:
                if not is_given(poll_interval_ms):
                    # No explicit interval given: prefer the server-suggested
                    # interval from the response headers, falling back to 1s.
                    from_header = response.headers.get("openai-poll-after-ms")
                    if from_header is not None:
                        poll_interval_ms = int(from_header)
                    else:
                        poll_interval_ms = 1000

                self._sleep(poll_interval_ms / 1000)
                continue

            return batch

    def upload_and_poll(
        self,
        vector_store_id: str,
        *,
        files: Iterable[FileTypes],
        max_concurrency: int = 5,
        file_ids: SequenceNotStr[str] = (),
        poll_interval_ms: int | Omit = omit,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
    ) -> VectorStoreFileBatch:
        """Uploads the given files concurrently and then creates a vector store file batch.

        If you've already uploaded certain files that you want to include in this batch
        then you can pass their IDs through the `file_ids` argument.

        By default, if any file upload fails then an exception will be eagerly raised.

        The number of concurrent uploads is configurable using the `max_concurrency`
        parameter. Uploads run on a thread pool, so this method works in any
        (synchronous) context.
        """
        # NOTE: default for `file_ids` is an immutable tuple rather than a
        # mutable `[]` (shared-default pitfall); it is only ever unpacked below.
        results: list[FileObject] = []

        with ThreadPoolExecutor(max_workers=max_concurrency) as executor:
            futures: list[Future[FileObject]] = [
                executor.submit(
                    self._client.files.create,
                    file=file,
                    purpose="assistants",
                )
                for file in files
            ]

            for future in as_completed(futures):
                exc = future.exception()
                if exc:
                    # Eagerly surface the first upload failure.
                    raise exc
                results.append(future.result())

        batch = self.create_and_poll(
            vector_store_id=vector_store_id,
            file_ids=[*file_ids, *(f.id for f in results)],
            poll_interval_ms=poll_interval_ms,
            chunking_strategy=chunking_strategy,
        )
        return batch
class AsyncFileBatches(AsyncAPIResource):
    """Asynchronous resource for `/vector_stores/{id}/file_batches` endpoints."""

    @cached_property
    def with_raw_response(self) -> AsyncFileBatchesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncFileBatchesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncFileBatchesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncFileBatchesWithStreamingResponse(self)

    async def create(
        self,
        vector_store_id: str,
        *,
        attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
        file_ids: SequenceNotStr[str] | Omit = omit,
        files: Iterable[file_batch_create_params.File] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFileBatch:
        """
        Create a vector store file batch.

        Args:
          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard. Keys are strings with a maximum
              length of 64 characters. Values are strings with a maximum length of 512
              characters, booleans, or numbers.

          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
              strategy. Only applicable if `file_ids` is non-empty.

          file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
              the vector store should use. Useful for tools like `file_search` that can access
              files. If `attributes` or `chunking_strategy` are provided, they will be applied
              to all files in the batch. Mutually exclusive with `files`.

          files: A list of objects that each include a `file_id` plus optional `attributes` or
              `chunking_strategy`. Use this when you need to override metadata for specific
              files. The global `attributes` or `chunking_strategy` will be ignored and must
              be specified for each file. Mutually exclusive with `file_ids`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            f"/vector_stores/{vector_store_id}/file_batches",
            body=await async_maybe_transform(
                {
                    "attributes": attributes,
                    "chunking_strategy": chunking_strategy,
                    "file_ids": file_ids,
                    "files": files,
                },
                file_batch_create_params.FileBatchCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFileBatch,
        )

    async def retrieve(
        self,
        batch_id: str,
        *,
        vector_store_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFileBatch:
        """
        Retrieves a vector store file batch.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._get(
            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFileBatch,
        )

    async def cancel(
        self,
        batch_id: str,
        *,
        vector_store_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFileBatch:
        """Cancel a vector store file batch.

        This attempts to cancel the processing of
        files in this batch as soon as possible.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFileBatch,
        )

    async def create_and_poll(
        self,
        vector_store_id: str,
        *,
        file_ids: SequenceNotStr[str],
        poll_interval_ms: int | Omit = omit,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
    ) -> VectorStoreFileBatch:
        """Create a vector store batch and poll until all files have been processed."""
        batch = await self.create(
            vector_store_id=vector_store_id,
            file_ids=file_ids,
            chunking_strategy=chunking_strategy,
        )
        # TODO: don't poll unless necessary??
        return await self.poll(
            batch.id,
            vector_store_id=vector_store_id,
            poll_interval_ms=poll_interval_ms,
        )

    def list_files(
        self,
        batch_id: str,
        *,
        vector_store_id: str,
        after: str | Omit = omit,
        before: str | Omit = omit,
        filter: Literal["in_progress", "completed", "failed", "cancelled"] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[VectorStoreFile, AsyncCursorPage[VectorStoreFile]]:
        """
        Returns a list of vector store files in a batch.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.

          filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get_api_list(
            f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
            page=AsyncCursorPage[VectorStoreFile],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "filter": filter,
                        "limit": limit,
                        "order": order,
                    },
                    file_batch_list_files_params.FileBatchListFilesParams,
                ),
            ),
            model=VectorStoreFile,
        )

    async def poll(
        self,
        batch_id: str,
        *,
        vector_store_id: str,
        poll_interval_ms: int | Omit = omit,
    ) -> VectorStoreFileBatch:
        """Wait for the given file batch to be processed.

        Note: this will return even if one of the files failed to process, you need to
        check batch.file_counts.failed_count to handle this case.
        """
        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
        if is_given(poll_interval_ms):
            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)

        while True:
            response = await self.with_raw_response.retrieve(
                batch_id,
                vector_store_id=vector_store_id,
                extra_headers=headers,
            )

            batch = response.parse()
            if batch.file_counts.in_progress > 0:
                if not is_given(poll_interval_ms):
                    # No explicit interval given: prefer the server-suggested
                    # interval from the response headers, falling back to 1s.
                    from_header = response.headers.get("openai-poll-after-ms")
                    if from_header is not None:
                        poll_interval_ms = int(from_header)
                    else:
                        poll_interval_ms = 1000

                await self._sleep(poll_interval_ms / 1000)
                continue

            return batch

    async def upload_and_poll(
        self,
        vector_store_id: str,
        *,
        files: Iterable[FileTypes],
        max_concurrency: int = 5,
        file_ids: SequenceNotStr[str] = (),
        poll_interval_ms: int | Omit = omit,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
    ) -> VectorStoreFileBatch:
        """Uploads the given files concurrently and then creates a vector store file batch.

        If you've already uploaded certain files that you want to include in this batch
        then you can pass their IDs through the `file_ids` argument.

        By default, if any file upload fails then an exception will be eagerly raised.

        The number of concurrent uploads is configurable using the `max_concurrency`
        parameter.

        Note: this method only supports `asyncio` or `trio` as the backing async
        runtime.
        """
        # NOTE: default for `file_ids` is an immutable tuple rather than a
        # mutable `[]` (shared-default pitfall); it is only ever unpacked below.
        uploaded_files: list[FileObject] = []

        async_library = sniffio.current_async_library()

        if async_library == "asyncio":

            async def asyncio_upload_file(semaphore: asyncio.Semaphore, file: FileTypes) -> None:
                async with semaphore:
                    file_obj = await self._client.files.create(
                        file=file,
                        purpose="assistants",
                    )
                    uploaded_files.append(file_obj)

            # Bound concurrency with a semaphore shared by all upload tasks.
            semaphore = asyncio.Semaphore(max_concurrency)

            tasks = [asyncio_upload_file(semaphore, file) for file in files]

            await asyncio.gather(*tasks)
        elif async_library == "trio":
            # We only import if the library is being used.
            # We support Python 3.7 so are using an older version of trio that does not have type information
            import trio  # type: ignore # pyright: ignore[reportMissingTypeStubs]

            async def trio_upload_file(limiter: trio.CapacityLimiter, file: FileTypes) -> None:
                async with limiter:
                    file_obj = await self._client.files.create(
                        file=file,
                        purpose="assistants",
                    )
                    uploaded_files.append(file_obj)

            limiter = trio.CapacityLimiter(max_concurrency)

            async with trio.open_nursery() as nursery:
                for file in files:
                    nursery.start_soon(trio_upload_file, limiter, file)  # pyright: ignore [reportUnknownMemberType]
        else:
            raise RuntimeError(
                f"Async runtime {async_library} is not supported yet. Only asyncio or trio is supported",
            )

        batch = await self.create_and_poll(
            vector_store_id=vector_store_id,
            file_ids=[*file_ids, *(f.id for f in uploaded_files)],
            poll_interval_ms=poll_interval_ms,
            chunking_strategy=chunking_strategy,
        )
        return batch
class FileBatchesWithRawResponse:
    """Raw-response view over a ``FileBatches`` resource.

    Each attribute mirrors the corresponding ``FileBatches`` method but
    returns the raw HTTP response object instead of the parsed content.
    """

    def __init__(self, file_batches: FileBatches) -> None:
        self._file_batches = file_batches

        _wrap = _legacy_response.to_raw_response_wrapper
        self.create = _wrap(file_batches.create)
        self.retrieve = _wrap(file_batches.retrieve)
        self.cancel = _wrap(file_batches.cancel)
        self.list_files = _wrap(file_batches.list_files)
class AsyncFileBatchesWithRawResponse:
    """Raw-response view over an ``AsyncFileBatches`` resource.

    Each attribute mirrors the corresponding ``AsyncFileBatches`` method but
    returns the raw HTTP response object instead of the parsed content.
    """

    def __init__(self, file_batches: AsyncFileBatches) -> None:
        self._file_batches = file_batches

        _wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = _wrap(file_batches.create)
        self.retrieve = _wrap(file_batches.retrieve)
        self.cancel = _wrap(file_batches.cancel)
        self.list_files = _wrap(file_batches.list_files)
class FileBatchesWithStreamingResponse:
    """Streaming-response view over a ``FileBatches`` resource.

    Each attribute mirrors the corresponding ``FileBatches`` method but does
    not eagerly read the response body.
    """

    def __init__(self, file_batches: FileBatches) -> None:
        self._file_batches = file_batches

        _wrap = to_streamed_response_wrapper
        self.create = _wrap(file_batches.create)
        self.retrieve = _wrap(file_batches.retrieve)
        self.cancel = _wrap(file_batches.cancel)
        self.list_files = _wrap(file_batches.list_files)
class AsyncFileBatchesWithStreamingResponse:
    """Streaming-response view over an ``AsyncFileBatches`` resource.

    Each attribute mirrors the corresponding ``AsyncFileBatches`` method but
    does not eagerly read the response body.
    """

    def __init__(self, file_batches: AsyncFileBatches) -> None:
        self._file_batches = file_batches

        _wrap = async_to_streamed_response_wrapper
        self.create = _wrap(file_batches.create)
        self.retrieve = _wrap(file_batches.retrieve)
        self.cancel = _wrap(file_batches.cancel)
        self.list_files = _wrap(file_batches.list_files)

# ---------------------------------------------------------------------------
# NOTE(review): the lines above/below this marker were diff-viewer residue
# ("View File", "@@ -0,0 +1,939 @@") from the extraction; the content that
# follows begins a separate generated module defining the vector-store
# `Files` resource.
# ---------------------------------------------------------------------------
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Union, Optional
from typing_extensions import Literal, assert_never
import httpx
from ... import _legacy_response
from ...types import FileChunkingStrategyParam
from ..._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
from ..._utils import is_given, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.vector_stores import file_list_params, file_create_params, file_update_params
from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
from ...types.vector_stores.vector_store_file import VectorStoreFile
from ...types.vector_stores.file_content_response import FileContentResponse
from ...types.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted
__all__ = ["Files", "AsyncFiles"]
class Files(SyncAPIResource):
@cached_property
def with_raw_response(self) -> FilesWithRawResponse:
    """Return a view of this resource whose methods yield raw responses.

    Prefix any HTTP method call with this property to receive the raw
    response object instead of the parsed content. See
    https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
    """
    return FilesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> FilesWithStreamingResponse:
    """Return a view like `.with_raw_response` that streams the body.

    Unlike `.with_raw_response`, the response body is not eagerly read. See
    https://www.github.com/openai/openai-python#with_streaming_response
    """
    return FilesWithStreamingResponse(self)
def create(
    self,
    vector_store_id: str,
    *,
    file_id: str,
    attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
    chunking_strategy: FileChunkingStrategyParam | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VectorStoreFile:
    """
    Create a vector store file by attaching a
    [File](https://platform.openai.com/docs/api-reference/files) to a
    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).

    Args:
      file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID that the
          vector store should use. Useful for tools like `file_search` that can access
          files.

      attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
          for storing additional information about the object in a structured format, and
          querying for objects via API or the dashboard. Keys are strings with a maximum
          length of 64 characters. Values are strings with a maximum length of 512
          characters, booleans, or numbers.

      chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
          strategy. Only applicable if `file_ids` is non-empty.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not vector_store_id:
        raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")

    # Assistants-v2 beta header is required for vector-store endpoints.
    extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}

    request_body = maybe_transform(
        {
            "file_id": file_id,
            "attributes": attributes,
            "chunking_strategy": chunking_strategy,
        },
        file_create_params.FileCreateParams,
    )
    request_options = make_request_options(
        extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return self._post(
        f"/vector_stores/{vector_store_id}/files",
        body=request_body,
        options=request_options,
        cast_to=VectorStoreFile,
    )
def retrieve(
    self,
    file_id: str,
    *,
    vector_store_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VectorStoreFile:
    """
    Retrieves a vector store file.

    Args:
      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # Validate path parameters in the same order as the generated client:
    # vector store first, then file.
    if not vector_store_id:
        raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
    if not file_id:
        raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")

    # Assistants-v2 beta header is required for vector-store endpoints.
    extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}

    request_options = make_request_options(
        extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return self._get(
        f"/vector_stores/{vector_store_id}/files/{file_id}",
        options=request_options,
        cast_to=VectorStoreFile,
    )
def update(
    self,
    file_id: str,
    *,
    vector_store_id: str,
    attributes: Optional[Dict[str, Union[str, float, bool]]],
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VectorStoreFile:
    """
    Update attributes on a vector store file.

    Args:
      attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
          for storing additional information about the object in a structured format, and
          querying for objects via API or the dashboard. Keys are strings with a maximum
          length of 64 characters. Values are strings with a maximum length of 512
          characters, booleans, or numbers.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # Validate path parameters in the same order as the generated client:
    # vector store first, then file.
    if not vector_store_id:
        raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
    if not file_id:
        raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")

    # Assistants-v2 beta header is required for vector-store endpoints.
    extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}

    request_body = maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams)
    request_options = make_request_options(
        extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return self._post(
        f"/vector_stores/{vector_store_id}/files/{file_id}",
        body=request_body,
        options=request_options,
        cast_to=VectorStoreFile,
    )
    def list(
        self,
        vector_store_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        filter: Literal["in_progress", "completed", "failed", "cancelled"] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[VectorStoreFile]:
        """
        Returns a list of vector store files.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.

          filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        # Returns a lazily-fetching cursor page; additional pages are requested on iteration.
        return self._get_api_list(
            f"/vector_stores/{vector_store_id}/files",
            page=SyncCursorPage[VectorStoreFile],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "filter": filter,
                        "limit": limit,
                        "order": order,
                    },
                    file_list_params.FileListParams,
                ),
            ),
            model=VectorStoreFile,
        )
    def delete(
        self,
        file_id: str,
        *,
        vector_store_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFileDeleted:
        """Delete a vector store file.

        This will remove the file from the vector store but
        the file itself will not be deleted. To delete the file, use the
        [delete file](https://platform.openai.com/docs/api-reference/files/delete)
        endpoint.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Fail fast on empty path parameters rather than issuing a malformed URL.
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._delete(
            f"/vector_stores/{vector_store_id}/files/{file_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFileDeleted,
        )
def create_and_poll(
self,
file_id: str,
*,
vector_store_id: str,
attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
poll_interval_ms: int | Omit = omit,
chunking_strategy: FileChunkingStrategyParam | Omit = omit,
) -> VectorStoreFile:
"""Attach a file to the given vector store and wait for it to be processed."""
self.create(
vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy, attributes=attributes
)
return self.poll(
file_id,
vector_store_id=vector_store_id,
poll_interval_ms=poll_interval_ms,
)
    def poll(
        self,
        file_id: str,
        *,
        vector_store_id: str,
        poll_interval_ms: int | Omit = omit,
    ) -> VectorStoreFile:
        """Wait for the vector store file to finish processing.

        Note: this will return even if the file failed to process, you need to check
        file.last_error and file.status to handle these cases
        """
        # Marker header so the server/telemetry can distinguish polling traffic.
        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
        if is_given(poll_interval_ms):
            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)

        while True:
            # Use the raw response so the poll-interval response header is accessible.
            response = self.with_raw_response.retrieve(
                file_id,
                vector_store_id=vector_store_id,
                extra_headers=headers,
            )

            file = response.parse()
            if file.status == "in_progress":
                # Prefer the server-suggested interval; fall back to 1s.
                if not is_given(poll_interval_ms):
                    from_header = response.headers.get("openai-poll-after-ms")
                    if from_header is not None:
                        poll_interval_ms = int(from_header)
                    else:
                        poll_interval_ms = 1000

                self._sleep(poll_interval_ms / 1000)  # interval is in ms; _sleep takes seconds
            elif file.status == "cancelled" or file.status == "completed" or file.status == "failed":
                return file
            else:
                # Exhaustiveness check at type-check time; at runtime an unknown
                # status is returned to the caller rather than raising.
                if TYPE_CHECKING:  # type: ignore[unreachable]
                    assert_never(file.status)
                else:
                    return file
def upload(
self,
*,
vector_store_id: str,
file: FileTypes,
chunking_strategy: FileChunkingStrategyParam | Omit = omit,
) -> VectorStoreFile:
"""Upload a file to the `files` API and then attach it to the given vector store.
Note the file will be asynchronously processed (you can use the alternative
polling helper method to wait for processing to complete).
"""
file_obj = self._client.files.create(file=file, purpose="assistants")
return self.create(vector_store_id=vector_store_id, file_id=file_obj.id, chunking_strategy=chunking_strategy)
def upload_and_poll(
self,
*,
vector_store_id: str,
file: FileTypes,
attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
poll_interval_ms: int | Omit = omit,
chunking_strategy: FileChunkingStrategyParam | Omit = omit,
) -> VectorStoreFile:
"""Add a file to a vector store and poll until processing is complete."""
file_obj = self._client.files.create(file=file, purpose="assistants")
return self.create_and_poll(
vector_store_id=vector_store_id,
file_id=file_obj.id,
chunking_strategy=chunking_strategy,
poll_interval_ms=poll_interval_ms,
attributes=attributes,
)
    def content(
        self,
        file_id: str,
        *,
        vector_store_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncPage[FileContentResponse]:
        """
        Retrieve the parsed contents of a vector store file.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Fail fast on empty path parameters rather than issuing a malformed URL.
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        # Content is returned as a (non-cursor) page of parsed chunks.
        return self._get_api_list(
            f"/vector_stores/{vector_store_id}/files/{file_id}/content",
            page=SyncPage[FileContentResponse],
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            model=FileContentResponse,
        )
class AsyncFiles(AsyncAPIResource):
    """Async counterpart of the vector-store ``Files`` resource.

    Mirrors the sync resource method-for-method; request-issuing methods are
    coroutines, while ``list``/``content`` return async paginators that fetch
    lazily during ``async for`` iteration.
    """

    @cached_property
    def with_raw_response(self) -> AsyncFilesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncFilesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncFilesWithStreamingResponse(self)

    async def create(
        self,
        vector_store_id: str,
        *,
        file_id: str,
        attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFile:
        """
        Create a vector store file by attaching a
        [File](https://platform.openai.com/docs/api-reference/files) to a
        [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).

        Args:
          file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID that the
              vector store should use. Useful for tools like `file_search` that can access
              files.

          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard. Keys are strings with a maximum
              length of 64 characters. Values are strings with a maximum length of 512
              characters, booleans, or numbers.

          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
              strategy. Only applicable if `file_ids` is non-empty.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        # Vector stores live behind the Assistants v2 beta flag; caller-supplied
        # headers still take precedence over this opt-in header.
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            f"/vector_stores/{vector_store_id}/files",
            body=await async_maybe_transform(
                {
                    "file_id": file_id,
                    "attributes": attributes,
                    "chunking_strategy": chunking_strategy,
                },
                file_create_params.FileCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFile,
        )

    async def retrieve(
        self,
        file_id: str,
        *,
        vector_store_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFile:
        """
        Retrieves a vector store file.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Fail fast on empty path parameters rather than issuing a malformed URL.
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._get(
            f"/vector_stores/{vector_store_id}/files/{file_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFile,
        )

    async def update(
        self,
        file_id: str,
        *,
        vector_store_id: str,
        attributes: Optional[Dict[str, Union[str, float, bool]]],
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFile:
        """
        Update attributes on a vector store file.

        Args:
          attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard. Keys are strings with a maximum
              length of 64 characters. Values are strings with a maximum length of 512
              characters, booleans, or numbers.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            f"/vector_stores/{vector_store_id}/files/{file_id}",
            body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFile,
        )

    def list(
        self,
        vector_store_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        filter: Literal["in_progress", "completed", "failed", "cancelled"] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[VectorStoreFile, AsyncCursorPage[VectorStoreFile]]:
        """
        Returns a list of vector store files.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.

          filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        # NOTE: intentionally not `async def` — this returns an AsyncPaginator
        # which performs the HTTP requests lazily during async iteration.
        return self._get_api_list(
            f"/vector_stores/{vector_store_id}/files",
            page=AsyncCursorPage[VectorStoreFile],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "filter": filter,
                        "limit": limit,
                        "order": order,
                    },
                    file_list_params.FileListParams,
                ),
            ),
            model=VectorStoreFile,
        )

    async def delete(
        self,
        file_id: str,
        *,
        vector_store_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreFileDeleted:
        """Delete a vector store file.

        This will remove the file from the vector store but
        the file itself will not be deleted. To delete the file, use the
        [delete file](https://platform.openai.com/docs/api-reference/files/delete)
        endpoint.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._delete(
            f"/vector_stores/{vector_store_id}/files/{file_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreFileDeleted,
        )

    async def create_and_poll(
        self,
        file_id: str,
        *,
        vector_store_id: str,
        attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
        poll_interval_ms: int | Omit = omit,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
    ) -> VectorStoreFile:
        """Attach a file to the given vector store and wait for it to be processed."""
        # Attach first, then delegate the waiting to `poll`.
        await self.create(
            vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy, attributes=attributes
        )

        return await self.poll(
            file_id,
            vector_store_id=vector_store_id,
            poll_interval_ms=poll_interval_ms,
        )

    async def poll(
        self,
        file_id: str,
        *,
        vector_store_id: str,
        poll_interval_ms: int | Omit = omit,
    ) -> VectorStoreFile:
        """Wait for the vector store file to finish processing.

        Note: this will return even if the file failed to process, you need to check
        file.last_error and file.status to handle these cases
        """
        # Marker header so the server/telemetry can distinguish polling traffic.
        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
        if is_given(poll_interval_ms):
            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)

        while True:
            # Use the raw response so the poll-interval response header is accessible.
            response = await self.with_raw_response.retrieve(
                file_id,
                vector_store_id=vector_store_id,
                extra_headers=headers,
            )

            file = response.parse()
            if file.status == "in_progress":
                # Prefer the server-suggested interval; fall back to 1s.
                if not is_given(poll_interval_ms):
                    from_header = response.headers.get("openai-poll-after-ms")
                    if from_header is not None:
                        poll_interval_ms = int(from_header)
                    else:
                        poll_interval_ms = 1000

                await self._sleep(poll_interval_ms / 1000)  # interval is in ms; _sleep takes seconds
            elif file.status == "cancelled" or file.status == "completed" or file.status == "failed":
                return file
            else:
                # Exhaustiveness check at type-check time; at runtime an unknown
                # status is returned to the caller rather than raising.
                if TYPE_CHECKING:  # type: ignore[unreachable]
                    assert_never(file.status)
                else:
                    return file

    async def upload(
        self,
        *,
        vector_store_id: str,
        file: FileTypes,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
    ) -> VectorStoreFile:
        """Upload a file to the `files` API and then attach it to the given vector store.

        Note the file will be asynchronously processed (you can use the alternative
        polling helper method to wait for processing to complete).
        """
        file_obj = await self._client.files.create(file=file, purpose="assistants")
        return await self.create(
            vector_store_id=vector_store_id, file_id=file_obj.id, chunking_strategy=chunking_strategy
        )

    async def upload_and_poll(
        self,
        *,
        vector_store_id: str,
        file: FileTypes,
        attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
        poll_interval_ms: int | Omit = omit,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
    ) -> VectorStoreFile:
        """Add a file to a vector store and poll until processing is complete."""
        file_obj = await self._client.files.create(file=file, purpose="assistants")
        return await self.create_and_poll(
            vector_store_id=vector_store_id,
            file_id=file_obj.id,
            poll_interval_ms=poll_interval_ms,
            chunking_strategy=chunking_strategy,
            attributes=attributes,
        )

    def content(
        self,
        file_id: str,
        *,
        vector_store_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[FileContentResponse, AsyncPage[FileContentResponse]]:
        """
        Retrieve the parsed contents of a vector store file.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        # NOTE: intentionally not `async def` — returns a lazy async paginator.
        return self._get_api_list(
            f"/vector_stores/{vector_store_id}/files/{file_id}/content",
            page=AsyncPage[FileContentResponse],
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            model=FileContentResponse,
        )
class FilesWithRawResponse:
    """Proxy exposing each ``Files`` method wrapped to return the raw HTTP response."""

    def __init__(self, files: Files) -> None:
        self._files = files

        self.create = _legacy_response.to_raw_response_wrapper(
            files.create,
        )
        self.retrieve = _legacy_response.to_raw_response_wrapper(
            files.retrieve,
        )
        self.update = _legacy_response.to_raw_response_wrapper(
            files.update,
        )
        self.list = _legacy_response.to_raw_response_wrapper(
            files.list,
        )
        self.delete = _legacy_response.to_raw_response_wrapper(
            files.delete,
        )
        self.content = _legacy_response.to_raw_response_wrapper(
            files.content,
        )
class AsyncFilesWithRawResponse:
    """Proxy exposing each ``AsyncFiles`` method wrapped to return the raw HTTP response."""

    def __init__(self, files: AsyncFiles) -> None:
        self._files = files

        self.create = _legacy_response.async_to_raw_response_wrapper(
            files.create,
        )
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
            files.retrieve,
        )
        self.update = _legacy_response.async_to_raw_response_wrapper(
            files.update,
        )
        self.list = _legacy_response.async_to_raw_response_wrapper(
            files.list,
        )
        self.delete = _legacy_response.async_to_raw_response_wrapper(
            files.delete,
        )
        self.content = _legacy_response.async_to_raw_response_wrapper(
            files.content,
        )
class FilesWithStreamingResponse:
    """Proxy exposing each ``Files`` method wrapped for streaming (lazy body) responses."""

    def __init__(self, files: Files) -> None:
        self._files = files

        self.create = to_streamed_response_wrapper(
            files.create,
        )
        self.retrieve = to_streamed_response_wrapper(
            files.retrieve,
        )
        self.update = to_streamed_response_wrapper(
            files.update,
        )
        self.list = to_streamed_response_wrapper(
            files.list,
        )
        self.delete = to_streamed_response_wrapper(
            files.delete,
        )
        self.content = to_streamed_response_wrapper(
            files.content,
        )
class AsyncFilesWithStreamingResponse:
    """Proxy exposing each ``AsyncFiles`` method wrapped for streaming (lazy body) responses."""

    def __init__(self, files: AsyncFiles) -> None:
        self._files = files

        self.create = async_to_streamed_response_wrapper(
            files.create,
        )
        self.retrieve = async_to_streamed_response_wrapper(
            files.retrieve,
        )
        self.update = async_to_streamed_response_wrapper(
            files.update,
        )
        self.list = async_to_streamed_response_wrapper(
            files.list,
        )
        self.delete = async_to_streamed_response_wrapper(
            files.delete,
        )
        self.content = async_to_streamed_response_wrapper(
            files.content,
        )

View File

@@ -0,0 +1,875 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Optional
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from .files import (
Files,
AsyncFiles,
FilesWithRawResponse,
AsyncFilesWithRawResponse,
FilesWithStreamingResponse,
AsyncFilesWithStreamingResponse,
)
from ...types import (
FileChunkingStrategyParam,
vector_store_list_params,
vector_store_create_params,
vector_store_search_params,
vector_store_update_params,
)
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
from .file_batches import (
FileBatches,
AsyncFileBatches,
FileBatchesWithRawResponse,
AsyncFileBatchesWithRawResponse,
FileBatchesWithStreamingResponse,
AsyncFileBatchesWithStreamingResponse,
)
from ..._base_client import AsyncPaginator, make_request_options
from ...types.vector_store import VectorStore
from ...types.vector_store_deleted import VectorStoreDeleted
from ...types.shared_params.metadata import Metadata
from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
from ...types.vector_store_search_response import VectorStoreSearchResponse
__all__ = ["VectorStores", "AsyncVectorStores"]
class VectorStores(SyncAPIResource):
    @cached_property
    def files(self) -> Files:
        # Sub-resource for the individual files attached to a vector store.
        return Files(self._client)
    @cached_property
    def file_batches(self) -> FileBatches:
        # Sub-resource for batch attachment of files to a vector store.
        return FileBatches(self._client)
    @cached_property
    def with_raw_response(self) -> VectorStoresWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return VectorStoresWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> VectorStoresWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return VectorStoresWithStreamingResponse(self)
    def create(
        self,
        *,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
        description: str | Omit = omit,
        expires_after: vector_store_create_params.ExpiresAfter | Omit = omit,
        file_ids: SequenceNotStr[str] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        name: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStore:
        """
        Create a vector store.

        Args:
          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
              strategy. Only applicable if `file_ids` is non-empty.

          description: A description for the vector store. Can be used to describe the vector store's
              purpose.

          expires_after: The expiration policy for a vector store.

          file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
              the vector store should use. Useful for tools like `file_search` that can access
              files.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          name: The name of the vector store.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Vector stores live behind the Assistants v2 beta flag; caller-supplied
        # headers still take precedence over this opt-in header.
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._post(
            "/vector_stores",
            body=maybe_transform(
                {
                    "chunking_strategy": chunking_strategy,
                    "description": description,
                    "expires_after": expires_after,
                    "file_ids": file_ids,
                    "metadata": metadata,
                    "name": name,
                },
                vector_store_create_params.VectorStoreCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStore,
        )
    def retrieve(
        self,
        vector_store_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStore:
        """
        Retrieves a vector store.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Fail fast on an empty path parameter rather than issuing a malformed URL.
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get(
            f"/vector_stores/{vector_store_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStore,
        )
    def update(
        self,
        vector_store_id: str,
        *,
        expires_after: Optional[vector_store_update_params.ExpiresAfter] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        name: Optional[str] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStore:
        """
        Modifies a vector store.

        Args:
          expires_after: The expiration policy for a vector store.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          name: The name of the vector store.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Fail fast on an empty path parameter rather than issuing a malformed URL.
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._post(
            f"/vector_stores/{vector_store_id}",
            body=maybe_transform(
                {
                    "expires_after": expires_after,
                    "metadata": metadata,
                    "name": name,
                },
                vector_store_update_params.VectorStoreUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStore,
        )
def list(
self,
*,
after: str | Omit = omit,
before: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[VectorStore]:
"""Returns a list of vector stores.
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
starting with obj_foo, your subsequent call can include before=obj_foo in order
to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
"/vector_stores",
page=SyncCursorPage[VectorStore],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"before": before,
"limit": limit,
"order": order,
},
vector_store_list_params.VectorStoreListParams,
),
),
model=VectorStore,
)
def delete(
self,
vector_store_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VectorStoreDeleted:
"""
Delete a vector store.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
f"/vector_stores/{vector_store_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=VectorStoreDeleted,
)
def search(
self,
vector_store_id: str,
*,
query: Union[str, SequenceNotStr[str]],
filters: vector_store_search_params.Filters | Omit = omit,
max_num_results: int | Omit = omit,
ranking_options: vector_store_search_params.RankingOptions | Omit = omit,
rewrite_query: bool | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncPage[VectorStoreSearchResponse]:
"""
Search a vector store for relevant chunks based on a query and file attributes
filter.
Args:
query: A query string for a search
filters: A filter to apply based on file attributes.
max_num_results: The maximum number of results to return. This number should be between 1 and 50
inclusive.
ranking_options: Ranking options for search.
rewrite_query: Whether to rewrite the natural language query for vector search.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/vector_stores/{vector_store_id}/search",
page=SyncPage[VectorStoreSearchResponse],
body=maybe_transform(
{
"query": query,
"filters": filters,
"max_num_results": max_num_results,
"ranking_options": ranking_options,
"rewrite_query": rewrite_query,
},
vector_store_search_params.VectorStoreSearchParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
model=VectorStoreSearchResponse,
method="post",
)
class AsyncVectorStores(AsyncAPIResource):
    """Asynchronous client for the `/vector_stores` REST endpoints.

    Mirrors the synchronous ``VectorStores`` resource: every request method is a
    coroutine (``list``/``search`` instead return async paginators that are
    consumed with ``async for``), and every endpoint sends the
    ``OpenAI-Beta: assistants=v2`` header.
    """
    @cached_property
    def files(self) -> AsyncFiles:
        # Sub-resource for individual files attached to a vector store.
        return AsyncFiles(self._client)
    @cached_property
    def file_batches(self) -> AsyncFileBatches:
        # Sub-resource for batch file operations on a vector store.
        return AsyncFileBatches(self._client)
    @cached_property
    def with_raw_response(self) -> AsyncVectorStoresWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncVectorStoresWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> AsyncVectorStoresWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncVectorStoresWithStreamingResponse(self)
    async def create(
        self,
        *,
        chunking_strategy: FileChunkingStrategyParam | Omit = omit,
        description: str | Omit = omit,
        expires_after: vector_store_create_params.ExpiresAfter | Omit = omit,
        file_ids: SequenceNotStr[str] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        name: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStore:
        """
        Create a vector store.
        Args:
          chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
              strategy. Only applicable if `file_ids` is non-empty.
          description: A description for the vector store. Can be used to describe the vector store's
              purpose.
          expires_after: The expiration policy for a vector store.
          file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
              the vector store should use. Useful for tools like `file_search` that can access
              files.
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          name: The name of the vector store.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Vector store endpoints are gated behind the assistants v2 beta header.
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            "/vector_stores",
            body=await async_maybe_transform(
                {
                    "chunking_strategy": chunking_strategy,
                    "description": description,
                    "expires_after": expires_after,
                    "file_ids": file_ids,
                    "metadata": metadata,
                    "name": name,
                },
                vector_store_create_params.VectorStoreCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStore,
        )
    async def retrieve(
        self,
        vector_store_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStore:
        """
        Retrieves a vector store.
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._get(
            f"/vector_stores/{vector_store_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStore,
        )
    async def update(
        self,
        vector_store_id: str,
        *,
        expires_after: Optional[vector_store_update_params.ExpiresAfter] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        name: Optional[str] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStore:
        """
        Modifies a vector store.
        Args:
          expires_after: The expiration policy for a vector store.
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          name: The name of the vector store.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            f"/vector_stores/{vector_store_id}",
            body=await async_maybe_transform(
                {
                    "expires_after": expires_after,
                    "metadata": metadata,
                    "name": name,
                },
                vector_store_update_params.VectorStoreUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStore,
        )
    # NOTE: `list` is intentionally not `async def` — it returns an AsyncPaginator
    # that the caller awaits or iterates with `async for`.
    def list(
        self,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[VectorStore, AsyncCursorPage[VectorStore]]:
        """Returns a list of vector stores.
        Args:
          after: A cursor for use in pagination.
              `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.
          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.
          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.
          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get_api_list(
            "/vector_stores",
            page=AsyncCursorPage[VectorStore],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                    },
                    vector_store_list_params.VectorStoreListParams,
                ),
            ),
            model=VectorStore,
        )
    async def delete(
        self,
        vector_store_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VectorStoreDeleted:
        """
        Delete a vector store.
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._delete(
            f"/vector_stores/{vector_store_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VectorStoreDeleted,
        )
    # NOTE: like `list`, `search` returns an AsyncPaginator rather than being a coroutine.
    def search(
        self,
        vector_store_id: str,
        *,
        query: Union[str, SequenceNotStr[str]],
        filters: vector_store_search_params.Filters | Omit = omit,
        max_num_results: int | Omit = omit,
        ranking_options: vector_store_search_params.RankingOptions | Omit = omit,
        rewrite_query: bool | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[VectorStoreSearchResponse, AsyncPage[VectorStoreSearchResponse]]:
        """
        Search a vector store for relevant chunks based on a query and file attributes
        filter.
        Args:
          query: A query string for a search
          filters: A filter to apply based on file attributes.
          max_num_results: The maximum number of results to return. This number should be between 1 and 50
              inclusive.
          ranking_options: Ranking options for search.
          rewrite_query: Whether to rewrite the natural language query for vector search.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not vector_store_id:
            raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        # Search is a POST endpoint that nevertheless returns a paginated result set.
        return self._get_api_list(
            f"/vector_stores/{vector_store_id}/search",
            page=AsyncPage[VectorStoreSearchResponse],
            body=maybe_transform(
                {
                    "query": query,
                    "filters": filters,
                    "max_num_results": max_num_results,
                    "ranking_options": ranking_options,
                    "rewrite_query": rewrite_query,
                },
                vector_store_search_params.VectorStoreSearchParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            model=VectorStoreSearchResponse,
            method="post",
        )
class VectorStoresWithRawResponse:
    """Proxy over ``VectorStores`` whose method calls return the raw HTTP response
    object instead of parsed content; sub-resources are wrapped the same way."""
    def __init__(self, vector_stores: VectorStores) -> None:
        self._vector_stores = vector_stores
        # Each public method is re-exposed wrapped so it returns the raw response.
        self.create = _legacy_response.to_raw_response_wrapper(
            vector_stores.create,
        )
        self.retrieve = _legacy_response.to_raw_response_wrapper(
            vector_stores.retrieve,
        )
        self.update = _legacy_response.to_raw_response_wrapper(
            vector_stores.update,
        )
        self.list = _legacy_response.to_raw_response_wrapper(
            vector_stores.list,
        )
        self.delete = _legacy_response.to_raw_response_wrapper(
            vector_stores.delete,
        )
        self.search = _legacy_response.to_raw_response_wrapper(
            vector_stores.search,
        )
    @cached_property
    def files(self) -> FilesWithRawResponse:
        # Raw-response view of the files sub-resource.
        return FilesWithRawResponse(self._vector_stores.files)
    @cached_property
    def file_batches(self) -> FileBatchesWithRawResponse:
        # Raw-response view of the file-batches sub-resource.
        return FileBatchesWithRawResponse(self._vector_stores.file_batches)
class AsyncVectorStoresWithRawResponse:
    """Async counterpart of ``VectorStoresWithRawResponse``: wraps each
    ``AsyncVectorStores`` method so calls return the raw HTTP response."""
    def __init__(self, vector_stores: AsyncVectorStores) -> None:
        self._vector_stores = vector_stores
        # Each public method is re-exposed wrapped so it returns the raw response.
        self.create = _legacy_response.async_to_raw_response_wrapper(
            vector_stores.create,
        )
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
            vector_stores.retrieve,
        )
        self.update = _legacy_response.async_to_raw_response_wrapper(
            vector_stores.update,
        )
        self.list = _legacy_response.async_to_raw_response_wrapper(
            vector_stores.list,
        )
        self.delete = _legacy_response.async_to_raw_response_wrapper(
            vector_stores.delete,
        )
        self.search = _legacy_response.async_to_raw_response_wrapper(
            vector_stores.search,
        )
    @cached_property
    def files(self) -> AsyncFilesWithRawResponse:
        # Raw-response view of the files sub-resource.
        return AsyncFilesWithRawResponse(self._vector_stores.files)
    @cached_property
    def file_batches(self) -> AsyncFileBatchesWithRawResponse:
        # Raw-response view of the file-batches sub-resource.
        return AsyncFileBatchesWithRawResponse(self._vector_stores.file_batches)
class VectorStoresWithStreamingResponse:
    """Proxy over ``VectorStores`` whose method calls return streamed responses
    (the body is not eagerly read); sub-resources are wrapped the same way."""
    def __init__(self, vector_stores: VectorStores) -> None:
        self._vector_stores = vector_stores
        # Each public method is re-exposed wrapped so it returns a streamed response.
        self.create = to_streamed_response_wrapper(
            vector_stores.create,
        )
        self.retrieve = to_streamed_response_wrapper(
            vector_stores.retrieve,
        )
        self.update = to_streamed_response_wrapper(
            vector_stores.update,
        )
        self.list = to_streamed_response_wrapper(
            vector_stores.list,
        )
        self.delete = to_streamed_response_wrapper(
            vector_stores.delete,
        )
        self.search = to_streamed_response_wrapper(
            vector_stores.search,
        )
    @cached_property
    def files(self) -> FilesWithStreamingResponse:
        # Streaming-response view of the files sub-resource.
        return FilesWithStreamingResponse(self._vector_stores.files)
    @cached_property
    def file_batches(self) -> FileBatchesWithStreamingResponse:
        # Streaming-response view of the file-batches sub-resource.
        return FileBatchesWithStreamingResponse(self._vector_stores.file_batches)
class AsyncVectorStoresWithStreamingResponse:
    """Async counterpart of ``VectorStoresWithStreamingResponse``: wraps each
    ``AsyncVectorStores`` method so calls return streamed responses."""
    def __init__(self, vector_stores: AsyncVectorStores) -> None:
        self._vector_stores = vector_stores
        # Each public method is re-exposed wrapped so it returns a streamed response.
        self.create = async_to_streamed_response_wrapper(
            vector_stores.create,
        )
        self.retrieve = async_to_streamed_response_wrapper(
            vector_stores.retrieve,
        )
        self.update = async_to_streamed_response_wrapper(
            vector_stores.update,
        )
        self.list = async_to_streamed_response_wrapper(
            vector_stores.list,
        )
        self.delete = async_to_streamed_response_wrapper(
            vector_stores.delete,
        )
        self.search = async_to_streamed_response_wrapper(
            vector_stores.search,
        )
    @cached_property
    def files(self) -> AsyncFilesWithStreamingResponse:
        # Streaming-response view of the files sub-resource.
        return AsyncFilesWithStreamingResponse(self._vector_stores.files)
    @cached_property
    def file_batches(self) -> AsyncFileBatchesWithStreamingResponse:
        # Streaming-response view of the file-batches sub-resource.
        return AsyncFileBatchesWithStreamingResponse(self._vector_stores.file_batches)

View File

@@ -0,0 +1,850 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import TYPE_CHECKING, Mapping, cast
from typing_extensions import Literal, assert_never
import httpx
from .. import _legacy_response
from ..types import (
VideoSize,
VideoSeconds,
video_list_params,
video_remix_params,
video_create_params,
video_download_content_params,
)
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_streamed_response_wrapper,
async_to_streamed_response_wrapper,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ..pagination import SyncConversationCursorPage, AsyncConversationCursorPage
from ..types.video import Video
from .._base_client import AsyncPaginator, make_request_options
from .._utils._utils import is_given
from ..types.video_size import VideoSize
from ..types.video_seconds import VideoSeconds
from ..types.video_model_param import VideoModelParam
from ..types.video_delete_response import VideoDeleteResponse
__all__ = ["Videos", "AsyncVideos"]
class Videos(SyncAPIResource):
    """Synchronous client for the `/videos` REST endpoints (video generation),
    including the ``create_and_poll``/``poll`` convenience helpers."""
    @cached_property
    def with_raw_response(self) -> VideosWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return VideosWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> VideosWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return VideosWithStreamingResponse(self)
    def create(
        self,
        *,
        prompt: str,
        input_reference: FileTypes | Omit = omit,
        model: VideoModelParam | Omit = omit,
        seconds: VideoSeconds | Omit = omit,
        size: VideoSize | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """
        Create a video
        Args:
          prompt: Text prompt that describes the video to generate.
          input_reference: Optional image reference that guides generation.
          model: The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
              to `sora-2`.
          seconds: Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
          size: Output resolution formatted as width x height (allowed values: 720x1280,
              1280x720, 1024x1792, 1792x1024). Defaults to 720x1280.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        body = deepcopy_minimal(
            {
                "prompt": prompt,
                "input_reference": input_reference,
                "model": model,
                "seconds": seconds,
                "size": size,
            }
        )
        # Pull any file-like value out of the body so it can be sent as multipart.
        files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
        if files:
            # It should be noted that the actual Content-Type header that will be
            # sent to the server will contain a `boundary` parameter, e.g.
            # multipart/form-data; boundary=---abc--
            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return self._post(
            "/videos",
            body=maybe_transform(body, video_create_params.VideoCreateParams),
            files=files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Video,
        )
    def create_and_poll(
        self,
        *,
        prompt: str,
        input_reference: FileTypes | Omit = omit,
        model: VideoModelParam | Omit = omit,
        seconds: VideoSeconds | Omit = omit,
        size: VideoSize | Omit = omit,
        poll_interval_ms: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """Create a video and wait for it to be processed.

        Convenience wrapper: calls :meth:`create` and then :meth:`poll` on the
        returned video's id. See those methods for parameter documentation.
        """
        video = self.create(
            model=model,
            prompt=prompt,
            input_reference=input_reference,
            seconds=seconds,
            size=size,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self.poll(
            video.id,
            poll_interval_ms=poll_interval_ms,
        )
    def poll(
        self,
        video_id: str,
        *,
        poll_interval_ms: int | Omit = omit,
    ) -> Video:
        """Wait for the video to finish processing.

        Repeatedly retrieves the video until its status leaves
        `queued`/`in_progress`. Note: this will return even if generation
        failed — check ``video.status`` (`completed` vs `failed`) to handle
        these cases.
        """
        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
        if is_given(poll_interval_ms):
            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
        while True:
            response = self.with_raw_response.retrieve(
                video_id,
                extra_headers=headers,
            )
            video = response.parse()
            if video.status == "in_progress" or video.status == "queued":
                if not is_given(poll_interval_ms):
                    # Prefer the server-suggested cadence; fall back to 1s.
                    from_header = response.headers.get("openai-poll-after-ms")
                    if from_header is not None:
                        poll_interval_ms = int(from_header)
                    else:
                        poll_interval_ms = 1000
                self._sleep(poll_interval_ms / 1000)
            elif video.status == "completed" or video.status == "failed":
                return video
            else:
                if TYPE_CHECKING: # type: ignore[unreachable]
                    # Exhaustiveness check for the status union at type-check time.
                    assert_never(video.status)
                else:
                    # Unknown status at runtime: return rather than loop forever.
                    return video
    def retrieve(
        self,
        video_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """
        Retrieve a video
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        return self._get(
            f"/videos/{video_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Video,
        )
    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[Video]:
        """
        List videos
        Args:
          after: Identifier for the last item from the previous pagination request
          limit: Number of items to retrieve
          order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
              descending order.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get_api_list(
            "/videos",
            page=SyncConversationCursorPage[Video],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                    },
                    video_list_params.VideoListParams,
                ),
            ),
            model=Video,
        )
    def delete(
        self,
        video_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VideoDeleteResponse:
        """
        Delete a video
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        return self._delete(
            f"/videos/{video_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VideoDeleteResponse,
        )
    def download_content(
        self,
        video_id: str,
        *,
        variant: Literal["video", "thumbnail", "spritesheet"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """Download video content
        Args:
          variant: Which downloadable asset to return.
              Defaults to the MP4 video.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        # Request the raw binary payload rather than a JSON document.
        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
        return self._get(
            f"/videos/{video_id}/content",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform({"variant": variant}, video_download_content_params.VideoDownloadContentParams),
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
    def remix(
        self,
        video_id: str,
        *,
        prompt: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """
        Create a video remix
        Args:
          prompt: Updated text prompt that directs the remix generation.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        return self._post(
            f"/videos/{video_id}/remix",
            body=maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Video,
        )
class AsyncVideos(AsyncAPIResource):
    """Async API resource for videos: create, poll, retrieve, list, delete,
    download content, and remix."""

    @cached_property
    def with_raw_response(self) -> AsyncVideosWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncVideosWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncVideosWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncVideosWithStreamingResponse(self)

    async def create(
        self,
        *,
        prompt: str,
        input_reference: FileTypes | Omit = omit,
        model: VideoModelParam | Omit = omit,
        seconds: VideoSeconds | Omit = omit,
        size: VideoSize | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """
        Create a video

        Args:
          prompt: Text prompt that describes the video to generate.

          input_reference: Optional image reference that guides generation.

          model: The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
              to `sora-2`.

          seconds: Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.

          size: Output resolution formatted as width x height (allowed values: 720x1280,
              1280x720, 1024x1792, 1792x1024). Defaults to 720x1280.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        body = deepcopy_minimal(
            {
                "prompt": prompt,
                "input_reference": input_reference,
                "model": model,
                "seconds": seconds,
                "size": size,
            }
        )
        # Pull file-like values (the optional image reference) out of the body so
        # they can be sent as multipart parts instead of JSON.
        files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
        if files:
            # It should be noted that the actual Content-Type header that will be
            # sent to the server will contain a `boundary` parameter, e.g.
            # multipart/form-data; boundary=---abc--
            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return await self._post(
            "/videos",
            body=await async_maybe_transform(body, video_create_params.VideoCreateParams),
            files=files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Video,
        )

    async def create_and_poll(
        self,
        *,
        prompt: str,
        input_reference: FileTypes | Omit = omit,
        model: VideoModelParam | Omit = omit,
        seconds: VideoSeconds | Omit = omit,
        size: VideoSize | Omit = omit,
        poll_interval_ms: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """Create a video and wait for it to be processed.

        Convenience helper equivalent to calling `create(...)` and then
        `poll(...)` on the returned video's id.
        """
        video = await self.create(
            model=model,
            prompt=prompt,
            input_reference=input_reference,
            seconds=seconds,
            size=size,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )

        return await self.poll(
            video.id,
            poll_interval_ms=poll_interval_ms,
        )

    async def poll(
        self,
        video_id: str,
        *,
        poll_interval_ms: int | Omit = omit,
    ) -> Video:
        """Wait for the video to finish processing.

        Note: this returns even if generation failed — check `video.status`
        (`"completed"` vs `"failed"`) to handle that case.
        """
        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
        if is_given(poll_interval_ms):
            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)

        while True:
            response = await self.with_raw_response.retrieve(
                video_id,
                extra_headers=headers,
            )

            video = response.parse()
            if video.status == "in_progress" or video.status == "queued":
                # No explicit interval given: prefer the server-suggested one
                # from the `openai-poll-after-ms` header, else fall back to 1s.
                if not is_given(poll_interval_ms):
                    from_header = response.headers.get("openai-poll-after-ms")
                    if from_header is not None:
                        poll_interval_ms = int(from_header)
                    else:
                        poll_interval_ms = 1000

                await self._sleep(poll_interval_ms / 1000)
            elif video.status == "completed" or video.status == "failed":
                return video
            else:
                # Static exhaustiveness check only; at runtime an unknown status
                # is returned as-is so new server states don't break the helper.
                if TYPE_CHECKING:  # type: ignore[unreachable]
                    assert_never(video.status)
                else:
                    return video

    async def retrieve(
        self,
        video_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """
        Retrieve a video

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        return await self._get(
            f"/videos/{video_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Video,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[Video, AsyncConversationCursorPage[Video]]:
        """
        List videos

        Note: not a coroutine — returns an async paginator that is awaited/
        iterated by the caller.

        Args:
          after: Identifier for the last item from the previous pagination request

          limit: Number of items to retrieve

          order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
              descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get_api_list(
            "/videos",
            page=AsyncConversationCursorPage[Video],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                    },
                    video_list_params.VideoListParams,
                ),
            ),
            model=Video,
        )

    async def delete(
        self,
        video_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VideoDeleteResponse:
        """
        Delete a video

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        return await self._delete(
            f"/videos/{video_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VideoDeleteResponse,
        )

    async def download_content(
        self,
        video_id: str,
        *,
        variant: Literal["video", "thumbnail", "spritesheet"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """Download video content

        Args:
          variant: Which downloadable asset to return. Defaults to the MP4 video.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        # The response is raw binary, not JSON.
        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
        return await self._get(
            f"/videos/{video_id}/content",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=await async_maybe_transform(
                    {"variant": variant}, video_download_content_params.VideoDownloadContentParams
                ),
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )

    async def remix(
        self,
        video_id: str,
        *,
        prompt: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """
        Create a video remix

        Args:
          prompt: Updated text prompt that directs the remix generation.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        return await self._post(
            f"/videos/{video_id}/remix",
            body=await async_maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Video,
        )
class VideosWithRawResponse:
    """Exposes every `Videos` endpoint wrapped to return the raw HTTP response."""

    def __init__(self, videos: Videos) -> None:
        self._videos = videos

        # Wrap each endpoint method with the raw-response wrapper.
        _wrap = _legacy_response.to_raw_response_wrapper
        self.create = _wrap(videos.create)
        self.retrieve = _wrap(videos.retrieve)
        self.list = _wrap(videos.list)
        self.delete = _wrap(videos.delete)
        self.download_content = _wrap(videos.download_content)
        self.remix = _wrap(videos.remix)
class AsyncVideosWithRawResponse:
    """Exposes every `AsyncVideos` endpoint wrapped to return the raw HTTP response."""

    def __init__(self, videos: AsyncVideos) -> None:
        self._videos = videos

        # Wrap each endpoint method with the async raw-response wrapper.
        _wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = _wrap(videos.create)
        self.retrieve = _wrap(videos.retrieve)
        self.list = _wrap(videos.list)
        self.delete = _wrap(videos.delete)
        self.download_content = _wrap(videos.download_content)
        self.remix = _wrap(videos.remix)
class VideosWithStreamingResponse:
    """Exposes every `Videos` endpoint wrapped for streamed-response access."""

    def __init__(self, videos: Videos) -> None:
        self._videos = videos

        _wrap = to_streamed_response_wrapper
        self.create = _wrap(videos.create)
        self.retrieve = _wrap(videos.retrieve)
        self.list = _wrap(videos.list)
        self.delete = _wrap(videos.delete)
        # The binary download endpoint needs the custom wrapper so the body
        # streams as binary content.
        self.download_content = to_custom_streamed_response_wrapper(
            videos.download_content,
            StreamedBinaryAPIResponse,
        )
        self.remix = _wrap(videos.remix)
class AsyncVideosWithStreamingResponse:
    """Exposes every `AsyncVideos` endpoint wrapped for streamed-response access."""

    def __init__(self, videos: AsyncVideos) -> None:
        self._videos = videos

        _wrap = async_to_streamed_response_wrapper
        self.create = _wrap(videos.create)
        self.retrieve = _wrap(videos.retrieve)
        self.list = _wrap(videos.list)
        self.delete = _wrap(videos.delete)
        # The binary download endpoint needs the custom wrapper so the body
        # streams as binary content.
        self.download_content = async_to_custom_streamed_response_wrapper(
            videos.download_content,
            AsyncStreamedBinaryAPIResponse,
        )
        self.remix = _wrap(videos.remix)

View File

@@ -0,0 +1,210 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import hmac
import json
import time
import base64
import hashlib
from typing import cast
from .._types import HeadersLike
from .._utils import get_required_header
from .._models import construct_type
from .._resource import SyncAPIResource, AsyncAPIResource
from .._exceptions import InvalidWebhookSignatureError
from ..types.webhooks.unwrap_webhook_event import UnwrapWebhookEvent
__all__ = ["Webhooks", "AsyncWebhooks"]
class Webhooks(SyncAPIResource):
    """Helpers for verifying and parsing OpenAI webhook deliveries."""

    def unwrap(
        self,
        payload: str | bytes,
        headers: HeadersLike,
        *,
        secret: str | None = None,
    ) -> UnwrapWebhookEvent:
        """Validates that the given payload was sent by OpenAI and parses the payload.

        Args:
            payload: Raw request body as received.
            headers: The webhook request headers.
            secret: Webhook secret; falls back to the client's configured secret.

        Raises:
            InvalidWebhookSignatureError: Propagated from `verify_signature` when
                the signature or timestamp check fails.
        """
        if secret is None:
            secret = self._client.webhook_secret

        # Verify first so unauthenticated payloads are never parsed.
        self.verify_signature(payload=payload, headers=headers, secret=secret)

        return cast(
            UnwrapWebhookEvent,
            construct_type(
                type_=UnwrapWebhookEvent,
                value=json.loads(payload),
            ),
        )

    def verify_signature(
        self,
        payload: str | bytes,
        headers: HeadersLike,
        *,
        secret: str | None = None,
        tolerance: int = 300,
    ) -> None:
        """Validates whether or not the webhook payload was sent by OpenAI.

        Args:
            payload: The webhook payload
            headers: The webhook headers
            secret: The webhook secret (optional, will use client secret if not provided)
            tolerance: Maximum age of the webhook in seconds (default: 300 = 5 minutes)

        Raises:
            ValueError: If no webhook secret is available.
            InvalidWebhookSignatureError: If the timestamp is malformed or outside
                the tolerance window, or if no provided signature matches.
        """
        if secret is None:
            secret = self._client.webhook_secret

        if secret is None:
            raise ValueError(
                "The webhook secret must either be set using the env var, OPENAI_WEBHOOK_SECRET, "
                "on the client class, OpenAI(webhook_secret='123'), or passed to this function"
            )

        signature_header = get_required_header(headers, "webhook-signature")
        timestamp = get_required_header(headers, "webhook-timestamp")
        webhook_id = get_required_header(headers, "webhook-id")

        # Validate timestamp to prevent replay attacks
        try:
            timestamp_seconds = int(timestamp)
        except ValueError:
            raise InvalidWebhookSignatureError("Invalid webhook timestamp format") from None

        now = int(time.time())

        if now - timestamp_seconds > tolerance:
            raise InvalidWebhookSignatureError("Webhook timestamp is too old") from None

        if timestamp_seconds > now + tolerance:
            raise InvalidWebhookSignatureError("Webhook timestamp is too new") from None

        # Extract signatures from v1,<base64> format
        # The signature header can have multiple values, separated by spaces.
        # Each value is in the format v1,<base64>. We should accept if any match.
        signatures: list[str] = []
        for part in signature_header.split():
            if part.startswith("v1,"):
                signatures.append(part[3:])
            else:
                # Unprefixed values are treated as bare base64 signatures.
                signatures.append(part)

        # Decode the secret if it starts with whsec_
        if secret.startswith("whsec_"):
            decoded_secret = base64.b64decode(secret[6:])
        else:
            decoded_secret = secret.encode()

        body = payload.decode("utf-8") if isinstance(payload, bytes) else payload

        # Prepare the signed payload (OpenAI uses webhookId.timestamp.payload format)
        signed_payload = f"{webhook_id}.{timestamp}.{body}"
        expected_signature = base64.b64encode(
            hmac.new(decoded_secret, signed_payload.encode(), hashlib.sha256).digest()
        ).decode()

        # Accept if any signature matches; `compare_digest` performs a
        # constant-time comparison to mitigate timing attacks.
        if not any(hmac.compare_digest(expected_signature, sig) for sig in signatures):
            raise InvalidWebhookSignatureError(
                "The given webhook signature does not match the expected signature"
            ) from None
class AsyncWebhooks(AsyncAPIResource):
    """Async counterpart of `Webhooks`.

    Verification is pure CPU work, so the methods are plain (non-async) even
    though the resource is attached to the async client.
    """

    def unwrap(
        self,
        payload: str | bytes,
        headers: HeadersLike,
        *,
        secret: str | None = None,
    ) -> UnwrapWebhookEvent:
        """Validates that the given payload was sent by OpenAI and parses the payload.

        Args:
            payload: Raw request body as received.
            headers: The webhook request headers.
            secret: Webhook secret; falls back to the client's configured secret.

        Raises:
            InvalidWebhookSignatureError: Propagated from `verify_signature` when
                the signature or timestamp check fails.
        """
        if secret is None:
            secret = self._client.webhook_secret

        # Verify first so unauthenticated payloads are never parsed.
        self.verify_signature(payload=payload, headers=headers, secret=secret)

        body = payload.decode("utf-8") if isinstance(payload, bytes) else payload
        return cast(
            UnwrapWebhookEvent,
            construct_type(
                type_=UnwrapWebhookEvent,
                value=json.loads(body),
            ),
        )

    def verify_signature(
        self,
        payload: str | bytes,
        headers: HeadersLike,
        *,
        secret: str | None = None,
        tolerance: int = 300,
    ) -> None:
        """Validates whether or not the webhook payload was sent by OpenAI.

        Args:
            payload: The webhook payload
            headers: The webhook headers
            secret: The webhook secret (optional, will use client secret if not provided)
            tolerance: Maximum age of the webhook in seconds (default: 300 = 5 minutes)

        Raises:
            ValueError: If no webhook secret is available.
            InvalidWebhookSignatureError: If the timestamp is malformed or outside
                the tolerance window, or if no provided signature matches.
        """
        if secret is None:
            secret = self._client.webhook_secret

        if secret is None:
            # No `from None` here: there is no active exception context to
            # suppress, and this matches the sync `Webhooks` implementation.
            raise ValueError(
                "The webhook secret must either be set using the env var, OPENAI_WEBHOOK_SECRET, "
                "on the client class, OpenAI(webhook_secret='123'), or passed to this function"
            )

        signature_header = get_required_header(headers, "webhook-signature")
        timestamp = get_required_header(headers, "webhook-timestamp")
        webhook_id = get_required_header(headers, "webhook-id")

        # Validate timestamp to prevent replay attacks
        try:
            timestamp_seconds = int(timestamp)
        except ValueError:
            raise InvalidWebhookSignatureError("Invalid webhook timestamp format") from None

        now = int(time.time())

        if now - timestamp_seconds > tolerance:
            raise InvalidWebhookSignatureError("Webhook timestamp is too old") from None

        if timestamp_seconds > now + tolerance:
            raise InvalidWebhookSignatureError("Webhook timestamp is too new") from None

        # Extract signatures from v1,<base64> format
        # The signature header can have multiple values, separated by spaces.
        # Each value is in the format v1,<base64>. We should accept if any match.
        signatures: list[str] = []
        for part in signature_header.split():
            if part.startswith("v1,"):
                signatures.append(part[3:])
            else:
                # Unprefixed values are treated as bare base64 signatures.
                signatures.append(part)

        # Decode the secret if it starts with whsec_
        if secret.startswith("whsec_"):
            decoded_secret = base64.b64decode(secret[6:])
        else:
            decoded_secret = secret.encode()

        body = payload.decode("utf-8") if isinstance(payload, bytes) else payload

        # Prepare the signed payload (OpenAI uses webhookId.timestamp.payload format)
        signed_payload = f"{webhook_id}.{timestamp}.{body}"
        expected_signature = base64.b64encode(
            hmac.new(decoded_secret, signed_payload.encode(), hashlib.sha256).digest()
        ).decode()

        # Accept if any signature matches; `compare_digest` performs a
        # constant-time comparison to mitigate timing attacks.
        # `from None` matches the sync `Webhooks` implementation.
        if not any(hmac.compare_digest(expected_signature, sig) for sig in signatures):
            raise InvalidWebhookSignatureError(
                "The given webhook signature does not match the expected signature"
            ) from None