コンテンツへスキップ

Model

TTSVoice module-attribute

# Exportable type for the `TTSModelSettings.voice` enum: the set of named
# voices accepted by the TTS model.
TTSVoice = Literal[
    "alloy",
    "ash",
    "coral",
    "echo",
    "fable",
    "onyx",
    "nova",
    "sage",
    "shimmer",
]

Exportable type for the TTSModelSettings voice enum

TTSModelSettings dataclass

Settings for a TTS model.

ソースコード位置: src/agents/voice/model.py
@dataclass
class TTSModelSettings:
    """Settings for a TTS model."""

    voice: TTSVoice | None = None
    """
    The voice to use for the TTS model. If not provided, the default voice for the respective model
    will be used.
    """

    buffer_size: int = 120
    """The minimal size of the chunks of audio data that are being streamed out."""

    dtype: npt.DTypeLike = np.int16
    """The data type for the audio data to be returned in."""

    transform_data: (
        Callable[[npt.NDArray[np.int16 | np.float32]], npt.NDArray[np.int16 | np.float32]] | None
    ) = None
    """
    A function to transform the data from the TTS model. This is useful if you want the resulting
    audio stream to have the data in a specific shape already.
    """

    instructions: str = (
        "You will receive partial sentences. Do not complete the sentence just read out the text."
    )
    """
    The instructions to use for the TTS model. This is useful if you want to control the tone of the
    audio output.
    """

    text_splitter: Callable[[str], tuple[str, str]] = get_sentence_based_splitter()
    """
    A function to split the text into chunks. This is useful if you want to split the text into
    chunks before sending it to the TTS model rather than waiting for the whole text to be
    processed.
    """

    speed: float | None = None
    """The speed with which the TTS model will read the text. Between 0.25 and 4.0."""

voice class-attribute instance-attribute

voice: TTSVoice | None = None

The voice to use for the TTS model. If not provided, the default voice for the respective model will be used.

buffer_size class-attribute instance-attribute

buffer_size: int = 120

The minimal size of the chunks of audio data that are being streamed out.

dtype class-attribute instance-attribute

dtype: DTypeLike = int16

The data type for the audio data to be returned in.

transform_data class-attribute instance-attribute

transform_data: (
    Callable[
        [NDArray[int16 | float32]], NDArray[int16 | float32]
    ]
    | None
) = None

A function to transform the data from the TTS model. This is useful if you want the resulting audio stream to have the data in a specific shape already.

instructions class-attribute instance-attribute

instructions: str = "You will receive partial sentences. Do not complete the sentence just read out the text."

The instructions to use for the TTS model. This is useful if you want to control the tone of the audio output.

text_splitter class-attribute instance-attribute

text_splitter: Callable[[str], tuple[str, str]] = (
    get_sentence_based_splitter()
)

A function to split the text into chunks. This is useful if you want to split the text into chunks before sending it to the TTS model rather than waiting for the whole text to be processed.

speed class-attribute instance-attribute

speed: float | None = None

The speed with which the TTS model will read the text. Between 0.25 and 4.0.

TTSModel

Bases: ABC

A text-to-speech model that can convert text into audio output.

ソースコード位置: src/agents/voice/model.py
class TTSModel(abc.ABC):
    """A text-to-speech model that can convert text into audio output."""

    @property
    @abc.abstractmethod
    def model_name(self) -> str:
        """The name of the TTS model."""
        pass

    @abc.abstractmethod
    def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]:
        """Given a text string, produces a stream of audio bytes, in PCM format.

        Args:
            text: The text to convert to audio.
            settings: The settings to use for the TTS generation (voice, speed, dtype, etc.).

        Returns:
            An async iterator of audio bytes, in PCM format.
        """
        pass

model_name abstractmethod property

model_name: str

The name of the TTS model.

run abstractmethod

run(
    text: str, settings: TTSModelSettings
) -> AsyncIterator[bytes]

Given a text string, produces a stream of audio bytes, in PCM format.

引数:

名前 タイプ デスクリプション デフォルト
text str

The text to convert to audio.

必須

戻り値:

タイプ デスクリプション
AsyncIterator[bytes]

An async iterator of audio bytes, in PCM format.

ソースコード位置: src/agents/voice/model.py
@abc.abstractmethod
def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]:
    """Given a text string, produces a stream of audio bytes, in PCM format.

    Args:
        text: The text to convert to audio.
        settings: The settings to use for the TTS generation (voice, speed, dtype, etc.).

    Returns:
        An async iterator of audio bytes, in PCM format.
    """
    pass

StreamedTranscriptionSession

Bases: ABC

A streamed transcription of audio input.

ソースコード位置: src/agents/voice/model.py
class StreamedTranscriptionSession(abc.ABC):
    """A streamed transcription of audio input."""

    @abc.abstractmethod
    def transcribe_turns(self) -> AsyncIterator[str]:
        """Yields a stream of text transcriptions. Each transcription is a turn in the conversation.

        This method is expected to return only after `close()` is called.

        Returns:
            An async iterator of transcribed turns, each a plain text string.
        """
        pass

    @abc.abstractmethod
    async def close(self) -> None:
        """Closes the session; after this, the `transcribe_turns()` stream is expected to end."""
        pass

transcribe_turns abstractmethod

transcribe_turns() -> AsyncIterator[str]

Yields a stream of text transcriptions. Each transcription is a turn in the conversation.

This method is expected to return only after close() is called.

ソースコード位置: src/agents/voice/model.py
@abc.abstractmethod
def transcribe_turns(self) -> AsyncIterator[str]:
    """Yields a stream of text transcriptions. Each transcription is a turn in the conversation.

    This method is expected to return only after `close()` is called.

    Returns:
        An async iterator of transcribed turns, each a plain text string.
    """
    pass

close abstractmethod async

close() -> None

Closes the session.

ソースコード位置: src/agents/voice/model.py
@abc.abstractmethod
async def close(self) -> None:
    """Closes the session; after this, the `transcribe_turns()` stream is expected to end."""
    pass

STTModelSettings dataclass

Settings for a speech-to-text model.

ソースコード位置: src/agents/voice/model.py
@dataclass
class STTModelSettings:
    """Settings for a speech-to-text model."""

    prompt: str | None = None
    """Instructions for the model to follow."""

    language: str | None = None
    """The language of the audio input."""

    temperature: float | None = None
    """The temperature of the model."""

    turn_detection: dict[str, Any] | None = None
    """The turn detection settings for the model when using streamed audio input."""

prompt class-attribute instance-attribute

prompt: str | None = None

Instructions for the model to follow.

language class-attribute instance-attribute

language: str | None = None

The language of the audio input.

temperature class-attribute instance-attribute

temperature: float | None = None

The temperature of the model.

turn_detection class-attribute instance-attribute

turn_detection: dict[str, Any] | None = None

The turn detection settings for the model when using streamed audio input.

STTModel

Bases: ABC

A speech-to-text model that can convert audio input into text.

ソースコード位置: src/agents/voice/model.py
class STTModel(abc.ABC):
    """A speech-to-text model that can convert audio input into text.

    Offers two modes: one-shot transcription of a complete `AudioInput` via
    `transcribe()`, and incremental transcription of a `StreamedAudioInput`
    via `create_session()`.
    """

    @property
    @abc.abstractmethod
    def model_name(self) -> str:
        """The name of the STT model."""
        pass

    @abc.abstractmethod
    async def transcribe(
        self,
        input: AudioInput,
        settings: STTModelSettings,
        trace_include_sensitive_data: bool,
        trace_include_sensitive_audio_data: bool,
    ) -> str:
        """Given an audio input, produces a text transcription.

        Args:
            input: The audio input to transcribe.
            settings: The settings to use for the transcription.
            trace_include_sensitive_data: Whether to include sensitive data in traces.
            trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces.

        Returns:
            The text transcription of the audio input.
        """
        pass

    @abc.abstractmethod
    async def create_session(
        self,
        input: StreamedAudioInput,
        settings: STTModelSettings,
        trace_include_sensitive_data: bool,
        trace_include_sensitive_audio_data: bool,
    ) -> StreamedTranscriptionSession:
        """Creates a new transcription session, which you can push audio to, and receive a stream
        of text transcriptions.

        Args:
            input: The streamed audio input to transcribe.
            settings: The settings to use for the transcription.
            trace_include_sensitive_data: Whether to include sensitive data in traces.
            trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces.

        Returns:
            A new transcription session.
        """
        pass

model_name abstractmethod property

model_name: str

The name of the STT model.

transcribe abstractmethod async

transcribe(
    input: AudioInput,
    settings: STTModelSettings,
    trace_include_sensitive_data: bool,
    trace_include_sensitive_audio_data: bool,
) -> str

Given an audio input, produces a text transcription.

引数:

名前 タイプ デスクリプション デフォルト
input AudioInput

The audio input to transcribe.

必須
settings STTModelSettings

The settings to use for the transcription.

必須
trace_include_sensitive_data bool

Whether to include sensitive data in traces.

必須
trace_include_sensitive_audio_data bool

Whether to include sensitive audio data in traces.

必須

戻り値:

タイプ デスクリプション
str

The text transcription of the audio input.

ソースコード位置: src/agents/voice/model.py
@abc.abstractmethod
async def transcribe(
    self,
    input: AudioInput,
    settings: STTModelSettings,
    trace_include_sensitive_data: bool,
    trace_include_sensitive_audio_data: bool,
) -> str:
    """Given an audio input, produces a text transcription.

    This is the one-shot API; for incremental transcription of streamed audio,
    use `create_session` instead.

    Args:
        input: The audio input to transcribe.
        settings: The settings to use for the transcription.
        trace_include_sensitive_data: Whether to include sensitive data in traces.
        trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces.

    Returns:
        The text transcription of the audio input.
    """
    pass

create_session abstractmethod async

create_session(
    input: StreamedAudioInput,
    settings: STTModelSettings,
    trace_include_sensitive_data: bool,
    trace_include_sensitive_audio_data: bool,
) -> StreamedTranscriptionSession

Creates a new transcription session, which you can push audio to, and receive a stream of text transcriptions.

引数:

名前 タイプ デスクリプション デフォルト
input StreamedAudioInput

The audio input to transcribe.

必須
settings STTModelSettings

The settings to use for the transcription.

必須
trace_include_sensitive_data bool

Whether to include sensitive data in traces.

必須
trace_include_sensitive_audio_data bool

Whether to include sensitive audio data in traces.

必須

戻り値:

タイプ デスクリプション
StreamedTranscriptionSession

A new transcription session.

ソースコード位置: src/agents/voice/model.py
@abc.abstractmethod
async def create_session(
    self,
    input: StreamedAudioInput,
    settings: STTModelSettings,
    trace_include_sensitive_data: bool,
    trace_include_sensitive_audio_data: bool,
) -> StreamedTranscriptionSession:
    """Creates a new transcription session, which you can push audio to, and receive a stream
    of text transcriptions.

    Args:
        input: The streamed audio input to transcribe.
        settings: The settings to use for the transcription.
        trace_include_sensitive_data: Whether to include sensitive data in traces.
        trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces.

    Returns:
        A new transcription session.
    """
    pass

VoiceModelProvider

Bases: ABC

The base interface for a voice model provider.

A model provider is responsible for creating speech-to-text and text-to-speech models, given a name.

ソースコード位置: src/agents/voice/model.py
class VoiceModelProvider(abc.ABC):
    """The base interface for a voice model provider.

    A model provider is responsible for creating speech-to-text and text-to-speech models, given a
    name.
    """

    @abc.abstractmethod
    def get_stt_model(self, model_name: str | None) -> STTModel:
        """Get a speech-to-text model by name.

        Args:
            model_name: The name of the model to get. If None, presumably the provider's default
                model is returned — confirm with the concrete provider.

        Returns:
            The speech-to-text model.
        """
        pass

    @abc.abstractmethod
    def get_tts_model(self, model_name: str | None) -> TTSModel:
        """Get a text-to-speech model by name.

        Args:
            model_name: The name of the model to get. If None, presumably the provider's default
                model is returned — confirm with the concrete provider.

        Returns:
            The text-to-speech model.
        """

get_stt_model abstractmethod

get_stt_model(model_name: str | None) -> STTModel

Get a speech-to-text model by name.

引数:

名前 タイプ デスクリプション デフォルト
model_name str | None

The name of the model to get.

必須

戻り値:

タイプ デスクリプション
STTModel

The speech-to-text model.

ソースコード位置: src/agents/voice/model.py
@abc.abstractmethod
def get_stt_model(self, model_name: str | None) -> STTModel:
    """Get a speech-to-text model by name.

    Args:
        model_name: The name of the model to get. If None, presumably the provider's default
            model is returned — confirm with the concrete provider.

    Returns:
        The speech-to-text model.
    """
    pass

get_tts_model abstractmethod

get_tts_model(model_name: str | None) -> TTSModel

Get a text-to-speech model by name.

ソースコード位置: src/agents/voice/model.py
@abc.abstractmethod
def get_tts_model(self, model_name: str | None) -> TTSModel:
    """Get a text-to-speech model by name.

    Args:
        model_name: The name of the model to get. If None, presumably the provider's default
            model is returned — confirm with the concrete provider.

    Returns:
        The text-to-speech model.
    """