code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
async def get_signed_url(
self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None
) -> ConversationSignedUrlResponseModel:
"""
Get a signed URL to start a conversation with an agent that requires authorization
Parameters
----------
agent_id : str
The id of the agent you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
ConversationSignedUrlResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.conversations.get_signed_url(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
"""
_response = await self._raw_client.get_signed_url(agent_id=agent_id, request_options=request_options)
return _response.data
|
Get a signed URL to start a conversation with an agent that requires authorization
Parameters
----------
agent_id : str
The id of the agent you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
ConversationSignedUrlResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.conversations.get_signed_url(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
|
get_signed_url
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/client.py
|
MIT
|
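The docstring above only shows the async client; a minimal sync sketch may be useful alongside it. It assumes the synchronous `ElevenLabs` client exposes the same `conversational_ai.conversations.get_signed_url` method and that the response model carries the URL in a `signed_url` field (both assumptions, not confirmed by this row).

```python
from elevenlabs import ElevenLabs

# Placeholder API key and agent ID, purely for illustration.
client = ElevenLabs(api_key="YOUR_API_KEY")

# Returns a ConversationSignedUrlResponseModel; the `signed_url` attribute
# name is assumed here.
response = client.conversational_ai.conversations.get_signed_url(
    agent_id="21m00Tcm4TlvDq8ikWAM",
)
print(response.signed_url)
```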
async def list(
self,
*,
cursor: typing.Optional[str] = None,
agent_id: typing.Optional[str] = None,
call_successful: typing.Optional[EvaluationSuccessResult] = None,
call_start_before_unix: typing.Optional[int] = None,
call_start_after_unix: typing.Optional[int] = None,
page_size: typing.Optional[int] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetConversationsPageResponseModel:
"""
Get all conversations of agents that the user owns, with the option to restrict to a specific agent.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
agent_id : typing.Optional[str]
The id of the agent you're taking the action on.
call_successful : typing.Optional[EvaluationSuccessResult]
The result of the success evaluation
call_start_before_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations up to this start date.
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after this start date.
page_size : typing.Optional[int]
How many conversations to return at maximum. Cannot exceed 100; defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConversationsPageResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.conversations.list()
asyncio.run(main())
"""
_response = await self._raw_client.list(
cursor=cursor,
agent_id=agent_id,
call_successful=call_successful,
call_start_before_unix=call_start_before_unix,
call_start_after_unix=call_start_after_unix,
page_size=page_size,
request_options=request_options,
)
return _response.data
|
Get all conversations of agents that the user owns, with the option to restrict to a specific agent.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
agent_id : typing.Optional[str]
The id of the agent you're taking the action on.
call_successful : typing.Optional[EvaluationSuccessResult]
The result of the success evaluation
call_start_before_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations up to this start date.
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after this start date.
page_size : typing.Optional[int]
How many conversations to return at maximum. Cannot exceed 100; defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConversationsPageResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.conversations.list()
asyncio.run(main())
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/client.py
|
MIT
|
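Since `list` is cursor-paginated, a short sketch of walking every page may help; it assumes `GetConversationsPageResponseModel` exposes `conversations`, `has_more`, and `next_cursor` fields (assumed names, inferred from the cursor behaviour described above).

```python
import asyncio

from elevenlabs import AsyncElevenLabs


async def main() -> None:
    client = AsyncElevenLabs(api_key="YOUR_API_KEY")
    cursor = None
    while True:
        # page_size cannot exceed 100; the cursor returned by the previous
        # response is fed back in to fetch the next page.
        page = await client.conversational_ai.conversations.list(
            cursor=cursor,
            page_size=100,
        )
        for conversation in page.conversations:
            print(conversation.conversation_id)  # assumed field name
        if not page.has_more:  # assumed field name
            break
        cursor = page.next_cursor  # assumed field name


asyncio.run(main())
```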
async def get(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetConversationResponseModel:
"""
Get the details of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConversationResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.conversations.get(
conversation_id="123",
)
asyncio.run(main())
"""
_response = await self._raw_client.get(conversation_id, request_options=request_options)
return _response.data
|
Get the details of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConversationResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.conversations.get(
conversation_id="123",
)
asyncio.run(main())
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/client.py
|
MIT
|
async def delete(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.Optional[typing.Any]:
"""
Delete a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.conversations.delete(
conversation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
"""
_response = await self._raw_client.delete(conversation_id, request_options=request_options)
return _response.data
|
Delete a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.conversations.delete(
conversation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/client.py
|
MIT
|
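`list` and `delete` compose naturally, for example to clear out a single agent's conversations. A sketch under the assumption that each item in the page's `conversations` list carries a `conversation_id` field (the agent ID is a placeholder):

```python
import asyncio

from elevenlabs import AsyncElevenLabs


async def main() -> None:
    client = AsyncElevenLabs(api_key="YOUR_API_KEY")
    # Fetch one page of conversations for the agent, then delete each of them.
    page = await client.conversational_ai.conversations.list(agent_id="YOUR_AGENT_ID")
    for conversation in page.conversations:  # assumed field name
        await client.conversational_ai.conversations.delete(
            conversation_id=conversation.conversation_id,  # assumed field name
        )


asyncio.run(main())
```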
def get_signed_url(
self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[ConversationSignedUrlResponseModel]:
"""
Get a signed URL to start a conversation with an agent that requires authorization
Parameters
----------
agent_id : str
The id of the agent you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[ConversationSignedUrlResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/conversation/get-signed-url",
base_url=self._client_wrapper.get_environment().base,
method="GET",
params={
"agent_id": agent_id,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
ConversationSignedUrlResponseModel,
construct_type(
type_=ConversationSignedUrlResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get a signed URL to start a conversation with an agent that requires authorization
Parameters
----------
agent_id : str
The id of the agent you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[ConversationSignedUrlResponseModel]
Successful Response
|
get_signed_url
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/raw_client.py
|
MIT
|
def list(
self,
*,
cursor: typing.Optional[str] = None,
agent_id: typing.Optional[str] = None,
call_successful: typing.Optional[EvaluationSuccessResult] = None,
call_start_before_unix: typing.Optional[int] = None,
call_start_after_unix: typing.Optional[int] = None,
page_size: typing.Optional[int] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[GetConversationsPageResponseModel]:
"""
Get all conversations of agents that the user owns, with the option to restrict to a specific agent.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
agent_id : typing.Optional[str]
The id of the agent you're taking the action on.
call_successful : typing.Optional[EvaluationSuccessResult]
The result of the success evaluation
call_start_before_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations up to this start date.
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after this start date.
page_size : typing.Optional[int]
How many conversations to return at maximum. Cannot exceed 100; defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetConversationsPageResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/conversations",
base_url=self._client_wrapper.get_environment().base,
method="GET",
params={
"cursor": cursor,
"agent_id": agent_id,
"call_successful": call_successful,
"call_start_before_unix": call_start_before_unix,
"call_start_after_unix": call_start_after_unix,
"page_size": page_size,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetConversationsPageResponseModel,
construct_type(
type_=GetConversationsPageResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get all conversations of agents that the user owns, with the option to restrict to a specific agent.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
agent_id : typing.Optional[str]
The id of the agent you're taking the action on.
call_successful : typing.Optional[EvaluationSuccessResult]
The result of the success evaluation
call_start_before_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations up to this start date.
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after this start date.
page_size : typing.Optional[int]
How many conversations to return at maximum. Cannot exceed 100; defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetConversationsPageResponseModel]
Successful Response
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/raw_client.py
|
MIT
|
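The raw client above returns an `HttpResponse` wrapper instead of the bare model. A hedged sketch of reaching it from the high-level client, assuming a Fern-style `with_raw_response` accessor is generated next to these wrappers (not shown in this row):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# `with_raw_response` is an assumed accessor that would return the
# RawConversationsClient documented above.
raw = client.conversational_ai.conversations.with_raw_response.list(page_size=10)

# `.data` holds the parsed GetConversationsPageResponseModel, matching the
# `HttpResponse(response=_response, data=_data)` construction in the row above.
page = raw.data
print(len(page.conversations))  # assumed field name on the page model
```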
def get(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[GetConversationResponseModel]:
"""
Get the details of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetConversationResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/conversations/{jsonable_encoder(conversation_id)}",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetConversationResponseModel,
construct_type(
type_=GetConversationResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get the details of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetConversationResponseModel]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/raw_client.py
|
MIT
|
def delete(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[typing.Optional[typing.Any]]:
"""
Delete a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[typing.Optional[typing.Any]]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/conversations/{jsonable_encoder(conversation_id)}",
base_url=self._client_wrapper.get_environment().base,
method="DELETE",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
typing.Optional[typing.Any],
construct_type(
type_=typing.Optional[typing.Any], # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Delete a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[typing.Optional[typing.Any]]
Successful Response
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/raw_client.py
|
MIT
|
async def get_signed_url(
self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[ConversationSignedUrlResponseModel]:
"""
Get a signed URL to start a conversation with an agent that requires authorization
Parameters
----------
agent_id : str
The id of the agent you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[ConversationSignedUrlResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/conversation/get-signed-url",
base_url=self._client_wrapper.get_environment().base,
method="GET",
params={
"agent_id": agent_id,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
ConversationSignedUrlResponseModel,
construct_type(
type_=ConversationSignedUrlResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get a signed URL to start a conversation with an agent that requires authorization
Parameters
----------
agent_id : str
The id of the agent you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[ConversationSignedUrlResponseModel]
Successful Response
|
get_signed_url
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/raw_client.py
|
MIT
|
async def list(
self,
*,
cursor: typing.Optional[str] = None,
agent_id: typing.Optional[str] = None,
call_successful: typing.Optional[EvaluationSuccessResult] = None,
call_start_before_unix: typing.Optional[int] = None,
call_start_after_unix: typing.Optional[int] = None,
page_size: typing.Optional[int] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[GetConversationsPageResponseModel]:
"""
Get all conversations of agents that the user owns, with the option to restrict to a specific agent.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
agent_id : typing.Optional[str]
The id of the agent you're taking the action on.
call_successful : typing.Optional[EvaluationSuccessResult]
The result of the success evaluation
call_start_before_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations up to this start date.
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after this start date.
page_size : typing.Optional[int]
How many conversations to return at maximum. Cannot exceed 100; defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetConversationsPageResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/conversations",
base_url=self._client_wrapper.get_environment().base,
method="GET",
params={
"cursor": cursor,
"agent_id": agent_id,
"call_successful": call_successful,
"call_start_before_unix": call_start_before_unix,
"call_start_after_unix": call_start_after_unix,
"page_size": page_size,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetConversationsPageResponseModel,
construct_type(
type_=GetConversationsPageResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get all conversations of agents that the user owns, with the option to restrict to a specific agent.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
agent_id : typing.Optional[str]
The id of the agent you're taking the action on.
call_successful : typing.Optional[EvaluationSuccessResult]
The result of the success evaluation
call_start_before_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations up to this start date.
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after this start date.
page_size : typing.Optional[int]
How many conversations to return at maximum. Cannot exceed 100; defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetConversationsPageResponseModel]
Successful Response
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/raw_client.py
|
MIT
|
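Because every call on the async client is awaitable, filtered `list` requests for several agents can be issued concurrently; a sketch using `asyncio.gather`, assuming the page model exposes a `conversations` list (agent IDs are placeholders):

```python
import asyncio

from elevenlabs import AsyncElevenLabs


async def main() -> None:
    client = AsyncElevenLabs(api_key="YOUR_API_KEY")
    agent_ids = ["agent_a", "agent_b"]  # placeholder agent IDs

    # Fire one filtered list request per agent and await them together.
    pages = await asyncio.gather(
        *(
            client.conversational_ai.conversations.list(agent_id=agent_id)
            for agent_id in agent_ids
        )
    )
    for agent_id, page in zip(agent_ids, pages):
        print(agent_id, len(page.conversations))  # assumed field name


asyncio.run(main())
```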
async def get(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[GetConversationResponseModel]:
"""
Get the details of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetConversationResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/conversations/{jsonable_encoder(conversation_id)}",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetConversationResponseModel,
construct_type(
type_=GetConversationResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get the details of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetConversationResponseModel]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/raw_client.py
|
MIT
|
async def delete(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
"""
Delete a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[typing.Optional[typing.Any]]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/conversations/{jsonable_encoder(conversation_id)}",
base_url=self._client_wrapper.get_environment().base,
method="DELETE",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
typing.Optional[typing.Any],
construct_type(
type_=typing.Optional[typing.Any], # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Delete a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[typing.Optional[typing.Any]]
Successful Response
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/raw_client.py
|
MIT
|
def get(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.Iterator[bytes]:
"""
Get the audio recording of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration, such as `chunk_size`, to customize the request and response.
Returns
-------
typing.Iterator[bytes]
Successful Response
"""
with self._raw_client.get(conversation_id, request_options=request_options) as r:
yield from r.data
|
Get the audio recording of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration, such as `chunk_size`, to customize the request and response.
Returns
-------
typing.Iterator[bytes]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/audio/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/audio/client.py
|
MIT
|
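The audio client yields raw bytes, so the usual pattern is to stream them straight to disk; a minimal sketch (the conversation ID is a placeholder and the MP3 extension is an assumption about the recording format):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Write the recording chunk by chunk instead of buffering it all in memory.
with open("conversation.mp3", "wb") as f:
    for chunk in client.conversational_ai.conversations.audio.get(
        conversation_id="21m00Tcm4TlvDq8ikWAM",
    ):
        f.write(chunk)
```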
async def get(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.AsyncIterator[bytes]:
"""
Get the audio recording of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration, such as `chunk_size`, to customize the request and response.
Returns
-------
typing.AsyncIterator[bytes]
Successful Response
"""
async with self._raw_client.get(conversation_id, request_options=request_options) as r:
async for _chunk in r.data:
yield _chunk
|
Get the audio recording of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration, such as `chunk_size`, to customize the request and response.
Returns
-------
typing.AsyncIterator[bytes]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/audio/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/audio/client.py
|
MIT
|
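The async variant is an async generator of bytes, so it is consumed with `async for`; a sketch that collects the chunks in memory (placeholder conversation ID):

```python
import asyncio

from elevenlabs import AsyncElevenLabs


async def main() -> None:
    client = AsyncElevenLabs(api_key="YOUR_API_KEY")
    chunks = []
    # Chunks arrive as an AsyncIterator[bytes], as documented above.
    async for chunk in client.conversational_ai.conversations.audio.get(
        conversation_id="21m00Tcm4TlvDq8ikWAM",
    ):
        chunks.append(chunk)
    print(f"downloaded {len(b''.join(chunks))} bytes")


asyncio.run(main())
```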
def get(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.Iterator[HttpResponse[typing.Iterator[bytes]]]:
"""
Get the audio recording of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration, such as `chunk_size`, to customize the request and response.
Returns
-------
typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
Successful Response
"""
with self._client_wrapper.httpx_client.stream(
f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/audio",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
) as _response:
def _stream() -> HttpResponse[typing.Iterator[bytes]]:
try:
if 200 <= _response.status_code < 300:
_chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
return HttpResponse(
response=_response, data=(_chunk for _chunk in _response.iter_bytes(chunk_size=_chunk_size))
)
_response.read()
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(
status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield _stream()
|
Get the audio recording of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration, such as `chunk_size`, to customize the request and response.
Returns
-------
typing.Iterator[HttpResponse[typing.Iterator[bytes]]]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/audio/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/audio/raw_client.py
|
MIT
|
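The raw streaming code above reads `chunk_size` out of `request_options` (defaulting to 1024 bytes), so the chunk size can be tuned per call; a sketch through the high-level client, assuming `request_options` accepts a plain dict with that key, as the `.get("chunk_size", 1024)` lookup suggests:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Ask for 64 KiB chunks instead of the 1024-byte default used above.
stream = client.conversational_ai.conversations.audio.get(
    conversation_id="21m00Tcm4TlvDq8ikWAM",
    request_options={"chunk_size": 64 * 1024},
)
total = sum(len(chunk) for chunk in stream)
print(f"downloaded {total} bytes")
```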
async def get(
self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]:
"""
Get the audio recording of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration, such as `chunk_size`, to customize the request and response.
Returns
-------
typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
Successful Response
"""
async with self._client_wrapper.httpx_client.stream(
f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/audio",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
) as _response:
async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
try:
if 200 <= _response.status_code < 300:
_chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
return AsyncHttpResponse(
response=_response,
data=(_chunk async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size)),
)
await _response.aread()
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(
status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield await _stream()
|
Get the audio recording of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration. You can pass in configuration, such as `chunk_size`, to customize the request and response.
Returns
-------
typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[bytes]]]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/audio/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/audio/raw_client.py
|
MIT
|
def create(
self,
conversation_id: str,
*,
feedback: UserFeedbackScore,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
Send the feedback for the given conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
feedback : UserFeedbackScore
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.conversations.feedback.create(
conversation_id="21m00Tcm4TlvDq8ikWAM",
feedback="like",
)
"""
_response = self._raw_client.create(conversation_id, feedback=feedback, request_options=request_options)
return _response.data
|
Send the feedback for the given conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
feedback : UserFeedbackScore
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.conversations.feedback.create(
conversation_id="21m00Tcm4TlvDq8ikWAM",
feedback="like",
)
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/feedback/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/feedback/client.py
|
MIT
|
async def create(
self,
conversation_id: str,
*,
feedback: UserFeedbackScore,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
Send the feedback for the given conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
feedback : UserFeedbackScore
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.conversations.feedback.create(
conversation_id="21m00Tcm4TlvDq8ikWAM",
feedback="like",
)
asyncio.run(main())
"""
_response = await self._raw_client.create(conversation_id, feedback=feedback, request_options=request_options)
return _response.data
|
Send the feedback for the given conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
feedback : UserFeedbackScore
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.conversations.feedback.create(
conversation_id="21m00Tcm4TlvDq8ikWAM",
feedback="like",
)
asyncio.run(main())
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/feedback/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/feedback/client.py
|
MIT
|
def create(
self,
conversation_id: str,
*,
feedback: UserFeedbackScore,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[typing.Optional[typing.Any]]:
"""
Send the feedback for the given conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
feedback : UserFeedbackScore
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[typing.Optional[typing.Any]]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/feedback",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json={
"feedback": feedback,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
typing.Optional[typing.Any],
construct_type(
type_=typing.Optional[typing.Any], # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Send the feedback for the given conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
feedback : UserFeedbackScore
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[typing.Optional[typing.Any]]
Successful Response
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/feedback/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/feedback/raw_client.py
|
MIT
|
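As the raw client above shows, a 422 response raises `UnprocessableEntityError` and any other non-2xx status raises `ApiError`, so feedback submission is typically wrapped in a try/except. A sketch, with the import paths and the `status_code`/`body` attribute names assumed from the constructor calls in the row above:

```python
from elevenlabs import ElevenLabs
from elevenlabs.core.api_error import ApiError          # assumed import path
from elevenlabs.errors import UnprocessableEntityError  # assumed import path

client = ElevenLabs(api_key="YOUR_API_KEY")

try:
    client.conversational_ai.conversations.feedback.create(
        conversation_id="21m00Tcm4TlvDq8ikWAM",
        feedback="like",
    )
except UnprocessableEntityError as err:
    # 422: the request failed validation (HttpValidationError payload).
    print("validation failed:", err.body)
except ApiError as err:
    # Any other non-2xx status ends up here, as in the raw client above.
    print("request failed:", err.status_code, err.body)
```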
async def create(
self,
conversation_id: str,
*,
feedback: UserFeedbackScore,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
"""
Send the feedback for the given conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
feedback : UserFeedbackScore
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[typing.Optional[typing.Any]]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/conversations/{jsonable_encoder(conversation_id)}/feedback",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json={
"feedback": feedback,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
typing.Optional[typing.Any],
construct_type(
type_=typing.Optional[typing.Any], # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Send the feedback for the given conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
feedback : UserFeedbackScore
Either 'like' or 'dislike' to indicate the feedback for the conversation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[typing.Optional[typing.Any]]
Successful Response
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/conversations/feedback/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/feedback/raw_client.py
|
MIT
|
def get(
self, *, request_options: typing.Optional[RequestOptions] = None
) -> GetConvAiDashboardSettingsResponseModel:
"""
Retrieve Convai dashboard settings for the workspace
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConvAiDashboardSettingsResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.dashboard.settings.get()
"""
_response = self._raw_client.get(request_options=request_options)
return _response.data
|
Retrieve Convai dashboard settings for the workspace
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConvAiDashboardSettingsResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.dashboard.settings.get()
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/dashboard/settings/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/dashboard/settings/client.py
|
MIT
|
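Every method in these rows accepts `request_options` for per-call configuration; a sketch of overriding the timeout and adding a header on the settings read. The `timeout_in_seconds` and `additional_headers` keys are assumed Fern `RequestOptions` conventions, not confirmed by this row:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Per-request configuration; the key names below are assumptions.
settings = client.conversational_ai.dashboard.settings.get(
    request_options={
        "timeout_in_seconds": 30,
        "additional_headers": {"x-trace-id": "debug-123"},
    }
)
print(settings)
```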
def update(
self,
*,
charts: typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> GetConvAiDashboardSettingsResponseModel:
"""
Update Convai dashboard settings for the workspace
Parameters
----------
charts : typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConvAiDashboardSettingsResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.dashboard.settings.update()
"""
_response = self._raw_client.update(charts=charts, request_options=request_options)
return _response.data
|
Update Convai dashboard settings for the workspace
Parameters
----------
charts : typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConvAiDashboardSettingsResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.dashboard.settings.update()
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/dashboard/settings/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/dashboard/settings/client.py
|
MIT
|
async def get(
self, *, request_options: typing.Optional[RequestOptions] = None
) -> GetConvAiDashboardSettingsResponseModel:
"""
Retrieve Convai dashboard settings for the workspace
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConvAiDashboardSettingsResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.dashboard.settings.get()
asyncio.run(main())
"""
_response = await self._raw_client.get(request_options=request_options)
return _response.data
|
Retrieve Convai dashboard settings for the workspace
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConvAiDashboardSettingsResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.dashboard.settings.get()
asyncio.run(main())
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/dashboard/settings/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/dashboard/settings/client.py
|
MIT
|
async def update(
self,
*,
charts: typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> GetConvAiDashboardSettingsResponseModel:
"""
Update Convai dashboard settings for the workspace
Parameters
----------
charts : typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConvAiDashboardSettingsResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.dashboard.settings.update()
asyncio.run(main())
"""
_response = await self._raw_client.update(charts=charts, request_options=request_options)
return _response.data
|
Update Convai dashboard settings for the workspace
Parameters
----------
charts : typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConvAiDashboardSettingsResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.dashboard.settings.update()
asyncio.run(main())
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/dashboard/settings/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/dashboard/settings/client.py
|
MIT
|
def get(
self, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[GetConvAiDashboardSettingsResponseModel]:
"""
Retrieve Convai dashboard settings for the workspace
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetConvAiDashboardSettingsResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/settings/dashboard",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetConvAiDashboardSettingsResponseModel,
construct_type(
type_=GetConvAiDashboardSettingsResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Retrieve Convai dashboard settings for the workspace
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetConvAiDashboardSettingsResponseModel]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/dashboard/settings/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/dashboard/settings/raw_client.py
|
MIT
|
def update(
self,
*,
charts: typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[GetConvAiDashboardSettingsResponseModel]:
"""
Update Convai dashboard settings for the workspace
Parameters
----------
charts : typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetConvAiDashboardSettingsResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/settings/dashboard",
base_url=self._client_wrapper.get_environment().base,
method="PATCH",
json={
"charts": convert_and_respect_annotation_metadata(
object_=charts,
annotation=typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem],
direction="write",
),
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetConvAiDashboardSettingsResponseModel,
construct_type(
type_=GetConvAiDashboardSettingsResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Update Convai dashboard settings for the workspace
Parameters
----------
charts : typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetConvAiDashboardSettingsResponseModel]
Successful Response
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/dashboard/settings/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/dashboard/settings/raw_client.py
|
MIT
|
async def get(
self, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[GetConvAiDashboardSettingsResponseModel]:
"""
Retrieve Convai dashboard settings for the workspace
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetConvAiDashboardSettingsResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/settings/dashboard",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetConvAiDashboardSettingsResponseModel,
construct_type(
type_=GetConvAiDashboardSettingsResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Retrieve Convai dashboard settings for the workspace
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetConvAiDashboardSettingsResponseModel]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/dashboard/settings/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/dashboard/settings/raw_client.py
|
MIT
|
async def update(
self,
*,
charts: typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[GetConvAiDashboardSettingsResponseModel]:
"""
Update Convai dashboard settings for the workspace
Parameters
----------
charts : typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetConvAiDashboardSettingsResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/settings/dashboard",
base_url=self._client_wrapper.get_environment().base,
method="PATCH",
json={
"charts": convert_and_respect_annotation_metadata(
object_=charts,
annotation=typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem],
direction="write",
),
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetConvAiDashboardSettingsResponseModel,
construct_type(
type_=GetConvAiDashboardSettingsResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Update Convai dashboard settings for the workspace
Parameters
----------
charts : typing.Optional[typing.Sequence[PatchConvAiDashboardSettingsRequestChartsItem]]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetConvAiDashboardSettingsResponseModel]
Successful Response
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/dashboard/settings/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/dashboard/settings/raw_client.py
|
MIT
|
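The two raw methods above amount to a read-modify-write cycle on the workspace dashboard settings. A minimal sketch of that cycle, assuming the high-level client exposes these endpoints as client.conversational_ai.dashboard.settings (an attribute path inferred from the module layout, not shown in this file):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Read the current dashboard settings (wrapper attribute path assumed from the module layout).
settings = client.conversational_ai.dashboard.settings.get()
print(settings)

# To change which charts the dashboard shows, PATCH a new sequence of
# PatchConvAiDashboardSettingsRequestChartsItem objects, for example:
# client.conversational_ai.dashboard.settings.update(charts=[...])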
def list(
self,
*,
cursor: typing.Optional[str] = None,
page_size: typing.Optional[int] = None,
search: typing.Optional[str] = None,
show_only_owned_documents: typing.Optional[bool] = None,
types: typing.Optional[
typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]
] = None,
use_typesense: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetKnowledgeBaseListResponseModel:
"""
Get a list of available knowledge base documents
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
            How many documents to return at maximum. Cannot exceed 100, defaults to 30.
        search : typing.Optional[str]
            If specified, the endpoint returns only knowledge base documents whose names start with this string.
        show_only_owned_documents : typing.Optional[bool]
            If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
        types : typing.Optional[typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]]
            If present, the endpoint will return only documents of the given types.
        use_typesense : typing.Optional[bool]
            If set to true, the endpoint will use the Typesense DB to search for the documents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetKnowledgeBaseListResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.list()
"""
_response = self._raw_client.list(
cursor=cursor,
page_size=page_size,
search=search,
show_only_owned_documents=show_only_owned_documents,
types=types,
use_typesense=use_typesense,
request_options=request_options,
)
return _response.data
|
Get a list of available knowledge base documents
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
    How many documents to return at maximum. Cannot exceed 100, defaults to 30.
search : typing.Optional[str]
    If specified, the endpoint returns only knowledge base documents whose names start with this string.
show_only_owned_documents : typing.Optional[bool]
    If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
types : typing.Optional[typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]]
    If present, the endpoint will return only documents of the given types.
use_typesense : typing.Optional[bool]
    If set to true, the endpoint will use the Typesense DB to search for the documents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetKnowledgeBaseListResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.list()
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/client.py
|
MIT
|
async def list(
self,
*,
cursor: typing.Optional[str] = None,
page_size: typing.Optional[int] = None,
search: typing.Optional[str] = None,
show_only_owned_documents: typing.Optional[bool] = None,
types: typing.Optional[
typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]
] = None,
use_typesense: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetKnowledgeBaseListResponseModel:
"""
Get a list of available knowledge base documents
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
            How many documents to return at maximum. Cannot exceed 100, defaults to 30.
        search : typing.Optional[str]
            If specified, the endpoint returns only knowledge base documents whose names start with this string.
        show_only_owned_documents : typing.Optional[bool]
            If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
        types : typing.Optional[typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]]
            If present, the endpoint will return only documents of the given types.
        use_typesense : typing.Optional[bool]
            If set to true, the endpoint will use the Typesense DB to search for the documents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetKnowledgeBaseListResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.list()
asyncio.run(main())
"""
_response = await self._raw_client.list(
cursor=cursor,
page_size=page_size,
search=search,
show_only_owned_documents=show_only_owned_documents,
types=types,
use_typesense=use_typesense,
request_options=request_options,
)
return _response.data
|
Get a list of available knowledge base documents
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
    How many documents to return at maximum. Cannot exceed 100, defaults to 30.
search : typing.Optional[str]
    If specified, the endpoint returns only knowledge base documents whose names start with this string.
show_only_owned_documents : typing.Optional[bool]
    If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
types : typing.Optional[typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]]
    If present, the endpoint will return only documents of the given types.
use_typesense : typing.Optional[bool]
    If set to true, the endpoint will use the Typesense DB to search for the documents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetKnowledgeBaseListResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.list()
asyncio.run(main())
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/client.py
|
MIT
|
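Because list caps page_size at 100 and hands back a cursor for the next page, fetching every document means looping until the cursor runs out. A sketch of that loop, assuming the response model exposes documents, has_more, and next_cursor fields (field names are not shown in this file):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

documents = []
cursor = None
while True:
    # page_size cannot exceed 100, so larger collections take several requests.
    page = client.conversational_ai.knowledge_base.list(cursor=cursor, page_size=100)
    documents.extend(page.documents)  # field name assumed
    if not page.has_more:             # field name assumed
        break
    cursor = page.next_cursor         # field name assumed
print(f"fetched {len(documents)} knowledge base documents")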
def list(
self,
*,
cursor: typing.Optional[str] = None,
page_size: typing.Optional[int] = None,
search: typing.Optional[str] = None,
show_only_owned_documents: typing.Optional[bool] = None,
types: typing.Optional[
typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]
] = None,
use_typesense: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[GetKnowledgeBaseListResponseModel]:
"""
Get a list of available knowledge base documents
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
            How many documents to return at maximum. Cannot exceed 100, defaults to 30.
        search : typing.Optional[str]
            If specified, the endpoint returns only knowledge base documents whose names start with this string.
        show_only_owned_documents : typing.Optional[bool]
            If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
        types : typing.Optional[typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]]
            If present, the endpoint will return only documents of the given types.
        use_typesense : typing.Optional[bool]
            If set to true, the endpoint will use the Typesense DB to search for the documents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetKnowledgeBaseListResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/knowledge-base",
base_url=self._client_wrapper.get_environment().base,
method="GET",
params={
"cursor": cursor,
"page_size": page_size,
"search": search,
"show_only_owned_documents": show_only_owned_documents,
"types": types,
"use_typesense": use_typesense,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetKnowledgeBaseListResponseModel,
construct_type(
type_=GetKnowledgeBaseListResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get a list of available knowledge base documents
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
    How many documents to return at maximum. Cannot exceed 100, defaults to 30.
search : typing.Optional[str]
    If specified, the endpoint returns only knowledge base documents whose names start with this string.
show_only_owned_documents : typing.Optional[bool]
    If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
types : typing.Optional[typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]]
    If present, the endpoint will return only documents of the given types.
use_typesense : typing.Optional[bool]
    If set to true, the endpoint will use the Typesense DB to search for the documents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetKnowledgeBaseListResponseModel]
Successful Response
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/raw_client.py
|
MIT
|
async def list(
self,
*,
cursor: typing.Optional[str] = None,
page_size: typing.Optional[int] = None,
search: typing.Optional[str] = None,
show_only_owned_documents: typing.Optional[bool] = None,
types: typing.Optional[
typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]
] = None,
use_typesense: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[GetKnowledgeBaseListResponseModel]:
"""
Get a list of available knowledge base documents
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
            How many documents to return at maximum. Cannot exceed 100, defaults to 30.
        search : typing.Optional[str]
            If specified, the endpoint returns only knowledge base documents whose names start with this string.
        show_only_owned_documents : typing.Optional[bool]
            If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
        types : typing.Optional[typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]]
            If present, the endpoint will return only documents of the given types.
        use_typesense : typing.Optional[bool]
            If set to true, the endpoint will use the Typesense DB to search for the documents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetKnowledgeBaseListResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/knowledge-base",
base_url=self._client_wrapper.get_environment().base,
method="GET",
params={
"cursor": cursor,
"page_size": page_size,
"search": search,
"show_only_owned_documents": show_only_owned_documents,
"types": types,
"use_typesense": use_typesense,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetKnowledgeBaseListResponseModel,
construct_type(
type_=GetKnowledgeBaseListResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get a list of available knowledge base documents
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
    How many documents to return at maximum. Cannot exceed 100, defaults to 30.
search : typing.Optional[str]
    If specified, the endpoint returns only knowledge base documents whose names start with this string.
show_only_owned_documents : typing.Optional[bool]
    If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
types : typing.Optional[typing.Union[KnowledgeBaseDocumentType, typing.Sequence[KnowledgeBaseDocumentType]]]
    If present, the endpoint will return only documents of the given types.
use_typesense : typing.Optional[bool]
    If set to true, the endpoint will use the Typesense DB to search for the documents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetKnowledgeBaseListResponseModel]
Successful Response
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/raw_client.py
|
MIT
|
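The raw variants return the parsed model wrapped in an HttpResponse/AsyncHttpResponse, which helps when the caller also wants the HTTP envelope rather than just the data. For illustration only, this sketch reaches the raw client through the private _raw_client attribute that the wrapper itself uses above; a public accessor, if one exists, is not shown in this file:

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Same query as the wrapper, but keeping the wrapper object around the parsed data.
raw = client.conversational_ai.knowledge_base._raw_client.list(page_size=10)
print(raw.data)  # GetKnowledgeBaseListResponseModel, what the wrapper returns as .data
# Assuming the wrapper also exposes the underlying httpx response as `.response`:
print(raw.response.status_code)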
def compute_rag_index(
self,
documentation_id: str,
*,
model: EmbeddingModelEnum,
request_options: typing.Optional[RequestOptions] = None,
) -> RagDocumentIndexResponseModel:
"""
        If the document is not yet RAG indexed, this triggers a RAG indexing task; otherwise it returns the current status.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
model : EmbeddingModelEnum
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
RagDocumentIndexResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.document.compute_rag_index(
documentation_id="21m00Tcm4TlvDq8ikWAM",
model="e5_mistral_7b_instruct",
)
"""
_response = self._raw_client.compute_rag_index(documentation_id, model=model, request_options=request_options)
return _response.data
|
If the document is not yet RAG indexed, this triggers a RAG indexing task; otherwise it returns the current status.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
model : EmbeddingModelEnum
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
RagDocumentIndexResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.document.compute_rag_index(
documentation_id="21m00Tcm4TlvDq8ikWAM",
model="e5_mistral_7b_instruct",
)
|
compute_rag_index
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/document/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/document/client.py
|
MIT
|
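Because compute_rag_index only starts indexing when the document is not yet indexed and otherwise just reports status, calling it again doubles as a status check. A minimal sketch using the example id and model value from the docstrings above:

import time
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")
doc_id = "21m00Tcm4TlvDq8ikWAM"  # example id from the docstrings above

# First call triggers indexing (or reports the current state if already indexed).
print(client.conversational_ai.knowledge_base.document.compute_rag_index(
    doc_id, model="e5_mistral_7b_instruct"
))
time.sleep(30)
# A later call does not restart the job; it just returns the current status.
print(client.conversational_ai.knowledge_base.document.compute_rag_index(
    doc_id, model="e5_mistral_7b_instruct"
))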
async def compute_rag_index(
self,
documentation_id: str,
*,
model: EmbeddingModelEnum,
request_options: typing.Optional[RequestOptions] = None,
) -> RagDocumentIndexResponseModel:
"""
        If the document is not yet RAG indexed, this triggers a RAG indexing task; otherwise it returns the current status.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
model : EmbeddingModelEnum
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
RagDocumentIndexResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.document.compute_rag_index(
documentation_id="21m00Tcm4TlvDq8ikWAM",
model="e5_mistral_7b_instruct",
)
asyncio.run(main())
"""
_response = await self._raw_client.compute_rag_index(
documentation_id, model=model, request_options=request_options
)
return _response.data
|
If the document is not yet RAG indexed, this triggers a RAG indexing task; otherwise it returns the current status.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
model : EmbeddingModelEnum
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
RagDocumentIndexResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.document.compute_rag_index(
documentation_id="21m00Tcm4TlvDq8ikWAM",
model="e5_mistral_7b_instruct",
)
asyncio.run(main())
|
compute_rag_index
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/document/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/document/client.py
|
MIT
|
def compute_rag_index(
self,
documentation_id: str,
*,
model: EmbeddingModelEnum,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[RagDocumentIndexResponseModel]:
"""
        If the document is not yet RAG indexed, this triggers a RAG indexing task; otherwise it returns the current status.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
model : EmbeddingModelEnum
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[RagDocumentIndexResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/rag-index",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json={
"model": model,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
RagDocumentIndexResponseModel,
construct_type(
type_=RagDocumentIndexResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
If the document is not yet RAG indexed, this triggers a RAG indexing task; otherwise it returns the current status.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
model : EmbeddingModelEnum
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[RagDocumentIndexResponseModel]
Successful Response
|
compute_rag_index
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/document/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/document/raw_client.py
|
MIT
|
async def compute_rag_index(
self,
documentation_id: str,
*,
model: EmbeddingModelEnum,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[RagDocumentIndexResponseModel]:
"""
        If the document is not yet RAG indexed, this triggers a RAG indexing task; otherwise it returns the current status.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
model : EmbeddingModelEnum
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[RagDocumentIndexResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/rag-index",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json={
"model": model,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
RagDocumentIndexResponseModel,
construct_type(
type_=RagDocumentIndexResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
If the document is not yet RAG indexed, this triggers a RAG indexing task; otherwise it returns the current status.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
model : EmbeddingModelEnum
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[RagDocumentIndexResponseModel]
Successful Response
|
compute_rag_index
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/document/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/document/raw_client.py
|
MIT
|
def create_from_url(
self, *, url: str, name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None
) -> AddKnowledgeBaseResponseModel:
"""
Create a knowledge base document generated by scraping the given webpage.
Parameters
----------
url : str
URL to a page of documentation that the agent will have access to in order to interact with users.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.create_from_url(
url="url",
)
"""
_response = self._raw_client.create_from_url(url=url, name=name, request_options=request_options)
return _response.data
|
Create a knowledge base document generated by scraping the given webpage.
Parameters
----------
url : str
URL to a page of documentation that the agent will have access to in order to interact with users.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.create_from_url(
url="url",
)
|
create_from_url
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
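A common follow-up to create_from_url is RAG-indexing the new document with the compute_rag_index call shown earlier. A sketch of that chain, assuming AddKnowledgeBaseResponseModel exposes the new document's id as an `id` attribute (the docstrings only say the id is returned on addition):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

doc = client.conversational_ai.knowledge_base.documents.create_from_url(
    url="https://example.com/docs/getting-started",  # hypothetical page
    name="Getting started guide",
)
# `doc.id` is an assumed attribute name for the id returned on document addition.
client.conversational_ai.knowledge_base.document.compute_rag_index(
    doc.id, model="e5_mistral_7b_instruct"
)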
def create_from_file(
self,
*,
file: core.File,
name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AddKnowledgeBaseResponseModel:
"""
        Create a knowledge base document generated from the uploaded file.
Parameters
----------
file : core.File
See core.File for more documentation
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.create_from_file()
"""
_response = self._raw_client.create_from_file(file=file, name=name, request_options=request_options)
return _response.data
|
Create a knowledge base document generated from the uploaded file.
Parameters
----------
file : core.File
See core.File for more documentation
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.create_from_file()
|
create_from_file
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
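For create_from_file, the file argument is a core.File, which in SDKs of this style usually accepts an open binary file object or a (filename, bytes, content_type) tuple; treat the exact accepted shapes as an assumption and check core.File. A minimal sketch with a local PDF:

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Upload a local file as a knowledge base document; an open binary handle is assumed
# to be an accepted core.File shape.
with open("product_manual.pdf", "rb") as f:
    doc = client.conversational_ai.knowledge_base.documents.create_from_file(
        file=f,
        name="Product manual",
    )
print(doc)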
def create_from_text(
self, *, text: str, name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None
) -> AddKnowledgeBaseResponseModel:
"""
Create a knowledge base document containing the provided text.
Parameters
----------
text : str
Text content to be added to the knowledge base.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.create_from_text(
text="text",
)
"""
_response = self._raw_client.create_from_text(text=text, name=name, request_options=request_options)
return _response.data
|
Create a knowledge base document containing the provided text.
Parameters
----------
text : str
Text content to be added to the knowledge base.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.create_from_text(
text="text",
)
|
create_from_text
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
def get(
self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> DocumentsGetResponse:
"""
        Get details about a specific document making up the agent's knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
DocumentsGetResponse
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.get(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
"""
_response = self._raw_client.get(documentation_id, request_options=request_options)
return _response.data
|
Get details about a specific document making up the agent's knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
DocumentsGetResponse
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.get(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
def delete(
self,
documentation_id: str,
*,
force: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
Delete a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
force : typing.Optional[bool]
            If set to true, the document will be deleted even if it is still used by agents, and it will be removed from those dependent agents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.delete(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
"""
_response = self._raw_client.delete(documentation_id, force=force, request_options=request_options)
return _response.data
|
Delete a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
force : typing.Optional[bool]
    If set to true, the document will be deleted even if it is still used by agents, and it will be removed from those dependent agents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.delete(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
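The force flag is what separates a safe delete from a destructive one: per the description above, a plain delete is refused while agents still use the document, whereas force=True removes it and detaches it from those agents. A short sketch:

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")
doc_id = "21m00Tcm4TlvDq8ikWAM"  # example id from the docstrings above

# Plain delete: per the force description, this is refused while agents depend on the document.
client.conversational_ai.knowledge_base.documents.delete(documentation_id=doc_id)

# Or, to delete regardless and strip the document from any dependent agents:
# client.conversational_ai.knowledge_base.documents.delete(documentation_id=doc_id, force=True)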
def update(
self, documentation_id: str, *, name: str, request_options: typing.Optional[RequestOptions] = None
) -> DocumentsUpdateResponse:
"""
Update the name of a document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
name : str
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
DocumentsUpdateResponse
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.update(
documentation_id="21m00Tcm4TlvDq8ikWAM",
name="name",
)
"""
_response = self._raw_client.update(documentation_id, name=name, request_options=request_options)
return _response.data
|
Update the name of a document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
name : str
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
DocumentsUpdateResponse
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.update(
documentation_id="21m00Tcm4TlvDq8ikWAM",
name="name",
)
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
def get_agents(
self,
documentation_id: str,
*,
cursor: typing.Optional[str] = None,
page_size: typing.Optional[int] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetKnowledgeBaseDependentAgentsResponseModel:
"""
Get a list of agents depending on this knowledge base document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
            How many agents to return at maximum. Cannot exceed 100, defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetKnowledgeBaseDependentAgentsResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.get_agents(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
"""
_response = self._raw_client.get_agents(
documentation_id, cursor=cursor, page_size=page_size, request_options=request_options
)
return _response.data
|
Get a list of agents depending on this knowledge base document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
    How many agents to return at maximum. Cannot exceed 100, defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetKnowledgeBaseDependentAgentsResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.get_agents(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
|
get_agents
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
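get_agents pages through the dependent agents with the same cursor pattern as list, which makes it a natural pre-check before deleting a document. A sketch, with the field names on GetKnowledgeBaseDependentAgentsResponseModel assumed (they are not shown in this file):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")
doc_id = "21m00Tcm4TlvDq8ikWAM"

dependent = []
cursor = None
while True:
    page = client.conversational_ai.knowledge_base.documents.get_agents(
        doc_id, cursor=cursor, page_size=100
    )
    dependent.extend(page.agents)  # field name assumed
    if not page.has_more:          # field name assumed
        break
    cursor = page.next_cursor      # field name assumed
print(f"{len(dependent)} agents depend on this document")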
async def create_from_url(
self, *, url: str, name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None
) -> AddKnowledgeBaseResponseModel:
"""
Create a knowledge base document generated by scraping the given webpage.
Parameters
----------
url : str
URL to a page of documentation that the agent will have access to in order to interact with users.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.create_from_url(
url="url",
)
asyncio.run(main())
"""
_response = await self._raw_client.create_from_url(url=url, name=name, request_options=request_options)
return _response.data
|
Create a knowledge base document generated by scraping the given webpage.
Parameters
----------
url : str
URL to a page of documentation that the agent will have access to in order to interact with users.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.create_from_url(
url="url",
)
asyncio.run(main())
|
create_from_url
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
async def create_from_file(
self,
*,
file: core.File,
name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AddKnowledgeBaseResponseModel:
"""
        Create a knowledge base document generated from the uploaded file.
Parameters
----------
file : core.File
See core.File for more documentation
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.create_from_file()
asyncio.run(main())
"""
_response = await self._raw_client.create_from_file(file=file, name=name, request_options=request_options)
return _response.data
|
Create a knowledge base document generated from the uploaded file.
Parameters
----------
file : core.File
See core.File for more documentation
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.create_from_file()
asyncio.run(main())
|
create_from_file
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
async def create_from_text(
self, *, text: str, name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None
) -> AddKnowledgeBaseResponseModel:
"""
Create a knowledge base document containing the provided text.
Parameters
----------
text : str
Text content to be added to the knowledge base.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.create_from_text(
text="text",
)
asyncio.run(main())
"""
_response = await self._raw_client.create_from_text(text=text, name=name, request_options=request_options)
return _response.data
|
Create a knowledge base document containing the provided text.
Parameters
----------
text : str
Text content to be added to the knowledge base.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.create_from_text(
text="text",
)
asyncio.run(main())
|
create_from_text
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
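The async document client pays off when several sources are ingested at once, since the create_from_* coroutines can be fanned out with asyncio.gather. A sketch using only the methods documented above (URLs and text are placeholders):

import asyncio
from elevenlabs import AsyncElevenLabs

async def main() -> None:
    client = AsyncElevenLabs(api_key="YOUR_API_KEY")
    docs = client.conversational_ai.knowledge_base.documents
    # Run both uploads concurrently instead of awaiting them one by one.
    results = await asyncio.gather(
        docs.create_from_url(url="https://example.com/faq", name="FAQ"),
        docs.create_from_text(text="Our support hours are 9am-5pm CET.", name="Support hours"),
    )
    for result in results:
        print(result)

asyncio.run(main())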
async def get(
self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> DocumentsGetResponse:
"""
        Get details about a specific document making up the agent's knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
DocumentsGetResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.get(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
"""
_response = await self._raw_client.get(documentation_id, request_options=request_options)
return _response.data
|
Get details about a specific document making up the agent's knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
DocumentsGetResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.get(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
async def delete(
self,
documentation_id: str,
*,
force: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
Delete a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
force : typing.Optional[bool]
            If set to true, the document will be deleted even if it is still used by agents, and it will be removed from those dependent agents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.delete(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
"""
_response = await self._raw_client.delete(documentation_id, force=force, request_options=request_options)
return _response.data
|
Delete a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
force : typing.Optional[bool]
    If set to true, the document will be deleted even if it is still used by agents, and it will be removed from those dependent agents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.delete(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
async def update(
self, documentation_id: str, *, name: str, request_options: typing.Optional[RequestOptions] = None
) -> DocumentsUpdateResponse:
"""
Update the name of a document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
name : str
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
DocumentsUpdateResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.update(
documentation_id="21m00Tcm4TlvDq8ikWAM",
name="name",
)
asyncio.run(main())
"""
_response = await self._raw_client.update(documentation_id, name=name, request_options=request_options)
return _response.data
|
Update the name of a document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
name : str
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
DocumentsUpdateResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.update(
documentation_id="21m00Tcm4TlvDq8ikWAM",
name="name",
)
asyncio.run(main())
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
async def get_agents(
self,
documentation_id: str,
*,
cursor: typing.Optional[str] = None,
page_size: typing.Optional[int] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetKnowledgeBaseDependentAgentsResponseModel:
"""
Get a list of agents depending on this knowledge base document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
            How many agents to return at maximum. Cannot exceed 100, defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetKnowledgeBaseDependentAgentsResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.get_agents(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
"""
_response = await self._raw_client.get_agents(
documentation_id, cursor=cursor, page_size=page_size, request_options=request_options
)
return _response.data
|
Get a list of agents depending on this knowledge base document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
    How many agents to return at maximum. Cannot exceed 100, defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetKnowledgeBaseDependentAgentsResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.get_agents(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
|
get_agents
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
async def get_content(
self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> None:
"""
Get the entire content of a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
None
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.get_content(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
"""
_response = await self._raw_client.get_content(documentation_id, request_options=request_options)
return _response.data
|
Get the entire content of a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
None
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.get_content(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
|
get_content
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/client.py
|
MIT
|
def create_from_url(
self, *, url: str, name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[AddKnowledgeBaseResponseModel]:
"""
Create a knowledge base document generated by scraping the given webpage.
Parameters
----------
url : str
URL to a page of documentation that the agent will have access to in order to interact with users.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/knowledge-base/url",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json={
"url": url,
"name": name,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
AddKnowledgeBaseResponseModel,
construct_type(
type_=AddKnowledgeBaseResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Create a knowledge base document generated by scraping the given webpage.
Parameters
----------
url : str
URL to a page of documentation that the agent will have access to in order to interact with users.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
|
create_from_url
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
def create_from_file(
self,
*,
file: core.File,
name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[AddKnowledgeBaseResponseModel]:
"""
        Create a knowledge base document generated from the uploaded file.
Parameters
----------
file : core.File
See core.File for more documentation
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/knowledge-base/file",
base_url=self._client_wrapper.get_environment().base,
method="POST",
data={
"name": name,
},
files={
"file": file,
},
request_options=request_options,
omit=OMIT,
force_multipart=True,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
AddKnowledgeBaseResponseModel,
construct_type(
type_=AddKnowledgeBaseResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Create a knowledge base document generated from the uploaded file.
Parameters
----------
file : core.File
See core.File for more documentation
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
|
create_from_file
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
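As the raw implementations above show, a 422 response surfaces as UnprocessableEntityError carrying the parsed HttpValidationError body, and anything else ends up as a generic ApiError. A sketch of handling both around an upload; the import locations are assumptions, since they are not shown in this file:

from elevenlabs import ElevenLabs
# Import paths below are assumptions; adjust to wherever the SDK exports these errors.
from elevenlabs.core.api_error import ApiError
from elevenlabs.errors import UnprocessableEntityError

client = ElevenLabs(api_key="YOUR_API_KEY")

try:
    with open("notes.txt", "rb") as f:
        client.conversational_ai.knowledge_base.documents.create_from_file(file=f, name="Notes")
except UnprocessableEntityError as err:
    # 422: validation failed; err.body holds the parsed HttpValidationError.
    print("validation error:", err.body)
except ApiError as err:
    # Any other non-2xx or unparseable response.
    print("request failed:", err.status_code, err.body)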
def create_from_text(
self, *, text: str, name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[AddKnowledgeBaseResponseModel]:
"""
Create a knowledge base document containing the provided text.
Parameters
----------
text : str
Text content to be added to the knowledge base.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/knowledge-base/text",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json={
"text": text,
"name": name,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
AddKnowledgeBaseResponseModel,
construct_type(
type_=AddKnowledgeBaseResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Create a knowledge base document containing the provided text.
Parameters
----------
text : str
Text content to be added to the knowledge base.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
|
create_from_text
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
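A minimal sketch for adding plain text, assuming the same wrapper path (client.conversational_ai.knowledge_base.documents.create_from_text); the text and name are placeholders.
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Add a short piece of text directly to the knowledge base.
client.conversational_ai.knowledge_base.documents.create_from_text(
    text="Support is available 9am-5pm CET, Monday to Friday.",
    name="Support hours",
)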
|
def get(
self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[DocumentsGetResponse]:
"""
        Get details about a specific document from the agent's knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[DocumentsGetResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
DocumentsGetResponse,
construct_type(
type_=DocumentsGetResponse, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get details about a specific document from the agent's knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[DocumentsGetResponse]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
def delete(
self,
documentation_id: str,
*,
force: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[typing.Optional[typing.Any]]:
"""
Delete a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
force : typing.Optional[bool]
            If set to true, the document will be deleted even if it is used by agents, and it will also be removed from those dependent agents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[typing.Optional[typing.Any]]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}",
base_url=self._client_wrapper.get_environment().base,
method="DELETE",
params={
"force": force,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
typing.Optional[typing.Any],
construct_type(
type_=typing.Optional[typing.Any], # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Delete a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
force : typing.Optional[bool]
    If set to true, the document will be deleted even if it is used by agents, and it will also be removed from those dependent agents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[typing.Optional[typing.Any]]
Successful Response
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
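Since force=True deletes the document even when agents still reference it, a minimal sketch assuming the wrapper client mirrors this signature; the document id is a placeholder.
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Force-delete a document; it is also removed from any dependent agents.
client.conversational_ai.knowledge_base.documents.delete(
    "21m00Tcm4TlvDq8ikWAM",
    force=True,
)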
|
def update(
self, documentation_id: str, *, name: str, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[DocumentsUpdateResponse]:
"""
Update the name of a document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
name : str
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[DocumentsUpdateResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}",
base_url=self._client_wrapper.get_environment().base,
method="PATCH",
json={
"name": name,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
DocumentsUpdateResponse,
construct_type(
type_=DocumentsUpdateResponse, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Update the name of a document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
name : str
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[DocumentsUpdateResponse]
Successful Response
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
def get_agents(
self,
documentation_id: str,
*,
cursor: typing.Optional[str] = None,
page_size: typing.Optional[int] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[GetKnowledgeBaseDependentAgentsResponseModel]:
"""
Get a list of agents depending on this knowledge base document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
How many documents to return at maximum. Can not exceed 100, defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetKnowledgeBaseDependentAgentsResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/dependent-agents",
base_url=self._client_wrapper.get_environment().base,
method="GET",
params={
"cursor": cursor,
"page_size": page_size,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetKnowledgeBaseDependentAgentsResponseModel,
construct_type(
type_=GetKnowledgeBaseDependentAgentsResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get a list of agents depending on this knowledge base document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
How many documents to return at maximum. Can not exceed 100, defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetKnowledgeBaseDependentAgentsResponseModel]
Successful Response
|
get_agents
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
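Because results are paginated with cursor and page_size, a cursor loop can collect every dependent agent. A minimal sketch, assuming the wrapper path client.conversational_ai.knowledge_base.documents.get_agents and assuming the response exposes agents, has_more and next_cursor fields (field names are not confirmed by this record).
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Page through every agent that depends on a given document (id is a placeholder).
cursor = None
while True:
    page = client.conversational_ai.knowledge_base.documents.get_agents(
        "21m00Tcm4TlvDq8ikWAM",
        cursor=cursor,
        page_size=30,
    )
    for agent in page.agents:  # assumed field name
        print(agent)
    if not page.has_more:  # assumed field name
        break
    cursor = page.next_cursor  # assumed field name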
|
def get_content(
self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[None]:
"""
Get the entire content of a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[None]
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/content",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get the entire content of a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[None]
|
get_content
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
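The raw client parses no body here (HttpResponse[None]), so the document text has to be read from the HTTP response itself. A minimal sketch that calls the same route directly with httpx, assuming the public base URL https://api.elevenlabs.io and the xi-api-key header; both are assumptions, not taken from this record.
import httpx

# Fetch the raw content of a knowledge base document; the id is a placeholder.
documentation_id = "21m00Tcm4TlvDq8ikWAM"
response = httpx.get(
    f"https://api.elevenlabs.io/v1/convai/knowledge-base/{documentation_id}/content",
    headers={"xi-api-key": "YOUR_API_KEY"},
)
response.raise_for_status()
print(response.text)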
|
async def create_from_url(
self, *, url: str, name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[AddKnowledgeBaseResponseModel]:
"""
Create a knowledge base document generated by scraping the given webpage.
Parameters
----------
url : str
URL to a page of documentation that the agent will have access to in order to interact with users.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/knowledge-base/url",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json={
"url": url,
"name": name,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
AddKnowledgeBaseResponseModel,
construct_type(
type_=AddKnowledgeBaseResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Create a knowledge base document generated by scraping the given webpage.
Parameters
----------
url : str
URL to a page of documentation that the agent will have access to in order to interact with users.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
|
create_from_url
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
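An async counterpart of the URL-scraping sketch above, assuming AsyncElevenLabs exposes the same wrapper path; the URL and name are placeholders.
import asyncio

from elevenlabs import AsyncElevenLabs

client = AsyncElevenLabs(
    api_key="YOUR_API_KEY",
)

async def main() -> None:
    # Scrape a placeholder documentation page into the knowledge base.
    await client.conversational_ai.knowledge_base.documents.create_from_url(
        url="https://example.com/docs/faq",
        name="FAQ",
    )

asyncio.run(main())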
|
async def create_from_file(
self,
*,
file: core.File,
name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[AddKnowledgeBaseResponseModel]:
"""
        Create a knowledge base document generated from the uploaded file.
Parameters
----------
file : core.File
See core.File for more documentation
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/knowledge-base/file",
base_url=self._client_wrapper.get_environment().base,
method="POST",
data={
"name": name,
},
files={
"file": file,
},
request_options=request_options,
omit=OMIT,
force_multipart=True,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
AddKnowledgeBaseResponseModel,
construct_type(
type_=AddKnowledgeBaseResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Create a knowledge base document generated from the uploaded file.
Parameters
----------
file : core.File
See core.File for more documentation
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
|
create_from_file
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
async def create_from_text(
self, *, text: str, name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[AddKnowledgeBaseResponseModel]:
"""
Create a knowledge base document containing the provided text.
Parameters
----------
text : str
Text content to be added to the knowledge base.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/knowledge-base/text",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json={
"text": text,
"name": name,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
AddKnowledgeBaseResponseModel,
construct_type(
type_=AddKnowledgeBaseResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Create a knowledge base document containing the provided text.
Parameters
----------
text : str
Text content to be added to the knowledge base.
name : typing.Optional[str]
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
|
create_from_text
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
async def get(
self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[DocumentsGetResponse]:
"""
        Get details about a specific document from the agent's knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[DocumentsGetResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
DocumentsGetResponse,
construct_type(
type_=DocumentsGetResponse, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get details about a specific document from the agent's knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[DocumentsGetResponse]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
async def delete(
self,
documentation_id: str,
*,
force: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
"""
Delete a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
force : typing.Optional[bool]
            If set to true, the document will be deleted even if it is used by agents, and it will also be removed from those dependent agents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[typing.Optional[typing.Any]]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}",
base_url=self._client_wrapper.get_environment().base,
method="DELETE",
params={
"force": force,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
typing.Optional[typing.Any],
construct_type(
type_=typing.Optional[typing.Any], # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Delete a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
force : typing.Optional[bool]
    If set to true, the document will be deleted even if it is used by agents, and it will also be removed from those dependent agents.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[typing.Optional[typing.Any]]
Successful Response
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
async def update(
self, documentation_id: str, *, name: str, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[DocumentsUpdateResponse]:
"""
Update the name of a document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
name : str
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[DocumentsUpdateResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}",
base_url=self._client_wrapper.get_environment().base,
method="PATCH",
json={
"name": name,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
DocumentsUpdateResponse,
construct_type(
type_=DocumentsUpdateResponse, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Update the name of a document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
name : str
A custom, human-readable name for the document.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[DocumentsUpdateResponse]
Successful Response
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
async def get_agents(
self,
documentation_id: str,
*,
cursor: typing.Optional[str] = None,
page_size: typing.Optional[int] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[GetKnowledgeBaseDependentAgentsResponseModel]:
"""
Get a list of agents depending on this knowledge base document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
How many documents to return at maximum. Can not exceed 100, defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetKnowledgeBaseDependentAgentsResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/dependent-agents",
base_url=self._client_wrapper.get_environment().base,
method="GET",
params={
"cursor": cursor,
"page_size": page_size,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetKnowledgeBaseDependentAgentsResponseModel,
construct_type(
type_=GetKnowledgeBaseDependentAgentsResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get a list of agents depending on this knowledge base document
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
How many documents to return at maximum. Can not exceed 100, defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetKnowledgeBaseDependentAgentsResponseModel]
Successful Response
|
get_agents
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
async def get_content(
self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[None]:
"""
Get the entire content of a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[None]
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/content",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get the entire content of a document from the knowledge base
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[None]
|
get_content
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/raw_client.py
|
MIT
|
def get(
self, documentation_id: str, chunk_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> KnowledgeBaseDocumentChunkResponseModel:
"""
        Get details about a specific document chunk used by RAG.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
chunk_id : str
The id of a document RAG chunk from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
KnowledgeBaseDocumentChunkResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.chunk.get(
documentation_id="21m00Tcm4TlvDq8ikWAM",
chunk_id="chunk_id",
)
"""
_response = self._raw_client.get(documentation_id, chunk_id, request_options=request_options)
return _response.data
|
Get details about a specific document chunk used by RAG.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
chunk_id : str
The id of a document RAG chunk from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
KnowledgeBaseDocumentChunkResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.knowledge_base.documents.chunk.get(
documentation_id="21m00Tcm4TlvDq8ikWAM",
chunk_id="chunk_id",
)
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/chunk/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/chunk/client.py
|
MIT
|
async def get(
self, documentation_id: str, chunk_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> KnowledgeBaseDocumentChunkResponseModel:
"""
        Get details about a specific document chunk used by RAG.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
chunk_id : str
The id of a document RAG chunk from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
KnowledgeBaseDocumentChunkResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.chunk.get(
documentation_id="21m00Tcm4TlvDq8ikWAM",
chunk_id="chunk_id",
)
asyncio.run(main())
"""
_response = await self._raw_client.get(documentation_id, chunk_id, request_options=request_options)
return _response.data
|
Get details about a specific document chunk used by RAG.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
chunk_id : str
The id of a document RAG chunk from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
KnowledgeBaseDocumentChunkResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.knowledge_base.documents.chunk.get(
documentation_id="21m00Tcm4TlvDq8ikWAM",
chunk_id="chunk_id",
)
asyncio.run(main())
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/chunk/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/chunk/client.py
|
MIT
|
def get(
self, documentation_id: str, chunk_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[KnowledgeBaseDocumentChunkResponseModel]:
"""
        Get details about a specific document chunk used by RAG.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
chunk_id : str
The id of a document RAG chunk from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[KnowledgeBaseDocumentChunkResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/chunk/{jsonable_encoder(chunk_id)}",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
KnowledgeBaseDocumentChunkResponseModel,
construct_type(
type_=KnowledgeBaseDocumentChunkResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get details about a specific document chunk used by RAG.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
chunk_id : str
The id of a document RAG chunk from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[KnowledgeBaseDocumentChunkResponseModel]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/chunk/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/chunk/raw_client.py
|
MIT
|
async def get(
self, documentation_id: str, chunk_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[KnowledgeBaseDocumentChunkResponseModel]:
"""
        Get details about a specific document chunk used by RAG.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
chunk_id : str
The id of a document RAG chunk from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[KnowledgeBaseDocumentChunkResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/chunk/{jsonable_encoder(chunk_id)}",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
KnowledgeBaseDocumentChunkResponseModel,
construct_type(
type_=KnowledgeBaseDocumentChunkResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get details about a specific document chunk used by RAG.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
chunk_id : str
The id of a document RAG chunk from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[KnowledgeBaseDocumentChunkResponseModel]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/knowledge_base/documents/chunk/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/knowledge_base/documents/chunk/raw_client.py
|
MIT
|
def calculate(
self,
*,
prompt_length: int,
number_of_pages: int,
rag_enabled: bool,
request_options: typing.Optional[RequestOptions] = None,
) -> LlmUsageCalculatorResponseModel:
"""
Returns a list of LLM models and the expected cost for using them based on the provided values.
Parameters
----------
prompt_length : int
Length of the prompt in characters.
number_of_pages : int
Pages of content in PDF documents or URLs in the agent's knowledge base.
rag_enabled : bool
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
LlmUsageCalculatorResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.llm_usage.calculate(
prompt_length=1,
number_of_pages=1,
rag_enabled=True,
)
"""
_response = self._raw_client.calculate(
prompt_length=prompt_length,
number_of_pages=number_of_pages,
rag_enabled=rag_enabled,
request_options=request_options,
)
return _response.data
|
Returns a list of LLM models and the expected cost for using them based on the provided values.
Parameters
----------
prompt_length : int
Length of the prompt in characters.
number_of_pages : int
Pages of content in PDF documents or URLs in the agent's knowledge base.
rag_enabled : bool
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
LlmUsageCalculatorResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.llm_usage.calculate(
prompt_length=1,
number_of_pages=1,
rag_enabled=True,
)
|
calculate
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/llm_usage/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/llm_usage/client.py
|
MIT
|
async def calculate(
self,
*,
prompt_length: int,
number_of_pages: int,
rag_enabled: bool,
request_options: typing.Optional[RequestOptions] = None,
) -> LlmUsageCalculatorResponseModel:
"""
Returns a list of LLM models and the expected cost for using them based on the provided values.
Parameters
----------
prompt_length : int
Length of the prompt in characters.
number_of_pages : int
Pages of content in PDF documents or URLs in the agent's knowledge base.
rag_enabled : bool
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
LlmUsageCalculatorResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.llm_usage.calculate(
prompt_length=1,
number_of_pages=1,
rag_enabled=True,
)
asyncio.run(main())
"""
_response = await self._raw_client.calculate(
prompt_length=prompt_length,
number_of_pages=number_of_pages,
rag_enabled=rag_enabled,
request_options=request_options,
)
return _response.data
|
Returns a list of LLM models and the expected cost for using them based on the provided values.
Parameters
----------
prompt_length : int
Length of the prompt in characters.
number_of_pages : int
Pages of content in PDF documents or URLs in the agent's knowledge base.
rag_enabled : bool
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
LlmUsageCalculatorResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.llm_usage.calculate(
prompt_length=1,
number_of_pages=1,
rag_enabled=True,
)
asyncio.run(main())
|
calculate
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/llm_usage/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/llm_usage/client.py
|
MIT
|
def calculate(
self,
*,
prompt_length: int,
number_of_pages: int,
rag_enabled: bool,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[LlmUsageCalculatorResponseModel]:
"""
Returns a list of LLM models and the expected cost for using them based on the provided values.
Parameters
----------
prompt_length : int
Length of the prompt in characters.
number_of_pages : int
Pages of content in PDF documents or URLs in the agent's knowledge base.
rag_enabled : bool
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[LlmUsageCalculatorResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/llm-usage/calculate",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json={
"prompt_length": prompt_length,
"number_of_pages": number_of_pages,
"rag_enabled": rag_enabled,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
LlmUsageCalculatorResponseModel,
construct_type(
type_=LlmUsageCalculatorResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Returns a list of LLM models and the expected cost for using them based on the provided values.
Parameters
----------
prompt_length : int
Length of the prompt in characters.
number_of_pages : int
Pages of content in PDF documents or URLs in the agent's knowledge base.
rag_enabled : bool
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[LlmUsageCalculatorResponseModel]
Successful Response
|
calculate
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/llm_usage/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/llm_usage/raw_client.py
|
MIT
|
async def calculate(
self,
*,
prompt_length: int,
number_of_pages: int,
rag_enabled: bool,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[LlmUsageCalculatorResponseModel]:
"""
Returns a list of LLM models and the expected cost for using them based on the provided values.
Parameters
----------
prompt_length : int
Length of the prompt in characters.
number_of_pages : int
Pages of content in PDF documents or URLs in the agent's knowledge base.
rag_enabled : bool
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[LlmUsageCalculatorResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/llm-usage/calculate",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json={
"prompt_length": prompt_length,
"number_of_pages": number_of_pages,
"rag_enabled": rag_enabled,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
LlmUsageCalculatorResponseModel,
construct_type(
type_=LlmUsageCalculatorResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Returns a list of LLM models and the expected cost for using them based on the provided values.
Parameters
----------
prompt_length : int
Length of the prompt in characters.
number_of_pages : int
Pages of content in PDF documents or URLs in the agent's knowledge base.
rag_enabled : bool
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[LlmUsageCalculatorResponseModel]
Successful Response
|
calculate
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/llm_usage/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/llm_usage/raw_client.py
|
MIT
|
def create(
self, *, request: PhoneNumbersCreateRequestBody, request_options: typing.Optional[RequestOptions] = None
) -> CreatePhoneNumberResponseModel:
"""
Import Phone Number from provider configuration (Twilio or SIP trunk)
Parameters
----------
request : PhoneNumbersCreateRequestBody
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
CreatePhoneNumberResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
from elevenlabs.conversational_ai.phone_numbers import (
PhoneNumbersCreateRequestBody_Twilio,
)
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.phone_numbers.create(
request=PhoneNumbersCreateRequestBody_Twilio(
phone_number="phone_number",
label="label",
sid="sid",
token="token",
),
)
"""
_response = self._raw_client.create(request=request, request_options=request_options)
return _response.data
|
Import Phone Number from provider configuration (Twilio or SIP trunk)
Parameters
----------
request : PhoneNumbersCreateRequestBody
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
CreatePhoneNumberResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
from elevenlabs.conversational_ai.phone_numbers import (
PhoneNumbersCreateRequestBody_Twilio,
)
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.phone_numbers.create(
request=PhoneNumbersCreateRequestBody_Twilio(
phone_number="phone_number",
label="label",
sid="sid",
token="token",
),
)
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/client.py
|
MIT
|
def get(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> PhoneNumbersGetResponse:
"""
Retrieve Phone Number details by ID
Parameters
----------
phone_number_id : str
            The id of a phone number. This is returned on phone number import.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PhoneNumbersGetResponse
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.phone_numbers.get(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
"""
_response = self._raw_client.get(phone_number_id, request_options=request_options)
return _response.data
|
Retrieve Phone Number details by ID
Parameters
----------
phone_number_id : str
    The id of a phone number. This is returned on phone number import.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PhoneNumbersGetResponse
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.phone_numbers.get(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/client.py
|
MIT
|
def delete(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.Optional[typing.Any]:
"""
Delete Phone Number by ID
Parameters
----------
phone_number_id : str
            The id of a phone number. This is returned on phone number import.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.phone_numbers.delete(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
"""
_response = self._raw_client.delete(phone_number_id, request_options=request_options)
return _response.data
|
Delete Phone Number by ID
Parameters
----------
phone_number_id : str
    The id of a phone number. This is returned on phone number import.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.phone_numbers.delete(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/client.py
|
MIT
|
def update(
self,
phone_number_id: str,
*,
agent_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> PhoneNumbersUpdateResponse:
"""
Update Phone Number details by ID
Parameters
----------
phone_number_id : str
            The id of a phone number. This is returned on phone number import.
agent_id : typing.Optional[str]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PhoneNumbersUpdateResponse
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.phone_numbers.update(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
"""
_response = self._raw_client.update(phone_number_id, agent_id=agent_id, request_options=request_options)
return _response.data
|
Update Phone Number details by ID
Parameters
----------
phone_number_id : str
    The id of a phone number. This is returned on phone number import.
agent_id : typing.Optional[str]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PhoneNumbersUpdateResponse
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.phone_numbers.update(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/client.py
|
MIT
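The example above omits agent_id, while assigning an agent to an imported phone number is the typical use of this endpoint. A minimal sketch with placeholder ids, based on the update signature shown in this record.
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Assign an agent to the phone number (both ids are placeholders).
client.conversational_ai.phone_numbers.update(
    phone_number_id="TeaqRRdTcIfIu2i7BYfT",
    agent_id="21m00Tcm4TlvDq8ikWAM",
)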
|
def list(
self, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.List[PhoneNumbersListResponseItem]:
"""
Retrieve all Phone Numbers
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.List[PhoneNumbersListResponseItem]
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.phone_numbers.list()
"""
_response = self._raw_client.list(request_options=request_options)
return _response.data
|
Retrieve all Phone Numbers
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.List[PhoneNumbersListResponseItem]
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.phone_numbers.list()
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/client.py
|
MIT
|
async def create(
self, *, request: PhoneNumbersCreateRequestBody, request_options: typing.Optional[RequestOptions] = None
) -> CreatePhoneNumberResponseModel:
"""
Import Phone Number from provider configuration (Twilio or SIP trunk)
Parameters
----------
request : PhoneNumbersCreateRequestBody
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
CreatePhoneNumberResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
from elevenlabs.conversational_ai.phone_numbers import (
PhoneNumbersCreateRequestBody_Twilio,
)
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.phone_numbers.create(
request=PhoneNumbersCreateRequestBody_Twilio(
phone_number="phone_number",
label="label",
sid="sid",
token="token",
),
)
asyncio.run(main())
"""
_response = await self._raw_client.create(request=request, request_options=request_options)
return _response.data
|
Import Phone Number from provider configuration (Twilio or SIP trunk)
Parameters
----------
request : PhoneNumbersCreateRequestBody
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
CreatePhoneNumberResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
from elevenlabs.conversational_ai.phone_numbers import (
PhoneNumbersCreateRequestBody_Twilio,
)
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.phone_numbers.create(
request=PhoneNumbersCreateRequestBody_Twilio(
phone_number="phone_number",
label="label",
sid="sid",
token="token",
),
)
asyncio.run(main())
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/client.py
|
MIT
|
async def get(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> PhoneNumbersGetResponse:
"""
Retrieve Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PhoneNumbersGetResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.phone_numbers.get(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
asyncio.run(main())
"""
_response = await self._raw_client.get(phone_number_id, request_options=request_options)
return _response.data
|
Retrieve Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PhoneNumbersGetResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.phone_numbers.get(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
asyncio.run(main())
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/client.py
|
MIT
|
async def delete(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.Optional[typing.Any]:
"""
Delete Phone Number by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.phone_numbers.delete(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
asyncio.run(main())
"""
_response = await self._raw_client.delete(phone_number_id, request_options=request_options)
return _response.data
|
Delete Phone Number by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.phone_numbers.delete(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
asyncio.run(main())
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/client.py
|
MIT
|
async def update(
self,
phone_number_id: str,
*,
agent_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> PhoneNumbersUpdateResponse:
"""
Update Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
agent_id : typing.Optional[str]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PhoneNumbersUpdateResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.phone_numbers.update(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
asyncio.run(main())
"""
_response = await self._raw_client.update(phone_number_id, agent_id=agent_id, request_options=request_options)
return _response.data
|
Update Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
agent_id : typing.Optional[str]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PhoneNumbersUpdateResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.phone_numbers.update(
phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
asyncio.run(main())
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/client.py
|
MIT
|
async def list(
self, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.List[PhoneNumbersListResponseItem]:
"""
Retrieve all Phone Numbers
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.List[PhoneNumbersListResponseItem]
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.phone_numbers.list()
asyncio.run(main())
"""
_response = await self._raw_client.list(request_options=request_options)
return _response.data
|
Retrieve all Phone Numbers
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.List[PhoneNumbersListResponseItem]
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.phone_numbers.list()
asyncio.run(main())
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/client.py
|
MIT
|
def create(
self, *, request: PhoneNumbersCreateRequestBody, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[CreatePhoneNumberResponseModel]:
"""
Import Phone Number from provider configuration (Twilio or SIP trunk)
Parameters
----------
request : PhoneNumbersCreateRequestBody
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[CreatePhoneNumberResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/phone-numbers/create",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json=convert_and_respect_annotation_metadata(
object_=request, annotation=PhoneNumbersCreateRequestBody, direction="write"
),
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
CreatePhoneNumberResponseModel,
construct_type(
type_=CreatePhoneNumberResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Import Phone Number from provider configuration (Twilio or SIP trunk)
Parameters
----------
request : PhoneNumbersCreateRequestBody
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[CreatePhoneNumberResponseModel]
Successful Response
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
MIT
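The raw create method above wraps the POST to v1/convai/phone-numbers/create and raises UnprocessableEntityError on a 422 or ApiError on any other unexpected status. A minimal sketch of handling those failures around the higher-level wrapper call; the import paths for the two error classes are assumptions, not confirmed by this snippet:
from elevenlabs import ElevenLabs
from elevenlabs.conversational_ai.phone_numbers import (
    PhoneNumbersCreateRequestBody_Twilio,
)
from elevenlabs.core.api_error import ApiError  # assumed import path
from elevenlabs.errors import UnprocessableEntityError  # assumed import path

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
try:
    phone_number = client.conversational_ai.phone_numbers.create(
        request=PhoneNumbersCreateRequestBody_Twilio(
            phone_number="phone_number",
            label="label",
            sid="sid",
            token="token",
        ),
    )
except UnprocessableEntityError as e:
    # 422: the request body failed validation; the parsed HttpValidationError is attached as the body.
    print("validation error:", e.body)
except ApiError as e:
    # Any other non-2xx (or unparseable) response surfaces here with status code and body.
    print("request failed:", e.status_code, e.body)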
|
def get(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[PhoneNumbersGetResponse]:
"""
Retrieve Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[PhoneNumbersGetResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
PhoneNumbersGetResponse,
construct_type(
type_=PhoneNumbersGetResponse, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Retrieve Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[PhoneNumbersGetResponse]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
MIT
|
def delete(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[typing.Optional[typing.Any]]:
"""
Delete Phone Number by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[typing.Optional[typing.Any]]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
base_url=self._client_wrapper.get_environment().base,
method="DELETE",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
typing.Optional[typing.Any],
construct_type(
type_=typing.Optional[typing.Any], # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Delete Phone Number by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[typing.Optional[typing.Any]]
Successful Response
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
MIT
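Every method in this client accepts request_options for per-call configuration. The concrete keys of RequestOptions are not shown in this snippet, so the ones used below (timeout_in_seconds, max_retries) are assumptions about its typical shape rather than confirmed fields:
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
# Hypothetical per-request overrides; the RequestOptions keys are assumed, not confirmed above.
client.conversational_ai.phone_numbers.delete(
    phone_number_id="TeaqRRdTcIfIu2i7BYfT",
    request_options={
        "timeout_in_seconds": 30,
        "max_retries": 2,
    },
)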
|
def update(
self,
phone_number_id: str,
*,
agent_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[PhoneNumbersUpdateResponse]:
"""
Update Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
agent_id : typing.Optional[str]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[PhoneNumbersUpdateResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
base_url=self._client_wrapper.get_environment().base,
method="PATCH",
json={
"agent_id": agent_id,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
PhoneNumbersUpdateResponse,
construct_type(
type_=PhoneNumbersUpdateResponse, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Update Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
agent_id : typing.Optional[str]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[PhoneNumbersUpdateResponse]
Successful Response
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
MIT
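Because agent_id defaults to the OMIT sentinel, the generated PATCH body only carries the field when a caller actually supplies it (the omit=OMIT argument filters the sentinel out of the JSON). A short sketch of both calls through the wrapper, using placeholder values as in the other examples:
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Supplying agent_id sends {"agent_id": "agent_id"} in the PATCH body.
client.conversational_ai.phone_numbers.update(
    phone_number_id="TeaqRRdTcIfIu2i7BYfT",
    agent_id="agent_id",
)

# Leaving agent_id at its OMIT default drops the key from the PATCH body entirely.
client.conversational_ai.phone_numbers.update(
    phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)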
|
def list(
self, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[typing.List[PhoneNumbersListResponseItem]]:
"""
Retrieve all Phone Numbers
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[typing.List[PhoneNumbersListResponseItem]]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/phone-numbers/",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
typing.List[PhoneNumbersListResponseItem],
construct_type(
type_=typing.List[PhoneNumbersListResponseItem], # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Retrieve all Phone Numbers
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[typing.List[PhoneNumbersListResponseItem]]
Successful Response
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
MIT
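The list endpoint returns a plain Python list of PhoneNumbersListResponseItem models, so the wrapper result can be sized and iterated directly. The item model's attributes are not shown in this snippet, so the sketch prints whole items rather than assuming field names:
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
phone_numbers = client.conversational_ai.phone_numbers.list()
print(f"{len(phone_numbers)} phone number(s) configured")
for item in phone_numbers:
    # Field names of PhoneNumbersListResponseItem are not assumed here; print the model as-is.
    print(item)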
|
async def create(
self, *, request: PhoneNumbersCreateRequestBody, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[CreatePhoneNumberResponseModel]:
"""
Import Phone Number from provider configuration (Twilio or SIP trunk)
Parameters
----------
request : PhoneNumbersCreateRequestBody
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[CreatePhoneNumberResponseModel]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/phone-numbers/create",
base_url=self._client_wrapper.get_environment().base,
method="POST",
json=convert_and_respect_annotation_metadata(
object_=request, annotation=PhoneNumbersCreateRequestBody, direction="write"
),
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
CreatePhoneNumberResponseModel,
construct_type(
type_=CreatePhoneNumberResponseModel, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Import Phone Number from provider configuration (Twilio or SIP trunk)
Parameters
----------
request : PhoneNumbersCreateRequestBody
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[CreatePhoneNumberResponseModel]
Successful Response
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
MIT
|
async def get(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[PhoneNumbersGetResponse]:
"""
Retrieve Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[PhoneNumbersGetResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
PhoneNumbersGetResponse,
construct_type(
type_=PhoneNumbersGetResponse, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Retrieve Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[PhoneNumbersGetResponse]
Successful Response
|
get
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
MIT
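Because the async variants are coroutines, several phone numbers can be fetched concurrently with asyncio.gather. A small sketch; the second id is a hypothetical placeholder, not a value taken from this snippet:
import asyncio

from elevenlabs import AsyncElevenLabs

client = AsyncElevenLabs(
    api_key="YOUR_API_KEY",
)

async def main() -> None:
    # Both GET requests run concurrently; the ids are illustrative placeholders.
    ids = ["TeaqRRdTcIfIu2i7BYfT", "AnotherPhoneNumberId"]
    results = await asyncio.gather(
        *(client.conversational_ai.phone_numbers.get(phone_number_id=i) for i in ids)
    )
    for phone_number in results:
        print(phone_number)

asyncio.run(main())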
|
async def delete(
self, phone_number_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[typing.Optional[typing.Any]]:
"""
Delete Phone Number by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[typing.Optional[typing.Any]]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
base_url=self._client_wrapper.get_environment().base,
method="DELETE",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
typing.Optional[typing.Any],
construct_type(
type_=typing.Optional[typing.Any], # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Delete Phone Number by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[typing.Optional[typing.Any]]
Successful Response
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
MIT
|
async def update(
self,
phone_number_id: str,
*,
agent_id: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[PhoneNumbersUpdateResponse]:
"""
Update Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
agent_id : typing.Optional[str]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[PhoneNumbersUpdateResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}",
base_url=self._client_wrapper.get_environment().base,
method="PATCH",
json={
"agent_id": agent_id,
},
headers={
"content-type": "application/json",
},
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
PhoneNumbersUpdateResponse,
construct_type(
type_=PhoneNumbersUpdateResponse, # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Update Phone Number details by ID
Parameters
----------
phone_number_id : str
The id of the phone number. This is returned on phone number creation.
agent_id : typing.Optional[str]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[PhoneNumbersUpdateResponse]
Successful Response
|
update
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
MIT
|
async def list(
self, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[typing.List[PhoneNumbersListResponseItem]]:
"""
Retrieve all Phone Numbers
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[typing.List[PhoneNumbersListResponseItem]]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"v1/convai/phone-numbers/",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
typing.List[PhoneNumbersListResponseItem],
construct_type(
type_=typing.List[PhoneNumbersListResponseItem], # type: ignore
object_=_response.json(),
),
)
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Retrieve all Phone Numbers
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[typing.List[PhoneNumbersListResponseItem]]
Successful Response
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/phone_numbers/raw_client.py
|
MIT
|
def create(
self, *, name: str, value: str, request_options: typing.Optional[RequestOptions] = None
) -> PostWorkspaceSecretResponseModel:
"""
Create a new secret for the workspace
Parameters
----------
name : str
value : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PostWorkspaceSecretResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.secrets.create(
name="name",
value="value",
)
"""
_response = self._raw_client.create(name=name, value=value, request_options=request_options)
return _response.data
|
Create a new secret for the workspace
Parameters
----------
name : str
value : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PostWorkspaceSecretResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.secrets.create(
name="name",
value="value",
)
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/secrets/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/secrets/client.py
|
MIT
|
async def list(
self, *, request_options: typing.Optional[RequestOptions] = None
) -> GetWorkspaceSecretsResponseModel:
"""
Get all workspace secrets for the user
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetWorkspaceSecretsResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.secrets.list()
asyncio.run(main())
"""
_response = await self._raw_client.list(request_options=request_options)
return _response.data
|
Get all workspace secrets for the user
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetWorkspaceSecretsResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.secrets.list()
asyncio.run(main())
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/secrets/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/secrets/client.py
|
MIT
|
async def create(
self, *, name: str, value: str, request_options: typing.Optional[RequestOptions] = None
) -> PostWorkspaceSecretResponseModel:
"""
Create a new secret for the workspace
Parameters
----------
name : str
value : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PostWorkspaceSecretResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.secrets.create(
name="name",
value="value",
)
asyncio.run(main())
"""
_response = await self._raw_client.create(name=name, value=value, request_options=request_options)
return _response.data
|
Create a new secret for the workspace
Parameters
----------
name : str
value : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PostWorkspaceSecretResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.secrets.create(
name="name",
value="value",
)
asyncio.run(main())
|
create
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/secrets/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/secrets/client.py
|
MIT
|
async def delete(self, secret_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
Delete a workspace secret if it's not in use
Parameters
----------
secret_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
None
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.secrets.delete(
secret_id="secret_id",
)
asyncio.run(main())
"""
_response = await self._raw_client.delete(secret_id, request_options=request_options)
return _response.data
|
Delete a workspace secret if it's not in use
Parameters
----------
secret_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
None
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.secrets.delete(
secret_id="secret_id",
)
asyncio.run(main())
|
delete
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/secrets/client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/secrets/client.py
|
MIT
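Taken together, the async secrets wrappers above support a simple create, list, delete lifecycle. A short sketch chaining them; the secret_id value is a placeholder, since the response models' field names are not shown in this snippet:
import asyncio

from elevenlabs import AsyncElevenLabs

client = AsyncElevenLabs(
    api_key="YOUR_API_KEY",
)

async def main() -> None:
    # Store a credential for the workspace, then confirm it shows up in the listing.
    created = await client.conversational_ai.secrets.create(
        name="name",
        value="value",
    )
    print(created)

    secrets = await client.conversational_ai.secrets.list()
    print(secrets)

    # Deletion only succeeds if the secret is not in use. The id below is a placeholder;
    # in practice it would come from the create/list responses, whose fields are not shown here.
    await client.conversational_ai.secrets.delete(
        secret_id="secret_id",
    )

asyncio.run(main())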
|
def list(
self, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[GetWorkspaceSecretsResponseModel]:
"""
Get all workspace secrets for the user
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetWorkspaceSecretsResponseModel]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
"v1/convai/secrets",
base_url=self._client_wrapper.get_environment().base,
method="GET",
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
GetWorkspaceSecretsResponseModel,
construct_type(
type_=GetWorkspaceSecretsResponseModel, # type: ignore
object_=_response.json(),
),
)
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
headers=dict(_response.headers),
body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
),
)
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
|
Get all workspace secrets for the user
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetWorkspaceSecretsResponseModel]
Successful Response
|
list
|
python
|
elevenlabs/elevenlabs-python
|
src/elevenlabs/conversational_ai/secrets/raw_client.py
|
https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/secrets/raw_client.py
|
MIT
|