Check out our upcoming events and meetups! View events →
Updates the configuration of a specific assistant version. Cannot update the main version.
import Telnyx from 'telnyx';
const client = new Telnyx({
apiKey: process.env['TELNYX_API_KEY'], // This is the default and can be omitted
});
const inferenceEmbedding = await client.ai.assistants.versions.update('version_id', {
assistant_id: 'assistant_id',
});
console.log(inferenceEmbedding.id);{
"id": "<string>",
"name": "<string>",
"created_at": "2023-11-07T05:31:56Z",
"model": "<string>",
"instructions": "<string>",
"version_id": "<string>",
"version_created_at": "2023-11-07T05:31:56Z",
"description": "<string>",
"tools": [
{
"type": "webhook",
"webhook": {
"name": "<string>",
"description": "<string>",
"url": "https://example.com/api/v1/function",
"method": "POST",
"headers": [
{
"name": "<string>",
"value": "<string>"
}
],
"body_parameters": {
"properties": {
"age": {
"description": "The age of the customer.",
"type": "integer"
},
"location": {
"description": "The location of the customer.",
"type": "string"
}
},
"required": [
"age",
"location"
],
"type": "object"
},
"path_parameters": {
"properties": {
"id": {
"description": "The id of the customer.",
"type": "string"
}
},
"required": [
"id"
],
"type": "object"
},
"query_parameters": {
"properties": {
"page": {
"description": "The page number.",
"type": "integer"
}
},
"required": [
"page"
],
"type": "object"
},
"async": false,
"timeout_ms": 5250,
"store_fields_as_variables": [
{
"name": "<string>",
"value_path": "<string>"
}
]
}
}
],
"mcp_servers": [],
"greeting": "<string>",
"llm_api_key_ref": "<string>",
"external_llm": {
"model": "<string>",
"base_url": "<string>",
"llm_api_key_ref": "<string>",
"authentication_method": "token",
"certificate_ref": "<string>",
"token_retrieval_url": "<string>",
"forward_metadata": false
},
"fallback_config": {
"model": "<string>",
"llm_api_key_ref": "<string>",
"external_llm": {
"model": "<string>",
"base_url": "<string>",
"llm_api_key_ref": "<string>",
"authentication_method": "token",
"certificate_ref": "<string>",
"token_retrieval_url": "<string>",
"forward_metadata": false
}
},
"voice_settings": {
"voice": "<string>",
"voice_speed": 1,
"api_key_ref": "<string>",
"temperature": 0.5,
"similarity_boost": 0.75,
"use_speaker_boost": true,
"style": 0,
"speed": 1,
"language_boost": null,
"expressive_mode": false,
"background_audio": {
"type": "predefined_media",
"value": "silence"
}
},
"transcription": {
"model": "deepgram/flux",
"language": "<string>",
"api_key_ref": "<string>",
"region": "<string>",
"settings": {
"smart_format": true,
"numerals": true,
"eot_threshold": 0.8,
"eot_timeout_ms": 5000,
"eager_eot_threshold": 0.4,
"keyterm": "<string>",
"end_of_turn_confidence_threshold": 0.4,
"min_turn_silence": 400,
"max_turn_silence": 1280
}
},
"telephony_settings": {
"default_texml_app_id": "<string>",
"supports_unauthenticated_web_calls": true,
"noise_suppression": "krisp",
"noise_suppression_config": {
"attenuation_limit": 100,
"mode": "advanced"
},
"time_limit_secs": 1800,
"user_idle_timeout_secs": 7205,
"user_idle_reply_secs": 10,
"voicemail_detection": {
"on_voicemail_detected": {
"action": "stop_assistant",
"voicemail_message": {
"type": "prompt",
"prompt": "<string>",
"message": "<string>"
}
}
},
"recording_settings": {
"enabled": true,
"channels": "dual",
"format": "mp3"
}
},
"messaging_settings": {
"default_messaging_profile_id": "<string>",
"delivery_status_webhook_url": "<string>",
"conversation_inactivity_minutes": 5000000
},
"enabled_features": [
"telephony"
],
"insight_settings": {
"insight_group_id": "<string>"
},
"privacy_settings": {
"data_retention": true
},
"dynamic_variables_webhook_url": "<string>",
"dynamic_variables_webhook_timeout_ms": 1500,
"dynamic_variables": {},
"import_metadata": {
"import_provider": "elevenlabs",
"import_id": "<string>"
},
"widget_settings": {
"theme": "light",
"audio_visualizer_config": {
"color": "verdant",
"preset": "<string>"
},
"start_call_text": "<string>",
"default_state": "expanded",
"position": "fixed",
"view_history_url": "<string>",
"report_issue_url": "<string>",
"give_feedback_url": "<string>",
"agent_thinking_text": "<string>",
"speak_to_interrupt_text": "<string>",
"logo_icon_url": "<string>"
},
"interruption_settings": {
"enable": true,
"start_speaking_plan": {
"wait_seconds": 0.4,
"transcription_endpointing_plan": {
"on_punctuation_seconds": 0.1,
"on_no_punctuation_seconds": 1.5,
"on_number_seconds": 0.5
}
}
},
"integrations": [],
"observability_settings": {
"status": "disabled",
"secret_key_ref": "<string>",
"public_key_ref": "<string>",
"host": "<string>",
"prompt_name": "<string>",
"prompt_version": 2,
"prompt_label": "<string>",
"prompt_sync": "disabled"
},
"version_name": "New assistant",
"related_mission_ids": [],
"tags": [],
"post_conversation_settings": {
"enabled": false
}
}
Documentation Index
Fetch the complete documentation index at: https://developers.telnyx.com/llms.txt
Use this file to discover all available pages before exploring further.
Bearer authentication header of the form Bearer <token>, where <token> is your auth token.
ID of the model to use when external_llm is not set. You can use the Get models API to see available models. If external_llm is provided, the assistant uses external_llm instead of this field. If neither model nor external_llm is provided, Telnyx applies the default model.
System instructions for the assistant. These may be templated with dynamic variables
Deprecated for new integrations. Inline tool definitions available to the assistant. Prefer tool_ids to attach shared tools created with the AI Tools endpoints.
Show child attributes
MCP servers attached to the assistant. Create MCP servers with /ai/mcp_servers, then reference them by id here.
Show child attributes
IDs of shared tools to attach to the assistant. New integrations should prefer tool_ids over inline tools.
Text that the assistant will use to start the conversation. This may be templated with dynamic variables. Use an empty string to have the assistant wait for the user to speak first. Use the special value <assistant-speaks-first-with-model-generated-message> to have the assistant generate the greeting based on the system instructions.
This is only needed when using third-party inference providers selected by model. The identifier for an integration secret /v2/integration_secrets that refers to your LLM provider's API key. For bring-your-own endpoint authentication, use external_llm.llm_api_key_ref instead. Warning: Free plans are unlikely to work with this integration.
Show child attributes
Show child attributes
Show child attributes
Show child attributes
Show child attributes
Show child attributes
If telephony is enabled, the assistant will be able to make and receive calls. If messaging is enabled, the assistant will be able to send and receive messages.
telephony, messaging Show child attributes
Show child attributes
If dynamic_variables_webhook_url is set, Telnyx sends a POST request to this URL at the start of the conversation to resolve dynamic variables. Gotcha: the webhook response must wrap variables under a top-level dynamic_variables object, e.g. {"dynamic_variables": {"customer_name": "Jane"}}. Returning a flat object will be ignored and variables will fall back to their defaults. See the dynamic variables guide for the full request/response format and timeout behavior.
Timeout in milliseconds for the dynamic variables webhook. Must be between 1 and 10000 ms. If the webhook does not respond within this timeout, the call proceeds with default values. See the dynamic variables guide.
1 <= x <= 10000
Map of dynamic variables and their default values
Configuration settings for the assistant's web widget.
Show child attributes
Settings for interruptions and how the assistant decides the user has finished speaking. These timings are most relevant when using non turn-taking transcription models. For turn-taking models like deepgram/flux, end-of-turn behavior is controlled by the transcription end-of-turn settings under transcription.settings (eot_threshold, eot_timeout_ms, eager_eot_threshold).
Show child attributes
Connected integrations attached to the assistant. The catalog of available integrations is at /ai/integrations; the user's connected integrations are at /ai/integrations/connections. Each item references a catalog integration by integration_id.
Show child attributes
Show child attributes
Tags associated with the assistant. Tags can also be managed with the assistant tag endpoints.
Human-readable name for the assistant version.
Maximum length: 50
Configuration for post-conversation processing. When enabled, the assistant receives one additional LLM turn after the conversation ends, allowing it to execute tool calls such as logging to a CRM or sending a summary. The assistant can execute multiple parallel or sequential tools during this phase. Telephony-control tools (e.g. hangup, transfer) are unavailable post-conversation. Beta feature.
Show child attributes
Returns the updated assistant version configuration
ID of the model to use when external_llm is not set. You can use the Get models API to see available models. If external_llm is provided, the assistant uses external_llm instead of this field. If neither model nor external_llm is provided, Telnyx applies the default model.
System instructions for the assistant. These may be templated with dynamic variables
Identifier for the assistant version returned by version-aware assistant endpoints.
Timestamp when this assistant version was created.
Deprecated for new integrations. Inline tool definitions available to the assistant. Prefer tool_ids to attach shared tools created with the AI Tools endpoints.
Show child attributes
MCP servers attached to the assistant. Create MCP servers with /ai/mcp_servers, then reference them by id here.
Show child attributes
Text that the assistant will use to start the conversation. This may be templated with dynamic variables. Use an empty string to have the assistant wait for the user to speak first. Use the special value <assistant-speaks-first-with-model-generated-message> to have the assistant generate the greeting based on the system instructions.
This is only needed when using third-party inference providers selected by model. The identifier for an integration secret /v2/integration_secrets that refers to your LLM provider's API key. For bring-your-own endpoint authentication, use external_llm.llm_api_key_ref instead. Warning: Free plans are unlikely to work with this integration.
Show child attributes
Show child attributes
Show child attributes
Show child attributes
Show child attributes
Show child attributes
If telephony is enabled, the assistant will be able to make and receive calls. If messaging is enabled, the assistant will be able to send and receive messages.
telephony, messaging Show child attributes
Show child attributes
If dynamic_variables_webhook_url is set, Telnyx sends a POST request to this URL at the start of the conversation to resolve dynamic variables. Gotcha: the webhook response must wrap variables under a top-level dynamic_variables object, e.g. {"dynamic_variables": {"customer_name": "Jane"}}. Returning a flat object will be ignored and variables will fall back to their defaults. See the dynamic variables guide for the full request/response format and timeout behavior.
Timeout in milliseconds for the dynamic variables webhook. Must be between 1 and 10000 ms. If the webhook does not respond within this timeout, the call proceeds with default values. See the dynamic variables guide.
1 <= x <= 10000
Map of dynamic variables and their values
Show child attributes
Configuration settings for the assistant's web widget.
Show child attributes
Settings for interruptions and how the assistant decides the user has finished speaking. These timings are most relevant when using non turn-taking transcription models. For turn-taking models like deepgram/flux, end-of-turn behavior is controlled by the transcription end-of-turn settings under transcription.settings (eot_threshold, eot_timeout_ms, eager_eot_threshold).
Show child attributes
Connected integrations attached to the assistant. The catalog of available integrations is at /ai/integrations; the user's connected integrations are at /ai/integrations/connections. Each item references a catalog integration by integration_id.
Show child attributes
Show child attributes
Human-readable name for the assistant version.
Maximum length: 50
IDs of missions related to this assistant.
Tags associated with the assistant. Tags can also be managed with the assistant tag endpoints.
Configuration for post-conversation processing. When enabled, the assistant receives one additional LLM turn after the conversation ends, allowing it to execute tool calls such as logging to a CRM or sending a summary. The assistant can execute multiple parallel or sequential tools during this phase. Telephony-control tools (e.g. hangup, transfer) are unavailable post-conversation. Beta feature.
Show child attributes
Was this page helpful?
import Telnyx from 'telnyx';
const client = new Telnyx({
apiKey: process.env['TELNYX_API_KEY'], // This is the default and can be omitted
});
const inferenceEmbedding = await client.ai.assistants.versions.update('version_id', {
assistant_id: 'assistant_id',
});
console.log(inferenceEmbedding.id);{
"id": "<string>",
"name": "<string>",
"created_at": "2023-11-07T05:31:56Z",
"model": "<string>",
"instructions": "<string>",
"version_id": "<string>",
"version_created_at": "2023-11-07T05:31:56Z",
"description": "<string>",
"tools": [
{
"type": "webhook",
"webhook": {
"name": "<string>",
"description": "<string>",
"url": "https://example.com/api/v1/function",
"method": "POST",
"headers": [
{
"name": "<string>",
"value": "<string>"
}
],
"body_parameters": {
"properties": {
"age": {
"description": "The age of the customer.",
"type": "integer"
},
"location": {
"description": "The location of the customer.",
"type": "string"
}
},
"required": [
"age",
"location"
],
"type": "object"
},
"path_parameters": {
"properties": {
"id": {
"description": "The id of the customer.",
"type": "string"
}
},
"required": [
"id"
],
"type": "object"
},
"query_parameters": {
"properties": {
"page": {
"description": "The page number.",
"type": "integer"
}
},
"required": [
"page"
],
"type": "object"
},
"async": false,
"timeout_ms": 5250,
"store_fields_as_variables": [
{
"name": "<string>",
"value_path": "<string>"
}
]
}
}
],
"mcp_servers": [],
"greeting": "<string>",
"llm_api_key_ref": "<string>",
"external_llm": {
"model": "<string>",
"base_url": "<string>",
"llm_api_key_ref": "<string>",
"authentication_method": "token",
"certificate_ref": "<string>",
"token_retrieval_url": "<string>",
"forward_metadata": false
},
"fallback_config": {
"model": "<string>",
"llm_api_key_ref": "<string>",
"external_llm": {
"model": "<string>",
"base_url": "<string>",
"llm_api_key_ref": "<string>",
"authentication_method": "token",
"certificate_ref": "<string>",
"token_retrieval_url": "<string>",
"forward_metadata": false
}
},
"voice_settings": {
"voice": "<string>",
"voice_speed": 1,
"api_key_ref": "<string>",
"temperature": 0.5,
"similarity_boost": 0.75,
"use_speaker_boost": true,
"style": 0,
"speed": 1,
"language_boost": null,
"expressive_mode": false,
"background_audio": {
"type": "predefined_media",
"value": "silence"
}
},
"transcription": {
"model": "deepgram/flux",
"language": "<string>",
"api_key_ref": "<string>",
"region": "<string>",
"settings": {
"smart_format": true,
"numerals": true,
"eot_threshold": 0.8,
"eot_timeout_ms": 5000,
"eager_eot_threshold": 0.4,
"keyterm": "<string>",
"end_of_turn_confidence_threshold": 0.4,
"min_turn_silence": 400,
"max_turn_silence": 1280
}
},
"telephony_settings": {
"default_texml_app_id": "<string>",
"supports_unauthenticated_web_calls": true,
"noise_suppression": "krisp",
"noise_suppression_config": {
"attenuation_limit": 100,
"mode": "advanced"
},
"time_limit_secs": 1800,
"user_idle_timeout_secs": 7205,
"user_idle_reply_secs": 10,
"voicemail_detection": {
"on_voicemail_detected": {
"action": "stop_assistant",
"voicemail_message": {
"type": "prompt",
"prompt": "<string>",
"message": "<string>"
}
}
},
"recording_settings": {
"enabled": true,
"channels": "dual",
"format": "mp3"
}
},
"messaging_settings": {
"default_messaging_profile_id": "<string>",
"delivery_status_webhook_url": "<string>",
"conversation_inactivity_minutes": 5000000
},
"enabled_features": [
"telephony"
],
"insight_settings": {
"insight_group_id": "<string>"
},
"privacy_settings": {
"data_retention": true
},
"dynamic_variables_webhook_url": "<string>",
"dynamic_variables_webhook_timeout_ms": 1500,
"dynamic_variables": {},
"import_metadata": {
"import_provider": "elevenlabs",
"import_id": "<string>"
},
"widget_settings": {
"theme": "light",
"audio_visualizer_config": {
"color": "verdant",
"preset": "<string>"
},
"start_call_text": "<string>",
"default_state": "expanded",
"position": "fixed",
"view_history_url": "<string>",
"report_issue_url": "<string>",
"give_feedback_url": "<string>",
"agent_thinking_text": "<string>",
"speak_to_interrupt_text": "<string>",
"logo_icon_url": "<string>"
},
"interruption_settings": {
"enable": true,
"start_speaking_plan": {
"wait_seconds": 0.4,
"transcription_endpointing_plan": {
"on_punctuation_seconds": 0.1,
"on_no_punctuation_seconds": 1.5,
"on_number_seconds": 0.5
}
}
},
"integrations": [],
"observability_settings": {
"status": "disabled",
"secret_key_ref": "<string>",
"public_key_ref": "<string>",
"host": "<string>",
"prompt_name": "<string>",
"prompt_version": 2,
"prompt_label": "<string>",
"prompt_sync": "disabled"
},
"version_name": "New assistant",
"related_mission_ids": [],
"tags": [],
"post_conversation_settings": {
"enabled": false
}
}