Home Assistant Unofficial Reference 2024.12.1
conversation.py
Go to the documentation of this file.
1 """Conversation support for Anthropic."""
2 
"""Conversation support for Anthropic."""

from collections.abc import Callable
import json
from typing import Any, Literal, cast

import anthropic
from anthropic._types import NOT_GIVEN
from anthropic.types import (
    Message,
    MessageParam,
    TextBlock,
    TextBlockParam,
    ToolParam,
    ToolResultBlockParam,
    ToolUseBlock,
    ToolUseBlockParam,
)
import voluptuous as vol
from voluptuous_openapi import convert

from homeassistant.components import conversation
from homeassistant.components.conversation import trace
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_LLM_HASS_API, MATCH_ALL
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError, TemplateError
from homeassistant.helpers import device_registry as dr, intent, llm, template
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import ulid

from . import AnthropicConfigEntry
from .const import (
    CONF_CHAT_MODEL,
    CONF_MAX_TOKENS,
    CONF_PROMPT,
    CONF_TEMPERATURE,
    DOMAIN,
    LOGGER,
    RECOMMENDED_CHAT_MODEL,
    RECOMMENDED_MAX_TOKENS,
    RECOMMENDED_TEMPERATURE,
)
44 
45 # Max number of back and forth with the LLM to generate a response
46 MAX_TOOL_ITERATIONS = 10
47 
48 
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: AnthropicConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up conversation entities.

    Creates one conversation agent entity per config entry and registers it
    with Home Assistant.
    """
    agent = AnthropicConversationEntity(config_entry)
    async_add_entities([agent])
57 
58 
def _format_tool(
    tool: llm.Tool, custom_serializer: Callable[[Any], Any] | None
) -> ToolParam:
    """Format an LLM tool specification as an Anthropic ``ToolParam``.

    The voluptuous schema in ``tool.parameters`` is converted to a JSON
    schema via ``voluptuous_openapi.convert``; ``custom_serializer`` lets the
    LLM API provide serialization for non-standard schema types.
    """
    return ToolParam(
        name=tool.name,
        # Anthropic requires a description string; fall back to empty.
        description=tool.description or "",
        input_schema=convert(tool.parameters, custom_serializer=custom_serializer),
    )
68 
69 
def _message_convert(
    message: Message,
) -> MessageParam:
    """Convert an Anthropic response ``Message`` to a ``MessageParam`` TypedDict.

    Only text and tool-use content blocks are carried over; any other block
    types in the response are dropped.
    """
    param_content: list[TextBlockParam | ToolUseBlockParam] = []

    for message_content in message.content:
        if isinstance(message_content, TextBlock):
            param_content.append(TextBlockParam(type="text", text=message_content.text))
        elif isinstance(message_content, ToolUseBlock):
            param_content.append(
                ToolUseBlockParam(
                    type="tool_use",
                    id=message_content.id,
                    name=message_content.name,
                    input=message_content.input,
                )
            )

    return MessageParam(role=message.role, content=param_content)
90 
91 
class AnthropicConversationEntity(
    conversation.ConversationEntity, conversation.AbstractConversationAgent
):
    """Anthropic conversation agent."""

    _attr_has_entity_name = True
    _attr_name = None

    def __init__(self, entry: AnthropicConfigEntry) -> None:
        """Initialize the agent."""
        self.entry = entry
        # In-memory conversation history, keyed by conversation id (a ULID).
        self.history: dict[str, list[MessageParam]] = {}
        self._attr_unique_id = entry.entry_id
        self._attr_device_info = dr.DeviceInfo(
            identifiers={(DOMAIN, entry.entry_id)},
            manufacturer="Anthropic",
            model="Claude",
            entry_type=dr.DeviceEntryType.SERVICE,
        )
        # Advertise device control only when an LLM API (tools) is configured.
        if self.entry.options.get(CONF_LLM_HASS_API):
            self._attr_supported_features = (
                conversation.ConversationEntityFeature.CONTROL
            )

    @property
    def supported_languages(self) -> list[str] | Literal["*"]:
        """Return a list of supported languages."""
        return MATCH_ALL

    async def async_added_to_hass(self) -> None:
        """When entity is added to Home Assistant."""
        await super().async_added_to_hass()
        self.entry.async_on_unload(
            self.entry.add_update_listener(self._async_entry_update_listener)
        )

    async def async_process(
        self, user_input: conversation.ConversationInput
    ) -> conversation.ConversationResult:
        """Process a sentence.

        Renders the system prompt, sends the conversation to the Anthropic
        API, and executes any tool calls the model requests (up to
        MAX_TOOL_ITERATIONS round trips) before returning the final answer.
        """
        options = self.entry.options
        intent_response = intent.IntentResponse(language=user_input.language)
        llm_api: llm.APIInstance | None = None
        tools: list[ToolParam] | None = None
        user_name: str | None = None
        llm_context = llm.LLMContext(
            platform=DOMAIN,
            context=user_input.context,
            user_prompt=user_input.text,
            language=user_input.language,
            assistant=conversation.DOMAIN,
            device_id=user_input.device_id,
        )

        if options.get(CONF_LLM_HASS_API):
            try:
                llm_api = await llm.async_get_api(
                    self.hass,
                    options[CONF_LLM_HASS_API],
                    llm_context,
                )
            except HomeAssistantError as err:
                LOGGER.error("Error getting LLM API: %s", err)
                intent_response.async_set_error(
                    intent.IntentResponseErrorCode.UNKNOWN,
                    f"Error preparing LLM API: {err}",
                )
                return conversation.ConversationResult(
                    response=intent_response,
                    conversation_id=user_input.conversation_id,
                )
            tools = [
                _format_tool(tool, llm_api.custom_serializer) for tool in llm_api.tools
            ]

        if user_input.conversation_id is None:
            conversation_id = ulid.ulid_now()
            messages = []

        elif user_input.conversation_id in self.history:
            conversation_id = user_input.conversation_id
            messages = self.history[conversation_id]

        else:
            # Conversation IDs are ULIDs. We generate a new one if not provided.
            # If an old ULID is passed in, we will generate a new one to indicate
            # a new conversation was started. If the user picks their own, they
            # want to track a conversation and we respect it.
            try:
                ulid.ulid_to_bytes(user_input.conversation_id)
                conversation_id = ulid.ulid_now()
            except ValueError:
                conversation_id = user_input.conversation_id

            messages = []

        if (
            user_input.context
            and user_input.context.user_id
            and (
                user := await self.hass.auth.async_get_user(user_input.context.user_id)
            )
        ):
            user_name = user.name

        try:
            prompt_parts = [
                template.Template(
                    llm.BASE_PROMPT
                    + options.get(CONF_PROMPT, llm.DEFAULT_INSTRUCTIONS_PROMPT),
                    self.hass,
                ).async_render(
                    {
                        "ha_name": self.hass.config.location_name,
                        "user_name": user_name,
                        "llm_context": llm_context,
                    },
                    parse_result=False,
                )
            ]

        except TemplateError as err:
            LOGGER.error("Error rendering prompt: %s", err)
            intent_response.async_set_error(
                intent.IntentResponseErrorCode.UNKNOWN,
                f"Sorry, I had a problem with my template: {err}",
            )
            return conversation.ConversationResult(
                response=intent_response, conversation_id=conversation_id
            )

        if llm_api:
            prompt_parts.append(llm_api.api_prompt)

        prompt = "\n".join(prompt_parts)

        # Create a copy of the variable because we attach it to the trace
        messages = [*messages, MessageParam(role="user", content=user_input.text)]

        LOGGER.debug("Prompt: %s", messages)
        LOGGER.debug("Tools: %s", tools)
        trace.async_conversation_trace_append(
            trace.ConversationTraceEventType.AGENT_DETAIL,
            {"system": prompt, "messages": messages},
        )

        client = self.entry.runtime_data

        # To prevent infinite loops, we limit the number of iterations
        for _iteration in range(MAX_TOOL_ITERATIONS):
            try:
                response = await client.messages.create(
                    model=options.get(CONF_CHAT_MODEL, RECOMMENDED_CHAT_MODEL),
                    messages=messages,
                    tools=tools or NOT_GIVEN,
                    max_tokens=options.get(CONF_MAX_TOKENS, RECOMMENDED_MAX_TOKENS),
                    system=prompt,
                    temperature=options.get(CONF_TEMPERATURE, RECOMMENDED_TEMPERATURE),
                )
            except anthropic.AnthropicError as err:
                intent_response.async_set_error(
                    intent.IntentResponseErrorCode.UNKNOWN,
                    f"Sorry, I had a problem talking to Anthropic: {err}",
                )
                return conversation.ConversationResult(
                    response=intent_response, conversation_id=conversation_id
                )

            LOGGER.debug("Response %s", response)

            messages.append(_message_convert(response))

            # Done unless the model asked to use a tool (and tools are set up).
            if response.stop_reason != "tool_use" or not llm_api:
                break

            tool_results: list[ToolResultBlockParam] = []
            for tool_call in response.content:
                if isinstance(tool_call, TextBlock):
                    LOGGER.info(tool_call.text)

                if not isinstance(tool_call, ToolUseBlock):
                    continue

                tool_input = llm.ToolInput(
                    tool_name=tool_call.name,
                    tool_args=cast(dict[str, Any], tool_call.input),
                )
                LOGGER.debug(
                    "Tool call: %s(%s)", tool_input.tool_name, tool_input.tool_args
                )

                try:
                    tool_response = await llm_api.async_call_tool(tool_input)
                except (HomeAssistantError, vol.Invalid) as e:
                    # Report tool failures back to the model instead of raising.
                    tool_response = {"error": type(e).__name__}
                    if str(e):
                        tool_response["error_text"] = str(e)

                LOGGER.debug("Tool response: %s", tool_response)
                tool_results.append(
                    ToolResultBlockParam(
                        type="tool_result",
                        tool_use_id=tool_call.id,
                        content=json.dumps(tool_response),
                    )
                )

            # Tool results are sent back to the model as a user message.
            messages.append(MessageParam(role="user", content=tool_results))

        self.history[conversation_id] = messages

        # Speak the first text block of the final response.
        for content in response.content:
            if isinstance(content, TextBlock):
                intent_response.async_set_speech(content.text)
                break

        return conversation.ConversationResult(
            response=intent_response, conversation_id=conversation_id
        )

    async def _async_entry_update_listener(
        self, hass: HomeAssistant, entry: ConfigEntry
    ) -> None:
        """Handle options update."""
        # Reload as we update device info + entity name + supported features
        await hass.config_entries.async_reload(entry.entry_id)
conversation.ConversationResult async_process(self, conversation.ConversationInput user_input)
None _async_entry_update_listener(self, HomeAssistant hass, ConfigEntry entry)
MessageParam _message_convert(Message message)
Definition: conversation.py:72
ToolParam _format_tool(llm.Tool tool, Callable[[Any], Any]|None custom_serializer)
Definition: conversation.py:61
None async_setup_entry(HomeAssistant hass, AnthropicConfigEntry config_entry, AddEntitiesCallback async_add_entities)
Definition: conversation.py:53