Skip to content

AnthropicAI

AnthropicAI

AnthropicAI extends the base LLM and implements the Anthropic API.

AnthropicAI Config

AnthropicAI Config is the configuration object for AnthropicAI. It is used to configure AnthropicAI and is passed to AnthropicAI when it is created.

director.llm.anthropic.AnthropicAIConfig

Bases: BaseLLMConfig

AnthropicAI Config

AnthropicAI Interface

AnthropicAI is the LLM used by the agents and tools. It is used to generate responses to messages.

director.llm.anthropic.AnthropicAI

AnthropicAI(config=None)

Bases: BaseLLM

Parameters:

- `config` (`AnthropicAIConfig`, optional): the AnthropicAI configuration object. Defaults to `None`, in which case a default `AnthropicAIConfig` is created.
Source code in backend/director/llm/anthropic.py
def __init__(self, config: AnthropicAIConfig = None):
    """Initialize the AnthropicAI LLM wrapper.

    :param config: AnthropicAI Config; when ``None`` a default
        ``AnthropicAIConfig`` is constructed.
    :raises ImportError: if the ``anthropic`` python package is not installed.
    """
    if config is None:
        config = AnthropicAIConfig()
    # BaseLLM.__init__ consumes the config (it supplies self.api_key below).
    super().__init__(config=config)
    try:
        import anthropic
    except ImportError as e:
        # Chain the original error so the real import failure stays visible.
        raise ImportError("Please install Anthropic python library.") from e

    # self.api_key is expected to be populated by BaseLLM from the config.
    self.client = anthropic.Anthropic(api_key=self.api_key)

_format_tools

_format_tools(tools)

Format the tools to the format that Anthropic expects.

Example::

[
    {
        "name": "get_weather",
        "description": "Get the current weather in a given location",
        "input_schema": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA",
                }
            },
            "required": ["location"],
        },
    }
]
Source code in backend/director/llm/anthropic.py
def _format_tools(self, tools: list):
    """Format the tools to the format that Anthropic expects.

    **Example**::

        [
            {
                "name": "get_weather",
                "description": "Get the current weather in a given location",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        }
                    },
                    "required": ["location"],
                },
            }
        ]
    """
    formatted_tools = []
    for tool in tools:
        formatted_tools.append(
            {
                "name": tool["name"],
                "description": tool["description"],
                "input_schema": tool["parameters"],
            }
        )
    return formatted_tools

chat_completions

chat_completions(
    messages, tools=[], stop=None, response_format=None
)

Get completions for chat.

tools docs: https://docs.anthropic.com/en/docs/build-with-claude/tool-use

Source code in backend/director/llm/anthropic.py
def chat_completions(
    self, messages: list, tools: list = None, stop=None, response_format=None
):
    """Get completions for chat.

    :param messages: conversation messages to send to the model.
    :param tools: optional tool specs (see _format_tools); ``None``/empty
        means no tools are offered. (Default changed from a mutable ``[]``
        literal to ``None`` — behavior for callers is identical.)
    :param stop: unused here; kept for interface compatibility.
    :param response_format: unused here; kept for interface compatibility.

    tools docs: https://docs.anthropic.com/en/docs/build-with-claude/tool-use
    """
    system, messages = self._format_messages(messages)
    params = {
        "model": self.chat_model,
        "messages": messages,
        "system": system,
        "max_tokens": self.max_tokens,
    }
    if tools:
        params["tools"] = self._format_tools(tools)

    try:
        response = self.client.messages.create(**params)
    except Exception as e:
        # Propagate API failures to the caller. (An unreachable
        # `return LLMResponse(...)` that followed this raise was removed.)
        raise e

    # Anthropic returns a list of content blocks whose order is not fixed;
    # locate the text and tool_use blocks by type instead of by index.
    text_block = next(
        (block for block in response.content if block.type == "text"), None
    )
    tool_block = next(
        (block for block in response.content if block.type == "tool_use"), None
    )

    return LLMResponse(
        content=text_block.text if text_block is not None else "",
        tool_calls=[
            {
                "id": tool_block.id,
                "tool": {
                    "name": tool_block.name,
                    "arguments": tool_block.input,
                },
                "type": tool_block.type,
            }
        ]
        if tool_block is not None
        else [],
        finish_reason=response.stop_reason,
        send_tokens=response.usage.input_tokens,
        recv_tokens=response.usage.output_tokens,
        total_tokens=(response.usage.input_tokens + response.usage.output_tokens),
        status=LLMResponseStatus.SUCCESS,
    )