OpenAI

The OpenAI class extends BaseLLM and implements the OpenAI chat completions API.

OpenAI Config

OpenaiConfig is the configuration object for OpenAI. It is passed to OpenAI at construction time; when no config is supplied, a default OpenaiConfig is created.

director.llm.openai.OpenaiConfig

Bases: BaseLLMConfig

OpenAI Config
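
A minimal configuration sketch. The field names below are assumptions inferred from the attributes the class reads (api_key, api_base, chat_model, temperature, and so on); consult OpenaiConfig for the authoritative fields and defaults.

from director.llm.openai import OpenAI, OpenaiConfig

# Assumed field names; OpenaiConfig defines the real schema and defaults.
config = OpenaiConfig(
    api_key="sk-...",      # OpenAI API key
    chat_model="gpt-4o",   # illustrative model name
    temperature=0.7,
)
llm = OpenAI(config=config)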

OpenAI Interface

OpenAI is the LLM used by the agents and tools to generate responses to messages.

director.llm.openai.OpenAI

OpenAI(config=None)

Bases: BaseLLM

Parameters:

Name    Type          Description    Default
config  OpenaiConfig  OpenAI Config  None
Source code in backend/director/llm/openai.py
def __init__(self, config: OpenaiConfig = None):
    """
    :param config: OpenAI Config
    """
    if config is None:
        config = OpenaiConfig()
    super().__init__(config=config)
    try:
        # Imported here so a missing SDK only fails at instantiation.
        import openai
    except ImportError:
        raise ImportError("Please install OpenAI python library.")

    self.client = openai.OpenAI(api_key=self.api_key, base_url=self.api_base)
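
Usage follows directly from the constructor: the openai package must be installed (pip install openai), and a missing config falls back to the defaults.

from director.llm.openai import OpenAI, OpenaiConfig

llm = OpenAI()                        # uses a default OpenaiConfig()
llm = OpenAI(config=OpenaiConfig())   # equivalent explicit form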

_format_messages

_format_messages(messages)

Format the messages to the format that OpenAI expects.

Source code in backend/director/llm/openai.py
def _format_messages(self, messages: list):
    """Format the messages to the format that OpenAI expects."""
    formatted_messages = []
    for message in messages:
        if message["role"] == "assistant" and message.get("tool_calls"):
            formatted_messages.append(
                {
                    "role": message["role"],
                    "content": message["content"],
                    "tool_calls": [
                        {
                            "id": tool_call["id"],
                            "function": {
                                "name": tool_call["tool"]["name"],
                                "arguments": json.dumps(
                                    tool_call["tool"]["arguments"]
                                ),
                            },
                            "type": tool_call["type"],
                        }
                        for tool_call in message["tool_calls"]
                    ],
                }
            )
        else:
            formatted_messages.append(message)
    return formatted_messages
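
For example, an assistant message carrying Director's internal tool-call shape is rewritten into OpenAI's function shape, with the arguments dict JSON-encoded into a string (the id and values below are illustrative):

internal = {
    "role": "assistant",
    "content": "",
    "tool_calls": [
        {
            "id": "call_123",
            "type": "function",
            "tool": {"name": "get_delivery_date", "arguments": {"order_id": "A1"}},
        }
    ],
}

formatted = llm._format_messages([internal])[0]
# formatted["tool_calls"][0] is now:
# {
#     "id": "call_123",
#     "function": {"name": "get_delivery_date", "arguments": '{"order_id": "A1"}'},
#     "type": "function",
# }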

_format_tools

_format_tools(tools)

Format the tools to the format that OpenAI expects.

Example:

[
    {
        "type": "function",
        "function": {
            "name": "get_delivery_date",
            "description": "Get the delivery date for a customer's order.",
            "parameters": {
                "type": "object",
                "properties": {
                    "order_id": {
                        "type": "string",
                        "description": "The customer's order ID."
                    }
                },
                "required": ["order_id"],
                "additionalProperties": False
            }
        }
    }
]
Source code in backend/director/llm/openai.py
def _format_tools(self, tools: list):
    """Format the tools to the format that OpenAI expects.

    **Example**::

        [
            {
                "type": "function",
                "function": {
                    "name": "get_delivery_date",
                    "description": "Get the delivery date for a customer's order.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "order_id": {
                                "type": "string",
                                "description": "The customer's order ID."
                            }
                        },
                        "required": ["order_id"],
                        "additionalProperties": False
                    }
                }
            }
        ]
    """
    formatted_tools = []
    for tool in tools:
        formatted_tools.append(
            {
                "type": "function",
                "function": {
                    "name": tool["name"],
                    "description": tool["description"],
                    "parameters": tool["parameters"],
                    # "strict" is a property of the "function" object in the
                    # OpenAI function-calling schema, not a sibling of it.
                    "strict": True,
                },
            }
        )
    return formatted_tools
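
The input side of that mapping is Director's internal tool dict, which only needs name, description, and parameters keys; a sketch with illustrative values:

tool = {
    "name": "get_delivery_date",
    "description": "Get the delivery date for a customer's order.",
    "parameters": {
        "type": "object",
        "properties": {
            "order_id": {"type": "string", "description": "The customer's order ID."}
        },
        "required": ["order_id"],
        "additionalProperties": False,
    },
}
formatted = llm._format_tools([tool])  # yields the shape shown in the example above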

chat_completions

chat_completions(
    messages, tools=[], stop=None, response_format=None
)

Get completions for chat.

docs: https://platform.openai.com/docs/guides/function-calling

Source code in backend/director/llm/openai.py
def chat_completions(
    self, messages: list, tools: list = [], stop=None, response_format=None
):
    """Get completions for chat.

    docs: https://platform.openai.com/docs/guides/function-calling
    """
    params = {
        "model": self.chat_model,
        "messages": self._format_messages(messages),
        "temperature": self.temperature,
        "max_tokens": self.max_tokens,
        "top_p": self.top_p,
        "stop": stop,
        "timeout": self.timeout,
    }
    if tools:
        params["tools"] = self._format_tools(tools)
        params["tool_choice"] = "auto"

    if response_format:
        params["response_format"] = response_format

    try:
        response = self.client.chat.completions.create(**params)
    except Exception as e:
        print(f"Error: {e}")
        return LLMResponse(content=f"Error: {e}")

    return LLMResponse(
        content=response.choices[0].message.content or "",
        tool_calls=[
            {
                "id": tool_call.id,
                "tool": {
                    "name": tool_call.function.name,
                    "arguments": json.loads(tool_call.function.arguments),
                },
                "type": tool_call.type,
            }
            for tool_call in response.choices[0].message.tool_calls
        ]
        if response.choices[0].message.tool_calls
        else [],
        finish_reason=response.choices[0].finish_reason,
        send_tokens=response.usage.prompt_tokens,
        recv_tokens=response.usage.completion_tokens,
        total_tokens=response.usage.total_tokens,
        status=LLMResponseStatus.SUCCESS,
    )
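
Putting it together, a hedged end-to-end sketch: the LLMResponse field names are taken from the return statement above, and the message and tool shapes are the internal ones shown earlier.

llm = OpenAI()
response = llm.chat_completions(
    messages=[{"role": "user", "content": "When will order A1 arrive?"}],
    tools=[tool],  # internal tool dict as in the _format_tools example
)

print(response.finish_reason)       # e.g. "stop" or "tool_calls"
print(response.content)             # assistant text; "" when only tools fire
for call in response.tool_calls:    # internal shape: id, tool, type
    print(call["tool"]["name"], call["tool"]["arguments"])
print(response.send_tokens, response.recv_tokens, response.total_tokens)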