Source code for ai.analysis.cost_analyzer_decorator_test

from unittest.mock import Mock

import pytest
from openai.types.beta.threads.run import Usage
from openai.types.chat.chat_completion import ChatCompletion

from ai.analysis.cost_analyzer_decorator import cost_analyzer
from ai.analysis.dataset_usage_analyzer import DatasetUsageAnalyzer
from ai.analysis.run.assistant_run import AssistantRun
from ai.assistant.assistant import Assistant, AssistantName
from ai.assistant.model.model_provider import ModelProvider


class MockedAssistant(Assistant):
    """A minimal Assistant implementation that returns a mocked ChatCompletion."""

    async def run_openai(self, prompt: str) -> ChatCompletion:
        """Simulate an OpenAI API call returning preset usage counts.

        Args:
            prompt (str): The input prompt (ignored in this mock).

        Returns:
            ChatCompletion: A mock completion with .model and .usage set.
        """
        mocked_run = Mock(spec=ChatCompletion)
        mocked_run.model = "gpt-4o"
        mocked_run.usage = Mock(spec=Usage)
        mocked_run.usage.prompt_tokens = 100
        mocked_run.usage.completion_tokens = 50
        return mocked_run


@pytest.mark.asyncio
async def test_cost_analyzer():
    """Verify that cost_analyzer logs and records an AssistantRun correctly.

    - Decorates MockedAssistant with cost_analyzer.
    - Executes run_openai to trigger cost logging.
    - Asserts that DatasetUsageAnalyzer.add_run was called once with an
      AssistantRun containing the expected assistant name and token counts.

    Raises:
        AssertionError: If any of the expected calls or attributes are missing.
    """
    usage_analyzer = Mock(spec=DatasetUsageAnalyzer)
    DecoratedAssistantClass = cost_analyzer(dataset_usage_analyzer=usage_analyzer)(
        MockedAssistant
    )
    usage_analyzer.add_run = Mock()

    assistant_name: AssistantName = AssistantName.Email_Answer
    decorated_instance = DecoratedAssistantClass(
        name=assistant_name,
        model_name="gpt-4o",
        base_prompt="test prompt",
        temperature=0.7,
        model_provider=ModelProvider(
            name="OpenAI",
            base_url="https://api.openai.com",
            api_key="sk_test_1234567890",
        ),
    )

    await decorated_instance.run_openai("test prompt")

    # Verify that add_run was called exactly once
    usage_analyzer.add_run.assert_called_once()

    # Inspect the AssistantRun passed to add_run
    passed_assistant_run: AssistantRun = usage_analyzer.add_run.call_args[0][0]
    assert isinstance(passed_assistant_run, AssistantRun)
    assert passed_assistant_run.assistant.name == assistant_name
    assert passed_assistant_run._completion_result.usage.prompt_tokens == 100
    assert passed_assistant_run._completion_result.usage.completion_tokens == 50
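
# Illustrative usage sketch (not part of the test). Based on the call pattern
# exercised above, cost_analyzer(dataset_usage_analyzer=...) returns a class
# decorator, so application code could presumably apply it with decorator
# syntax as well. The EmailAnswerAssistant class and the no-argument
# DatasetUsageAnalyzer() constructor below are assumptions for illustration.
#
#     analyzer = DatasetUsageAnalyzer()
#
#     @cost_analyzer(dataset_usage_analyzer=analyzer)
#     class EmailAnswerAssistant(Assistant):
#         async def run_openai(self, prompt: str) -> ChatCompletion:
#             ...  # real OpenAI call; token usage would be recorded via analyzer.add_run()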