"""Tests for the AdvisoryCouncil multi-agent debate system."""
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from backend.agents.council.advisory_council import AdvisoryCouncil, DebateRound
class TestDebateRound:
"""Tests for the DebateRound dataclass."""
def test_debate_round_creation(self):
"""Test creating a DebateRound instance."""
round_data = DebateRound(
round_number=1,
bull_argument="Bull argument",
bear_argument="Bear argument",
bull_confidence=75.0,
bear_confidence=60.0
)
assert round_data.round_number == 1
assert round_data.bull_argument == "Bull argument"
assert round_data.bear_argument == "Bear argument"
assert round_data.bull_confidence == 75.0
assert round_data.bear_confidence == 60.0
class TestAdvisoryCouncil:
"""Tests for the AdvisoryCouncil class."""
@pytest.fixture
def mock_mcp_router(self):
"""Create a mock MCP router."""
return MagicMock()
@pytest.fixture
def council(self, mock_mcp_router):
"""Create an AdvisoryCouncil instance with mocked dependencies."""
with patch('backend.agents.council.advisory_council.ChatAnthropic'):
council = AdvisoryCouncil(mock_mcp_router)
return council
def test_council_initialization(self, council, mock_mcp_router):
"""Test AdvisoryCouncil initialisation."""
assert council.mcp_router == mock_mcp_router
assert council.max_debate_rounds == 2
assert council.convergence_threshold == 5
assert council.min_rounds == 1
def test_extract_confidence_patterns(self, council):
"""Test confidence extraction from various text patterns."""
assert council._extract_confidence("Confidence: 75") == 75.0
assert council._extract_confidence("80% confidence in this") == 80.0
assert council._extract_confidence("I rate this 65/100") == 65.0
assert council._extract_confidence("No confidence mentioned") == 50.0
@pytest.mark.asyncio
async def test_run_specialists_empty_data(self, council):
"""Test specialists return appropriate message when no data available."""
state = {
"fundamentals": {},
"technical_indicators": {},
"sentiment_data": {},
"economic_data": {},
"risk_analysis": {}
}
results = await council._run_specialists(state)
assert len(results) == 5
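        # With empty inputs, each specialist is expected to flag the missing data in its analysis text.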
for result in results:
assert "No" in result["analysis"] or "data" in result["analysis"]
@pytest.mark.asyncio
async def test_run_council_structure(self, council):
"""Test that run_council returns expected structure."""
mock_response = MagicMock()
mock_response.content = "Test analysis with confidence: 70"
council.model.ainvoke = AsyncMock(return_value=mock_response)
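        # Every model call returns the same canned response; this test only checks the shape of the result.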
state = {
"fundamentals": {"AAPL": {"pe_ratio": 25}},
"technical_indicators": {"AAPL": {"rsi": 55}},
"sentiment_data": {"AAPL": {"score": 0.6}},
"economic_data": {"gdp_growth": 2.5},
"risk_analysis": {"var": 0.05}
}
result = await council.run_council(state)
assert "specialist_analyses" in result
assert "bull_case" in result
assert "bear_case" in result
assert "debate_transcript" in result
assert "consensus" in result
@pytest.mark.asyncio
async def test_build_bull_case(self, council):
"""Test bull case building."""
mock_response = MagicMock()
mock_response.content = "Strong bullish thesis. Confidence: 80"
council.model.ainvoke = AsyncMock(return_value=mock_response)
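        # The stubbed reply embeds "Confidence: 80", which the council should parse into the numeric confidence.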
analyses = [
{"specialist": "Fundamental Analyst", "analysis": "Good fundamentals"},
{"specialist": "Technical Analyst", "analysis": "Bullish patterns"}
]
result = await council._build_bull_case(analyses, {})
assert result["case"] == "bull"
assert "thesis" in result
assert result["confidence"] == 80.0
@pytest.mark.asyncio
async def test_build_bear_case(self, council):
"""Test bear case building."""
mock_response = MagicMock()
mock_response.content = "Bearish warning signs. Confidence: 65"
council.model.ainvoke = AsyncMock(return_value=mock_response)
analyses = [
{"specialist": "Risk Analyst", "analysis": "High volatility"}
]
result = await council._build_bear_case(analyses, {})
assert result["case"] == "bear"
assert result["confidence"] == 65.0
@pytest.mark.asyncio
async def test_run_debate_min_rounds(self, council):
"""Test that debate runs at least minimum rounds."""
mock_response = MagicMock()
mock_response.content = "Argument with confidence: 70"
council.model.ainvoke = AsyncMock(return_value=mock_response)
bull_case = {"thesis": "Bull thesis", "confidence": 70}
bear_case = {"thesis": "Bear thesis", "confidence": 65}
result = await council._run_debate(bull_case, bear_case, {})
assert len(result) >= council.min_rounds
@pytest.mark.asyncio
async def test_synthesise_consensus_bullish(self, council):
"""Test consensus synthesis for bullish outcome."""
mock_response = MagicMock()
mock_response.content = "Balanced recommendation favoring bulls"
council.model.ainvoke = AsyncMock(return_value=mock_response)
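        # Bull arguments carry markedly higher confidence (80 vs 50) in both rounds, so the consensus should lean bullish.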
debate_rounds = [
DebateRound(1, "Bull 1", "Bear 1", 80, 50),
DebateRound(2, "Bull 2", "Bear 2", 80, 50)
]
result = await council._synthesise_consensus(
[], {"thesis": "", "confidence": 80}, {"thesis": "", "confidence": 50},
debate_rounds
)
assert result["stance"] == "bullish"
assert "recommendation" in result
assert "bull_score" in result
assert "bear_score" in result
@pytest.mark.asyncio
async def test_synthesise_consensus_bearish(self, council):
"""Test consensus synthesis for bearish outcome."""
mock_response = MagicMock()
mock_response.content = "Balanced recommendation favoring bears"
council.model.ainvoke = AsyncMock(return_value=mock_response)
debate_rounds = [
DebateRound(1, "Bull 1", "Bear 1", 40, 80),
DebateRound(2, "Bull 2", "Bear 2", 40, 80)
]
result = await council._synthesise_consensus(
[], {"thesis": "", "confidence": 40}, {"thesis": "", "confidence": 80},
debate_rounds
)
assert result["stance"] == "bearish"
@pytest.mark.asyncio
async def test_synthesise_consensus_neutral(self, council):
"""Test consensus synthesis for neutral outcome."""
mock_response = MagicMock()
mock_response.content = "Balanced recommendation"
council.model.ainvoke = AsyncMock(return_value=mock_response)
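        # Equal confidence on both sides should yield a neutral stance.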
debate_rounds = [
DebateRound(1, "Bull 1", "Bear 1", 60, 60),
DebateRound(2, "Bull 2", "Bear 2", 60, 60)
]
result = await council._synthesise_consensus(
[], {"thesis": "", "confidence": 60}, {"thesis": "", "confidence": 60},
debate_rounds
)
assert result["stance"] == "neutral"
class TestAdvisoryCouncilImport:
"""Test that AdvisoryCouncil can be imported correctly."""
def test_import_from_council_module(self):
"""Test importing AdvisoryCouncil from council module."""
from backend.agents.council import AdvisoryCouncil
assert AdvisoryCouncil is not None
def test_import_direct(self):
"""Test importing AdvisoryCouncil directly."""
from backend.agents.council.advisory_council import AdvisoryCouncil
assert AdvisoryCouncil is not None