"""
Deep Analysis Agent: graph definition and public run API.
State, nodes, and executor logic live in deep_analysis_state, deep_analysis_nodes.
"""

import os
from typing import Optional

from langgraph.graph import StateGraph, END

from .state import DeepAnalysisState
from .helpers import load_analytical_schema
from .nodes import (
    ReasonerNode,
    ExecutorNode,
    FinalizerNode,
    FindingsNode,
    ReasonerAbortNode,
    should_investigate,
    # QueryFixerNode: defined in nodes.py but intentionally not wired — SQL errors are recorded in
    # ToolMessages and the graph still proceeds to findings → finalizer.
    # DecideNode, should_continue_after_decide  # commented out: single-turn flow, no decider
)
from dotenv import load_dotenv
from core.logger_config import get_logger

load_dotenv()
# Opt into LangSmith tracing only when an API key is present; setdefault keeps
# any values the operator already exported from being overwritten.
if os.getenv("LANGSMITH_API_KEY"):
    os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
    os.environ.setdefault("LANGCHAIN_PROJECT", "user-profiling")

logger = get_logger(__name__)

# Loaded once at import time; injected into ReasonerNode below.
ANALYTICAL_SCHEMA = load_analytical_schema()

# Node instances (schema injected where needed).
reasoner_node = ReasonerNode(analytical_schema=ANALYTICAL_SCHEMA)
executor_node = ExecutorNode()
finalizer_node = FinalizerNode()
findings_node = FindingsNode()
reasoner_abort_node = ReasonerAbortNode()
# decide_node = DecideNode()  # commented out: single-turn flow, no decider

# Graph topology: Reasoner -> Executor -> Findings -> Finalizer.
# Single-turn flow: no loops, no decider; reasoner_abort routes back to the
# reasoner when the executor signals an abort.
workflow = StateGraph(DeepAnalysisState)

_NODES = {
    "reasoner": reasoner_node,
    "executor": executor_node,
    "findings": findings_node,
    "finalizer": finalizer_node,
    "reasoner_abort": reasoner_abort_node,
}
for _node_name, _node in _NODES.items():
    workflow.add_node(_node_name, _node)
# workflow.add_node("decide_continue_or_finalize", decide_node)  # single-turn: no decider

workflow.set_entry_point("reasoner")
workflow.add_conditional_edges(
    "reasoner",
    should_investigate,
    {"execute": "executor", "finalize": "finalizer"},
)
workflow.add_conditional_edges(
    "executor",
    should_investigate,
    {
        "reasoner_abort": "reasoner_abort",
        "finalize": "finalizer",
        "findings": "findings",
    },
)
workflow.add_edge("findings", "finalizer")
workflow.add_edge("reasoner_abort", "reasoner")
workflow.add_edge("finalizer", END)
# workflow.add_conditional_edges("decide_continue_or_finalize", should_continue_after_decide, {...})  # single-turn: no decider

app = workflow.compile()


def _get_default_max_turns() -> int:
    try:
        return max(1, int(os.getenv("DEEP_ANALYSIS_MAX_TURNS", "1")))
    except ValueError:
        return 1


class DeepAnalysisAgent:
    """Public entry point: runs the compiled deep-analysis graph for one user."""

    def run(
        self,
        user_id: int,
        profile_summary: str,
        max_turns: Optional[int] = None,
    ) -> str:
        """Invoke the graph and return the final analysis text.

        Args:
            user_id: Identifier of the user being analyzed.
            profile_summary: Profile JSON/summary injected into the state.
            max_turns: Optional turn budget; defaults to the
                DEEP_ANALYSIS_MAX_TURNS environment value and is clamped
                to at least 1 either way.

        Returns:
            The graph's ``final_analysis`` text, or a fallback message when
            the run produced none.
        """
        turns = _get_default_max_turns() if max_turns is None else max(1, max_turns)
        logger.info(
            "Deep analysis pipeline start (Reasoner -> Executor -> Findings -> Finalize) for User %s",
            user_id,
        )

        # Seed every state key explicitly so downstream nodes never hit a
        # missing-key surprise mid-graph.
        seed_state: DeepAnalysisState = {
            "messages": [],
            "user_id": user_id,
            "profile_json": profile_summary,
            "max_turns": turns,
            "last_query": None,
            "last_queries_batch": None,
            "last_error": None,
            "failed_query_index": None,
            "fixed_query": None,
            "findings": [],
            "llm_findings": [],
            "failed_investigations": [],
            "retry_count": 0,
            "tool_call_count": 0,
            "context_near_limit": False,
            "batch_all_empty": None,
            "empty_turn_decision": None,
            "final_analysis": None,
        }

        result_state = app.invoke(seed_state)
        return result_state.get("final_analysis") or "Investigation could not be completed."
