"""OpenAI-powered restaurant website generation with OpenRouter provider config and LangSmith integration."""

import os
from typing import Dict, Any, AsyncGenerator
from openai import AsyncOpenAI
from langsmith.wrappers import wrap_openai
from core.config import settings

class AIWorkflow:
    """OpenAI-powered workflow for handling AI interactions for restaurant websites.

    Wraps an OpenRouter-backed ``AsyncOpenAI`` client (optionally traced via
    LangSmith) and exposes a streaming generator (`generate_response`) and a
    non-streaming call (`generate_response_sync`).
    """

    def __init__(self):
        # Initialize OpenAI client pointed at OpenRouter. The extra headers
        # are OpenRouter's attribution headers (referrer + app title).
        self.client = AsyncOpenAI(
            api_key=settings.openrouter_api_key,
            base_url="https://openrouter.ai/api/v1",
            default_headers={
                "HTTP-Referer": settings.openrouter_referrer,
                "X-Title": settings.openrouter_title
            }
        )

        # Wrap with LangSmith for tracing only when both an API key and the
        # tracing flag are configured.
        if settings.langsmith_api_key and settings.langsmith_tracing:
            self.client = wrap_openai(self.client)

        # System prompt is loaded once at construction time (file or fallback).
        self.system_prompt = self._load_system_prompt()

        # OpenRouter provider routing config, sent via extra_body on each call.
        self.provider_config = self._build_provider_config()

    def _load_system_prompt(self) -> str:
        """Load the system prompt from ``prompts/system_prompt.md`` next to
        this module, falling back to a hardcoded prompt if the file is
        missing or unreadable.

        Returns:
            The system prompt text, stripped of surrounding whitespace.
        """
        # Resolve <this module's dir>/prompts/system_prompt.md.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        prompt_file = os.path.join(current_dir, "prompts", "system_prompt.md")
        try:
            with open(prompt_file, 'r', encoding='utf-8') as f:
                return f.read().strip()
        except (OSError, UnicodeDecodeError) as e:
            # Narrowed from a bare `except Exception`: only file-access and
            # decoding errors should trigger the fallback; anything else is a
            # real bug that must surface.
            print(f"Warning: Could not load system prompt from file: {e}")
            return """ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICON make sure to import the library first. Try to create the best UI possible by using only HTML, CSS and JAVASCRIPT. Also, try to ellaborate as much as you can, to create something unique. ALWAYS GIVE THE RESPONSE INTO A SINGLE HTML FILE."""

    def _build_provider_config(self) -> Dict[str, Any]:
        """Build the OpenRouter provider-routing configuration.

        Comma-separated settings strings are split into lists; empty settings
        yield empty lists so the payload shape is always consistent.
        """
        return {
            "provider": {
                "order": settings.provider_order.split(",") if settings.provider_order else [],
                "allow_fallbacks": settings.provider_allow_fallbacks,
                "ignore": settings.provider_ignore.split(",") if settings.provider_ignore else [],
                "data_collection": settings.provider_data_collection
            }
        }

    def _build_messages(self, prompt: str) -> list:
        """Build the chat message list shared by both generation paths."""
        return [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": prompt}
        ]

    async def generate_response(
        self,
        prompt: str,
        thread_id: str = "default"
    ) -> AsyncGenerator[str, None]:
        """
        Generate AI response with streaming for restaurant website generation.

        Retries transient failures up to 3 times with linear backoff, but
        only *before* any content has been yielded — retrying mid-stream
        would re-stream and duplicate output already sent to the caller.

        Args:
            prompt: User prompt (restaurant website requirements)
            thread_id: Thread ID for conversation persistence (currently
                unused; reserved for future persistence support)

        Yields:
            Streaming response chunks, or a single "Error processing
            request: ..." message if all attempts fail.
        """
        # Local import kept to match the original module layout.
        import asyncio

        messages = self._build_messages(prompt)

        async def _open_stream():
            # Call the provider with streaming enabled.
            return await self.client.chat.completions.create(
                model=settings.agent_model,
                messages=messages,
                temperature=settings.temperature,
                max_tokens=settings.max_tokens,
                stream=True,
                extra_body=self.provider_config
            )

        max_attempts = 3
        per_request_timeout = getattr(settings, 'request_timeout_seconds', 120)

        for attempt in range(1, max_attempts + 1):
            yielded_any = False
            try:
                # NOTE: wait_for bounds only stream *creation*; chunk delivery
                # itself is not time-limited here.
                stream = await asyncio.wait_for(_open_stream(), timeout=per_request_timeout)

                async for chunk in stream:
                    # Defensive checks for the streaming token structure.
                    if getattr(chunk, 'choices', None) and chunk.choices and getattr(chunk.choices[0], 'delta', None):
                        content = getattr(chunk.choices[0].delta, 'content', None)
                        if content:
                            yielded_any = True
                            yield content

                # Stream completed successfully.
                return

            except asyncio.TimeoutError:
                # Never retry after partial output (would duplicate content);
                # otherwise retry until attempts are exhausted.
                if yielded_any or attempt >= max_attempts:
                    yield "Error processing request: request timed out"
                    return
                await asyncio.sleep(1 * attempt)
            except Exception as e:
                # Same policy for other transient errors.
                if yielded_any or attempt >= max_attempts:
                    yield f"Error processing request: {str(e)}"
                    return
                await asyncio.sleep(1 * attempt)

    async def generate_response_sync(
        self,
        prompt: str,
        thread_id: str = "default"
    ) -> Dict[str, Any]:
        """
        Generate AI response synchronously for restaurant website generation.

        Args:
            prompt: User prompt (restaurant website requirements)
            thread_id: Thread ID for conversation persistence (currently
                unused; reserved for future persistence support)

        Returns:
            Dictionary with keys "response" (the model output, or "" on
            failure) and "error" (None on success, error string on failure).
        """
        try:
            messages = self._build_messages(prompt)

            # Non-streaming call with the same provider routing config.
            response = await self.client.chat.completions.create(
                model=settings.agent_model,
                messages=messages,
                temperature=settings.temperature,
                max_tokens=settings.max_tokens,
                extra_body=self.provider_config
            )

            return {
                "response": response.choices[0].message.content,
                "error": None
            }

        except Exception as e:
            # Boundary-level catch: callers get a structured error instead
            # of an exception.
            return {
                "response": "",
                "error": str(e)
            }