"""
Automated Tasks for Website Generation - Phase 1 (No AI Required)
Handles JSON parsing, template selection, and template setup automatically
"""
import json
import logging
import os
import shutil
import re
from pathlib import Path
from typing import Dict, Any
from datetime import datetime
import aiofiles

# Get the project root directory (parent of src/)
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

logger = logging.getLogger(__name__)

class AutomatedTaskError(Exception):
    """Custom exception for automated task errors"""
    pass

def parse_json_input(user_input: str | Dict[str, Any]) -> Dict[str, Any]:
    """Parse and validate JSON input for website generation."""
    try:
        # Parse JSON if string, use directly if dict
        data = json.loads(user_input) if isinstance(user_input, str) else user_input
        
        # Validate required fields
        required_fields = ['websiteName', 'websiteDescription']
        missing_fields = [field for field in required_fields if not data.get(field)]
        if missing_fields:
            raise AutomatedTaskError(f"Missing required fields: {', '.join(missing_fields)}")
        
        # Structure and validate data with defaults
        website_data = {
            'websiteName': data['websiteName'].strip(),
            'websiteDescription': data.get('websiteDescription', '').strip(),
            'restaurantPhone': data.get('restaurantPhone', ''),
            'restaurantEmail': data.get('restaurantEmail', ''),
            'restaurantAddress': data.get('restaurantAddress', ''),
            'operatingHours': data.get('operatingHours', {}),
            'socialUrls': data.get('socialUrls', {}),
            'facebookUrl': data.get('facebookUrl', ''),
            'instagramUrl': data.get('instagramUrl', ''),
            'twitterUrl': data.get('twitterUrl', ''),
            'pages': data.get('pages', []),
            'colors': {
                'primary': data.get('primaryColor', data.get('colors', {}).get('primary', '#2c3e50')),
                'secondary': data.get('secondaryColor', data.get('colors', {}).get('secondary', '#3498db')),
                'accent': data.get('accentColor', data.get('colors', {}).get('accent', '#e74c3c'))
            },
            # Normalize typography value to canonical slug (e.g., 'Playfair Display' -> 'playfair')
            'typography': _normalize_font_key(data.get('typography', data.get('selectedFont', 'Inter'))),
            'logoInfo': data.get('logoInfo', data.get('websiteLogo', {}))
        }
        
        logger.info(f"Successfully parsed JSON input for website: {website_data['websiteName']}")
        return website_data
        
    except json.JSONDecodeError as e:
        raise AutomatedTaskError(f"Invalid JSON format: {e}") from e
    except AutomatedTaskError:
        raise
    except Exception as e:
        raise AutomatedTaskError(f"Error parsing JSON input: {e}") from e

async def setup_template(website_data: Dict[str, Any]) -> Dict[str, Any]:
    """Create website folder and HTML files for content generation."""
    try:
        website_name = website_data['websiteName']
        pages = website_data.get('pages', [])
        
        # Create safe folder name and destination path
        safe_folder_name = _create_safe_folder_name(website_name)
        dest_path = Path(PROJECT_ROOT) / "templates" / "generated" / safe_folder_name
        
        # Ensure generated directory exists and clean destination
        dest_path.parent.mkdir(parents=True, exist_ok=True)
        if dest_path.exists():
            shutil.rmtree(dest_path)
            logger.info(f"Removed existing directory: {dest_path}")
        
        # Create the folder
        dest_path.mkdir(parents=True, exist_ok=True)
        
        # Create the placeholder index.html (single-page output; see _create_html_files)
        html_files_created = await _create_html_files(dest_path, website_data, pages)
        
        # Copy logo files and clean up uploads
        logo_files_copied = await _copy_logo_files(website_data, dest_path)
        
        setup_result = {
            'success': True,
            'destination_path': str(dest_path),
            'safe_folder_name': safe_folder_name,
            'html_files_created': html_files_created,
            'logo_files_copied': logo_files_copied,
            'timestamp': datetime.now().isoformat(),
            'website_data': website_data
        }
        
        logger.info(f"Successfully created website folder: {dest_path}")
        logger.info(f"HTML files created: {html_files_created}")

        return setup_result
        
    except AutomatedTaskError:
        raise
    except Exception as e:
        raise AutomatedTaskError(f"Error creating website folder: {e}") from e

async def _create_html_files(dest_path: Path, website_data: Dict[str, Any], pages: list) -> list:
    """Create a single placeholder index.html (`website_data` and `pages` are accepted for interface parity but currently unused; the site is generated as one page)."""
    html_files_created = []
    
    try:
        # Create only index.html for single-page website
        page_file = dest_path / "index.html"
        
        # Write a placeholder HTML file; it will be overwritten by the AI service
        html_content = """<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Loading...</title>
</head>
<body>
    <p>Website is being generated...</p>
</body>
</html>"""
        
        await _write_file_async(page_file, html_content)
        html_files_created.append(str(page_file))
        logger.info(f"Created placeholder HTML file: {page_file.name}")
        
        return html_files_created
        
    except Exception as e:
        logger.error(f"Error creating HTML file: {e}")
        return []

def _create_safe_folder_name(website_name: str) -> str:
    """Create a safe folder name from website name with improved formatting."""
    if not website_name or not website_name.strip():
        safe_name = 'website'
    else:
        # Clean and normalize the website name
        # Keep only alphanumeric characters, spaces, hyphens, and underscores
        safe_name = ''.join(c for c in website_name.strip() if c.isalnum() or c in (' ', '-', '_'))
        
        # Replace multiple spaces with single space, then replace spaces with underscores
        safe_name = re.sub(r'\s+', ' ', safe_name).strip()
        safe_name = safe_name.replace(' ', '_')
        
        # Remove multiple consecutive underscores
        safe_name = re.sub(r'_+', '_', safe_name)
        
        # Remove leading/trailing underscores
        safe_name = safe_name.strip('_')
        
        # Convert to lowercase
        safe_name = safe_name.lower()
        
        # Ensure it's not empty after cleaning
        if not safe_name:
            safe_name = 'website'
        
        # Limit length to prevent overly long folder names
        if len(safe_name) > 50:
            safe_name = safe_name[:50].rstrip('_')
    
    # Add timestamp with improved format: YYYY_MM_DD_HH_MM_SS
    timestamp = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    return f"{safe_name}_{timestamp}"

# Canonical mapping of font slugs -> human-readable family names
FONT_KEY_TO_FAMILY = {
    'playfair': 'Playfair Display',
    'inter': 'Inter',
    'roboto': 'Roboto',
    'opensans': 'Open Sans',
    'lato': 'Lato',
    'montserrat': 'Montserrat',
    'poppins': 'Poppins',
    'raleway': 'Raleway',
    'nunito': 'Nunito',
    'source': 'Source Sans Pro',
    'dancing': 'Dancing Script',
    'pacifico': 'Pacifico',
    'satisfy': 'Satisfy',
    'greatvibes': 'Great Vibes',
    'bebas': 'Bebas Neue',
    'righteous': 'Righteous'
}

def _normalize_font_key(font_value: str) -> str:
    """Normalize arbitrary font input to a canonical slug used across UI/backend.

    Tries to match known slugs first, then matches against known family names
    (e.g. 'Playfair Display' -> 'playfair'). Falls back to 'inter'.
    """
    if not font_value:
        return 'inter'
    val = str(font_value).strip().lower()
    if not val:
        # A whitespace-only value would otherwise match every family below
        # (every string starts with ''), so fall back to the default early.
        return 'inter'
    # direct slug match
    if val in FONT_KEY_TO_FAMILY:
        return val
    # normalized family match (remove spaces/punctuation)
    norm = ''.join(c for c in val if c.isalnum())
    for key, family in FONT_KEY_TO_FAMILY.items():
        fam_norm = ''.join(c for c in family.lower() if c.isalnum())
        if norm == fam_norm or norm.startswith(fam_norm) or fam_norm.startswith(norm) or fam_norm in norm:
            return key
    # fallback: take first token if it matches a key
    first = val.split()[0]
    if first in FONT_KEY_TO_FAMILY:
        return first
    return 'inter'
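
# Example (illustrative) normalizations:
#   _normalize_font_key('Playfair Display')  # 'playfair' (family-name match)
#   _normalize_font_key('OPENSANS')          # 'opensans' (direct slug match)
#   _normalize_font_key('Bebas Neue')        # 'bebas'
#   _normalize_font_key('Comic Sans MS')     # 'inter' (unknown -> fallback)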

def _get_font_family_name(font_key: str) -> str:
    """Convert a font key to its font family name."""
    key = _normalize_font_key(font_key)
    # _normalize_font_key always returns a key present in the map, so the
    # default below is only a safety net.
    return FONT_KEY_TO_FAMILY.get(key, 'Inter')

def _get_font_fallback(font_key: str) -> str:
    """Get the appropriate generic CSS fallback font family."""
    # Only 'playfair' currently exists in FONT_KEY_TO_FAMILY; the other keys
    # are placeholders for serif fonts that may be added later.
    serif_keys = {'playfair', 'merriweather', 'lora', 'ptserif', 'cormorant'}
    key = _normalize_font_key(font_key)
    return 'serif' if key in serif_keys else 'sans-serif'

def format_website_prompt(website_data: Dict[str, Any]) -> str:
    """
    Format website data into AI prompt for website generation.
    
    Args:
        website_data: Parsed website data from JSON input
        
    Returns:
        Formatted prompt string for AI workflow
    """
    try:
        # Extract basic information
        website_name = website_data.get('websiteName', '')
        description = website_data.get('websiteDescription', '')
        phone = website_data.get('restaurantPhone', '')
        email = website_data.get('restaurantEmail', '')
        address = website_data.get('restaurantAddress', '')
        
        # Format operating hours
        operating_hours = _format_operating_hours(website_data.get('operatingHours', {}))
        
        # Format logo
        logo_info = _format_logo_info(website_data.get('logoInfo', {}))
        
        # Format social media
        social_media = _format_social_media(website_data)
        
        # Format pages (only consumed by the disabled Website Structure section below)
        pages = _format_pages(website_data.get('pages', []))
        
        # Format design info
        design_info = _format_design_info(website_data)
        
        # Build the prompt dynamically, omitting empty sections
        prompt_sections = ["Generate a website for a restaurant based on the following specifications:", ""]
        
        # General Information section
        general_info = ["General Information:"]
        if logo_info:  # Only add logo line if not empty
            general_info.append(f"Logo: {logo_info}")
        general_info.append(f"Website Name: {website_name}")
        general_info.append(f"Year: {datetime.now().year}")
        if description.strip():
            general_info.append(f"Description: {description}")
        general_info.append("Type: restaurant")
        
        prompt_sections.extend(general_info)
        prompt_sections.append("")
        
        # Contact Information section
        contact_info = ["Contact Information:"]
        contact_added = False
        if phone.strip():
            contact_info.append(f"Phone: {phone}")
            contact_added = True
        if email.strip():
            contact_info.append(f"Email: {email}")
            contact_added = True
        if address.strip():
            contact_info.append(f"Address: {address}")
            contact_added = True
        
        if contact_added:
            prompt_sections.extend(contact_info)
            prompt_sections.append("")
        
        # Operating Hours section
        if operating_hours.strip():
            prompt_sections.extend(["Operating Hours:", operating_hours, ""])
        
        # Social Media section
        if social_media.strip():
            prompt_sections.extend(["Social Media:", social_media, ""])
        
        # Website Structure section (disabled: the single-file output uses
        # anchor links, so navigation sections are not listed in the prompt)
        # prompt_sections.extend(["Website Structure:", f"Navigation Sections: {pages}", "Note: Use anchor links (#section-id) for navigation since this is a single HTML file", ""])
        
        # Design & Branding section
        prompt_sections.extend(["Design & Branding:", design_info])
        
        prompt = '\n'.join(prompt_sections)
        
        logger.info(f"Successfully formatted website data to AI prompt for: {website_name}")
        return prompt
        
    except Exception as e:
        logger.error(f"Error formatting website prompt: {e}")
        raise AutomatedTaskError(f"Error formatting website prompt: {e}")

def _format_operating_hours(operating_hours: Dict[str, Any]) -> str:
    """Format operating hours for DeepSite prompt with smart grouping."""
    if not operating_hours:
        return ""  # Empty operating hours - omit completely
    
    days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
    hours_list = []
    i = 0
    
    while i < len(days):
        day = days[i]
        day_data = operating_hours.get(day, {})
        
        if not day_data.get('isOpen', False):
            # Closed day
            hours_list.append(f"{day.capitalize()}: Closed")
            i += 1
        else:
            # Open day - start a group
            open_time = _convert_24h_to_12h(day_data.get('open', '09:00'))
            close_time = _convert_24h_to_12h(day_data.get('close', '22:00'))
            current_hours = f"{open_time} - {close_time}"
            
            # Find consecutive days with same hours
            group_end = i
            for j in range(i + 1, len(days)):
                next_day = days[j]
                next_day_data = operating_hours.get(next_day, {})
                
                if not next_day_data.get('isOpen', False):
                    # Next day is closed, end group
                    break
                
                next_open_time = _convert_24h_to_12h(next_day_data.get('open', '09:00'))
                next_close_time = _convert_24h_to_12h(next_day_data.get('close', '22:00'))
                next_hours = f"{next_open_time} - {next_close_time}"
                
                if next_hours != current_hours:
                    # Different hours, end group
                    break
                
                group_end = j
            
            # Format the group
            if group_end == i:
                # Single day
                hours_list.append(f"{day.capitalize()}: {current_hours}")
            else:
                # Multiple consecutive days
                first_day = day.capitalize()
                last_day = days[group_end].capitalize()
                hours_list.append(f"{first_day} - {last_day}: {current_hours}")
            
            i = group_end + 1
    
    return '\n'.join(hours_list)
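
# Example (illustrative): consecutive days with identical hours are grouped.
# Input (abbreviated): Mon-Fri 09:00-22:00, Sat 10:00-23:00, Sun closed.
# Output:
#   Monday - Friday: 9:00 AM - 10:00 PM
#   Saturday: 10:00 AM - 11:00 PM
#   Sunday: Closed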

def _convert_24h_to_12h(time_24h: str) -> str:
    """Convert 24-hour time to 12-hour format."""
    try:
        hour, minute = time_24h.split(':')
        hour = int(hour)
        minute = int(minute)
        
        if hour == 0:
            return f"12:{minute:02d} AM"
        elif hour < 12:
            return f"{hour}:{minute:02d} AM"
        elif hour == 12:
            return f"12:{minute:02d} PM"
        else:
            return f"{hour - 12}:{minute:02d} PM"
    except (ValueError, AttributeError):
        return time_24h  # Return original if conversion fails
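
# Example conversions:
#   '00:30' -> '12:30 AM'    '09:00' -> '9:00 AM'
#   '12:00' -> '12:00 PM'    '18:45' -> '6:45 PM'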

def _format_logo_info(logo_info: Dict[str, Any]) -> str:
    """Format logo information for DeepSite prompt."""
    if not logo_info:
        return ""  # Empty logo - omit completely
    
    # Check for both possible field names: 'filename' or 'serverFilename'
    filename = logo_info.get('filename') or logo_info.get('serverFilename', '')
    if not filename:
        return ""  # Empty logo - omit completely
    
    # Return relative path in asset/img/ format
    return f"asset/img/{filename}"

def _format_social_media(website_data: Dict[str, Any]) -> str:
    """Format social media URLs for DeepSite prompt."""
    social_urls = website_data.get('socialUrls', {})
    
    facebook = social_urls.get('facebook', website_data.get('facebookUrl', ''))
    instagram = social_urls.get('instagram', website_data.get('instagramUrl', ''))
    twitter = social_urls.get('twitter', website_data.get('twitterUrl', ''))
    
    lines = []
    if facebook and facebook.strip():
        lines.append(f"Facebook: {facebook}")
    if instagram and instagram.strip():
        lines.append(f"Instagram: {instagram}")
    if twitter and twitter.strip():
        lines.append(f"Twitter: {twitter}")
    
    return '\n'.join(lines) if lines else ""

def _format_pages(pages: list) -> str:
    """Format pages list for DeepSite prompt."""
    if not pages:
        return "home, about, contact"
    
    return ", ".join(pages)

def _format_design_info(website_data: Dict[str, Any]) -> str:
    """Format design and branding information for DeepSite prompt."""
    colors = website_data.get('colors', {})
    typography = website_data.get('typography', 'inter')
    
    # Get font family name from font key
    font_family = _get_font_family_name(typography)
    
    return f"""Primary Color: {colors.get('primary', '#2c3e50')}
Secondary Color: {colors.get('secondary', '#3498db')}
Accent Color: {colors.get('accent', '#e74c3c')}
Typography: {font_family}
Selected Font: {font_family}"""

async def _copy_logo_files(website_data: Dict[str, Any], dest_path: Path) -> list:
    """
    Copy logo files to asset/img/ directory and delete from uploads after successful copy.
    
    Args:
        website_data: Website data containing logo information
        dest_path: Destination path for the website
        
    Returns:
        List of copied logo files
    """
    logo_files_copied = []
    
    try:
        logo_info = website_data.get('logoInfo', {})
        if not logo_info:
            logger.info("No logo file to copy")
            return logo_files_copied
        
        # Check for both possible field names: 'filename' or 'serverFilename'
        logo_filename = logo_info.get('filename') or logo_info.get('serverFilename', '')
        if not logo_filename:
            logger.info("No logo filename found")
            return logo_files_copied
        
        # Create asset/img directory
        asset_img_dir = dest_path / "asset" / "img"
        asset_img_dir.mkdir(parents=True, exist_ok=True)
        
        # Look for the logo in the uploads/logos directory under the project root
        source_logo_path = Path(PROJECT_ROOT) / "uploads" / "logos" / logo_filename
        
        if source_logo_path.exists():
            # Copy logo to asset/img/
            dest_logo_path = asset_img_dir / logo_filename
            shutil.copy2(source_logo_path, dest_logo_path)
            logo_files_copied.append(str(dest_logo_path))
            logger.info(f"Copied logo file: {logo_filename}")
            
            # Delete the source file from uploads after successful copy
            try:
                source_logo_path.unlink()
                logger.info(f"Deleted source logo file from uploads: {logo_filename}")
            except Exception as delete_error:
                logger.error(f"Failed to delete source logo file {logo_filename}: {delete_error}")
                # Don't fail the entire operation if deletion fails
        else:
            logger.warning(f"Logo file not found in uploads: {source_logo_path}")
        return logo_files_copied
        
    except Exception as e:
        logger.error(f"Error copying logo files: {e}")
        return logo_files_copied

async def _write_file_async(file_path: Path, content: str) -> None:
    """Write content to file asynchronously."""
    async with aiofiles.open(file_path, 'w', encoding='utf-8') as f:
        await f.write(content)

# Export functions for use in workflow
__all__ = [
    'parse_json_input',
    'setup_template',
    'format_website_prompt',
    '_copy_logo_files',
    'AutomatedTaskError'
]
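

if __name__ == "__main__":
    # Minimal smoke test (illustrative; the payload values are made up).
    # Only the synchronous helpers run here, so nothing touches the disk.
    logging.basicConfig(level=logging.INFO)
    sample = {
        "websiteName": "Demo Bistro",
        "websiteDescription": "A small neighborhood bistro.",
        "typography": "Playfair Display",
    }
    parsed = parse_json_input(sample)
    print(format_website_prompt(parsed))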