"""
ML Model Training for HK Racing Analytics
"""
from typing import Tuple, Dict
from loguru import logger
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss, accuracy_score, precision_score
import xgboost as xgb
import joblib
from pathlib import Path


class RacingModel:
    """XGBoost binary classifier predicting per-runner win probability.

    Trains on historical runner features (recent form, class, distance/track
    record, jockey/trainer strike rates, barrier stats) with the label
    ``finishing_position == 1``. Artifacts are persisted with joblib under
    ``model_dir``.
    """

    def __init__(self, model_dir: str = "models"):
        """
        Args:
            model_dir: Directory for saved model artifacts. Created
                (including missing parents) if it does not exist.
        """
        self.model_dir = Path(model_dir)
        # parents=True so nested paths like "artifacts/models" work too
        self.model_dir.mkdir(parents=True, exist_ok=True)
        # Populated by train() or load(); None until then.
        self.model = None
        # Canonical feature order — training and inference must both use it.
        self.feature_names = [
            'avg_finish_pos_last_6',
            'win_rate_last_6',
            'place_rate_last_6',
            'days_since_last_run',
            'runs_in_last_30_days',
            'class_rating',
            'class_diff_from_last',
            'distance_starts',
            'distance_win_rate',
            'distance_place_rate',
            'track_starts',
            'track_win_rate',
            'track_place_rate',
            'jockey_win_rate_30_days',
            'jockey_place_rate_30_days',
            'trainer_win_rate_30_days',
            'trainer_place_rate_30_days',
            'barrier_win_rate',
            'barrier_place_rate'
        ]
        logger.info(f"Model initialized with {len(self.feature_names)} features")

    def prepare_data(self, df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.Series]:
        """
        Prepare data for training.

        Args:
            df: DataFrame containing all columns in ``self.feature_names``
                plus ``finishing_position``.

        Returns:
            X: Features DataFrame (NaNs filled with 0).
            y: Target Series (1 if the runner won, 0 otherwise).
        """
        X = df[self.feature_names].copy()
        # Missing statistics (e.g. debut runners) are treated as zero.
        X = X.fillna(0)

        # Binary target: win == finished first.
        y = (df['finishing_position'] == 1).astype(int)

        return X, y

    def train(
        self,
        df: pd.DataFrame,
        test_size: float = 0.2,
        random_state: int = 42
    ) -> Dict:
        """
        Train the model on labelled historical data.

        Args:
            df: Training data with feature columns and ``finishing_position``.
            test_size: Fraction of rows held out for evaluation.
            random_state: Random seed for the split and the booster.

        Returns:
            Dict with ``log_loss``, ``accuracy``, ``precision`` and a
            ``feature_importance`` mapping.

        Raises:
            ValueError: If the training split contains no winners, which
                would make the class weight undefined.
        """
        logger.info("Preparing training data...")
        X, y = self.prepare_data(df)

        # Stratify so the rare positive class appears in both splits.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state, stratify=y
        )

        logger.info(f"Training samples: {len(X_train)}, Test samples: {len(X_test)}")

        # Class weighting: winners are ~1-in-field-size, so upweight positives.
        n_pos = int((y_train == 1).sum())
        if n_pos == 0:
            # Dividing by zero here would yield an inf/NaN weight and a
            # silently broken model — fail loudly instead.
            raise ValueError("Training split contains no winners; cannot compute class weight")
        scale_pos_weight = (y_train == 0).sum() / n_pos

        # NOTE: use_label_encoder was deprecated in XGBoost 1.6 and removed
        # in later releases; it is intentionally no longer passed.
        self.model = xgb.XGBClassifier(
            n_estimators=100,
            max_depth=6,
            learning_rate=0.1,
            scale_pos_weight=scale_pos_weight,
            random_state=random_state,
            eval_metric='logloss'
        )

        logger.info("Training model...")
        self.model.fit(
            X_train, y_train,
            eval_set=[(X_test, y_test)],
            verbose=False
        )

        # Evaluate on the held-out split.
        y_pred_proba = self.model.predict_proba(X_test)[:, 1]
        y_pred = self.model.predict(X_test)

        metrics = {
            'log_loss': log_loss(y_test, y_pred_proba),
            'accuracy': accuracy_score(y_test, y_pred),
            'precision': precision_score(y_test, y_pred, zero_division=0),
            'feature_importance': dict(zip(self.feature_names, self.model.feature_importances_))
        }

        logger.info(f"Training complete. Log loss: {metrics['log_loss']:.4f}")

        return metrics

    def predict(self, features: Dict) -> Dict:
        """
        Make a prediction for a single runner.

        Args:
            features: Dict of feature values keyed by feature name. Missing
                or None-valued features are treated as 0, matching the
                fill policy used at training time.

        Returns:
            Dict with ``win_probability``, ``place_probability`` and
            ``confidence`` (all floats in [0, 1]).
        """
        if self.model is None:
            logger.warning("Model not trained, loading from disk...")
            self.load()

        # reindex tolerates missing keys (inserting NaN) instead of raising
        # KeyError, and also drops any extra keys the caller passed.
        X = pd.DataFrame([features]).reindex(columns=self.feature_names).fillna(0)

        win_proba = self.model.predict_proba(X)[0, 1]

        # Heuristic place probability: scaled win probability, capped.
        place_proba = min(win_proba * 2.5, 0.95)

        # Confidence: distance of the probability from the 0.5 decision
        # boundary, rescaled to [0, 1].
        confidence = abs(win_proba - 0.5) * 2

        return {
            'win_probability': float(win_proba),
            'place_probability': float(place_proba),
            'confidence': float(confidence)
        }

    def explain_prediction(self, features: Dict, top_n: int = 5) -> list:
        """
        Explain a prediction using the model's top global features.

        NOTE: this uses global ``feature_importances_``, not per-prediction
        attributions (e.g. SHAP) — the ranking is the same for every runner.

        Args:
            features: Dict of feature values for the runner.
            top_n: Number of top features to return.

        Returns:
            List of dicts with ``feature``, ``value``, ``importance`` and a
            coarse ``impact`` label; empty list if no model is loaded.
        """
        if self.model is None:
            return []

        importance = self.model.feature_importances_

        # Indices of the top_n most important features, descending.
        indices = np.argsort(importance)[::-1][:top_n]

        explanations = []
        for idx in indices:
            feature_name = self.feature_names[idx]
            feature_value = features.get(feature_name, 0)
            feature_importance = importance[idx]

            if feature_value is None:
                feature_value = 0

            # Coarse rule-of-thumb impact labels based on typical HK racing
            # strike rates (~10% baseline win rate).
            if 'win_rate' in feature_name and feature_value > 0.2:
                impact = "positive"
            elif 'win_rate' in feature_name and feature_value < 0.1:
                impact = "negative"
            elif 'barrier' in feature_name and feature_value > 0.12:
                impact = "positive"
            else:
                impact = "neutral"

            explanations.append({
                'feature': feature_name,
                'value': feature_value,
                'importance': float(feature_importance),
                'impact': impact
            })

        return explanations

    def save(self, version: str = "v1.0"):
        """Save the trained model to disk as ``racing_model_<version>.joblib``."""
        if self.model is None:
            # Dumping None would create a useless artifact that load()
            # happily restores — refuse instead.
            logger.error("No trained model to save; call train() first")
            return
        model_path = self.model_dir / f"racing_model_{version}.joblib"
        joblib.dump(self.model, model_path)
        logger.info(f"Model saved to {model_path}")

    def load(self, version: str = "v1.0"):
        """Load a previously saved model from disk, if present."""
        model_path = self.model_dir / f"racing_model_{version}.joblib"
        if model_path.exists():
            self.model = joblib.load(model_path)
            logger.info(f"Model loaded from {model_path}")
        else:
            logger.warning(f"Model not found at {model_path}")


if __name__ == "__main__":
    # Smoke-test: train on synthetic data, save, then run one prediction.
    model = RacingModel()

    np.random.seed(42)
    n_samples = 1000

    df = pd.DataFrame({
        'avg_finish_pos_last_6': np.random.uniform(1, 14, n_samples),
        'win_rate_last_6': np.random.uniform(0, 0.5, n_samples),
        'place_rate_last_6': np.random.uniform(0, 0.7, n_samples),
        'days_since_last_run': np.random.randint(7, 60, n_samples),
        'runs_in_last_30_days': np.random.randint(0, 5, n_samples),
        'class_rating': np.random.randint(40, 120, n_samples),
        'class_diff_from_last': np.random.randint(-10, 10, n_samples),
        'distance_starts': np.random.randint(0, 10, n_samples),
        'distance_win_rate': np.random.uniform(0, 0.3, n_samples),
        'distance_place_rate': np.random.uniform(0, 0.5, n_samples),
        'track_starts': np.random.randint(0, 15, n_samples),
        'track_win_rate': np.random.uniform(0, 0.3, n_samples),
        'track_place_rate': np.random.uniform(0, 0.5, n_samples),
        'jockey_win_rate_30_days': np.random.uniform(0, 0.3, n_samples),
        'jockey_place_rate_30_days': np.random.uniform(0, 0.5, n_samples),
        'trainer_win_rate_30_days': np.random.uniform(0, 0.3, n_samples),
        'trainer_place_rate_30_days': np.random.uniform(0, 0.5, n_samples),
        'barrier_win_rate': np.random.uniform(0.05, 0.15, n_samples),
        'barrier_place_rate': np.random.uniform(0.2, 0.4, n_samples),
        'finishing_position': np.random.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], n_samples)
    })

    metrics = model.train(df)
    print(f"Metrics: {metrics}")

    model.save("v1.0")

    # Build a COMPLETE feature dict: predict() selects every trained feature
    # by name, so supplying only a subset (as the original demo did) raised
    # KeyError. Zero-fill everything, then override the demo values.
    features = {name: 0.0 for name in model.feature_names}
    features.update({
        'avg_finish_pos_last_6': 3.5,
        'win_rate_last_6': 0.15,
        'place_rate_last_6': 0.35,
    })
    pred = model.predict(features)
    print(f"Prediction: {pred}")
