"""
Pose detection using MediaPipe
"""

import cv2
import mediapipe as mp
import numpy as np
from typing import Optional, List, Tuple, Dict
import math


class PoseDetector:
    """
    Wrapper for MediaPipe Pose detection.

    Call :meth:`detect` once per frame, then query landmarks/angles or draw
    the cached result with :meth:`draw_landmarks`.
    """

    # MediaPipe landmark indices (BlazePose 33-point topology)
    LANDMARKS = {
        'NOSE': 0,
        'LEFT_EYE_INNER': 1, 'LEFT_EYE': 2, 'LEFT_EYE_OUTER': 3,
        'RIGHT_EYE_INNER': 4, 'RIGHT_EYE': 5, 'RIGHT_EYE_OUTER': 6,
        'LEFT_EAR': 7, 'RIGHT_EAR': 8,
        'MOUTH_LEFT': 9, 'MOUTH_RIGHT': 10,
        'LEFT_SHOULDER': 11, 'RIGHT_SHOULDER': 12,
        'LEFT_ELBOW': 13, 'RIGHT_ELBOW': 14,
        'LEFT_WRIST': 15, 'RIGHT_WRIST': 16,
        'LEFT_PINKY': 17, 'RIGHT_PINKY': 18,
        'LEFT_INDEX': 19, 'RIGHT_INDEX': 20,
        'LEFT_THUMB': 21, 'RIGHT_THUMB': 22,
        'LEFT_HIP': 23, 'RIGHT_HIP': 24,
        'LEFT_KNEE': 25, 'RIGHT_KNEE': 26,
        'LEFT_ANKLE': 27, 'RIGHT_ANKLE': 28,
        'LEFT_HEEL': 29, 'RIGHT_HEEL': 30,
        'LEFT_FOOT_INDEX': 31, 'RIGHT_FOOT_INDEX': 32
    }

    def __init__(self,
                 model_complexity: int = 1,
                 min_detection_confidence: float = 0.5,
                 min_tracking_confidence: float = 0.5,
                 enable_segmentation: bool = False):
        """
        Initialize pose detector.

        Args:
            model_complexity: 0, 1, or 2 (higher = more accurate but slower)
            min_detection_confidence: Minimum confidence for pose detection
            min_tracking_confidence: Minimum confidence for pose tracking
            enable_segmentation: Enable person segmentation (slower)
        """
        self.mp_pose = mp.solutions.pose
        self.mp_draw = mp.solutions.drawing_utils
        self.mp_drawing_styles = mp.solutions.drawing_styles

        self.pose = self.mp_pose.Pose(
            model_complexity=model_complexity,
            min_detection_confidence=min_detection_confidence,
            min_tracking_confidence=min_tracking_confidence,
            enable_segmentation=enable_segmentation
        )

        # Results of the most recent detect() call.
        self.landmarks = None        # list of per-landmark objects (x, y, visibility)
        self.pose_landmarks = None   # raw NormalizedLandmarkList for drawing
        self.image_width = 0
        self.image_height = 0

    def detect(self, frame: np.ndarray) -> bool:
        """
        Detect pose in frame and cache the result for later queries/drawing.

        Args:
            frame: BGR image (numpy array)

        Returns:
            True if pose detected, False otherwise
        """
        self.image_height, self.image_width, _ = frame.shape

        # MediaPipe expects RGB input; OpenCV frames are BGR.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        results = self.pose.process(rgb_frame)

        if results.pose_landmarks:
            # Cache both the landmark list (for coordinate queries) and the
            # raw proto (so draw_landmarks can reuse it without re-running
            # inference).
            self.pose_landmarks = results.pose_landmarks
            self.landmarks = results.pose_landmarks.landmark
            return True
        else:
            self.pose_landmarks = None
            self.landmarks = None
            return False

    def get_landmark(self, landmark_name: str) -> Optional[Tuple[int, int]]:
        """
        Get pixel coordinates of a landmark from the last detect() call.

        Args:
            landmark_name: Name from LANDMARKS dict

        Returns:
            (x, y) pixel coordinates or None if not available
        """
        if self.landmarks is None:
            return None

        idx = self.LANDMARKS.get(landmark_name)
        if idx is None:
            return None

        landmark = self.landmarks[idx]
        # Landmarks are normalized to [0, 1]; scale to pixel space.
        x = int(landmark.x * self.image_width)
        y = int(landmark.y * self.image_height)

        return (x, y)

    def get_landmark_normalized(self, landmark_name: str) -> Optional[Tuple[float, float, float]]:
        """
        Get normalized coordinates (0-1) and visibility of a landmark.

        Returns:
            (x, y, visibility) or None
        """
        if self.landmarks is None:
            return None

        idx = self.LANDMARKS.get(landmark_name)
        if idx is None:
            return None

        landmark = self.landmarks[idx]
        return (landmark.x, landmark.y, landmark.visibility)

    def calculate_angle(self, point1_name: str, point2_name: str, point3_name: str) -> Optional[float]:
        """
        Calculate angle between three points (point2 is the vertex).

        Args:
            point1_name: First point
            point2_name: Vertex point
            point3_name: Third point

        Returns:
            Angle in degrees (0-180), or None if points are not available
            or degenerate (two points coincide).
        """
        p1 = self.get_landmark(point1_name)
        p2 = self.get_landmark(point2_name)
        p3 = self.get_landmark(point3_name)

        if None in [p1, p2, p3]:
            return None

        p1 = np.array(p1)
        p2 = np.array(p2)
        p3 = np.array(p3)

        # Vectors from the vertex to the outer points
        v1 = p1 - p2
        v2 = p3 - p2

        # Guard against zero-length vectors (coincident pixel coordinates);
        # without this the division below yields NaN instead of None.
        n1 = np.linalg.norm(v1)
        n2 = np.linalg.norm(v2)
        if n1 == 0 or n2 == 0:
            return None

        # Angle via dot product; clip to avoid arccos domain errors from
        # floating-point rounding.
        cos_angle = np.dot(v1, v2) / (n1 * n2)
        cos_angle = np.clip(cos_angle, -1.0, 1.0)
        angle = np.degrees(np.arccos(cos_angle))

        return float(angle)

    def calculate_angle_from_vertical(self, point1_name: str, point2_name: str) -> Optional[float]:
        """
        Calculate angle of line segment from vertical.

        Note: the result is direction-sensitive — 0 when point2 is directly
        below point1 in image coordinates, up to 180 when directly above.

        Returns:
            Angle in degrees (0 = vertical, 90 = horizontal) or None
        """
        p1 = self.get_landmark(point1_name)
        p2 = self.get_landmark(point2_name)

        if None in [p1, p2]:
            return None

        dx = p2[0] - p1[0]
        dy = p2[1] - p1[1]

        angle = abs(math.degrees(math.atan2(dx, dy)))
        return angle

    def draw_landmarks(self, frame: np.ndarray,
                       landmarks_to_highlight: Optional[List[str]] = None,
                       highlight_color: Tuple[int, int, int] = (0, 255, 255)) -> np.ndarray:
        """
        Draw the pose landmarks cached by the last detect() call onto frame.

        Args:
            frame: BGR image
            landmarks_to_highlight: List of landmark names to highlight in different color
            highlight_color: BGR color for highlighted landmarks

        Returns:
            Frame with landmarks drawn
        """
        if self.pose_landmarks is None:
            return frame

        # Reuse the cached result from detect(). (Previously this method
        # built a new Pose model and re-ran inference on every call, which
        # was slow, leaked a graph per call, and could draw a different pose
        # than the one stored in self.landmarks.)
        self.mp_draw.draw_landmarks(
            frame,
            self.pose_landmarks,
            self.mp_pose.POSE_CONNECTIONS,
            self.mp_drawing_styles.get_default_pose_landmarks_style()
        )

        # Highlight specific landmarks if requested
        if landmarks_to_highlight:
            for landmark_name in landmarks_to_highlight:
                pos = self.get_landmark(landmark_name)
                if pos:
                    cv2.circle(frame, pos, 8, highlight_color, -1)

        return frame

    def __del__(self):
        """Release MediaPipe resources; tolerate a partially-constructed instance."""
        pose = getattr(self, 'pose', None)
        if pose is not None:
            pose.close()


# Test code
if __name__ == "__main__":
    import sys

    print("Testing pose detection with webcam...")
    print("Press 'q' to quit")

    # Use webcam for testing
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("Error: Cannot open webcam")
        sys.exit(1)

    detector = PoseDetector(model_complexity=1)

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Detect pose
        if detector.detect(frame):
            # Calculate some angles
            left_elbow = detector.calculate_angle('LEFT_SHOULDER', 'LEFT_ELBOW', 'LEFT_WRIST')
            left_knee = detector.calculate_angle('LEFT_HIP', 'LEFT_KNEE', 'LEFT_ANKLE')

            # Draw landmarks
            frame = detector.draw_landmarks(frame,
                                           landmarks_to_highlight=['LEFT_ELBOW', 'LEFT_KNEE'])

            # Display angles. Compare against None explicitly: a perfectly
            # straight/folded joint yields 0.0, which is falsy and would
            # otherwise be hidden.
            if left_elbow is not None:
                cv2.putText(frame, f"Left Elbow: {left_elbow:.1f}deg", (10, 30),
                           cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            if left_knee is not None:
                cv2.putText(frame, f"Left Knee: {left_knee:.1f}deg", (10, 60),
                           cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        else:
            cv2.putText(frame, "No pose detected", (10, 30),
                       cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow('Pose Detection Test', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
