import datetime
from multiprocessing import Process

import cv2
import mediapipe as mp
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.mail import EmailMessage
from django.core.management.base import BaseCommand

from myprofile.models import Alert, Camera, CameraEvent

mp_pose = mp.solutions.pose

def detect_fall(keypoints, fall_threshold_y=0.3, fall_threshold_x=0.1):
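    """Heuristic fall check on normalized MediaPipe pose landmarks.

    Returns True when the nose is vertically close to the ankles (the body
    is roughly horizontal) and the two ankles nearly overlap along x.
    Thresholds are in normalized image coordinates (0.0-1.0).
    """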
    required = (
        mp_pose.PoseLandmark.NOSE.value,
        mp_pose.PoseLandmark.RIGHT_ANKLE.value,
        mp_pose.PoseLandmark.LEFT_ANKLE.value,
    )
    if any(index not in keypoints for index in required):
        return False

    head_y = keypoints[mp_pose.PoseLandmark.NOSE.value].y
    right_ankle = keypoints[mp_pose.PoseLandmark.RIGHT_ANKLE.value]
    left_ankle_x = keypoints[mp_pose.PoseLandmark.LEFT_ANKLE.value].x

    # Fall: the nose is vertically close to the ankles (body roughly
    # horizontal; normalized y grows downward) and the ankles nearly
    # overlap along x.
    body_horizontal = (right_ankle.y - head_y) < fall_threshold_y
    ankles_overlap = abs(right_ankle.x - left_ankle_x) < fall_threshold_x
    return body_horizontal and ankles_overlap

def process_device(camera_id):
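    """Read one camera's RTSP stream, run MediaPipe Pose on every frame, and
    create an Alert (with an annotated snapshot) on the first frame of each
    detected fall. Runs until the stream ends or 'q' is pressed."""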

    camera = Camera.objects.get(id=camera_id)
    cap = cv2.VideoCapture(camera.rtsp_url)
    pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)

    fall_threshold_y = 0.3
    fall_threshold_x = 0.1

    cv2.namedWindow('Fall Detection', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Fall Detection', 800, 600)  # Adjust as needed
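    # Note: showing a preview window requires a display; on a headless server
    # these cv2 GUI calls will fail.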

    frame_number = 0
    fall_detected = False
    fall_frame_number = None

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame_number += 1

        # OpenCV decodes frames as BGR; MediaPipe expects RGB input.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        results = pose.process(rgb_frame)

        # Rebuild the keypoint map every frame so stale landmarks from a
        # previous frame cannot trigger or clear a fall when no pose is found.
        keypoints = {}
        if results.pose_landmarks:
            for index, landmark in enumerate(results.pose_landmarks.landmark):
                keypoints[index] = landmark

            if detect_fall(keypoints, fall_threshold_y, fall_threshold_x) and not fall_detected:
                fall_detected = True
                fall_frame_number = frame_number
                roi_color = (0, 0, 255)  # Red color
                head_coords = (int(keypoints[mp_pose.PoseLandmark.NOSE.value].x * frame.shape[1]),
                               int(keypoints[mp_pose.PoseLandmark.NOSE.value].y * frame.shape[0]))
                right_ankle_coords = (int(keypoints[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x * frame.shape[1]),
                                      int(keypoints[mp_pose.PoseLandmark.RIGHT_ANKLE.value].y * frame.shape[0]))

                cv2.rectangle(frame, head_coords, right_ankle_coords, roi_color, 2)

                message = f"Fall Detected @Frame {frame_number}"
                cv2.putText(frame, message, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, roi_color, 2, cv2.LINE_AA)
                print(message)

            if fall_detected:
                mp.solutions.drawing_utils.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
                if frame_number == fall_frame_number:
                    # Create an Alert record for this fall on this camera
                    alert = Alert(camera=camera, alert_message=f'Fall Detected @Frame {frame_number}')

                    # Get or create the "FallDetected" camera event and attach it to the alert
                    fall_event, _ = CameraEvent.objects.get_or_create(camera_event="FallDetected")
                    alert.camera_events = fall_event
                    alert.save()

                    # Link the camera's in-charge users to this alert (the M2M can only be set after save)
                    alert.camera_incharge.set(camera.camera_user.all())
                    # Save alert image to the database
                    _, encoded_frame = cv2.imencode('.jpg', frame)
                    image_file = ContentFile(encoded_frame.tobytes())
                    timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
                    alert.frame.save(f'fall_{timestamp}.jpg', image_file)

                    # Send an email notification to the camera incharge
                    # subject = f'Alert: Person detected in {camera.camera_name}'
                    # message = f'A person has been detected in {camera.camera_name} at {datetime.datetime.now()}'
                    # from_email = settings.EMAIL_HOST_USER
                    # recipient_list = ['kuldip.nettyfy@gmail.com']
                    # email = EmailMessage(subject, message, from_email, recipient_list)
                    # email.attach_file(alert.frame.path)
                    # email.send()

        # Re-arm detection once the pose no longer looks like a fall, so the
        # next fall raises a new alert.
        if not detect_fall(keypoints, fall_threshold_y, fall_threshold_x):
            fall_detected = False

        cv2.imshow('Fall Detection', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()




class Command(BaseCommand):
    help = 'Runs fall detection on all enabled camera streams and raises alerts'

    def handle(self, *args, **options):
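        """Spawn one worker process per enabled camera and block until all of
        them finish."""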
        cameras = Camera.objects.filter(is_enabled=True)

        processes = []

        # Run one worker process per enabled camera so all streams are
        # analysed in parallel.
        for cam in cameras:
            process = Process(target=process_device, args=(cam.id,))
            process.start()
            processes.append(process)

        for process in processes:
            process.join()
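
# Usage note: as a Django management command, this module is executed with
#   python manage.py <command_name>
# where <command_name> is this file's name inside a management/commands/ package.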