import cv2
import numpy as np
from ultralytics import YOLO
from myprofile.models import Camera, CameraEvent, Roi, Alert, Organization, Location, Area
from django.core.management.base import BaseCommand
from django.core.files.base import ContentFile
import requests
from multiprocessing import Process
import datetime
from django.conf import settings
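
# This management command runs YOLOv8-based intrusion detection for every
# enabled camera, each in its own child process. A person detected inside a
# "Danger Zone" ROI outside the configured working hours raises an Alert,
# saves the annotated frame, and uploads it to the alert image endpoint.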

def intrusion_detection(camera_id):
    # Load the YOLOv8 nano model; ultralytics downloads the weights
    # automatically on first use if they are not already cached locally
    model = YOLO("yolov8n.pt")

    # Open the video source: the camera's RTSP stream, or a local test clip
    # when no stream URL is configured
    camera = Camera.objects.filter(id=camera_id).first()
    if camera is None:
        print(f"Error: Camera {camera_id} not found.")
        return
    video_path = camera.rtsp_url if camera.rtsp_url else "/home/nettyfy/visnx/visnx-backend/Nettyfy_visnx/videos/danger_test.mp4"
    cap = cv2.VideoCapture(video_path)

    # Check if the video capture object is opened successfully
    if not cap.isOpened():
        print(f"Error: Could not open video source: {video_path}")
        return
    # output_video_path = "/home/nettyfy/visnx/visnx-backend/Nettyfy_visnx/videos/danger_output.mp4"
    # Only the COCO "person" class (index 0) is of interest
    classes = ['person']

    # Danger Zone ROI polygons configured for this camera
    roi_objs = Roi.objects.filter(camera=camera, camera_events__camera_event='Danger Zone').order_by('id')
    roi_coords_list = [np.array(roi_obj.coordinates['coordinates'], dtype=np.int32) for roi_obj in roi_objs]
    if not roi_coords_list:
        print(f"Error: No Danger Zone ROI configured for camera {camera_id}.")
        return
    # Working-hours window: detections inside the ROI raise alerts only
    # outside this window
    start_time = datetime.datetime.strptime("08:00", "%H:%M").time()
    end_time = datetime.datetime.strptime("17:00", "%H:%M").time()
    # Hardcoded evaluation time (apparently for testing); a live deployment
    # would use datetime.datetime.now().time() here
    current_time_user = datetime.datetime.strptime("19:00", "%H:%M").time()
    # Process roughly one of every (skip_frames + 1) frames to save compute
    skip_frames = 15
    frame_skip_counter = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame_skip_counter += 1
        if frame_skip_counter <= skip_frames:
            continue
        frame_skip_counter = 0

        # Resize to a fixed size so the stored ROI coordinates stay consistent
        frame = cv2.resize(frame, (1200, 700))

        # Annotate the frame with ROI
        annotated_frame = frame.copy()
        for roi_coords in roi_coords_list:
            cv2.drawContours(annotated_frame, [roi_coords], -1, (255, 255, 0), 2)

        # Run YOLOv8 inference on the original frame (not the annotated copy,
        # so the drawn ROI outlines cannot bias detection)
        results = model(frame)
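
        # If persistent per-person IDs across frames were needed, ultralytics
        # also offers tracking (model.track(frame, persist=True)); plain
        # per-frame detection is enough for this zone-presence check.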
        # Check every detected person against the Danger Zone ROIs.
        # results holds one Results object per input image; boxes.data rows
        # are [x1, y1, x2, y2, confidence, class_index]
        for x1, y1, x2, y2, conf, class_idx in results[0].boxes.data.tolist():
            class_idx = int(class_idx)
            class_label = classes[class_idx] if class_idx == 0 else ""

            if class_label == "person":
                # Four corners of the person's bounding box
                corners = [(x1, y1), (x1, y2), (x2, y1), (x2, y2)]

                def extract_bounding_box(corners):
                    # Extract x and y coordinates
                    x_coords = [point[0] for point in corners]
                    y_coords = [point[1] for point in corners]
                    
                    # Determine the bounding box
                    x1, y1 = min(x_coords), min(y_coords)
                    x2, y2 = max(x_coords), max(y_coords)
                    
                    return x1, y1, x2, y2

                def do_rois_interact(roi1_corners, roi2_coords):
                    # Axis-aligned bounding-box overlap test: the ROI polygon
                    # is reduced to its bounding box, so non-rectangular ROIs
                    # can also trigger on detections near (not strictly
                    # inside) the zone
                    x1_A, y1_A, x2_A, y2_A = extract_bounding_box(roi1_corners)
                    roi2_corners = [(x, y) for x, y in roi2_coords]
                    x1_B, y1_B, x2_B, y2_B = extract_bounding_box(roi2_corners)

                    # Overlap exists iff the intervals intersect on both axes
                    return (x1_A < x2_B and x2_A > x1_B and
                            y1_A < y2_B and y2_A > y1_B)
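
                # Note (a sketch, not part of the original logic): for
                # non-rectangular zones a stricter containment test could use
                # OpenCV's point-in-polygon check on the person's foot point:
                #   foot = (int((x1 + x2) / 2), int(y2))
                #   inside = cv2.pointPolygonTest(roi_coords, foot, False) >= 0
                # where roi_coords is one polygon from roi_coords_list.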

                # Trigger when the person overlaps any Danger Zone ROI, not
                # only the first one
                if any(do_rois_interact(corners, roi_coords) for roi_coords in roi_coords_list):
                    if start_time < current_time_user < end_time:
                        # Within working hours: mark the person in green, no alert
                        cv2.rectangle(annotated_frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
                    else:
                        # Outside working hours: mark in red and raise an alert
                        cv2.rectangle(annotated_frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)
                        cv2.putText(annotated_frame, "Intrusion Detected", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                        
                        camera_event_obj = CameraEvent.objects.filter(camera_event='IntrusionDetected').first()
                        _, encoded_frame = cv2.imencode('.jpg', annotated_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
                        image_file = ContentFile(encoded_frame.tobytes(), name="alert_image.jpg")
                        org_obj = Organization.objects.first()
                        loc_obj = Location.objects.filter(loc_name=camera.location.loc_name).first() if camera.location else None
                        area_obj = Area.objects.filter(area_name=camera.area.area_name).first() if camera.area else None
                            
                        alert = Alert(
                            organization=org_obj,
                            camera=camera,
                            detection_choice='',
                            alert_message="Intrusion detected",
                            frame=image_file,
                            camera_events=camera_event_obj,
                        )
                        if area_obj:
                            alert.area = area_obj
                        if loc_obj:
                            alert.location = loc_obj
                        alert.save()

                        # Attach every user in charge of this camera; M2M
                        # .add() writes to the through table immediately, so
                        # no extra save() is required
                        alert.camera_incharge.add(*camera.camera_user.all())

                        # Upload the annotated frame to the alert image endpoint
                        url = settings.URL + '/alert_image_upload'

                        # requests expects bytes or a file-like object for the
                        # payload, not a NumPy array, so encode explicitly
                        files = {'image': (alert.frame.name.replace('alert_images/', ''), encoded_frame.tobytes())}

                        # Make the request; guard it so a network hiccup or a
                        # non-JSON response does not kill this camera's process
                        try:
                            response = requests.post(url, files=files, timeout=10)
                            print(response.json())
                        except (requests.RequestException, ValueError) as exc:
                            print(f"Alert image upload failed: {exc}")

        # cv2.imshow("Annotated Frame", annotated_frame)

        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break

    cap.release()
    # cv2.destroyAllWindows()
    # print(f"Output video saved at: {output_video_path}")

class Command(BaseCommand):
    help = 'Run intrusion detection on every enabled camera, one child process per camera'

    def handle(self, *args, **options):
        cameras = Camera.objects.filter(is_enabled=True)

        # for camera in cameras:
        #     intrusion_detection(camera.id)
        processes = []  # Keep track of the processes so we can wait on them later.

        for device in cameras:
            # Start a dedicated detection process per camera. Child processes
            # inherit the parent's database connection on fork; if connection
            # errors appear, calling django.db.connections.close_all() before
            # forking is the usual fix.
            process = Process(target=intrusion_detection, args=(device.id,))
            processes.append(process)
            process.start()
        
        print(f"Started {len(processes)} detection process(es).")
        # Block until every per-camera process exits (i.e. its stream ends)
        for process in processes:
            process.join()
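
# Usage (assuming this file sits under an app's management/commands/ directory,
# e.g. myprofile/management/commands/intrusion_detection.py -- the command name
# comes from the filename):
#
#   python manage.py intrusion_detection
#
# One detection process is started per enabled camera; the command blocks until
# every process exits.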