import cv2
import supervision as sv
from django.core.management.base import BaseCommand
from ultralytics import YOLO
import requests
from django.core.files.base import ContentFile
from myprofile.models import *
from multiprocessing import Process
from django.conf import settings
from datetime import datetime

def rectangles_intersect(rect1, rect2):
    """
    Check if two rectangles intersect or touch each other.
    Each rectangle is defined by (x1, y1, x2, y2) where:
    - (x1, y1) is the top-left corner
    - (x2, y2) is the bottom-right corner
    """
    x1_1, y1_1, x2_1, y2_1 = rect1
    x1_2, y1_2, x2_2, y2_2 = rect2

    # Check if one rectangle is to the left or above the other
    if x1_1 >= x2_2 or x1_2 >= x2_1:
        return False
    if y1_1 >= y2_2 or y1_2 >= y2_1:
        return False

    return True
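
# Illustrative usage (hypothetical coordinates):
#   rectangles_intersect((0, 0, 100, 100), (50, 50, 150, 150))  -> True  (boxes overlap)
#   rectangles_intersect((0, 0, 100, 100), (100, 0, 200, 100))  -> False (edges only touch)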

def hex_to_bgr(hex_color):
    hex_color = hex_color.lstrip('#')
    r, g, b = tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))
    return (b, g, r)
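
# Illustrative example (assuming a "#RRGGBB" color string): "#FF0000" -> (0, 0, 255),
# since OpenCV draws with channels in BGR order.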

def intrusion_detection(camera_id):
    # Load model and class list
    model = YOLO("yolov8n.pt")

    tracker = sv.ByteTrack()

    # Setup video capture
    camera = Camera.objects.filter(id=camera_id).first()
    if camera is None:
        # No such camera; nothing to monitor in this process
        return

    if camera.rtsp_url:
        video_path = camera.rtsp_url
    else:
        video_path = "/home/nettyfy/visnx/visnx-backend/Nettyfy_visnx/videos/danger_test.mp4"
    cap = cv2.VideoCapture(video_path)
    roi_objs = Roi.objects.filter(camera=camera, camera_events__camera_event='IntrusionDetected').order_by('id')
    classes = ['person']
    roi_coord_objs = []
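    # Roi.coordinates is assumed to hold a JSON payload shaped roughly like
    # (illustrative values only):
    #   {"coordinates": [{"x": 100, "y": 150, "width": 300, "height": 200}, ...]}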
    for roi_obj in roi_objs:
        for coord in roi_obj.coordinates['coordinates']:
            data = {
                'x': coord['x'],
                'y': coord['y'],
                'w': coord['width'],
                'h': coord['height'],
                'color': roi_obj.color
            }
            roi_coord_objs.append(data)
    # Working-hours window: detections inside this window are only highlighted,
    # detections outside it raise alerts.
    start_time = datetime.strptime("08:00", "%H:%M").time()
    end_time = datetime.strptime("17:00", "%H:%M").time()
    # Hardcoded to 19:00 (outside working hours) so the alert branch is exercised;
    # use datetime.now().time() here to check against the actual current time.
    current_time_user = datetime.strptime("19:00", "%H:%M").time()
    frame_count = 0
    skip_frames = 15  # process roughly 1 of every 16 frames
    frame_skip_counter = 0
    
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        frame_skip_counter += 1
        if frame_skip_counter <= skip_frames:
            continue
        frame_skip_counter = 0

        frame = cv2.resize(frame, (1200, 700))

        roi_rect = ()
        color_name = (0, 0, 255)
        
        # Note: if several ROI rectangles are configured, only the last one is kept
        # in roi_rect / color_name and used for the intersection test below.
        for roi_coord in roi_coord_objs:
            x, y, w, h = roi_coord['x'], roi_coord['y'], roi_coord['w'], roi_coord['h']
            roi_rect = (x, y, x + w, y + h)
            # cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            color_name = hex_to_bgr(roi_coord['color'])
        results = model(frame)

        if isinstance(results, list) and len(results) > 0:
            result = results[0]
            for detect_per in result.boxes.data.tolist():
                if int(detect_per[-1]) == 0:  # Class index 0 is 'person' in the COCO classes used by YOLOv8
                    x1, y1, x2, y2 = map(int, detect_per[:4])
                    conf, class_idx = detect_per[4], detect_per[5]
                    detection_rect = (x1, y1, x2, y2)

                    if roi_rect and rectangles_intersect(detection_rect, roi_rect):
                        if start_time < current_time_user < end_time:
                            # Within working hours: just highlight the person, no alert
                            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                        else:
                            # Outside working hours: mark the ROI and the person, then raise an alert
                            cv2.rectangle(frame, (x, y), (x + w, y + h), color_name, 2)
                            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                            cv2.putText(frame, "Intrusion Detected", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                            org_obj = Organization.objects.first()
                            loc_obj = Location.objects.filter(loc_name=camera.location.loc_name).first() if camera.location else None
                            area_obj = Area.objects.filter(area_name=camera.area.area_name).first() if camera.area else None
                            
                            camera_event_obj = CameraEvent.objects.filter(camera_event='IntrusionDetected').first()
                            _, encoded_frame = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
                            image_file = ContentFile(encoded_frame.tobytes(), name="alert_image.jpg")
                            alert = Alert(
                                organization = org_obj,
                                camera=camera,
                                detection_choice='',
                                alert_message="Intrusion detected",
                                frame=image_file,
                                camera_events=camera_event_obj
                            )
                            if loc_obj:
                                alert.location = loc_obj
                            if area_obj:
                                alert.area = area_obj
                            alert.save()

                            camera_incharge_list = camera.camera_user.all()
                            for user in camera_incharge_list:
                                alert.camera_incharge.add(user)
                            alert.save()

                            # Forward the alert frame to the image-upload API endpoint
                            url = settings.URL + '/alert_image_upload'

                            # requests expects raw bytes (or a file object), not a numpy array
                            files = {'image': (alert.frame.name.replace('alert_images/', ''), encoded_frame.tobytes())}

                            # Make the request; a failed upload should not kill the detection loop
                            try:
                                response = requests.post(url, files=files, timeout=10)
                                print(response.json())
                            except Exception as exc:
                                print(f"Alert image upload failed: {exc}")
                            
                    else:
                        print("Outside ROI")

        # Display the annotated frame
        # cv2.imshow("Annotated Frame", frame)

        # Exit the loop when 'q' is pressed
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break

        frame_count += 1

    cap.release()
    # cv2.destroyAllWindows()

class Command(BaseCommand):
    help = 'Queue detection based on video analysis'

    def handle(self, *args, **options):
        cameras = Camera.objects.filter(is_enabled=True)

        # for camera in cameras:
        #     intrusion_detection(camera.id)
        processes = []  # Keep track of the processes so we can wait on them later.

        for device in cameras:
            # Start a new process for each device.
            process = Process(target=intrusion_detection, args=(device.id,))
            processes.append(process)
            process.start()            
        
        print(processes)
        for process in processes:
            process.join()
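
# To launch detection for all enabled cameras, invoke this management command
# through manage.py using this module's filename (under management/commands/), e.g.:
#   python manage.py <this_command_name>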