from datetime import datetime

import cv2
import requests
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
from ultralytics import YOLO

from myprofile.models import *

def process_device(cam_id):
    """Run YOLOv8 fire detection on a camera's video and raise alerts.

    For every sampled frame (1 out of every 16) the model is run; if any
    detected box has class id 0 ("fire"), an annotated frame is saved as an
    ``Alert`` with the camera's in-charge users attached, the image is POSTed
    to the alert-upload API, and the annotated frame is appended to an output
    video file.

    Args:
        cam_id: Primary key of the ``Camera`` row to process.

    Side effects: reads a video file, writes an output video file, creates
    ``Alert`` rows, and performs an HTTP POST. Returns ``None``.
    """
    # Load the YOLOv8 model (fine-tuned fire-detection weights).
    model = YOLO('/home/nettyfy/visnx/visnx-backend/Nettyfy_visnx/model/fire_detection.pt')
    # work with best.pt
    camera = Camera.objects.filter(id=cam_id).first()
    if camera is None:
        # Guard: callers pass ids from a queryset, but the row may have been
        # deleted in the meantime; the original crashed later at camera.camera_user.
        print(f"Camera {cam_id} not found; skipping.")
        return

    # NOTE(review): test video hard-coded; production should use the RTSP URL.
    video_path = '/home/nettyfy/visnx/visnx-backend/Nettyfy_visnx/myprofile/management/commands/videos/fire_test.mp4'
    cap = cv2.VideoCapture(video_path)
    # cap = cv2.VideoCapture(camera.rtsp_url)

    # Get video properties so the writer matches the source stream.
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Define the codec and create the VideoWriter object.
    output_path = '/home/nettyfy/visnx/visnx-backend/Nettyfy_visnx/myprofile/management/commands/videos/fire_test_output.mp4'
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

    # Sample roughly one frame out of every (skip_frames + 1).
    skip_frames = 15
    frame_skip_counter = 0

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            frame_skip_counter += 1
            if frame_skip_counter <= skip_frames:
                continue
            frame_skip_counter = 0

            # Perform prediction on the sampled frame.
            results = model(frame)

            # Class id 0 is "fire". Fire once per frame, not once per box:
            # the original looped over every box and produced duplicate
            # alerts (and duplicate output frames) for a single video frame.
            fire_boxes = results[0].boxes.data
            if not any(int(box[5]) == 0 for box in fire_boxes):
                continue

            print("Fire detected")
            # Draw the bounding boxes on the frame.
            annotated_frame = results[0].plot()
            camera_event_obj = CameraEvent.objects.filter(camera_event__icontains='fire').first()
            current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            _, encoded_frame = cv2.imencode('.jpg', annotated_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
            # ContentFile requires raw bytes; passing the numpy ndarray directly
            # (as before) makes Django wrap it in StringIO and fail.
            jpeg_bytes = encoded_frame.tobytes()
            image_file = ContentFile(jpeg_bytes, name=f"fire_{current_time}.jpg")

            alert = Alert(
                camera=camera,
                alert_message=f'Fire detected at {current_time}.',
                frame=image_file,
                camera_events=camera_event_obj,
            )
            alert.save()

            # M2M .add() persists immediately; no second save() is needed.
            alert.camera_incharge.add(*camera.camera_user.all())

            # Push the annotated image to the alert-upload API endpoint.
            url = settings.URL + '/alert_image_upload'
            files = {'image': (alert.frame.name.replace('alert_images/', ''), jpeg_bytes)}
            try:
                # Timeout so a stalled API cannot hang the detection loop forever.
                response = requests.post(url, files=files, timeout=30)
                # Print the response.
                print(response.json())
            except (requests.RequestException, ValueError) as exc:
                # Upload is best-effort: the Alert row is already saved locally.
                print(f"Alert image upload failed: {exc}")

            # Write the annotated frame into the output file (once per frame).
            out.write(annotated_frame)
    finally:
        # Release capture/writer even if the loop raises, so the output
        # file is finalized and the device handle is freed.
        cap.release()
        out.release()
class Command(BaseCommand):
    """Management command: run fire detection over every enabled camera."""

    help = 'Triggers alerts based on video analysis'

    def handle(self, *args, **options):
        """Process each enabled camera sequentially, oldest first.

        NOTE(review): processing is sequential; the original kept an unused
        ``processes`` list suggesting multiprocessing that was never
        implemented — removed here to avoid misleading future readers.
        """
        cameras = Camera.objects.filter(is_enabled=True).order_by('created_at')
        for camera in cameras:
            process_device(camera.id)