# from __future__ import absolute_import, unicode_literals
# import cv2
# import redis
# from django.core.cache import cache
# from django.core.management.base import BaseCommand
# from myprofile.models import *
# from ultralytics import YOLO
# import time
# from django.core.mail import send_mail
# import os
# import tempfile
# from django.core.mail import EmailMessage
# import msgpack
# import asyncio
# import cv2
# import logging, sys
# import numpy as np
# import os
# from celery import Celery
# from celery import shared_task
# from celery.utils.log import get_task_logger
# from django.conf import settings


# class Command(BaseCommand):
#     help = 'Capture frames from RTSP streams, detect items in specified ROIs, save ROI frames in Redis, and generate alerts for undetected objects.'

#     def add_arguments(self, parser):
#         parser.add_argument('--camera_id', type=int, help='Specify the camera ID for which frames should be captured.')

#     def fetch_roi_data(self, camera_id):
#         try:
#             roi_data_list = Roi.objects.filter(camera_id=camera_id).values('id', 'x1', 'y1', 'width', 'height', 'roi_name', 'color')
#             return list(roi_data_list)
#         except Roi.DoesNotExist:
#             self.stdout.write(self.style.ERROR(f"No ROI coordinates found for camera {camera_id}"))
#             return []

#     def detect_objects_in_roi(self, roi_frame,expected_objects):
#         fps = 30
#         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
#         out = cv2.VideoWriter("output.mp4", fourcc, fps, (roi_frame.shape[1], roi_frame.shape[0]))  # Output video filename, codec, fps, frame size
#         model = YOLO('model/yolov8s.pt')
#         results = model(roi_frame)[0]
#         classes = [
#             'Helmet: yes', 'Glasses: yes', 'Gloves: yes', 'Shoes: yes', 'Person',
#             'Helmet: no', 'Glasses: no', 'Gloves: no', 'Shoes: no'
#         ]

#         detected_objects = []
#         bounding_boxes = []
#         persons = []
#         class_label = None
#         x1 = y1 = x2 = y2 = 0

#         for result in results:
#             x1, y1, x2, y2, conf, class_idx = result.boxes.data.tolist()[0]
#             class_idx = int(class_idx)
#             class_label = classes[class_idx]

#             if class_label == "Person":
#                 # Create a new person object with an empty labels set
#                 person = {"bbox": (x1, y1, x2, y2), "labels": set()}
#                 persons.append(person)

#         for result in results:
#             x1, y1, x2, y2, conf, class_idx = result.boxes.data.tolist()[0]
#             class_idx = int(class_idx)
#             class_label = classes[class_idx]

#             if class_label != "Person":
#                 cx, cy = (x1 + x2) / 2, (y1 + y2) / 2  # center of the PPE bbox
#                 for person in persons:
#                     px1, py1, px2, py2 = person["bbox"]
#                     if px1 <= cx <= px2 and py1 <= cy <= py2:  # if the center of the PPE bbox is within the Person bbox
#                         person["labels"].add(class_label)  # As "labels" is now a set, the same label cannot be added twice
#                         break
#         for person in persons:
#             x1, y1, x2, y2 = person["bbox"]
#             x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
#             if x1 >= 0 and y1 >= 0 and x2 >= 0 and y2 >= 0:
#                 cv2.rectangle(roi_frame, (x1, y1), (x2, y2), (0, 255, 0), 3)

#             # Set the text position just above the bbox
#             text_position = (int(x1), int(y1) - 10)

#             for label in person["labels"]:
#                 # Add a black rectangle for padding
#                 text_width, text_height = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
#                 rectangle_bgr = (0, 0, 0)
#                 (text_width, text_height) = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 1.2, thickness=1)[0]
#                 box_coords = ((text_position[0], text_position[1]), (text_position[0] + text_width + 2, text_position[1] - text_height - 2))
#                 cv2.rectangle(roi_frame, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)

#                 if "yes" in label or "Person" in label:
#                     text_color = (0, 255, 0)  # Green color
#                 else:
#                     text_color = (0, 0, 255)  # Red color

#                 # Add the text
#                 cv2.putText(roi_frame, label, text_position, cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2)
#                 # Update text position for next label
#                 text_position = (text_position[0], text_position[1] - 30)

#         frame = cv2.resize(roi_frame, (roi_frame.shape[1], roi_frame.shape[0]))
#         out.write(frame)

#         # cv2.imshow("yolov8", frame)
#         detected_objects.append(class_label)
#         print(f"detected_objects========+++++++++=============:" ,detected_objects)
#         bounding_boxes.append((x1, y1, x2, y2))
#         return detected_objects, bounding_boxes

#     def handle(self, *args, **options):
#         camera_id = options.get('camera_id')
        
#         if camera_id is None:
#             self.stdout.write(self.style.ERROR('Please provide a valid camera ID using --camera_id argument.'))
#             return

#         try:
#             # Get the camera object based on the provided ID
#             camera = Camera.objects.get(id=camera_id)

#             # Check if the camera exists
#             if not camera:
#                 self.stdout.write(self.style.ERROR(f"Camera with ID {camera_id} does not exist."))
#                 return

#             # Establish a connection to the RTSP stream
#             rtsp_stream_url = camera.rtsp_url
#             cap = cv2.VideoCapture(rtsp_stream_url)
            
#             # Establish a connection to Redis
#             redis_client = redis.Redis(host='localhost', port=6379, db=0)

#             # Create a window to display frames
#             cv2.namedWindow("Frame with ROIs and Objects", cv2.WINDOW_NORMAL)

#             while True:
#                 # Read frames from the RTSP stream
#                 ret, frame = cap.read()

#                 # Check if the frame is successfully captured
#                 if ret:
#                     # Fetch ROI data from the database for the specified camera
#                     roi_data = self.fetch_roi_data(camera_id)
#                     if not roi_data:
#                         self.stdout.write(self.style.ERROR(f"No ROI data found for camera {camera_id}"))
#                         return

#                     # Process each ROI
#                     for roi_info in roi_data:
#                         # Extract ROI information
#                         roi_id = roi_info['id']
#                         x1, y1, width, height, roi_name, color = roi_info['x1'], roi_info['y1'], roi_info['width'], roi_info['height'], roi_info['roi_name'], roi_info['color']
#                         expected_objects = color.split(',')  # Split expected objects from color field
                        
#                         # Extract the ROI frame from the captured frame
#                         roi_frame = frame[y1:y1 + height, x1:x1 + width]
#                         print(f"ROI Frame Shape: {roi_frame.shape}")
#                         # Detect objects in the ROI
#                         detected_objects, bounding_boxes = self.detect_objects_in_roi(roi_frame, expected_objects)

#                         undetected_labels = ['Gloves: no', 'Helmet: no', 'Glasses: no', 'Shoes: no']
#                         if any(label in undetected_labels for label in detected_objects):
#                             # Save ROI frames in Redis
#                             _, encoded_roi = cv2.imencode('.jpg', roi_frame)
#                             roi_bytes = encoded_roi.tobytes()
#                             redis_key = f'frames:{camera_id}:{roi_name}:{roi_id}'
#                             redis_client.rpush(redis_key, roi_bytes)
#                             redis_client.expire(redis_key, 3600)  # Expire after 1 hour (3600 seconds)
#                             self.stdout.write(self.style.SUCCESS(f"===========Stored ROI frame in Redis===========: {redis_key}"))
                        
#                             # Save ROI frames in Django cache
#                             cache.set(f'roi_frame:{camera_id}', roi_bytes, timeout=3600) # Cache for 1 hour (3600 seconds)
#                             self.stdout.write(self.style.SUCCESS("===========Stored ROI frame in Django cache===="))

#                             alert_message = (f"Alert: {detected_objects} Not Detected in ROI Name : {roi_name} in camera id : {camera_id}")
#                             alert = Alert.objects.create(camera=camera,alert_message=alert_message,camera_incharge=camera.camera_incharge)
#                             self.stdout.write(self.style.SUCCESS(f"Alert=====: {detected_objects} Not Detected in ROI Name : {roi_name} in camera id : {camera_id}"))
                            
#                             # Object not detected in the ROI, crop and save the image
#                             object_detected = True  # Set the flag to indicate object detection
#                             cropped_image = frame.copy()  # image of full frame
#                             # cropped_image = frame[y1:y1 + height, x1:x1 + width]   #image of only roi

#                             # Save the cropped image temporarily
#                             temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.jpg')
#                             cv2.imwrite(temp_file.name, cropped_image)

#                             # Send email alert with the cropped image as an attachment
#                             subject = 'Alert: Object Not Detected'
#                             message = f"{alert_message}\n\nAdditional Details: {str(alert)}"
#                             from_email = 'kalpesh.nettyfy@gmail.com'
#                             recipient_list = ['kuldip.nettyfy@gmail.com']  # Replace with recipient's email address

#                             # Create an EmailMessage object and attach the cropped image
#                             email = EmailMessage(subject, message, from_email, recipient_list)
#                             email.attach_file(temp_file.name)

#                             # Send the email
#                             email.send()

#                             # Close and delete the temporary file after sending the email
#                             temp_file.close()
#                             os.remove(temp_file.name)

#                             self.stdout.write(self.style.SUCCESS(f"Alert sent with cropped image."))
                
#                         else:
#                             self.stdout.write(self.style.SUCCESS("===== Object Detected ====="))

#                         # Draw ROI and detected objects on the frame for visualization
#                         cv2.rectangle(frame, (x1, y1), (x1 + width, y1 + height), (0, 255, 0), 2)
#                         for label, (x, y, w, h) in zip(detected_objects, bounding_boxes):
#                             pt1 = (int(x1) + int(x), int(y1) + int(y))  # Top-left corner of the rectangle
#                             pt2 = (int(x1) + int(x) + int(w), int(y1) + int(y) + int(h))  # Bottom-right corner of the rectangle
#                             cv2.rectangle(frame, pt1, pt2, (0, 255, 0), 2)  # Draw rectangle around the detected object

#                             # Calculate the center of the rectangle to position the label
#                             center_x = int((pt1[0] + pt2[0]) / 2)

#                             # Check if label is not None before calculating its length
#                             if label is not None:
#                                 label_length = len(label)
#                                 label_position = (center_x - int(label_length * 4.5), pt1[1] - 15)  # Adjust the label position based on its length

#                                 # Display label above the rectangle
#                                 cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

#                     # Display the frame with ROIs and detected objects
#                     cv2.imshow("Frame with ROIs and Objects", frame)

#                     # Check for the 'q' key to exit the loop and stop the processing
#                     if cv2.waitKey(1) & 0xFF == ord('q'):
#                         break
#                     # Sleep for one minute before processing the next frame
#                     # time.sleep(60)
#             # Release the video capture and close all OpenCV windows
#             cap.release()
#             cv2.destroyAllWindows()

#         except KeyboardInterrupt:
#             # Handle keyboard interrupt by user
#             self.stdout.write(self.style.SUCCESS('Process interrupted by user. Exiting...'))
#         except Exception as e:
#             # Handle other exceptions and display an error message
#             self.stdout.write(self.style.ERROR(f"Error: {e}"))





#####================================= Code with colored ROIs, without event-detection logic ============================##########


"""

class Command(BaseCommand):
    help = 'Capture frames from RTSP streams, detect items in specified ROIs, save ROI frames in Redis, and display the frames'

    def fetch_roi_coordinates(self, camera_id):
        try:
            roi_data_list = Roi.objects.filter(camera_id=camera_id).values('x1', 'y1', 'width', 'height', 'color')
            return list(roi_data_list)
        except Roi.DoesNotExist:
            print(f"No ROI coordinates found for camera {camera_id}")
            return []
        except Exception as e:
            print(f"Failed to fetch ROIs from database: {e}")
            return []

    def handle(self, *args, **options):
        try:
            cameras = Camera.objects.all()

            while True:
                for camera in cameras:
                    rtsp_stream_url = camera.rtsp_url
                    camera_id = camera.id

                    cap = cv2.VideoCapture(rtsp_stream_url)

                    try:
                        if not cap.isOpened():
                            print(f"Error: Could not open RTSP stream '{rtsp_stream_url}' for camera ID {camera_id}")
                            continue

                        frame_delay = 1 / 30  # Delay between frames in seconds 

                        while True:
                            start_time = time.time()
                            ret, frame = cap.read()

                            if ret:
                                roi_coordinates = self.fetch_roi_coordinates(camera_id)

                                for roi_data in roi_coordinates:
                                    x1, y1, width, height, color = roi_data['x1'], roi_data['y1'], roi_data['width'], roi_data['height'], roi_data['color']
                                    roi_color = tuple(int(color.lstrip('#')[i:i+2], 16) for i in (0, 2, 4))  # Convert a '#RRGGBB' hex string to an (R, G, B) tuple

                                    # Store ROI frame in Redis
                                    redis_client = redis.Redis(host='localhost', port=6379, db=0)
                                    _, encoded_roi = cv2.imencode('.jpg', frame[y1:y1 + height, x1:x1 + width])
                                    roi_bytes = encoded_roi.tobytes()
                                    redis_client.rpush(f'frames:{camera_id}', roi_bytes)
                                    print("============Stored ROI frame in Redis==============")

                                    # Cache ROI frame using Django's cache framework (adjust timeout as needed)
                                    cache.set(f'roi_frame:{camera_id}', roi_bytes, timeout=3600)  # Cache for 1 hour (3600 seconds)
                                    print("============Stored ROI frame in Django cache==============")
                                    # Overlay ROI on the frame with specified color
                                    cv2.rectangle(frame, (x1, y1), (x1 + width, y1 + height), roi_color, 2)

                                # Display the frame with ROIs
                                cv2.imshow('Frame with ROIs', frame)
                                
                                # Wait for the specified delay to control frame rate
                                processing_time = time.time() - start_time
                                remaining_time = max(0, frame_delay - processing_time)
                                time.sleep(remaining_time)

                                if cv2.waitKey(1) & 0xFF == ord('q'):
                                    break

                    except KeyboardInterrupt:
                        # Release resources and close windows in case of keyboard interrupt
                        cap.release()
                        cv2.destroyAllWindows()
                        break

        except Exception as e:
            print(f"Error: {e}")

"""
####======================================================== End Code ===============================================================================================






#=============================================== Variant using Celery ==============================================================




# async def some_async_function():
#     # async code here
#     pass

# logger = logging.getLogger(__name__)

# class Command(BaseCommand):
#     help = 'Capture frames from RTSP streams, detect items in specified ROIs, save ROI frames in Redis, and generate alerts for undetected objects.'

#     def add_arguments(self, parser):
#         parser.add_argument('--camera_id', type=int, help='Specify the camera ID for which frames should be captured.')

#     def process_frames_in_batches(self, camera_url, batch_size, camera_id):
#         cap = cv2.VideoCapture(camera_url)
#         if not cap.isOpened():
#             self.stdout.write(self.style.ERROR(f"Failed to open camera with URL: {camera_url}"))
#             return

#         try:
#             while True:
#                 frames = []
#                 for _ in range(batch_size):
#                     ret, frame = cap.read()
#                     if ret:
#                         frames.append(frame)
#                 processed_frames = self.process_batch(frames, camera_id)
#                 for frame in processed_frames:
#                     # Display the frame with ROIs and detected objects
#                     cv2.imshow('Frame with ROIs and Detected Objects', frame)
#                     cv2.waitKey(1)  # Display frame for 1 millisecond
#         except KeyboardInterrupt:
#             # Release resources and close windows in case of keyboard interrupt
#             cap.release()
#             cv2.destroyAllWindows()
#         except Exception as e:
#             # Handle other exceptions and display an error message
#             self.stdout.write(self.style.ERROR(f"Error: {e}"))
            
#         finally:
#             cap.release()
#             cv2.destroyAllWindows()


#     def process_batch(self, frames, camera_id):
#         processed_frames = []
#         for frame in frames:
#             # Fetch ROI data from the database for the specified camera
#             roi_data = self.fetch_roi_data(camera_id)
#             if not roi_data:
#                 self.stdout.write(self.style.WARNING(f"No ROI data found for camera {camera_id}. Detecting objects in fullscreen."))
#                 roi_info = {
#                     "x1": 0,
#                     "y1": 0,
#                     "width": frame.shape[1],
#                     "height": frame.shape[0],
#                     "roi_name": "Fullscreen",
#                     "color": "#FFFFFF"  # White color for fullscreen
#                 }
#                 # roi_data = [roi_info]

#             for roi_info in roi_data:
#                 x1, y1, width, height, roi_name, color = roi_info['x1'], roi_info['y1'], roi_info['width'], roi_info['height'], roi_info['roi_name'], roi_info['color']
#                 expected_objects = color.split(',')  # Split expected objects from color field
#                 rectangle_color = tuple(int(color.lstrip('#')[i:i+2], 16) for i in (0, 2, 4))  # Convert a '#RRGGBB' hex string to an (R, G, B) tuple
#                 # Extract the ROI frame
#                 roi_frame = frame[y1:y1 + height, x1:x1 + width]

#                 # Detect objects in the ROI frame
#                 detected_objects, bounding_boxes = self.detect_objects_in_roi(roi_frame, expected_objects, x1, y1, width, height)

#                 # Draw ROI rectangle
#                 # cv2.rectangle(frame, (x1, y1), (x1 + width, y1 + height), (255, 0, 0), 2)
#                 cv2.rectangle(frame, (x1, y1), (x1 + width, y1 + height), rectangle_color, 2)
#                 cv2.putText(frame, roi_name, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

#                 # Draw bounding boxes around detected objects within the ROI
#                 for (x1, y1, x2, y2), label in zip(bounding_boxes, detected_objects):
#                     x1 += roi_info['x1']
#                     x2 += roi_info['x1']
#                     y1 += roi_info['y1']
#                     y2 += roi_info['y1']
#                     x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
#                     cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
#                     cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

#                 # Display the frame with ROIs and detected objects
#                 cv2.imshow('Frame with ROIs and Detected Objects', frame)
#                 cv2.waitKey(1)  # Display frame for 1 millisecond

#                 # Save ROI frames in Redis and Django cache if specific conditions are met
#                 if any(label in detected_objects for label in ['Gloves: no', 'Helmet: no', 'Glasses: no', 'Shoes: no']):
#                     # Save ROI frames in Redis
#                     redis_client = redis.Redis(host='localhost', port=6379, db=0)
#                     serialized_frame = self.serialize_frame(frame)
#                     redis_key = f'frames:{camera_id}:{roi_name}:{roi_info["id"]}'
#                     redis_client.rpush(redis_key, serialized_frame)
#                     redis_client.expire(redis_key, 3600)  # Expire after 1 hour (3600 seconds)

#                     # Save ROI frames in Django cache
#                     cache_key = f'roi_frame:{camera_id}:{roi_name}'
#                     cache.set(cache_key, serialized_frame, timeout=3600)  # Cache for 1 hour (3600 seconds)

#                     # Generate alert
#                     self.generate_alert(detected_objects, roi_name, camera_id, frame)
#                 else:
#                     logger.info("Object Detected With YES")

#                 processed_frames.append(frame)

#         return processed_frames


#     def detect_objects_in_roi(self, roi_frame,expected_objects,x1, y1, width, height):
#         fps = 30
#         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
#         out = cv2.VideoWriter("output.mp4", fourcc, fps, (roi_frame.shape[1], roi_frame.shape[0]))  # Output video filename, codec, fps, frame size
#         model = YOLO('best.pt')
#         results = model(roi_frame)[0]
#         classes = [
#             'Helmet: yes', 'Glasses: yes', 'Gloves: yes', 'Shoes: yes', 'Person',
#             'Helmet: no', 'Glasses: no', 'Gloves: no', 'Shoes: no'
#         ]

#         detected_objects = []
#         bounding_boxes = []
#         persons = []
#         class_label = None
#         x1 = y1 = x2 = y2 = 0

#         for result in results:
#             x1, y1, x2, y2, conf, class_idx = result.boxes.data.tolist()[0]
#             class_idx = int(class_idx)
#             class_label = classes[class_idx]

#             if class_label == "Person":
#                 # Create a new person object with an empty labels set
#                 person = {"bbox": (x1, y1, x2, y2), "labels": set()}
#                 persons.append(person)

#         for result in results:
#             x1, y1, x2, y2, conf, class_idx = result.boxes.data.tolist()[0]
#             class_idx = int(class_idx)
#             class_label = classes[class_idx]

#             if class_label != "Person":
#                 cx, cy = (x1 + x2) / 2, (y1 + y2) / 2  # center of the PPE bbox
#                 for person in persons:
#                     px1, py1, px2, py2 = person["bbox"]
#                     if px1 <= cx <= px2 and py1 <= cy <= py2:  # if the center of the PPE bbox is within the Person bbox
#                         person["labels"].add(class_label)  # As "labels" is now a set, the same label cannot be added twice
#                         break
#         for person in persons:
#             x1, y1, x2, y2 = person["bbox"]
#             x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
#             if x1 >= 0 and y1 >= 0 and x2 >= 0 and y2 >= 0:
#                 cv2.rectangle(roi_frame, (x1, y1), (x2, y2), (0, 255, 0), 3)

#             # Set the text position just above the bbox
#             text_position = (int(x1), int(y1) - 10)

#             for label in person["labels"]:
#                 # Add a black rectangle for padding
#                 text_width, text_height = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
#                 rectangle_bgr = (0, 0, 0)
#                 (text_width, text_height) = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 1.2, thickness=1)[0]
#                 box_coords = ((text_position[0], text_position[1]), (text_position[0] + text_width + 2, text_position[1] - text_height - 2))
#                 cv2.rectangle(roi_frame, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)

#                 if "yes" in label or "Person" in label:
#                     text_color = (0, 255, 0)  # Green color
#                 else:
#                     text_color = (0, 0, 255)  # Red color

#                 # Add the text
#                 cv2.putText(roi_frame, label, text_position, cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2)
#                 # Update text position for next label
#                 text_position = (text_position[0], text_position[1] - 30)
        
#         frame = cv2.resize(roi_frame, (roi_frame.shape[1], roi_frame.shape[0]))
#         out.write(frame)

#         # cv2.imshow("yolov8", frame)
#         detected_objects.append(class_label)
#         print(f"detected_objects========+++++++++=============:" ,detected_objects)
#         bounding_boxes.append((x1, y1, x2, y2))
#         return detected_objects, bounding_boxes

#     def serialize_frame(self, frame):
#         _, encoded_frame = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
#         return msgpack.packb(encoded_frame.tobytes())


#     def generate_alert(self, detected_objects, roi_name, camera_id, frame):
#         # Object not detected in the ROI, crop and save the image
#         object_detected = True
#         cropped_image = frame.copy()

#         # Save the cropped image temporarily
#         temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.jpg')
#         cv2.imwrite(temp_file.name, cropped_image)

#         # Create an instance of the Alert model and save it to the database
#         alert = Alert.objects.create(camera_id=camera_id, alert_message=f"{detected_objects} not detected in ROI {roi_name}")

#         # Send email alert with the cropped image as an attachment
#         subject = 'Alert: Object Not Detected'
#         message = f"{detected_objects} Not detected in ROI {roi_name} in camera ID {camera_id}, please check the camera."
#         print(message,"========================")
#         from_email = 'kalpesh.nettyfy@gmail.com'  # Replace with sender's email address
#         recipient_list = ['kuldip.nettyfy@gmail.com']  # Replace with recipient's email address

#         # Create an EmailMessage object and attach the cropped image
#         email = EmailMessage(subject, message, from_email, recipient_list)
#         email.attach_file(temp_file.name)

#         # Send the email
#         email.send()

#         # Close and delete the temporary file after sending the email
#         temp_file.close()
#         os.remove(temp_file.name)

#         logger.info(f"Alert sent with cropped image.")
        
#         self.stdout.write(self.style.SUCCESS(f"Alert sent with cropped image and saved in the database."))


#     def fetch_roi_data(self, camera_id):
#         # Fetch ROI data from the database for the specified camera

#         try:
#             roi_data_list = Roi.objects.filter(camera_id=camera_id).values('id', 'x1', 'y1', 'width', 'height', 'roi_name', 'color')
#             return list(roi_data_list)
#         except Roi.DoesNotExist:
#             logger.error(f"No ROI coordinates found for camera {camera_id}")
#             return []

    
#     def handle(self, *args, **options):
#         camera_id = options.get('camera_id')

#         if camera_id is None:
#             logger.error('Please provide a valid camera ID using --camera_id argument.')
#             return

#         try:
#             # Get the camera object based on the provided ID
#             camera = Camera.objects.get(id=camera_id)
#             camera_url = camera.rtsp_url

#             # Pass the camera URL, batch size, and camera ID to the Celery task
#             self.process_frames_in_batches(camera_url, 10, camera_id)

#         except KeyboardInterrupt:
#             logger.info('Process interrupted by user. Exiting...')
#         except Exception as e:
#             logger.error(f"Error: {e}")









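# Sample public MJPEG stream URL, presumably kept around for local testing: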
# http://takemotopiano.aa1.netvolante.jp:8190/nphMotionJpeg?Resolution=640x480&Quality=Standard&Framerate=30





    # def process_batch(self, frames, camera_id):
    #     processed_frames = []
    #     for frame in frames:
    #         # Fetch ROI data from the database for the specified camera
    #         roi_data = self.fetch_roi_data(camera_id)
    #         if not roi_data:
    #             self.stdout.write(self.style.ERROR(f"No ROI data found for camera {camera_id}"))
    #             continue

    #         for roi_info in roi_data:
    #             x1, y1, width, height, roi_name, color = roi_info['x1'], roi_info['y1'], roi_info['width'], roi_info['height'], roi_info['roi_name'], roi_info['color']
    #             expected_objects = color.split(',')  # Split expected objects from color field
    #                 rectangle_color = tuple(int(color.lstrip('#')[i:i+2], 16) for i in (0, 2, 4))  # Convert a '#RRGGBB' hex string to an (R, G, B) tuple
    #             # Extract the ROI frame
    #             roi_frame = frame[y1:y1 + height, x1:x1 + width]

    #             # Detect objects in the ROI frame
    #             detected_objects, bounding_boxes = self.detect_objects_in_roi(roi_frame, expected_objects, x1, y1, width, height)

    #             # Draw ROI rectangle
    #             # cv2.rectangle(frame, (x1, y1), (x1 + width, y1 + height), (255, 0, 0), 2)
    #             cv2.rectangle(frame, (x1, y1), (x1 + width, y1 + height), rectangle_color, 2)
    #             cv2.putText(frame, roi_name, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

    #             # Draw bounding boxes around detected objects within the ROI
    #             for (x1, y1, x2, y2), label in zip(bounding_boxes, detected_objects):
    #                 x1 += roi_info['x1']
    #                 x2 += roi_info['x1']
    #                 y1 += roi_info['y1']
    #                 y2 += roi_info['y1']
    #                 x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    #                 cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
    #                 cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

    #             # Display the frame with ROIs and detected objects
    #             cv2.imshow('Frame with ROIs and Detected Objects', frame)
    #             cv2.waitKey(1)  # Display frame for 1 millisecond

    #             # Save ROI frames in Redis and Django cache if specific conditions are met
    #             if any(label in detected_objects for label in ['Gloves: no', 'Helmet: no', 'Glasses: no', 'Shoes: no']):
    #                 # Save ROI frames in Redis
    #                 redis_client = redis.Redis(host='localhost', port=6379, db=0)
    #                 serialized_frame = self.serialize_frame(frame)
    #                 redis_key = f'frames:{camera_id}:{roi_name}:{roi_info["id"]}'
    #                 redis_client.rpush(redis_key, serialized_frame)
    #                 redis_client.expire(redis_key, 3600)  # Expire after 1 hour (3600 seconds)

    #                 # Save ROI frames in Django cache
    #                 cache_key = f'roi_frame:{camera_id}:{roi_name}'
    #                 cache.set(cache_key, serialized_frame, timeout=3600)  # Cache for 1 hour (3600 seconds)

    #                 # Generate alert
    #                 self.generate_alert(detected_objects, roi_name, camera_id, frame)
    #             else:
    #                 logger.info("Object Detected")

    #             processed_frames.append(frame)

    #     return processed_frames


# ==========================================================================================================================

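# Active implementation: one worker process per enabled camera (multiprocessing.Process).
# Each process (process_device below) runs YOLO PPE detection on its camera's RTSP stream,
# stores Alert records with the annotated frame for detections inside the configured ROIs,
# and sends an email notification for each alert.
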
from __future__ import absolute_import, unicode_literals
from multiprocessing import Process
import cv2
import redis
from django.core.cache import cache
from django.core.management.base import BaseCommand
from myprofile.models import *
from ultralytics import YOLO
import time
from django.core.mail import send_mail, EmailMessage
import os
import tempfile
import msgpack
import asyncio
import logging, sys
import numpy as np
from celery import Celery
from celery import shared_task
from celery.utils.log import get_task_logger
from django.conf import settings
import datetime
from django.core.files.base import ContentFile
import pandas as pd
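
# A small helper sketch for the '#RRGGBB' -> colour-tuple conversion that the commented-out
# variants above perform inline. OpenCV draws in BGR order, so the hex channels are reversed
# here; note this helper is not wired into process_device(), which hardcodes its colours.
def hex_to_bgr(hex_color: str) -> tuple:
    """Convert a '#RRGGBB' string (as stored in Roi.color) into an OpenCV BGR tuple."""
    r, g, b = (int(hex_color.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4))
    return (b, g, r)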


# def detect_objects_in_roi(roi_frame, expected_objects):
#     model = YOLO('best.pt')
#     results = model(roi_frame)[0]
#     classes = [
#         'Helmet: yes', 'Glasses: yes', 'Gloves: yes', 'Shoes: yes', 'Person',
#         'Helmet: no', 'Glasses: no', 'Gloves: no', 'Shoes: no'
#     ]

#     detected_objects = []
#     bounding_boxes = []

#     for result in results:
#         x1, y1, x2, y2, conf, class_idx = result.boxes.data.tolist()[0]
#         class_idx = int(class_idx)
#         class_label = classes[class_idx]

#         if class_label in expected_objects:
#             detected_objects.append(class_label)
#             bounding_boxes.append((x1, y1, x2, y2))

#     return detected_objects, bounding_boxes

def process_device(camera_id):
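    """Run PPE detection on a single camera's RTSP stream and raise alerts for its ROIs.

    Intended to be run in its own process (see Command.handle below), one process per
    enabled Camera.
    """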

    output_width, output_height = 1280, 720

    camera = Camera.objects.get(id=camera_id, is_enabled=True)
    
    camera_events = set(CameraEvent.objects.values_list('camera_event', flat=True))

    # camera_events = CameraEvent.objects.all().values_list('camera_event')
    # camera_events = [event[0] for event in camera_events]

    roi_data = Roi.objects.filter(camera=camera_id)

    classes = [
                'Helmet: yes', 'Jacket: yes', 'Gloves: yes', 'Shoes: yes', 'Person',
                'Helmet: no', 'Jacket: no', 'Gloves: no', 'Shoes: no'
            ]
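    # NOTE: this list must match the class order 'best.pt' was trained with; the model's
    # class index is used directly to look up the label in the detection loop below.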
    
    # Establish a connection to the RTSP stream
    rtsp_stream_url = camera.rtsp_url
    cap = cv2.VideoCapture(rtsp_stream_url)

    model = YOLO('best.pt')
    

    # Establish a connection to Redis (kept from the earlier variants above; this version
    # does not currently push ROI frames to Redis)
    redis_client = redis.Redis(host='localhost', port=6379, db=0)

    # Create a window to display frames
    cv2.namedWindow("Frame with ROIs and Objects", cv2.WINDOW_NORMAL)
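    # cv2.namedWindow/cv2.imshow need a desktop display; drop or guard the GUI calls if this
    # command is deployed on a headless server.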

    while True:
        # Read frames from the RTSP stream
        ret, frame = cap.read()

        if not ret:
            break

        frame = cv2.resize(frame, (output_width, output_height))
        # Check if the frame is successfully captured
        if ret:

            results = model(frame)[0]

            # Fetch ROI data from the database for the specified camera
            # roi_data = Roi.objects.filter(camera_id=camera_id).values('id', 'x1', 'y1', 'width', 'height', 'roi_name', 'color')

            for roi_info in roi_data:
                roi_x1 = roi_info.x1
                roi_y1 = roi_info.y1
                roi_width = roi_info.width
                roi_height = roi_info.height
                # roi_id = roi_info['id']
                # print(roi_id,"============= roi_id == roi_id =========")
                # x1, y1, width, height, roi_name, color = roi_info['x1'], roi_info['y1'], roi_info['width'], roi_info['height'], roi_info['roi_name'], roi_info['color']
                # expected_objects = color.split(',')  # Split expected objects from color field

                # Draw rectangle around ROI
                cv2.rectangle(frame, (roi_x1, roi_y1), (roi_x1 + roi_width, roi_y1 + roi_height), (255, 0, 0), 2)
                cv2.putText(frame, roi_info.roi_name, (roi_x1, roi_y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

                
                
                for result in results:
                    x1, y1, x2, y2, conf, class_idx = result.boxes.data.tolist()[0]
                    class_idx = int(class_idx)
                    class_label = classes[class_idx]

                    # Only raise alerts for non-compliance labels
                    if class_label not in ['Helmet: no', 'Jacket: no', 'Gloves: no', 'Shoes: no']:
                        continue

                    # Use the centre of the detection box to decide whether it lies inside the ROI
                    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
                    if roi_x1 <= cx <= roi_x1 + roi_width and roi_y1 <= cy <= roi_y1 + roi_height:
                        # Draw a bounding box around the violation within the ROI (red)
                        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)
                        cv2.putText(frame, class_label, (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)

                        # Save the alert in the database
                        missing_item = class_label.split(':')[0]  # e.g. 'Helmet: no' -> 'Helmet'
                        alert_message = f"Alert: {missing_item} Not Detected in ROI Name: {roi_info.roi_name} (Camera ID: {camera_id})"
                        alert = Alert.objects.create(
                            camera=camera,
                            alert_message=alert_message,
                            camera_events=CameraEvent.objects.first()
                        )

                        # Add the camera in-charge users to the alert
                        alert.camera_incharge.add(*camera.camera_user.all())

                        # Save the annotated frame on the alert
                        _, encoded_frame = cv2.imencode('.jpg', frame)
                        image_file = ContentFile(encoded_frame.tobytes())
                        alert.frame.save('alert.jpg', image_file)

                        # Send an email alert with the annotated frame attached.
                        # NOTE: an email is sent for every matching detection in every frame;
                        # add a per-camera/ROI cooldown if this becomes too noisy.
                        subject = 'Alert: Object Not Detected'
                        message = f"{missing_item} not detected in ROI {roi_info.roi_name} in camera ID {camera_id}, please check the camera."
                        from_email = 'kalpesh.nettyfy@gmail.com'
                        recipient_list = ['kuldip.nettyfy@gmail.com']

                        email = EmailMessage(subject, message, from_email, recipient_list)
                        email.attach_file(alert.frame.path)
                        email.send()


            # Display the frame with ROIs and detected objects
            cv2.imshow("Frame with ROIs and Objects", frame)

            # Check for the 'q' key to exit the loop and stop the processing
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    # Release the video capture and close all OpenCV windows
    cap.release()
    cv2.destroyAllWindows()


class Command(BaseCommand):
    help = 'Triggers alerts based on video analysis'

    def handle(self, *args, **options):
        cameras = Camera.objects.filter(is_enabled=True)

        processes = []

        for cam in cameras:
            process = Process(target=process_device, args=(cam.id,))
            process.start()
            processes.append(process)

        for process in processes:
            process.join()
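
# Usage sketch (assumes this module is saved under myprofile/management/commands/, e.g. as
# trigger_alerts.py -- the management command name is whatever the file is called):
#   python manage.py trigger_alerts
# One worker process is started per enabled Camera; stop the command with Ctrl+C.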

                






    #                     # # Create a new person object with an empty labels set
    #                     # person = {"bbox": (x1, y1, x2, y2), "labels": set()}
    #                     # persons.append(person)
    #                     for obj in expected_objects:
    #                         if obj in class_label:
    #                             alert_message = f"Alert: {obj} Not Detected in ROI Name: {roi_name} (Camera ID: {camera_id})"
    #                             alert = Alert.objects.create(
    #                                 camera=camera,
    #                                 alert_message=alert_message,
    #                             )
    #                             alert.save()

    #                             # Add camera in charge to the alert
    #                             camera_incharge = Camera.objects.get(id=camera_id).camera_user.all()
    #                             alert.camera_incharge.add(*camera_incharge)
    #                             alert.save()

    #                             # Save alert image to the database
    #                             _, encoded_frame = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
    #                             image_file = ContentFile(encoded_frame, name=f"alert_{datetime.datetime.now()}.jpg")
    #                             alert.frame.save(image_file.name, image_file)
    #                             break

    #             # Display the frame with ROIs and detected objects
    #             cv2.imshow("Frame with ROIs and Objects", frame)

    #             # Check for the 'q' key to exit the loop and stop the processing
    #             if cv2.waitKey(1) & 0xFF == ord('q'):
    #                 break

    # # Release the video capture and close all OpenCV windows
    # cap.release()
    # cv2.destroyAllWindows()
               




    #             for result in results:
    #                 x1, y1, x2, y2, conf, class_idx = result.boxes.data.tolist()[0]
    #                 class_idx = int(class_idx)
    #                 class_label = classes[class_idx]

    #                 if class_label != "Person":
    #                     cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
    #                     for person in persons:
    #                         px1, py1, px2, py2 = person["bbox"]
    #                         if px1 <= cx <= px2 and py1 <= cy <= py2:
    #                             person["labels"].add(class_label)
    #                             break

    #             # Draw ROI rectangle on the main frame
    #             cv2.rectangle(frame, (int(x1), int(y1)), (int(x1 + width), int(y1 + height)), (255, 0, 0), 2)
    #             cv2.putText(frame, roi_name, (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
                


    #             for person in persons:
    #                 x1, y1, x2, y2 = person["bbox"]
    #                 x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    #                 if x1 >= 0 and y1 >= 0 and x2 >= 0 and y2 >= 0:
    #                     cv2.rectangle(roi_frame, (x1, y1), (x2, y2), (0, 255, 0), 3)

    #                 text_position = (int(x1), int(y1) - 10)

    #                 for label in person["labels"]:
    #                     text_width, text_height = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
    #                     rectangle_bgr = (0, 0, 0)
    #                     (text_width, text_height) = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 1.2, thickness=1)[0]
    #                     box_coords = ((text_position[0], text_position[1]), (text_position[0] + text_width + 2, text_position[1] - text_height - 2))
    #                     cv2.rectangle(roi_frame, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)

    #                     if "yes" in label or "Person" in label:
    #                         text_color = (0, 255, 0)
    #                     else:
    #                         text_color = (0, 0, 255)

    #                     cv2.putText(roi_frame, label, text_position, cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2)
    #                     text_position = (text_position[0], text_position[1] - 30)

    #                     # save alert in Alert model

    #                     for label in expected_objects:
    #                         if label not in person["labels"]:
    #                             alert_message = f"Alert: {label} Not Detected in ROI Name: {roi_name} (Camera ID: {camera_id})"
    #                             alert = Alert.objects.create(
    #                                 camera=camera,
    #                                 alert_message=alert_message,
    #                             )
    #                             alert.save()

    #                             # Add camera in charge to the alert
    #                             camera_incharge = Camera.objects.get(id=camera_id).camera_user.all()
    #                             alert.camera_incharge.add(*camera_incharge)
    #                             alert.save()

    #                             # Save alert image to the database
    #                             _, encoded_frame = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
    #                             image_file = ContentFile(encoded_frame, name=f"alert_{datetime.datetime.now()}.jpg")
    #                             alert.frame.save(image_file.name, image_file)
    #                             break

    #             # Display the frame with ROIs and detected objects
    #             cv2.imshow("Frame with ROIs and Objects", frame)

    #             # Check for the 'q' key to exit the loop and stop the processing
    #             if cv2.waitKey(1) & 0xFF == ord('q'):
    #                 break

    # # Release the video capture and close all OpenCV windows
    # cap.release()
    # cv2.destroyAllWindows()
    

