from multiprocessing import Process
import django
import datetime
import cv2
from django.core.cache import cache
from django.core.management.base import BaseCommand
from ultralytics import YOLO
from myprofile.models import *
import requests
import threading
import logging
from django.core.files.base import ContentFile
logging.basicConfig(level=logging.INFO)
from django.db.models import Q
from myprofile.tracker import *

def send_telegram_notification(alert_message, chat_ids, bot_token, image_data):
    """Send a photo alert to a Telegram chat via the Bot API ``sendPhoto`` call.

    Args:
        alert_message: Caption text attached to the uploaded photo.
        chat_ids: Target chat id, forwarded as-is in the ``chat_id`` field.
        bot_token: Bot API token used to build the request URL.
        image_data: JPEG-encoded image bytes to upload as the photo.

    Any failure is logged and swallowed so a notification error never
    propagates into the calling detection thread.
    """
    try:
        url = f"https://api.telegram.org/bot{bot_token}/sendPhoto"
        files = {'photo': ('image.jpg', image_data, 'image/jpeg')}
        data = {'chat_id' : chat_ids, "caption": alert_message}
        # requests has NO default timeout; without one a stalled Telegram
        # call would hang this worker thread indefinitely.
        response = requests.post(url, files=files, data=data, timeout=30)
        logging.info("Telegram sendPhoto responded with %s", response.status_code)
    except Exception as e:
        logging.error(f"Failed to send telegram notification: {e}")

def process_device(camera_id):
    """Run the PPE-detection/alerting loop for one enabled camera.

    Reads frames from the camera's RTSP stream, runs the YOLO PPE model on
    every 16th frame, matches PPE detections to person bounding boxes, and —
    whenever the visible person count changes — annotates the frame, stores
    an ``Alert`` row per missing-PPE label on each newly tracked person, and
    fires Telegram notifications in background threads.

    Args:
        camera_id: Primary key of the ``Camera`` row to process.

    Blocks until the stream ends or 'q' is pressed in the preview window.
    All exceptions are caught and printed so one bad camera cannot kill
    the whole command.
    """
    # L3 imports the *module* `datetime`; a wildcard import below it could
    # shadow the name, so bind the module locally to be unambiguous.
    import datetime as _dt

    # Long-lived worker: drop any DB connections that may have gone stale.
    django.db.close_old_connections()

    # Display size of the preview window.
    output_width, output_height = 720, 500
    cap = None  # declared here so the finally block can release it safely

    try:
        print("one process")

        camera = Camera.objects.get(id=camera_id, is_enabled=True)

        # Event names linked to this camera (e.g. "Helmet", "Jacket").
        fetch_events = LinkCameraEventToCamera.objects.filter(
            camera=camera, is_enabled=True
        ).values_list('camera_events__camera_event', flat=True)

        camera_event_names = fetch_events
        # Alerts fire on the "<event>: no" (missing PPE) classes plus "Person".
        camera_event_names_with_condition = [f"{name}: no" for name in camera_event_names]

        data = camera_event_names_with_condition
        data.append('Person')

        # Fixed class order of the trained model; list index == model class id.
        rules_classes = [
                'Helmet: yes', 'Jacket: yes', 'Gloves: yes', 'Shoes: yes', 'Person',
                'Helmet: no', 'Jacket: no', 'Gloves: no', 'Shoes: no'
            ]

        data_dict = {item: True for item in data}

        # classes[i] keeps the label only for class ids this camera cares
        # about; every other id maps to ' ' and is ignored downstream.
        classes = [' '] * len(rules_classes)
        for i, item in enumerate(rules_classes):
            if item in data_dict:
                classes[i] = item

        cap = cv2.VideoCapture(camera.rtsp_url)

        model = YOLO("/home/nettyfy/visnx/visnx-backend/Nettyfy_visnx/bestppe1.pt")

        tracker = SimpleTracker()

        # Process one frame out of every (skip_frames + 1) to reduce load.
        skip_frames = 15
        frame_skip_counter = 0

        previous_person_count = 0

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            frame_skip_counter += 1
            if frame_skip_counter <= skip_frames:
                continue
            frame_skip_counter = 0

            results = model(frame)[0]

            # First pass: collect person boxes so PPE boxes can be matched.
            persons = []
            for result in results:
                x1, y1, x2, y2, conf, class_idx = result.boxes.data.tolist()[0]
                class_idx = int(class_idx)
                class_label = classes[class_idx]

                if class_label == "Person":
                    person = {"bbox": (x1, y1, x2, y2), "labels": set()}
                    persons.append(person)

            current_person_count = len(persons)
            last_track_id = 0
            if current_person_count != previous_person_count:
                # Person count changed: attribute PPE labels, draw, and alert.
                print(f"Person count changed from {previous_person_count} to {current_person_count}")
                for result in results:
                    x1, y1, x2, y2, conf, class_idx = result.boxes.data.tolist()[0]
                    class_idx = int(class_idx)
                    class_label = classes[class_idx]

                    if class_label != "Person":
                        # A PPE box belongs to the first person box that
                        # contains the PPE box centre.
                        cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
                        for person in persons:
                            px1, py1, px2, py2 = person["bbox"]
                            if px1 <= cx <= px2 and py1 <= cy <= py2:
                                # "labels" is a set, so duplicates collapse.
                                person["labels"].add(class_label)
                                break

                for person in persons:
                    # Draw the person bbox and stack its labels above it.
                    x1, y1, x2, y2 = person["bbox"]
                    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0,255, 0), 3)

                    # Text starts just above the bbox and moves up per label.
                    text_position = (int(x1), int(y1) - 10)

                    for label in person["labels"]:
                        # Black padding rectangle behind the label text.
                        rectangle_bgr = (0, 0, 0)
                        (text_width, text_height) = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 1.2, thickness=1)[0]
                        box_coords = ((text_position[0], text_position[1]), (text_position[0] + text_width + 2, text_position[1] - text_height - 2))
                        cv2.rectangle(frame, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)

                        # Green for compliant/person labels, red for violations.
                        if "yes" in label or "Person" in label:
                            text_color = (0, 255, 0)
                        else:
                            text_color = (0, 0, 255)

                        cv2.putText(frame, label, text_position, cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2)
                        # Move up for the next label.
                        text_position = (text_position[0], text_position[1] - 30)

                # Update tracker with the latest detections.
                tracker.update_tracks(persons)

                for track_id, person in tracker.tracks.items():
                    if last_track_id != track_id:
                        last_track_id = track_id

                        detect = dict(tracker.tracks.items())

                        # NOTE(review): labels_list ends up holding only the
                        # labels of the *last* track iterated here — confirm
                        # whether per-track labels were intended.
                        for key in detect:
                            labels = detect[key]['labels']
                            labels_list = [label for label in labels if label.strip()]

                        print(detect,"====================================")
                        x1, y1, x2, y2 = person["bbox"]

                        for lable in labels_list:
                            label_text = lable.split(':')[0]
                            camera_event_obj = CameraEvent.objects.filter(camera_event = label_text).first()
                            # BUG FIX: bare `datetime.now()` raised
                            # AttributeError (datetime is the module here);
                            # qualify through the locally bound module.
                            current_time = _dt.datetime.now()
                            formatted_current_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
                            _, encoded_frame = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
                            # BUG FIX: imencode returns a NumPy array; both
                            # ContentFile and the HTTP upload need raw bytes.
                            image_bytes = encoded_frame.tobytes()
                            image_file = ContentFile(image_bytes, name="alert_image.jpg")
                            alert = Alert(
                                camera = camera,
                                alert_message = f"Alert : {lable} detected at {formatted_current_time} in Device ID :{camera_id}",
                                frame=image_file,
                                camera_events = camera_event_obj
                            )
                            alert.save()

                            # Attach every user in charge of this camera.
                            camera_incharge_list = Camera.objects.get(id=camera_id).camera_user.all()
                            for user in camera_incharge_list:
                                alert.camera_incharge.add(user)
                            alert.save()

                            # Fire-and-forget one notification per bot so a
                            # slow Telegram call never blocks detection.
                            for bot in TelegramBotToken.objects.all():
                                try:
                                    send_notification = threading.Thread(target=send_telegram_notification, args=(f"Alert : {lable} detected at {formatted_current_time} in Device ID :{camera_id}", bot.chat_id, bot.bot_token, image_bytes))
                                    send_notification.start()
                                except Exception as e:
                                    logging.error(f"Failed to send telegram notification: {e}")

                # Resize and show the annotated frame.
                frame = cv2.resize(frame, (output_width, output_height))
                cv2.imshow("yolov8", frame)

                if cv2.waitKey(1) == ord('q'):
                    break

            previous_person_count = current_person_count

    except Exception as e:
        print(f"Error processing device {camera_id}: {str(e)}")
    finally:
        # BUG FIX: the capture handle and preview window were previously
        # leaked (finally was a `pass`); release them even on error.
        if cap is not None:
            cap.release()
        cv2.destroyAllWindows()

class Command(BaseCommand):
    """Management command that runs the PPE video-analysis alert loop."""

    help = 'Triggers alerts based on video analysis'

    def handle(self, *args, **options):
        """Process every enabled camera sequentially, oldest first.

        Each camera blocks until its stream ends before the next starts.
        """
        enabled_cameras = Camera.objects.filter(is_enabled=True).order_by('created_at')
        for cam in enabled_cameras:
            process_device(cam.id)