
    Wpf"                      d   d Z ddlmZ ddlZddlmZ ddlmZ ddlmZ ddl	m
Z
 ddl	mZ dd	l	mZ dd
l	mZ ddl	mZ ddlmZ ddlmZ ddlmZ ddlmZ ddlmZ ddlmZ ddlmZ ddlmZ ddlmZ ddlmZ ddlm Z  ddlm!Z! ddl"m#Z# ddl$m%Z% ddl&m'Z' ddl(m)Z) dZ*d Z+ G d de          Z,dS ) zMediaPipe Holistic.    )
NamedTupleN)#constant_side_packet_calculator_pb2)gate_calculator_pb2)split_vector_calculator_pb2)image_to_tensor_calculator_pb2)inference_calculator_pb2)(tensors_to_classification_calculator_pb2) tensors_to_floats_calculator_pb2)#tensors_to_landmarks_calculator_pb2)ssd_anchors_calculator_pb2)"detections_to_rects_calculator_pb2)"landmark_projection_calculator_pb2)"local_file_contents_calculator_pb2)"non_max_suppression_calculator_pb2)"rect_transformation_calculator_pb2)switch_container_pb2)roi_tracking_calculator_pb2)SolutionBase)download_utils)FACEMESH_CONTOURS)FACEMESH_TESSELATION)HandLandmark)HAND_CONNECTIONS)PoseLandmark)POSE_CONNECTIONSzBmediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypbc                 v    | dk    rt          j        d           dS | dk    rt          j        d           dS dS )zoDownloads the pose landmark lite/heavy model from the MediaPipe Github repo if it doesn't exist in the package.r   z9mediapipe/modules/pose_landmark/pose_landmark_lite.tflite   z:mediapipe/modules/pose_landmark/pose_landmark_heavy.tfliteN)r   download_oss_model)model_complexitys    c/var/www/html/nettyfy-visnx/env/lib/python3.11/site-packages/mediapipe/python/solutions/holistic.py!_download_oss_pose_landmark_modelr!   7   sp     %CE E E E E1%DF F F F F     c                   R     e Zd ZdZ	 	 	 	 	 	 	 	 d
 fd	Zdej        def fd	Z xZ	S )Holistica'  MediaPipe Holistic.

  MediaPipe Holistic processes an RGB image and returns pose landmarks, left and
  right hand landmarks, and face mesh landmarks on the most prominent person
  detected.

  Please refer to https://solutions.mediapipe.dev/holistic#python-solution-api
  for usage examples.
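
  A minimal usage sketch (illustrative only; OpenCV ("cv2") and the file name
  'image.jpg' are assumptions rather than part of this module):

    import cv2
    import mediapipe as mp

    with mp.solutions.holistic.Holistic(static_image_mode=True) as holistic:
      # MediaPipe expects RGB input; OpenCV loads images as BGR.
      image = cv2.cvtColor(cv2.imread('image.jpg'), cv2.COLOR_BGR2RGB)
      results = holistic.process(image)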
  """

  def __init__(self,
               static_image_mode=False,
               model_complexity=1,
               smooth_landmarks=True,
               enable_segmentation=False,
               smooth_segmentation=True,
               refine_face_landmarks=False,
               min_detection_confidence=0.5,
               min_tracking_confidence=0.5):
    """Initializes a MediaPipe Holistic object.

    Args:
      static_image_mode: Whether to treat the input images as a batch of static
        and possibly unrelated images, or a video stream. See details in
        https://solutions.mediapipe.dev/holistic#static_image_mode.
      model_complexity: Complexity of the pose landmark model: 0, 1 or 2. See
        details in https://solutions.mediapipe.dev/holistic#model_complexity.
      smooth_landmarks: Whether to filter landmarks across different input
        images to reduce jitter. See details in
        https://solutions.mediapipe.dev/holistic#smooth_landmarks.
      enable_segmentation: Whether to predict segmentation mask. See details in
        https://solutions.mediapipe.dev/holistic#enable_segmentation.
      smooth_segmentation: Whether to filter segmentation across different input
        images to reduce jitter. See details in
        https://solutions.mediapipe.dev/holistic#smooth_segmentation.
      refine_face_landmarks: Whether to further refine the landmark coordinates
        around the eyes and lips, and output additional landmarks around the
        irises. Defaults to False. See details in
        https://solutions.mediapipe.dev/holistic#refine_face_landmarks.
      min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for person
        detection to be considered successful. See details in
        https://solutions.mediapipe.dev/holistic#min_detection_confidence.
      min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) for the
        pose landmarks to be considered tracked successfully. See details in
        https://solutions.mediapipe.dev/holistic#min_tracking_confidence.
    """
    _download_oss_pose_landmark_model(model_complexity)
    super().__init__(
        binary_graph_path=_BINARYPB_FILE_PATH,
        side_inputs={
            'model_complexity': model_complexity,
            'smooth_landmarks': smooth_landmarks and not static_image_mode,
            'enable_segmentation': enable_segmentation,
            'smooth_segmentation':
                smooth_segmentation and not static_image_mode,
            'refine_face_landmarks': refine_face_landmarks,
            'use_prev_landmarks': not static_image_mode,
        },
        calculator_params={
            'poselandmarkcpu__posedetectioncpu__TensorsToDetectionsCalculator.min_score_thresh':
                min_detection_confidence,
            'poselandmarkcpu__poselandmarkbyroicpu__tensorstoposelandmarksandsegmentation__ThresholdingCalculator.threshold':
                min_tracking_confidence,
        },
        outputs=[
            'pose_landmarks', 'pose_world_landmarks', 'left_hand_landmarks',
            'right_hand_landmarks', 'face_landmarks', 'segmentation_mask'
        ])

  def process(self, image: np.ndarray) -> NamedTuple:
    """Processes an RGB image and returns the pose landmarks, left and right hand landmarks, and face landmarks on the most prominent person detected.

    Args:
      image: An RGB image represented as a numpy ndarray.

    Raises:
      RuntimeError: If the underlying graph throws any error.
      ValueError: If the input image is not three channel RGB.

    Returns:
      A NamedTuple with fields describing the landmarks on the most prominent
      person detected:
        1) "pose_landmarks" field that contains the pose landmarks.
        2) "pose_world_landmarks" field that contains the pose landmarks in
        real-world 3D coordinates that are in meters with the origin at the
        center between hips.
        3) "left_hand_landmarks" field that contains the left-hand landmarks.
        4) "right_hand_landmarks" field that contains the right-hand landmarks.
        5) "face_landmarks" field that contains the face landmarks.
        6) "segmentation_mask" field that contains the segmentation mask if
           "enable_segmentation" is set to true.
    r?   )
input_datapresence)r7   processr-   landmark
ClearFieldr.   )r:   r?   resultsrE   r>   s       r    rD   zHolistic.process   s    0 ggoo'5)9o::G (,5 ( ((J''''# (2; ( ((J''''Nr"   )Fr%   TFTFr&   r&   )
__name__
__module____qualname____doc__r8   npndarrayr   rD   __classcell__)r>   s   @r    r$   r$   B   s          "' ! $#(#'%*(+'*9 9 9 9 9 9v2: *          r"   r$   )-rK   typingr   numpyrL   mediapipe.calculators.corer   r   r   mediapipe.calculators.tensorr   r   r	   r
   r   mediapipe.calculators.tfliter   mediapipe.calculators.utilr   r   r   r   r   mediapipe.framework.toolr   /mediapipe.modules.holistic_landmark.calculatorsr   mediapipe.python.solution_baser   mediapipe.python.solutionsr   0mediapipe.python.solutions.face_mesh_connectionsr   r    mediapipe.python.solutions.handsr   ,mediapipe.python.solutions.hands_connectionsr   mediapipe.python.solutions.poser   +mediapipe.python.solutions.pose_connectionsr   r9   r!   r$    r"   r    <module>r_      s9              
 K J J J J J : : : : : : B B B B B B G G G G G G A A A A A A Q Q Q Q Q Q I I I I I I L L L L L L C C C C C C I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I 9 9 9 9 9 9 W W W W W W 8 7 7 7 7 7 5 5 5 5 5 5 N N N N N N Q Q Q Q Q Q 9 9 9 9 9 9 I I I I I I 8 8 8 8 8 8 H H H H H H [ F F Fe e e e e| e e e e er"   