# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: mediapipe/calculators/tensor/inference_calculator.proto
# Protobuf Python Version: 4.25.1
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
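# calculator.proto publicly re-exports calculator_options.proto; depending on
# the protobuf runtime version, the re-exported module is reachable either as
# a mangled module-level attribute or via the dotted package path, so both
# spellings are tried below (an explanatory note, not part of the logic).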
try:
  mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
except AttributeError:
  mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe.framework.calculator_options_pb2
from mediapipe.framework import calculator_options_pb2 as mediapipe_dot_framework_dot_calculator__options__pb2


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n7mediapipe/calculators/tensor/inference_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\x1a,mediapipe/framework/calculator_options.proto\"\xce\x11\n\x1aInferenceCalculatorOptions\x12\x12\n\nmodel_path\x18\x01 \x01(\t\x12\x16\n\x0etry_mmap_model\x18\x07 \x01(\x08\x12\x1a\n\x07use_gpu\x18\x02 \x01(\x08:\x05\x66\x61lseB\x02\x18\x01\x12\x1c\n\tuse_nnapi\x18\x03 \x01(\x08:\x05\x66\x61lseB\x02\x18\x01\x12\x1a\n\x0e\x63pu_num_thread\x18\x04 \x01(\x05:\x02-1\x12@\n\x08\x64\x65legate\x18\x05 \x01(\x0b\x32..mediapipe.InferenceCalculatorOptions.Delegate\x12T\n\x13input_output_config\x18\x08 \x01(\x0b\x32\x37.mediapipe.InferenceCalculatorOptions.InputOutputConfig\x1a\xb4\x08\n\x08\x44\x65legate\x12G\n\x06tflite\x18\x01 \x01(\x0b\x32\x35.mediapipe.InferenceCalculatorOptions.Delegate.TfLiteH\x00\x12\x41\n\x03gpu\x18\x02 \x01(\x0b\x32\x32.mediapipe.InferenceCalculatorOptions.Delegate.GpuH\x00\x12\x45\n\x05nnapi\x18\x03 \x01(\x0b\x32\x34.mediapipe.InferenceCalculatorOptions.Delegate.NnapiH\x00\x12I\n\x07xnnpack\x18\x04 \x01(\x0b\x32\x36.mediapipe.InferenceCalculatorOptions.Delegate.XnnpackH\x00\x1a\x08\n\x06TfLite\x1a\x84\x05\n\x03Gpu\x12#\n\x14use_advanced_gpu_api\x18\x01 \x01(\x08:\x05\x66\x61lse\x12H\n\x03\x61pi\x18\x04 \x01(\x0e\x32\x36.mediapipe.InferenceCalculatorOptions.Delegate.Gpu.Api:\x03\x41NY\x12\"\n\x14\x61llow_precision_loss\x18\x03 \x01(\x08:\x04true\x12\x1a\n\x12\x63\x61\x63hed_kernel_path\x18\x02 \x01(\t\x12\x1c\n\x14serialized_model_dir\x18\x07 \x01(\t\x12w\n\x16\x63\x61\x63he_writing_behavior\x18\n \x01(\x0e\x32G.mediapipe.InferenceCalculatorOptions.Delegate.Gpu.CacheWritingBehavior:\x0eWRITE_OR_ERROR\x12\x13\n\x0bmodel_token\x18\x08 \x01(\t\x12\x61\n\x05usage\x18\x05 \x01(\x0e\x32\x41.mediapipe.InferenceCalculatorOptions.Delegate.Gpu.InferenceUsage:\x0fSUSTAINED_SPEED\"&\n\x03\x41pi\x12\x07\n\x03\x41NY\x10\x00\x12\n\n\x06OPENGL\x10\x01\x12\n\n\x06OPENCL\x10\x02\"G\n\x14\x43\x61\x63heWritingBehavior\x12\x0c\n\x08NO_WRITE\x10\x00\x12\r\n\tTRY_WRITE\x10\x01\x12\x12\n\x0eWRITE_OR_ERROR\x10\x02\"N\n\x0eInferenceUsage\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x16\n\x12\x46\x41ST_SINGLE_ANSWER\x10\x01\x12\x13\n\x0fSUSTAINED_SPEED\x10\x02\x1aI\n\x05Nnapi\x12\x11\n\tcache_dir\x18\x01 \x01(\t\x12\x13\n\x0bmodel_token\x18\x02 \x01(\t\x12\x18\n\x10\x61\x63\x63\x65lerator_name\x18\x03 \x01(\t\x1a\"\n\x07Xnnpack\x12\x17\n\x0bnum_threads\x18\x01 \x01(\x05:\x02-1B\n\n\x08\x64\x65legate\x1a\x88\x06\n\x11InputOutputConfig\x12l\n\x18input_tensor_indices_map\x18\x01 \x01(\x0b\x32H.mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMapH\x00\x12h\n\x16input_tensor_names_map\x18\x03 \x01(\x0b\x32\x46.mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMapH\x00\x12m\n\x19output_tensor_indices_map\x18\x02 \x01(\x0b\x32H.mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMapH\x01\x12i\n\x17output_tensor_names_map\x18\x04 \x01(\x0b\x32\x46.mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMapH\x01\x12i\n\x15\x66\x65\x65\x64\x62\x61\x63k_tensor_links\x18\x05 \x03(\x0b\x32J.mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink\x1a\x34\n\x10TensorIndicesMap\x12 \n\x14model_tensor_indices\x18\x01 \x03(\x05\x42\x02\x10\x01\x1a&\n\x0eTensorNamesMap\x12\x14\n\x0ctensor_names\x18\x01 \x03(\t\x1aS\n\x12\x46\x65\x65\x64\x62\x61\x63kTensorLink\x12\x1f\n\x17\x66rom_output_tensor_name\x18\x01 \x01(\t\x12\x1c\n\x14to_input_tensor_name\x18\x02 \x01(\tB\x10\n\x0eInputTensorMapB\x11\n\x0fOutputTensorMap2T\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\xf7\xd3\xcb\xa0\x01 \x01(\x0b\x32%.mediapipe.InferenceCalculatorOptionsBA\n%com.google.mediapipe.calculator.protoB\x18InferenceCalculatorProto')
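# AddSerializedFile registers the serialized FileDescriptorProto above with
# the process-wide default descriptor pool; the returned FileDescriptor
# describes InferenceCalculatorOptions and its nested Delegate and
# InputOutputConfig types.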

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'mediapipe.calculators.tensor.inference_calculator_pb2', _globals)
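# The builder materializes Python classes for every message and enum in the
# descriptor and injects them into this module's globals (e.g.
# InferenceCalculatorOptions) under the qualified module name given above.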
if _descriptor._USE_C_DESCRIPTORS == False:
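  # With the pure-Python descriptor implementation, options embedded in the
  # serialized file are re-attached by hand below: b'\030\001' sets
  # FieldOptions.deprecated on use_gpu/use_nnapi, and b'\020\001' sets
  # FieldOptions.packed on model_tensor_indices. The C-extension
  # implementation derives these from the serialized file itself.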
  _globals['DESCRIPTOR']._options = None
  _globals['DESCRIPTOR']._serialized_options = b'\n%com.google.mediapipe.calculator.protoB\030InferenceCalculatorProto'
  _globals['_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORINDICESMAP'].fields_by_name['model_tensor_indices']._options = None
  _globals['_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORINDICESMAP'].fields_by_name['model_tensor_indices']._serialized_options = b'\020\001'
  _globals['_INFERENCECALCULATOROPTIONS'].fields_by_name['use_gpu']._options = None
  _globals['_INFERENCECALCULATOROPTIONS'].fields_by_name['use_gpu']._serialized_options = b'\030\001'
  _globals['_INFERENCECALCULATOROPTIONS'].fields_by_name['use_nnapi']._options = None
  _globals['_INFERENCECALCULATOROPTIONS'].fields_by_name['use_nnapi']._serialized_options = b'\030\001'
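  # Byte offsets of each message and enum descriptor within the serialized
  # file bytes above; the pure-Python runtime uses them to recover each
  # descriptor's serialized form.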
  _globals['_INFERENCECALCULATOROPTIONS']._serialized_start=155
  _globals['_INFERENCECALCULATOROPTIONS']._serialized_end=2409
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE']._serialized_start=468
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE']._serialized_end=1544
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_TFLITE']._serialized_start=766
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_TFLITE']._serialized_end=774
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_GPU']._serialized_start=777
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_GPU']._serialized_end=1421
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_GPU_API']._serialized_start=1230
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_GPU_API']._serialized_end=1268
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_GPU_CACHEWRITINGBEHAVIOR']._serialized_start=1270
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_GPU_CACHEWRITINGBEHAVIOR']._serialized_end=1341
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_GPU_INFERENCEUSAGE']._serialized_start=1343
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_GPU_INFERENCEUSAGE']._serialized_end=1421
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_NNAPI']._serialized_start=1423
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_NNAPI']._serialized_end=1496
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_XNNPACK']._serialized_start=1498
  _globals['_INFERENCECALCULATOROPTIONS_DELEGATE_XNNPACK']._serialized_end=1532
  _globals['_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG']._serialized_start=1547
  _globals['_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG']._serialized_end=2323
  _globals['_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORINDICESMAP']._serialized_start=2109
  _globals['_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORINDICESMAP']._serialized_end=2161
  _globals['_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORNAMESMAP']._serialized_start=2163
  _globals['_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORNAMESMAP']._serialized_end=2201
  _globals['_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_FEEDBACKTENSORLINK']._serialized_start=2203
  _globals['_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_FEEDBACKTENSORLINK']._serialized_end=2286
# @@protoc_insertion_point(module_scope)
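
if __name__ == '__main__':
  # A minimal usage sketch of the generated classes above; it assumes only
  # that the protobuf runtime is installed. Field names come from the
  # descriptor; the concrete values are illustrative placeholders.
  opts = InferenceCalculatorOptions()
  opts.model_path = 'model.tflite'            # placeholder model path
  opts.delegate.xnnpack.num_threads = 4       # selects the xnnpack oneof arm
  io_cfg = opts.input_output_config
  io_cfg.input_tensor_names_map.tensor_names.append('input_0')
  data = opts.SerializeToString()             # wire-format round trip
  assert InferenceCalculatorOptions.FromString(data) == opts
  print(opts)                                 # text-format dump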
