diff --git a/tensorflow/lite/micro/kernels/pad_common.cc b/tensorflow/lite/micro/kernels/pad_common.cc
index aceb861b946..f6e38cfc0d2 100644
--- a/tensorflow/lite/micro/kernels/pad_common.cc
+++ b/tensorflow/lite/micro/kernels/pad_common.cc
@@ -69,7 +69,8 @@ TfLiteStatus PadPrepare(TfLiteContext* context, TfLiteNode* node) {
   // On Micro, outputs must be properly sized by the converter.
   // NOTE: This data is only available because the paddings buffer is stored in
   // the flatbuffer:
-  TF_LITE_ENSURE(context, IsConstantTensor(paddings));
+  TF_LITE_ENSURE_MSG(context, IsConstantTensor(paddings),
+                     "Non-constant >paddings< tensor is not supported");
   const int32_t* paddings_data = GetTensorData<int32_t>(paddings);
   for (int i = 0; i < output->dims->size; i++) {
     int output_dim = output->dims->data[i];
diff --git a/tensorflow/lite/micro/kernels/resize_bilinear.cc b/tensorflow/lite/micro/kernels/resize_bilinear.cc
index ab54e8114ac..5d5fac8a2c1 100644
--- a/tensorflow/lite/micro/kernels/resize_bilinear.cc
+++ b/tensorflow/lite/micro/kernels/resize_bilinear.cc
@@ -1,4 +1,4 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2025 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -50,7 +50,7 @@ TfLiteStatus ResizeBilinearPrepare(TfLiteContext* context, TfLiteNode* node) {
   output->type = input->type;
 
   TF_LITE_ENSURE_MSG(context, IsConstantTensor(size),
-                     "Non constant size tensor not supported");
+                     "Non-constant >size< tensor is not supported");
 
   // Ensure params are valid.
   auto* params =
diff --git a/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc b/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc
index ef2d35de52e..5e57e361c13 100644
--- a/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc
+++ b/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2025 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -54,10 +54,8 @@ TfLiteStatus ResizeNearestNeighborPrepare(TfLiteContext* context,
 
   output->type = input->type;
 
-  if (!IsConstantTensor(size)) {
-    MicroPrintf("Dynamic tensors are unsupported in tfmicro.");
-    return kTfLiteError;
-  }
+  TF_LITE_ENSURE_MSG(context, IsConstantTensor(size),
+                     "Non-constant >size< tensor is not supported");
 
   micro_context->DeallocateTempTfLiteTensor(input);
   micro_context->DeallocateTempTfLiteTensor(size);
diff --git a/tensorflow/lite/micro/kernels/split.cc b/tensorflow/lite/micro/kernels/split.cc
index cae7074b15e..a795d5a49e7 100644
--- a/tensorflow/lite/micro/kernels/split.cc
+++ b/tensorflow/lite/micro/kernels/split.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2025 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -76,7 +76,7 @@ TfLiteStatus SplitPrepare(TfLiteContext* context, TfLiteNode* node) {
   // But Micro doesn't support dynamic memory allocation, so we only support
   // constant axis tensor for now.
   TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis),
-                     "Non constant axis tensor not supported");
+                     "Non-constant >axis< tensor is not supported");
   micro_context->DeallocateTempTfLiteTensor(axis);
 
   return kTfLiteOk;
diff --git a/tensorflow/lite/micro/kernels/split_v.cc b/tensorflow/lite/micro/kernels/split_v.cc
index ad96a20d51d..e23615103dc 100644
--- a/tensorflow/lite/micro/kernels/split_v.cc
+++ b/tensorflow/lite/micro/kernels/split_v.cc
@@ -1,4 +1,4 @@
-/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2025 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -80,7 +80,7 @@ TfLiteStatus SplitVPrepare(TfLiteContext* context, TfLiteNode* node) {
   // constant axis tensor for now.
   TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 2);
   TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis),
-                     "Non constant axis tensor not supported");
+                     "Non-constant >axis< tensor is not supported");
   micro_context->DeallocateTempTfLiteTensor(axis);
   return kTfLiteOk;
 }
diff --git a/tensorflow/lite/micro/kernels/xtensa/pad.cc b/tensorflow/lite/micro/kernels/xtensa/pad.cc
index d822c289db5..b8ebf3bde0b 100644
--- a/tensorflow/lite/micro/kernels/xtensa/pad.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/pad.cc
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2025 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -91,7 +91,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   // On Micro, outputs must be properly sized by the converter.
   // NOTE: This data is only available because the paddings buffer is stored in
   // the flatbuffer:
-  TF_LITE_ENSURE(context, IsConstantTensor(paddings));
+  TF_LITE_ENSURE_MSG(context, IsConstantTensor(paddings),
+                     "Non-constant >paddings< tensor is not supported");
   const int32_t* paddings_data = GetTensorData<int32_t>(paddings);
   for (int i = 0; i < output->dims->size; i++) {
     int output_dim = output->dims->data[i];