Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 5 additions & 6 deletions runtime/executor/method.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1538,17 +1538,16 @@ Error Method::execute_instruction() {
// We know that instr_args_as_FreeCall is non-null because it was checked
// at init time.
auto free_call = instruction->instr_args_as_FreeCall();
auto& val = mutable_value(free_call->value_index());
if (val.isTensor()) {
auto& t = val.toTensor();
internal::reset_data_ptr(t);
} else {
auto t = mutable_value(free_call->value_index()).tryToTensor();
if (!t.ok()) {
ET_LOG(
Error,
"FreeCall target at index %u is not a Tensor",
static_cast<unsigned int>(free_call->value_index()));
err = Error::InvalidProgram;
err = t.error();
break;
}
internal::reset_data_ptr(t.get());
} break;
default:
ET_LOG(
Expand Down
6 changes: 5 additions & 1 deletion runtime/executor/tensor_parser.h
Original file line number Diff line number Diff line change
Expand Up @@ -97,8 +97,12 @@ ET_NODISCARD Result<BoxedEvalueList<std::optional<T>>> parseListOptionalType(
InvalidProgram,
"Invalid value index %" PRId32 " for ListOptional",
index);
auto optional_result = values[index].tryToOptional<T>();
if (!optional_result.ok()) {
return optional_result.error();
}
new (&optional_tensor_list[output_idx])
std::optional<T>(values[index].toOptional<T>());
std::optional<T>(std::move(optional_result.get()));
evalp_list[output_idx] = &values[static_cast<size_t>(index)];
}
output_idx++;
Expand Down
9 changes: 7 additions & 2 deletions runtime/executor/tensor_parser_exec_aten.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -98,10 +98,15 @@ ET_NODISCARD Result<BoxedEvalueList<executorch::aten::Tensor>> parseTensorList(
"Invalid value index %" PRId32 " for TensorList",
tensor_index);

auto tensor_result =
values[static_cast<size_t>(tensor_index)].tryToTensor();
if (!tensor_result.ok()) {
return tensor_result.error();
}
// Placement new as the list elements are not initialized, so calling
// copy assignment is not defined if it's non trivial.
new (&tensor_list[output_idx]) executorch::aten::Tensor(
values[static_cast<size_t>(tensor_index)].toTensor());
new (&tensor_list[output_idx])
executorch::aten::Tensor(std::move(tensor_result.get()));
evalp_list[output_idx] = &values[static_cast<size_t>(tensor_index)];
output_idx++;
}
Expand Down
62 changes: 62 additions & 0 deletions runtime/executor/test/tensor_parser_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@

#include <executorch/runtime/executor/tensor_parser.h>

#include <cstring>
#include <memory>

#include <executorch/extension/data_loader/file_data_loader.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/tensor_layout.h>
Expand All @@ -19,14 +21,17 @@
using namespace ::testing;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::BoxedEvalueList;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::Program;
using executorch::runtime::Result;
using executorch::runtime::Span;
using executorch::runtime::TensorLayout;
using executorch::runtime::deserialization::parseListOptionalType;
using executorch::runtime::deserialization::parseTensor;
using executorch::runtime::deserialization::parseTensorList;
using executorch::runtime::deserialization::validateTensorLayout;
using executorch::runtime::testing::ManagedMemoryManager;
using torch::executor::util::FileDataLoader;
Expand Down Expand Up @@ -223,3 +228,60 @@ TEST(ValidateTensorLayoutTest, DimOrderSizeMismatchIsRejected) {
EXPECT_EQ(
validateTensorLayout(s_tensor, layout.get()), Error::InvalidExternalData);
}

// Helper to construct a flatbuffers::Vector<int32_t> from raw data.
// FlatBuffer vectors are stored as [uint32_t length][T elements...].
namespace {
struct FlatVectorInt32 {
  // Serializes `elements` into `buf` (which owns the storage and must
  // outlive the returned pointer) and returns a pointer usable as a
  // flatbuffers::Vector<int32_t>.
  static const flatbuffers::Vector<int32_t>* create(
      std::vector<uint8_t>& buf,
      const std::vector<int32_t>& elements) {
    const size_t payload_size =
        sizeof(uint32_t) + elements.size() * sizeof(int32_t);
    // std::vector<uint8_t>::data() is only guaranteed 1-byte aligned, but
    // flatbuffers::Vector reads the uint32_t length (and the int32_t
    // elements) directly through the returned pointer, which requires
    // 4-byte alignment. Over-allocate so the vector start can be aligned.
    buf.resize(payload_size + alignof(uint32_t) - 1);
    void* aligned = buf.data();
    size_t space = buf.size();
    aligned = std::align(alignof(uint32_t), payload_size, aligned, space);
    // Cannot fail: the padding above guarantees an aligned span fits.
    uint8_t* base = static_cast<uint8_t*>(aligned);
    const uint32_t len = static_cast<uint32_t>(elements.size());
    memcpy(base, &len, sizeof(len));
    if (!elements.empty()) {
      memcpy(
          base + sizeof(uint32_t),
          elements.data(),
          elements.size() * sizeof(int32_t));
    }
    return reinterpret_cast<const flatbuffers::Vector<int32_t>*>(base);
  }
};
} // namespace

// parseTensorList should return an error when the EValue at the given index
// is not a Tensor, instead of aborting.
TEST_F(TensorParserTest, ParseTensorListRejectsNonTensorEValue) {
  ManagedMemoryManager mmm(kDefaultNonConstMemBytes, kDefaultRuntimeMemBytes);

  // Fill the EValue table with Ints only, so the referenced slot is not a
  // Tensor.
  EValue evalues[2];
  evalues[0] = EValue(static_cast<int64_t>(42)); // Int, not Tensor
  evalues[1] = EValue(static_cast<int64_t>(7));

  // Serialize a one-element index vector referring to the Int at slot 0.
  std::vector<uint8_t> index_storage;
  const auto* index_vec = FlatVectorInt32::create(index_storage, {0});

  Result<BoxedEvalueList<Tensor>> res =
      parseTensorList(index_vec, evalues, 2, &mmm.get());
  EXPECT_EQ(res.error(), Error::InvalidType);
}

// parseListOptionalType should return an error when the EValue at the given
// index is neither None nor the expected type.
TEST_F(TensorParserTest, ParseListOptionalTypeRejectsWrongType) {
  ManagedMemoryManager mmm(kDefaultNonConstMemBytes, kDefaultRuntimeMemBytes);

  // Fill the EValue table with Ints only, so the referenced slot is neither
  // a Tensor nor None.
  EValue evalues[2];
  evalues[0] = EValue(static_cast<int64_t>(42)); // Int, not Tensor or None
  evalues[1] = EValue(static_cast<int64_t>(7));

  // Serialize a one-element index vector referring to the Int at slot 0.
  std::vector<uint8_t> index_storage;
  const auto* index_vec = FlatVectorInt32::create(index_storage, {0});

  auto res = parseListOptionalType<Tensor>(index_vec, evalues, 2, &mmm.get());
  EXPECT_EQ(res.error(), Error::InvalidType);
}
Loading