//
// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "DelegateOptionsTestHelper.hpp"
#include <common/include/ProfilingGuid.hpp>
#include <armnnUtils/Filesystem.hpp>

namespace armnnDelegate
{

TEST_SUITE("DelegateOptions")
{

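// Each test case below runs a small two-input model through
// DelegateOptionTest (defined in DelegateOptionsTestHelper.hpp); judging by
// the data used, the model computes (input + input) / divData, e.g. for
// inputData { 1, 2, 3, 4 } and divData { 2, 2, 3, 4 }: (1 + 1) / 2 = 1,
// (3 + 3) / 3 = 2, giving expectedResult { 1, 2, 2, 2 }. The CHECKs then
// inspect side effects of the DelegateOptions under test.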
TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
|
|
{
|
|
std::stringstream ss;
|
|
{
|
|
StreamRedirector redirect(std::cout, ss.rdbuf());
|
|
|
|
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
|
|
std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
|
|
std::vector<float> inputData = { 1, 2, 3, 4 };
|
|
std::vector<float> divData = { 2, 2, 3, 4 };
|
|
std::vector<float> expectedResult = { 1, 2, 2, 2 };
|
|
|
|
// Enable ReduceFp32ToFp16
|
|
armnn::OptimizerOptionsOpaque optimizerOptions(true, true, false, false);
|
|
armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
|
|
|
|
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
|
|
tensorShape,
|
|
inputData,
|
|
inputData,
|
|
divData,
|
|
expectedResult,
|
|
delegateOptions);
|
|
}
|
|
// ReduceFp32ToFp16 option is enabled
|
|
CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
|
|
CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
|
|
}
|
|
|
|
TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
|
|
{
|
|
std::stringstream ss;
|
|
{
|
|
StreamRedirector redirect(std::cout, ss.rdbuf());
|
|
|
|
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
|
|
std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
|
|
std::vector<float> inputData = { 1, 2, 3, 4 };
|
|
std::vector<float> divData = { 2, 2, 3, 4 };
|
|
std::vector<float> expectedResult = { 1, 2, 2, 2 };
|
|
|
|
// Enable Debug
|
|
armnn::OptimizerOptionsOpaque optimizerOptions(false, true, false, false);
|
|
armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
|
|
|
|
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
|
|
tensorShape,
|
|
inputData,
|
|
inputData,
|
|
divData,
|
|
expectedResult,
|
|
delegateOptions);
|
|
}
|
|
// Debug option triggered.
|
|
CHECK(ss.str().find("layerGuid") != std::string::npos);
|
|
CHECK(ss.str().find("layerName") != std::string::npos);
|
|
CHECK(ss.str().find("outputSlot") != std::string::npos);
|
|
CHECK(ss.str().find("shape") != std::string::npos);
|
|
CHECK(ss.str().find("data") != std::string::npos);
|
|
}
|
|
|
|
TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
|
|
{
|
|
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
|
|
std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
|
|
std::vector<float> inputData = { 1, 2, 3, 4 };
|
|
std::vector<float> divData = { 2, 2, 3, 4 };
|
|
std::vector<float> expectedResult = { 1, 2, 2, 2 };
|
|
|
|
// Enable debug with debug callback function
|
|
armnn::OptimizerOptionsOpaque optimizerOptions(false, true, false, false);
|
|
bool callback = false;
|
|
auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
|
|
{
|
|
armnn::IgnoreUnused(guid);
|
|
armnn::IgnoreUnused(slotIndex);
|
|
armnn::IgnoreUnused(tensor);
|
|
callback = true;
|
|
};
|
|
|
|
armnn::INetworkProperties networkProperties(false, armnn::MemorySource::Undefined, armnn::MemorySource::Undefined);
|
|
armnnDelegate::DelegateOptions delegateOptions(backends,
|
|
optimizerOptions,
|
|
armnn::EmptyOptional(),
|
|
armnn::Optional<armnn::DebugCallbackFunction>(mockCallback));
|
|
|
|
CHECK(!callback);
|
|
|
|
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
|
|
tensorShape,
|
|
inputData,
|
|
inputData,
|
|
divData,
|
|
expectedResult,
|
|
delegateOptions);
|
|
|
|
// Check that the debug callback function was called.
|
|
CHECK(callback);
|
|
}
|
|
|
|
TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
|
|
{
|
|
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
|
|
std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
|
|
std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
|
|
std::vector<uint8_t> divData = { 2, 2, 3, 4 };
|
|
std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };
|
|
|
|
armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false, true);
|
|
armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
|
|
|
|
DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
|
|
tensorShape,
|
|
inputData,
|
|
inputData,
|
|
divData,
|
|
expectedResult,
|
|
delegateOptions);
|
|
}
|
|
|
|
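// The two tests below construct DelegateOptions from raw options_keys /
// options_values string arrays, the same form in which options arrive
// through TfLite's external delegate interface.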
TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback")
|
|
{
|
|
std::stringstream stringStream;
|
|
std::vector<std::string> keys { "backends", "debug-data", "disable-tflite-runtime-fallback"};
|
|
std::vector<std::string> values { "CpuRef", "1", "1"};
|
|
|
|
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
|
|
std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
|
|
std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
|
|
std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };
|
|
|
|
// Create options_keys and options_values char array
|
|
size_t num_options = keys.size();
|
|
std::unique_ptr<const char*> options_keys =
|
|
std::unique_ptr<const char*>(new const char*[num_options + 1]);
|
|
std::unique_ptr<const char*> options_values =
|
|
std::unique_ptr<const char*>(new const char*[num_options + 1]);
|
|
for (size_t i=0; i<num_options; ++i)
|
|
{
|
|
options_keys.get()[i] = keys[i].c_str();
|
|
options_values.get()[i] = values[i].c_str();
|
|
}
|
|
|
|
StreamRedirector redirect(std::cout, stringStream.rdbuf());
|
|
|
|
armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
|
|
DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
|
|
tensorShape,
|
|
inputData,
|
|
expectedResult,
|
|
delegateOptions);
|
|
CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
|
|
!= std::string::npos);
|
|
}
|
|
|
|
TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback")
|
|
{
|
|
std::stringstream stringStream;
|
|
std::vector<std::string> keys { "backends", "debug-data", "disable-tflite-runtime-fallback"};
|
|
std::vector<std::string> values { "CpuRef", "1", "0"};
|
|
|
|
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
|
|
std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
|
|
std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
|
|
std::vector<float> expectedResult = { 0.995004177f, -0.504846036f, -0.989992499f, -0.112152621f };
|
|
|
|
// Create options_keys and options_values char array
|
|
size_t num_options = keys.size();
|
|
std::unique_ptr<const char*> options_keys =
|
|
std::unique_ptr<const char*>(new const char*[num_options + 1]);
|
|
std::unique_ptr<const char*> options_values =
|
|
std::unique_ptr<const char*>(new const char*[num_options + 1]);
|
|
for (size_t i=0; i<num_options; ++i)
|
|
{
|
|
options_keys.get()[i] = keys[i].c_str();
|
|
options_values.get()[i] = values[i].c_str();
|
|
}
|
|
|
|
StreamRedirector redirect(std::cout, stringStream.rdbuf());
|
|
|
|
armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
|
|
DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
|
|
tensorShape,
|
|
inputData,
|
|
expectedResult,
|
|
delegateOptions);
|
|
|
|
CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
|
|
== std::string::npos);
|
|
}
|
|
|
|
} // TEST_SUITE("DelegateOptions")

TEST_SUITE("DelegateOptions_CpuAccTests")
{

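// These tests exercise the CpuAcc (Neon) backend and are grouped in a
// separate suite so runs on platforms without CpuAcc can skip them.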
TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
|
|
{
|
|
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
|
|
std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
|
|
std::vector<float> inputData = { 1, 2, 3, 4 };
|
|
std::vector<float> divData = { 2, 2, 3, 4 };
|
|
std::vector<float> expectedResult = { 1, 2, 2, 2 };
|
|
|
|
unsigned int numberOfThreads = 2;
|
|
|
|
armnn::ModelOptions modelOptions;
|
|
armnn::BackendOptions cpuAcc("CpuAcc",
|
|
{
|
|
{ "FastMathEnabled", true },
|
|
{ "NumberOfThreads", numberOfThreads }
|
|
});
|
|
modelOptions.push_back(cpuAcc);
|
|
|
|
armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false,
|
|
false, modelOptions, false);
|
|
armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
|
|
|
|
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
|
|
tensorShape,
|
|
inputData,
|
|
inputData,
|
|
divData,
|
|
expectedResult,
|
|
delegateOptions);
|
|
}
|
|
|
|
TEST_CASE ("ArmnnDelegateSerializeToDot")
|
|
{
|
|
const fs::path filename(fs::temp_directory_path() / "ArmnnDelegateSerializeToDot.dot");
|
|
if ( fs::exists(filename) )
|
|
{
|
|
fs::remove(filename);
|
|
}
|
|
std::stringstream ss;
|
|
{
|
|
StreamRedirector redirect(std::cout, ss.rdbuf());
|
|
|
|
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
|
|
std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
|
|
std::vector<float> inputData = { 1, 2, 3, 4 };
|
|
std::vector<float> divData = { 2, 2, 3, 4 };
|
|
std::vector<float> expectedResult = { 1, 2, 2, 2 };
|
|
|
|
armnn::OptimizerOptionsOpaque optimizerOptions(false, false,
|
|
false, false);
|
|
armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
|
|
// Enable serialize to dot by specifying the target file name.
|
|
delegateOptions.SetSerializeToDot(filename);
|
|
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
|
|
tensorShape,
|
|
inputData,
|
|
inputData,
|
|
divData,
|
|
expectedResult,
|
|
delegateOptions);
|
|
}
|
|
CHECK(fs::exists(filename));
|
|
// The file should have a size greater than 0 bytes.
|
|
CHECK(fs::file_size(filename) > 0);
|
|
// Clean up.
|
|
fs::remove(filename);
|
|
}
|
|
|
|
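// Shared helper for the string-parsing subcases below: builds
// DelegateOptions from the given key/value pairs, runs the standard
// two-input model on CpuRef, and captures std::cout output in ss for the
// caller to inspect.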
void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
                                    std::vector<std::string>& values,
                                    std::stringstream& ss)
{
    StreamRedirector redirect(std::cout, ss.rdbuf());

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };

    // Create options_keys and options_values char arrays.
    // Use the array form of unique_ptr so delete[] (not delete) is called on destruction.
    size_t num_options = keys.size();
    std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
    std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);
    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys[i]   = keys[i].c_str();
        options_values[i] = values[i].c_str();
    }

    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(),
                                                   num_options, nullptr);
    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);
}

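// The option parser should treat "1", "true", "True" and "TRUE" as enabled
// and "0" / "false" as disabled; each subcase checks whether the
// fp32<->fp16 conversion layers were inserted accordingly.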
TEST_CASE ("ArmnnDelegateStringParsingOptionReduceFp32ToFp16")
|
|
{
|
|
SUBCASE("Fp16=1")
|
|
{
|
|
std::stringstream ss;
|
|
std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16", "logging-severity"};
|
|
std::vector<std::string> values { "CpuRef", "1", "1", "info"};
|
|
CreateFp16StringParsingTestRun(keys, values, ss);
|
|
CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
|
|
CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
|
|
}
|
|
SUBCASE("Fp16=true")
|
|
{
|
|
std::stringstream ss;
|
|
std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
|
|
std::vector<std::string> values { "CpuRef", "TRUE", "true"};
|
|
CreateFp16StringParsingTestRun(keys, values, ss);
|
|
CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
|
|
CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
|
|
}
|
|
SUBCASE("Fp16=True")
|
|
{
|
|
std::stringstream ss;
|
|
std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
|
|
std::vector<std::string> values { "CpuRef", "true", "True"};
|
|
CreateFp16StringParsingTestRun(keys, values, ss);
|
|
CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
|
|
CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
|
|
}
|
|
SUBCASE("Fp16=0")
|
|
{
|
|
std::stringstream ss;
|
|
std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
|
|
std::vector<std::string> values { "CpuRef", "true", "0"};
|
|
CreateFp16StringParsingTestRun(keys, values, ss);
|
|
CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
|
|
CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
|
|
}
|
|
SUBCASE("Fp16=false")
|
|
{
|
|
std::stringstream ss;
|
|
std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
|
|
std::vector<std::string> values { "CpuRef", "1", "false"};
|
|
CreateFp16StringParsingTestRun(keys, values, ss);
|
|
CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
|
|
CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
|
|
}
|
|
}
|
|
|
|
} // TEST_SUITE("DelegateOptions_CpuAccTests")

} // namespace armnnDelegate