[Acceleration Service] Let validator return golden output and actual output in Validator::Results.
PiperOrigin-RevId: 468658896
This commit is contained in:
parent 17768c18f9
commit 28bcc6ccd5
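For context (not part of the commit diff below): a minimal sketch of how a caller might drive Validator and read the two maps this change adds to Validator::Results. The RunAndReport helper name and the tflite::acceleration namespace wrapper are assumptions for illustration; the Validator calls themselves mirror what validator_test.cc does.

// Sketch only; assumes the mini_benchmark header layout and namespace.
#include <memory>
#include <utility>
#include <vector>

#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/minimal_logging.h"

namespace tflite {
namespace acceleration {

// Hypothetical driver: model_loader and settings are prepared the same way
// the ValidatorTest fixture prepares them.
MinibenchmarkStatus RunAndReport(std::unique_ptr<ModelLoader> model_loader,
                                 const ComputeSettings* settings) {
  Validator validator(std::move(model_loader), settings);
  Validator::Results results;
  MinibenchmarkStatus status = validator.RunValidation(&results);
  if (status != kMinibenchmarkSuccess) return status;
  // With this change, both the CPU ("golden") output and the delegate
  // ("actual") output are available, keyed by output tensor name.
  for (const auto& entry : results.golden_inference_output) {
    const std::vector<char>& actual =
        results.actual_inference_output[entry.first];
    TFLITE_LOG_PROD(TFLITE_LOG_INFO, "%s: golden %zu bytes, actual %zu bytes",
                    entry.first.c_str(), entry.second.size(), actual.size());
  }
  return status;
}

}  // namespace acceleration
}  // namespace tflite

The diff below adds the two maps to Results, fills them in CheckGoldenOutput and RunValidation, and extends the tests accordingly.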
@@ -407,6 +407,7 @@ cc_library(
        ":status_codes",
        "//tensorflow/lite:framework",
        "//tensorflow/lite:minimal_logging",
        "//tensorflow/lite/c:c_api",
        "//tensorflow/lite/c:common",
        "//tensorflow/lite/core/api",
        "//tensorflow/lite/experimental/acceleration/configuration:configuration_fbs",
@@ -27,6 +27,8 @@ limitations under the License.
#include <vector>

#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
@@ -59,6 +61,14 @@ std::unique_ptr<tflite::delegates::DelegatePluginInterface> LoadDelegatePlugin(
      name + "Plugin", tflite_settings);
}

+void AddTensorDataToMap(TfLiteTensor* tensor,
+                        std::map<std::string, std::vector<char>>& output_map) {
+  std::vector<char> char_output(TfLiteTensorByteSize(tensor));
+  memcpy(char_output.data(), TfLiteTensorData(tensor),
+         TfLiteTensorByteSize(tensor));
+  output_map.emplace(TfLiteTensorName(tensor), std::move(char_output));
+}
+
constexpr int64_t kMicrosInSecond = 1000 * 1000;
constexpr int64_t kNanosInMicro = 1000;
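Aside, not part of the diff: AddTensorDataToMap stores each tensor verbatim as raw bytes keyed by TfLiteTensorName. A consumer that knows an output tensor holds float32 data could decode one stored entry along these lines; the ToFloats helper is hypothetical and the float32 assumption is the caller's to verify.

#include <cstring>
#include <vector>

// Reinterprets one stored byte vector as float32 values. Assumes the tensor
// type really is kTfLiteFloat32; memcpy sidesteps alignment concerns.
std::vector<float> ToFloats(const std::vector<char>& bytes) {
  std::vector<float> values(bytes.size() / sizeof(float));
  memcpy(values.data(), bytes.data(), values.size() * sizeof(float));
  return values;
}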
@@ -109,7 +119,7 @@ class ValidatorProfiler : public ::tflite::Profiler {

}  // namespace

-MinibenchmarkStatus Validator::CheckGoldenOutput() {
+MinibenchmarkStatus Validator::CheckGoldenOutput(Results* results_out) {
  if (!interpreter_ || !model_loader_->GetModel()) {
    return kMinibenchmarkPreconditionNotMet;
  }
@@ -143,11 +153,17 @@ MinibenchmarkStatus Validator::CheckGoldenOutput() {
  }

  // Check if we have validation data embedded or need to run CPU for it. If
-  // the data is embedded, there is already an allocation for it from the model,
-  // and we can skip running it on CPU.
+  // the data is embedded, there is already an allocation for it from the model.
+  // We can skip running it on CPU, and copy the embedded golden output to
+  // results_out.
  TfLiteTensor* first_input_tensor =
      validation_entrypoint_->tensor(validation_entrypoint_->inputs()[0]);
  if (first_input_tensor->allocation) {
+    for (int i = 0; i < validation_entrypoint_->inputs().size() - 1; i++) {
+      AddTensorDataToMap(
+          validation_entrypoint_->tensor(validation_entrypoint_->inputs()[i]),
+          results_out->golden_inference_output);
+    }
    return kMinibenchmarkSuccess;
  }
@@ -169,7 +185,7 @@ MinibenchmarkStatus Validator::CheckGoldenOutput() {
  for (int i = 0; i < golden_validation_entrypoint->inputs().size() - 1; i++) {
    TfLiteTensor* input_tensor = golden_validation_entrypoint->tensor(
        golden_validation_entrypoint->inputs()[i]);
-    memset(input_tensor->data.raw, 0, input_tensor->bytes);
+    memset(input_tensor->data.data, 0, input_tensor->bytes);
  }

  if (golden_validation_entrypoint->Invoke() != kTfLiteOk) {
@@ -179,13 +195,17 @@ MinibenchmarkStatus Validator::CheckGoldenOutput() {
  for (int i = 0; i < validation_entrypoint_->inputs().size() - 1; i++) {
    TfLiteTensor* input_tensor =
        validation_entrypoint_->tensor(validation_entrypoint_->inputs()[i]);
-    TfLiteTensor* golden_tensor = golden_validation_entrypoint->tensor(
+    TfLiteTensor* golden_output_tensor = golden_validation_entrypoint->tensor(
        golden_validation_entrypoint->outputs()[i]);
-    if (input_tensor->bytes != golden_tensor->bytes) {
+    if (input_tensor->bytes != golden_output_tensor->bytes) {
      return kMinibenchmarkValidationSubgraphInputsDontMatchOutputs;
    }
-    memcpy(input_tensor->data.raw, golden_tensor->data.raw,
-           input_tensor->bytes);
+
+    memcpy(input_tensor->data.data, golden_output_tensor->data.data,
+           golden_output_tensor->bytes);
+
+    AddTensorDataToMap(golden_output_tensor,
+                       results_out->golden_inference_output);
  }

  return kMinibenchmarkSuccess;
@@ -323,7 +343,7 @@ MinibenchmarkStatus Validator::RunValidation(Results* results_out) {
  MB_RETURN_IF_ERROR(CreateInterpreter(&results_out->delegate_error,
                                       &results_out->delegated_kernels));
  int64_t delegate_load_end_time_us = ElapsedTimeMicros();
-  MB_RETURN_IF_ERROR(CheckGoldenOutput());
+  MB_RETURN_IF_ERROR(CheckGoldenOutput(results_out));
  ValidatorProfiler profiler;
  main_model_->SetProfiler(&profiler, 0);
  TfLiteStatus status = validation_entrypoint_->Invoke();
@@ -331,10 +351,22 @@ MinibenchmarkStatus Validator::RunValidation(Results* results_out) {
  if (status != kTfLiteOk) {
    return kMinibenchmarkInvokeFailed;
  }

  // Create results_out.
+  int model_output_size = main_model_->outputs().size();
+  // Model output.
+  for (int i = 0; i < model_output_size; i++) {
+    AddTensorDataToMap(
+        validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]),
+        results_out->actual_inference_output);
+  }
+  // Accuracy metrics.
  const std::string kMetricPrefix = "metrics/";
  const std::string kOk("ok");
-  for (int i : validation_entrypoint_->outputs()) {
-    TfLiteTensor* tensor = validation_entrypoint_->tensor(i);
+  for (int i = model_output_size; i < validation_entrypoint_->outputs().size();
+       i++) {
+    TfLiteTensor* tensor =
+        validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]);
    std::string name = tensor->name;
    if (name.find(kMetricPrefix) != 0) {  // NOLINT
      continue;
@@ -359,6 +391,7 @@ MinibenchmarkStatus Validator::RunValidation(Results* results_out) {
  }
  TFLITE_LOG_PROD(TFLITE_LOG_INFO, "  accuracy: %s",
                  results_out->ok ? "ok" : "not ok");
+  // Performance metrics.
  results_out->delegate_prep_time_us =
      (delegate_load_end_time_us == -1 || delegate_load_start_time_us == -1)
          ? -1
@@ -62,8 +62,16 @@ class Validator {
    std::vector<int64_t> execution_time_us;
    // Any possible error from the delegate.
    int delegate_error = 0;
-    // Number of delegated kernels
+    // Number of delegated kernels.
    int delegated_kernels = 0;
+    // Model output without the delegate.
+    // key: output tensor name.
+    // value: output tensor data in byte format.
+    std::map<std::string, std::vector<char>> golden_inference_output;
+    // Model output with the delegate.
+    // key: output tensor name;
+    // value: output tensor data in byte format.
+    std::map<std::string, std::vector<char>> actual_inference_output;
  };

  // Run the validation graph and return validation results.
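Not part of the diff: given the two maps added to Results above, an exact byte-for-byte comparison of the delegate output against the CPU golden output could look like this sketch. OutputsMatchGolden is a hypothetical helper; the updated test further down does the equivalent with EXPECT_THAT and testing::ContainerEq.

#include <map>
#include <string>
#include <vector>

// Returns true iff every golden output tensor has a byte-identical
// counterpart in the actual (delegate) output.
bool OutputsMatchGolden(
    const std::map<std::string, std::vector<char>>& golden,
    const std::map<std::string, std::vector<char>>& actual) {
  for (const auto& entry : golden) {
    auto it = actual.find(entry.first);
    if (it == actual.end() || it->second != entry.second) return false;
  }
  return true;
}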
@@ -87,8 +95,9 @@ class Validator {
  MinibenchmarkStatus CreateInterpreter(int* delegate_error_out,
                                        int* delegated_kernels_out);

-  // Check if the golden output exists. If not, run Model on CPU.
-  MinibenchmarkStatus CheckGoldenOutput();
+  // Check if the golden output exists. If not, run Model on CPU and add golden
+  // output to model_. Also fills results_out with the golden output.
+  MinibenchmarkStatus CheckGoldenOutput(Results* results_out);

  std::unique_ptr<ModelLoader> model_loader_;
  const ComputeSettings* compute_settings_;
@@ -14,10 +14,12 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"

#include <map>
#include <memory>
#include <string>
#include <utility>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/experimental/acceleration/configuration/configuration.pb.h"
@@ -59,17 +61,36 @@ class ValidatorTest : public ::testing::Test {
  std::unique_ptr<ModelLoader> plain_model_loader_;
};

-TEST_F(ValidatorTest, HappyPath) {
+TEST_F(ValidatorTest, HappyPathOnCpu) {
  flatbuffers::FlatBufferBuilder fbb;
  fbb.Finish(CreateComputeSettings(fbb));
  const ComputeSettings* settings =
      flatbuffers::GetRoot<ComputeSettings>(fbb.GetBufferPointer());
+  ASSERT_EQ(validation_model_loader_->Init(), kMinibenchmarkSuccess);
+  int model_output_size = validation_model_loader_->GetModel()
+                              ->GetModel()
+                              ->subgraphs()
+                              ->Get(0)
+                              ->outputs()
+                              ->size();

  Validator validator(std::move(validation_model_loader_), settings);
  Validator::Results results;
  EXPECT_EQ(validator.RunValidation(&results), kMinibenchmarkSuccess);
  EXPECT_TRUE(results.ok);
  EXPECT_EQ(results.delegate_error, 0);
+  EXPECT_EQ(results.actual_inference_output.size(), model_output_size);
+  EXPECT_EQ(results.golden_inference_output.size(), model_output_size);
+  // Only compares the output value when running on forge or local host. The
+  // golden output is generated at build time, while actual output is generated
+  // at run time. When running on Android, these two outputs may generated on
+  // different machines and have diffs.
+#ifndef __ANDROID__
+  for (auto expected : results.golden_inference_output) {
+    EXPECT_THAT(results.actual_inference_output[expected.first],
+                testing::ContainerEq(expected.second));
+  }
+#endif  // __ANDROID__
}

TEST_F(ValidatorTest, DelegateNotSupported) {
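Not part of the diff: the comment in HappyPathOnCpu notes that the golden and actual outputs can legitimately differ when they are produced on different machines, which is why the byte-exact ContainerEq check is skipped on Android. In that situation a tolerance-based check might be used instead; a sketch assuming float32 output tensors (ApproximatelyEqual is hypothetical):

#include <cmath>
#include <cstring>
#include <vector>

// Element-wise comparison of two float32 tensors stored as raw bytes.
bool ApproximatelyEqual(const std::vector<char>& golden,
                        const std::vector<char>& actual, float tolerance) {
  if (golden.size() != actual.size()) return false;
  const size_t n = golden.size() / sizeof(float);
  std::vector<float> g(n), a(n);
  memcpy(g.data(), golden.data(), n * sizeof(float));
  memcpy(a.data(), actual.data(), n * sizeof(float));
  for (size_t i = 0; i < n; ++i) {
    if (std::fabs(g[i] - a[i]) > tolerance) return false;
  }
  return true;
}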
@@ -78,7 +99,7 @@ TEST_F(ValidatorTest, DelegateNotSupported) {
  flatbuffers::FlatBufferBuilder fbb;
  const ComputeSettings* settings = ConvertFromProto(settings_proto, &fbb);

-  Validator validator(std::move(plain_model_loader_), settings);
+  Validator validator(std::move(validation_model_loader_), settings);
  Validator::Results results;
  EXPECT_EQ(validator.RunValidation(&results),
            kMinibenchmarkDelegateNotSupported);