From fa3e23c9f0056a23c32c2062568428ea4ebce0a8 Mon Sep 17 00:00:00 2001
From: touchhowling <devsaransujan@gmail.com>
Date: Sun, 11 Aug 2024 16:15:54 +0530
Subject: [PATCH] Updated the interpreter to avoid confusion

---
 .../beagley-ai-object-detection-tutorial.rst   | 18 ++----------------
 1 file changed, 2 insertions(+), 16 deletions(-)

diff --git a/boards/beagley/ai/demos/beagley-ai-object-detection-tutorial.rst b/boards/beagley/ai/demos/beagley-ai-object-detection-tutorial.rst
index 3bd1ddb0..23c1b033 100644
--- a/boards/beagley/ai/demos/beagley-ai-object-detection-tutorial.rst
+++ b/boards/beagley/ai/demos/beagley-ai-object-detection-tutorial.rst
@@ -113,6 +113,7 @@ Paste the following code into the file:
    import importlib.util
    from typing import List
    import sys
+   from tflite_runtime.interpreter import Interpreter, load_delegate
 
    video_driver_id = 3
 
@@ -159,21 +160,6 @@ Paste the following code into the file:
            print(f"Error reading label map file: {e}")
            sys.exit()
 
-   def load_interpreter(model_path: str, use_tpu: bool):
-       """Loads the TensorFlow Lite model interpreter."""
-       pkg = importlib.util.find_spec('tflite_runtime')
-       if pkg:
-           from tflite_runtime.interpreter import Interpreter, load_delegate
-       else:
-           from tensorflow.lite.python.interpreter import Interpreter
-           from tensorflow.lite.python.interpreter import load_delegate
-
-       if use_tpu:
-           return Interpreter(model_path=model_path,
-                              experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
-       else:
-           return Interpreter(model_path=model_path)
-
    def main():
        # Argument parsing
        parser = argparse.ArgumentParser()
@@ -192,7 +178,7 @@ Paste the following code into the file:
 
        # Load labels and interpreter
        labels = load_labels(labelmap_path)
-       interpreter = load_interpreter(model_path, use_tpu)
+       interpreter = Interpreter(model_path=model_path)
        interpreter.allocate_tensors()
 
        # Get model details
-- 
GitLab