|
import numpy as np
import cv2
import mediapipe as mp # Import the mediapipe module
from mediapipe.tasks.python import vision
from mediapipe.tasks import python
from google.colab.patches import cv2_imshow # Import cv2_imshow for Colab
# Path to the input image, relative to the current working directory.
IMAGE_FILE = 'image.jpg'
# Define visualize function
def visualize(image, detection_result):
for detection in detection_result.detections:
bbox = detection.bounding_box
start_point = (int(bbox.origin_x), int(bbox.origin_y))
end_point = (int(bbox.origin_x + bbox.width), int(bbox.origin_y + bbox.height))
cv2.rectangle(image, start_point, end_point, (0, 255, 0), 2)
return image
# Load model and image.
base_options = python.BaseOptions(model_asset_path='detector.tflite')
options = vision.FaceDetectorOptions(base_options=base_options)
image = mp.Image.create_from_file(IMAGE_FILE)

# Run detection; the context manager releases the detector's native
# resources once inference is done.
with vision.FaceDetector.create_from_options(options) as detector:
    detection_result = detector.detect(image)

# Draw onto a copy so the original mp.Image buffer stays untouched.
image_copy = np.copy(image.numpy_view())
annotated_image = visualize(image_copy, detection_result)

# mp.Image decodes to RGB channel order, while cv2_imshow expects
# OpenCV's BGR — so convert RGB -> BGR before displaying.
# (COLOR_RGB2BGR performs the same channel swap as the previously-used
# COLOR_BGR2RGB; only the label and variable name were misleading.)
bgr_annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_RGB2BGR)
cv2_imshow(bgr_annotated_image)
|
|