# Copyright 2025 Robotics Group of the University of León (ULE)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Anonymize a YOLO-format image dataset by blurring every detected person.

For each ``*.jpg`` under the ``train/images``, ``test/images`` and
``val/images`` directories next to this script, people are located with a
YOLO segmentation model, their masks are refined with SAM (box-prompted),
and the masked pixels are Gaussian-blurred. Each image file is overwritten
in place.
"""
import glob
import os

import cv2
import numpy as np
import torch
from segment_anything import sam_model_registry, SamPredictor
from tqdm import tqdm
from ultralytics import YOLO

# COCO class id for "person" in the pretrained YOLO model.
PERSON_CLASS_ID = 0
# Kernel size of the anonymizing blur; both dimensions must be odd for
# cv2.GaussianBlur.
BLUR_KERNEL = (51, 51)

CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
IMAGE_DIRS = ["train/images", "test/images", "val/images"]


def _load_models():
    """Load the YOLO detector and the SAM mask predictor.

    Returns:
        tuple: ``(yolo_model, sam_predictor)``; SAM is moved to the GPU when
        CUDA is available, otherwise it stays on the CPU.
    """
    yolo_model = YOLO("yolo11x-seg.pt", verbose=False)
    sam_checkpoint = os.path.join(CURRENT_DIR, "sam_vit_h_4b8939.pth")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    sam = sam_model_registry["vit_h"](checkpoint=sam_checkpoint)
    sam.to(device=device)
    return yolo_model, SamPredictor(sam)


def _combined_person_mask(result, sam_predictor):
    """OR together the SAM masks of every person detection in ``result``.

    Args:
        result: one ultralytics prediction result (``results[0]``) whose
            ``boxes`` attribute is non-empty.
        sam_predictor: a SamPredictor whose image has already been set.

    Returns:
        np.ndarray | None: boolean H x W mask covering all detected people,
        or ``None`` when no box belongs to the person class.
    """
    combined = None
    for j, class_id in enumerate(result.boxes.cls):
        if int(class_id) != PERSON_CLASS_ID:
            continue
        box = result.boxes.xyxy[j].cpu().numpy()
        # Single-mask output: SAM returns the best mask for the box prompt.
        masks, _scores, _ = sam_predictor.predict(box=box, multimask_output=False)
        person_mask = masks[0].astype(bool)
        combined = person_mask if combined is None else (combined | person_mask)
    return combined


def _anonymize_image(image_file, yolo_model, sam_predictor):
    """Blur every person in ``image_file``, overwriting the file in place.

    Silently skips files that cannot be decoded and images in which no
    person is detected (those files are left untouched).
    """
    original_image = cv2.imread(image_file)
    if original_image is None:
        return
    # Run detection on the already-loaded image instead of re-reading the
    # file from disk.
    results = yolo_model.predict(original_image, verbose=False)
    result = results[0]
    if result.boxes is None or len(result.boxes) == 0:
        return
    # Skip the expensive SAM image embedding when no detection is a person.
    if not any(int(c) == PERSON_CLASS_ID for c in result.boxes.cls):
        return
    # SAM expects RGB; OpenCV loads BGR.
    sam_predictor.set_image(cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))
    mask = _combined_person_mask(result, sam_predictor)
    if mask is None:
        return
    # Blur the original once and composite through the combined mask, instead
    # of re-blurring the working image per person (which double-blurred
    # overlapping masks and cost one full-frame blur per detection).
    blurred_full = cv2.GaussianBlur(original_image, BLUR_KERNEL, 0)
    output = np.where(mask[:, :, None], blurred_full, original_image).astype(np.uint8)
    cv2.imwrite(image_file, output)


def main():
    """Process every ``*.jpg`` under the train/test/val image directories."""
    yolo_model, sam_predictor = _load_models()
    for directory in IMAGE_DIRS:
        dir_path = os.path.join(CURRENT_DIR, directory)
        if not os.path.exists(dir_path):
            continue
        image_files = glob.glob(os.path.join(dir_path, "*.jpg"))
        for image_file in tqdm(image_files, desc=f"Processing {directory}"):
            _anonymize_image(image_file, yolo_model, sam_predictor)


if __name__ == "__main__":
    main()