from transformers import pipeline
from PIL import Image
import numpy as np
import cv2  # OpenCV for better mask processing

# Initialize the clothing segmentation pipeline
segmenter = pipeline(model="mattmdjaga/segformer_b2_clothes")


def segment_clothing(img, clothes=["Hat", "Upper-clothes", "Skirt", "Pants", "Dress",
                                   "Belt", "Left-shoe", "Right-shoe", "Scarf"]):
    # Segment the image
    segments = segmenter(img)

    # Collect the masks for the requested clothing labels
    mask_list = []
    for s in segments:
        if s['label'] in clothes:
            # Convert the PIL mask to a numpy array and ensure it's uint8
            mask_list.append(np.array(s['mask'], dtype=np.uint8))

    # If no clothing regions were detected, return the original image unchanged (as RGBA)
    if not mask_list:
        return img.convert("RGBA")

    # Initialize the final mask with zeros
    final_mask = np.zeros_like(mask_list[0], dtype=np.uint8)

    # Combine all per-label masks into one
    for mask in mask_list:
        final_mask = np.maximum(final_mask, mask)

    # Optional: dilate the mask to ensure coverage at the edges
    kernel = np.ones((5, 5), np.uint8)
    final_mask = cv2.dilate(final_mask, kernel, iterations=2)

    # Optional: erode to slightly smooth the mask back
    final_mask = cv2.erode(final_mask, kernel, iterations=1)

    # Optional: fill contours so all areas inside the outer boundaries are covered
    contours, _ = cv2.findContours(final_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(final_mask, contours, -1, 255, thickness=cv2.FILLED)

    # Apply a Gaussian blur to smooth edges and reduce noise
    final_mask = cv2.GaussianBlur(final_mask, (7, 7), 0)

    # Re-threshold to a binary (0 or 255) mask for use as an alpha channel
    _, final_mask = cv2.threshold(final_mask, 127, 255, cv2.THRESH_BINARY)

    # Convert the final mask from a numpy array back to a PIL image
    final_mask = Image.fromarray(final_mask)

    # Apply the mask to the original image (convert to RGBA first)
    img = img.convert("RGBA")
    img.putalpha(final_mask)

    return img
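
# A minimal usage sketch (assumption: "person.jpg" and "clothing_only.png" are
# hypothetical file names; any RGB photo of a person should work).
if __name__ == "__main__":
    input_image = Image.open("person.jpg")
    result = segment_clothing(input_image)  # RGBA image: clothing opaque, everything else transparent
    result.save("clothing_only.png")        # save as PNG to preserve the alpha channel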