Spaces:
Running
Running
| from PIL import Image | |
| from sklearn.cluster import KMeans | |
| import numpy as np | |
# K-Means color quantization
def kmeans_quantization(image, n_colors):
    """Quantize an image to n_colors colors using K-Means clustering.

    Parameters:
    - image: input PIL Image object (any mode; converted to RGB internally)
    - n_colors: target number of colors (number of K-Means clusters)

    Returns:
    - quantized PIL Image object in RGB mode
    """
    img = image.convert("RGB")
    img_np = np.array(img)
    # NOTE: numpy image arrays are (height, width, channels) — the original
    # code named these (w, h, d), which was misleading though harmless.
    h, w, d = img_np.shape
    pixels = img_np.reshape((h * w, d))
    # Cluster all pixels in RGB space; each cluster centroid becomes a palette color.
    kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(pixels)
    centroids = kmeans.cluster_centers_.astype(int)
    # Replace every pixel with its cluster's centroid color.
    quantized_pixels = centroids[kmeans.labels_]
    quantized_img = quantized_pixels.reshape((h, w, d))
    return Image.fromarray(np.uint8(quantized_img))
# Median Cut color quantization
def median_cut_quantization(image, n_colors):
    """Quantize an image to n_colors colors using Pillow's median-cut palette.

    Parameters:
    - image: input PIL Image object
    - n_colors: target number of colors

    Returns:
    - quantized PIL Image object in RGB mode
    """
    # dither=Image.NONE: without it, convert("P", ...) applies Floyd-Steinberg
    # dithering by default, which made this function identical to the
    # dedicated floyd_steinberg_dithering function. Pure median cut maps each
    # pixel directly to its nearest palette color.
    quantized_img = image.convert(
        "P", palette=Image.ADAPTIVE, colors=n_colors, dither=Image.NONE
    )
    return quantized_img.convert("RGB")
# Floyd-Steinberg Dithering
def floyd_steinberg_dithering(image, n_colors):
    """Quantize an image to n_colors colors with Floyd-Steinberg error diffusion.

    Parameters:
    - image: input PIL Image object
    - n_colors: target number of colors

    Returns:
    - quantized (dithered) PIL Image object in RGB mode
    """
    # Floyd-Steinberg is Pillow's default dither for RGB -> P conversion;
    # passing it explicitly documents the intent and distinguishes this
    # function from the non-dithered median_cut_quantization.
    quantized_img = image.convert(
        "P", palette=Image.ADAPTIVE, colors=n_colors, dither=Image.FLOYDSTEINBERG
    )
    return quantized_img.convert("RGB")
# Color quantization with perceptual channel weighting
def median_cut_perceptual_weighting(image, n_colors):
    """Quantize an image to n_colors colors using perceptually weighted clustering.

    Channels are scaled by luminance weights (ITU-R BT.601: 0.299, 0.587, 0.114)
    before clustering, so color differences the eye is more sensitive to (green)
    influence the palette more than those it is less sensitive to (blue).

    BUG FIX: the original implementation computed the weighted array and then
    discarded it, quantizing the unweighted image — the weighting had no effect
    and the function was identical to plain median cut. The weighted pixels are
    now actually clustered, and the centroids are mapped back to unweighted RGB.

    Parameters:
    - image: input PIL Image object
    - n_colors: target number of colors

    Returns:
    - quantized PIL Image object in RGB mode
    """
    img = image.convert("RGB")
    img_np = np.array(img)
    h, w, d = img_np.shape
    flat = img_np.reshape((h * w, d)).astype(float)
    # Perceptual weights for the R, G, B channels (BT.601 luma coefficients).
    perceptual_weights = np.array([0.299, 0.587, 0.114])
    # Cluster in the weighted color space so distances are perceptually scaled.
    weighted = flat * perceptual_weights
    kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(weighted)
    # Undo the weighting on the centroids to recover displayable RGB colors.
    centroids = kmeans.cluster_centers_ / perceptual_weights
    quantized = np.clip(centroids[kmeans.labels_], 0, 255).reshape((h, w, d))
    return Image.fromarray(np.uint8(quantized))