# Part 2 of the manga/comic text-removal script.
def setup_mat_inpainter(self):
    """Initialize the MAT (Mask-Aware Transformer) inpainting backend.

    Currently a no-op placeholder: wiring it up would require
    downloading the MAT model weights.
    """
    # TODO: download/load MAT weights and register the inpainter here.
    pass
def setup_lama_inpainter(self):
    """Initialize the LaMa (Large Mask Inpainting) backend.

    Currently a no-op placeholder: wiring it up would require
    downloading the LaMa model weights.
    """
    # TODO: download/load LaMa weights and register the inpainter here.
    pass
Args:
image: Input image (H, W, 3)
mask: Binary mask (H, W) where 255 = inpaint area
method: 'auto', 'sd', 'opencv', 'telea', 'ns', 'edge_connect'
Returns:
Inpainted image
"""
if method == 'auto':
# Choose best method based on mask characteristics
method = self._choose_best_method(image, mask)
# Inpaint
result = self.inpainters['sd'](
prompt=prompt,
negative_prompt=negative_prompt,
image=pil_image,
mask_image=pil_mask,
num_inference_steps=25,
guidance_scale=7.5,
strength=0.8
).images[0]
except Exception as e:
print(f"SD inpainting failed: {e}")
return self._inpaint_with_opencv(image, mask, 'telea')
return result
except Exception as e:
print(f"OpenCV inpainting failed: {e}")
return image
# Detect edges
edges = cv2.Canny(gray, 50, 150)
# Blend results
alpha = 0.6
result = cv2.addWeighted(inpaint1, alpha, inpaint2, 1-alpha, 0)
return result
except Exception as e:
print(f"Edge connect inpainting failed: {e}")
return self._inpaint_with_opencv(image, mask, 'telea')
# Extract region
region = image[y:y+h, x:x+w]
region_mask = mask[y:y+h, x:x+w]
# Blend back
result[y:y+h, x:x+w] = filled_region
return result
except Exception as e:
print(f"Patch match inpainting failed: {e}")
return self._inpaint_with_opencv(image, mask, 'telea')
return result
if not np.any(valid_pixels):
return None
best_match = None
best_score = float('inf')
return best_match
class MangaTextRemover:
    """Main class for comprehensive manga/comic text removal"""

    def __init__(self):
        # Text-region detector (AdvancedTextDetector is defined elsewhere in this file).
        self.detector = AdvancedTextDetector()
        # Inpainting backend used to fill the removed text regions.
        self.inpainter = AdvancedInpainter()
        # Per-image processing statistics, populated by process_image().
        self.processing_stats = {}
Args:
image_path: Path to input image
output_path: Path for output image (optional)
detection_confidence: Minimum confidence for text detection
inpaint_method: Inpainting method to use
expand_mask: Pixels to expand mask around detected text
show_process: Whether to show processing steps
Returns:
Dictionary with processing results and statistics
"""
print(f"🎯 Processing image: {image_path}")
start_time = time.time()
# Load image
image = cv2.imread(image_path)
if image is None:
raise ValueError(f"Could not load image: {image_path}")
original_image = image.copy()
# Step 3: Inpainting
print("🔄 Step 3: Removing text and inpainting...")
result = self.inpainter.inpaint_comprehensive(image, mask, inpaint_method)
# Step 4: Post-processing
print("✨ Step 4: Post-processing...")
result = self._post_process_result(original_image, result, mask)
# Calculate statistics
processing_time = time.time() - start_time
stats = {
'detections_count': len(detections),
'processing_time': processing_time,
'mask_area_ratio': np.sum(mask > 0) / (mask.shape[0] * mask.shape[1]),
'detection_methods': list(set([d['method'] for d in detections])),
'inpaint_method': inpaint_method,
'image_size': image.shape[:2]
}
if show_process:
self._show_processing_results(original_image, detections, mask, result,
stats)
return {
'result': result,
'original': original_image,
'mask': mask,
'detections': detections,
'stats': stats
}
# Add to mask
mask[y1:y2, x1:x2] = 255
return mask
# Smooth transition
final_result = (result * blurred_mask + original * (1 -
blurred_mask)).astype(np.uint8)
# Color correction
final_result = self._match_color_distribution(original, final_result, mask)
# Sharpening
final_result = self._apply_sharpening(final_result)
return final_result
# Calculate statistics
orig_mean = np.mean(orig_channel)
orig_std = np.std(orig_channel)
result_mean = np.mean(result_channel)
result_std = np.std(result_channel)
return result.astype(np.uint8)
except Exception as e:
print(f"Color matching failed: {e}")
return result
except Exception as e:
print(f"Sharpening failed: {e}")
return image
# Mask
axes[0, 1].imshow(mask, cmap='gray')
axes[0, 1].set_title('Inpainting Mask')
axes[0, 1].axis('off')
# Result
axes[1, 0].imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
axes[1, 0].set_title('Text Removed Result')
axes[1, 0].axis('off')
# Comparison
comparison = np.hstack([
cv2.cvtColor(original, cv2.COLOR_BGR2RGB),
cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
])
axes[1, 1].imshow(comparison)
axes[1, 1].set_title('Before vs After')
axes[1, 1].axis('off')
plt.tight_layout()
plt.show()
# Print statistics
print(f"\n📊 Processing Statistics:")
print(f" • Text regions detected: {stats['detections_count']}")
print(f" • Processing time: {stats['processing_time']:.2f} seconds")
print(f" • Mask area ratio: {stats['mask_area_ratio']:.1%}")
print(f" • Detection methods: {', '.join(stats['detection_methods'])}")
print(f" • Inpainting method: {stats['inpaint_method']}")
print(f" • Image size: {stats['image_size'][1]}x{stats['image_size']
[0]}")
class BatchProcessor:
    """Batch processing for multiple images"""

    def __init__(self):
        # One MangaTextRemover instance reused across every image in a batch,
        # so models are loaded only once.
        self.remover = MangaTextRemover()
results = {}
failed = []
# Process image
result = self.remover.process_image(
str(image_file),
output_path,
show_process=False,
**kwargs
)
results[str(image_file)] = result['stats']
except Exception as e:
print(f"❌ Failed to process {image_file}: {e}")
failed.append(str(image_file))
return {
'processed': len(results),
'failed': len(failed),
'failed_files': failed,
'results': results
}
def setup_environment():
    """One-click setup for Google Colab"""
    print("🚀 Setting up Manga Text Removal environment...")
    # install_all_dependencies() is defined elsewhere in this file.
    install_all_dependencies()
    print("✅ Environment setup complete!")
Args:
image_path: Path to input image
output_path: Path for output (optional)
confidence: Detection confidence threshold
Returns:
Path to output image
"""
if output_path is None:
name, ext = os.path.splitext(image_path)
output_path = f"{name}_no_text{ext}"
remover = MangaTextRemover()
result = remover.process_image(image_path, output_path, confidence)
return output_path
Args:
input_folder: Path to input folder
output_folder: Path to output folder (optional)
confidence: Detection confidence threshold
Returns:
Processing statistics
"""
if output_folder is None:
output_folder = f"{input_folder}_cleaned"
processor = BatchProcessor()
return processor.process_folder(input_folder, output_folder,
detection_confidence=confidence)
def demo_usage():
    """Demonstrate how to use the system"""
    # NOTE(review): usage examples 1 and 2 appear to be missing from this
    # copy of the file — the printed list below starts at item 3.
    print("""
🎯 Manga/Comic Text Removal System - Usage Examples
3. Process folder:
stats = remove_text_from_folder('manga_folder/', 'clean_manga_folder/')
4. Advanced usage:
remover = MangaTextRemover()
result = remover.process_image('image.jpg', confidence=0.4)
5. Batch processing:
processor = BatchProcessor()
stats = processor.process_folder('input/', 'output/')
📝 Tips:
- Lower confidence (0.1-0.3) detects more text but may have false positives
- Higher confidence (0.4-0.8) is more selective but may miss some text
- Use 'sd' inpainting method for best quality (requires GPU)
- Use 'telea' or 'ns' for faster processing
""")
def upload_and_process():
    """Helper function for Google Colab file upload"""
    try:
        from google.colab import files
        # Upload files
        print("📤 Please select image files to upload:")
        uploaded = files.upload()
        results = []
        # NOTE(review): a loop over `uploaded` (which would define `filename`,
        # `output_path`, and `result`) appears to be missing from this copy of
        # the file — the names referenced below are otherwise unbound.
        results.append({
            'input': filename,
            'output': output_path,
            'stats': result['stats']
        })
        # Download results
        print("\n📥 Download processed images:")
        for result in results:
            files.download(result['output'])
        return results
    except ImportError:
        # google.colab is only importable inside a Colab runtime.
        print("This function is only available in Google Colab")
        return None
def create_gradio_interface():
    """Create Gradio web interface for easy use"""
    try:
        import gradio as gr
        # NOTE(review): the header of the inner callback
        # `def process_image_gradio(...)` (and the code that prepares
        # `temp_input`/`temp_output`) appears to be missing from this copy
        # of the file — `gr.Interface` below references it by name.
        try:
            # Process
            remover = MangaTextRemover()
            result = remover.process_image(
                temp_input,
                temp_output,
                detection_confidence=confidence,
                inpaint_method=inpaint_method,
                show_process=False
            )
            # Load result
            result_image = Image.open(temp_output)
        except Exception as e:
            # Surface the failure to the UI instead of crashing the server.
            return None, f"Error: {str(e)}"
        # Create interface
        interface = gr.Interface(
            fn=process_image_gradio,
            inputs=[
                gr.Image(type="pil", label="Upload Manga/Comic Image"),
                gr.Slider(0.1, 0.9, value=0.3, label="Detection Confidence"),
                gr.Dropdown(
                    ["auto", "sd", "telea", "ns", "edge_connect"],
                    value="auto",
                    label="Inpainting Method"
                )
            ],
            outputs=[
                gr.Image(type="pil", label="Text Removed"),
                gr.Textbox(label="Processing Stats")
            ],
            title="Manga/Comic Text Removal",
            description="Upload a manga or comic image to automatically detect and remove text while preserving the artwork."
        )
        return interface
    except ImportError:
        print("Gradio not available. Install with: pip install gradio")
        return None
if __name__ == "__main__":
    # Show usage information
    demo_usage()
    print("\n" + "="*50)
    print("🎯 MANGA/COMIC TEXT REMOVAL SYSTEM READY!")
    print("="*50)
    # Auto-setup if requested — interactive prompt; requires a console/Colab cell.
    setup_choice = input("\nSetup environment now? (y/n): ").lower()
    if setup_choice == 'y':
        setup_environment()
except ImportError:
print("