from PIL import Image

import torch.hub
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Model -----------------------------------------------------------------
# Load the SegFormer++ model from the KieDani/SegformerPlusPlus torch.hub
# repository: b5 backbone with the 'bsm_hq' token-merging (ToMe) strategy,
# weights fetched from the given checkpoint URL.
# NOTE(review): out_channels=19 presumably matches the Cityscapes label set —
# confirm against the upstream repository.
print("Loading SegFormer++ Model...")
model = torch.hub.load(
    'KieDani/SegformerPlusPlus',
    'segformer_plusplus',
    pretrained=True,
    backbone='b5',
    tome_strategy='bsm_hq',
    checkpoint_url='https://mediastore.rz.uni-augsburg.de/get/yzE65lzm6N/',
    out_channels=19,
)
# Evaluation mode: disables dropout and uses running batch-norm statistics.
model.eval()
print("Model loaded successfully.")
|
|
|
|
|
# --- Preprocessing ---------------------------------------------------------
# Fetch the matching input pipeline ('data_transforms') from the same hub
# repository, so preprocessing stays consistent with the model's training.
print("Loading data transformations...")
transform = torch.hub.load(
    'KieDani/SegformerPlusPlus',
    'data_transforms',
)
print("Transformations loaded successfully.")
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Input preparation -----------------------------------------------------
# Build a solid-red 1300x1300 RGB image as a stand-in for real data.
print("Creating a dummy image for demonstration...")
dummy_image = Image.new('RGB', (1300, 1300), color='red')
print("Original image size:", dummy_image.size)

# Apply the hub-provided transforms and add a leading batch dimension;
# presumably the result is (1, C, H, W) — verify against the transform.
print("Applying transformations to the image...")
input_tensor = transform(dummy_image).unsqueeze(0)
print("Transformed image tensor size:", input_tensor.shape)
|
|
|
|
|
# --- Inference -------------------------------------------------------------
print("Running inference...")
# Forward pass only: disable autograd bookkeeping to save memory/compute.
with torch.no_grad():
    output = model(input_tensor)

# Drop the batch dimension added before the forward pass.
output_tensor = output.squeeze(0)

print(f"\nInference completed. Output tensor size: {output_tensor.shape}")

# Per-pixel class prediction: argmax over dim 0, which after the squeeze is
# the class/channel dimension.
segmentation_map = torch.argmax(output_tensor, dim=0)
print(f"Size of the generated segmentation map: {segmentation_map.shape}")
|
|
|